diff --git "a/1366.jsonl" "b/1366.jsonl" new file mode 100644--- /dev/null +++ "b/1366.jsonl" @@ -0,0 +1,692 @@ +{"seq_id": "13279785181", "text": "from sklearn.naive_bayes import ComplementNB\nimport pickle\n\ndef train(dataset): \n\tX = dataset[0] # vecteur d'image\n\ty = dataset[1] # vecteur de classes\n\n\talgo = ComplementNB()\n\ttrain = algo.fit(X, y)\n\tpickle.dump(train, open(\"trainModel.joblib\", \"wb\"))\n\n\ndef predict (dataset):\n\ttrain = pickle.load(open(\"trainModel.joblib\", \"rb\"))\n\tresult = train.predict(dataset)\n\t\n\treturn result\n", "repo_name": "Roger-ELIAS/Apprentissage-Automatique", "sub_path": "naiveBayes.py", "file_name": "naiveBayes.py", "file_ext": "py", "file_size_in_byte": 390, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sklearn.naive_bayes.ComplementNB", "line_number": 8, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 10, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "42837766106", "text": "import sys\nimport os\nimport math\nimport random\nfrom sklearn import datasets\nimport numpy as np\nfrom utils.distances import ciede_distance\nfrom utils.distances import euclidean_distance\nfrom utils.PCA import PCA\n\n# Import helper functions\n# dir_path = os.path.dirname(os.path.realpath(__file__))\n# sys.path.insert(0, dir_path + \"/../utils\")\n# from data_manipulation import normalize\n# from data_operation import euclidean_distance\n#\n# sys.path.insert(0, dir_path + \"/../unsupervised_learning/\")\n# from principal_component_analysis import PCA\n\n\nclass PAM():\n \"\"\"A simple clustering method that forms k clusters by first assigning\n samples to the closest medoids, and then swapping medoids with non-medoid\n samples if the total distance (cost) between the cluster members and their medoid\n is smaller than prevoisly.\n\n\n Parameters:\n -----------\n k: int\n The number of clusters the algorithm will form.\n \"\"\"\n\n def __init__(self, k=2):\n self.k = k\n\n # Initialize the medoids as random samples\n def _init_random_medoids(self, X):\n n_samples, n_features = np.shape(X) #кол-во примеров, размерность параметров\n\n medoids = np.zeros((self.k, n_features)) #массив из нулей, размерность - кол-во кластеров Х кол-во фич\n\n #while (len(np.unique(medoids, axis=0)) != self.k):\n for i in range(self.k):\n medoid = X[np.random.choice(range(n_samples), replace=False)]\n medoids[i] = medoid\n return medoids\n\n # Return the index of the closest medoid to the sample\n def _closest_medoid(self, sample, medoids):\n closest_i = None\n closest_distance = float(\"inf\")\n for i, medoid in enumerate(medoids):\n #distance = euclidean_distance(sample, medoid)\n distance = ciede_distance(sample, medoid)\n if distance < closest_distance:\n closest_i = i\n closest_distance = distance\n return closest_i\n\n # Assign the samples to the closest medoids to create clusters\n def _create_clusters(self, X, medoids):\n clusters = [[] for _ in range(self.k)]\n for sample_i, sample in enumerate(X): #sample_i - счетчик, sample - элемент в X\n medoid_i = self._closest_medoid(sample, medoids)\n clusters[medoid_i].append(sample_i)\n if((sample_i + 1) % 10000 == 0):\n print(sample_i)\n return clusters\n\n # Calculate the cost (total distance between samples and their medoids)\n def _calculate_cost(self, X, clusters, medoids):\n cost = 0\n # For each cluster\n for i, cluster in enumerate(clusters):\n medoid = 
medoids[i]\n for sample_i in cluster:\n # Add distance between sample and medoid as cost\n #cost += euclidean_distance(X[sample_i], medoid)\n cost += ciede_distance(X[sample_i], medoid)\n #print(cost)\n\n return cost\n\n # Returns a list of all samples that are not currently medoids\n def _get_non_medoids(self, X, medoids):\n non_medoids = []\n for sample in X:\n if not sample in medoids:\n non_medoids.append(sample)\n return non_medoids\n\n # Classify samples as the index of their clusters\n def _get_cluster_labels(self, clusters, X):\n # One prediction for each sample\n y_pred = np.zeros(np.shape(X)[0])\n for cluster_i in range(len(clusters)):\n cluster = clusters[cluster_i]\n for sample_i in cluster:\n y_pred[sample_i] = cluster_i\n return y_pred\n\n\n # Do Partitioning Around Medoids and return the cluster labels\n def fit(self, X):\n # Initialize medoids randomly\n medoids = self._init_random_medoids(X) #содержит семплы\n # Assign samples to closest medoids\n clusters = self._create_clusters(X, medoids) #содержит индексы X\n\n # Calculate the initial cost (total distance between samples and\n # corresponding medoids)\n cost = self._calculate_cost(X, clusters, medoids)\n\n # Iterate until we no longer have a cheaper cost\n i = 0\n while True:\n print(\"iteration: \" + i.__str__())\n i = i + 1\n best_medoids = medoids\n lowest_cost = cost\n for medoid in medoids:\n # Get all non-medoid samples\n non_medoids = self._get_non_medoids(X, medoids)\n # Calculate the cost when swapping medoid and samples\n for sample in non_medoids:\n # Swap sample with the medoid\n new_medoids = medoids.copy()\n #new_medoids[medoids == medoid] = sample\n new_medoids[np.argwhere(medoids == medoid)[0][0]] = sample\n # Assign samples to new medoids\n new_clusters = self._create_clusters(X, new_medoids)\n # Calculate the cost with the new set of medoids\n new_cost = self._calculate_cost(X, new_clusters, new_medoids)\n # If the swap gives us a lower cost we save the medoids and cost\n if new_cost < lowest_cost:\n lowest_cost = new_cost\n best_medoids = new_medoids\n # If there was a swap that resultet in a lower cost we save the\n # resulting medoids from the best swap and the new cost\n if lowest_cost < cost:\n cost = lowest_cost\n medoids = best_medoids\n # Else finished\n else:\n print(lowest_cost)\n break\n\n final_clusters = new_clusters#self._create_clusters(X, medoids)\n self.cluster_medoids_ = medoids\n # Return the samples cluster indices as labels\n return self._get_cluster_labels(final_clusters, X)\n\n def predict(self, X):\n final_clusters = self._create_clusters(X, self.cluster_medoids_)\n return self._get_cluster_labels(final_clusters, X)\n\n\ndef main():\n # Load the dataset\n X, y = datasets.make_blobs()\n\n # Cluster the data using K-Medoids\n clf = PAM(k=3)\n clf.fit(X[:20])\n y_pred = clf.predict(X)\n print(y_pred)\n print(clf.cluster_medoids_)\n\n # Project the data onto the 2 primary principal components\n pca = PCA()\n pca.plot_in_2d(X, y_pred)\n pca.plot_in_2d(X, y)\n\n\nif __name__ == \"__main__\":\n main()", "repo_name": "ipospelov/color-constancy", "sub_path": "k_medoids.py", "file_name": "k_medoids.py", "file_ext": "py", "file_size_in_byte": 6576, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.shape", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 45, "usage_type": "call"}, 
{"api_name": "numpy.random", "line_number": 45, "usage_type": "attribute"}, {"api_name": "utils.distances.ciede_distance", "line_number": 55, "usage_type": "call"}, {"api_name": "utils.distances.ciede_distance", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 130, "usage_type": "call"}, {"api_name": "sklearn.datasets.make_blobs", "line_number": 161, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 161, "usage_type": "name"}, {"api_name": "utils.PCA.PCA", "line_number": 171, "usage_type": "call"}]} +{"seq_id": "36540831363", "text": "import numpy as np\nfrom sklearn.decomposition import PCA\nf = open(\"data/heartAttackClean.csv\")\nf.readline() # skip the header\ndata = np.loadtxt(f, delimiter=\",\")\npca = PCA(n_components=data.shape[1])\npca.fit(data)\n\nprint(pca.explained_variance_ratio_)\n\nprint(pca.singular_values_)\n", "repo_name": "HenryDykhne/heartAttackPrediction", "sub_path": "pca.py", "file_name": "pca.py", "file_ext": "py", "file_size_in_byte": 282, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.loadtxt", "line_number": 5, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "25057213297", "text": "#ウインドウ枠(行と列)の固定\r\n\r\nfrom openpyxl import load_workbook\r\n\r\nwb = load_workbook(\"作業時間.xlsx\")\r\nws = wb.active\r\n\r\nws.freeze_panes = \"E4\"\r\n# A列以外の列で2行目以降のセル番地を指定すると、指定列の前列と前の行までを固定する\r\n\r\nwb.save(\"作業時間_変更後.xlsx\")\r\n", "repo_name": "jun-yoshiyoshi/python_for_excel", "sub_path": "freeze_column_row.py", "file_name": "freeze_column_row.py", "file_ext": "py", "file_size_in_byte": 333, "program_lang": "python", "lang": "ja", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "37404832465", "text": "import pytest\nfrom love_and_stats import utils\n\nfrom itertools import permutations\nimport statistics\n\n\ndef brute_force_expt_score(mar_list):\n return statistics.mean(\n utils.play_game(item_ranks, mar_list)\n for item_ranks in permutations(range(len(mar_list)))\n )\n\n\n@pytest.mark.parametrize('some_mar_list', list(utils.gen_mar_lists(4)))\ndef test_expt_score(some_mar_list):\n assert (\n float(utils.expt_score(some_mar_list))\n == brute_force_expt_score(some_mar_list)\n )\n", "repo_name": "CrepeGoat/love-and-stats", "sub_path": "tests/test_utils.py", "file_name": "test_utils.py", "file_ext": "py", "file_size_in_byte": 507, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "statistics.mean", "line_number": 9, "usage_type": "call"}, {"api_name": "love_and_stats.utils.play_game", "line_number": 10, "usage_type": "call"}, {"api_name": "love_and_stats.utils", "line_number": 10, "usage_type": "name"}, {"api_name": "itertools.permutations", "line_number": 11, "usage_type": "call"}, {"api_name": "love_and_stats.utils.expt_score", "line_number": 18, "usage_type": "call"}, {"api_name": "love_and_stats.utils", "line_number": 18, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 15, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 15, "usage_type": "attribute"}, {"api_name": 
"love_and_stats.utils.gen_mar_lists", "line_number": 15, "usage_type": "call"}, {"api_name": "love_and_stats.utils", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "7905665522", "text": "from behave import given, when, then\nimport requests\n\nrequest_headers = {}\nrequest_bodies = {}\nresponse_codes = {}\napi_url = None\n\n\n@given('a pagina de registro da praga')\ndef step_impl_given(context):\n global api_url\n api_url = 'https://smartvit-pest-stg.herokuapp.com/pest'\n print('url :'+api_url)\n\n\n@when('ele registar os campos da praga')\ndef step_impl_when(context):\n request_bodies['POST'] = {\"idVineyard\": \"5fad331b38b2670687db57e2\",\n \"type\": \"cigarras\",\n \"startTime\": \"12-11-2020\"}\n response = requests.post(\n api_url,\n json=request_bodies['POST']\n )\n statuscode = response.status_code\n response_codes['POST'] = statuscode\n\n\n@then('os dados devem passar pelo servico atraves do BFF e armazenar no banco')\ndef step_impl_then(context):\n print('Post rep code ;'+str(response_codes['POST']))\n assert response_codes['POST'] == 200", "repo_name": "PI2-viticultura/SmartVit-Pest", "sub_path": "app/features/steps/test_pest.py", "file_name": "test_pest.py", "file_ext": "py", "file_size_in_byte": 999, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "behave.given", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 22, "usage_type": "call"}, {"api_name": "behave.when", "line_number": 17, "usage_type": "call"}, {"api_name": "behave.then", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "17388795153", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 30 10:43:02 2019\n\n@author: lindsay.hu\n\"\"\"\n\n'''\n【课程3.3】 图表的样式参数\n\nlinestyle、style、color、marker\n \n'''\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#不打印警告信息\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n#linestyle参数\n\nplt.plot([i**2 for i in range(100)],linestyle = '-.')\n# '-' solid line style\n# '--' dashed line style\n# '-.' dash-dot line style\n# ':' dotted line style\n\n# marker参数\n\ns = pd.Series(np.random.randn(100).cumsum())\ns.plot(linestyle='--',marker='2')\n# '.' 
point marker\n# ',' pixel marker\n# 'o' circle marker\n# 'v' triangle_down marker\n# '^' triangle_up marker\n# '<' triangle_left marker\n# '>' triangle_right marker\n# '1' tri_down marker\n# '2' tri_up marker\n# '3' tri_left marker\n# '4' tri_right marker\n# 's' square marker\n# 'p' pentagon marker\n# '*' star marker\n# 'h' hexagon1 marker\n# 'H' hexagon2 marker\n# '+' plus marker\n# 'x' x marker\n# 'D' diamond marker\n# 'd' thin_diamond marker\n# '|' vline marker\n# '_' hline marker\n\n# color参数\n\nplt.hist(np.random.randn(100),color='g',alpha=0.8)\nplt.hist(np.random.randn(100),color='#FFBBB4',alpha=0.8)#也可以自己找颜色参数\n# alpha:0-1,透明度\n# 常用颜色简写:red-r, green-g, black-k, blue-b, yellow-y\n\n \ndf = pd.DataFrame(np.random.randn(1000, 4),columns=list('ABCD'))\ndf = df.cumsum()\ndf.plot(style='--',alpha=0.8,colormap='GnBu')\n# colormap:颜色板,包括:\n# Accent, Accent_r, Blues, Blues_r, BrBG, BrBG_r, BuGn, BuGn_r, BuPu, BuPu_r, CMRmap, CMRmap_r, Dark2, Dark2_r, GnBu, GnBu_r, Greens, Greens_r,\n# Greys, Greys_r, OrRd, OrRd_r, Oranges, Oranges_r, PRGn, PRGn_r, Paired, Paired_r, Pastel1, Pastel1_r, Pastel2, Pastel2_r, PiYG, PiYG_r, \n# PuBu, PuBuGn, PuBuGn_r, PuBu_r, PuOr, PuOr_r, PuRd, PuRd_r, Purples, Purples_r, RdBu, RdBu_r, RdGy, RdGy_r, RdPu, RdPu_r, RdYlBu, RdYlBu_r, \n# RdYlGn, RdYlGn_r, Reds, Reds_r, Set1, Set1_r, Set2, Set2_r, Set3, Set3_r, Spectral, Spectral_r, Wistia, Wistia_r, YlGn, YlGnBu, YlGnBu_r, \n# YlGn_r, YlOrBr, YlOrBr_r, YlOrRd, YlOrRd_r, afmhot, afmhot_r, autumn, autumn_r, binary, binary_r, bone, bone_r, brg, brg_r, bwr, bwr_r, \n# cool, cool_r, coolwarm, coolwarm_r, copper, copper_r, cubehelix, cubehelix_r, flag, flag_r, gist_earth, gist_earth_r, gist_gray, gist_gray_r,\n# gist_heat, gist_heat_r, gist_ncar, gist_ncar_r, gist_rainbow, gist_rainbow_r, gist_stern, gist_stern_r, gist_yarg, gist_yarg_r, gnuplot, \n# gnuplot2, gnuplot2_r, gnuplot_r, gray, gray_r, hot, hot_r, hsv, hsv_r, inferno, inferno_r, jet, jet_r, magma, magma_r, nipy_spectral, \n# nipy_spectral_r, ocean, ocean_r, pink, pink_r, plasma, plasma_r, prism, prism_r, rainbow, rainbow_r, seismic, seismic_r, spectral, \n# spectral_r ,spring, spring_r, summer, summer_r, terrain, terrain_r, viridis, viridis_r, winter, winter_r\n\n# 其他参数见“颜色参数.docx”\n\n# style参数,可以包含linestyle,marker,color\n\nts = pd.Series(np.random.randn(1000).cumsum(), index=pd.date_range('1/1/2000', periods=1000))\nts.plot(style='--g.',grid=True)\n# style → 风格字符串,这里包括了linestyle(-),marker(.),color(g)\n# plot()内也有grid参数\n\n#整体风格样式\n\nimport matplotlib.style as psl\n#查看样式表\nprint(plt.style.available)\n\n# 一旦选用样式后,所有图表都会有样式,重启后才能关掉\npsl.use('ggplot')\n\nts = pd.Series(np.random.randn(1000).cumsum(), index=pd.date_range('1/1/2000', periods=1000))\nts.plot(style = '--g.',grid = True,figsize=(10,6))\n\n\n", "repo_name": "hulinjuan/Python_data_analysis_action_netease", "sub_path": "03 重点工具掌握:数据分析核心技巧/第3章 图表绘制工具:Matplotlib/Part1 基本知识/课程3.3 图表的样式参数.py", "file_name": "课程3.3 图表的样式参数.py", "file_ext": "py", "file_size_in_byte": 3723, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "warnings.simplefilter", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 
33, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "numpy.random.randn", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 60, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "numpy.random.randn", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pandas.date_range", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 94, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.style.use", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.style", "line_number": 97, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pandas.date_range", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "27569775423", "text": "import re\nfrom collections import defaultdict\n\nwith open('input_04.txt') as f:\n\tlines = f.read().splitlines()\n\nguards = defaultdict(lambda:[0]*59)\nid = 0\nfalls = 0\nfor line in sorted(lines):\t\n\tif 'Guard' in line:\n\t\tid = int(re.findall('#(\\d+)', line)[0])\n\t\tcontinue\n\tminute = int(re.findall(':(\\d+)\\]', line)[0])\n\tif 'falls asleep' in line: \n\t\tfalls = minute\n\telse:\n\t\tfor m in range(falls, minute):\n\t\t\tguards[id][m] += 1\n\ndef star1(guards):\n\t_, guard_id = max((sum(minutes), guard) for (guard, minutes) in guards.items())\n\t_, chosen_minute = max((m, idx) for (idx, m) in enumerate(guards[guard_id]))\n\treturn guard_id * chosen_minute\n\ndef star2(guards):\t\n\t_, chosen_minute, guard_id = max((minute, idx, guard) for (guard, minutes) in guards.items() for (idx, minute) in enumerate(minutes))\n\treturn guard_id * chosen_minute\n\nprint(\"Star 1:\", star1(guards))\nprint(\"Star 2:\", star2(guards))", "repo_name": "lukaszroz/advent-of-code-2018", "sub_path": "day04.py", "file_name": "day04.py", "file_ext": "py", "file_size_in_byte": 886, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "collections.defaultdict", "line_number": 7, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 12, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "10017250962", "text": "from scipy.spatial.distance import cdist\nimport numpy as np\n\n\ndef mds(X, no_dims):\n \"\"\"\n This function performs MDS embedding.\n\n Parameters are:\n\n 'X' - N by D matrix. 
Each row in X represents an observation.\n 'no_dims' - A positive integer specifying the number of dimensions of the representation Y.\n\n \"\"\"\n n = X.shape[0]\n D = cdist(X, X) ** 2\n sumd = np.mean(D, axis=1)\n sumD = np.mean(sumd)\n B = np.zeros((n, n))\n for i in range(n):\n for j in range(i+1, n):\n B[i][j] = -0.5 * (D[i][j] - sumd[i] - sumd[j] + sumD)\n B[j][i] = B[i][j]\n value, U = np.linalg.eig(B)\n embedX = U[:, :no_dims] @ np.diag(np.sqrt(np.abs(value[:no_dims])))\n return embedX\n", "repo_name": "ZPGuiGroupWhu/scml", "sub_path": "scml_py/mds.py", "file_name": "mds.py", "file_ext": "py", "file_size_in_byte": 742, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "scipy.spatial.distance.cdist", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.diag", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "8290868315", "text": "from enum import Enum\nfrom typing import TypedDict\n\nfrom sqlalchemy import Table\nfrom sqlalchemy.orm import DeclarativeBase\n\n\nclass CheckResultType(str, Enum):\n    \"\"\"This class contains the types of a check result.\"\"\"\n\n    PASSED = \"PASSED\"\n    FAILED = \"FAILED\"\n    # A check is skipped from another check's result.\n    SKIPPED = \"SKIPPED\"\n    # A check is disabled from the user configuration.\n    DISABLED = \"DISABLED\"\n    # The result of the check is unknown or Macaron cannot resolve the\n    # implementation of this check.\n    UNKNOWN = \"UNKNOWN\"\n\n\nclass CheckResult(TypedDict):\n    \"\"\"This class stores the result of a check in a dictionary.\"\"\"\n\n    check_id: str\n    check_description: str\n    # The string representations of the slsa requirements and their\n    # corresponding slsa level.\n    slsa_requirements: list[str]\n    # If an element in the justification is a string,\n    # it will be displayed as a string, if it is a mapping,\n    # the value will be rendered as a hyperlink in the html report.\n    justification: list[str | dict[str, str]]\n    # human_readable_justification: str\n    # result_values: dict[str, str | float | int] | list[dict[str, str | float | int]]\n    result_tables: list[DeclarativeBase | Table]\n    # recommendation: str\n    result_type: CheckResultType\n\n\nclass SkippedInfo(TypedDict):\n    \"\"\"This class stores the information about a skipped check.\"\"\"\n\n    check_id: str\n    suppress_comment: str\n\n\ndef get_result_as_bool(check_result_type: CheckResultType) -> bool:\n    \"\"\"Return the CheckResultType as bool.\n\n    This method returns True only if the result type is PASSED; otherwise it returns False.\n\n    Parameters\n    ----------\n    check_result_type : CheckResultType\n        The check result type to return the bool value.\n\n    Returns\n    -------\n    bool\n    \"\"\"\n    if check_result_type in (CheckResultType.FAILED, CheckResultType.UNKNOWN):\n        return False\n\n    return True\n", "repo_name": "laurentsimon/macaron", "sub_path": "src/macaron/slsa_analyzer/checks/check_result.py", "file_name": "check_result.py", "file_ext": "py", "file_size_in_byte": 1925, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "github-code", "pt": "21", "api": [{"api_name": "enum.Enum", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.TypedDict", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.DeclarativeBase", "line_number": 36, "usage_type": "name"}, {"api_name": "sqlalchemy.Table", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.TypedDict", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "11944012053", "text": "from __future__ import absolute_import, division, print_function\nimport torch\n\n\nclass CascorUtil:\n \"\"\"Class that holds the utilities for training and evaluating Cascor\"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def quickprop_update(weights, nunits, ncandidates, noutputs, deltas, slopes, prevs, epsilon, decay, mu,\n shrink_factor, is_input):\n \"\"\"Perform quickprop as indicated in the original paper by Fahlman\"\"\"\n n_columns = nunits + 1 if is_input else nunits\n n_rows = ncandidates if is_input else noutputs\n next_step = torch.zeros((n_rows, n_columns))\n w = weights[:n_rows, :n_columns]\n d = deltas[:n_rows, :n_columns]\n s = slopes[:n_rows, :n_columns] + (w * decay)\n p = prevs[:n_rows, :n_columns]\n t = torch.where(p == s, torch.ones(p.shape), p - s)\n next_step -= torch.where(d * s <= 0, epsilon * s, torch.zeros(next_step.shape))\n mask1 = (((d < 0) & (s >= shrink_factor * p)) | ((d > 0) & (s <= shrink_factor * p))).type(torch.FloatTensor)\n mask2 = (((d < 0) & (s < shrink_factor * p)) | ((d > 0) & (s > shrink_factor * p))).type(torch.FloatTensor)\n next_step += mu * d * mask1\n next_step += (d * s / t) * mask2\n\n deltas[:n_rows, :n_columns] = next_step\n weights[:n_rows, :n_columns] += next_step\n prevs[:n_rows, :n_columns] = slopes[:n_rows, :n_columns] + (w * decay)\n slopes[:n_rows, :n_columns] *= 0.0\n\n\nclass CascorStats:\n \"\"\"Class that stores data that is required to persist over multiple stages in the training and evaluation process\"\"\"\n def __init__(self, epoch=0):\n self.epoch = epoch\n\n\n", "repo_name": "ianjchiu/Cascor", "sub_path": "Cascor-PyTorch/CascorUtil.py", "file_name": "CascorUtil.py", "file_ext": "py", "file_size_in_byte": 1689, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "21", "api": [{"api_name": "torch.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 25, "usage_type": "attribute"}]} +{"seq_id": "3981348258", "text": "from django.contrib import admin\n\nfrom .models import Address, Coupon, Item, Order, OrderItem, Payment, Refund, UserProfile\n\n\"\"\"\n This function updates the order model in django custum admin just\n like custom select and delete row in django admin\n\"\"\"\n\n\ndef make_refund_accepted(modeladmin, request, queryset):\n queryset.update(refund_requested=False, refund_granted=True)\n\n\n# Default name is just the name of the function\nmake_refund_accepted.short_description = 'Update orders to refund granted'\n\n\n# Customizing admin panel\nclass OrderAdmin(admin.ModelAdmin):\n # Displays the list of all the fields mentioned in django admin panel row\n list_display = [\n 
'user', 'ordered', 'being_delivered', 'received', 'refund_requested',\n 'refund_granted', 'shipping_address', 'billing_address',\n 'payment', 'coupon'\n ]\n\n # displays all the foreign key, on-to-one fields, ... with links\n list_display_links = [\n 'user', 'shipping_address', 'billing_address', 'payment', 'coupon'\n ]\n\n # all the list we can filter by. in the right corner in django admin panel\n list_filter = [\n 'ordered', 'being_delivered', 'received', 'refund_requested',\n 'refund_granted'\n ]\n\n # searchable fields in the admin panel on top\n search_fields = [\n 'user__username', 'ref_code'\n ]\n\n # allows to update order just like custom delete method in admin panel\n actions = [make_refund_accepted]\n\n\nclass AddressAdmin(admin.ModelAdmin):\n list_display = [\n 'user', 'street_address', 'apartment_address', 'country', 'zip',\n 'address_type', 'default'\n ]\n list_filter = ['default', 'address_type']\n\n # user__username looks for field username in user model\n search_fields = [\n 'user__username', 'street_address', 'apartment_address',\n 'zip', 'country'\n ]\n\n\nadmin.site.register(Item)\nadmin.site.register(OrderItem)\nadmin.site.register(Order, OrderAdmin)\nadmin.site.register(Address, AddressAdmin)\nadmin.site.register(Payment)\nadmin.site.register(Coupon)\nadmin.site.register(Refund)\nadmin.site.register(UserProfile)\n", "repo_name": "afroz102/django_e-com", "sub_path": "core/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 2114, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 20, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 48, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 62, "usage_type": "call"}, {"api_name": "models.Item", "line_number": 62, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 62, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 62, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 63, "usage_type": "call"}, {"api_name": "models.OrderItem", "line_number": 63, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 63, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 63, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 64, "usage_type": "call"}, {"api_name": "models.Order", "line_number": 64, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 64, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 64, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 65, "usage_type": "call"}, {"api_name": "models.Address", "line_number": 65, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 65, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 65, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 66, "usage_type": "call"}, {"api_name": "models.Payment", "line_number": 66, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 66, 
"usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 66, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 67, "usage_type": "call"}, {"api_name": "models.Coupon", "line_number": 67, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 67, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 67, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 68, "usage_type": "call"}, {"api_name": "models.Refund", "line_number": 68, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 68, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 68, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 69, "usage_type": "call"}, {"api_name": "models.UserProfile", "line_number": 69, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 69, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "74618348533", "text": "# -*- coding: utf-8 -*-\n\n\nfrom django.shortcuts import redirect\nfrom django.http import HttpResponse\nfrom loginSystem.views import loadLoginPage\nimport json\nfrom .mailserverManager import MailServerManager\nfrom .pluginManager import pluginManager\n\ndef loadEmailHome(request):\n try:\n msM = MailServerManager(request)\n return msM.loadEmailHome()\n except KeyError:\n return redirect(loadLoginPage)\n\ndef createEmailAccount(request):\n try:\n msM = MailServerManager(request)\n return msM.createEmailAccount()\n except KeyError:\n return redirect(loadLoginPage)\n\ndef listEmails(request):\n try:\n msM = MailServerManager(request)\n return msM.listEmails()\n except KeyError:\n return redirect(loadLoginPage)\n\n\ndef fetchEmails(request):\n try:\n msM = MailServerManager(request)\n return msM.fetchEmails()\n except KeyError:\n return redirect(loadLoginPage)\n\ndef submitEmailCreation(request):\n try:\n\n result = pluginManager.preSubmitEmailCreation(request)\n if result != 200:\n return result\n\n msM = MailServerManager(request)\n coreResult = msM.submitEmailCreation()\n\n result = pluginManager.postSubmitEmailCreation(request, coreResult)\n if result != 200:\n return result\n\n return coreResult\n except KeyError:\n return redirect(loadLoginPage)\n\ndef deleteEmailAccount(request):\n try:\n msM = MailServerManager(request)\n return msM.deleteEmailAccount()\n except KeyError:\n return redirect(loadLoginPage)\n\ndef getEmailsForDomain(request):\n try:\n msM = MailServerManager(request)\n return msM.getEmailsForDomain()\n except KeyError as msg:\n data_ret = {'fetchStatus': 0, 'error_message': str(msg)}\n json_data = json.dumps(data_ret)\n return HttpResponse(json_data)\n\ndef submitEmailDeletion(request):\n try:\n\n result = pluginManager.preSubmitEmailDeletion(request)\n if result != 200:\n return result\n\n msM = MailServerManager(request)\n coreResult = msM.submitEmailDeletion()\n\n result = pluginManager.postSubmitEmailDeletion(request, coreResult)\n if result != 200:\n return result\n\n return coreResult\n except KeyError as msg:\n data_ret = {'deleteEmailStatus': 0, 'error_message': str(msg)}\n json_data = json.dumps(data_ret)\n return HttpResponse(json_data)\n\ndef fixMailSSL(request):\n try:\n\n msM = MailServerManager(request)\n coreResult = msM.fixMailSSL()\n\n return coreResult\n except KeyError as msg:\n data_ret = {'deleteEmailStatus': 
0, 'error_message': str(msg)}\n json_data = json.dumps(data_ret)\n return HttpResponse(json_data)\n\ndef emailForwarding(request):\n try:\n msM = MailServerManager(request)\n return msM.emailForwarding()\n except KeyError:\n return redirect(loadLoginPage)\n\ndef fetchCurrentForwardings(request):\n try:\n msM = MailServerManager(request)\n return msM.fetchCurrentForwardings()\n except KeyError as msg:\n data_ret = {'fetchStatus': 0, 'error_message': str(msg)}\n json_data = json.dumps(data_ret)\n return HttpResponse(json_data)\n\ndef submitForwardDeletion(request):\n try:\n\n result = pluginManager.preSubmitForwardDeletion(request)\n if result != 200:\n return result\n\n msM = MailServerManager(request)\n coreResult = msM.submitForwardDeletion()\n\n result = pluginManager.postSubmitForwardDeletion(request, coreResult)\n if result != 200:\n return result\n\n return coreResult\n except KeyError as msg:\n data_ret = {'deleteEmailStatus': 0, 'error_message': str(msg)}\n json_data = json.dumps(data_ret)\n return HttpResponse(json_data)\n\ndef submitEmailForwardingCreation(request):\n try:\n\n result = pluginManager.preSubmitEmailForwardingCreation(request)\n if result != 200:\n return result\n\n msM = MailServerManager(request)\n coreResult = msM.submitEmailForwardingCreation()\n\n result = pluginManager.postSubmitEmailForwardingCreation(request, coreResult)\n if result != 200:\n return result\n\n return coreResult\n except KeyError as msg:\n data_ret = {'createStatus': 0, 'error_message': str(msg)}\n json_data = json.dumps(data_ret)\n return HttpResponse(json_data)\n\n#######\n\ndef changeEmailAccountPassword(request):\n try:\n msM = MailServerManager(request)\n return msM.changeEmailAccountPassword()\n except KeyError:\n return redirect(loadLoginPage)\n\ndef submitPasswordChange(request):\n try:\n\n result = pluginManager.preSubmitPasswordChange(request)\n if result != 200:\n return result\n\n msM = MailServerManager(request)\n coreResult = msM.submitPasswordChange()\n\n result = pluginManager.postSubmitPasswordChange(request, coreResult)\n if result != 200:\n return result\n\n return coreResult\n except KeyError as msg:\n data_ret = {'passChangeStatus': 0, 'error_message': str(msg)}\n json_data = json.dumps(data_ret)\n return HttpResponse(json_data)\n\n#######\n\ndef dkimManager(request):\n try:\n msM = MailServerManager(request)\n return msM.dkimManager()\n except KeyError:\n return redirect(loadLoginPage)\n\ndef fetchDKIMKeys(request):\n try:\n msM = MailServerManager(request)\n return msM.fetchDKIMKeys()\n except KeyError as msg:\n data_ret = {'fetchStatus': 0, 'error_message': str(msg)}\n json_data = json.dumps(data_ret)\n return HttpResponse(json_data)\n\ndef generateDKIMKeys(request):\n try:\n\n result = pluginManager.preGenerateDKIMKeys(request)\n if result != 200:\n return result\n\n msM = MailServerManager(request)\n coreResult = msM.generateDKIMKeys()\n\n result = pluginManager.postGenerateDKIMKeys(request, coreResult)\n if result != 200:\n return result\n\n return coreResult\n except BaseException as msg:\n data_ret = {'generateStatus': 0, 'error_message': str(msg)}\n json_data = json.dumps(data_ret)\n return HttpResponse(json_data)\n\ndef installOpenDKIM(request):\n try:\n msM = MailServerManager(request)\n return msM.installOpenDKIM()\n except KeyError:\n final_dic = {'installOpenDKIM': 0, 'error_message': \"Not Logged In, please refresh the page or login again.\"}\n final_json = json.dumps(final_dic)\n return HttpResponse(final_json)\n\ndef installStatusOpenDKIM(request):\n 
try:\n msM = MailServerManager(request)\n return msM.installStatusOpenDKIM()\n except KeyError:\n final_dic = {'abort':1,'installed':0, 'error_message': \"Not Logged In, please refresh the page or login again.\"}\n final_json = json.dumps(final_dic)\n return HttpResponse(final_json)\n\n\n", "repo_name": "usmannasir/cyberpanel", "sub_path": "mailServer/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7017, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1302, "dataset": "github-code", "pt": "21", "api": [{"api_name": "mailserverManager.MailServerManager", "line_number": 13, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 16, "usage_type": "call"}, {"api_name": "loginSystem.views.loadLoginPage", "line_number": 16, "usage_type": "argument"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 20, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 23, "usage_type": "call"}, {"api_name": "loginSystem.views.loadLoginPage", "line_number": 23, "usage_type": "argument"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 27, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 30, "usage_type": "call"}, {"api_name": "loginSystem.views.loadLoginPage", "line_number": 30, "usage_type": "argument"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 38, "usage_type": "call"}, {"api_name": "loginSystem.views.loadLoginPage", "line_number": 38, "usage_type": "argument"}, {"api_name": "pluginManager.pluginManager.preSubmitEmailCreation", "line_number": 43, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager", "line_number": 43, "usage_type": "name"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 47, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager.postSubmitEmailCreation", "line_number": 50, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager", "line_number": 50, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 56, "usage_type": "call"}, {"api_name": "loginSystem.views.loadLoginPage", "line_number": 56, "usage_type": "argument"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 60, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 63, "usage_type": "call"}, {"api_name": "loginSystem.views.loadLoginPage", "line_number": 63, "usage_type": "argument"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 67, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 71, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 72, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager.preSubmitEmailDeletion", "line_number": 77, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager", "line_number": 77, "usage_type": "name"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 81, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager.postSubmitEmailDeletion", "line_number": 84, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager", "line_number": 84, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 91, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 92, "usage_type": "call"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 97, "usage_type": "call"}, 
{"api_name": "json.dumps", "line_number": 103, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 104, "usage_type": "call"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 108, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 111, "usage_type": "call"}, {"api_name": "loginSystem.views.loadLoginPage", "line_number": 111, "usage_type": "argument"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 115, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 119, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 120, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager.preSubmitForwardDeletion", "line_number": 125, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager", "line_number": 125, "usage_type": "name"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 129, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager.postSubmitForwardDeletion", "line_number": 132, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager", "line_number": 132, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 139, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 140, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager.preSubmitEmailForwardingCreation", "line_number": 145, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager", "line_number": 145, "usage_type": "name"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 149, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager.postSubmitEmailForwardingCreation", "line_number": 152, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager", "line_number": 152, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 159, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 160, "usage_type": "call"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 166, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 169, "usage_type": "call"}, {"api_name": "loginSystem.views.loadLoginPage", "line_number": 169, "usage_type": "argument"}, {"api_name": "pluginManager.pluginManager.preSubmitPasswordChange", "line_number": 174, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager", "line_number": 174, "usage_type": "name"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 178, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager.postSubmitPasswordChange", "line_number": 181, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager", "line_number": 181, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 188, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 189, "usage_type": "call"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 195, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 198, "usage_type": "call"}, {"api_name": "loginSystem.views.loadLoginPage", "line_number": 198, "usage_type": "argument"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 202, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 206, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 207, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager.preGenerateDKIMKeys", "line_number": 212, "usage_type": "call"}, {"api_name": 
"pluginManager.pluginManager", "line_number": 212, "usage_type": "name"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 216, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager.postGenerateDKIMKeys", "line_number": 219, "usage_type": "call"}, {"api_name": "pluginManager.pluginManager", "line_number": 219, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 226, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 227, "usage_type": "call"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 231, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 235, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 236, "usage_type": "call"}, {"api_name": "mailserverManager.MailServerManager", "line_number": 240, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 244, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 245, "usage_type": "call"}]} +{"seq_id": "15281347105", "text": "# 타이핑 게임 제작 및 기본 완성\n\nimport random\nimport time\nimport winsound # 사운드 출력 필요 모듈\nimport sqlite3\nimport datetime\n\n#DB생성 & Auto Commit\n#본인 DB경로\nconn = sqlite3.connect('C:/python_basic/resource/records.db', isolation_level=None)\n\n#Cursor 연결\ncursor = conn.cursor()\n\ncursor.execute(\"CREATE TABLE IF NOT EXISTS records( id IMTEGER PRIMARY KEY AUTOINCREMEMT, cor_cnt INTEGER , record text, regdate text )\")\n\n\nwords =[] #영어 단어 리스트\nn = 1 #게임 시도 횟수\ncor_cnt = 0 # 정답 개수 \n\nwith open('./resource/word.txt', 'r') as f : # with 문은 알아서 close가 됨\n for c in f :\n words.append(c.strip())\n\nprint(words) # 단어 리스트 확인\n\ninput(\"Ready? Press Enter Key!\") #게임 스타트 // 무조건 String 형태로 들어온다.\n\nstart = time.time()\n\nwhile n <= 5 :\n random.shuffle(words)\n q = random.choice(words)\n \n print()\n\n print(\"*문제 # {}\".format(n)) # {}사이에 format 안 변수 대입\n\n print(q)\n\n x = input() #타이핑 입력\n\n print()\n\n if str(q).strip() == str(x).strip(): # 입력확인(공백제거)\n print(\"PASSS\")\n #정답 소리 재생\n winsound.PlaySound('./sound/good.wav', winsound.SND_FILENAME)\n cor_cnt += 1\n else :\n\n #오답 소리 재생\n winsound.PlaySound('./sound/bad.wav', winsound.SND_FILENAME)\n\n print (\"Wrong!\")\n n += 1 # 다음 문제 전환\n\nend = time.time() # End Time 기록\n\net = end - start # 총게임 시간\net = format(et,\".3f\") # et 를 3번째 소수점 까지 나타내라 라는 format\n\nif cor_cnt>= 3:\n print(\"합격\")\nelse:\n print (\"탈락\")\n\n\n# 기록 DB 삽입\n\ncursor.execute(\"INSERT INTO records('cor_cnt','records'.'regdate') VALUES(?,?,?)\",(cor_cnt, et, datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')))\n# 수행 시간 출력\n\nprint(\"게임 시간 : \",et,\"초\", \"정답 개수:{}\".format(cor_cnt))\n\n#시작 지점 \n\nif __name__ == '__main__':\n pass\n\n\n\n", "repo_name": "shinb-bong/python_basic_study", "sub_path": "section_fin -2.py", "file_name": "section_fin -2.py", "file_ext": "py", "file_size_in_byte": 1998, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sqlite3.connect", "line_number": 11, "usage_type": "call"}, {"api_name": "time.time", "line_number": 31, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 34, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 35, "usage_type": "call"}, {"api_name": "winsound.PlaySound", "line_number": 50, "usage_type": "call"}, {"api_name": "winsound.SND_FILENAME", "line_number": 50, "usage_type": "attribute"}, {"api_name": "winsound.PlaySound", "line_number": 55, "usage_type": "call"}, {"api_name": 
"winsound.SND_FILENAME", "line_number": 55, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 73, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 73, "usage_type": "attribute"}]} +{"seq_id": "17452790331", "text": "import pickle\nimport sqlite3 as sqlite\nfrom pprint import pprint\n\nimport pandas as pd\n\nconn = sqlite.connect(\"/home/luiscarlos/PycharmProjects/LiveBot/sandbox/configuration/memory.db\")\n\ncursor = conn.cursor()\ncursor.execute(\"SELECT * FROM RESERVED_RAW_INTENTS\")\noriginal_intent_list = cursor.fetchall()\n\noriginal_intents = {}\nfor c, p in original_intent_list:\n if c in original_intents:\n original_intents[c].append(p)\n else:\n original_intents[c] = [p]\n\nmodified_df = pd.read_sql_query(f\"SELECT * from RESERVED_TFIDF\", conn)[[\"__documents\", \"__class\"]]\nmodified_intents = {}\nfor c, p in zip(modified_df[\"__class\"], modified_df[\"__documents\"]):\n if c in modified_intents:\n modified_intents[c].append(p)\n else:\n modified_intents[c] = [p]\n\ndiff = {}\nfor km, vm in modified_intents.items():\n if km not in original_intents:\n diff[km] = vm\n elif vm != original_intents[km]:\n diff[km] = list(set(vm) - set(original_intents[km]))\n\ncursor.execute(\"SELECT response, data FROM RESERVED_RESPONSES\")\nresponse_list = cursor.fetchall()\n# responses = {k: pickle.loads(v) for k, v in response_list if k in diff.keys()}\n\ncursor.execute(\"SELECT classification, data FROM RESERVED_INTENTS\")\nintents_list = cursor.fetchall()\nintents = {k: pickle.loads(v) for k, v in intents_list if k in diff.keys()}\n\npprint(diff.keys())\npprint(diff)\npprint(intents)\n\ncursor.close()\nconn.close()\n", "repo_name": "LukasRepos/LiveBot", "sub_path": "Chatty/diff.py", "file_name": "diff.py", "file_ext": "py", "file_size_in_byte": 1418, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sqlite3.connect", "line_number": 7, "usage_type": "call"}, {"api_name": "pandas.read_sql_query", "line_number": 20, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 43, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 44, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "4982329713", "text": "from datetime import date, time, datetime\nfrom PyQt5.QtGui import QColor\n\n# Defines custom data types for capra. 
Currently all are only used in conjunction\n# with the projector\n\n\nclass CapraDataType:\n \"\"\"Superclass for shared functionality between Picture and Hike objects\"\"\"\n def _parse_hr_min(self, timestamp: float) -> str:\n \"\"\"Parses timestamp into string\n ::\n\n :param timestamp: Unix timestamp\n :return str: '5:19 PM'\n \"\"\"\n t = datetime.fromtimestamp(timestamp)\n s = t.strftime('%-I:%M')\n return s\n\n def _parse_sec(self, timestamp: float) -> str:\n \"\"\"Parses timestamp into string\n ::\n\n :param timestamp: Unix timestamp\n :return str: ':24 PM'\n \"\"\"\n t = datetime.fromtimestamp(timestamp)\n s = t.strftime(':%S %p')\n return s\n\n def _parse_am_pm(self, timestamp: float) -> str:\n \"\"\"Parses timestamp into AM or PM\n ::\n\n :param timestamp: Unix timestamp\n :return str: 'PM'\n \"\"\"\n t = datetime.fromtimestamp(timestamp)\n s = t.strftime(' %p')\n return s\n\n def _parse_date(self, timestamp: float) -> str:\n \"\"\"Parses timestamp into string\n ::\n\n :param timestamp: Unix timestamp\n :return str: 'April 27, 2019'\n \"\"\"\n d = datetime.fromtimestamp(timestamp)\n s = d.strftime('%B %-d, %Y')\n return s\n\n def _parse_hike(self, hike: int) -> str:\n \"\"\"Parses `int` into string\n ::\n\n :param hike: int\n :return str: 'Hike 2'\n \"\"\"\n s = 'Hike {h}'.format(h=hike)\n return s\n\n def _parse_color(self, text: str) -> QColor:\n \"\"\"Parses `,` separated text into QColor\n ::\n\n :param text: input format \"217,220,237\"\n :return: QColor in RBG format\n \"\"\"\n\n c = text.split(',')\n color = QColor(int(c[0]), int(c[1]), int(c[2]))\n return color\n\n def _parse_color_HSV(self, text: str) -> QColor:\n \"\"\"Parses `,` separated text into QColor\n ::\n\n :param text: input format \"217,220,237\"\n :return: QColor in HSV format\n \"\"\"\n c = text.split(',')\n color = QColor()\n color.setHsv(int(c[0]), int(c[1]), int(c[2]))\n return color\n\n def _parse_color_list(self, text: str) -> list:\n \"\"\"Parses `|` and `,` separated text into list of QColors\n ::\n\n :param text: input format \"217,220,237|92,78,69|50,49,43|50,42,31|78,75,82\"\n :return: list of QColors\n \"\"\"\n\n color_strs = text.split('|')\n colorlist = list()\n for c in color_strs:\n color = self._parse_color(c)\n colorlist.append(color)\n return colorlist\n\n def _parse_percent_list(self, text: str) -> list:\n \"\"\"Parses `,` separated text into a list of floats\n ::\n\n :param text: input format \"0.46,0.16,0.15,0.15,0.09\"\n :return: list of float (each value is between 0.0 - .99)\n \"\"\"\n\n percents = text.split(',')\n percents = list(map(float, percents))\n return percents\n\n\nclass Picture(CapraDataType):\n \"\"\"Defines object which hold a row from the database table 'pictures'\"\"\"\n\n def __init__(self, picture_id, time, year, month, day, minute, dayofweek, hike_id, index_in_hike, timerank_global,\n altitude, altrank_hike, altrank_global, altrank_global_h,\n color_hsv, color_rgb, colorrank_hike, colorrank_global, colorrank_global_h,\n colors_count, colors_rgb, colors_conf,\n camera1, camera2, camera3, cameraf, created, updated):\n super().__init__()\n self.picture_id = picture_id\n self.time = time\n self.year = year\n self.month = month\n self.day = day\n self.minute = minute\n self.dayofweek = dayofweek\n\n self.hike_id = hike_id\n self.index_in_hike = index_in_hike\n self.timerank_global = timerank_global\n\n self.altitude = altitude\n self.altrank_hike = altrank_hike\n self.altrank_global = altrank_global\n self.altrank_global_h = altrank_global_h\n\n self.color_hsv = 
self._parse_color_HSV(color_hsv)\n self.color_rgb = self._parse_color(color_rgb)\n self.colorrank_hike = colorrank_hike\n self.colorrank_global = colorrank_global\n self.colorrank_global_h = colorrank_global_h\n self.colors_count = colors_count\n self.colors_rgb = self._parse_color_list(colors_rgb)\n self.colors_conf = self._parse_percent_list(colors_conf)\n\n self.camera1 = camera1\n self.camera2 = camera2\n self.camera3 = camera3\n self.cameraf = cameraf\n\n self.created = created\n self.updated = updated\n\n # Labels for the UI\n self.uitime_hrmm = self._parse_hr_min(self.time)\n self.uitime_sec = self._parse_sec(self.time)\n self.uitime_ampm = self._parse_am_pm(self.time)\n self.uidate = self._parse_date(self.time)\n self.uihike = self._parse_hike(self.hike_id)\n self.uialtitude = str(int(self.altitude))\n\n def print_obj_mvp(self):\n print('({id}, {t}, {yr}, {mth}, {day}, {min}, {dow}, {hike_id}, {index}, {alt}, {altr}, {hsv}, {rgb}, {crh}, {crg}, {c1}, {c2}, {c3}, {cf})\\\n '.format(id=self.picture_id, t=self.time, yr=self.year, mth=self.month, day=self.day,\n min=self.minute, dow=self.dayofweek, hike_id=self.hike_id, index=self.index_in_hike,\n alt=self.altitude, altr=self.altrank_global, hsv=self.color_hsv, rgb=self.color_rgb,\n crh=self.colorrank_hike, crg=self.colorrank_global, c1=self.camera1, c2=self.camera2, c3=self.camera3, cf=self.cameraf))\n\n def print_obj(self):\n print('id\\ttime\\t\\taltitude\\thike_id\\tindex\\taltrank_hike\\tcolorrank_hike\\tpath')\n print('{id}\\t{t}\\t{alt}\\t\\t{hike_id}\\t{index}\\t{ar}\\t\\t{cr}\\t{pth}\\n\\\n '.format(id=self.picture_id, t=self.time, alt=self.altitude,\n hike_id=self.hike_id, index=self.index_in_hike, ar=self.altrank_hike, cr=self.colorrank_hike, pth=self.cameraf))\n\n\nclass Hike(CapraDataType):\n \"\"\"Defines object which hold a row from the database table 'hikes'\"\"\"\n\n def __init__(self, hike_id, avg_altitude, avg_altrank,\n start_time, start_year, start_month, start_day, start_minute, start_dayofweek,\n end_time, end_year, end_month, end_day, end_minute, end_dayofweek,\n color_rgb, color_rank,\n num_pictures, path, created, updated):\n super().__init__()\n\n self.hike_id = hike_id\n self.avg_altitude = avg_altitude\n self.avg_altrank = avg_altrank\n\n self.start_time = start_time\n self.start_year = start_year\n self.start_month = start_month\n self.start_day = start_day\n self.start_minute = start_minute\n self.start_dayofweek = start_dayofweek\n self.end_time = end_time\n self.end_year = end_year\n self.end_month = end_month\n self.end_day = end_day\n self.end_minute = end_minute\n self.end_dayofweek = end_dayofweek\n\n self.color_rgb = self._parse_color(color_rgb)\n self.color_rank = color_rank\n self.num_pictures = num_pictures\n self.path = path\n\n self.created = created\n self.updated = updated\n\n self.uistarttime_hrmm = self._parse_hr_min(self.start_time)\n self.uistartdate = self._parse_date(self.start_time)\n self.uihike = self._parse_hike(self.hike_id)\n self.uialtitude = str(int(self.avg_altitude))\n\n def print_obj(self):\n print('Hike ID\\tstart time\\t\\tavg_alt\\tavg_altrank\\tcolor\\tcolor_rank\\tpictures\\tpath')\n print('{id}\\t{t}\\t{avg_alt}\\t{avg_altrank}\\t{color}\\t{color_rank}\\t{pic}\\t{path}\\n\\\n '.format(id=self.hike_id, t=self.start_time, avg_alt=self.avg_altitude, avg_altrank=self.avg_altrank,\n color=self.color_rgb, color_rank=self.color_rank, pic=self.num_pictures, path=self.path))\n\n def get_hike_length_seconds(self) -> float:\n return round(self.end_time - self.start_time, 0)\n\n 
def get_hike_length_minutes(self) -> float:\n return round((self.end_time - self.start_time)/60, 1)\n\n\nclass UIData:\n \"\"\"Defines object which holds all the UI data for the archive\"\"\"\n\n def __init__(self):\n super().__init__()\n\n # Hikes UI data\n self.indexListForHike = {}\n\n self.altitudesSortByAltitudeForHike = {}\n self.altitudesSortByColorForHike = {}\n self.altitudesSortByTimeForHike = {}\n\n self.colorSortByAltitudeForHike = {}\n self.colorSortByColorForHike = {}\n self.colorSortByTimeForHike = {}\n\n # Archive UI data\n self.indexListForArchive = []\n\n self.altitudesSortByAltitudeForArchive = []\n self.altitudesSortByColorForArchive = []\n self.altitudesSortByTimeForArchive = []\n\n self.colorSortByAltitudeForArchive = []\n self.colorSortByColorForArchive = []\n self.colorSortByTimeForArchive = []\n", "repo_name": "EverydayDesignStudio/capra", "sub_path": "classes/capra_data_types.py", "file_name": "capra_data_types.py", "file_ext": "py", "file_size_in_byte": 9197, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "21", "api": [{"api_name": "datetime.datetime.fromtimestamp", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 50, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 73, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 64, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 84, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 76, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 126, "usage_type": "name"}]} +{"seq_id": "74617674613", "text": "import subprocess\nimport shlex\nimport argparse\nimport os\n\n\n\nclass servCTRL:\n pidfile = '/usr/local/CyberCP/WebTerminal/pid'\n\n def prepareArguments(self):\n\n parser = argparse.ArgumentParser(description='CyberPanel Policy Control Parser!')\n parser.add_argument('function', help='Specify an operation to perform!')\n\n return parser.parse_args()\n\n def start(self):\n\n if os.path.exists(servCTRL.pidfile):\n self.stop()\n\n command = '/usr/local/CyberCP/bin/python /usr/local/CyberCP/WebTerminal/CPWebSocket.py'\n subprocess.Popen(shlex.split(command))\n\n def stop(self):\n try:\n path = servCTRL.pidfile\n command = 'kill -9 %s' % (open(path, 'r').read())\n subprocess.Popen(shlex.split(command))\n except:\n pass\n\n\ndef main():\n\n policy = servCTRL()\n args = policy.prepareArguments()\n\n ## Website functions\n\n if args.function == \"start\":\n policy.start()\n elif args.function == \"stop\":\n policy.stop()\n elif args.function == \"restart\":\n policy.stop()\n policy.start()\n\nif __name__ == \"__main__\":\n main()", "repo_name": "usmannasir/cyberpanel", "sub_path": "WebTerminal/servCTRL.py", "file_name": "servCTRL.py", "file_ext": "py", "file_size_in_byte": 1155, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1302, "dataset": "github-code", "pt": "21", "api": [{"api_name": 
"argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 24, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 24, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 30, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "73824127733", "text": "import sys\nimport webbrowser\nimport pyperclip\n\ndef main():\n url = 'https://www.google.com/maps/place/'\n\n # For Google Chrome on Windows: brower_path usually = \"C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s --incognito\"\n browser_path = 'your browser here'\n\n if len(sys.argv) > 1:\n address = ' '.join(sys.argv[1:])\n else:\n address = pyperclip.paste()\n\n browser = webbrowser.get(browser_path)\n browser.open(url + address)\n\n\nif __name__ == '__main__':\n main()", "repo_name": "patrickluong/python-scripts", "sub_path": "map-it/mapit.py", "file_name": "mapit.py", "file_ext": "py", "file_size_in_byte": 510, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pyperclip.paste", "line_number": 14, "usage_type": "call"}, {"api_name": "webbrowser.get", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "10239829800", "text": "from django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom tendenci.apps.perms.admin import TendenciBaseModelAdmin\nfrom tendenci.apps.regions.models import Region\nfrom tendenci.apps.regions.forms import RegionForm\n\n\nclass RegionAdmin(TendenciBaseModelAdmin):\n list_display = ['region_name', 'region_code',\n 'owner_link', 'admin_perms',\n 'admin_status']\n list_filter = ['status_detail', 'owner_username']\n search_fields = ['region_name', 'region_code']\n fieldsets = (\n (_('Region Information'), {\n 'fields': ('region_name',\n 'region_code',\n 'description',\n )\n }),\n (_('Permissions'), {'fields': ('allow_anonymous_view',)}),\n (_('Advanced Permissions'), {'classes': ('collapse',), 'fields': (\n 'user_perms',\n 'member_perms',\n 'group_perms',\n )}),\n ('Status', {'fields': (\n 'status_detail',\n )}),\n )\n form = RegionForm\n ordering = ['-update_dt']\n\nadmin.site.register(Region, RegionAdmin)\n", "repo_name": "musasesay/membershiptemplet", "sub_path": "tendenci/apps/regions/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 1155, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "tendenci.apps.perms.admin.TendenciBaseModelAdmin", "line_number": 9, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 16, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 22, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 23, "usage_type": "call"}, {"api_name": "tendenci.apps.regions.forms.RegionForm", "line_number": 32, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 35, "usage_type": "call"}, {"api_name": "tendenci.apps.regions.models.Region", "line_number": 35, "usage_type": "argument"}, 
{"api_name": "django.contrib.admin.site", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "74952326772", "text": "from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\n\nfrom apps.pages.views import (\n HomeView,\n PhanHoaView,\n SuaOngChuaView,\n TinhBotNgheView,\n BangGiaView\n)\n\nfrom apps.posts.views import PostDetailView, PostListView\nfrom django.views.generic import TemplateView\nfrom django.contrib.sitemaps.views import sitemap\nfrom apps.pages.views import PostSitemap,StaticSitemap\n\nsitemaps = {\n 'post':PostSitemap,\n 'static': StaticSitemap,\n}\nurlpatterns = [\n # Examples:\n url(r'^$', HomeView.as_view(), name='home'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^ckeditor/', include('ckeditor_uploader.urls')),\n\n url(r'^mat-ong/$', PostListView.as_view(), name='mat_ong'),\n url(r'^mat-ong/(?P[\\w-]+)/$', PostDetailView.as_view(), name='mat_ong_detail'),\n\n url(r'^phan-hoa/$', PhanHoaView.as_view(), name='phan_hoa'),\n url(r'^sua-ong-chua/$', SuaOngChuaView.as_view(), name='sua_ong_chua'),\n url(r'^tinh-bot-nghe/$', TinhBotNgheView.as_view(), name='tinh_bot_nghe'),\n url(r'^bang-gia/$', BangGiaView.as_view(), name='bang_gia'),\n\n\n url(r'^robots.txt$', TemplateView.as_view(template_name=\"robots.txt\", content_type=\"text/plain\")),\n url(r'^sitemap\\.xml$', sitemap, {'sitemaps': sitemaps},name='django.contrib.sitemaps.views.sitemap')\n\n]\n\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\nimport debug_toolbar\n\nurlpatterns += [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n]\n", "repo_name": "cash2one/matongdaklak", "sub_path": "config/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1673, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "apps.pages.views.PostSitemap", "line_number": 20, "usage_type": "name"}, {"api_name": "apps.pages.views.StaticSitemap", "line_number": 21, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "apps.pages.views.HomeView.as_view", "line_number": 25, "usage_type": "call"}, {"api_name": "apps.pages.views.HomeView", "line_number": 25, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 26, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 26, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "apps.posts.views.PostListView.as_view", "line_number": 29, "usage_type": "call"}, {"api_name": "apps.posts.views.PostListView", "line_number": 29, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "apps.posts.views.PostDetailView.as_view", "line_number": 30, "usage_type": "call"}, {"api_name": "apps.posts.views.PostDetailView", "line_number": 30, 
"usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "apps.pages.views.PhanHoaView.as_view", "line_number": 32, "usage_type": "call"}, {"api_name": "apps.pages.views.PhanHoaView", "line_number": 32, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "apps.pages.views.SuaOngChuaView.as_view", "line_number": 33, "usage_type": "call"}, {"api_name": "apps.pages.views.SuaOngChuaView", "line_number": 33, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "apps.pages.views.TinhBotNgheView.as_view", "line_number": 34, "usage_type": "call"}, {"api_name": "apps.pages.views.TinhBotNgheView", "line_number": 34, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "apps.pages.views.BangGiaView.as_view", "line_number": 35, "usage_type": "call"}, {"api_name": "apps.pages.views.BangGiaView", "line_number": 35, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 38, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView.as_view", "line_number": 38, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 38, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 39, "usage_type": "call"}, {"api_name": "django.contrib.sitemaps.views.sitemap", "line_number": 39, "usage_type": "argument"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 44, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 45, "usage_type": "call"}, {"api_name": "django.conf.settings.STATIC_URL", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 45, "usage_type": "name"}, {"api_name": "django.conf.settings.STATIC_ROOT", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.conf.urls.static.static", "line_number": 46, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 46, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 46, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 46, "usage_type": "attribute"}, {"api_name": "django.conf.urls.url", "line_number": 50, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 50, "usage_type": "call"}, {"api_name": "debug_toolbar.urls", "line_number": 50, "usage_type": "attribute"}]} +{"seq_id": "74439483251", "text": "from flask import Blueprint\nfrom flask_restx import Namespace, Resource, reqparse\nfrom . 
import is_api, cors_allow\n\ntest1_app = Blueprint('test1', __name__, url_prefix='/test1')\ntest1_api = Namespace('test1', path='/test1')\n\n\n@test1_api.route('/resource1')\nclass FirstResource(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('num', type=int, default=1, help='Page number')\n\n @test1_api.expect(parser)\n @test1_api.response(200, 'Success')\n @test1_api.response(403, 'No Permission')\n @is_api(required_keys=['num'])\n def get(self, data):\n print(data)\n result = {'success': data['num']}\n return result, 200\n\n @test1_api.hide\n @cors_allow('http://127.0.0.1:5000')\n def options(self):\n pass\n", "repo_name": "BranKein/Swagger-Test-By-Flask-Restx", "sub_path": "endpoints/test1.py", "file_name": "test1.py", "file_ext": "py", "file_size_in_byte": 767, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "flask.Blueprint", "line_number": 5, "usage_type": "call"}, {"api_name": "flask_restx.Namespace", "line_number": 6, "usage_type": "call"}, {"api_name": "flask_restx.Resource", "line_number": 10, "usage_type": "name"}, {"api_name": "flask_restx.reqparse.RequestParser", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_restx.reqparse", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "5201836403", "text": "# check data-retreive.py and pandasnumpytrials.py for some basics\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nimport seaborn as sns\nimport numpy as np\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\n\n# setting default style for charts\nsns.set_style('darkgrid')\nmatplotlib.rcParams['font.size'] = 14\nmatplotlib.rcParams['figure.figsize'] = (10, 6)\nmatplotlib.rcParams['figure.facecolor'] = '#00000000'\n\nmedical_df = pd.read_csv('medical.csv')\n\nnon_smoker_df=medical_df[medical_df.smoker=='no']\n\n# Convert non-numeric columns to numerical codes\nsmoker_values = {'no': 0, 'yes': 1}\nsmoker_numeric = medical_df.smoker.map(smoker_values)\n# print(medical_df.charges.corr(smoker_numeric))\nmedical_df['smoker_code']=smoker_numeric\n\nsex_codes = {'female': 0, 'male': 1}\nmedical_df['sex_code'] = medical_df.sex.map(sex_codes)\n\nenc=preprocessing.OneHotEncoder()\nenc.fit(medical_df[['region']])\nenc.categories_\nprint(enc.categories_)\none_hot=enc.transform(medical_df[['region']]).toarray()\n\nmedical_df[['northeast', 'northwest', 'southeast', 'southwest']] = one_hot\n\n# print(medical_df)\n\n\"\"\"/\"\"\"\nmodel=LinearRegression()\n\n# inputs=non_smoker_df[['age','bmi','children']]\n# targets=non_smoker_df.charges\ninput_cols=['age','bmi','children','smoker_code','sex_code', \n 'northeast', 'northwest', 'southeast', 'southwest']\ninputs=medical_df[input_cols]\ntargets=medical_df.charges\nmodel=model.fit(inputs,targets)\nprint("coeff:",model.coef_)\nprint("intcpt:",model.intercept_)\n\n# print(model.predict(np.array([[23],[37],[61]])))\npredictions=model.predict(inputs)\ndef rmse(targets,predictions):\n return np.sqrt(np.mean(np.square(targets - predictions)))\n# print(rmse(targets,predictions))\nprint("rmse loss =",rmse(targets,predictions))\n\n# sns.barplot(data=medical_df,x='region',y='charges')\n# plt.show()\n\n# fig = px.scatter_3d(non_smoker_df, x='age', y='bmi', z='charges')\n# fig.update_traces(marker_size=3, marker_opacity=0.5)\n# fig.show()\n\nprint("coeff, 
intcpt:\",model.coef_,model.intercept_)\nweights_df = pd.DataFrame({\n 'feature': np.append(input_cols,\"Intercept\"),\n 'weight': np.append(model.coef_,model.intercept_)\n})\nprint(weights_df)\n\nnumeric_cols=['age','bmi','children']\nscaler=StandardScaler()\nscaler.fit(medical_df[numeric_cols])\n\nscaled_inputs=scaler.transform(medical_df[numeric_cols])\nprint(scaled_inputs)\n\ncat_cols = ['smoker_code', 'sex_code', 'northeast', 'northwest', \n 'southeast', 'southwest']\ncategorical_data = medical_df[cat_cols].values\n\ninputs = np.concatenate((scaled_inputs, categorical_data), axis=1)\ntargets = medical_df.charges\n\n# Create and train the model\nmodel = LinearRegression().fit(inputs, targets)\n\n# Generate predictions\npredictions = model.predict(inputs)\n\n# Compute loss to evalute the model\nloss = rmse(targets, predictions)\nprint('Loss:', loss)\n\nweights_df = pd.DataFrame({\n 'feature': np.append(numeric_cols + cat_cols, 1),\n 'weight': np.append(model.coef_, model.intercept_)\n})\nprint(weights_df.sort_values('weight', ascending=False))\n\ninputs_train, inputs_test, targets_train, targets_test = train_test_split(inputs, targets, test_size=0.1)\n\n# Create and train the model\nmodel = LinearRegression().fit(inputs_train, targets_train)\n\n# Generate predictions\npredictions_test = model.predict(inputs_test)\n\n# Compute loss to evalute the model\nloss = rmse(targets_test, predictions_test)\nprint('Test Loss:', loss)\n\n# Generate predictions\npredictions_train = model.predict(inputs_train)\n\n# Compute loss to evalute the model\nloss = rmse(targets_train, predictions_train)\nprint('Training Loss:', loss)\n\n\"\"\"/\"\"\"\n", "repo_name": "gachempa/KRBV0723", "sub_path": "MachineLearning/ML-BinaryCategories.py", "file_name": "ML-BinaryCategories.py", "file_ext": "py", "file_size_in_byte": 3748, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "seaborn.set_style", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 16, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 17, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 33, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 73, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 88, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 92, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 103, "usage_type": "call"}, {"api_name": 
"sklearn.model_selection.train_test_split", "line_number": 107, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "14021429110", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"Documentation file arguments.py.\"\"\"\n\n# =============================================================================\n# IMPORTS\n# =============================================================================\n\nimport argparse\nfrom typing import NoReturn, Text\n\n# =============================================================================\n# CLASS - ARGUMENTS\n# =============================================================================\n\nclass Arguments(object):\n\n def __init__(self, *args, **kwargs) -> NoReturn:\n self._parser = self._create_parser_object(*args, **kwargs)\n self._build()\n\n @staticmethod\n def _create_parser_object(*args, **kwargs) -> argparse.ArgumentParser:\n try:\n return argparse.ArgumentParser(*args, **kwargs)\n except argparse.ArgumentError as error:\n print(f\"\\nError when we create a parser object - {error}\")\n except Exception as error:\n print(f\"\\nError general exception in create a parser object - {error}\")\n\n def _adding_arguments(self) -> NoReturn:\n try:\n self._parser.add_argument(\"-n\", \"--namespace\",\n type=str,\n metavar=\"\",\n default=None,\n help=\"Kubernetes get specific namespace\")\n self._parser.add_argument(\"-all\", \"--allnamespaces\",\n action=\"store_true\",\n default=False,\n help=\"Kubernetes get all namespaces\")\n self._parser.add_argument(\"-kcp\", \"--kubeconfig_path\",\n type=str,\n metavar=\"\",\n default=None,\n help=\"Just the Kubeconfig path\")\n self._parser.add_argument(\"-hrfd\", \"--helm_release_filter_days\",\n type=str,\n metavar=\"\",\n default=None,\n help=\"Helm release creation days to delete\")\n self._parser.add_argument(\"-lp\", \"--logpath\",\n type=str,\n metavar=\"\",\n default=None,\n help=\"Custom Log path name\")\n self._parser.add_argument(\"-lf\", \"--logfile\",\n type=str,\n metavar=\"\",\n default=None,\n help=\"Custom Log file name\")\n except Exception as error:\n print(f\"\\nError general exception in define all arguments used on the command line - {error}\")\n\n def _parser_args(self) -> argparse.ArgumentParser.parse_args:\n try:\n return self._parser.parse_args()\n except Exception as error:\n print(f\"\\nError general exception in parser the arguments from standard input - {error}\")\n\n def _build(self) -> NoReturn:\n try:\n self._adding_arguments()\n self._args = vars(self._parser_args())\n except Exception as error:\n print(f\"\\nError general exception to populate the parser object with the information - {error}\")\n\n @property\n def args(self) -> Text:\n return self._args\n", "repo_name": "lahkurb/helm-clean-releases", "sub_path": "code/settings/arguments.py", "file_name": "arguments.py", "file_ext": "py", "file_size_in_byte": 3544, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.NoReturn", "line_number": 18, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 25, "usage_type": "call"}, {"api_name": "argparse.ArgumentError", "line_number": 26, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 23, "usage_type": "attribute"}, {"api_name": "typing.NoReturn", "line_number": 31, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 65, "usage_type": 
"attribute"}, {"api_name": "typing.NoReturn", "line_number": 71, "usage_type": "name"}, {"api_name": "typing.Text", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "13913573567", "text": "import torch\nimport math\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom models import TextCNN\nfrom configs import get_args\nfrom utils import data_selfattention\nfrom torch.nn import CrossEntropyLoss\nfrom transformers import Trainer\n\n\ndef cl_cal_loss(vec1,vec2):\n loss_for_cl=CrossEntropyLoss(ignore_index=-100)\n labels=torch.arange(0,vec1.shape[0],device='cuda')\n vec1=F.normalize(vec1, p=2, dim=1)#归一化为单位向量[bs,hiden_len]\n vec2=F.normalize(vec2, p=2, dim=1)#[bs,hiden_len]\n sims=vec1.matmul(vec2.T)*20\n loss=loss_for_cl(sims,labels)#拉近二者距离\n return loss\n\n\ndef cos_similarity(p, z, version='simplified'): # negative cosine similarity\n if version == 'original':\n z = z.detach() # stop gradient\n p = F.normalize(p, dim=1) # l2-normalize \n z = F.normalize(z, dim=1) # l2-normalize \n return -(p*z).sum(dim=1).mean()\n\n elif version == 'simplified':# same thing, much faster. Scroll down, speed test in __main__\n return - F.cosine_similarity(p, z.detach(), dim=-1).mean()\n else:\n raise Exception\n\n\n\nclass projection_MLP(nn.Module):\n def __init__(self, in_dim, hidden_dim, out_dim=4096):\n super().__init__()\n self.layer1 = nn.Sequential(\n nn.Linear(in_dim, hidden_dim),\n nn.BatchNorm1d(hidden_dim),\n nn.ReLU(inplace=True)\n )\n self.layer2 = nn.Sequential(\n nn.Linear(hidden_dim, hidden_dim),\n nn.BatchNorm1d(hidden_dim),\n nn.ReLU(inplace=True)\n )\n self.layer3 = nn.Sequential(\n nn.Linear(hidden_dim, out_dim),\n nn.BatchNorm1d(out_dim)\n )\n self.num_layers = 3\n def set_layers(self, num_layers):\n self.num_layers = num_layers\n\n def forward(self, x):\n if self.num_layers == 3:\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n elif self.num_layers == 2:\n x = self.layer1(x)\n x = self.layer3(x)\n else:\n raise Exception\n return x \n\n\nclass prediction_MLP(nn.Module):\n def __init__(self, in_dim=4096, hidden_dim=1024, out_dim=4096): # bottleneck structure\n super().__init__()\n self.layer1 = nn.Sequential(\n nn.Linear(in_dim, hidden_dim),\n nn.BatchNorm1d(hidden_dim),\n nn.ReLU(inplace=True)\n )\n self.layer2 = nn.Linear(hidden_dim, out_dim)\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n return x\n\nclass Plus_Proj_layer(nn.Module):# 继承类nn.Module\n def __init__(self, backbone):\n super().__init__()\n model_opt = TextCNN.ModelConfig()\n args=get_args()\n self.backbone = backbone #\n\n if args.backbone=='textcnn':\n self.projector = projection_MLP(model_opt.model_dim,4096)\n \n else:\n self.projector = projection_MLP(768)\n self.encoder = nn.Sequential( # f encoder\n self.backbone,\n self.projector\n )\n self.predictor = prediction_MLP()\n \n def forward(self, x1, x2, mask):\n x1=data_selfattention(x1,x1,x1,mask)#不加attention为42.69\n x2=data_selfattention(x2,x2,x2,mask)\n z1,z2=self.encoder(x1),self.encoder(x2)\n p1,p2=self.predictor(z1),self.predictor(z2)\n return p1,z2,p2,z1\n\n\n\n\n\n\nif __name__ == \"__main__\":\n\n\n\n model = Plus_Proj_layer()\n x1 = torch.randn((2, 3, 224, 224))\n x2 = torch.randn_like(x1)#创建像x1的大小的张量\n\n model.forward(x1, x2).backward()\n print(\"forward backwork check\")\n\n z1 = torch.randn((200, 2560))\n z2 = torch.randn_like(z1)\n import time\n tic = time.time()\n print(cos_similarity(z1, z2, version='original'))\n toc = time.time()\n print(toc - tic)\n tic = time.time()\n print(cos_similarity(z1, z2, 
version='simplified'))\n toc = time.time()\n print(toc - tic)\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "qianandfei/GCSRL-Grouped-Contrastive-Self-supervised-Representation-Learning-of-Sentence", "sub_path": "models/plus_proj_layer.py", "file_name": "plus_proj_layer.py", "file_ext": "py", "file_size_in_byte": 3976, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "torch.nn.CrossEntropyLoss", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.functional.cosine_similarity", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 
74, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 84, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "name"}, {"api_name": "models.TextCNN.ModelConfig", "line_number": 87, "usage_type": "call"}, {"api_name": "models.TextCNN", "line_number": 87, "usage_type": "name"}, {"api_name": "configs.get_args", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "name"}, {"api_name": "utils.data_selfattention", "line_number": 103, "usage_type": "call"}, {"api_name": "utils.data_selfattention", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.randn_like", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.randn_like", "line_number": 126, "usage_type": "call"}, {"api_name": "time.time", "line_number": 128, "usage_type": "call"}, {"api_name": "time.time", "line_number": 130, "usage_type": "call"}, {"api_name": "time.time", "line_number": 132, "usage_type": "call"}, {"api_name": "time.time", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "21634204260", "text": "import pandas as pd\nimport os\nfrom env import get_db_url\n\n\ndef get_telco_data(use_cache = True):\n filename = 'telco_churn.csv'\n\n if os.path.exists(filename) and use_cache:\n return pd.read_csv(filename)\n\n url = get_db_url('telco_churn')\n \n sql = '''\n SELECT * \n FROM customers\n JOIN contract_types USING (contract_type_id)\n JOIN internet_service_types USING (internet_service_type_id)\n JOIN payment_types USING (payment_type_id)\n '''\n\n df = pd.read_sql(sql, url)\n df.to_csv(filename, index=False)\n return df", "repo_name": "javahava1222/classification-project", "sub_path": "acquire.py", "file_name": "acquire.py", "file_ext": "py", "file_size_in_byte": 583, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.path.exists", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "env.get_db_url", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "73034206453", "text": "import unittest\nfrom pycpfcnpj import cpf\n\n\nclass CPFTests(unittest.TestCase):\n \"\"\"docstring for CPFTests\"\"\"\n\n def setUp(self):\n self.valid_cpf = '11144477735'\n self.invalid_cpf = '11144477736'\n\n def test_validate_cpf_true(self):\n self.assertTrue(cpf.validate(self.valid_cpf))\n\n def test_validate_cpf_false(self):\n self.assertFalse(cpf.validate(self.invalid_cpf))\n for i in range(10):\n self.assertFalse(cpf.validate(\n '{0}'.format(i) * 11\n ))\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n", "repo_name": "LiuFang816/SALSTM_py_data", "sub_path": 
"python/matheuscas_pycpfcnpj/pycpfcnpj-master/tests/cpf_tests.py", "file_name": "cpf_tests.py", "file_ext": "py", "file_size_in_byte": 586, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "21", "api": [{"api_name": "unittest.TestCase", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pycpfcnpj.cpf.validate", "line_number": 13, "usage_type": "call"}, {"api_name": "pycpfcnpj.cpf", "line_number": 13, "usage_type": "name"}, {"api_name": "pycpfcnpj.cpf.validate", "line_number": 16, "usage_type": "call"}, {"api_name": "pycpfcnpj.cpf", "line_number": 16, "usage_type": "name"}, {"api_name": "pycpfcnpj.cpf.validate", "line_number": 18, "usage_type": "call"}, {"api_name": "pycpfcnpj.cpf", "line_number": 18, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "9495552445", "text": "\"\"\"\nabstract: Tests [EIP-3651: Warm COINBASE](https://eips.ethereum.org/EIPS/eip-3651)\n\n Tests for [EIP-3651: Warm COINBASE](https://eips.ethereum.org/EIPS/eip-3651).\n\nnote: Tests ported from:\n\n - [ethereum/tests/pull/1082](https://github.com/ethereum/tests/pull/1082).\n\"\"\"\n\nimport pytest\n\nfrom ethereum_test_forks import Shanghai, is_fork\nfrom ethereum_test_tools import (\n Account,\n CodeGasMeasure,\n Environment,\n TestAddress,\n Transaction,\n to_address,\n)\nfrom ethereum_test_tools.vm.opcode import Opcodes as Op\n\nREFERENCE_SPEC_GIT_PATH = \"EIPS/eip-3651.md\"\nREFERENCE_SPEC_VERSION = \"d94c694c6f12291bb6626669c3e8587eef3adff1\"\n\n# Amount of gas required to make a call to a warm account.\n# Calling a cold account with this amount of gas results in exception.\nGAS_REQUIRED_CALL_WARM_ACCOUNT = 100\n\n\n@pytest.mark.valid_from(\"Shanghai\")\n@pytest.mark.parametrize(\n \"use_sufficient_gas\",\n [True, False],\n ids=[\"sufficient_gas\", \"insufficient_gas\"],\n)\n@pytest.mark.parametrize(\n \"opcode,contract_under_test_code,call_gas_exact\",\n [\n (\n \"call\",\n Op.POP(Op.CALL(0, Op.COINBASE, 0, 0, 0, 0, 0)),\n # Extra gas: COINBASE + 4*PUSH1 + 2*DUP1 + POP\n GAS_REQUIRED_CALL_WARM_ACCOUNT + 22,\n ),\n (\n \"callcode\",\n Op.POP(Op.CALLCODE(0, Op.COINBASE, 0, 0, 0, 0, 0)),\n # Extra gas: COINBASE + 4*PUSH1 + 2*DUP1 + POP\n GAS_REQUIRED_CALL_WARM_ACCOUNT + 22,\n ),\n (\n \"delegatecall\",\n Op.POP(Op.DELEGATECALL(0, Op.COINBASE, 0, 0, 0, 0)),\n # Extra: COINBASE + 3*PUSH1 + 2*DUP1 + POP\n GAS_REQUIRED_CALL_WARM_ACCOUNT + 19,\n ),\n (\n \"staticcall\",\n Op.POP(Op.STATICCALL(0, Op.COINBASE, 0, 0, 0, 0)),\n # Extra: COINBASE + 3*PUSH1 + 2*DUP1 + POP\n GAS_REQUIRED_CALL_WARM_ACCOUNT + 19,\n ),\n ],\n ids=[\"CALL\", \"CALLCODE\", \"DELEGATECALL\", \"STATICCALL\"],\n)\ndef test_warm_coinbase_call_out_of_gas(\n state_test,\n fork,\n opcode,\n contract_under_test_code,\n call_gas_exact,\n use_sufficient_gas,\n):\n \"\"\"\n Test that the coinbase is warm by accessing the COINBASE with each\n of the following opcodes:\n\n - CALL\n - CALLCODE\n - DELEGATECALL\n - STATICCALL\n \"\"\"\n env = Environment(\n coinbase=\"0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba\",\n difficulty=0x20000,\n gas_limit=10000000000,\n number=1,\n timestamp=1000,\n )\n caller_address = \"0xcccccccccccccccccccccccccccccccccccccccc\"\n contract_under_test_address = 0x100\n\n if not use_sufficient_gas:\n call_gas_exact -= 1\n\n caller_code = Op.SSTORE(\n 0,\n Op.CALL(call_gas_exact, contract_under_test_address, 0, 0, 0, 0, 0),\n )\n\n pre = {\n TestAddress: Account(balance=1000000000000000000000),\n caller_address: 
Account(code=caller_code),\n to_address(contract_under_test_address): Account(code=contract_under_test_code),\n }\n\n tx = Transaction(\n ty=0x0,\n chain_id=0x01,\n nonce=0,\n to=caller_address,\n gas_limit=100000000,\n gas_price=10,\n )\n\n post = {}\n\n if use_sufficient_gas and is_fork(fork=fork, which=Shanghai):\n post[caller_address] = Account(\n storage={\n # On shanghai and beyond, calls with only 100 gas to\n # coinbase will succeed.\n 0: 1,\n }\n )\n else:\n post[caller_address] = Account(\n storage={\n # Before shanghai, calls with only 100 gas to\n # coinbase will fail.\n 0: 0,\n }\n )\n\n state_test(\n env=env,\n pre=pre,\n post=post,\n txs=[tx],\n tag=\"opcode_\" + opcode,\n )\n\n\n# List of opcodes that are affected by EIP-3651\ngas_measured_opcodes = [\n (\n \"EXTCODESIZE\",\n CodeGasMeasure(\n code=Op.EXTCODESIZE(Op.COINBASE),\n overhead_cost=2,\n extra_stack_items=1,\n ),\n ),\n (\n \"EXTCODECOPY\",\n CodeGasMeasure(\n code=Op.EXTCODECOPY(Op.COINBASE, 0, 0, 0),\n overhead_cost=2 + 3 + 3 + 3,\n ),\n ),\n (\n \"EXTCODEHASH\",\n CodeGasMeasure(\n code=Op.EXTCODEHASH(Op.COINBASE),\n overhead_cost=2,\n extra_stack_items=1,\n ),\n ),\n (\n \"BALANCE\",\n CodeGasMeasure(\n code=Op.BALANCE(Op.COINBASE),\n overhead_cost=2,\n extra_stack_items=1,\n ),\n ),\n (\n \"CALL\",\n CodeGasMeasure(\n code=Op.CALL(0xFF, Op.COINBASE, 0, 0, 0, 0, 0),\n overhead_cost=3 + 2 + 3 + 3 + 3 + 3 + 3,\n extra_stack_items=1,\n ),\n ),\n (\n \"CALLCODE\",\n CodeGasMeasure(\n code=Op.CALLCODE(0xFF, Op.COINBASE, 0, 0, 0, 0, 0),\n overhead_cost=3 + 2 + 3 + 3 + 3 + 3 + 3,\n extra_stack_items=1,\n ),\n ),\n (\n \"DELEGATECALL\",\n CodeGasMeasure(\n code=Op.DELEGATECALL(0xFF, Op.COINBASE, 0, 0, 0, 0),\n overhead_cost=3 + 2 + 3 + 3 + 3 + 3,\n extra_stack_items=1,\n ),\n ),\n (\n \"STATICCALL\",\n CodeGasMeasure(\n code=Op.STATICCALL(0xFF, Op.COINBASE, 0, 0, 0, 0),\n overhead_cost=3 + 2 + 3 + 3 + 3 + 3,\n extra_stack_items=1,\n ),\n ),\n]\n\n\n@pytest.mark.valid_from(\"Merge\") # these tests fill for fork >= Berlin\n@pytest.mark.parametrize(\n \"opcode,code_gas_measure\",\n gas_measured_opcodes,\n ids=[i[0] for i in gas_measured_opcodes],\n)\ndef test_warm_coinbase_gas_usage(state_test, fork, opcode, code_gas_measure):\n \"\"\"\n Test the gas usage of opcodes affected by assuming a warm coinbase:\n\n - EXTCODESIZE\n - EXTCODECOPY\n - EXTCODEHASH\n - BALANCE\n - CALL\n - CALLCODE\n - DELEGATECALL\n - STATICCALL\n \"\"\"\n env = Environment(\n coinbase=\"0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba\",\n difficulty=0x20000,\n gas_limit=10000000000,\n number=1,\n timestamp=1000,\n )\n\n measure_address = to_address(0x100)\n pre = {\n TestAddress: Account(balance=1000000000000000000000),\n measure_address: Account(code=code_gas_measure, balance=1000000000000000000000),\n }\n\n if is_fork(fork, Shanghai):\n expected_gas = GAS_REQUIRED_CALL_WARM_ACCOUNT # Warm account access cost after EIP-3651\n else:\n expected_gas = 2600 # Cold account access cost before EIP-3651\n\n post = {\n measure_address: Account(\n storage={\n 0x00: expected_gas,\n }\n )\n }\n tx = Transaction(\n ty=0x0,\n chain_id=0x01,\n nonce=0,\n to=measure_address,\n gas_limit=100000000,\n gas_price=10,\n )\n\n state_test(\n env=env,\n pre=pre,\n post=post,\n txs=[tx],\n tag=\"opcode_\" + opcode.lower(),\n )\n", "repo_name": "ethereum/tests", "sub_path": "src/BlockchainTestsFiller/Pyspecs/shanghai/eip3651_warm_coinbase/test_warm_coinbase.py", "file_name": "test_warm_coinbase.py", "file_ext": "py", "file_size_in_byte": 7023, "program_lang": "python", "lang": 
"en", "doc_type": "code", "stars": 481, "dataset": "github-code", "pt": "21", "api": [{"api_name": "ethereum_test_tools.Environment", "line_number": 85, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.SSTORE", "line_number": 98, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes", "line_number": 98, "usage_type": "name"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.CALL", "line_number": 100, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes", "line_number": 100, "usage_type": "name"}, {"api_name": "ethereum_test_tools.TestAddress", "line_number": 104, "usage_type": "name"}, {"api_name": "ethereum_test_tools.to_address", "line_number": 106, "usage_type": "call"}, {"api_name": "ethereum_test_tools.Account", "line_number": 104, "usage_type": "call"}, {"api_name": "ethereum_test_tools.Account", "line_number": 105, "usage_type": "call"}, {"api_name": "ethereum_test_tools.Account", "line_number": 106, "usage_type": "call"}, {"api_name": "ethereum_test_tools.Transaction", "line_number": 109, "usage_type": "call"}, {"api_name": "ethereum_test_forks.is_fork", "line_number": 120, "usage_type": "call"}, {"api_name": "ethereum_test_forks.Shanghai", "line_number": 120, "usage_type": "name"}, {"api_name": "ethereum_test_tools.Account", "line_number": 121, "usage_type": "call"}, {"api_name": "ethereum_test_tools.Account", "line_number": 129, "usage_type": "call"}, {"api_name": "pytest.mark.valid_from", "line_number": 32, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 33, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 38, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 38, "usage_type": "attribute"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.POP", "line_number": 43, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes", "line_number": 43, "usage_type": "name"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.CALL", "line_number": 43, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.COINBASE", "line_number": 43, "usage_type": "attribute"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.POP", "line_number": 49, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes", "line_number": 49, "usage_type": "name"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.CALLCODE", "line_number": 49, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.COINBASE", "line_number": 49, "usage_type": "attribute"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.POP", "line_number": 55, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes", "line_number": 55, "usage_type": "name"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.DELEGATECALL", "line_number": 55, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.COINBASE", "line_number": 55, "usage_type": "attribute"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.POP", "line_number": 61, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes", "line_number": 61, "usage_type": "name"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.STATICCALL", "line_number": 61, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.COINBASE", "line_number": 61, 
"usage_type": "attribute"}, {"api_name": "ethereum_test_tools.CodeGasMeasure", "line_number": 150, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.EXTCODESIZE", "line_number": 151, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes", "line_number": 151, "usage_type": "name"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.COINBASE", "line_number": 151, "usage_type": "attribute"}, {"api_name": "ethereum_test_tools.CodeGasMeasure", "line_number": 158, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.EXTCODECOPY", "line_number": 159, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes", "line_number": 159, "usage_type": "name"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.COINBASE", "line_number": 159, "usage_type": "attribute"}, {"api_name": "ethereum_test_tools.CodeGasMeasure", "line_number": 165, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.EXTCODEHASH", "line_number": 166, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes", "line_number": 166, "usage_type": "name"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.COINBASE", "line_number": 166, "usage_type": "attribute"}, {"api_name": "ethereum_test_tools.CodeGasMeasure", "line_number": 173, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.BALANCE", "line_number": 174, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes", "line_number": 174, "usage_type": "name"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.COINBASE", "line_number": 174, "usage_type": "attribute"}, {"api_name": "ethereum_test_tools.CodeGasMeasure", "line_number": 181, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.CALL", "line_number": 182, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes", "line_number": 182, "usage_type": "name"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.COINBASE", "line_number": 182, "usage_type": "attribute"}, {"api_name": "ethereum_test_tools.CodeGasMeasure", "line_number": 189, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.CALLCODE", "line_number": 190, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes", "line_number": 190, "usage_type": "name"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.COINBASE", "line_number": 190, "usage_type": "attribute"}, {"api_name": "ethereum_test_tools.CodeGasMeasure", "line_number": 197, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.DELEGATECALL", "line_number": 198, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes", "line_number": 198, "usage_type": "name"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.COINBASE", "line_number": 198, "usage_type": "attribute"}, {"api_name": "ethereum_test_tools.CodeGasMeasure", "line_number": 205, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.STATICCALL", "line_number": 206, "usage_type": "call"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes", "line_number": 206, "usage_type": "name"}, {"api_name": "ethereum_test_tools.vm.opcode.Opcodes.COINBASE", "line_number": 206, "usage_type": "attribute"}, {"api_name": "ethereum_test_tools.Environment", "line_number": 233, "usage_type": "call"}, {"api_name": "ethereum_test_tools.to_address", "line_number": 241, "usage_type": "call"}, {"api_name": "ethereum_test_tools.TestAddress", "line_number": 243, 
"usage_type": "name"}, {"api_name": "ethereum_test_tools.Account", "line_number": 243, "usage_type": "call"}, {"api_name": "ethereum_test_tools.Account", "line_number": 244, "usage_type": "call"}, {"api_name": "ethereum_test_forks.is_fork", "line_number": 247, "usage_type": "call"}, {"api_name": "ethereum_test_forks.Shanghai", "line_number": 247, "usage_type": "argument"}, {"api_name": "ethereum_test_tools.Account", "line_number": 253, "usage_type": "call"}, {"api_name": "ethereum_test_tools.Transaction", "line_number": 259, "usage_type": "call"}, {"api_name": "pytest.mark.valid_from", "line_number": 214, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 214, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 215, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 215, "usage_type": "attribute"}]} +{"seq_id": "26918752473", "text": "from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom datetime import date, timedelta\nfrom typing import List, Optional\n\n@dataclass(frozen=True)\nclass ValidityPeriod:\n start_date: date\n end_date: Optional[date]\n\n def is_active(self, on_date) -> bool:\n return self._end_date >= on_date and self.start_date <= on_date\n\n def is_included_in(self, other: ValidityPeriod) -> bool:\n return (\n other.start_date <= self.start_date\n and other._end_date >= self._end_date\n )\n\n def do_overlap(self, other: ValidityPeriod) -> bool:\n return (\n self.start_date <= other._end_date\n and self._end_date >= other.start_date\n )\n\n def subtract(self, other: ValidityPeriod) -> List[ValidityPeriod]:\n if self.is_included_in(other):\n return []\n elif not self.do_overlap(other):\n return [self]\n elif self._end_date <= other._end_date or other.end_date is None:\n return [\n ValidityPeriod(\n start_date=self.start_date,\n end_date=other.start_date - timedelta(days=1),\n )\n ]\n elif self.start_date >= other.start_date:\n return [\n ValidityPeriod(\n start_date=other.end_date + timedelta(days=1),\n end_date=self.end_date,\n )\n ]\n else:\n return [\n ValidityPeriod(\n start_date=self.start_date,\n end_date=other.start_date - timedelta(days=1),\n ),\n ValidityPeriod(\n start_date=other.end_date + timedelta(days=1),\n end_date=self.end_date,\n )\n ]\n\n @property\n def _end_date(self) -> date:\n \"\"\"Approximation to avoid `end_date is None` checks.\"\"\"\n return self.end_date or date.max\n\n def __post_init__(self):\n if self.start_date > self._end_date:\n raise ValueError(\n \"Invalid ValidityPeriod: start_date is after end_date\"\n )\n", "repo_name": "nicoolas25/temporal-examples", "sub_path": "temporal/validity_period.py", "file_name": "validity_period.py", "file_ext": "py", "file_size_in_byte": 2162, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "datetime.date", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 10, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 10, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 53, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "datetime.date.max", "line_number": 61, "usage_type": "attribute"}, 
{"api_name": "datetime.date", "line_number": 61, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 59, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "10106378813", "text": "#!/usr/local/bin/python3\n#-*- coding:utf-8 -*-\n\nimport telnetlib\nimport time\nfrom sqlalchemy import *\nfrom sys import argv\n\n#操作设备的命令脚本变量\norders = \"\"\"\nscreen-length disable\ndis acl 2044\nquit\n\"\"\"\n\n#定义登陆设备使用的用户名密码\nusername = 'user1'\npassword = 'XXX'\n\n#用于telnet设备的函数\ndef tel_dev(hostname, dev_ip):\n print(hostname, ' ', dev_ip)\n global tn\n global msg_all\n global msg_error\n msg_all = ''\n msg_error = ''\n try:\n tn = telnetlib.Telnet(dev_ip, timeout=3)\n time.sleep(1)\n try:\n msg_dev_user = (tn.read_until(b\"sername:\" or b\"ogin:\", timeout=3)).decode('utf-8')\n tn.write(username.encode('utf-8') + b'\\n')\n time.sleep(0.5)\n msg_dev_pwd = (tn.read_until(b\"assword:\", timeout=3)).decode('utf-8')\n tn.write(password.encode('utf-8') + b'\\n')\n time.sleep(1)\n order(orders)\n msg_dev_act = tn.read_all().decode(encoding='utf-8')\n tn.close()\n msg_all = msg_dev_user + msg_dev_pwd + msg_dev_act\n except:\n msg_error = \"orders error!!!\"\n # print(msg_all)\n tn.close()\n # print(msg_all)\n except:\n msg_error = \"Connection refused!!!\"\n\n#将命令转换成一行一行写入的命令格式\ndef order(orders):\n orders = orders.strip().split('\\n')\n for x in orders:\n tn.write(x.encode('utf-8') + b'\\n')\n\n\n\n# ======== 程序开始执行 ======== #\nprint(\"程序开始时间:\", end='')\nprint(time.strftime(\"%Y-%m-%d %H:%M:%S \",time.localtime(time.time())))\n\n#定义IP地址文件名,并打开读取文件\nfilename = \"ip.txt\"\nprint('filename: ', filename, '\\n')\nf = open(filename, mode='r')\nf_line = f.readlines()\n#print(f_line)\n\n#循环读取文件,并登陆设备修改ACL\nfor x in f_line:\n print('='*40)\n line = x.strip() #去首尾无用字符\n lines = line.split(',') #按,分割成列表\n tel_dev(lines[0],lines[1])\n # print(msg_all)\n print(msg_error)\n if('Basic' in msg_all):\n print('Have.')\n else:\n print('Not have!')\n\nprint('='*40)\nf.close()\n\n", "repo_name": "dzlovebeyond/Network-device---Automation-script", "sub_path": "tel-show.py", "file_name": "tel-show.py", "file_ext": "py", "file_size_in_byte": 2163, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "telnetlib.Telnet", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 34, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 37, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 60, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 60, "usage_type": "call"}, {"api_name": "time.time", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "4034822770", "text": "import numpy as np\nimport pytest\nfrom hypothesis import given\nfrom thewalrus.fock_gradients import (\n beamsplitter,\n displacement,\n mzgate,\n squeezing,\n two_mode_squeezing,\n)\n\nfrom mrmustard.lab import (\n Attenuator,\n BSgate,\n Coherent,\n Dgate,\n Interferometer,\n MZgate,\n RealInterferometer,\n Rgate,\n S2gate,\n Sgate,\n)\nfrom mrmustard.lab.states import TMSV, Fock, SqueezedVacuum, State\nfrom mrmustard.math import Math\nfrom mrmustard.math.lattice import strategies\nfrom mrmustard.physics import fock\nfrom tests.random import (\n angle,\n array_of_,\n medium_float,\n n_mode_pure_state,\n r,\n single_mode_cv_channel,\n single_mode_unitary_gate,\n 
two_mode_unitary_gate,\n)\n\nmath = Math()\n\n\n@given(state=n_mode_pure_state(num_modes=1), x=medium_float, y=medium_float)\ndef test_Dgate_1mode(state, x, y):\n state_out = state >> Dgate(x, y) >> Dgate(-x, -y)\n assert state_out == state\n\n\ndef test_attenuator_on_fock():\n \"tests that attenuating a fock state makes it mixed\"\n assert not (Fock(10) >> Attenuator(0.5)).is_pure\n\n\n@given(state=n_mode_pure_state(num_modes=2), xxyy=array_of_(medium_float, minlen=4, maxlen=4))\ndef test_Dgate_2mode(state, xxyy):\n x1, x2, y1, y2 = xxyy\n state_out = state >> Dgate([x1, x2], [y1, y2]) >> Dgate([-x1, -x2], [-y1, -y2])\n assert state_out == state\n\n\n@given(gate=single_mode_cv_channel())\ndef test_single_mode_fock_equals_gaussian_dm(gate):\n \"\"\"Test same state is obtained via fock representation or phase space\n for single mode circuits.\"\"\"\n cutoffs = [60]\n gaussian_state = SqueezedVacuum(0.5) >> Attenuator(0.5)\n fock_state = State(dm=gaussian_state.dm(cutoffs))\n\n via_fock_space_dm = (fock_state >> gate).dm(cutoffs)\n via_phase_space_dm = (gaussian_state >> gate).dm(cutoffs)\n assert np.allclose(via_fock_space_dm, via_phase_space_dm)\n\n\n@given(gate=single_mode_unitary_gate())\ndef test_single_mode_fock_equals_gaussian_ket(gate):\n \"\"\"Test same state is obtained via fock representation or phase space\n for single mode circuits.\"\"\"\n cutoffs = [70]\n gaussian_state = SqueezedVacuum(-0.1)\n fock_state = State(ket=gaussian_state.ket(cutoffs))\n\n via_fock_space_ket = (fock_state >> gate).ket([10])\n via_phase_space_ket = (gaussian_state >> gate).ket([10])\n phase = np.exp(1j * np.angle(via_fock_space_ket[0]))\n assert np.allclose(via_fock_space_ket, phase * via_phase_space_ket)\n\n\n@given(gate=single_mode_unitary_gate())\ndef test_single_mode_fock_equals_gaussian_ket_dm(gate):\n \"\"\"Test same state is obtained via fock representation or phase space\n for single mode circuits.\"\"\"\n cutoffs = (70,)\n gaussian_state = SqueezedVacuum(-0.1)\n fock_state = State(ket=gaussian_state.ket(cutoffs))\n\n via_fock_space_dm = (fock_state >> gate >> Attenuator(0.1)).dm([10])\n via_phase_space_dm = (gaussian_state >> gate >> Attenuator(0.1)).dm([10])\n assert np.allclose(via_fock_space_dm, via_phase_space_dm, atol=1e-5)\n\n\n@given(gate=two_mode_unitary_gate())\ndef test_two_mode_fock_equals_gaussian(gate):\n \"\"\"Test same state is obtained via fock representation or phase space\n for two modes circuits.\"\"\"\n cutoffs = (20, 20)\n gaussian_state = TMSV(0.1) >> BSgate(np.pi / 2) >> Attenuator(0.5)\n fock_state = State(dm=gaussian_state.dm(cutoffs))\n\n via_fock_space_dm = (fock_state >> gate).dm(cutoffs)\n via_phase_space_dm = (gaussian_state >> gate).dm(cutoffs)\n assert np.allclose(via_fock_space_dm, via_phase_space_dm)\n\n\n@pytest.mark.parametrize(\n \"cutoffs,x,y\",\n [\n [[5, 5], 0.3, 0.5],\n [[5, 5], 0.0, 0.0],\n [[2, 2, 2, 2], [0.1, 0.1], [0.25, -0.2]],\n [[3, 3, 3, 3], [0.0, 0.0], [0.0, 0.0]],\n [[2, 5, 1, 2, 5, 1], [0.1, 5.0, 1.0], [-0.3, 0.1, 0.0]],\n [[3, 3, 3, 3, 3, 3, 3, 3], [0.1, 0.2, 0.3, 0.4], [-0.5, -4, 3.1, 4.2]],\n ],\n)\ndef test_fock_representation_displacement(cutoffs, x, y):\n \"\"\"Tests that DGate returns the correct unitary.\"\"\"\n\n # apply gate\n dgate = Dgate(x, y)\n Ud = dgate.U(cutoffs)\n\n # compare with the standard way of calculating\n # transformation unitaries using the Choi isomorphism\n X, _, d = dgate.XYd(allow_none=False)\n expected_Ud = fock.wigner_to_fock_U(X, d, cutoffs)\n\n assert np.allclose(Ud, expected_Ud, 
atol=1e-5)\n\n\n@given(x1=medium_float, x2=medium_float, y1=medium_float, y2=medium_float)\ndef test_parallel_displacement(x1, x2, y1, y2):\n \"\"\"Tests that parallel Dgate returns the correct unitary.\"\"\"\n U12 = Dgate([x1, x2], [y1, y2]).U([2, 7, 2, 7])\n U1 = Dgate(x1, y1).U([2, 2])\n U2 = Dgate(x2, y2).U([7, 7])\n assert np.allclose(U12, np.transpose(np.tensordot(U1, U2, [[], []]), [0, 2, 1, 3]))\n\n\ndef test_squeezer_grad_against_finite_differences():\n \"\"\"tests fock squeezer gradient against finite differences\"\"\"\n cutoffs = (5, 5)\n r = math.new_variable(0.5, None, \"r\")\n phi = math.new_variable(0.1, None, \"phi\")\n delta = 1e-6\n dUdr = (Sgate(r + delta, phi).U(cutoffs) - Sgate(r - delta, phi).U(cutoffs)) / (2 * delta)\n dUdphi = (Sgate(r, phi + delta).U(cutoffs) - Sgate(r, phi - delta).U(cutoffs)) / (2 * delta)\n _, (gradr, gradphi) = math.value_and_gradients(\n lambda: fock.squeezer(r, phi, shape=cutoffs), [r, phi]\n )\n assert np.allclose(gradr, 2 * np.real(np.sum(dUdr)))\n assert np.allclose(gradphi, 2 * np.real(np.sum(dUdphi)))\n\n\ndef test_displacement_grad():\n \"\"\"tests fock displacement gradient against finite differences\"\"\"\n cutoffs = [5, 5]\n x = math.new_variable(0.1, None, \"x\")\n y = math.new_variable(0.1, None, \"y\")\n alpha = math.make_complex(x, y).numpy()\n delta = 1e-6\n dUdx = (fock.displacement(x + delta, y, cutoffs) - fock.displacement(x - delta, y, cutoffs)) / (\n 2 * delta\n )\n dUdy = (fock.displacement(x, y + delta, cutoffs) - fock.displacement(x, y - delta, cutoffs)) / (\n 2 * delta\n )\n\n D = fock.displacement(x, y, shape=cutoffs)\n dD_da, dD_dac = strategies.jacobian_displacement(math.asnumpy(D), alpha)\n assert np.allclose(dD_da + dD_dac, dUdx)\n assert np.allclose(1j * (dD_da - dD_dac), dUdy)\n\n\ndef test_fock_representation_displacement_rectangular():\n \"\"\"Tests that DGate returns the correct unitary.\"\"\"\n x, y = 0.3, 0.5\n cutoffs = 5, 10\n # apply gate\n dgate = Dgate(x, y)\n Ud = dgate.U(cutoffs)\n\n # compare with tw implementation\n expected_Ud = displacement(np.sqrt(x * x + y * y), np.arctan2(y, x), 10)[:5, :10]\n\n assert np.allclose(Ud, expected_Ud, atol=1e-5)\n\n\ndef test_fock_representation_displacement_rectangular2():\n \"\"\"Tests that DGate returns the correct unitary.\"\"\"\n x, y = 0.3, 0.5\n cutoffs = 10, 5\n # apply gate\n dgate = Dgate(x, y)\n Ud = dgate.U(cutoffs)\n\n # compare with tw implementation\n expected_Ud = displacement(np.sqrt(x * x + y * y), np.arctan2(y, x), 10)[:10, :5]\n\n assert np.allclose(Ud, expected_Ud, atol=1e-5)\n\n\n@given(r=r, phi=angle)\ndef test_fock_representation_squeezing(r, phi):\n S = Sgate(r=r, phi=phi)\n expected = squeezing(r=r, theta=phi, cutoff=20)\n assert np.allclose(expected, S.U(cutoffs=[20, 20]), atol=1e-5)\n\n\n@given(r1=r, phi1=angle, r2=r, phi2=angle)\ndef test_parallel_squeezing(r1, phi1, r2, phi2):\n \"\"\"Tests that two parallel squeezers return the correct unitary.\"\"\"\n U12 = Sgate([r1, r2], [phi1, phi2]).U([5, 7, 5, 7])\n U1 = Sgate(r1, phi1).U([5, 5])\n U2 = Sgate(r2, phi2).U([7, 7])\n assert np.allclose(U12, np.transpose(np.tensordot(U1, U2, [[], []]), [0, 2, 1, 3]))\n\n\n@given(theta=angle, phi=angle)\ndef test_fock_representation_beamsplitter(theta, phi):\n BS = BSgate(theta=theta, phi=phi)\n expected = beamsplitter(theta=theta, phi=phi, cutoff=10)\n assert np.allclose(expected, BS.U(cutoffs=[10, 10, 10, 10]), atol=1e-5)\n\n\n@given(r=r, phi=angle)\ndef test_fock_representation_two_mode_squeezing(r, phi):\n S2 = S2gate(r=r, phi=phi)\n expected = 
two_mode_squeezing(r=r, theta=phi, cutoff=10)\n assert np.allclose(expected, S2.U(cutoffs=[10, 10, 10, 10]), atol=1e-5)\n\n\n@given(phi_a=angle, phi_b=angle)\ndef test_fock_representation_mzgate(phi_a, phi_b):\n MZ = MZgate(phi_a=phi_a, phi_b=phi_b, internal=False)\n expected = mzgate(theta=phi_b, phi=phi_a, cutoff=10)\n assert np.allclose(expected, MZ.U(cutoffs=[10, 10, 10, 10]), atol=1e-5)\n\n\n@pytest.mark.parametrize(\n \"cutoffs,angles,modes\",\n [\n [[5, 4, 3], [np.pi, np.pi / 2, np.pi / 4], None],\n [[3, 4], [np.pi / 3, np.pi / 2], [0, 1]],\n [[3], np.pi / 6, [0]],\n ],\n)\ndef test_fock_representation_rgate(cutoffs, angles, modes):\n \"\"\"Tests that DGate returns the correct unitary.\"\"\"\n\n # apply gate\n rgate = Rgate(angles, modes=modes)\n R = rgate.U(cutoffs)\n\n # compare with the standard way of calculating\n # transformation unitaries using the Choi isomorphism\n d = np.zeros(len(cutoffs) * 2)\n expected_R = fock.wigner_to_fock_U(rgate.X_matrix, d, tuple(cutoffs + cutoffs))\n assert np.allclose(R, expected_R, atol=1e-5)\n\n\ndef test_raise_interferometer_error():\n \"\"\"test Interferometer raises an error when both `modes` and `num_modes` don't match\"\"\"\n num_modes = 3\n modes = [0, 2]\n with pytest.raises(ValueError):\n Interferometer(num_modes=num_modes, modes=modes)\n with pytest.raises(ValueError):\n RealInterferometer(num_modes=num_modes, modes=modes)\n modes = [2, 5, 6, 7]\n with pytest.raises(ValueError):\n Interferometer(num_modes=num_modes, modes=modes)\n with pytest.raises(ValueError):\n RealInterferometer(num_modes=num_modes, modes=modes)\n\n\ndef test_choi_cutoffs():\n output = State(dm=Coherent([1.0, 1.0]).dm([5, 8])) >> Attenuator(0.5, modes=[1])\n assert output.cutoffs == [5, 8] # cutoffs are respected by the gate\n\n\ndef test_measure_with_fock():\n \"tests that the autocutoff respects the fock projection cutoff\"\n cov = np.array(\n [\n [1.08341848, 0.26536937, 0.0, 0.0],\n [0.26536937, 1.05564949, 0.0, 0.0],\n [0.0, 0.0, 0.98356475, -0.24724869],\n [0.0, 0.0, -0.24724869, 1.00943755],\n ]\n )\n\n state = State(means=np.zeros(4), cov=cov)\n\n n_detect = 2\n state_out = state << Fock([n_detect], modes=[1])\n assert np.allclose(state_out.ket(), np.array([0.00757899, 0.0]))\n\n\n@given(theta=angle, phi=angle)\ndef test_schwinger_bs_equals_vanilla_bs_for_small_cutoffs(theta, phi):\n \"\"\"Tests that the Schwinger boson BS gate is equivalent to the vanilla BS gate for low cutoffs.\"\"\"\n U_vanilla = BSgate(theta, phi).U([10, 10, 10, 10], method=\"vanilla\")\n U_schwinger = BSgate(theta, phi).U([10, 10, 10, 10], method=\"schwinger\")\n\n assert np.allclose(U_vanilla, U_schwinger, atol=1e-6)\n", "repo_name": "sabinthapa100/MrMustard", "sub_path": "tests/test_lab/test_gates_fock.py", "file_name": "test_gates_fock.py", "file_ext": "py", "file_size_in_byte": 10699, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "21", "api": [{"api_name": "mrmustard.math.Math", "line_number": 39, "usage_type": "call"}, {"api_name": "mrmustard.lab.Dgate", "line_number": 44, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 42, "usage_type": "call"}, {"api_name": "tests.random.n_mode_pure_state", "line_number": 42, "usage_type": "call"}, {"api_name": "tests.random.medium_float", "line_number": 42, "usage_type": "name"}, {"api_name": "mrmustard.lab.states.Fock", "line_number": 50, "usage_type": "call"}, {"api_name": "mrmustard.lab.Attenuator", "line_number": 50, "usage_type": "call"}, {"api_name": 
"mrmustard.lab.Dgate", "line_number": 56, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 53, "usage_type": "call"}, {"api_name": "tests.random.n_mode_pure_state", "line_number": 53, "usage_type": "call"}, {"api_name": "tests.random.array_of_", "line_number": 53, "usage_type": "call"}, {"api_name": "tests.random.medium_float", "line_number": 53, "usage_type": "argument"}, {"api_name": "mrmustard.lab.states.SqueezedVacuum", "line_number": 65, "usage_type": "call"}, {"api_name": "mrmustard.lab.Attenuator", "line_number": 65, "usage_type": "call"}, {"api_name": "mrmustard.lab.states.State", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 70, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 60, "usage_type": "call"}, {"api_name": "tests.random.single_mode_cv_channel", "line_number": 60, "usage_type": "call"}, {"api_name": "mrmustard.lab.states.SqueezedVacuum", "line_number": 78, "usage_type": "call"}, {"api_name": "mrmustard.lab.states.State", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.angle", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 84, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 73, "usage_type": "call"}, {"api_name": "tests.random.single_mode_unitary_gate", "line_number": 73, "usage_type": "call"}, {"api_name": "mrmustard.lab.states.SqueezedVacuum", "line_number": 92, "usage_type": "call"}, {"api_name": "mrmustard.lab.states.State", "line_number": 93, "usage_type": "call"}, {"api_name": "mrmustard.lab.Attenuator", "line_number": 95, "usage_type": "call"}, {"api_name": "mrmustard.lab.Attenuator", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 97, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 87, "usage_type": "call"}, {"api_name": "tests.random.single_mode_unitary_gate", "line_number": 87, "usage_type": "call"}, {"api_name": "mrmustard.lab.states.TMSV", "line_number": 105, "usage_type": "call"}, {"api_name": "mrmustard.lab.BSgate", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 105, "usage_type": "attribute"}, {"api_name": "mrmustard.lab.Attenuator", "line_number": 105, "usage_type": "call"}, {"api_name": "mrmustard.lab.states.State", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 110, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 100, "usage_type": "call"}, {"api_name": "tests.random.two_mode_unitary_gate", "line_number": 100, "usage_type": "call"}, {"api_name": "mrmustard.lab.Dgate", "line_number": 128, "usage_type": "call"}, {"api_name": "mrmustard.physics.fock.wigner_to_fock_U", "line_number": 134, "usage_type": "call"}, {"api_name": "mrmustard.physics.fock", "line_number": 134, "usage_type": "name"}, {"api_name": "numpy.allclose", "line_number": 136, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 113, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 113, "usage_type": "attribute"}, {"api_name": "mrmustard.lab.Dgate", "line_number": 142, "usage_type": "call"}, {"api_name": "mrmustard.lab.Dgate", "line_number": 143, "usage_type": "call"}, {"api_name": "mrmustard.lab.Dgate", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 
145, "usage_type": "call"}, {"api_name": "numpy.tensordot", "line_number": 145, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 139, "usage_type": "call"}, {"api_name": "tests.random.medium_float", "line_number": 139, "usage_type": "name"}, {"api_name": "tests.random.r", "line_number": 151, "usage_type": "name"}, {"api_name": "mrmustard.lab.Sgate", "line_number": 154, "usage_type": "call"}, {"api_name": "tests.random.r", "line_number": 154, "usage_type": "name"}, {"api_name": "mrmustard.lab.Sgate", "line_number": 155, "usage_type": "call"}, {"api_name": "tests.random.r", "line_number": 155, "usage_type": "argument"}, {"api_name": "mrmustard.physics.fock.squeezer", "line_number": 157, "usage_type": "call"}, {"api_name": "tests.random.r", "line_number": 157, "usage_type": "argument"}, {"api_name": "mrmustard.physics.fock", "line_number": 157, "usage_type": "name"}, {"api_name": "numpy.allclose", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 160, "usage_type": "call"}, {"api_name": "mrmustard.physics.fock.displacement", "line_number": 170, "usage_type": "call"}, {"api_name": "mrmustard.physics.fock", "line_number": 170, "usage_type": "name"}, {"api_name": "mrmustard.physics.fock.displacement", "line_number": 173, "usage_type": "call"}, {"api_name": "mrmustard.physics.fock", "line_number": 173, "usage_type": "name"}, {"api_name": "mrmustard.physics.fock.displacement", "line_number": 177, "usage_type": "call"}, {"api_name": "mrmustard.physics.fock", "line_number": 177, "usage_type": "name"}, {"api_name": "mrmustard.math.lattice.strategies.jacobian_displacement", "line_number": 178, "usage_type": "call"}, {"api_name": "mrmustard.math.lattice.strategies", "line_number": 178, "usage_type": "name"}, {"api_name": "numpy.allclose", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 180, "usage_type": "call"}, {"api_name": "mrmustard.lab.Dgate", "line_number": 188, "usage_type": "call"}, {"api_name": "thewalrus.fock_gradients.displacement", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 194, "usage_type": "call"}, {"api_name": "mrmustard.lab.Dgate", "line_number": 202, "usage_type": "call"}, {"api_name": "thewalrus.fock_gradients.displacement", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 208, "usage_type": "call"}, {"api_name": "mrmustard.lab.Sgate", "line_number": 213, "usage_type": "call"}, {"api_name": "tests.random.r", "line_number": 213, "usage_type": "name"}, {"api_name": "thewalrus.fock_gradients.squeezing", "line_number": 214, "usage_type": "call"}, {"api_name": "tests.random.r", "line_number": 214, "usage_type": "name"}, {"api_name": "numpy.allclose", "line_number": 215, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 211, "usage_type": "call"}, {"api_name": "tests.random.r", "line_number": 211, "usage_type": "name"}, 
{"api_name": "tests.random.angle", "line_number": 211, "usage_type": "name"}, {"api_name": "mrmustard.lab.Sgate", "line_number": 221, "usage_type": "call"}, {"api_name": "mrmustard.lab.Sgate", "line_number": 222, "usage_type": "call"}, {"api_name": "mrmustard.lab.Sgate", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.tensordot", "line_number": 224, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 218, "usage_type": "call"}, {"api_name": "tests.random.r", "line_number": 218, "usage_type": "name"}, {"api_name": "tests.random.angle", "line_number": 218, "usage_type": "name"}, {"api_name": "mrmustard.lab.BSgate", "line_number": 229, "usage_type": "call"}, {"api_name": "thewalrus.fock_gradients.beamsplitter", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 231, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 227, "usage_type": "call"}, {"api_name": "tests.random.angle", "line_number": 227, "usage_type": "name"}, {"api_name": "mrmustard.lab.S2gate", "line_number": 236, "usage_type": "call"}, {"api_name": "tests.random.r", "line_number": 236, "usage_type": "name"}, {"api_name": "thewalrus.fock_gradients.two_mode_squeezing", "line_number": 237, "usage_type": "call"}, {"api_name": "tests.random.r", "line_number": 237, "usage_type": "name"}, {"api_name": "numpy.allclose", "line_number": 238, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 234, "usage_type": "call"}, {"api_name": "tests.random.r", "line_number": 234, "usage_type": "name"}, {"api_name": "tests.random.angle", "line_number": 234, "usage_type": "name"}, {"api_name": "mrmustard.lab.MZgate", "line_number": 243, "usage_type": "call"}, {"api_name": "thewalrus.fock_gradients.mzgate", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 245, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 241, "usage_type": "call"}, {"api_name": "tests.random.angle", "line_number": 241, "usage_type": "name"}, {"api_name": "mrmustard.lab.Rgate", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 265, "usage_type": "call"}, {"api_name": "mrmustard.physics.fock.wigner_to_fock_U", "line_number": 266, "usage_type": "call"}, {"api_name": "mrmustard.physics.fock", "line_number": 266, "usage_type": "name"}, {"api_name": "numpy.allclose", "line_number": 267, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 248, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 248, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 251, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 252, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 253, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 274, "usage_type": "call"}, {"api_name": "mrmustard.lab.Interferometer", "line_number": 275, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 276, "usage_type": "call"}, {"api_name": "mrmustard.lab.RealInterferometer", "line_number": 277, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 279, "usage_type": "call"}, {"api_name": "mrmustard.lab.Interferometer", "line_number": 280, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 281, "usage_type": "call"}, 
{"api_name": "mrmustard.lab.RealInterferometer", "line_number": 282, "usage_type": "call"}, {"api_name": "mrmustard.lab.states.State", "line_number": 286, "usage_type": "call"}, {"api_name": "mrmustard.lab.Coherent", "line_number": 286, "usage_type": "call"}, {"api_name": "mrmustard.lab.Attenuator", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 292, "usage_type": "call"}, {"api_name": "mrmustard.lab.states.State", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 301, "usage_type": "call"}, {"api_name": "mrmustard.lab.states.Fock", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 305, "usage_type": "call"}, {"api_name": "mrmustard.lab.BSgate", "line_number": 311, "usage_type": "call"}, {"api_name": "mrmustard.lab.BSgate", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 314, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 308, "usage_type": "call"}, {"api_name": "tests.random.angle", "line_number": 308, "usage_type": "name"}]} +{"seq_id": "36050457637", "text": "import numpy as np\n\nfrom valuereps.vr_approximate import VrApproximateLinear\nfrom valuereps.polynomial_terms import StateAction, Feature, AVAILABLE_FEATURES\n\nfrom utils.scaler import scaler\n\nfrom utils.type_aliases import TypeValidState, TypeAction, TypeNDarray64\n\n\nclass VrPolynomial(VrApproximateLinear):\n\n def __init__(self) -> None:\n\n super().__init__()\n\n # based on a forward-backward model search against reference case (10^8 episodes of MC off)\n # best 4th degree polynomial model, AICc= -1038.7\n\n self._used_term_indexes: list[int] = [0, 2, 4, 6, 7, 8, 9, 10, 13, 14, 16, 18, 19, 20, 28, 32,\n 37, 39, 40, 41, 52, 64, 81, 84, 88, 89, 95]\n\n # best 3th degree model, AICc= -1006.2\n # self._used_term_indexes: list[int] = [0, 2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 20,\n # 24, 29, 31, 32, 35, 41, 43, 44, 48, 53, 60]\n\n # based on sqrt_lasso against reference case\n # self._used_term_indexes: list[int] = [1, 3, 5, 6, 8, 13, 16, 17, 18, 19, 21, 22, 24, 36, 37, 38, 40,\n # 45, 60, 65, 85, 86, 87, 88, 89, 90, 91]\n\n self._terms: list[Feature] = [AVAILABLE_FEATURES[f] for f in self._used_term_indexes]\n\n for feature in self._terms:\n scaler.register_scale(feature['name'], feature['min_value'], feature['max_value'])\n\n #self._initial_weight = constants.INITIAL_WEIGHT\n #self._weights: TypeNDarray64 = np.asarray([ self._initial_weight ] * len(self._used_term_indexes))\n weight_range = 0.2\n self._weights: TypeNDarray64 = np.random.uniform(-weight_range, weight_range,\n len(self._used_term_indexes)).tolist()\n\n\n def get_features(self, state: TypeValidState, action: TypeAction) -> list[float]:\n\n dealer = state[0]\n player = state[1]\n\n soft_ind = 1 if state[2] else 0\n action_ind = 1 if action else 0\n\n state_action = StateAction(dealer, player, soft_ind, action_ind)\n\n scaled_values: list[float] = []\n\n for term in self._terms:\n feature_name = term['name']\n value_func = term['func']\n\n feature_value = value_func(state_action)\n scaled_value = scaler.scale_value( feature_value ,feature_name)\n\n scaled_values.append(scaled_value)\n\n return scaled_values\n", "repo_name": "mmakipaa/rl", "sub_path": "valuereps/vr_polynomial_blackjack.py", "file_name": "vr_polynomial_blackjack.py", "file_ext": "py", "file_size_in_byte": 2474, "program_lang": "python", "lang": 
"en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "valuereps.vr_approximate.VrApproximateLinear", "line_number": 11, "usage_type": "name"}, {"api_name": "valuereps.polynomial_terms.Feature", "line_number": 31, "usage_type": "name"}, {"api_name": "valuereps.polynomial_terms.AVAILABLE_FEATURES", "line_number": 31, "usage_type": "name"}, {"api_name": "utils.scaler.scaler.register_scale", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.scaler.scaler", "line_number": 34, "usage_type": "name"}, {"api_name": "utils.type_aliases.TypeNDarray64", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.random.uniform", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 39, "usage_type": "attribute"}, {"api_name": "utils.type_aliases.TypeValidState", "line_number": 43, "usage_type": "name"}, {"api_name": "utils.type_aliases.TypeAction", "line_number": 43, "usage_type": "name"}, {"api_name": "valuereps.polynomial_terms.StateAction", "line_number": 51, "usage_type": "call"}, {"api_name": "utils.scaler.scaler.scale_value", "line_number": 60, "usage_type": "call"}, {"api_name": "utils.scaler.scaler", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "40608883391", "text": "import json\nfrom requests import Session\n\nfrom exceptions import NoGitHubTokenException\nfrom config import get_token, get_credentials, APP_NAME\n\n\nclass PullRequest:\n def __init__(self, event=None):\n self.event = event\n self._session = Session()\n try:\n self._session.headers.update({\"Authorization\": f\"token {get_token()}\"})\n except NoGitHubTokenException:\n self._session.auth = get_credentials()\n self._session.headers.update({\"User-Agent\": APP_NAME})\n if event is not None:\n self.issue_url = event['pull_request']['issue_url']\n\n @property\n def labels(self):\n return self.request_labels_json()\n\n def request_labels_json(self):\n response = self._session.get(self.label_url)\n if response.status_code >= 300:\n print(\"Got a non-2xx status: \", response.url, response.headers,\n response.content)\n return response.json()\n\n @property\n def label_url(self):\n return \"{}/labels\".format(self.issue_url)\n\n def compute_and_post_status(self, required_any, required_all, banned):\n return self.post_status(self.create_status_json(required_any, required_all, banned))\n\n def post_status(self, status_json):\n response = self._session.post(self.statuses_url, data=status_json)\n return response.status_code\n\n @property\n def statuses_url(self):\n return self.event['pull_request']['statuses_url']\n\n def create_status_json(self, required_any, required_all, banned):\n passes_label_requirements = self.validate_labels(required_any, required_all, banned)\n if passes_label_requirements:\n description = \"Label requirements satisfied.\"\n else:\n description = \"Label requirements not satisfied.\"\n response_json = {\n \"state\": \"success\" if passes_label_requirements else \"failure\",\n \"target_url\": \"\",\n \"description\": description,\n \"context\": APP_NAME,\n }\n return json.dumps(response_json)\n\n def validate_labels(self, required_any, required_all, banned):\n try:\n labels_json = self.labels\n labels_list = [l['name'] for l in labels_json]\n if required_any is not None and not any(l in required_any for l in labels_list):\n return False\n if required_all is not None and any(l not in labels_list for l in required_all):\n return False\n if banned is not None and any(l in labels_list for l in banned):\n return False\n 
return True\n except TypeError:\n print('self.labels was of unexpected format for PR event {}: {}'.format(self.issue_url, labels_json))\n return False\n", "repo_name": "dimagi/required-labels", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2778, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 46, "dataset": "github-code", "pt": "21", "api": [{"api_name": "requests.Session", "line_number": 11, "usage_type": "call"}, {"api_name": "config.get_token", "line_number": 13, "usage_type": "call"}, {"api_name": "exceptions.NoGitHubTokenException", "line_number": 14, "usage_type": "name"}, {"api_name": "config.get_credentials", "line_number": 15, "usage_type": "call"}, {"api_name": "config.APP_NAME", "line_number": 16, "usage_type": "name"}, {"api_name": "config.APP_NAME", "line_number": 56, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "27729486191", "text": "import sqlalchemy as sa\nfrom gs.core import to_unicode_or_bust\nfrom gs.database import getSession\nfrom Products.XWFMailingListManager.queries import MessageQuery\\\n as MailingListQuery\n\n\ndef topic_sorter_desc(x, y):\n if x['last_post_date'] < y['last_post_date']:\n return 1\n else:\n return -1\n\n\nclass MessageQuery(MailingListQuery):\n \"\"\"Query the message database\"\"\"\n\n# --=mpj17=-- We are moving the post-searching to gs.group.messages.posts\n\n#lint:disable\n TOPIC_SEARCH = 1\n TOPIC_SEARCH_COUNT = 2\n POST_SEARCH = 4\n POST_SEARCH_COUNT = 8\n#lint:enable\n\n def __init__(self, context):\n MailingListQuery.__init__(self, context)\n\n def add_standard_where_clauses(self, statement, table,\n site_id, group_ids, hidden):\n statement.append_whereclause(table.c.site_id == site_id)\n if group_ids:\n inStatement = table.c.group_id.in_(group_ids)\n statement.append_whereclause(inStatement)\n else:\n # --=mpj17=-- No, I am not smoking (much) crack. If the\n # \"group_ids\" are not specified, I want to return nothing in\n # all cases. 
However, I cannot append \"False\" to the\n # statement, so I append two items that are mutually\n # exclusive.\n statement.append_whereclause(table.c.group_id == '')\n statement.append_whereclause(table.c.group_id != '')\n if not(hidden):\n # We normally want to exclude hidden posts and topics.\n statement.append_whereclause(table.c.hidden == None) # lint:ok\n return statement\n\n def __add_topic_keyword_search_where_clauses(self, statement, tokens):\n tt = self.topicTable\n if tokens.keywords:\n q = ' & '.join(tokens.keywords)\n statement.append_whereclause(tt.c.fts_vectors.match(q))\n assert statement is not None\n return statement\n\n def __add_post_keyword_search_where_clauses(self, statement,\n searchTokens):\n \"\"\"Post searching is easier than topic searching, as there is no\n natural join between the topic and post tables.\"\"\"\n pt = self.postTable\n if searchTokens.keywords:\n q = ' & '.join(searchTokens.keywords)\n statement.append_whereclause(pt.c.fts_vectors.match(q))\n assert statement is not None\n return statement\n\n def __add_author_where_clauses(self, statement, author_ids):\n pt = self.postTable\n author_ids = author_ids and [a for a in author_ids if a] or []\n if author_ids:\n statement.append_whereclause(pt.c.user_id.in_(author_ids))\n return statement\n\n def topic_search_keyword(self, searchTokens, site_id, group_ids=None,\n limit=12, offset=0, use_cache=True, hidden=False):\n \"\"\" Search for the search text in the content and subject-lines of\n topics.\"\"\"\n if group_ids is None:\n group_ids = []\n\n tt = self.topicTable\n tkt = self.topicKeywordsTable\n pt = self.postTable\n\n cols = [tt.c.topic_id, tt.c.last_post_id,\n tt.c.first_post_id, tt.c.group_id, tt.c.site_id, tkt.c.keywords,\n tt.c.original_subject, tt.c.last_post_date, tt.c.num_posts,\n sa.select([pt.c.user_id], tt.c.last_post_id ==\n pt.c.post_id).as_scalar().label('user_id')]\n statement = sa.select(cols, limit=limit, offset=offset,\n order_by=sa.desc(tt.c.last_post_date))\n statement.append_whereclause(tkt.c.topic_id == tt.c.topic_id)\n statement = self.add_standard_where_clauses(statement,\n self.topicTable, site_id, group_ids, False)\n statement = self.__add_topic_keyword_search_where_clauses(statement,\n searchTokens)\n session = getSession()\n r = session.execute(statement)\n retval = []\n for x in r:\n retval.append({'topic_id': x['topic_id'],\n 'last_post_id': x['last_post_id'],\n 'first_post_id': x['first_post_id'],\n 'group_id': x['group_id'],\n 'site_id': x['site_id'],\n 'subject': x['original_subject'],\n 'keywords': [to_unicode_or_bust(k)\n for k in x['keywords']],\n 'last_post_date': x['last_post_date'],\n 'last_post_user_id': x['user_id'],\n 'num_posts': x['num_posts']})\n\n return retval\n\n def post_search_keyword(self, searchTokens, site_id, group_ids=None,\n author_ids=None, limit=12, offset=0):\n if group_ids is None:\n group_ids = []\n if author_ids is None:\n author_ids = []\n pt = self.postTable\n cols = [pt.c.post_id, pt.c.user_id, pt.c.group_id, pt.c.subject,\n pt.c.date, pt.c.body, pt.c.has_attachments]\n statement = sa.select(cols, limit=limit, offset=offset,\n order_by=sa.desc(pt.c.date))\n self.add_standard_where_clauses(statement, pt, site_id, group_ids,\n False)\n statement = self.__add_author_where_clauses(statement, author_ids)\n statement = self.__add_post_keyword_search_where_clauses(statement,\n searchTokens)\n\n session = getSession()\n r = session.execute(statement)\n retval = []\n for x in r:\n p = {\n 'post_id': x['post_id'],\n 'user_id': x['user_id'],\n 
'group_id': x['group_id'],\n 'subject': x['subject'],\n 'date': x['date'],\n 'body': x['body'],\n 'files_metadata': x['has_attachments']\n and self.files_metadata(x['post_id'])\n or [],\n }\n retval.append(p)\n return retval\n\n def post_ids_from_file_ids(self, fileIds, hidden=False):\n p = self.postTable\n f = self.fileTable\n statement = f.select()\n inStatement = f.c.file_id.in_(fileIds)\n statement.append_whereclause(inStatement)\n statement.append_whereclause(p.c.post_id == f.c.post_id)\n if not hidden:\n statement.append_whereclause(p.c.hidden == None) # lint:ok\n\n session = getSession()\n r = session.execute(statement)\n retval = {}\n if r.rowcount:\n for x in r:\n retval[x['file_id']] = x['post_id']\n return retval\n\n def files_metadata_topic(self, topic_ids):\n ft = self.fileTable\n pt = self.postTable\n cols = [\n pt.c.site_id, pt.c.group_id, pt.c.topic_id, pt.c.user_id,\n ft.c.post_id, ft.c.file_id, ft.c.mime_type, ft.c.file_name,\n ft.c.file_size, ft.c.date]\n statement = sa.select(cols, ft.c.topic_id.in_(topic_ids),\n order_by=self.fileTable.c.date)\n statement.append_whereclause(ft.c.post_id == pt.c.post_id)\n\n session = getSession()\n r = session.execute(statement)\n\n retval = [{\n 'site_id': x['site_id'],\n 'group_id': x['group_id'],\n 'topic_id': x['topic_id'],\n 'user_id': x['user_id'],\n 'post_id': x['post_id'],\n 'file_id': x['file_id'],\n 'file_size': x['file_size'],\n 'mime_type': x['mime_type'],\n 'file_name': x['file_name'],\n 'date': x['date'],\n } for x in r]\n return retval\n", "repo_name": "groupserver/Products.GSSearch", "sub_path": "Products/GSSearch/querymessage.py", "file_name": "querymessage.py", "file_ext": "py", "file_size_in_byte": 7787, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "Products.XWFMailingListManager.queries.MessageQuery", "line_number": 15, "usage_type": "name"}, {"api_name": "Products.XWFMailingListManager.queries.MessageQuery.__init__", "line_number": 28, "usage_type": "call"}, {"api_name": "Products.XWFMailingListManager.queries.MessageQuery", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.select", "line_number": 89, "usage_type": "call"}, {"api_name": "sqlalchemy.select", "line_number": 91, "usage_type": "call"}, {"api_name": "sqlalchemy.desc", "line_number": 92, "usage_type": "call"}, {"api_name": "gs.database.getSession", "line_number": 98, "usage_type": "call"}, {"api_name": "gs.core.to_unicode_or_bust", "line_number": 108, "usage_type": "call"}, {"api_name": "sqlalchemy.select", "line_number": 125, "usage_type": "call"}, {"api_name": "sqlalchemy.desc", "line_number": 126, "usage_type": "call"}, {"api_name": "gs.database.getSession", "line_number": 133, "usage_type": "call"}, {"api_name": "gs.database.getSession", "line_number": 161, "usage_type": "call"}, {"api_name": "sqlalchemy.select", "line_number": 176, "usage_type": "call"}, {"api_name": "gs.database.getSession", "line_number": 180, "usage_type": "call"}]} +{"seq_id": "18589468581", "text": "import numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose, assert_equal\n\nfrom astropy.timeseries.periodograms.lombscargle.implementations.utils import (\n bitceil,\n extirpolate,\n trig_sum,\n)\n\n\n@pytest.mark.parametrize(\"N\", 2 ** np.arange(1, 12))\n@pytest.mark.parametrize(\"offset\", [-1, 0, 1])\ndef test_bitceil(N, offset):\n assert_equal(bitceil(N + offset), int(2 ** np.ceil(np.log2(N + offset))))\n\n\n@pytest.fixture\ndef extirpolate_data():\n rng = 
np.random.default_rng(0)\n x = 100 * rng.random(50)\n y = np.sin(x)\n f = lambda x: np.sin(x / 10)\n return x, y, f\n\n\n@pytest.mark.parametrize(\"N\", [100, None])\n@pytest.mark.parametrize(\"M\", [5])\ndef test_extirpolate(N, M, extirpolate_data):\n x, y, f = extirpolate_data\n y_hat = extirpolate(x, y, N, M)\n x_hat = np.arange(len(y_hat))\n assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat), rtol=1.5e-5)\n\n\n@pytest.fixture\ndef extirpolate_int_data():\n rng = np.random.default_rng(0)\n x = 100 * rng.random(50)\n x[:25] = x[:25].astype(int)\n y = np.sin(x)\n f = lambda x: np.sin(x / 10)\n return x, y, f\n\n\n@pytest.mark.parametrize(\"N\", [100, None])\n@pytest.mark.parametrize(\"M\", [5])\ndef test_extirpolate_with_integers(N, M, extirpolate_int_data):\n x, y, f = extirpolate_int_data\n y_hat = extirpolate(x, y, N, M)\n x_hat = np.arange(len(y_hat))\n assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat), rtol=1.7e-5)\n\n\n@pytest.fixture\ndef trig_sum_data():\n rng = np.random.default_rng(0)\n t = 10 * rng.random(50)\n h = np.sin(t)\n return t, h\n\n\n@pytest.mark.parametrize(\"f0\", [0, 1])\n@pytest.mark.parametrize(\"adjust_t\", [True, False])\n@pytest.mark.parametrize(\"freq_factor\", [1, 2])\n@pytest.mark.parametrize(\"df\", [0.1])\ndef test_trig_sum(f0, adjust_t, freq_factor, df, trig_sum_data):\n t, h = trig_sum_data\n\n tfit = t - t.min() if adjust_t else t\n S1, C1 = trig_sum(\n tfit,\n h,\n df,\n N=1000,\n use_fft=True,\n f0=f0,\n freq_factor=freq_factor,\n oversampling=10,\n )\n S2, C2 = trig_sum(\n tfit,\n h,\n df,\n N=1000,\n use_fft=False,\n f0=f0,\n freq_factor=freq_factor,\n oversampling=10,\n )\n assert_allclose(S1, S2, atol=1e-2)\n assert_allclose(C1, C2, atol=1e-2)\n", "repo_name": "astropy/astropy", "sub_path": "astropy/timeseries/periodograms/lombscargle/implementations/tests/test_utils.py", "file_name": "test_utils.py", "file_ext": "py", "file_size_in_byte": 2320, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4015, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.testing.assert_equal", "line_number": 15, "usage_type": "call"}, {"api_name": "astropy.timeseries.periodograms.lombscargle.implementations.utils.bitceil", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.log2", "line_number": 15, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 12, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 12, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 13, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.random.default_rng", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 23, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 18, "usage_type": "attribute"}, {"api_name": "astropy.timeseries.periodograms.lombscargle.implementations.utils.extirpolate", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 33, "usage_type": "call"}, {"api_name": 
"pytest.mark.parametrize", "line_number": 27, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 28, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.random.default_rng", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 42, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 36, "usage_type": "attribute"}, {"api_name": "astropy.timeseries.periodograms.lombscargle.implementations.utils.extirpolate", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 52, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 46, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 47, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.random.default_rng", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 57, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 59, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 55, "usage_type": "attribute"}, {"api_name": "astropy.timeseries.periodograms.lombscargle.implementations.utils.trig_sum", "line_number": 71, "usage_type": "call"}, {"api_name": "astropy.timeseries.periodograms.lombscargle.implementations.utils.trig_sum", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 92, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 63, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 64, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 65, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 66, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 66, "usage_type": "attribute"}]} +{"seq_id": "22414274845", "text": "import pandas as pd\nfrom sklearn.metrics import mean_absolute_error\n\nfrom e2eml.regression import regression_blueprints as rb\n\n# track memory consumption in terminal: dmesg\n\n\ndef load_housingprices_data():\n \"\"\"\n Load & preprocess Housing prices dataset. 
T\n :return: Several dataframes and series to be processed by blueprint.\n \"\"\"\n data = pd.read_csv(\"housingprices_train.csv\")\n\n def new_features(X):\n X[\"HasWoodDeck\"] = (X[\"WoodDeckSF\"] == 0) * 1\n X[\"HasOpenPorch\"] = (X[\"OpenPorchSF\"] == 0) * 1\n X[\"HasEnclosedPorch\"] = (X[\"EnclosedPorch\"] == 0) * 1\n X[\"Has3SsnPorch\"] = (X[\"3SsnPorch\"] == 0) * 1\n X[\"HasScreenPorch\"] = (X[\"ScreenPorch\"] == 0) * 1\n X[\"Total_Home_Quality\"] = X[\"OverallQual\"] + X[\"OverallCond\"]\n X[\"TotalSF\"] = X[\"TotalBsmtSF\"] + X[\"1stFlrSF\"] + X[\"2ndFlrSF\"]\n X[\"TotalSquareFootage\"] = (\n X[\"BsmtFinSF1\"] + X[\"BsmtFinSF2\"] + X[\"1stFlrSF\"] + X[\"2ndFlrSF\"]\n )\n X[\"HasPool\"] = X[\"PoolArea\"].apply(lambda x: 1 if x > 0 else 0)\n X[\"Has2ndFloor\"] = X[\"2ndFlrSF\"].apply(lambda x: 1 if x > 0 else 0)\n X[\"HasGarage\"] = X[\"GarageArea\"].apply(lambda x: 1 if x > 0 else 0)\n X[\"HasBsmt\"] = X[\"TotalBsmtSF\"].apply(lambda x: 1 if x > 0 else 0)\n X[\"HasFireplace\"] = X[\"Fireplaces\"].apply(lambda x: 1 if x > 0 else 0)\n return X\n\n data = new_features(data)\n print(\"Do dataframe splits.\")\n test_df = data.head(1000).copy()\n val_df = data.tail(460).copy()\n val_df_target = val_df[\"SalePrice\"].copy()\n del val_df[\"SalePrice\"]\n test_target = \"SalePrice\"\n test_categorical_cols = [\n \"MSZoning\",\n \"Street\",\n \"Alley\",\n \"LotShape\",\n \"LotFrontage\",\n \"Street\",\n \"LandContour\",\n \"Utilities\",\n \"LotConfig\",\n \"LandSlope\",\n \"Neighborhood\",\n \"Condition1\",\n \"Condition2\",\n \"BldgType\",\n \"HouseStyle\",\n \"RoofStyle\",\n \"RoofMatl\",\n \"Exterior1st\",\n \"Exterior2nd\",\n \"ExterQual\",\n \"ExterCond\",\n \"Foundation\",\n \"BsmtQual\",\n \"BsmtCond\",\n \"BsmtExposure\",\n \"BsmtFinType1\",\n \"BsmtFinType2\",\n \"Heating\",\n \"HeatingQC\",\n \"Electrical\",\n \"KitchenQual\",\n \"Functional\",\n \"FireplaceQU\",\n \"GarageType\",\n \"GarageYrBlt\",\n \"GarageFinish\",\n \"GarageQual\",\n \"GarageCond\",\n \"PavedDrive\",\n \"PoolQC\",\n \"Fence\",\n \"MiscFeature\",\n \"SaleType\",\n \"SaleCondition\",\n ]\n return test_df, test_target, val_df, val_df_target, test_categorical_cols\n\n\ndef test_ml_special_regression_multiclass_full_processing_multimodel_avg_blender():\n (\n test_df,\n test_target,\n val_df,\n val_df_target,\n test_categorical_cols,\n ) = load_housingprices_data()\n titanic_auto_ml = rb.RegressionBluePrint(\n datasource=test_df,\n target_variable=test_target,\n categorical_columns=test_categorical_cols,\n preferred_training_mode=\"auto\",\n tune_mode=\"accurate\",\n ml_task=\"regression\",\n )\n titanic_auto_ml.hyperparameter_tuning_rounds = {\n \"xgboost\": 10,\n \"lgbm\": 500,\n \"tabnet\": 3,\n \"ngboost\": 10,\n \"sklearn_ensemble\": 3,\n \"catboost\": 10,\n \"ridge\": 3,\n \"bruteforce_random\": 10,\n \"elasticnet\": 10,\n \"autoencoder_based_oversampling\": 20,\n \"final_kernel_pca_dimensionality_reduction\": 100,\n \"final_pca_dimensionality_reduction\": 20,\n \"synthetic_data_augmentation\": 100,\n }\n\n titanic_auto_ml.special_blueprint_algorithms = {\n \"ridge\": False,\n \"xgboost\": False,\n \"ngboost\": False,\n \"lgbm\": True,\n \"tabnet\": False,\n \"vowpal_wabbit\": False,\n \"sklearn_ensemble\": False,\n \"catboost\": False,\n \"elasticnet\": False,\n }\n titanic_auto_ml.blueprint_step_selection_non_nlp[\n \"final_pca_dimensionality_reduction\"\n ] = False\n titanic_auto_ml.blueprint_step_selection_non_nlp[\n \"autoencoder_based_oversampling\"\n ] = False\n 
titanic_auto_ml.blueprint_step_selection_non_nlp[\"scaling\"] = False\n titanic_auto_ml.hyperparameter_tuning_sample_size = 800\n\n titanic_auto_ml.ml_special_regression_full_processing_multimodel_avg_blender()\n titanic_auto_ml.ml_special_regression_full_processing_multimodel_avg_blender(val_df)\n val_y_hat = titanic_auto_ml.predicted_values[\"blended_preds\"]\n mae = mean_absolute_error(val_df_target, val_y_hat)\n finished = True\n assert finished is True\n assert mae >= 0\n\n\ndef test_ml_bp10_train_test_regression_full_processing_linear_reg():\n (\n test_df,\n test_target,\n val_df,\n val_df_target,\n test_categorical_cols,\n ) = load_housingprices_data()\n titanic_auto_ml = rb.RegressionBluePrint(\n datasource=test_df,\n target_variable=test_target,\n categorical_columns=test_categorical_cols,\n preferred_training_mode=\"auto\",\n tune_mode=\"accurate\",\n )\n titanic_auto_ml.ml_bp10_train_test_regression_full_processing_linear_reg()\n titanic_auto_ml.ml_bp10_train_test_regression_full_processing_linear_reg(val_df)\n val_y_hat = titanic_auto_ml.predicted_values[\"linear_regression\"]\n mae = mean_absolute_error(val_df_target, val_y_hat)\n finished = True\n assert finished is True\n assert mae >= 0\n\n\nif __name__ == \"__main__\":\n test_ml_special_regression_multiclass_full_processing_multimodel_avg_blender()\n", "repo_name": "ThomasMeissnerDS/e2e_ml", "sub_path": "e2eml/test/regression_blueprints_test.py", "file_name": "regression_blueprints_test.py", "file_ext": "py", "file_size_in_byte": 5594, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "e2eml.regression.regression_blueprints.RegressionBluePrint", "line_number": 98, "usage_type": "call"}, {"api_name": "e2eml.regression.regression_blueprints", "line_number": 98, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 145, "usage_type": "call"}, {"api_name": "e2eml.regression.regression_blueprints.RegressionBluePrint", "line_number": 159, "usage_type": "call"}, {"api_name": "e2eml.regression.regression_blueprints", "line_number": 159, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 169, "usage_type": "call"}]} +{"seq_id": "74662569012", "text": "import dash\nimport dash_core_components as dcc\nimport plotly.graph_objs as go\nimport dash_html_components as html\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\n\n\n\ndef q_ke_tgl(quarter,tahun):\n q = int(quarter)\n t = int(tahun)\n bulan = [None,3,6,9,12] #1: maret, 2: juni ; 3:sept; 4: des\n int_tgl = [None,31,30,30,31] #kalau nggak, jadi tanggal 1\n str_tgl = str(t) + \"-\" + str(bulan[q]) +\"-\" + str(int_tgl[q])\n date_time_obj = datetime.strptime(str_tgl, '%Y-%m-%d')\n return date_time_obj\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n#external_stylesheets = []\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\n\n\ndf_qoq = pd.read_csv(\"https://gist.githubusercontent.com/yudiwbs/ed50c1de101f2d0ebaf118540d6c2656/raw/0039e7093fc0ce3356af7ca5e6991d6f8a05d6e3/daya_beli_qoq.csv\")\ndf_pred_qoq = df_qoq.tail(1)\ntgl = pd.to_datetime('30-06-2019', format='%d-%m-%Y')\ndf_pred_qoq = df_pred_qoq.append({'quarter-tahun':tgl, 'daya_beli' : -2}, ignore_index=True)\n\n\n\ndf_yoy = 
pd.read_csv(\"https://gist.githubusercontent.com/yudiwbs/a447d01c52b359502147af679ca5f6f6/raw/c5d7677122594a5d1b10ff5f56a855a5d2d859b8/gistfile1.txt\")\ndf_yoy[\"quarter-tahun\"] = df_yoy.apply(lambda row: q_ke_tgl(row['Quarter'],row['Tahun']), axis=1)\ndf_pred_yoy = df_yoy.tail(1) #nanti diganti!!\ntgl = pd.to_datetime('30-06-2019', format='%d-%m-%Y')\ndf_pred_yoy = df_pred_yoy.append({'quarter-tahun':tgl,'Daya Beli Nasional' : -1}, ignore_index=True)\n\n\n\ndf_motor_yoy = pd.read_csv(\"https://gist.githubusercontent.com/yudiwbs/475dddbf88b9aa70ea5283e990b83cf7/raw/056a5152f06098e2c4d82cba83188946896e02f4/motor_yoy_qt\")\n\ndf_telur_yoy = pd.read_csv(\"https://gist.githubusercontent.com/yudiwbs/f0238a2bfde15d95e8c15920d428c6f6/raw/fd256199b46ae00ae9a6475129283d4c05086faa/telur_yoy.csv\")\ndf_telur_yoy[\"quarter-tahun\"] = df_motor_yoy.apply(lambda row: q_ke_tgl(row['quarter'],row['tahun']), axis=1)\n\ndf_ihk_gdp = pd.read_csv(\"https://gist.githubusercontent.com/yudiwbs/1dfe3ce1c60d95c7c95d518810aff2f8/raw/ea9e85e49e9326d83125e41da8488ee4671d206e/ihk_gdp.csv\")\ndf_ihk_gdp[\"quarter-tahun\"] = df_ihk_gdp.apply(lambda row: q_ke_tgl(row['quarter'],row['tahun']), axis=1)\n\ncolors = {\n# 'background': '#111111',\n 'text': '#4287f5',\n 'text_pred':'#000000'\n}\n#style={'backgroundColor': colors['background']}\nx2 = np.arange(10)\napp.layout = html.Div( children=[\n html.H1(children='Sistem Peringatan Dini Daya Beli',style={'textAlign': 'center','color': colors['text']}),\n html.Div([\n dcc.Tabs(id=\"tabs\", children=[\n dcc.Tab(label='Ringkasan', children=[\n html.Div([\n html.Div([\n dcc.Graph(\n id='dbeli-qoq-graph',\n figure={\n 'data': [\n go.Scatter(\n x= df_qoq[\"quarter-tahun\"],\n y= df_qoq[\"daya_beli\"],\n mode='lines+markers',\n name='Daya Beli QoQ'\n ),\n go.Scatter(\n x=df_pred_qoq[\"quarter-tahun\"],\n y=df_pred_qoq[\"daya_beli\"],\n mode='lines+markers',\n name='Daya Beli QoQ Prediksi'\n )\n ],\n 'layout': {\n 'title': 'Daya Beli QoQ',\n 'font': {\n 'color': colors['text']\n }\n }\n }\n ),\n html.H3(children='Prediksi (Q2 2019): -2.00',style={'textAlign': 'Center','color': colors['text_pred']}),\n html.P(\"Penjelasan: Prediksi diperoleh berdasarkan peningkatan penjualan motor (0.4) dan daging (0.5) dan penurunan harga bawang (0.2)\", style={'padding': '5px 10px 5px 10px'})\n ],style={'width': '45%', 'vertical-align':'top', 'display': 'inline-block', 'margin': '0px 0px 5px 0px', 'border': '1px solid #000'}),\n\n html.Div([\n dcc.Graph(\n id='dbeli-yoy-graph',\n figure={\n 'data': [\n go.Scatter(\n x= df_yoy[\"quarter-tahun\"],\n y= df_yoy[\"Daya Beli Nasional\"],\n mode='lines+markers',\n name='Daya Beli YoY'\n ),\n go.Scatter(\n x=df_pred_yoy[\"quarter-tahun\"],\n y=df_pred_yoy[\"Daya Beli Nasional\"],\n mode='lines+markers',\n name='Daya Beli YoY Prediksi'\n )\n ],\n 'layout': {\n 'title': 'Daya Beli YoY',\n 'font': {\n 'color': colors['text']\n }\n }\n }\n ),\n html.H3(children='Prediksi (Q2 2019): -1.00',style={'textAlign': 'Center','color': colors['text_pred']}),\n html.P(\"Penjelasan: Prediksi diperoleh berdasarkan peningkatan harga telur (0.4) dan daging (0.5) dan penurunan harga bawang (0.2)\", style={'padding': '5px 10px 5px 10px'})\n ],style={'width': '45%', 'vertical-align':'top', 'display': 'inline-block', 'margin': '0px 5px 5px 5px', 'border': '1px solid #000'})\n ])\n ]),\n dcc.Tab(label='Rincian', children=[\n #==========================================================================================\n html.Div([\n html.Div([\n dcc.Dropdown(\n options=[\n 
{'label': 'QoQ', 'value': 'NYC'},\n {'label': u'YoY', 'value': 'MTL'},\n ],\n value='MTL',style={'width': '50%'}\n ),\n dcc.Graph(\n id='ihk-gdp-yoy-graph',\n figure={\n 'data': [\n go.Scatter(\n x= df_ihk_gdp[\"quarter-tahun\"],\n y=df_ihk_gdp[\"pct_total_yoy\"],\n mode='lines+markers',\n name='Penghasilan YoY'\n ),\n go.Scatter(\n x= df_ihk_gdp[\"quarter-tahun\"],\n y=df_ihk_gdp[\"ihk_yoy\"],\n mode='lines+markers',\n name='IHK YoY'\n )\n ],\n 'layout': {\n 'title': 'Penghasilan dan IHK YoY',\n 'font': {\n 'color': colors['text']\n }\n }\n }\n ),\n ],style={'width': '90%', 'vertical-align':'top', 'display': 'inline-block', 'margin': '0px 0px 5px 0px', 'border': '1px solid #000'}),\n html.Div([\n dcc.Graph(\n id='motor-yoy-graph',\n figure={\n 'data': [\n go.Scatter(\n x= df_motor_yoy[\"quarter-tahun\"],\n y= df_motor_yoy[\"pct_yoy\"],\n mode='lines+markers',\n name='Daya Beli QoQ'\n )\n ],\n 'layout': {\n 'title': 'Motor YoY',\n 'font': {\n 'color': colors['text']\n }\n }\n }\n ),\n ],style={'width': '45%', 'vertical-align':'top', 'display': 'inline-block', 'margin': '0px 0px 5px 0px', 'border': '1px solid #000'}),\n html.Div([\n dcc.Graph(\n id='telur-yoy-graph',\n figure={\n 'data': [\n go.Scatter(\n x= df_telur_yoy[\"quarter-tahun\"],\n y= df_telur_yoy[\"pct_telur_yoy\"],\n mode='lines+markers',\n name='Harga Telur YoY'\n )\n ],\n 'layout': {\n 'title': 'Telur YoY',\n 'font': {\n 'color': colors['text']\n }\n }\n }\n ),\n ],style={'width': '45%', 'vertical-align':'top', 'display': 'inline-block', 'margin': '0px 5px 5px 5px', 'border': '1px solid #000'})\n ])\n ])\n ],style={'width': '60%','margin': '50px 0px 0px 0px'})\n ])\n])\n\nif __name__ == '__main__':\n app.run_server(debug=True)", "repo_name": "yudiwbs/coba_dash", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 10466, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "name"}, {"api_name": "dash.Dash", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 55, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 56, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 57, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 58, "usage_type": "call"}, {"api_name": "dash_core_components.Tabs", "line_number": 59, "usage_type": "call"}, {"api_name": "dash_core_components.Tab", "line_number": 60, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 61, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 62, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 63, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 67, "usage_type": "call"}, {"api_name": 
"plotly.graph_objs", "line_number": 67, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 73, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 73, "usage_type": "name"}, {"api_name": "dash_html_components.H3", "line_number": 88, "usage_type": "call"}, {"api_name": "dash_html_components.P", "line_number": 89, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 92, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 93, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 97, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 97, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 103, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 103, "usage_type": "name"}, {"api_name": "dash_html_components.H3", "line_number": 118, "usage_type": "call"}, {"api_name": "dash_html_components.P", "line_number": 119, "usage_type": "call"}, {"api_name": "dash_core_components.Tab", "line_number": 123, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 125, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 126, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 127, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 134, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 138, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 138, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 144, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 144, "usage_type": "name"}, {"api_name": "dash_html_components.Div", "line_number": 160, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 161, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 165, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 165, "usage_type": "name"}, {"api_name": "dash_html_components.Div", "line_number": 181, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 182, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 186, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 186, "usage_type": "name"}]} +{"seq_id": "32081881891", "text": "# Imports\nimport pandas as pd\nimport numpy as np\nfrom pathlib import Path\nfrom loguru import logger\nfrom random import choices\n\nimport privacy\nfrom simulate_row import simulate_row\n\n# Set root and data directories\nROOT_DIRECTORY = Path(__file__).absolute().parent.parent.parent\nDATA_DIRECTORY = ROOT_DIRECTORY / \"data\"\nground_truth_file = DATA_DIRECTORY / \"ground_truth_s2.csv\"\noutput_file = ROOT_DIRECTORY / \"submission2.csv\"\n\n# Select the columns\nheader = ['PUMA', 'YEAR', 'HHWT', 'GQ', 'PERWT', 'SEX', 'AGE', 'MARST', 'RACE',\n 'HISPAN', 'CITIZEN', 'SPEAKENG', 'HCOVANY', 'HCOVPRIV', 'HINSEMP',\n 'HINSCAID', 'HINSCARE', 'EDUC', 'EMPSTAT', 'EMPSTATD', 'LABFORCE',\n 'WRKLSTWK', 'ABSENT', 'LOOKING', 'AVAILBLE', 'WRKRECAL', 'WORKEDYR',\n 'INCTOT', 'INCWAGE', 'INCWELFR', 'INCINVST', 'INCEARN', 'POVERTY',\n 'DEPARTS', 'ARRIVES', 'sim_individual_id'\n ]\n\nnumber_histos = 7 # Create 7 histograms\npopulation_queries = 1 # Use 1 population query\nepsilons = [0.1, 1.0, 10.0] # Use epsilon values of .1, 1 and 10\nmax_records = 1350000 # 
Maximum number of records\nmrpi = 7 # Maximum records per individual\nsample = 0 # Do not use sampling\n\n# Define the combined columns for the 5 combined histograms\n# Note: There are two individual histograms HHWT and DEPARTS\n# 7 histograms total\ncombo_dict = {'DEMO': ['GQ_c', 'SEX_c', 'MARST_c', 'RACE_c', 'HISPAN_c',\n 'CITIZEN_c', 'SPEAKENG_c'],\n 'AGEEDUC': ['AGE_c', 'EDUC_c'],\n 'HEALTH': ['HCOVANY_c', 'HCOVPRIV_c', 'HINSEMP_c', 'HINSCAID_c',\n 'HINSCARE_c'],\n 'WORK': ['EMPSTAT_c', 'EMPSTATD_c', 'LABFORCE_c', 'WRKLSTWK_c',\n 'ABSENT_c', 'LOOKING_c', 'AVAILBLE_c', 'WRKRECAL_c',\n 'WORKEDYR_c'],\n 'INCOME': ['INCTOT_n', 'INCWAGE_n', 'INCINVST_n', 'POVERTY_n']\n }\n\n# Custom numeric ranges for DEPART\ndepart_list = [0, 300, 330, 400, 430, 500, 530, 600, 630, 700, 730, 800, 830, 900,\n 930, 1000, 1030, 1100, 1130, 1200, 1230, 1300, 1330, 1400, 1430, 1500,\n 1530, 1600, 1630, 1700, 1730, 1800, 1830, 1900, 1930, 2000, 2030, 2100,\n 2130, 2200, 2230, 2300, 2330]\n\n# Define the number dictionary for each numeric combined column\nnum_dict = {'INCTOT_n': [240000, 10000, 300000],\n 'INCWAGE_n': [240000, 10000, 300000],\n 'INCINVST_n': [150000, 10000, 250000],\n 'POVERTY_n': [500, 100, 600]\n }\n\n\n# Creates the weights for the bins adding noise to the bin counts\n# using the Laplace mechanism (for the individual histograms)\n# df - the input dataframe\n# c - the column\n# b - the bins (population)\n# m - max_records_per_individual\n# h - number of histograms\n# e - epsilon\n# return the weights for the population\ndef weight(df, c, b, m, h, e):\n\n new_df = df[c].groupby(pd.cut(df[c], b)).count()\n\n for i in range(len(new_df)):\n new_df[i] = privacy.laplaceMechanism(new_df[i], (m * h) + 1, e)\n\n final_df = new_df / new_df.sum()\n w = []\n for i in range(len(final_df.index)):\n w.append(final_df[i])\n return w\n\n\ndef main():\n\n # Preprocessing - load formatted ground truth, check for proper formatting\n # and check the number of bins\n logger.info(\"begin pre-processing\")\n ground_truth = pd.read_csv(ground_truth_file)\n valid = privacy.check_input(ground_truth, combo_dict, num_dict)\n if valid != 1:\n return\n df, num_decodes, col_decodes = privacy.preprocess(ground_truth, combo_dict, num_dict)\n privacy.histo_test(df, combo_dict)\n logger.info(\"end pre-processing\")\n\n # Initialize variables\n pumas = df['PUMA_x'].unique().tolist()\n years = df['YEAR_x'].unique().tolist()\n num_pumas = len(pumas)\n num_years = len(years)\n num_records = len(df.index)\n rows_per_py = int((max_records - num_records)/(num_pumas * num_years))\n sensitivity = (number_histos * mrpi) + population_queries\n\n # Create dataframe for final results and initialize final list\n final_df = pd.DataFrame(columns=header)\n final_list = []\n\n # The main loop\n # For each epsilon cycle through the PUMA-years\n # For epsilons <= 1.0 use all the data\n # For epsilons > 1.0 use PUMA data\n for epsilon in epsilons:\n if epsilon <= 1.0:\n logger.info(f\"begin histogram creation {epsilon}\")\n\n # Create the individual histogram for HHWT\n hhwt_bins = np.r_[np.arange(0, 500, 20), np.inf]\n hhwt_pop = [i * 20 for i in range(len(hhwt_bins))]\n hhwt_pop.pop()\n hhwt_w = weight(df, 'HHWT_x', hhwt_bins, mrpi, number_histos, epsilon)\n\n # Create the 5 combined histograms\n demo_pop, demo_w = privacy.create_private_histo(df, 'DEMO', sample, mrpi, sensitivity, epsilon)\n ageeduc_pop, ageeduc_w = privacy.create_private_histo(df, 'AGEEDUC', sample, mrpi, sensitivity, epsilon)\n health_pop, health_w = 
privacy.create_private_histo(df, 'HEALTH', sample, mrpi, sensitivity, epsilon)\n work_pop, work_w = privacy.create_private_histo(df, 'WORK', sample, mrpi, sensitivity, epsilon)\n income_pop, income_w = privacy.create_private_histo(df, 'INCOME', sample, mrpi, sensitivity, epsilon)\n\n # Create the individual histogram for DEPARTS (custom numeric ranges)\n departs_bins = [-np.inf, 0, 300, 330, 400, 430, 500, 530, 600, 630, 700, 730, 800, 830, 900,\n 930, 1000, 1030, 1100, 1130, 1200, 1230, 1300, 1330, 1400, 1430, 1500,\n 1530, 1600, 1630, 1700, 1730, 1800, 1830, 1900, 1930, 2000, 2030, 2100,\n 2130, 2200, 2230, np.inf]\n departs_pop = [0, 300, 330, 400, 430, 500, 530, 600, 630, 700, 730, 800, 830, 900,\n 930, 1000, 1030, 1100, 1130, 1200, 1230, 1300, 1330, 1400, 1430, 1500,\n 1530, 1600, 1630, 1700, 1730, 1800, 1830, 1900, 1930, 2000, 2030, 2100,\n 2130, 2200, 2230, 2300]\n departs_w = weight(df, 'DEPARTS_x', departs_bins, mrpi, number_histos, epsilon)\n\n logger.info(f\"end histogram creation {epsilon}\")\n\n i = 1\n for puma in pumas:\n logger.info(f\"processing {epsilon} {puma}\")\n for year in years:\n if epsilon > 1.0:\n puma_data = df[(df['PUMA_x'] == puma)]\n\n # Create the individual histogram for HHWT\n hhwt_bins = np.r_[np.arange(0, 500, 20), np.inf]\n hhwt_pop = [i * 20 for i in range(len(hhwt_bins))]\n hhwt_pop.pop()\n hhwt_w = weight(puma_data, 'HHWT_x', hhwt_bins, mrpi, number_histos, epsilon)\n\n # hhwt_pop, hhwt_w = utilities.create_private_histo(puma_data, 'HHWT', sample, mrpi, sensitivity, epsilon)\n demo_pop, demo_w = privacy.create_private_histo(puma_data, 'DEMO', sample, mrpi, sensitivity, epsilon)\n ageeduc_pop, ageeduc_w = privacy.create_private_histo(puma_data, 'AGEEDUC', sample, mrpi, sensitivity, epsilon)\n health_pop, health_w = privacy.create_private_histo(puma_data, 'HEALTH', sample, mrpi, sensitivity, epsilon)\n work_pop, work_w = privacy.create_private_histo(puma_data, 'WORK', sample, mrpi, sensitivity, epsilon)\n income_pop, income_w = privacy.create_private_histo(puma_data, 'INCOME', sample, mrpi, sensitivity, epsilon)\n\n # Create the individual histogram for DEPARTS (custom numeric ranges)\n departs_bins = [-np.inf, 0, 300, 330, 400, 430, 500, 530, 600, 630, 700, 730, 800, 830, 900,\n 930, 1000, 1030, 1100, 1130, 1200, 1230, 1300, 1330, 1400, 1430, 1500,\n 1530, 1600, 1630, 1700, 1730, 1800, 1830, 1900, 1930, 2000, 2030, 2100,\n 2130, 2200, 2230, np.inf]\n departs_pop = [0, 300, 330, 400, 430, 500, 530, 600, 630, 700, 730, 800, 830, 900,\n 930, 1000, 1030, 1100, 1130, 1200, 1230, 1300, 1330, 1400, 1430, 1500,\n 1530, 1600, 1630, 1700, 1730, 1800, 1830, 1900, 1930, 2000, 2030, 2100,\n 2130, 2200, 2230, 2300]\n departs_w = weight(puma_data, 'DEPARTS_x', departs_bins, mrpi, number_histos, epsilon)\n\n # Create simulated individuals for each PUMA-year\n puma_year = df[(df['PUMA_x'] == puma) &\n (df['YEAR_x'] == year)]\n sim_count = len(puma_year.index)\n sim_count_noise = int(privacy.laplaceMechanism(sim_count, sensitivity, epsilon))\n\n # Check for bias\n if ((sim_count_noise - sim_count) > rows_per_py):\n sim_count_noise = sim_count + rows_per_py - 1\n elif (((sim_count_noise - sim_count) < -rows_per_py)):\n sim_count_noise = sim_count - rows_per_py + 1\n\n # For each PUMA-year create simulated individuals\n for j in range(sim_count_noise):\n hhwt_value = choices(hhwt_pop, hhwt_w, k=1)\n demo_value = choices(demo_pop, demo_w, k=1)\n ageeduc_value = choices(ageeduc_pop, ageeduc_w, k=1)\n health_value = choices(health_pop, health_w, k=1)\n work_value = 
choices(work_pop, work_w, k=1)\n income_value = choices(income_pop, income_w, k=1)\n departs_value = choices(departs_pop, departs_w, k=1)\n row = simulate_row(epsilon,\n puma,\n year,\n hhwt_value[0],\n demo_value[0],\n ageeduc_value[0],\n health_value[0],\n work_value[0],\n income_value[0],\n departs_value[0],\n num_dict,\n num_decodes,\n col_decodes\n )\n row['sim_individual_id'] = i\n i = i + 1\n final_list.append(row)\n\n # Write the final results to the output file\n logger.info('writing data to output file')\n final_df = pd.DataFrame.from_dict(final_list)\n final_df.to_csv(output_file, index=False)\n\n\nmain()\n", "repo_name": "JimKing100/PrivacyHistos", "sub_path": "examples/sprint2/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 10857, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pathlib.Path", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 72, "usage_type": "call"}, {"api_name": "privacy.laplaceMechanism", "line_number": 75, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 88, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 88, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 89, "usage_type": "call"}, {"api_name": "privacy.check_input", "line_number": 90, "usage_type": "call"}, {"api_name": "privacy.preprocess", "line_number": 93, "usage_type": "call"}, {"api_name": "privacy.histo_test", "line_number": 94, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 95, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 95, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 107, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 116, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 116, "usage_type": "name"}, {"api_name": "numpy.r_", "line_number": 119, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 119, "usage_type": "attribute"}, {"api_name": "privacy.create_private_histo", "line_number": 125, "usage_type": "call"}, {"api_name": "privacy.create_private_histo", "line_number": 126, "usage_type": "call"}, {"api_name": "privacy.create_private_histo", "line_number": 127, "usage_type": "call"}, {"api_name": "privacy.create_private_histo", "line_number": 128, "usage_type": "call"}, {"api_name": "privacy.create_private_histo", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 132, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 135, "usage_type": "attribute"}, {"api_name": "loguru.logger.info", "line_number": 142, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 142, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 146, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 146, "usage_type": "name"}, {"api_name": "numpy.r_", "line_number": 152, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 152, "usage_type": "attribute"}, {"api_name": "privacy.create_private_histo", "line_number": 158, "usage_type": "call"}, {"api_name": "privacy.create_private_histo", "line_number": 159, "usage_type": "call"}, {"api_name": "privacy.create_private_histo", "line_number": 160, "usage_type": "call"}, 
{"api_name": "privacy.create_private_histo", "line_number": 161, "usage_type": "call"}, {"api_name": "privacy.create_private_histo", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 165, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 168, "usage_type": "attribute"}, {"api_name": "privacy.laplaceMechanism", "line_number": 179, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 189, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 190, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 191, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 192, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 193, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 194, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 195, "usage_type": "call"}, {"api_name": "simulate_row.simulate_row", "line_number": 196, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 215, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 215, "usage_type": "name"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 216, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 216, "usage_type": "attribute"}]} +{"seq_id": "41261213777", "text": "#!/usr/bin/python3\n\nimport sys # for stdout\nimport functools # for wraps\nimport os # for remove\n\noutfile = '/tmp/out.txt'\n\n\ndef with_output_to_outfile(f):\n '''Decorate f to run with stdout redirected to [outfile].\n\n The file is opened for appending each time f will be called and\n closed when it returns.\n '''\n @functools.wraps(f)\n def decorated_f(*args, **kw):\n old_stdout = sys.stdout\n new_stdout = sys.stdout = open(outfile, 'a')\n try:\n return f(*args, **kw)\n finally:\n sys.stdout = old_stdout\n new_stdout.close()\n return decorated_f\n\n\n@with_output_to_outfile\ndef hello(name):\n print('Hello, {0}!'.format(name))\n\n# Running this will destroy [outfile]!\n# make sure file is empty\nopen(outfile, 'w').close()\n# test\nprint('This should output nothing:')\nhello('Fred')\nhello('Barney')\nprint('The file now contains this:')\nprint(open(outfile).read())\n# clean up\nos.remove(outfile)\n", "repo_name": "nonZero/demos-python", "sub_path": "src/exercises/advanced/standrad_stream_redirect_using_decorator/solution1.py", "file_name": "solution1.py", "file_ext": "py", "file_size_in_byte": 965, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.stdout", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 23, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 16, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "16086240660", "text": "\"\"\"Initial Migration\n\nRevision ID: 079e8fbdd5b2\nRevises: \nCreate Date: 2018-02-03 22:10:11.138278\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '079e8fbdd5b2'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('pitches',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('users')\n op.drop_table('pitches')\n # ### end Alembic commands ###\n", "repo_name": "SamNgigi/Pitch-Ip", "sub_path": "migrations/versions/079e8fbdd5b2_initial_migration.py", "file_name": "079e8fbdd5b2_initial_migration.py", "file_ext": "py", "file_size_in_byte": 815, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 25, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 34, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 34, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 35, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "25815666685", "text": "import math\nfrom typing import List, Optional, Union\n\nimport torch\nimport triton\nimport triton.language as tl\nfrom torch.autograd.function import FunctionCtx\nfrom torch.cuda.amp import custom_fwd\n\n\n# CREDITS: Initially inspired by the Triton tutorial\n\n\n# Similar to https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L213\ndef attention_reference(\n q: torch.Tensor,\n k: torch.Tensor,\n v: torch.Tensor,\n output: torch.Tensor,\n sm_scale: float,\n is_causal: bool,\n attention_mask: Union[torch.Tensor, None],\n) -> torch.Tensor:\n \"\"\"\n Reference implementation for attention\n @param q: Query matrix size (batch, heads, m_size, BLOCK_DHEAD)\n @param k: Key matrix size (batch, heads, n_size, BLOCK_DHEAD)\n @param v: Value matrix size (batch, heads, n_size, BLOCK_DHEAD)\n @param output: Output matrix size (batch, heads, m_size, BLOCK_DHEAD)\n @param sm_scale: SM (softmax) scaling factor applied on Q•K^T just before the softmax\n @param is_causal: Whether to apply causal attention\n @param attention_mask: Attention mask broadcastable to (batch, heads, m_size, n_size). Warning the mask\n isn't a binary mask like the one you use normally. 
This mask is directly added to QxK.\n @return:\n \"\"\"\n p = torch.matmul(q, k.transpose(2, 3)) * sm_scale\n\n if attention_mask is not None:\n p += attention_mask\n if is_causal:\n m_size = q.size(2)\n n_size = k.size(2)\n M = torch.tril(torch.ones((m_size, n_size), device=\"cuda\"))\n p = torch.where(M == 0, float(\"-inf\"), p)\n p = torch.nn.functional.softmax(p, dim=-1)\n ref_out = torch.matmul(p.to(v.dtype), v, out=output)\n return ref_out\n\n\ndef closest_power_of_2(n: int, min_range: int = 16, max_range: int = 128) -> List[int]:\n \"\"\"return the closests power of 2 for n, in 16-128 range\"\"\"\n n = max(min(n, max_range), min_range)\n min_range = math.floor(math.log2(n - 1))\n max_range = math.ceil(math.log2(n + 1))\n ranges = [2**i for i in range(min_range, max_range + 1)]\n return ranges\n\n\ndef prune(configs, named_args):\n \"\"\"remove block shapes unlikely to provide optimal speedup\"\"\"\n pruned_configs = []\n sizes_m = closest_power_of_2(named_args[\"m_size\"])\n sizes_n = closest_power_of_2(named_args[\"n_size\"])\n is_causal = named_args[\"IS_CAUSAL\"]\n for c in configs:\n if is_causal and c.kwargs[\"BLOCK_M_SIZE\"] != c.kwargs[\"BLOCK_N_SIZE\"]:\n continue\n if c.kwargs[\"BLOCK_M_SIZE\"] in sizes_m and c.kwargs[\"BLOCK_N_SIZE\"] in sizes_n:\n pruned_configs.append(c)\n\n assert len(pruned_configs) > 0\n return pruned_configs\n\n\n# @triton.autotune(\n# configs=[\n# triton.Config({\"BLOCK_M_SIZE\": 16, \"BLOCK_N_SIZE\": 16}, num_stages=1, num_warps=4),\n# triton.Config({\"BLOCK_M_SIZE\": 16, \"BLOCK_N_SIZE\": 32}, num_stages=1, num_warps=1),\n# triton.Config({\"BLOCK_M_SIZE\": 16, \"BLOCK_N_SIZE\": 64}, num_stages=1, num_warps=1),\n# triton.Config({\"BLOCK_M_SIZE\": 16, \"BLOCK_N_SIZE\": 128}, num_stages=1, num_warps=1),\n# triton.Config({\"BLOCK_M_SIZE\": 32, \"BLOCK_N_SIZE\": 16}, num_stages=1, num_warps=4),\n# triton.Config({\"BLOCK_M_SIZE\": 32, \"BLOCK_N_SIZE\": 32}, num_stages=1, num_warps=2),\n# triton.Config({\"BLOCK_M_SIZE\": 32, \"BLOCK_N_SIZE\": 64}, num_stages=1, num_warps=2),\n# triton.Config({\"BLOCK_M_SIZE\": 32, \"BLOCK_N_SIZE\": 128}, num_stages=1, num_warps=2),\n# triton.Config({\"BLOCK_M_SIZE\": 64, \"BLOCK_N_SIZE\": 16}, num_stages=1, num_warps=4),\n# triton.Config({\"BLOCK_M_SIZE\": 64, \"BLOCK_N_SIZE\": 32}, num_stages=1, num_warps=4),\n# triton.Config({\"BLOCK_M_SIZE\": 64, \"BLOCK_N_SIZE\": 64}, num_stages=1, num_warps=4),\n# triton.Config({\"BLOCK_M_SIZE\": 64, \"BLOCK_N_SIZE\": 128}, num_stages=1, num_warps=4),\n# triton.Config({\"BLOCK_M_SIZE\": 128, \"BLOCK_N_SIZE\": 16}, num_stages=1, num_warps=4),\n# triton.Config({\"BLOCK_M_SIZE\": 128, \"BLOCK_N_SIZE\": 32}, num_stages=1, num_warps=4),\n# triton.Config({\"BLOCK_M_SIZE\": 128, \"BLOCK_N_SIZE\": 64}, num_stages=1, num_warps=4),\n# triton.Config({\"BLOCK_M_SIZE\": 128, \"BLOCK_N_SIZE\": 128}, num_stages=1, num_warps=4),\n# triton.Config({\"BLOCK_M_SIZE\": 128, \"BLOCK_N_SIZE\": 128}, num_stages=1, num_warps=8),\n# # triton.Config({\"BLOCK_M_SIZE\": 128, \"BLOCK_N_SIZE\": 256}, num_stages=1, num_warps=8),\n# # triton.Config({\"BLOCK_M_SIZE\": 256, \"BLOCK_N_SIZE\": 128}, num_stages=1, num_warps=8),\n# # triton.Config({\"BLOCK_M_SIZE\": 256, \"BLOCK_N_SIZE\": 256}, num_stages=1, num_warps=16),\n# ],\n# prune_configs_by={\"early_config_prune\": prune, \"perf_model\": None, \"top_k\": None},\n# key=[\"m_size\", \"n_size\", \"head_size\", \"HAS_MASK\", \"IS_MATRIX_MASK\", \"IS_CAUSAL\"],\n# )\n# @triton.heuristics( # order should be the same as in function args, otherwise expect strange bugs\n# 
{\n# # load mask is needed if one dim (n_size / m_size) of tensors do not align with block size\n# \"M_LOAD_MASK_NEEDED\": lambda args: args[\"m_size\"] % args[\"BLOCK_M_SIZE\"] != 0,\n# \"N_LOAD_MASK_NEEDED\": lambda args: args[\"n_size\"] % args[\"BLOCK_N_SIZE\"] != 0,\n# }\n# )\n@triton.jit\ndef _fwd_kernel(\n head_size,\n m_size,\n n_size,\n cache_key_m_size,\n cache_key_n_size,\n q_ptr,\n k_ptr,\n v_ptr,\n sm_scale,\n attention_mask_ptr,\n output_ptr,\n q_batch_stride,\n q_head_stride,\n q_m_stride,\n q_k_stride,\n k_batch_stride,\n k_head_stride,\n k_n_stride,\n k_k_stride, # axis named n,k instead of k,n because of the transpose of K matrix\n v_batch_stride,\n v_head_stride,\n v_k_stride,\n v_n_stride,\n output_batch_stride,\n output_head_stride,\n output_row_stride,\n output_col_stride,\n attention_mask_batch_stride,\n attention_mask_head_stride,\n attention_mask_m_stride,\n attention_mask_n_stride,\n min_clamp_value,\n attention_mask_batch_size,\n attention_mask_head_size,\n attention_mask_m_size,\n attention_mask_n_size,\n HAS_MASK: tl.constexpr,\n IS_MATRIX_MASK: tl.constexpr,\n IS_CAUSAL: tl.constexpr,\n BLOCK_DHEAD_SIZE: tl.constexpr,\n BLOCK_M_SIZE: tl.constexpr, # this parameter and below are managed by the autotune and need to be at the end\n BLOCK_N_SIZE: tl.constexpr,\n M_LOAD_MASK_NEEDED: tl.constexpr,\n N_LOAD_MASK_NEEDED: tl.constexpr,\n):\n \"\"\"\n Computes attention\n\n Q•K^T Naming conventions. V multiply not represented here.\n\n N Dimension\n n_size\n ───────────────\n ┌───┬───┬───────┐\n │ │ │ │\n │ │ │ │\n K Dimension │ │ │ │\n │ │ │ │\n │ │ │ │\n │ │ │ │\n BLOCK_DHEAD └───┴───┴───────┘\n ┌────────────┐\n │ │ │\n M Dimension│ ├────────────┤ ┌───┐\n m_size │ │ │ │ │ BLOCK_M_SIZE\n │ ├────────────┤ └───┘\n │ │ │ BLOCK_N_SIZE\n │ │ │\n └────────────┘\n\n @param head_size: number of heads per batch\n @param m_size: size of M axis\n @param n_size: size of N axis\n @param q_ptr: query matrix size (batch, head_size, m_size, BLOCK_DHEAD)\n @param k_ptr: key matrix size (batch, head_size, n_size, BLOCK_DHEAD)\n @param v_ptr: value matrix size (batch, head_size, n_size, BLOCK_DHEAD)\n @param sm_scale: scaling factor applied after operation QxK\n @param output_ptr: output matrix size (batch, head_size, m_size, BLOCK_DHEAD)\n @param q_batch_stride: matrix Q stride for batch dimension\n @param q_head_stride: matrix Q stride for head dimension\n @param q_m_stride: matrix Q stride for rows, called \"M dimension\"\n @param q_k_stride: matrix Q stride for columns, called \"K dimension\"\n @param k_batch_stride: matrix K stride for batch dimension\n @param k_head_stride: matrix K stride for head dimension\n @param k_n_stride: matrix K stride for rows, called \"N dimension\", will be columns after transpose\n @param k_k_stride: matrix K stride for columns, called \"K dimension\", will be rows after transpose\n @param v_batch_stride: matrix V stride for batch dimension\n @param v_head_stride: matrix V stride for head dimension\n @param v_k_stride: matrix V stride for columns\n @param v_n_stride: matrix V stride for rows\n @param output_batch_stride: matrix OUTPUT stride for batch dimension\n @param output_head_stride: matrix OUTPUT stride for head dimension\n @param output_row_stride: matrix OUTPUT stride for rows\n @param output_col_stride: matrix OUTPUT stride for columns\n @param attention_mask_ptr: attention mask matrix broadcastable to (batch, head_size, m_size, n_size)\n @param attention_mask_batch_stride: matrix mask stride for batch dimension\n @param 
attention_mask_head_stride: matrix mask stride for head dimension\n @param attention_mask_m_stride: matrix mask stride for rows\n @param attention_mask_n_stride: matrix mask stride for columns\n @param attention_mask_batch_size: matrix mask size for batch dimension\n @param attention_mask_head_size: matrix mask size for head dimension\n @param attention_mask_m_size: matrix mask size for rows (equal to m_size)\n @param attention_mask_n_size: matrix mask size for columns (equal to n_size)\n @param HAS_MASK: whether the mask is applied\n @param IS_MATRIX_MASK: whether the mask is a vector or a matrix\n @param IS_CAUSAL: whether the mask is applied\n @param BLOCK_DHEAD_SIZE: number of columns per head\n @param BLOCK_M_SIZE: number of rows computed in a single instance for matrix Q\n @param BLOCK_N_SIZE: number of rows computed at each loop in the main loop for matrix K and V\n @param M_LOAD_MASK_NEEDED: use boundary check when loading/saving from/to Q/Output tensors\n @param N_LOAD_MASK_NEEDED: use boundary check when loading K/V/Attention mask tensors\n \"\"\"\n\n # Index of the block on M axis (M axis is the rows of matrix K)\n block_m_idx = tl.program_id(0)\n\n # Global index of the current head (batch and heads are mixed into one program id)\n head_idx = tl.program_id(1)\n\n # Index of the current batch\n current_batch_idx = head_idx // head_size\n # Index of the head inside current batch\n current_head_idx = head_idx % head_size\n\n m_range_offs = tl.arange(0, BLOCK_M_SIZE) # first block on M dimension\n n_range_offs = tl.arange(0, BLOCK_N_SIZE) # first block on N dimension\n dhead_range_offs = tl.arange(0, BLOCK_DHEAD_SIZE) # full head\n\n m_offs = block_m_idx * BLOCK_M_SIZE + m_range_offs # rows offsets on M axis\n\n # memory offsets matrices on whole Q, K, V, output matrices\n # offsets for the current block on matrix Q\n q_offs = (\n current_batch_idx * q_batch_stride\n + current_head_idx * q_head_stride\n + (m_offs[:, None] * q_m_stride + dhead_range_offs[None, :] * q_k_stride)\n )\n\n # offsets for the first block on matrix K\n k_offs = (\n current_batch_idx * k_batch_stride\n + current_head_idx * k_head_stride\n + (n_range_offs[:, None] * k_n_stride + dhead_range_offs[None, :] * k_k_stride)\n )\n\n # offsets for the first block on matrix V\n v_offs = (\n current_batch_idx * v_batch_stride\n + current_head_idx * v_head_stride\n + (n_range_offs[:, None] * v_k_stride + dhead_range_offs[None, :] * v_n_stride)\n )\n\n # offsets for the current block on matrix Output\n output_offs = (\n current_batch_idx * output_batch_stride\n + current_head_idx * output_head_stride\n + (m_offs[:, None] * output_row_stride + dhead_range_offs[None, :] * output_col_stride)\n )\n\n # pointers to blocks in Q, K, V\n q_ptrs = q_ptr + q_offs\n k_ptrs = k_ptr + k_offs\n v_ptrs = v_ptr + v_offs\n output_ptrs = output_ptr + output_offs\n\n # initialize pointer to m and d used to compute normalizer for softmax\n l_i = tl.zeros((BLOCK_M_SIZE,), dtype=tl.float32) - float(\"inf\")\n d_i = tl.zeros((BLOCK_M_SIZE,), dtype=tl.float32)\n\n # initialize the main loop accumulator, it is the size of a block of full rows, written to the output for the\n # current head\n acc = tl.zeros((BLOCK_M_SIZE, BLOCK_DHEAD_SIZE), dtype=tl.float32)\n\n # load q, a block of full rows of matrix q\n # there is a bug on n_size, it is not related to Q tensor but if a load mask is needed and BLOCK_N > n_size,\n # output is wrong.\n if M_LOAD_MASK_NEEDED | N_LOAD_MASK_NEEDED:\n q = tl.load(q_ptrs, mask=m_offs[:, None] < m_size, other=0.0)\n 
else:\n q = tl.load(q_ptrs)\n\n block_n_end = n_size\n if IS_CAUSAL:\n # in causal mode, we expect that BLOCK_M_SIZE == BLOCK_N_SIZE\n # autotune will prune shapes not matching this rule\n block_n_end = (block_m_idx + 1) * BLOCK_N_SIZE\n\n if HAS_MASK:\n attention_mask_batch_idx = (current_batch_idx,)\n if attention_mask_batch_size == 1:\n attention_mask_batch_idx = 0\n\n attention_mask_head_idx = current_head_idx\n if attention_mask_head_size == 1:\n attention_mask_head_idx = 0\n\n attention_mask_off = (\n attention_mask_batch_idx * attention_mask_batch_stride\n + attention_mask_head_idx * attention_mask_head_stride\n )\n\n # loop over k, v and update accumulator\n # block_n_start_idx is the row offset on dimension N of the current block\n # It's used for both the N dimension of K and V because they are handled at the same time\n for block_n_start_idx in range(0, block_n_end, BLOCK_N_SIZE):\n # block_n_start_idx = tl.multiple_of(block_n_start_idx, BLOCK_N_SIZE)\n block_n_offs = block_n_start_idx + n_range_offs\n # We load the current block in K in SRAM\n # We do the first multiplication between the block in Q and the current block in K\n # We finish with the scaling (sqrt(BLOCK_DHEAD) in Vaswani et al. but sm_scale here)\n if N_LOAD_MASK_NEEDED:\n k_ptr_mask = block_n_offs[:, None] < n_size\n k = tl.load(k_ptrs + block_n_start_idx * k_n_stride, mask=k_ptr_mask, other=0.0)\n else:\n k = tl.load(k_ptrs + block_n_start_idx * k_n_stride)\n qk = tl.zeros((BLOCK_M_SIZE, BLOCK_N_SIZE), dtype=tl.float32)\n\n # required to fix a Triton compiler bug, if not done, there is a precision issue\n if N_LOAD_MASK_NEEDED:\n qk = tl.where(n_range_offs[None, :] < n_size, qk, float(\"-inf\"))\n qk += tl.dot(q, tl.trans(k))\n qk *= sm_scale\n if IS_CAUSAL:\n qk += tl.where(m_offs[:, None] >= block_n_offs[None, :], 0, float(\"-inf\"))\n\n if HAS_MASK:\n # we assume mask has a vector shape\n attention_mask_offs = attention_mask_off + block_n_offs * attention_mask_n_stride\n if IS_MATRIX_MASK: # mask has a matrix shape, we load (BLOCK_M, BLOCK_N) elements\n attention_mask_offs = attention_mask_offs[None, :] + m_offs[:, None] * attention_mask_m_stride\n\n if N_LOAD_MASK_NEEDED & (not IS_MATRIX_MASK): # mask has a vector shape + need a load mask\n attention_mask_ptr_mask = block_n_offs < attention_mask_n_size\n if IS_MATRIX_MASK: # mask has a matrix shape\n if M_LOAD_MASK_NEEDED & (not N_LOAD_MASK_NEEDED): # load mask on M axis\n attention_mask_ptr_mask = m_offs[:, None] < attention_mask_m_size\n elif (not M_LOAD_MASK_NEEDED) & N_LOAD_MASK_NEEDED: # load mask on N axis\n attention_mask_ptr_mask = block_n_offs[None, :] < attention_mask_n_size\n elif M_LOAD_MASK_NEEDED & N_LOAD_MASK_NEEDED: # load mask on both axis\n attention_mask_ptr_mask = (block_n_offs[None, :] < attention_mask_n_size) & (\n m_offs[:, None] < attention_mask_m_size\n )\n\n if (M_LOAD_MASK_NEEDED & IS_MATRIX_MASK) | N_LOAD_MASK_NEEDED:\n attention_mask = tl.load(\n attention_mask_ptr + attention_mask_offs,\n eviction_policy=\"evict_first\",\n mask=attention_mask_ptr_mask,\n other=float(\"-inf\"),\n )\n else:\n attention_mask = tl.load(\n attention_mask_ptr + attention_mask_offs,\n eviction_policy=\"evict_first\",\n )\n # Avoids NaN\n attention_mask = tl.where(attention_mask == float(\"-inf\"), min_clamp_value, attention_mask)\n # if IS_MATRIX_MASK we already added the dimensions, else we need to add one\n if IS_MATRIX_MASK:\n qk += attention_mask\n else: # related to https://github.com/openai/triton/issues/1273\n qk += attention_mask[None, :]\n\n # 
We compute softmax normalization like in Milakov et al.\n # We renamed m (in the original article) to l to avoid confusions\n # We start with the current block qk\n l_j = tl.max(qk, 1)\n\n numerators = tl.exp(qk - l_j[:, None])\n d_j = tl.sum(numerators, 1)\n\n l_new = tl.maximum(l_i, l_j)\n alpha = tl.exp(l_i - l_new)\n beta = tl.exp(l_j - l_new)\n d_new = alpha * d_i + beta * d_j\n\n # We correct the numerator for the current softmax (*beta) -> exp(l_j - l_new) * exp(qk - mj) = exp(l_new) We\n # divide by the normalization. It's strange to do it this way instead of simply computing the softmax for qk,\n # but since all needed operations are already done for updating m and d, it seems faster\n p_scale = beta / d_new\n\n qk_softmax = numerators * p_scale[:, None]\n\n # From here, qk_softmax is correct related to all over previously done block\n # However it is wrong related to the full output row if the qk isn't the last block\n # And at this stage all the previously done qk_softmax blocks are also wrong and needs to be corrected\n # To correct previous blocks we will scale the accumulator\n # d_i / d_new is for correcting denominator\n # alpha is for correcting numerator\n acc_scale = d_i / d_new * alpha\n\n # acc scaling\n acc = acc * acc_scale[:, None]\n\n # We now apply the last operation, the multiplication by a block of matrix V\n if N_LOAD_MASK_NEEDED:\n v_ptr_mask = block_n_offs[:, None] < n_size # repeated otherwise triton segfault\n v = tl.load(v_ptrs + block_n_start_idx * v_k_stride, mask=v_ptr_mask, other=0.0)\n else:\n v = tl.load(v_ptrs + block_n_start_idx * v_k_stride)\n qk_softmax = qk_softmax.to(q_ptr.dtype.element_ty)\n acc += tl.dot(qk_softmax, v)\n\n # We update the normalizer for the next iteration\n d_i = d_new\n l_i = l_new\n\n if M_LOAD_MASK_NEEDED:\n output_ptr_mask = m_offs[:, None] < m_size\n tl.store(output_ptrs, acc, mask=output_ptr_mask)\n else:\n tl.store(output_ptrs, acc)\n\n\nclass Attention(torch.autograd.Function):\n @staticmethod\n @custom_fwd(cast_inputs=torch.float16)\n def forward(\n ctx: FunctionCtx,\n q: torch.Tensor,\n k: torch.Tensor,\n v: torch.Tensor,\n output: torch.Tensor,\n sm_scale: float,\n is_causal: bool,\n attention_mask: Optional[torch.Tensor] = None,\n ):\n \"\"\"\n Computes attention.\n FP32 input and output are not supported.\n https://github.com/openai/triton/issues/674\n Not an issue as the function is annotated with @custom_fwd(cast_inputs=torch.float16) so the input is casted to\n float16 before the function is called.\n\n @param ctx: context for autograd\n @param q: Query matrix size (batch, head_size, m_size, dhead)\n @param k: Key matrix size (batch, head_size, n_size, dhead)\n @param v: Value matrix size (batch, head_size, n_size, dhead)\n @param output: Output matrix size (batch, head_size, m_size, dhead)\n @param sm_scale: SM (softmax) scaling factor applied on Q•K^T just before the softmax\n @param is_causal: Autoregressive decoder attention\n @param attention_mask: Attention mask matrix broadcastable to (batch, head_size, m_size, n_size)\n @return:\n \"\"\"\n # Constraints\n # Queries and keys have the same d_k size\n assert q.shape[-1] == k.shape[-1]\n assert (\n q.dtype == k.dtype == v.dtype == output.dtype\n ), f\"All tensors must have the same dtype: {q.dtype}, {k.dtype}, {v.dtype}, {output.dtype}\"\n assert q.dtype in [torch.float16, torch.bfloat16], f\"Only float16 and bfloat16 are supported, got {q.dtype}\"\n batch, head_size, m_size, dhead = q.size()\n n_size = k.size(2)\n\n grid = lambda args: 
(triton.cdiv(m_size, args[\"BLOCK_M_SIZE\"]), batch * head_size) # noqa: E731\n # tmp should match m_size rounded to the next multiple of block_m\n # if unknown because of autotune, we put 128 as a safe value\n\n HAS_MASK = False\n IS_MATRIX_MASK = False\n if attention_mask is not None:\n assert (\n attention_mask.size(0) == batch or attention_mask.size(0) == 1\n ), \"Incompatible broadcast batch dimension\"\n assert (\n attention_mask.size(1) == head_size or attention_mask.size(1) == 1\n ), \"Incompatible broadcast heads dimension\"\n assert (\n attention_mask.size(2) == m_size or attention_mask.size(2) == 1\n ), \"Incompatible broadcast m_size dimension\"\n assert attention_mask.size(3) == n_size, \"Last size of mask must broadcast on QK^t\"\n\n HAS_MASK = True\n IS_MATRIX_MASK = attention_mask.size(2) != 1\n\n _fwd_kernel[grid]( # can't use name args because of the way autotune is implemented :-(\n head_size, # heads\n m_size, # m_size\n n_size, # n_size\n m_size // 32, # cache_key_m_size\n n_size // 32, # cache_key_n_size\n q, # Q\n k, # K\n v, # V\n sm_scale, # sm_scale\n attention_mask, # attention_mask\n output, # output\n *q.stride(), # (batch, heads, m_size, size_k)\n *k.stride(), # (batch, heads, n_size, size_k)\n *v.stride(), # (batch, heads, size_k, n_size)\n *output.stride(), # (batch, heads, m_size, n_size)\n *attention_mask.stride() if HAS_MASK else (0, 0, 0, 0), # (batch, heads, m_size, size_k)\n torch.finfo(attention_mask.dtype).min if HAS_MASK else 0, # min_clamp_value\n *attention_mask.size() if HAS_MASK else (0, 0, 0, 0), # (batch, heads, m_size, size_k)\n HAS_MASK, # HAS_MASK\n IS_MATRIX_MASK, # IS_MATRIX_MASK\n is_causal, # IS_CAUSAL\n dhead, # BLOCK_DHEAD\n 128, # BLOCK_M_SIZE\n 128, # BLOCK_N_SIZE\n m_size % 128 != 0, # M_LOAD_MASK_NEEDED\n n_size % 128 != 0, # N_LOAD_MASK_NEEDED\n num_warps=4 if k.size(3) <= 64 else 8,\n num_stages=2,\n )\n return output\n\n\ndef attention_forward(\n q: torch.Tensor,\n k: torch.Tensor,\n v: torch.Tensor,\n output: torch.Tensor,\n sm_scale: float,\n is_causal: bool = False,\n attention_mask: Optional[torch.Tensor] = None,\n):\n return Attention.apply(q, k, v, output, sm_scale, is_causal, attention_mask)\n", "repo_name": "ELS-RD/kernl", "sub_path": "src/kernl/implementations/attention.py", "file_name": "attention.py", "file_ext": "py", "file_size_in_byte": 24109, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1388, "dataset": "github-code", "pt": "21", "api": [{"api_name": "torch.Tensor", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 19, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.matmul", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.tril", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.matmul", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 23, "usage_type": "attribute"}, {"api_name": "math.floor", 
"line_number": 53, "usage_type": "call"}, {"api_name": "math.log2", "line_number": 53, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 54, "usage_type": "call"}, {"api_name": "math.log2", "line_number": 54, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 50, "usage_type": "name"}, {"api_name": "triton.language.constexpr", "line_number": 146, "usage_type": "attribute"}, {"api_name": "triton.language", "line_number": 146, "usage_type": "name"}, {"api_name": "triton.language.constexpr", "line_number": 147, "usage_type": "attribute"}, {"api_name": "triton.language", "line_number": 147, "usage_type": "name"}, {"api_name": "triton.language.constexpr", "line_number": 148, "usage_type": "attribute"}, {"api_name": "triton.language", "line_number": 148, "usage_type": "name"}, {"api_name": "triton.language.constexpr", "line_number": 149, "usage_type": "attribute"}, {"api_name": "triton.language", "line_number": 149, "usage_type": "name"}, {"api_name": "triton.language.constexpr", "line_number": 150, "usage_type": "attribute"}, {"api_name": "triton.language", "line_number": 150, "usage_type": "name"}, {"api_name": "triton.language.constexpr", "line_number": 151, "usage_type": "attribute"}, {"api_name": "triton.language", "line_number": 151, "usage_type": "name"}, {"api_name": "triton.language.constexpr", "line_number": 152, "usage_type": "attribute"}, {"api_name": "triton.language", "line_number": 152, "usage_type": "name"}, {"api_name": "triton.language.constexpr", "line_number": 153, "usage_type": "attribute"}, {"api_name": "triton.language", "line_number": 153, "usage_type": "name"}, {"api_name": "triton.language.program_id", "line_number": 224, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 224, "usage_type": "name"}, {"api_name": "triton.language.program_id", "line_number": 227, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 227, "usage_type": "name"}, {"api_name": "triton.language.arange", "line_number": 234, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 234, "usage_type": "name"}, {"api_name": "triton.language.arange", "line_number": 235, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 235, "usage_type": "name"}, {"api_name": "triton.language.arange", "line_number": 236, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 236, "usage_type": "name"}, {"api_name": "triton.language.zeros", "line_number": 276, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 276, "usage_type": "name"}, {"api_name": "triton.language.float32", "line_number": 276, "usage_type": "attribute"}, {"api_name": "triton.language.zeros", "line_number": 277, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 277, "usage_type": "name"}, {"api_name": "triton.language.float32", "line_number": 277, "usage_type": "attribute"}, {"api_name": "triton.language.zeros", "line_number": 281, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 281, "usage_type": "name"}, {"api_name": "triton.language.float32", "line_number": 281, "usage_type": "attribute"}, {"api_name": "triton.language.load", "line_number": 287, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 287, "usage_type": "name"}, {"api_name": "triton.language.load", "line_number": 289, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 289, "usage_type": "name"}, {"api_name": "triton.language.load", "line_number": 322, "usage_type": "call"}, 
{"api_name": "triton.language", "line_number": 322, "usage_type": "name"}, {"api_name": "triton.language.load", "line_number": 324, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 324, "usage_type": "name"}, {"api_name": "triton.language.zeros", "line_number": 325, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 325, "usage_type": "name"}, {"api_name": "triton.language.float32", "line_number": 325, "usage_type": "attribute"}, {"api_name": "triton.language.where", "line_number": 329, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 329, "usage_type": "name"}, {"api_name": "triton.language.dot", "line_number": 330, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 330, "usage_type": "name"}, {"api_name": "triton.language.trans", "line_number": 330, "usage_type": "call"}, {"api_name": "triton.language.where", "line_number": 333, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 333, "usage_type": "name"}, {"api_name": "triton.language.load", "line_number": 354, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 354, "usage_type": "name"}, {"api_name": "triton.language.load", "line_number": 361, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 361, "usage_type": "name"}, {"api_name": "triton.language.where", "line_number": 366, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 366, "usage_type": "name"}, {"api_name": "triton.language.max", "line_number": 376, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 376, "usage_type": "name"}, {"api_name": "triton.language.exp", "line_number": 378, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 378, "usage_type": "name"}, {"api_name": "triton.language.sum", "line_number": 379, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 379, "usage_type": "name"}, {"api_name": "triton.language.maximum", "line_number": 381, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 381, "usage_type": "name"}, {"api_name": "triton.language.exp", "line_number": 382, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 382, "usage_type": "name"}, {"api_name": "triton.language.exp", "line_number": 383, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 383, "usage_type": "name"}, {"api_name": "triton.language.load", "line_number": 407, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 407, "usage_type": "name"}, {"api_name": "triton.language.load", "line_number": 409, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 409, "usage_type": "name"}, {"api_name": "triton.language.dot", "line_number": 411, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 411, "usage_type": "name"}, {"api_name": "triton.language.store", "line_number": 419, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 419, "usage_type": "name"}, {"api_name": "triton.language.store", "line_number": 421, "usage_type": "call"}, {"api_name": "triton.language", "line_number": 421, "usage_type": "name"}, {"api_name": "triton.jit", "line_number": 108, "usage_type": "attribute"}, {"api_name": "torch.autograd", "line_number": 424, "usage_type": "attribute"}, {"api_name": "torch.autograd.function.FunctionCtx", "line_number": 428, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 429, "usage_type": "attribute"}, {"api_name": "torch.Tensor", 
"line_number": 430, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 431, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 432, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 435, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 435, "usage_type": "attribute"}, {"api_name": "torch.float16", "line_number": 460, "usage_type": "attribute"}, {"api_name": "torch.bfloat16", "line_number": 460, "usage_type": "attribute"}, {"api_name": "triton.cdiv", "line_number": 464, "usage_type": "call"}, {"api_name": "torch.finfo", "line_number": 502, "usage_type": "call"}, {"api_name": "torch.cuda.amp.custom_fwd", "line_number": 426, "usage_type": "call"}, {"api_name": "torch.float16", "line_number": 426, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 519, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 520, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 521, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 522, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 525, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 525, "usage_type": "attribute"}]} +{"seq_id": "31615438677", "text": "#coding=utf-8\r\n'''\r\nCreated on 2016年11月9日\r\n\r\n@author: huangning\r\n'''\r\nimport MySQLdb\r\n\r\nconnection = MySQLdb.connect(host=\"192.168.1.237\",\r\n user=\"root\",\r\n passwd=\"admin\")\r\ntry:\r\n #with connection.cursor() as cursor:\r\n cursor = connection.cursor()\r\n sql = \"CREATE DATABASE IF NOT EXISTS crimemap\"\r\n cursor.execute(sql)\r\n sql = \"\"\"CREATE TABLE IF NOT EXISTS crimemap.crimes (\r\n id int NOT NULL AUTO_INCREMENT,\r\n latitude FLOAT(10,6),\r\n longitude FLOAT(10,6),\r\n date DATETIME,\r\n category VARCHAR(50),\r\n description VARCHAR(1000),\r\n updated_at TIMESTAMP,\r\n PRIMARY KEY (id)\r\n )\"\"\"\r\n cursor.execute(sql);\r\n connection.commit()\r\nfinally:\r\n connection.close()", "repo_name": "ricardonhuang/crime_map", "sub_path": "create_database.py", "file_name": "create_database.py", "file_ext": "py", "file_size_in_byte": 930, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "MySQLdb.connect", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "10685912418", "text": "\nimport os\nimport logging\nimport random\nfrom flask import Flask, request\n\nlogging.basicConfig(level=os.environ.get(\"LOGLEVEL\", \"INFO\"))\nlogger = logging.getLogger(__name__)\n\napp = Flask(__name__)\nmoves = ['F', 'L', 'R']\n\n@app.route(\"/\", methods=['POST'])\ndef move():\n request.get_data()\n logger.info(request.json)\n \n data = request.json\n self_link = (data['_links']['self']['href'])\n states = data['arena']['state']\n\n self_location = states[self_link]\n self_x = self_location['x']\n self_y = self_location['y']\n \n # print (self_link)\n # print (states)\n # print (len(states))\n\n\n for state in states:\n if state != self_link: \n p = states[state]\n if (p['x'] == self_x) and abs(p['y'] - self_y) <= 3:\n return ('T')\n if (p['y'] == self_y) and abs(p['x'] - self_x) <= 3:\n return ('T')\n \n return moves[random.randrange(len(moves))]\n\nif __name__ == \"__main__\":\n app.run(debug=False,host='0.0.0.0',port=int(os.environ.get('PORT', 8080)))\n ", "repo_name": "adhishpanta96/adhishmsbattle", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1056, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.basicConfig", "line_number": 7, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 7, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 7, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.request.get_data", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "random.randrange", "line_number": 39, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 42, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 42, "usage_type": "attribute"}]} +{"seq_id": "9985062502", "text": "# ch9_8.py\nimport openpyxl\nfrom openpyxl.utils import get_column_letter\n\nfn = \"data9_3.xlsx\"\nwb = openpyxl.load_workbook(fn)\nws = wb.active\nfor i in range(3,7):\n ch = get_column_letter(i) # 將數字轉成欄位\n index = ch + str(7)\n start_index = ch + str(4)\n end_index = ch + str(6)\n ws[index] = \"=SUM({}:{})\".format(start_index,end_index)\nwb.save(\"out9_8.xlsx\")\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "June0608/Python", "sub_path": "Program Examples/ch9/ch9_8.py", "file_name": "ch9_8.py", "file_ext": "py", "file_size_in_byte": 398, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 6, "usage_type": "call"}, {"api_name": "openpyxl.utils.get_column_letter", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "31289448244", "text": "from typing import List\n\n\nclass Solution: # DP\n def trap(self, height: List[int]) -> int:\n L = len(height)\n if L < 3:\n return 0\n ans = 0\n left_max = height[0]\n right_max_list = height[:]\n for i in range(L - 2, 0, -1):\n right_max_list[i] = max(right_max_list[i], right_max_list[i + 1])\n for i in range(1, L - 1):\n cur = height[i]\n if left_max > cur:\n right_max = right_max_list[i]\n if right_max > cur:\n ans += min(left_max, right_max) - cur\n elif cur > left_max:\n left_max = cur\n return ans\n\n\nclass Solution2:\n def trap(self, height: List[int]) -> int:\n L = len(height)\n if L < 3:\n return 0\n ans = 0\n st = []\n for i in range(L):\n while st and height[i] > height[st[-1]]:\n j = st.pop()\n if st:\n ans += (min(height[i], height[st[-1]]) - height[j]) * (i - st[-1] - 1)\n st.append(i)\n print(st)\n return ans\n\n\nclass Solution3:\n def trap(self, height: List[int]) -> int:\n ans = 0\n left, right = 0, len(height) - 1\n leftMax = height[left]\n rightMax = height[right]\n while left < right:\n if height[left] < height[right]:\n ans += leftMax - height[left]\n left += 1\n leftMax = max(leftMax, height[left])\n else:\n ans += rightMax - height[right]\n right -= 1\n rightMax = max(rightMax, height[right])\n return ans\n\n\nsolution = Solution3()\nans = solution.trap([0,1,0,2,1,0,1,3,2,1,2,1])\nassert ans == 6, ans\n\nans = solution.trap([4,2,0,3,2,5])\nassert ans == 9, ans\n", "repo_name": "sengami-yuka/py", "sub_path": 
"coding_problems/leetcode/hot_100/42_trapping_rain_water.py", "file_name": "42_trapping_rain_water.py", "file_ext": "py", "file_size_in_byte": 1820, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.List", "line_number": 5, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "35330648780", "text": "import sqlite3 \nconn = sqlite3.connect('myshop.db')\n\ndef createProductTable(conn):\n c = conn.cursor()\n sql = \"\"\"\n CREATE TABLE products (\n id INTEGER PRIMARY KEY,\n name TEXT NOT NULL,\n price REAL NOT NULL,\n qty INTEGER NOT NULL\n )\n \"\"\"\n c.execute(sql)\n conn.commit()\n\ndef insertProduct(conn, name, price, qty):\n c = conn.cursor()\n sql = \"\"\"\n INSERT INTO products (name, price, qty)\n VALUES (?, ?, ?)\n \"\"\"\n #c.execute(sql, ('Pen', 15, 45))\n #c.execute(sql, ('Cup', 80, 5))\n #c.execute(sql, ('Notebook', 25, 20))\n c.execute(sql, (name, price, qty))\n conn.commit()\n\ndef listProducts(conn, where):\n c = conn.cursor()\n sql = '''SELECT id, name, price, qty \n FROM products \n WHERE {}\n '''.format(where)\n c.execute(sql)\n products = c.fetchall()\n for pd in products:\n print(pd)\n\ndef update_product(conn, id, price, qty): \n sql = \"\"\"\n UPDATE products\n SET price = ?,\n qty = ?\n WHERE id = ?\n \"\"\"\n c = conn.cursor()\n c.execute(sql, (price, qty, id))\n conn.commit()\n\ndef delete_by_id(conn, id):\n sql=\"\"\"\n DELETE FROM products WHERE id = ?\n \"\"\"\n c = conn.cursor()\n c.execute(sql, (id,))\n conn.commit()\n\ndef getProducts(conn):\n c = conn.cursor()\n sql = 'SELECT id, name, price, qty FROM products'\n c.execute(sql)\n products = c.fetchall()\n return products\n\ndef test():\n #insertProduct(conn, 'Stapler', 1000, 1)\n #insertProduct(conn, 'Pen', 1000000, 100)\n #insertProduct(conn, 'Nothing', 1000000000, 10)\n #insertProduct(conn, 'Ha', 1000, 100)\n #insertProduct(conn, 'Leuk', 1000, 10)\n #createProductTable(conn)\n delete_by_id(conn, 1)\n #update_product(conn, 1, 999, 100)\n listProducts(conn, 'price < 1000000')\n conn.close()\n\ntest()", "repo_name": "Walfzz/my_site", "sub_path": "2020.4.18/sqlite_test.py", "file_name": "sqlite_test.py", "file_ext": "py", "file_size_in_byte": 1842, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sqlite3.connect", "line_number": 2, "usage_type": "call"}]} +{"seq_id": "28502928271", "text": "# 92. Reverse Linked List II\n\n# Reverse a linked list from position m to n. 
Do it in one-pass.\n\n# Note: 1 ≤ m ≤ n ≤ length of list.\n\n# Example:\n\n# Input: 1->2->3->4->5->NULL, m = 2, n = 4\n# Output: 1->4->3->2->5->NULL\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nfrom typing import List\n\n\nclass Solution:\n @staticmethod\n def fromArray(values: List) -> ListNode:\n l = len(values)\n if not l:\n return None\n head = ListNode(values[0])\n cur = head\n for i in range(1, l):\n cur.next = ListNode(values[i])\n cur = cur.next\n return head\n\n @staticmethod\n def toArray(head: ListNode) -> List:\n arr = []\n cur = head\n while cur:\n arr.append(cur.val)\n cur = cur.next\n return arr\n\n def reverseBetween(self, head: ListNode, m: int, n: int) -> ListNode:\n c = n - m\n if not c:\n return head\n\n subtail = None\n while m > 1:\n subtail = subtail.next if subtail else head\n m -= 1\n\n prev = None\n subhead = subtail.next if subtail else head\n cur = subhead\n while c >= 0:\n next = cur.next\n cur.next = prev\n prev = cur\n cur = next\n c -= 1\n subhead.next = cur\n if subtail:\n subtail.next = prev\n else:\n head = prev\n\n return head\n\n\ntest = Solution()\n\nprint(\"[1, 4, 3, 2, 5] >> \", test.toArray(test.reverseBetween(test.fromArray([1, 2, 3, 4, 5]), 2, 4)))\nprint(\"[3, 2, 1, 4, 5] >> \", test.toArray(test.reverseBetween(test.fromArray([1, 2, 3, 4, 5]), 1, 3)))\nprint(\"[1, 2, 3, 5, 4] >> \", test.toArray(test.reverseBetween(test.fromArray([1, 2, 3, 4, 5]), 4, 5)))\nprint(\"[5, 4, 3, 2, 1] >> \", test.toArray(test.reverseBetween(test.fromArray([1, 2, 3, 4, 5]), 1, 5)))\nprint(\"[1, 2, 3, 4, 5] >> \", test.toArray(test.reverseBetween(test.fromArray([1, 2, 3, 4, 5]), 3, 3)))\nprint(\"[2, 1] >> \", test.toArray(test.reverseBetween(test.fromArray([1, 2]), 1, 2)))\nprint(\"[2] >> \", test.toArray(test.reverseBetween(test.fromArray([2]), 1, 1)))\n", "repo_name": "DmitryVlaznev/leetcode", "sub_path": "92-reverse-linked-list-ii.py", "file_name": "92-reverse-linked-list-ii.py", "file_ext": "py", "file_size_in_byte": 2221, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.List", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "19162263729", "text": "import itertools as it\n\n\nalph = 'КРЕСЛО'\ncnt = 0\nfor word in it.product(alph, repeat=4):\n word = ''.join(word)\n if word[0] in 'КРСЛ' and word[-1] in 'ЕО':\n cnt += 1\n\nprint(cnt)\n", "repo_name": "Richtermnd/Exams", "sub_path": "tasks/task08/homework/n1.py", "file_name": "n1.py", "file_ext": "py", "file_size_in_byte": 202, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "itertools.product", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "16420946686", "text": "import torchvision\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\ntrain_data = torchvision.datasets.MNIST(\n root=r'mnist',\n train=True,\n transform=torchvision.transforms.ToTensor(),\n download=True\n)\n\ntrainDataLoader = DataLoader(dataset=train_data, batch_size=4, shuffle=True)\nprint(train_data[0][0].shape)\nprint(train_data[0][1])\nwriter = SummaryWriter(r\"logs\")\nfor idx, (img, target) in enumerate(trainDataLoader):\n for id1, i in enumerate(img):\n writer.add_image(f'{idx}', i, id1)\n if idx == 8:\n break\n\nwriter.add_image(\"twet\", train_data[0][0], 
train_data[0][1])\nwriter.close()\n", "repo_name": "fl0w2Bloom/note", "sub_path": "torch/dataloaderExample.py", "file_name": "dataloaderExample.py", "file_ext": "py", "file_size_in_byte": 658, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "torchvision.datasets.MNIST", "line_number": 5, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 8, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "39440460283", "text": "#!/usr/bin/env python3\n\n# lib imports\nfrom flask import Blueprint, request\n\n# project imports\nfrom pos.logger_manager.logger_manager import LogManger\nfrom pos.persistance_db_manager.db_driver import DBDriver\nfrom pos.web_app.services.standard_response import StandardResponse\n\nlogger = LogManger().get_logger(__name__)\n\nstocks_blueprint = Blueprint('stocks_blueprint', __name__)\n\n\n@stocks_blueprint.route('/stocks', methods=['GET'])\ndef get_stocks():\n try:\n return StandardResponse(DBDriver().get_stocks(**request.args), 200).to_json()\n except:\n return StandardResponse('stocks do not exist', 404).to_json()\n\n\n@stocks_blueprint.route('/stocks', methods=['POST'])\ndef create_stock():\n json_data = request.json\n\n stock = (DBDriver().get_stocks(product_id=json_data['product_id']) or [None])[0]\n\n if stock is None:\n try:\n created_stock = DBDriver().create_stock(quantity=json_data.get('quantity', None),\n retail_price=json_data.get('retail_price', None),\n last_modified_by=json_data['last_modified_by'],\n supplier_id=json_data.get('supplier_id', None),\n product_id=json_data.get('product_id', None))\n return StandardResponse(created_stock, 200).to_json()\n except:\n return StandardResponse(\"check request json format\", 400).to_json()\n else:\n return StandardResponse(\"stock already exists\", 406).to_json()\n\n\n@stocks_blueprint.route('/stocks/', methods=['PUT', 'PATCH'])\ndef update_stocks(stock_id):\n json_data = request.json\n\n stock = (DBDriver().get_stocks(id=stock_id) or [None])[0]\n\n if stock is not None:\n try:\n updated_stock = DBDriver().update_stock(id=stock_id, quantity=json_data.get('quantity', None),\n retail_price=json_data.get('retail_price', None),\n last_modified_by=json_data['last_modified_by'],\n supplier_id=json_data.get('supplier_id', None),\n product_id=json_data.get('product_id', None))\n return StandardResponse(updated_stock, 200).to_json()\n except:\n return StandardResponse(\"check request json format\", 400).to_json()\n else:\n return StandardResponse(\"stock does not exist\", 406).to_json()\n\n\n@stocks_blueprint.route('/stocks/', methods=['DELETE'])\ndef delete_stock(stock_id):\n\n stock = (DBDriver().get_stocks(id=stock_id) or [None])[0]\n\n if stock is not None:\n try:\n DBDriver().delete_stock(id=stock_id)\n return StandardResponse(\"stock deleted successfully\", 200).to_json()\n except:\n return StandardResponse(\"check request json format\", 400).to_json()\n else:\n return StandardResponse(\"stock does not exist\", 406).to_json()\n", "repo_name": "Ramzy993/SuperMarketPOS-Backend", "sub_path": "pos/web_app/api/v1/stocks.py", "file_name": "stocks.py", "file_ext": "py", 
"file_size_in_byte": 3082, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pos.logger_manager.logger_manager.LogManger", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.Blueprint", "line_number": 13, "usage_type": "call"}, {"api_name": "pos.web_app.services.standard_response.StandardResponse", "line_number": 19, "usage_type": "call"}, {"api_name": "pos.persistance_db_manager.db_driver.DBDriver", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "pos.web_app.services.standard_response.StandardResponse", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "pos.persistance_db_manager.db_driver.DBDriver", "line_number": 28, "usage_type": "call"}, {"api_name": "pos.persistance_db_manager.db_driver.DBDriver", "line_number": 32, "usage_type": "call"}, {"api_name": "pos.web_app.services.standard_response.StandardResponse", "line_number": 37, "usage_type": "call"}, {"api_name": "pos.web_app.services.standard_response.StandardResponse", "line_number": 39, "usage_type": "call"}, {"api_name": "pos.web_app.services.standard_response.StandardResponse", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "pos.persistance_db_manager.db_driver.DBDriver", "line_number": 48, "usage_type": "call"}, {"api_name": "pos.persistance_db_manager.db_driver.DBDriver", "line_number": 52, "usage_type": "call"}, {"api_name": "pos.web_app.services.standard_response.StandardResponse", "line_number": 57, "usage_type": "call"}, {"api_name": "pos.web_app.services.standard_response.StandardResponse", "line_number": 59, "usage_type": "call"}, {"api_name": "pos.web_app.services.standard_response.StandardResponse", "line_number": 61, "usage_type": "call"}, {"api_name": "pos.persistance_db_manager.db_driver.DBDriver", "line_number": 67, "usage_type": "call"}, {"api_name": "pos.persistance_db_manager.db_driver.DBDriver", "line_number": 71, "usage_type": "call"}, {"api_name": "pos.web_app.services.standard_response.StandardResponse", "line_number": 72, "usage_type": "call"}, {"api_name": "pos.web_app.services.standard_response.StandardResponse", "line_number": 74, "usage_type": "call"}, {"api_name": "pos.web_app.services.standard_response.StandardResponse", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "1049692110", "text": "import streamlit as st\nfrom PIL import Image\nimport io\nimport requests\nfrom Utils import *\n\n# Streamlit App\nhide_streamlit_style = \"\"\"\n \n \"\"\"\nst.set_page_config(page_title=\"YouTube Video Summarizer\", layout=\"centered\", page_icon=\"ShreyIconS2.png\")\nst.markdown(hide_streamlit_style, unsafe_allow_html=True)\n\nst.title(\"YouTube Video Summarizer\")\n\nvideo_url = st.text_input(\"Enter a YouTube video URL:\")\nchat_history = []\n\n# Initialize the previous_video_url, message_history, and other variables in session_state\nif \"previous_video_url\" not in st.session_state:\n st.session_state.previous_video_url = \"\"\nif \"message_history\" not in st.session_state:\n st.session_state.message_history = []\nif \"thumbnail\" not 
in st.session_state:\n st.session_state.thumbnail = None\nif \"video_title\" not in st.session_state:\n st.session_state.video_title = \"\"\nif \"channel_name\" not in st.session_state:\n st.session_state.channel_name = \"\"\nif \"summary\" not in st.session_state:\n st.session_state.summary = \"\"\n\n# Create placeholders for video information, thumbnail, and summary\nvideo_info_placeholder1 = st.empty()\nvideo_info_placeholder2 = st.empty()\n\nthumbnail_placeholder = st.empty()\nsummary_placeholder1 = st.empty()\nsummary_placeholder2 = st.empty()\nworking = True\n\nif video_url:\n working = True\n # Call get_summary() and get_info() only if the video_url has changed\n if st.session_state.previous_video_url != video_url:\n try:\n st.session_state.video_title, st.session_state.channel_name, thumbnail_url, info2send = get_info(video_url)\n working = True\n except Exception as e:\n video_info_placeholder1.error(\"Incorrect Video Link\")\n video_url = \"\" # Reset the video_url to prevent further processing\n working = False\n print(e)\n\n if working:\n response = requests.get(thumbnail_url)\n st.session_state.thumbnail = Image.open(io.BytesIO(response.content))\n print(\"Done Thumbnail\")\n else:\n print(\"Error\", 1)\n if working:\n # Update video information, thumbnail placeholders\n video_info_placeholder1.subheader(st.session_state.video_title)\n video_info_placeholder2.write(f\"By {st.session_state.channel_name}\")\n thumbnail_placeholder.image(st.session_state.thumbnail)\n summary_placeholder1.subheader(\"Summary:\")\n summary_placeholder2.write(f\"
Loading Summary
\", unsafe_allow_html=True)\n\n\n if st.session_state.previous_video_url != video_url:\n try:\n st.session_state.summary, st.session_state.message_history = get_summary(info2send)\n except:\n summary_placeholder2.error(\"Video length too long\")\n \n\n # Update the previous_video_url in session_state\n st.session_state.previous_video_url = video_url\n\n # Update video information, thumbnail placeholders\n video_info_placeholder1.subheader(str(st.session_state.video_title))\n video_info_placeholder2.write(f\"By {st.session_state.channel_name}\")\n thumbnail_placeholder.image(st.session_state.thumbnail)\n summary_placeholder1.subheader(\"Summary:\")\n\n # Check if the summary is available and display it\n if st.session_state.summary:\n summary_placeholder1.subheader(\"Summary:\")\n summary_placeholder2.write(f\"
{st.session_state.summary}
\", unsafe_allow_html=True)\n\n st.markdown(\"\"\"---\"\"\")\n st.subheader(\"Ask something about the Video:\")\n # Create a placeholder for chat history\n chat_history_placeholder = st.empty()\n\n # Create a placeholder for user input\n user_input_placeholder = st.empty()\n\n user_message = user_input_placeholder.text_input(\"Ask a Question\")\n\n if user_message:\n bot_response, st.session_state.message_history = get_response(user_message, st.session_state.message_history)\n chat_history.append({\"name\": \"Summarizer\", \"message\": bot_response})\n\n # Update the chat history\n with chat_history_placeholder:\n st.write(\"\\n\")\n for chat in chat_history:\n print((f\"{chat['name']}: {chat['message']}\"))\n st.write(f\"{chat['name']}: {chat['message']}\")\n\n else:\n print(\"Error\", 2)", "repo_name": "Shrey-Sheladia/Summarizer", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 4552, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "streamlit.set_page_config", "line_number": 14, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 15, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 17, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 19, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 23, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 24, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 25, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 26, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 27, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 28, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 29, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 30, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 31, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 32, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 33, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 34, "usage_type": "attribute"}, {"api_name": "streamlit.empty", "line_number": 37, "usage_type": "call"}, {"api_name": "streamlit.empty", "line_number": 38, "usage_type": "call"}, {"api_name": "streamlit.empty", "line_number": 40, "usage_type": "call"}, {"api_name": "streamlit.empty", "line_number": 41, "usage_type": "call"}, {"api_name": "streamlit.empty", "line_number": 42, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 48, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 50, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 59, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 60, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 60, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 60, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 60, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 66, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 67, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 
68, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 73, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 75, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 81, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 84, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 85, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 86, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 90, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 92, "usage_type": "attribute"}, {"api_name": "streamlit.markdown", "line_number": 94, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 95, "usage_type": "call"}, {"api_name": "streamlit.empty", "line_number": 97, "usage_type": "call"}, {"api_name": "streamlit.empty", "line_number": 100, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 105, "usage_type": "attribute"}, {"api_name": "streamlit.write", "line_number": 110, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "24591090092", "text": "from dash import Dash, dcc, html\nfrom dash.dependencies import Input, Output\nimport pandas as pd\n\napp = Dash(__name__)\n\ndf = pd.DataFrame({\n 'student_id' : range(1, 11),\n 'score' : [1, 5, 2, 5, 2, 3, 1, 5, 1, 5]\n})\n\napp.layout = html.Div([\n\tdcc.Dropdown(list(range(1, 6)), 1, id='score'),\n\t'Foi pontuado pela seguinte quantidade de estudantes:',\n\thtml.Div(id='output'),\n dcc.Store(id='store')#div invisivel - guarda por sessão dataframes e variaveis \n])\n\n@app.callback(Output('store', 'data'), Input('score', 'value'))\ndef update_output(value):\n\tglobal df\n\t#df = df[df['score'] == value] - não faça alteração de variaveis globais dentro de funções ou callbacks\n\tfiltered_df = df[df['score'] == value]\n\n\treturn filtered_df.to_dict()# dado pertinente apenas para o usuario - guardar informação\n#to_dict - para dicionario\n\n@app.callback(Output('output', 'children'), Input('store', 'data'))\ndef update_output(data):\n\tfiltered_df = pd.DataFrame(data)#transfoma de volta em dataframe\n\treturn len(filtered_df)\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n \n\t", "repo_name": "AnaMarcacini/Asimov_Academy", "sub_path": "Dashboards/Dash/Médio/4-variaveis_globais.py", "file_name": "4-variaveis_globais.py", "file_ext": "py", "file_size_in_byte": 1084, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "dash.Dash", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 7, "usage_type": "call"}, {"api_name": "dash.html.Div", "line_number": 12, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 12, "usage_type": "name"}, {"api_name": "dash.dcc.Dropdown", "line_number": 13, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 13, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 15, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 15, "usage_type": "name"}, {"api_name": "dash.dcc.Store", "line_number": 16, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 16, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 19, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", 
"line_number": 19, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 30, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 28, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "74346851894", "text": "import time\n\nimport matplotlib.pyplot as plt\n\nfrom multistrand.concurrent import MergeSim\nfrom case2 import Experiment, setupSimulationOptions, runExperiment, \\\n CL_LONG_GATE_A_SEQ, CL_LONG_GATE_B_SEQ\nfrom multistrand.experiment import ClampedSeesawGate\n\n\nmyMultistrand = MergeSim()\n\n\n# For a greater understanding of how this specific code actually runs, please see case2.py\ndef runSimulations():\n # Here we say we are going to use 2 threads, storing only succesful data.\n # We demand at 100 successful trials\n setupSimulationOptions(7, True, 100, 2.5e6, 1000)\n\n # Here we create two clamped seesaw gates, according to the defined interface.\n gateA = ClampedSeesawGate(*CL_LONG_GATE_A_SEQ)\n gateB = ClampedSeesawGate(*CL_LONG_GATE_B_SEQ)\n\n trialsPerThreadIncremenet = 200\n trialsIncrement = trialsPerThreadIncremenet * myMultistrand.numOfThreads\n\n xValues = []\n times = []\n\n for ssample in range(1, 26, 5):\n start = time.time()\n results = runExperiment(trialsIncrement, gateA,\n Experiment.GATE_OUTPUT_PRODUCTION, None, ssample)\n end = time.time()\n elapsedTime = end-start\n timePerTrial = elapsedTime / results.nTotal\n xValues.append(ssample)\n times.append(timePerTrial)\n \n return xValues, times\n\ndef plotFigure():\n plt.rcdefaults()\n results = runSimulations()\n xValues = results[0]\n times = results[1] \n plt.plot(xValues, times, linewidth=2.0, marker='.', markersize=10.0)\n plt.xlim(xmin=1)\n plt.title(\"Time vs Supersampling Number\")\n plt.xlabel('Supersample')\n plt.ylabel('Time per Trial(s)')\n plt.gca().set_ylim(bottom=0)\n plt.show()\n\nif __name__ == \"__main__\":\n plotFigure()\n", "repo_name": "DNA-and-Natural-Algorithms-Group/multistrand", "sub_path": "tutorials/leak_casestudy/case3.py", "file_name": "case3.py", "file_ext": "py", "file_size_in_byte": 1727, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 17, "dataset": "github-code", "pt": "21", "api": [{"api_name": "multistrand.concurrent.MergeSim", "line_number": 11, "usage_type": "call"}, {"api_name": "case2.setupSimulationOptions", "line_number": 18, "usage_type": "call"}, {"api_name": "multistrand.experiment.ClampedSeesawGate", "line_number": 21, "usage_type": "call"}, {"api_name": "case2.CL_LONG_GATE_A_SEQ", "line_number": 21, "usage_type": "name"}, {"api_name": "multistrand.experiment.ClampedSeesawGate", "line_number": 22, "usage_type": "call"}, {"api_name": "case2.CL_LONG_GATE_B_SEQ", "line_number": 22, "usage_type": "name"}, {"api_name": "time.time", "line_number": 31, "usage_type": "call"}, {"api_name": "case2.runExperiment", "line_number": 32, "usage_type": "call"}, {"api_name": "case2.Experiment.GATE_OUTPUT_PRODUCTION", "line_number": 33, "usage_type": "attribute"}, {"api_name": "case2.Experiment", "line_number": 33, "usage_type": "name"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcdefaults", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", 
"line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "74399849974", "text": "import matplotlib.pyplot as plt\n\ndef build_graph(sizes_arr, time_without_omp, time_with_omp):\n\n fig1 = plt.figure(figsize = (10, 7))\n plot = fig1.add_subplot()\n plot.plot(sizes_arr, time_without_omp, label = \"Без распараллеливания\")\n plot.plot(sizes_arr, time_with_omp, label = \"C распараллеливанием\")\n\n plt.legend()\n plt.grid()\n plt.title(\"Сравнение затраченного времени\")\n plt.ylabel(\"Затраченное время (мс)\")\n plt.xlabel(\"Размер сцены (кол-во клеток)\")\n \n plt.show()\n \n\ndef compare_time():\n time_without_omp = [25, 152, 203, 227, 251, 296]\n time_with_omp = [33, 92, 123, 163, 195, 242]\n\n sizes_arr = [\"1x1\", \"10x10\", \"20x20\", \"30x30\", \"40x40\", \"50x50\"]\n\n build_graph(sizes_arr, time_without_omp, time_with_omp)\n\nif __name__ == \"__main__\":\n compare_time()\n", "repo_name": "kovkir/bmstu_cg_course_project", "sub_path": "proj/compare.py", "file_name": "compare.py", "file_ext": "py", "file_size_in_byte": 922, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "21", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "24002520694", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[44]:\n\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom pandas_profiling import ProfileReport\nimport matplotlib.pyplot as plt\n\n# Load the Spambase csv data using the Code provided in class\nfilename = 'C:/Users/klara/Documents/Studium/Master/IntroductiontoMachineLearning/spambase.csv'\ndf = 
pd.read_csv(filename)\n\nraw_data = df.to_numpy() \ncols = range(0, 58) \nX = raw_data[:, cols]\n\n# Extract the attribute names that came from the header of the csv\nattributeNames = np.asarray(df.columns[cols])\nclassNames = np.unique(classLabels)\nclassDict = dict(zip(classNames,range(len(classNames))))\ny = np.array([classDict[cl] for cl in classLabels])\nN, M = X.shape\nC = len(classNames)\n\n\n# In[45]:\n\n\n#function which returns top correlations \ndef get_redundant_pairs(df):\n '''Get diagonal and lower triangular pairs of correlation matrix'''\n pairs_to_drop = set()\n cols = df.columns\n for i in range(0, df.shape[1]):\n for j in range(0, i+1):\n pairs_to_drop.add((cols[i], cols[j]))\n return pairs_to_drop\n\ndef get_top_abs_correlations(df, n=5):\n au_corr = df.corr().abs().unstack()\n labels_to_drop = get_redundant_pairs(df)\n au_corr = au_corr.drop(labels=labels_to_drop).sort_values(ascending=False)\n return au_corr[0:n]\n\nprint(\"Top Absolute Correlations\")\nprint(get_top_abs_correlations(df, 5))\n\n\n# In[46]:\n\n\n#function which gets the lowest correlation\ndef get_lowest_abs_correlations(df, n=5):\n au_corr = df.corr().abs().unstack()\n labels_to_drop = get_redundant_pairs(df)\n au_corr = au_corr.drop(labels=labels_to_drop).sort_values(ascending=True)\n return au_corr[0:n]\n\nprint(\"Lowest Absolute Correlations\")\nprint(get_lowest_abs_correlations(df, 5))\n\n\n# In[47]:\n\n\n#scatter the variables 857 and 415, classified by spam/no spam\na=plt.scatter(df[df['spam(Y/N)']==0][' word_freq_857'], df[df['spam(Y/N)']==0][' word_freq_415'], color='green', label='No Spam')\nb=plt.scatter(df[df['spam(Y/N)']==1][' word_freq_857'], df[df['spam(Y/N)']==1][' word_freq_415'], color='red', alpha=0.4, label='Spam')\nplt.ylabel('Frequency of 415')\nplt.xlabel('Frequency of 857')\nplt.legend(handles=[a,b])\nplt.show()\n\n\n# In[48]:\n\n\n#scatter the variables direct and 415, classified by spam/no spam\na=plt.scatter(df[df['spam(Y/N)']==0][' word_freq_415'], df[df['spam(Y/N)']==0][' word_freq_direct'], color='green', label='No Spam')\nb=plt.scatter(df[df['spam(Y/N)']==1][' word_freq_415'], df[df['spam(Y/N)']==1][' word_freq_direct'], color='red', alpha=0.4, label='Spam')\nplt.ylabel('Frequency of direct')\nplt.xlabel('Frequency of 415')\nplt.legend(handles=[a,b])\nplt.show()\n\n\n# In[49]:\n\n\n#print the correlation matrix \ncorrMatrix= df.corr()\n#print the values most correlated with \"spam\"\nspamCorrelation=corrMatrix[-1:]\nmaxSpamCorr = spamCorrelation.abs().unstack()\nmaxSpamCorr=maxSpamCorr.sort_values(ascending=False)\nprint(maxSpamCorr[:10])\n#print the correlation between george and spam\nprint(corrMatrix[' word_freq_george']['spam(Y/N)'])\n\n\n# In[38]:\n\n\n#scatter frequency of \"your\"\nx=np.random.normal(0,1, (len(df[df['spam(Y/N)']==0])))\nx2=np.random.normal(0,1, (len(df[df['spam(Y/N)']==1])))\nplt.figure(figsize=(3,7))\nplt.scatter(x, df[df['spam(Y/N)']==0][' word_freq_your'], color='green', label='No Spam')\nplt.scatter(x2, df[df['spam(Y/N)']==1][' word_freq_your'], color='red', alpha=0.4, label='Spam')\nplt.legend(handles=[a,b])\nplt.ylabel('Frequency of your')\n\n\n# In[39]:\n\n\n#scatter frequency of \"george\"\nx=np.random.normal(0,1, (len(df[df['spam(Y/N)']==0])))\nx2=np.random.normal(0,1, (len(df[df['spam(Y/N)']==1])))\nplt.figure(figsize=(3,7))\na=plt.scatter(x, df[df['spam(Y/N)']==0][' word_freq_george'], color='green', label='No Spam')\nb=plt.scatter(x2, df[df['spam(Y/N)']==1][' word_freq_george'], color='red', alpha=0.4, 
label='Spam')\nplt.legend(handles=[a,b])\nplt.ylabel('Frequency of the word george')\n\n\n# In[40]:\n\n\n#heatmap of the correlation matrix\ncorrMatrix= df.corr()\nsns.heatmap(corrMatrix, annot=False)\nplt.show()\n\n\n# In[41]:\n\n\n#slice the heatmap to zoom in on the more significant parts\nindexes=np.append(np.arange(22,40),-2)\nindexes\ncorrMatrix2= df.iloc[:,indexes].corr()\nsns.heatmap(corrMatrix2, annot=False)\nplt.show()\n\n\n\n\n\n\n", "repo_name": "georgiostsimplis/Introduction_to_Machine_Learning", "sub_path": "Project_1/correlations.py", "file_name": "correlations.py", "file_ext": "py", "file_size_in_byte": 4224, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "numpy.random.normal", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 109, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 110, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "numpy.random.normal", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 123, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 144, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}]} +{"seq_id": "11439088202", "text": "import csv\nfrom multiprocessing import Process\n\nimport os\n\nfrom arch.api import eggroll, StoreType\nfrom arch.api.storage import save_data\nfrom arch.api.standalone.eggroll import Standalone\nfrom arch.api.utils import file_utils\n\nfrom fdp.helper import WORK_MODE, gen_data_namespace, gen_model_namespace, eggroll_init\n\n\ndef list_to_str(input_list):\n str1 = ''\n size = len(input_list)\n for i in range(size):\n if i == size - 1:\n str1 += str(input_list[i])\n else:\n str1 += str(input_list[i]) + ','\n\n return str1\n\n\ndef csv_read_data(input_file, head=True):\n with open(input_file) as csv_file:\n csv_reader = csv.reader(csv_file)\n if head:\n next(csv_reader)\n for row in csv_reader:\n yield (row[0], list_to_str(row[1:]))\n\n\ndef upload_csv(nid, path, table_name):\n eggroll_init()\n # df = pd.read_csv(path, index_col=id_col)\n # if label_col is not None:\n # df[label_col] = df[label_col].astype('bool')\n # kv_data = [(idx, ','.join(row.astype(str).values)) for idx, row in df.iterrows()]\n kv_data = csv_read_data(path)\n namespace = gen_data_namespace(nid)\n return save_data(kv_data, name=table_name, namespace=namespace, error_if_exist=True)\n\n\ndef start_proc(func, *args):\n proc = Process(target=func, args=args)\n proc.start()\n 
proc.join()\n\n\ndef upload_csv_proc(nid, path, table_name):\n proc = Process(target=upload_csv, args=(nid, path, table_name))\n proc.start()\n proc.join()\n\n\ndef list_files(namespace):\n _type = StoreType.LMDB.value\n _base_dir = os.sep.join([os.path.join(file_utils.get_project_base_directory(), 'data'), _type])\n _namespace_dir = os.sep.join([_base_dir, namespace])\n if not os.path.isdir(_base_dir):\n raise EnvironmentError(\"illegal datadir\")\n return [file for file in os.listdir(_namespace_dir) if not file.endswith('.meta')]\n\n\ndef list_tables(nid):\n return list_files(gen_data_namespace(nid))\n\n\ndef list_models(nid):\n return list_files(gen_model_namespace(nid))\n\n\ndef del_table(nid, name):\n eggroll_init()\n eggroll.cleanup(name, gen_data_namespace(nid), True)\n\n\ndef del_model(nid, name):\n eggroll_init()\n eggroll.cleanup(name, gen_model_namespace(nid), True)\n\n\ndef show_table(nid, table_name):\n eggroll_init()\n table = eggroll.table(table_name, gen_data_namespace(nid))\n data = list(table.collect())\n print(data)\n\nif __name__ == '__main__':\n pass\n # from fdp.helper import guest_file, host_file\n # gid = 1\n # hid = 2\n # g_table = 'guest_table'\n # h_table = 'host_table'\n # for item in ((gid, guest_file, g_table), (hid, host_file, h_table)):\n # upload_csv_proc(*item)\n #\n # for i in (gid, hid):\n # print('nid:', i)\n # print('list tables')\n # tables = list_tables(i)\n # for table in tables:\n # print(table)\n # del_table(i, table)\n # print('delete tables')\n # print(list_tables(i))\n #\n # for item in ((gid, guest_file, g_table), (hid, host_file, h_table)):\n # upload_csv_proc(*item)\n #\n # for i in (gid, hid):\n # print('nid:', i)\n # print('list tables')\n # tables = list_tables(i)\n # for table in tables:\n # print(table)", "repo_name": "cleanerleon/FDP_SERVER", "sub_path": "fdp/file_mgr.py", "file_name": "file_mgr.py", "file_ext": "py", "file_size_in_byte": 3277, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "csv.reader", "line_number": 28, "usage_type": "call"}, {"api_name": "fdp.helper.eggroll_init", "line_number": 36, "usage_type": "call"}, {"api_name": "fdp.helper.gen_data_namespace", "line_number": 42, "usage_type": "call"}, {"api_name": "arch.api.storage.save_data", "line_number": 43, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 47, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 53, "usage_type": "call"}, {"api_name": "arch.api.StoreType.LMDB", "line_number": 59, "usage_type": "attribute"}, {"api_name": "arch.api.StoreType", "line_number": 59, "usage_type": "name"}, {"api_name": "os.sep.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "arch.api.utils.file_utils.get_project_base_directory", "line_number": 60, "usage_type": "call"}, {"api_name": "arch.api.utils.file_utils", "line_number": 60, "usage_type": "name"}, {"api_name": "os.sep.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 64, "usage_type": "call"}, {"api_name": 
"fdp.helper.gen_data_namespace", "line_number": 68, "usage_type": "call"}, {"api_name": "fdp.helper.gen_model_namespace", "line_number": 72, "usage_type": "call"}, {"api_name": "fdp.helper.eggroll_init", "line_number": 76, "usage_type": "call"}, {"api_name": "arch.api.eggroll.cleanup", "line_number": 77, "usage_type": "call"}, {"api_name": "arch.api.eggroll", "line_number": 77, "usage_type": "name"}, {"api_name": "fdp.helper.gen_data_namespace", "line_number": 77, "usage_type": "call"}, {"api_name": "fdp.helper.eggroll_init", "line_number": 81, "usage_type": "call"}, {"api_name": "arch.api.eggroll.cleanup", "line_number": 82, "usage_type": "call"}, {"api_name": "arch.api.eggroll", "line_number": 82, "usage_type": "name"}, {"api_name": "fdp.helper.gen_model_namespace", "line_number": 82, "usage_type": "call"}, {"api_name": "fdp.helper.eggroll_init", "line_number": 86, "usage_type": "call"}, {"api_name": "arch.api.eggroll.table", "line_number": 87, "usage_type": "call"}, {"api_name": "arch.api.eggroll", "line_number": 87, "usage_type": "name"}, {"api_name": "fdp.helper.gen_data_namespace", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "24068401570", "text": "from datetime import datetime\nimport json # noqa\nfrom pymongo import DESCENDING\nfrom bson.objectid import ObjectId\n\nfrom util.database import Database\n\nDB_NAME = 'discoverybot'\nCOLLECTION_NAME = 'discovery_requests'\n\n\nclass DbClientDiscovery(Database):\n def __init__(self):\n super().__init__(DB_NAME)\n \"\"\"\n Encapsulates a database accessor for discovery directed to our client\n \"\"\"\n def get_one(self, id: str) -> dict:\n \"\"\"\n Return a discovery requests record given an ID.\n\n Args:\n id (str): The mongodb ID of the discovery requests document to retrieve\n Returns:\n (dict): The located document or None\n \"\"\"\n filter_ = {'_id': ObjectId(id)}\n document = self.dbconn[COLLECTION_NAME].find_one(filter_)\n return document\n\n def get_list(self, email: str, clients_id: str, where: dict = None, page_num: int = 1, page_size: int = 25) -> list:\n \"\"\"\n Retrieve a list of discovery requests viewable by this admin user.\n This method supports pagination.\n\n Args:\n email (str): Email address of admin user.\n clients_id (str): ID of client for whom to retrieve discovery requests\n where (dict): Filter\n page_num (int): Which page number is going to be displayed? (default=1)\n page_size (int): Number of documents per page (default=25)\n Returns:\n (list): List of documents from 'discovery_requests' or None\n \"\"\"\n\n skips = page_size * (page_num - 1)\n\n order_by = [\n ('time', DESCENDING)\n ]\n\n if where:\n filter_ = {\n '$and': [\n where,\n {'client_id': ObjectId(clients_id)}\n ]\n }\n else:\n filter_ = {'client_id': ObjectId(clients_id)}\n\n discovery = self.dbconn[COLLECTION_NAME].find(filter_).sort(order_by).skip(skips).limit(page_size)\n\n if not discovery:\n return None\n\n return list(discovery)\n\n def has_any(self, clients_id: str) -> bool:\n \"\"\"\n See if there are any discovery requests for this client. 
This is used in rendering the UI.\n\n Args:\n clients_id (str): ID of client for whom we are to check for discovery requests.\n \n Returns:\n (bool): True if there are discovery requests; otherwise False\n \"\"\"\n discovery_requests = self.dbconn[COLLECTION_NAME].find_one({'client_id': ObjectId(clients_id)})\n if discovery_requests:\n return True\n return False\n\n def del_one_request(self, email: str, doc_id: str, request_number: str) -> dict:\n \"\"\"\n Delete one discovery request from the discovery_requests document.\n\n Args:\n email (str): Email of user making the request.\n doc_id (str): discovery_requests._id value\n request_number (str): Index of item to be deleted.\n \n Returns:\n (dict): success: True/False, message: explains problems; number: request_number\n \"\"\"\n result = self.dbconn[COLLECTION_NAME].update_one(\n {'_id': ObjectId(doc_id)},\n {'$pull': {'requests': {'number': int(request_number)}}}\n )\n\n if result.matched_count == 0:\n return {'success': False, 'message': \"Document not found.\"}\n if result.modified_count == 0:\n return {'success': False, 'message': \"Request not found.\"}\n return {'success': True, 'message': \"OK\", 'number': request_number}\n\n def update_one_request(self, email: str, doc: dict) -> dict:\n \"\"\"\n Update one discovery request from the discovery_requests document.\n\n Args:\n email (str): Email of user making the request.\n doc (dict): Fields to be updated.\n \n Returns:\n (dict): success: True/False, message: explains problems; number: request_number\n \"\"\"\n request_items = ['request', 'privileges', 'objections', 'withholding_statement', 'response']\n update_doc = {f'request.$.{f}': doc.get(f) for f in request_items if f in doc}\n query = {'_id': ObjectId(doc.get('doc_id', None)), 'requests.number': int(doc.get('request_number'))}\n print(' QUERY '.center(80, '*'))\n print(query)\n print(' UPDATE '.center(80, '*'))\n print(update_doc)\n updates = {'$set': update_doc}\n try:\n result = self.dbconn[COLLECTION_NAME].update_one(query, updates)\n except Exception as e:\n self.logger.error(e)\n return {'success': False, 'message': str(e)}\n\n if result.matched_count == 0:\n return {'success': False, 'message': \"Document not found.\"}\n if result.modified_count == 0:\n return {'success': False, 'message': \"Request not found.\"}\n return {'success': True, 'message': \"OK\", 'number': doc.get('request_number', -1), 'modified_count': result.modified_count}\n\n def save(self, email: str, doc: dict) -> dict:\n \"\"\"\n Save a discovery request record\n \"\"\"\n doc['client_id'] = ObjectId(doc['clients_id'])\n doc['last_editor'] = email\n doc['last_edit_date'] = datetime.now()\n\n # Insert new discovery request record\n if doc['_id'] == '0':\n del doc['_id']\n doc['created_by'] = email\n doc['created_date'] = datetime.now()\n\n result = self.dbconn[COLLECTION_NAME].insert_one(doc)\n if result.inserted_id:\n message = \"Discovery requests added\"\n return {'success': True, 'message': message}\n message = \"Failed to add new discovery request\"\n return {'success': False, 'message': message}\n\n # Update existing discovery request record\n filter_ = {'_id': ObjectId(doc['_id'])}\n del doc['_id']\n result = self.dbconn[COLLECTION_NAME].update_one(filter_, {'$set': doc})\n if result.modified_count == 1:\n message = \"Discovery request updated\"\n return {'success': True, 'message': message}\n\n message = f\"Discovery request did not update ({result.modified_count})\"\n return {'success': False, 'message': message}\n", "repo_name": 
"tjdaley/payment_redirect", "sub_path": "app/util/db_client_discovery.py", "file_name": "db_client_discovery.py", "file_ext": "py", "file_size_in_byte": 6285, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "util.database.Database", "line_number": 12, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 27, "usage_type": "call"}, {"api_name": "pymongo.DESCENDING", "line_number": 49, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 56, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 60, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 79, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 97, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 120, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 142, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 144, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 144, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 150, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 150, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 160, "usage_type": "call"}]} +{"seq_id": "70818501172", "text": "from numpy.core.fromnumeric import size\nimport pandas as pd\nimport numpy as np\nfrom pandas.core.frame import DataFrame\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scipy import sparse\nimport sys, json\nimport traceback\n\n\nclass Recommendation_System(object):\n def __init__(self, data, userLimit):\n self.data = data\n self.userLimit = userLimit # number of user neighbours\n self.normalized_data = self.data.copy()\n\n def normalize_data(self):\n users = np.unique(self.data[:, 0]).astype(\n np.int32\n ) # take the unique users column\n items = np.unique(self.data[:, 1]).astype(np.int32)\n\n for user in users:\n this_user_index = np.where(\n self.data[:, 0] == user\n ) # get index of this user\n this_user_ratings = self.data[\n this_user_index, 2\n ] # take rating of this user\n mean_rating = np.mean(this_user_ratings)\n self.normalized_data[this_user_index, 2] -= mean_rating\n\n self.normalized_matrix = sparse.coo_matrix(\n (\n self.normalized_data[:, 2],\n (self.normalized_data[:, 1], self.normalized_data[:, 0]),\n ),\n (int(np.max(items)) + 1, int(np.max(users)) + 1),\n )\n self.normalized_matrix = self.normalized_matrix.tocsr()\n\n def generate_user_similarity_matrix(self):\n self.similar_user = cosine_similarity(\n self.normalized_matrix.T, self.normalized_matrix.T\n )\n\n def predict(self, user, item):\n index_user_rated_this_item = np.where(self.data[:, 1] == item)[\n 0\n ] # get rated user index to find users\n\n all_users_rated_this_item = (self.data[index_user_rated_this_item, 0]).astype(\n np.int32\n ) # get rated users\n\n if user >= len(self.similar_user):\n return np.NaN\n else:\n rated_user_similarity = self.similar_user[\n user, all_users_rated_this_item\n ] # get users similarity to this user\n\n index_of_nearest_limited_rated_user = np.argsort(rated_user_similarity)[\n -self.userLimit :\n ].astype(\n np.int32\n ) # get index of users which have the highest similarity\n\n rating_of_nearest_user = self.normalized_matrix[\n item, all_users_rated_this_item[index_of_nearest_limited_rated_user]\n ] # get rating of those nearest users\n\n 
all_nearest_user_similarity = rated_user_similarity[\n index_of_nearest_limited_rated_user\n ]\n\n return (rating_of_nearest_user * all_nearest_user_similarity)[0] / (\n np.abs(all_nearest_user_similarity).sum() + 1e-8\n )\n\n def generate_prerequisite(self):\n self.normalize_data()\n self.generate_user_similarity_matrix()\n\n def recommend(self, user):\n self.generate_prerequisite()\n\n recommended_items_dict = {}\n ids = np.where(self.data[:, 0] == user)[0]\n items_rated_by_this_user = self.data[ids, 1].astype(np.int32)\n all_items = np.unique(self.data[:, 1].astype(np.int32))\n\n for item in all_items:\n if item not in items_rated_by_this_user:\n rating = self.predict(user, item)\n if rating > 0:\n recommended_items_dict[item] = rating\n recommended_items_dict = sorted(\n recommended_items_dict.items(), key=lambda x: x[1], reverse=True\n )\n recommended_item = []\n if len(recommended_items_dict) > 6:\n for i in range(6):\n recommended_item.append(list(recommended_items_dict)[i][0])\n else:\n for i in range(len(recommended_items_dict)):\n recommended_item.append(list(recommended_items_dict)[i][0])\n\n return recommended_item\n\n\ntry:\n jsondata = json.loads(sys.argv[1])\n arrayOfReviews = np.asarray(jsondata, dtype=np.float)\n rs = Recommendation_System(arrayOfReviews, 4)\n data = rs.recommend(int(sys.argv[2]))\n print(data)\n sys.stdout.flush()\nexcept Exception as e:\n # print(traceback.format_exc())\n sys.stdout.flush()\n\n\n", "repo_name": "ZNhatAnhZ/Bookstore", "sub_path": "src/controllers/recommender.py", "file_name": "recommender.py", "file_ext": "py", "file_size_in_byte": 4204, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.unique", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.sparse.coo_matrix", "line_number": 33, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 57, "usage_type": "attribute"}, {"api_name": "numpy.argsort", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 90, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 91, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 113, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 113, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 114, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 116, 
"usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 118, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 118, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 121, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 121, "usage_type": "attribute"}]} +{"seq_id": "27845013445", "text": "import pylsl\nimport asyncio\nimport logging\nfrom Util import util\nimport pandas as pd\nimport os\nfrom configparser import ConfigParser\n\nlogger = logging.getLogger(__name__)\n\nconfigObject = ConfigParser()\nconfigObject.read(\"config.ini\")\nfileInfo = configObject['File Info']\n\ndef get_file_info_from_config():\n return fileInfo['directory'], fileInfo['participant_name'], fileInfo['participant_session'], fileInfo['file_name_prefix']\n\n\nasync def save_current_streams(directory, participant_name, participant_session, file_name_prefix):\n stream_names = []\n print(\"looking for streams\")\n streams = pylsl.resolve_streams()\n for stream in streams:\n stream_names.append(stream.name())\n print(stream.name())\n print(util.obtain_stream_channel_names(stream))\n\n # Initialize Inlets and Dataframes\n inlets = []\n df_dict = {}\n path = os.path.join(directory, participant_name, participant_session)\n for stream in streams:\n inlets.append(pylsl.StreamInlet(stream))\n header = util.obtain_stream_channel_names(stream)\n header.append('Device_Time')\n df_dict[stream.name()] = pd.DataFrame(columns=header)\n # Create directory for files\n try:\n os.makedirs(os.path.join(path, stream.name()))\n except:\n logger.info('Directory ', os.path.join(path, stream.name()), 'already exists')\n\n while True:\n # get a new sample (you can also omit the timestamp part if you're not\n # interested in it)\n for inlet in inlets:\n samples, timestamps = inlet.pull_chunk()\n if timestamps:\n inlet_name = inlet.info().name()\n # print(inlet_name)\n current_df = df_dict[inlet_name]\n df_temp = util.format_data_into_dataframe(samples, timestamps, current_df.columns.values.tolist())\n current_df = current_df.append(df_temp)\n df_dict[inlet_name] = current_df\n file_name = os.path.join(path, inlet_name, file_name_prefix + '_data.csv')\n hdr = False if os.path.isfile(file_name) else True\n current_df.to_csv(file_name, mode='a', index_label='Timestamp', header=hdr)\n col_names = [i for i in current_df.columns]\n df_dict[inlet_name] = pd.DataFrame(columns=col_names)\n\n\nasync def main():\n directory, participant_name, participant_session, file_name_prefix = get_file_info_from_config()\n await save_current_streams(directory, participant_name, participant_session, file_name_prefix)\n\n\nif __name__ == '__main__':\n asyncio.ensure_future(main())\n loop = asyncio.get_event_loop()\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n logger.info(\"Ctrl-C pressed.\")\n finally:\n loop.close()", "repo_name": "EspenP/EMG_DL_Classification", "sub_path": "save_stream_as_csv.py", "file_name": "save_stream_as_csv.py", "file_ext": "py", "file_size_in_byte": 2801, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 11, "usage_type": "call"}, {"api_name": "pylsl.resolve_streams", "line_number": 22, "usage_type": "call"}, {"api_name": "Util.util.obtain_stream_channel_names", "line_number": 26, "usage_type": "call"}, {"api_name": "Util.util", "line_number": 26, "usage_type": 
"name"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pylsl.StreamInlet", "line_number": 33, "usage_type": "call"}, {"api_name": "Util.util.obtain_stream_channel_names", "line_number": 34, "usage_type": "call"}, {"api_name": "Util.util", "line_number": 34, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 36, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "Util.util.format_data_into_dataframe", "line_number": 52, "usage_type": "call"}, {"api_name": "Util.util", "line_number": 52, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 59, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 68, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "32412539298", "text": "import enum\nimport io\nimport os\n\nfrom asn1crypto.core import ObjectIdentifier\n\nfrom minikerberos.protocol.constants import EncryptionType\nfrom minikerberos.protocol import encryption\nfrom minikerberos.crypto.hashing import md5, hmac_md5\nfrom minikerberos.crypto.RC4 import RC4\n\n#TODO: RC4 support!\n\n# https://tools.ietf.org/html/draft-raeburn-krb-rijndael-krb-05\n# https://tools.ietf.org/html/rfc2478\n# https://tools.ietf.org/html/draft-ietf-krb-wg-gssapi-cfx-02\n# https://tools.ietf.org/html/rfc4757\n# https://www.rfc-editor.org/errata/rfc4757\n\nGSS_WRAP_HEADER = b'\\x60\\x2b\\x06\\x09\\x2a\\x86\\x48\\x86\\xf7\\x12\\x01\\x02\\x02'\nGSS_WRAP_HEADER_OID = b'\\x60\\x2b\\x06\\x09\\x2a\\x86\\x48\\x86\\xf7\\x12\\x01\\x02\\x02'\n\nclass KRB5_MECH_INDEP_TOKEN:\n\t# https://tools.ietf.org/html/rfc2743#page-81\n\t# Mechanism-Independent Token Format\n\n\tdef __init__(self, data, oid, remlen = None):\n\t\tself.oid = oid\n\t\tself.data = data\n\n\t\t#dont set this\n\t\tself.length = remlen\n\t\n\t@staticmethod\n\tdef from_bytes(data):\n\t\treturn KRB5_MECH_INDEP_TOKEN.from_buffer(io.BytesIO(data))\n\t\n\t@staticmethod\n\tdef from_buffer(buff):\n\t\t\n\t\tstart = buff.read(1)\n\t\tif start != b'\\x60':\n\t\t\traise Exception('Incorrect token data!')\n\t\tremaining_length = KRB5_MECH_INDEP_TOKEN.decode_length_buffer(buff)\n\t\ttoken_data = buff.read(remaining_length)\n\t\t\n\t\tbuff = io.BytesIO(token_data)\n\t\tpos = buff.tell()\n\t\tbuff.read(1)\n\t\toid_length = KRB5_MECH_INDEP_TOKEN.decode_length_buffer(buff)\n\t\tbuff.seek(pos)\n\t\ttoken_oid = ObjectIdentifier.load(buff.read(oid_length+2))\n\t\t\n\t\treturn KRB5_MECH_INDEP_TOKEN(buff.read(), str(token_oid), remlen = remaining_length)\n\t\t\n\t@staticmethod\n\tdef decode_length_buffer(buff):\n\t\tlf = buff.read(1)[0]\n\t\tif lf <= 127:\n\t\t\tlength = lf\n\t\telse:\n\t\t\tbcount = lf - 128\n\t\t\tlength = int.from_bytes(buff.read(bcount), byteorder = 'big', signed = False)\n\t\treturn length\n\t\t\n\t@staticmethod\n\tdef 
encode_length(length):\n\t\tif length <= 127:\n\t\t\treturn length.to_bytes(1, byteorder = 'big', signed = False)\n\t\telse:\n\t\t\tlb = length.to_bytes((length.bit_length() + 7) // 8, 'big')\n\t\t\treturn (128+len(lb)).to_bytes(1, byteorder = 'big', signed = False) + lb\n\t\t\n\t\t\n\tdef to_bytes(self):\n\t\tt = ObjectIdentifier(self.oid).dump() + self.data\n\t\tt = b'\\x60' + KRB5_MECH_INDEP_TOKEN.encode_length(len(t)) + t\n\t\treturn t[:-len(self.data)] , self.data\n\n\nclass GSSAPIFlags(enum.IntFlag):\n\tGSS_C_DCE_STYLE = 0x1000\n\tGSS_C_DELEG_FLAG = 1\n\tGSS_C_MUTUAL_FLAG = 2\n\tGSS_C_REPLAY_FLAG = 4\n\tGSS_C_SEQUENCE_FLAG = 8\n\tGSS_C_CONF_FLAG = 0x10\n\tGSS_C_INTEG_FLAG = 0x20\n\t\nclass KG_USAGE(enum.Enum):\n\tACCEPTOR_SEAL = 22\n\tACCEPTOR_SIGN = 23\n\tINITIATOR_SEAL = 24\n\tINITIATOR_SIGN = 25\n\t\nclass FlagsField(enum.IntFlag):\n\tSentByAcceptor = 0\n\tSealed = 2\n\tAcceptorSubkey = 4\n\n# https://tools.ietf.org/html/rfc4757 (7.2)\nclass GSSMIC_RC4:\n\tdef __init__(self):\n\t\tself.TOK_ID = b'\\x01\\x01'\n\t\tself.SGN_ALG = b'\\x11\\x00' #HMAC\n\t\tself.Filler = b'\\xff'*4\n\t\tself.SND_SEQ = None\n\t\tself.SGN_CKSUM = None\n\n\t@staticmethod\n\tdef from_bytes(data):\n\t\treturn GSSMIC_RC4.from_buffer(io.BytesIO(data))\n\t\n\t@staticmethod\n\tdef from_buffer(buff):\n\t\tmic = GSSMIC_RC4()\n\t\tmic.TOK_ID = buff.read(2)\n\t\tmic.SGN_ALG = buff.read(2)\n\t\tmic.Filler = buff.read(4)\n\t\tmic.SND_SEQ = buff.read(8)\n\t\tmic.SGN_CKSUM = buff.read(8)\n\t\t\n\t\treturn mic\n\t\t\n\tdef to_bytes(self):\n\t\tt = self.TOK_ID\n\t\tt += self.SGN_ALG\n\t\tt += self.Filler\n\t\tt += self.SND_SEQ\n\t\tif self.SGN_CKSUM is not None:\n\t\t\tt += self.SGN_CKSUM\n\t\n\t\treturn t\n\t\t\nclass GSSWRAP_RC4:\n\tdef __init__(self):\n\t\tself.TOK_ID = b'\\x02\\x01'\n\t\tself.SGN_ALG = b'\\x11\\x00' #HMAC\n\t\tself.SEAL_ALG = None\n\t\tself.Filler = b'\\xFF' * 2\n\t\tself.SND_SEQ = None\n\t\tself.SGN_CKSUM = None\n\t\tself.Confounder = None\n\n\tdef __str__(self):\n\t\tt = 'GSSWRAP_RC4\\r\\n'\n\t\tt += 'TOK_ID : %s\\r\\n' % self.TOK_ID.hex()\n\t\tt += 'SGN_ALG : %s\\r\\n' % self.SGN_ALG.hex()\n\t\tt += 'SEAL_ALG : %s\\r\\n' % self.SEAL_ALG.hex()\n\t\tt += 'Filler : %s\\r\\n' % self.Filler.hex()\n\t\tt += 'SND_SEQ : %s\\r\\n' % self.SND_SEQ.hex()\n\t\tt += 'SGN_CKSUM : %s\\r\\n' % self.SGN_CKSUM.hex()\n\t\tt += 'Confounder : %s\\r\\n' % self.Confounder.hex()\n\t\treturn t\n\t\n\t@staticmethod\n\tdef from_bytes(data):\n\t\treturn GSSWRAP_RC4.from_buffer(io.BytesIO(data))\n\t\n\t@staticmethod\n\tdef from_buffer(buff):\n\t\twrap = GSSWRAP_RC4()\n\t\twrap.TOK_ID = buff.read(2)\n\t\twrap.SGN_ALG = buff.read(2)\n\t\twrap.SEAL_ALG = buff.read(2)\n\t\twrap.Filler = buff.read(2)\n\t\twrap.SND_SEQ = buff.read(8)\n\t\twrap.SGN_CKSUM = buff.read(8)\n\t\twrap.Confounder = buff.read(8)\n\t\t\n\t\treturn wrap\n\t\n\tdef to_bytes(self):\n\t\tt = self.TOK_ID\n\t\tt += self.SGN_ALG\n\t\tt += self.SEAL_ALG\n\t\tt += self.Filler\n\t\tt += self.SND_SEQ\n\t\t\n\t\tif self.SGN_CKSUM:\n\t\t\tt += self.SGN_CKSUM\n\t\t\tif self.Confounder:\n\t\t\t\tt += self.Confounder\n\t\t\n\t\n\t\treturn t\n\t\t\nclass GSSAPI_RC4:\n\tdef __init__(self, session_key):\n\t\tself.session_key = session_key\n\t\n\tdef GSS_GetMIC(self, data, sequenceNumber, direction = 'init'):\n\t\traise Exception('Not tested! 
Sure it needs some changes')\n\t\tGSS_GETMIC_HEADER = b'\\x60\\x23\\x06\\x09\\x2a\\x86\\x48\\x86\\xf7\\x12\\x01\\x02\\x02'\n\t\t\n\t\t# Let's pad the data\n\t\tpad = (4 - (len(data) % 4)) & 0x3\n\t\tpadStr = bytes([pad]) * pad\n\t\tdata += padStr\n\t\t\n\t\tmic = GSSMIC_RC4()\n\t\t\n\t\tif direction == 'init':\n\t\t\tmic.SND_SEQ = sequenceNumber.to_bytes(4, 'big', signed = False) + b'\\x00'*4\n\t\telse:\n\t\t\tmic.SND_SEQ = sequenceNumber.to_bytes(4, 'big', signed = False) + b'\\xff'*4\n\t\t\n\t\tKsign_ctx = hmac_md5(self.session_key.contents)\n\t\tKsign_ctx.update(b'signaturekey\\0')\n\t\tKsign = Ksign_ctx.digest()\n\t\t\n\t\tid = 15\n\t\ttemp = md5( id.to_bytes(4, 'little', signed = False) + mic.to_bytes()[:8] ).digest()\n\t\tchksum_ctx = hmac_md5(Ksign)\n\t\tchksum_ctx.update(temp)\n\t\tmic.SGN_CKSUM = chksum_ctx.digest()[:8]\n\t\t\n\t\tid = 0\n\t\ttemp = hmac_md5(self.session_key.contents)\n\t\ttemp.update(id.to_bytes(4, 'little', signed = False))\n\t\t\n\t\tKseq_ctx = hmac_md5(temp.digest())\n\t\tKseq_ctx.update(mic.SGN_CKSUM)\n\t\tKseq = Kseq_ctx.digest()\n\t\t\n\t\tmic.SGN_CKSUM = RC4(Kseq).encrypt(mic.SND_SEQ)\n\t\t\n\t\treturn GSS_GETMIC_HEADER + mic.to_bytes()\n\t\t\n\t\n\tdef GSS_Wrap(self, data, seq_num, direction = 'init', encrypt=True, cofounder = None):\n\t\t#direction = 'a'\n\t\t#seq_num = 0\n\t\t#print('[GSS_Wrap] data: %s' % data)\n\t\t#print('[GSS_Wrap] seq_num: %s' % seq_num.to_bytes(4, 'big', signed = False).hex())\n\t\t#print('[GSS_Wrap] direction: %s' % direction)\n\t\t#print('[GSS_Wrap] encrypt: %s' % encrypt)\n\t\t#\n\t\t#print('[GSS_Wrap] auth_data: %s' % auth_data)\n\t\t\n\t\t#pad = 0\n\t\tif encrypt is True:\n\t\t\tdata += b'\\x01'\n\t\t\t#pad = (8 - (len(data) % 8)) & 0x7\n\t\t\t#padStr = bytes([pad]) * pad\n\t\t\t#data += padStr\n\t\t\t#\n\t\t\t##data += b'\\x08' * 8\n\t\t\t#print('[GSS_Wrap] pad: %s' % pad)\n\t\t\t#print('[GSS_Wrap] data padded: %s' % data)\n\t\t\n\n\t\ttoken = GSSWRAP_RC4()\n\t\ttoken.SEAL_ALG = b'\\x10\\x00' # RC4\n\t\t\n\t\tif direction == 'init':\n\t\t\ttoken.SND_SEQ = seq_num.to_bytes(4, 'big', signed = False) + b'\\x00'*4\n\t\telse:\n\t\t\ttoken.SND_SEQ = seq_num.to_bytes(4, 'big', signed = False) + b'\\xff'*4\n\t\t\n\t\ttoken.Confounder = os.urandom(8)\n\t\t#if cofounder is not None:\n\t\t#\ttoken.Confounder = cofounder\n\t\t#\t#testing purposes only, pls remove\n\t\t\t\n\t\t\n\t\ttemp = hmac_md5(self.session_key.contents)\n\t\ttemp.update(b'signaturekey\\0')\n\t\tKsign = temp.digest()\n\t\t\n\t\tid = 13\n\t\tSgn_Cksum = md5(id.to_bytes(4, 'little', signed = False) + token.to_bytes()[:8] + token.Confounder + data).digest()\n\t\t\n\t\tklocal = b''\n\t\tfor b in self.session_key.contents:\n\t\t\tklocal += bytes([b ^ 0xf0])\n\n\t\tid = 0\n\t\ttemp = hmac_md5(klocal)\n\t\ttemp.update(id.to_bytes(4, 'little', signed = False))\n\t\ttemp = hmac_md5(temp.digest())\n\t\ttemp.update(seq_num.to_bytes(4, 'big', signed = False))\n\t\tKcrypt = temp.digest()\n\n\t\ttemp = hmac_md5(Ksign)\n\t\ttemp.update(Sgn_Cksum)\n\t\ttoken.SGN_CKSUM = temp.digest()[:8]\n\t\t\n\t\tid = 0\n\t\ttemp = hmac_md5(self.session_key.contents)\n\t\ttemp.update(id.to_bytes(4, 'little', signed = False))\n\t\ttemp = hmac_md5(temp.digest())\n\t\ttemp.update(token.SGN_CKSUM)\n\t\tKseq = temp.digest()\n\t\t\n\t\ttoken.SND_SEQ = RC4(Kseq).encrypt(token.SND_SEQ)\n\t\t\n\t\t\n\t\t#if auth_data is not None:\n\t\tif encrypt is False:\n\t\t\t#print('Unwrap sessionkey: %s' % self.session_key.contents.hex())\n\t\t\t#print('Unwrap data : %s' % data.hex())\n\n\t\t\tsspi_wrap = 
KRB5_MECH_INDEP_TOKEN.from_bytes(data)\n\n\t\t\thdr = sspi_wrap.data[:32]\n\t\t\tdata = sspi_wrap.data[32:]\n\n\t\t\twrap = GSSWRAP_RC4.from_bytes(hdr)\n\t\t\t\n\t\t\tid = 0\n\t\t\ttemp = hmac_md5(self.session_key.contents)\n\t\t\ttemp.update(id.to_bytes(4, 'little', signed = False))\n\t\t\ttemp = hmac_md5(temp.digest())\n\t\t\ttemp.update(wrap.SGN_CKSUM)\n\t\t\tKseq = temp.digest()\n\t\t\t\n\t\t\tsnd_seq = RC4(Kseq).encrypt(wrap.SND_SEQ)\n\t\t\t\n\t\t\tid = 0\n\t\t\ttemp = hmac_md5(klocal)\n\t\t\ttemp.update(id.to_bytes(4, 'little', signed = False))\n\t\t\ttemp = hmac_md5(temp.digest())\n\t\t\ttemp.update(snd_seq[:4])\n\t\t\tKcrypt = temp.digest()\n\t\t\t\n\t\t\trc4 = RC4(Kcrypt)\n\t\t\tdec_cofounder = rc4.decrypt(wrap.Confounder)\n\t\t\tdec_data = rc4.decrypt(data)\n\n\t\t\tid = 13\n\t\t\tSgn_Cksum_calc = md5(id.to_bytes(4, 'little', signed = False) + wrap.to_bytes()[:8] + dec_cofounder + dec_data).digest()\n\n\t\t\ttemp = hmac_md5(Ksign)\n\t\t\ttemp.update(Sgn_Cksum_calc)\n\t\t\tSgn_Cksum_calc = temp.digest()[:8]\n\n\t\t\tif wrap.SGN_CKSUM != Sgn_Cksum_calc[:8]:\n\t\t\t\treturn None, Exception('Integrity verification failed')\n\n\t\t\tpad = 1\n\t\t\treturn dec_data[:-pad], None\n\t\t\t\n\t\telif encrypt is True:\n\t\t\trc4 = RC4(Kcrypt)\n\t\t\ttoken.Confounder = rc4.encrypt(token.Confounder)\n\t\t\tcipherText = rc4.encrypt(data)\n\t\t\tfinalData, cipherText = KRB5_MECH_INDEP_TOKEN( token.to_bytes() + cipherText, '1.2.840.113554.1.2.2' ).to_bytes()\n\n\n\t\t\t#print('cipherText %s' % cipherText.hex())\n\t\t\t#print('finalData %s' % finalData.hex())\n\t\t\t#print('sessionkey %s' % self.session_key.contents.hex())\n\t\t\treturn cipherText, finalData\n\t\t\n\n\tdef GSS_Unwrap(self, data, seq_num, direction='init'):\n\t\t#print('GSS_Unwrap data : %s' % data)\n\t\tdec_data, err = self.GSS_Wrap(data, seq_num, direction=direction, encrypt = False)\n\t\t#print('GSS_Unwrap decrypted data : %s' % dec_data)\n\t\treturn dec_data, err\n\t\n# 4.2.6.1. MIC Tokens\nclass GSSMIC:\n\tdef __init__(self):\n\t\tself.TOK_ID = b'\\x04\\x04'\n\t\tself.Flags = None\n\t\tself.Filler = b'\\xFF' * 5\n\t\tself.SND_SEQ = None\n\t\tself.SGN_CKSUM = None\n\t\t\n\t@staticmethod\n\tdef from_bytes(data):\n\t\treturn GSSMIC.from_buffer(io.BytesIO(data))\n\t\n\t@staticmethod\n\tdef from_buffer(buff):\n\t\tm = GSSMIC()\n\t\tm.TOK_ID = buff.read(2)\n\t\tm.Flags = FlagsField(int.from_bytes(buff.read(1), 'big', signed = False))\n\t\tm.Filler = buff.read(5)\n\t\tm.SND_SEQ = int.from_bytes(buff.read(8), 'big', signed = False)\n\t\tm.SGN_CKSUM = buff.read() #should know the size based on the algo!\n\t\treturn m\n\t\t\n\tdef to_bytes(self):\n\t\tt = self.TOK_ID\n\t\tt += self.Flags.to_bytes(1, 'big', signed = False)\n\t\tt += self.Filler\n\t\tt += self.SND_SEQ.to_bytes(8, 'big', signed = False)\n\t\tif self.SGN_CKSUM is not None:\n\t\t\tt += self.SGN_CKSUM\n\t\t\n\t\treturn t\n\t\t\n# 4.2.6.2. 
Wrap Tokens\nclass GSSWrapToken:\n\tdef __init__(self):\n\t\tself.TOK_ID = b'\\x05\\x04'\n\t\tself.Flags = None\n\t\tself.Filler = b'\\xFF'\n\t\tself.EC = None\n\t\tself.RRC = None\n\t\tself.SND_SEQ = None\n\t\tself.Data = None\n\t\t\n\t@staticmethod\n\tdef from_bytes(data):\n\t\treturn GSSWrapToken.from_buffer(io.BytesIO(data))\n\t\n\t@staticmethod\n\tdef from_buffer(buff):\n\t\tm = GSSWrapToken()\n\t\tm.TOK_ID = buff.read(2)\n\t\tm.Flags = FlagsField(int.from_bytes(buff.read(1), 'big', signed = False))\n\t\tm.Filler = buff.read(1)\n\t\tm.EC = int.from_bytes(buff.read(2), 'big', signed = False)\n\t\tm.RRC = int.from_bytes(buff.read(2), 'big', signed = False)\n\t\tm.SND_SEQ = int.from_bytes(buff.read(8), 'big', signed = False)\n\t\treturn m\n\t\t\n\tdef to_bytes(self):\n\t\tt = self.TOK_ID\n\t\tt += self.Flags.to_bytes(1, 'big', signed = False)\n\t\tt += self.Filler\n\t\tt += self.EC.to_bytes(2, 'big', signed = False)\n\t\tt += self.RRC.to_bytes(2, 'big', signed = False)\n\t\tt += self.SND_SEQ.to_bytes(8, 'big', signed = False)\n\t\tif self.Data is not None:\n\t\t\tt += self.Data\n\t\t\n\t\treturn t\n\t\t\nclass GSSAPI_AES:\n\tdef __init__(self, session_key, cipher_type, checksum_profile):\n\t\tself.session_key = session_key\n\t\tself.checksum_profile = checksum_profile\n\t\tself.cipher_type = cipher_type\n\t\tself.cipher = None\n\t\t\n\tdef rotate(self, data, numBytes):\n\t\tnumBytes %= len(data)\n\t\tleft = len(data) - numBytes\n\t\tresult = data[left:] + data[:left]\n\t\treturn result\n\t\t\n\tdef unrotate(self, data, numBytes):\n\t\tnumBytes %= len(data)\n\t\tresult = data[numBytes:] + data[:numBytes]\n\t\treturn result\n\t\t\n\tdef GSS_GetMIC(self, data, seq_num):\n\t\tpad = (4 - (len(data) % 4)) & 0x3\n\t\tpadStr = bytes([pad]) * pad\n\t\tdata += padStr\n\t\t\n\t\tm = GSSMIC()\n\t\tm.Flags = FlagsField.AcceptorSubkey\n\t\tm.SND_SEQ = seq_num\n\t\tchecksum_profile = self.checksum_profile()\n\t\tm.checksum = checksum_profile.checksum(self.session_key, KG_USAGE.INITIATOR_SIGN.value, data + m.to_bytes()[:16])\n\t\t\n\t\treturn m.to_bytes()\n\t\t\n\tdef GSS_Wrap(self, data, seq_num, use_padding = False):\n\t\t#print('[GSS_Wrap] seq_num: %s' % seq_num.to_bytes(4, 'big', signed = False).hex())\n\t\tcipher = self.cipher_type()\n\t\tpad = 0\n\t\tif use_padding is True:\n\t\t\tpad = ((cipher.blocksize - len(data)) % cipher.blocksize) #(cipher.blocksize - (len(data) % cipher.blocksize)) & 15\n\t\t\tpadStr = b'\\xFF' * pad\n\t\t\tdata += padStr\n\t\t\n\t\tt = GSSWrapToken()\n\t\tt.Flags = FlagsField.AcceptorSubkey | FlagsField.Sealed\n\t\tt.EC = pad\n\t\tt.RRC = 0\n\t\tt.SND_SEQ = seq_num\n\t\t\n\t\t#print('Wrap data: %s' % (data + t.to_bytes()))\n\t\tcipher_text = cipher.encrypt(self.session_key, KG_USAGE.INITIATOR_SEAL.value, data + t.to_bytes(), None)\n\t\tt.RRC = 28 #[RFC4121] section 4.2.5\n\t\tcipher_text = self.rotate(cipher_text, t.RRC + t.EC)\n\t\t\n\t\tret1 = cipher_text\n\t\tret2 = t.to_bytes()\n\n\t\treturn ret1, ret2\n\t\t\n\tdef GSS_Unwrap(self, data, seq_num, direction='init', auth_data = None, use_padding = False):\n\t\t#print('')\n\t\t#print('Unwrap data %s' % data[16:])\n\t\t#print('Unwrap hdr %s' % data[:16])\n\n\t\tcipher = self.cipher_type()\n\t\toriginal_hdr = GSSWrapToken.from_bytes(data[:16])\n\t\trotated = data[16:]\n\t\t\n\t\tcipher_text = self.unrotate(rotated, original_hdr.RRC + original_hdr.EC)\n\t\tplain_text = cipher.decrypt(self.session_key, KG_USAGE.ACCEPTOR_SEAL.value, cipher_text)\n\t\tnew_hdr = GSSWrapToken.from_bytes(plain_text[-16:])\n\n\t\t#signature 
checking\n\t\tnew_hdr.RRC = 28\n\t\tif data[:16] != new_hdr.to_bytes():\n\t\t\treturn None, Exception('GSS_Unwrap signature mismatch!')\n\t\t\n\n\t\t#print('Unwrap checksum: %s' % plain_text[-(original_hdr.EC + 16):])\n\t\t#print('Unwrap orig chk: %s' % original_hdr.to_bytes())\n\t\t#print('Unwrap result 1: %s' % plain_text)\n\t\t#print('Unwrap result : %s' % plain_text[:-(original_hdr.EC + 16)])\n\t\treturn plain_text[:-(original_hdr.EC + 16)], None\n\t\t\ndef get_gssapi(session_key):\n\tif session_key.enctype == encryption.Enctype.AES256:\n\t\treturn GSSAPI_AES(session_key, encryption._AES256CTS, encryption._SHA1AES256)\n\tif session_key.enctype == encryption.Enctype.AES128:\n\t\treturn GSSAPI_AES(session_key, encryption._AES128CTS, encryption._SHA1AES128)\n\telif session_key.enctype == encryption.Enctype.RC4:\n\t\treturn GSSAPI_RC4(session_key)\n\telse:\n\t\traise Exception('Unsupported etype %s' % session_key.enctype)\n\t\t\n\t\t\ndef test():\n\tdata = b'\\xAF' * 1024\n\tsession_key = encryption.Key( encryption.Enctype.AES256 , bytes.fromhex('3e242e91996aadd513ecb1bc2369e44183e08e08c51550fa4b681e77f75ed8e1'))\n\tsequenceNumber = 0\n\tgssapi = get_gssapi(session_key)\n\n\tr1, r2 = gssapi.GSS_Wrap(data, sequenceNumber)\n\tprint(len(r2))\n\tsent = r2 + r1\n\tprint(r1)\n\tret1, ret2 = gssapi.GSS_Unwrap(sent, sequenceNumber)\n\n\tprint(r1.hex())\n\tprint(ret1.hex())\n\n\nif __name__ == '__main__':\n\ttest()", "repo_name": "ryanmrestivo/red-team", "sub_path": "Exploitation-Tools/CrackMapExec/site-packages/msldap/authentication/kerberos/gssapi.py", "file_name": "gssapi.py", "file_ext": "py", "file_size_in_byte": 15226, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 91, "dataset": "github-code", "pt": "21", "api": [{"api_name": "io.BytesIO", "line_number": 36, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 47, "usage_type": "call"}, {"api_name": "asn1crypto.core.ObjectIdentifier.load", "line_number": 52, "usage_type": "call"}, {"api_name": "asn1crypto.core.ObjectIdentifier", "line_number": 52, "usage_type": "name"}, {"api_name": "asn1crypto.core.ObjectIdentifier", "line_number": 76, "usage_type": "call"}, {"api_name": "enum.IntFlag", "line_number": 81, "usage_type": "attribute"}, {"api_name": "enum.Enum", "line_number": 90, "usage_type": "attribute"}, {"api_name": "enum.IntFlag", "line_number": 96, "usage_type": "attribute"}, {"api_name": "io.BytesIO", "line_number": 112, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 158, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.hmac_md5", "line_number": 208, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.md5", "line_number": 213, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.hmac_md5", "line_number": 214, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.hmac_md5", "line_number": 219, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.hmac_md5", "line_number": 222, "usage_type": "call"}, {"api_name": "minikerberos.crypto.RC4.RC4", "line_number": 226, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 261, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.hmac_md5", "line_number": 267, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.md5", "line_number": 272, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.hmac_md5", "line_number": 279, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.hmac_md5", "line_number": 281, "usage_type": "call"}, 
{"api_name": "minikerberos.crypto.hashing.hmac_md5", "line_number": 285, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.hmac_md5", "line_number": 290, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.hmac_md5", "line_number": 292, "usage_type": "call"}, {"api_name": "minikerberos.crypto.RC4.RC4", "line_number": 296, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.hmac_md5", "line_number": 312, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.hmac_md5", "line_number": 314, "usage_type": "call"}, {"api_name": "minikerberos.crypto.RC4.RC4", "line_number": 318, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.hmac_md5", "line_number": 321, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.hmac_md5", "line_number": 323, "usage_type": "call"}, {"api_name": "minikerberos.crypto.RC4.RC4", "line_number": 327, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.md5", "line_number": 332, "usage_type": "call"}, {"api_name": "minikerberos.crypto.hashing.hmac_md5", "line_number": 334, "usage_type": "call"}, {"api_name": "minikerberos.crypto.RC4.RC4", "line_number": 345, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 374, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 409, "usage_type": "call"}, {"api_name": "minikerberos.protocol.encryption.Enctype", "line_number": 516, "usage_type": "attribute"}, {"api_name": "minikerberos.protocol.encryption", "line_number": 516, "usage_type": "name"}, {"api_name": "minikerberos.protocol.encryption._AES256CTS", "line_number": 517, "usage_type": "attribute"}, {"api_name": "minikerberos.protocol.encryption", "line_number": 517, "usage_type": "name"}, {"api_name": "minikerberos.protocol.encryption._SHA1AES256", "line_number": 517, "usage_type": "attribute"}, {"api_name": "minikerberos.protocol.encryption.Enctype", "line_number": 518, "usage_type": "attribute"}, {"api_name": "minikerberos.protocol.encryption", "line_number": 518, "usage_type": "name"}, {"api_name": "minikerberos.protocol.encryption._AES128CTS", "line_number": 519, "usage_type": "attribute"}, {"api_name": "minikerberos.protocol.encryption", "line_number": 519, "usage_type": "name"}, {"api_name": "minikerberos.protocol.encryption._SHA1AES128", "line_number": 519, "usage_type": "attribute"}, {"api_name": "minikerberos.protocol.encryption.Enctype", "line_number": 520, "usage_type": "attribute"}, {"api_name": "minikerberos.protocol.encryption", "line_number": 520, "usage_type": "name"}, {"api_name": "minikerberos.protocol.encryption.Key", "line_number": 528, "usage_type": "call"}, {"api_name": "minikerberos.protocol.encryption", "line_number": 528, "usage_type": "name"}, {"api_name": "minikerberos.protocol.encryption.Enctype", "line_number": 528, "usage_type": "attribute"}]} +{"seq_id": "21624326664", "text": "import os\nimport numpy as np\nimport torch\nimport cv2\nimport scipy.io as sio\nimport torch.utils.data\nimport torchvision.transforms as transforms\nfrom torchvision.datasets.vision import VisionDataset\n\nfrom .feature_extraction.hog import hog\n\nfeature_extraction_dict = {\n 'hog': hog\n}\n\ndef get_dataset(path='../data/', feature_extraction=None, **kwargs):\n # define normalization and transforms\n normalize = transforms.Normalize(mean=[0.449, 0.450, 0.450], std=[0.200, 0.199, 0.199])\n\n if feature_extraction is None:\n train_dataset = SVHNDataset(root=path, split='train', transform=normalize)\n test_dataset = SVHNDataset(root=path, split='test', 
transform=normalize)\n # train_dataset = SVHNDataset(root=path, split='train')\n # test_dataset = SVHNDataset(root=path, split='test')\n elif feature_extraction in feature_extraction_dict:\n feature_extraction_func = feature_extraction_dict[feature_extraction]\n train_dataset = SVHNDataset(root=path, split='train', feature_extraction=feature_extraction_func, **kwargs)\n test_dataset = SVHNDataset(root=path, split='test', feature_extraction=feature_extraction_func, **kwargs)\n else:\n raise NotImplementedError\n\n return train_dataset, test_dataset\n\ndef get_VAE_dataset(path='../data/'):\n # define normalization and transforms\n\n train_dataset = SVHNDataset(root=path, split='train')\n test_dataset = SVHNDataset(root=path, split='test')\n\n train_dataset.data = train_dataset.data * 2 - 1\n test_dataset.data = test_dataset.data * 2 - 1\n\n return train_dataset, test_dataset\n\n\nclass SVHNDataset(VisionDataset):\n def __init__(self, root, split='train', feature_extraction=None, transform=None, target_transform=None, **kwargs):\n super(SVHNDataset, self).__init__(root=root, transform=transform, target_transform=target_transform)\n loaded_mat = sio.loadmat(os.path.join(root, f'{split}_32x32.mat'))\n self.data = loaded_mat['X'].astype(float) / 255\n self.data = self.data.transpose(3, 2, 0, 1)\n\n # if feature_extraction is not None:\n # self.data = feature_extraction(self.data, **kwargs)\n\n if feature_extraction is not None:\n num_orientations = kwargs.get('num_orientations', 9)\n pixels_per_cell = kwargs.get('pixels_per_cell', (4, 4))\n cells_per_block = kwargs.get('cells_per_block', (1, 1))\n\n features_path = os.path.join(root, f'{split}_{feature_extraction.__name__}_{num_orientations}_{pixels_per_cell[0]}_{cells_per_block[0]}.npy')\n if not os.path.exists(features_path):\n self.data = feature_extraction(self.data, **kwargs)\n np.save(features_path, self.data)\n else:\n self.data = np.load(features_path)\n\n self.labels = loaded_mat['y'].astype(np.int64).squeeze()\n np.place(self.labels, self.labels == 10, 0)\n\n def __getitem__(self, index: int):\n img, target = torch.tensor(self.data[index]).float(), int(self.labels[index])\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n return len(self.data)\n\ndef get_numpy_dataset(path='../data/', feature_extraction=None, **kwargs):\n mean = [0.449, 0.450, 0.450]\n std = [0.200, 0.199, 0.199]\n\n if feature_extraction is None:\n train_dataset = SVHNDataset(root=path, split='train')\n test_dataset = SVHNDataset(root=path, split='test')\n for i in range(3):\n train_dataset.data[:, i, :, :] = (train_dataset.data[:, i, :, :] - mean[i]) / std[i]\n test_dataset.data[:, i, :, :] = (test_dataset.data[:, i, :, :] - mean[i]) / std[i]\n # train_dataset.data[:, i, :, :] = train_dataset.data[:, i, :, :] * 2 - 1\n # test_dataset.data[:, i, :, :] = test_dataset.data[:, i, :, :] * 2 - 1\n train_dataset.data = train_dataset.data.reshape(train_dataset.data.shape[0], -1)\n test_dataset.data = test_dataset.data.reshape(test_dataset.data.shape[0], -1)\n elif feature_extraction in feature_extraction_dict:\n feature_extraction_func = feature_extraction_dict[feature_extraction]\n train_dataset = SVHNDataset(root=path, split='train', feature_extraction=feature_extraction_func, **kwargs)\n test_dataset = SVHNDataset(root=path, split='test', feature_extraction=feature_extraction_func, **kwargs)\n else:\n raise NotImplementedError\n\n 
return train_dataset.data, train_dataset.labels, test_dataset.data, test_dataset.labels\n\nif __name__ == '__main__':\n # a, b, c, d = get_numpy_dataset(feature_extraction='hog', num_orientations=9, pixels_per_cell=(4, 4), cells_per_block=(2, 2))\n a, b = get_dataset()", "repo_name": "Dazz993/Digit-Classification", "sub_path": "utils/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 4847, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "feature_extraction.hog.hog", "line_number": 13, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 18, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 18, "usage_type": "name"}, {"api_name": "feature_extraction.hog", "line_number": 20, "usage_type": "name"}, {"api_name": "feature_extraction.hog", "line_number": 25, "usage_type": "name"}, {"api_name": "feature_extraction.hog", "line_number": 26, "usage_type": "name"}, {"api_name": "torchvision.datasets.vision.VisionDataset", "line_number": 46, "usage_type": "name"}, {"api_name": "scipy.io.loadmat", "line_number": 49, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 49, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "feature_extraction.hog", "line_number": 56, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "feature_extraction.hog.__name__", "line_number": 61, "usage_type": "attribute"}, {"api_name": "feature_extraction.hog", "line_number": 61, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "feature_extraction.hog", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.place", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 72, "usage_type": "call"}, {"api_name": "feature_extraction.hog", "line_number": 89, "usage_type": "name"}, {"api_name": "feature_extraction.hog", "line_number": 99, "usage_type": "name"}, {"api_name": "feature_extraction.hog", "line_number": 100, "usage_type": "name"}]} +{"seq_id": "1709691680", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 30 12:23:16 2022\n\n@author: aiuser\n\"\"\"\n\n# pip install openpyxl\n# pip install ipywidgets\n\nimport re \nimport requests\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm_notebook as tqdm\nfrom simhash import Simhash\nimport matplotlib.pyplot as plt\nimport editdistance\n\ndef seed_everything(seed: int):\n import random, os\n import numpy as np\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\nseed_everything(42)\n\n\n# 核心算法\n# MSKTS文本相似度比对算法\n# url = 'https://gist.githubusercontent.com/skywalker0803r/7c00d680d731b99ab549dd40a96006ce/raw/d9bb060a42285053faa5227df521b43f381f1a0b/MSKTS.py'\n# exec(requests.get(url).text)\n\n#from simhash import Simhash\n\nclass MSKTS(object):\n '''\n most similar k text search\n '''\n def __init__(self):\n self.name = 
'most similar k text search'\n \n def fit(self,database):\n self.database = map(lambda x:str(x).upper(), database)\n \n def predict(self,input_data,k=3):\n input_data = input_data.upper()\n score = {}\n for history_data in self.database:\n score[history_data] = Simhash(input_data).distance(Simhash(history_data))\n return sorted(score.items(), key=lambda x:x[1],reverse=False)[:k]\n\n\n\n# help function\n# 只保留英文字母\ndef keep_alpha(str1): \n char = \"\" \n for x in str(str1):\n if x.isalpha(): \n char = \"\".join([char, x])\n return char\n\n# 基于规则之关键字匹配算法\ndef matching(sentence,database,use_X000D=False):\n candidate_list = []\n for word in database:\n if word in sentence: \n candidate_list.append(word)\n if (use_X000D == True) and (len(candidate_list) == 0):\n candidate_list.append(sentence.split('_X000D')[0])\n if len(candidate_list) == 0:\n candidate_list.append('matching函数失效')#1\n return candidate_list\n\n# # string_list中的string若为其他string的\"子集\"则剔除\n# def substringSieve(string_list):\n# string_list.sort(key=lambda s: len(s), reverse=True)\n# out = []\n# for s in string_list:\n# if not any([s in o for o in out]):\n# out.append(s)\n# return out\n\n# string_list中的string若为其他string的\"子集\"则剔除\ndef substringSieve(string_list):\n string_list = [item.strip() for item in string_list]\n string_list.sort(key=lambda s: len(s), reverse=True)\n out = []\n for s in string_list:\n if not any([s in o for o in out]):\n out.append(s)\n return out\n\n# 去除多重空白\ndef remove_multiple_blanks(x):\n for i in range(2,10,1):\n x = x.replace(' '*i,' ')\n return x\n\n# 输入sentence前处理\ndef preprocess_raw_sentence(x):\n x = str(x).upper() # 转大写字串\n x = re.sub('[\\u4e00-\\u9fa5]', '', x) # 去除中文\n x = re.sub(r'[^\\w\\s]','',x) # 去除标点符号\n x = x.replace('\\n', '').replace('\\r', '').replace('\\t', '') # 去除换行符号\n str.strip(x) # 移除左右空白\n x = remove_multiple_blanks(x) # 去除多重空白\n x = ' ' + x + ' '# 出现在头的 就不可能对到前后加空格的 这种情形要想想怎么对照(加上左右空白)\n return x\n\n# 字串长度小于3的单词前后加空白\ndef add_space(x):\n if (' ' not in x) and (len(x)<=3): \n return ' ' + x + ' '\n else:\n return x\n\n# 产品后处理\ndef product_name_postprocess(x):\n x = str(x).split('(')[0] # 撷取括号前面的字串\n x = str(x).upper() # 转大写字串\n x = re.sub(r'[^\\w\\s]','',x) # 去除标点符号\n x = x.strip() # 去除空白\n x = add_space(x)# 字串长度小于3的单词前后加空白\n return x\n\n# 基于关键字比对方法的预测函数\ndef predict_keyword(title,test_df,Unrecognized,input_col,database,output_col,use_X000D=False):\n result = []\n for i in tqdm(test_df.index):\n candidate_list = matching(\n sentence = test_df.loc[i,input_col],\n database = set(database) - set(Unrecognized),\n use_X000D = use_X000D\n )\n result.append(substringSieve(candidate_list))\n test_df[output_col] = result\n return test_df\n\n# 取得dataframe的空列表索引\ndef get_empty_list_idx(df,col):\n error_idx = []\n for idx,name in enumerate(df[col].values.tolist()):\n if len(name) == 0:\n error_idx.append(idx)\n return error_idx\n\n# 公司英文名称模糊比对函数\ndef 公司英文名称模糊比对函数(input_data,公司宝典):\n # 去[]\n input_data = input_data[0]\n # 去尾缀\n for 尾缀 in 公司宝典['尾缀']:\n input_data = input_data.replace(尾缀,'')\n # 去空白\n input_data = input_data.strip()\n # 几种意外情况\n if input_data == 'not find2':#2\n return [input_data]\n if input_data.encode('utf-8').isalpha() == False:\n return [input_data]\n # 模糊搜索最相似公司\n score = {}\n for history_data in 公司宝典['公司英文名称']:\n score[history_data] = editdistance.eval(input_data,history_data)\n return min(score,key=score.get)\n\n# 公司英文名称2代号函数\ndef 公司英文名称2代号函数(input_data,公司宝典):\n score = {}\n for history_data in 公司宝典['公司英文名称']:\n score[history_data] = 
editdistance.eval(input_data,history_data)\n return 公司宝典.loc[公司宝典['公司英文名称']==min(score,key=score.get),'代号']\n\n\n\n\n\n# 载入数据\n# 历史数据库\ndatabase = pd.read_excel('data/combined_excel.xlsx')\n# 新的测试数据\ntest_data = pd.read_csv('data/测试数据/0927到2022.csv')\n\n# 读取\"产品名\"宝典\n# 品名宝典 = pd.read_excel('data/宝典/宝典人工处理后/宝典.v8.202111202.xlsx',engine='openpyxl')[['CODIV','DIVNM','ITEMNM']]\n品名宝典 = pd.read_excel('data/宝典/宝典人工处理后/宝典.v9.xlsx',engine='openpyxl')[['CODIV','DIVNM','ITEMNM']]\n品名宝典 = 品名宝典.rename(columns={'ITEMNM':'品名','DIVNM':'公司事业部门','CODIV':'公司代号'})\n品名宝典['品名'] = 品名宝典['品名'].apply(lambda x:product_name_postprocess(x))\n\n# 读取\"开状人\"宝典\n开状人宝典 = pd.read_csv('data/宝典/开状人宝典.csv')\n\n# 读取\"公司\"宝典\n#公司宝典 = pd.read_csv('data/宝典/公司宝典加尾缀.csv',index_col=0).astype(str).reset_index(drop=True)\n公司宝典 = pd.read_csv('data/宝典/公司宝典加尾缀v2.csv',index_col=0, encoding='ANSI').astype(str).reset_index(drop=True)\n\n\n\n\n\n\n大公司列表 = []\nfor i in 公司宝典['代号']:\n if (len(i) == 1):\n 大公司列表.append(i)\n大公司列表\n\n\n小公司列表 = []\nfor i in 公司宝典['代号']:\n if (len(i) == 2) and (i[0].isalpha()) and (i[1].isalpha()):\n 小公司列表.append(i)\n#小公司列表\n小公司列表.append('J7')\n\n\n\n\n# 按照时间排序资料\ndef sort_by_form(df):\n df = df.sort_values(by='from')\n df = df.reset_index(drop=True)\n return df\ndatabase = sort_by_form(database)\ntest_data = sort_by_form(test_data)\n\n\n\n# 定义字段\n产品名输入 = '45A' #产品名\n开状人输入 = '50' #开状人\n受益人输入 = '59' #受益人\n开状银行输入 = 'LTADDRESS.1' #银行输入\n输出字段 = ['产品名','开状人','受益人','开状银行']\n输入字段 = ['45A','50','59','LTADDRESS.1']\n\n\n\n# 预处理函数\n# 针对模型输入做预处理\ndef 预处理(df):\n 产品名输入 = '45A' #产品名\n 开状人输入 = '50' #开状人\n 受益人输入 = '59' #受益人\n 开状银行输入 = 'LTADDRESS.1' #银行输入\n for i in [产品名输入,开状人输入,受益人输入]:\n df[i] = df[i].apply(lambda x:preprocess_raw_sentence(x))\n return df\n\n\n\n\n\n# 抽特征函数\ndef 抽特征(df,品名宝典=None,开状人宝典=None,公司宝典=None):\n # 预测产品(利用品名宝典)\n df = predict_keyword(\n title = '正在预测产品',\n test_df = df,\n Unrecognized = ['PE','MA','EA','GRADE','INA','PACK','PP','PA','']+[' '*i for i in range(1,10,1)],\n input_col = 产品名输入,\n database = list(map(lambda x:str(x).upper(),品名宝典['品名'].values.tolist())),\n output_col = '产品名',\n use_X000D = False,\n )\n\n # 预测开状人(善用X000D)\n df = predict_keyword(\n title = '正在预测开状人',\n test_df = df,\n Unrecognized = ['']+[' '*i for i in range(1,10,1)],\n input_col = 开状人输入,\n database = list(map(lambda x:str(x).upper(),开状人宝典['开状人'].values.tolist())),\n output_col = '开状人',\n use_X000D = True,\n )\n\n # 搜索法预测公司(受益人)\n df = predict_keyword(\n title = '正在预测受益人',\n test_df = df,\n Unrecognized = ['']+[' '*i for i in range(1,10,1)],\n input_col = 受益人输入,\n database = list(map(lambda x:str(x).upper(),公司宝典['公司英文名称'].values.tolist())),\n output_col = '受益人',\n use_X000D = True,\n )\n # 受益人(公司)模糊比对,确保跟宝典上写的一致\n df['受益人'] = df['受益人'].apply(lambda x:公司英文名称模糊比对函数(x,公司宝典))\n\n # 预测开状银行靠规则比对筛选前8码即可\n df['开状银行'] = df[开状银行输入].apply(lambda x:str(x)[:8])\n return df\n\n\n\n# 准备训练资料\ndatabase = 抽特征(预处理(database),品名宝典=品名宝典,开状人宝典=开状人宝典,公司宝典=公司宝典)\nfor i in 输出字段:\n number = get_empty_list_idx(df=database,col=i)\n print(i,'空列表数量:',len(number))\ndatabase[输出字段].tail(5)\n\n# 准备测试资料\ntest_data = 抽特征(预处理(test_data),品名宝典=品名宝典,开状人宝典=开状人宝典,公司宝典=公司宝典)\nfor i in 输出字段:\n number = get_empty_list_idx(df=test_data,col=i)\n print(i,'空列表数量:',len(number))\ntest_data[输出字段].tail(5)\n\n\n\n\n\n\n\n\n\n\n# 模型测试\ndef 根据特定字段和索引给出候选答案清单(col,idx,k,database_size=100,database=None,test_data=None):\n # 判断是否为空列表\n if len(test_data[col][idx]) == 0:\n return []\n # 预处理\n database['处理过的资料'] = (database[col]).apply(keep_alpha)\n test_data['处理过的资料'] = 
(test_data[col]).apply(keep_alpha)\n # 建立模型\n model = MSKTS()\n model.fit(list(set(database['处理过的资料'].sample(database_size).values.tolist()) - set(['']+[' '*i for i in range(1,10,1)])))\n # 产生预测答案清单\n predict_answer = [i for i in model.predict(test_data['处理过的资料'][idx],k=k)]\n # 预测最相似文本\n 预测最相似文本 = [i[0] for i in predict_answer]\n 预测EXPNO前两码 = database.loc[database['处理过的资料'].isin(预测最相似文本),'EXPNO'].dropna().apply(lambda x:str(x)[:2]).values.tolist()\n # 相似度距离\n 相似度距离 = [i[1] for i in predict_answer]\n # 预测完整EXPNO\n 预测完整EXPNO = database.loc[database['处理过的资料'].isin(预测最相似文本),'EXPNO'].dropna().apply(lambda x:str(x)[:]).values.tolist()\n # 最相似前案\n 最相似前案 = database.loc[database['处理过的资料'].isin(预测最相似文本),col].dropna().apply(lambda x:str(x)[:]).values.tolist()\n # 最相似前案时间\n 最相似前案时间 = database.loc[database['处理过的资料'].isin(预测最相似文本),'from'].dropna().apply(lambda x:str(x)[:]).values.tolist()\n return 预测EXPNO前两码,相似度距离,预测完整EXPNO,最相似前案,最相似前案时间\nissue_idx = test_data['受益人'][test_data['受益人'].apply(lambda x:str(x)[0])=='not find3'].index#3\ntest_data.loc[issue_idx,['59','受益人']]\n\ntest_data[输出字段].head()\n\n\n\n\n# 模型测试\ndef 根据特定字段和索引给出候选答案清单v2(col,idx,k,database_size=100,database=None,test_data=None):\n # 判断是否为空列表\n if len(test_data[col][idx]) == 0:\n return []\n # 预处理\n database['处理过的资料'] = (database[col]).apply(keep_alpha)\n test_data['处理过的资料'] = (test_data[col]).apply(keep_alpha)\n # 建立模型\n model = MSKTS()\n model.fit(list(set(database['处理过的资料'].sample(database_size).values.tolist()) - set(['']+[' '*i for i in range(1,10,1)])))\n # 产生预测答案清单\n predict_answer = [i for i in model.predict(test_data['处理过的资料'][idx],k=k)]\n # 预测最相似文本\n 预测最相似文本 = [i[0] for i in predict_answer]\n 预测EXPNO前两码 = database.loc[database['处理过的资料'].isin(预测最相似文本),'EXPNO'].dropna().apply(lambda x:str(x)[:2]).values.tolist()\n # 相似度距离\n 相似度距离 = [i[1] for i in predict_answer]\n # 预测完整EXPNO\n 预测完整EXPNO = database.loc[database['处理过的资料'].isin(预测最相似文本),'EXPNO'].dropna().apply(lambda x:str(x)[:]).values.tolist()\n # 最相似前案\n 最相似前案 = database.loc[database['处理过的资料'].isin(预测最相似文本),col].dropna().apply(lambda x:str(x)[:]).values.tolist()\n # 最相似前案时间\n 最相似前案时间 = database.loc[database['处理过的资料'].isin(预测最相似文本),'from'].dropna().apply(lambda x:str(x)[:]).values.tolist()\n return 预测EXPNO前两码,相似度距离,预测完整EXPNO,最相似前案,最相似前案时间\n\n\n\n\ndef 根据受益人限缩database(database,受益人,公司宝典):\n 代号 = 公司英文名称2代号函数(受益人,公司宝典).values[0]\n cond = database['EXPNO'].apply(lambda x:str(x)[0]) == str(代号)[0]\n return database.loc[cond,:]\n\nA = test_data.loc[0,'受益人'][0]\nprint(A)\n公司英文名称2代号函数(A,公司宝典).values[0]\n根据受益人限缩database(database,A,公司宝典)[['受益人','EXPNO']]\n\n# def 根据受益人限缩品名宝典(品名宝典,受益人,公司宝典):\n# 代号 = 公司英文名称2代号函数(受益人,公司宝典).values[0]\n# cond = 品名宝典['公司代号'].apply(lambda x:str(x)[0]) == str(代号)[0]\n# return 品名宝典.loc[cond,:]\n\n# A = test_data.loc[1,'受益人'][0]\n# print(A)\n# 公司英文名称2代号函数(A,公司宝典).values[0]\n# 根据受益人限缩品名宝典(品名宝典,A,公司宝典)\n\n\n\n\ndef 目标函数(database_size,database,品名宝典,公司宝典,test_data,test_n=20,k=1):\n # 初始化'预测EXPNO'和correct\n test_data['预测EXPNO'] = None\n test_data['正确'] = None\n correct = []\n # 遍历test_data做推论\n for idx in tqdm(range(test_n)):\n # 先用品名映射到代号\n \n try:\n 受益人 = test_data.loc[idx,'受益人'][0]\n 代号_受益人 = 公司英文名称2代号函数(受益人,公司宝典).values[0]\n # 若为小公司就直接给公司事业部代码\n if 小公司列表.count(代号_受益人) > 0:\n 代号 = 代号_受益人\n else: # 若为大公司\n # 先根将萃取之品名与公司事业部代码抓出来,再判断其公司事业部代码的第一码是否属该受益人\n #restricted_品名宝典 = 根据受益人限缩品名宝典(品名宝典,受益人,公司宝典)\n # 品名可能会有多个,取其公司事业部代码的众数\n 品名_tmp = test_data.loc[idx,'产品名']\n 代号_tmp = []\n 品名 = []\n for i in range(len(品名_tmp)):\n 
#代号.append(dict(zip(restricted_品名宝典['品名'],restricted_品名宝典['公司代号']))[品名[i]])\n                    #if dict(zip(品名宝典['品名'],品名宝典['公司代号']))[add_space(品名_tmp[i])][:1] == 代号_受益人:\n                    #代号_tmp.append(dict(zip(品名宝典['品名'],品名宝典['公司代号']))[品名_tmp[i]])\n                    #品名.append(品名_tmp[i]) \n                    代号_tmp2 = pd.DataFrame(品名宝典.loc[np.where(品名宝典['品名']==add_space(品名_tmp[i])),'公司代号']).reset_index()\n                    for j in range(len(代号_tmp2)):\n                        if str(代号_tmp2.loc[j,'公司代号'])[:1] == str(代号_受益人):\n                            代号_tmp.append(代号_tmp2.loc[j,'公司代号'])\n                            品名.append(品名_tmp[i]) \n                代号 = max(set(代号_tmp), key=代号_tmp.count)\n                # 有无可能将\"品名\"这个物件取代特征萃取所得的test_data.at[idx,'产品名']?或另存在另外一栏\"产品名_受益人限缩\"\n                # if 品名_tmp != 品名:\n                #     #test_data.at[idx,'产品名'] = None\n                #     test_data.at[idx,'产品名'] = set(品名)\n        except:\n            代号 = None\n        \n        # 先根据受益人限缩database\n        try:\n            restricted_database = 根据受益人限缩database(database,test_data.loc[idx,'受益人'][0],公司宝典)\n        except:\n            restricted_database = database\n        # 根据四个字段预测答案\n        o1,d1,e1,n1,t1 = 根据特定字段和索引给出候选答案清单(\n            col='产品名',idx=idx,k=k,\n            database_size = min(database_size,len(restricted_database)),\n            database = restricted_database,\n            test_data = test_data)\n        o2,d2,e2,n2,t2 = 根据特定字段和索引给出候选答案清单(\n            col='开状人',idx=idx,k=k,\n            database_size = min(database_size,len(restricted_database)),\n            database = restricted_database,\n            test_data = test_data)\n        o3,d3,e3,n3,t3 = 根据特定字段和索引给出候选答案清单(\n            col='受益人',idx=idx,k=k,\n            database_size = min(database_size,len(restricted_database)),\n            database = restricted_database,\n            test_data = test_data)\n        o4,d4,e4,n4,t4 = 根据特定字段和索引给出候选答案清单(\n            col='开状银行',idx=idx,k=k,\n            database_size = min(database_size,len(restricted_database)),\n            database = restricted_database,\n            test_data = test_data)\n        # 判断是否为小公司\n        if len(set(o3) & set(小公司列表)) > 0:\n            o = list(set(o3) & set(小公司列表))\n        # 判断是否为大公司\n        elif len(set([str(i[0]) for i in o3]) & set(大公司列表)) > 0:\n            if len([i for i in o1 if str(i[0]) in 大公司列表]) > 0: \n                o = [i for i in o1 if str(i[0]) in 大公司列表]\n            else:\n                o = list(set(o3) & set(大公司列表))\n        # 其他情况\n        else:\n            o = o1 + o2 + o3 + o4\n        # 对o取众数得到ensemble_output\n        try:\n            try:\n                ensemble_output = max(o,key=o.count)\n            except:\n                ensemble_output = max(o1+o2+o3+o4,key=(o1+o2+o3+o4).count)\n        except:\n            ensemble_output = 'not find4'#4\n        # 如果代号 != None 指派代号至ensemble_output\n        if str(代号) != 'nan':\n            ensemble_output = 代号\n        # 指派前案特征至test_data\n        for i in [n1,n2,n3,n4]:\n            if len(i) == 0:\n                i.append('最相似前案dropna后是空值')#5\n        test_data.loc[idx,'前案产品名'] = max(n1,key=n1.count)\n        test_data.loc[idx,'前案开状人'] = max(n2,key=n2.count)\n        test_data.loc[idx,'前案受益人'] = max(n3,key=n3.count)\n        test_data.loc[idx,'前案开状银行'] = max(n4,key=n4.count)\n        # 指派前案时间至test_data\n        try:\n            test_data.loc[idx,'最相似前案时间'] = max(t1+t2+t3+t4,key=(t1+t2+t3+t4).count)\n        except:\n            test_data.loc[idx,'最相似前案时间'] = '最相似前案时间dropna后是空值'\n        # 指派预测值至test_data\n        test_data.loc[idx,'预测EXPNO'] = ensemble_output\n        # 指派距离至test_data\n        test_data.loc[idx,'相似度距离'] = np.sum(d1+d2+d3+d4)\n        # 指派完整EXPNO至test_data\n        完整EXPNO候选清单 =[]\n        for expno in e1+e2+e3+e4:\n            if expno[:2] == ensemble_output:\n                完整EXPNO候选清单.append(expno)\n        try:\n            test_data.loc[idx,'预测完整EXPNO'] = max(完整EXPNO候选清单,key=完整EXPNO候选清单.count)\n        except:\n            try:\n                test_data.loc[idx,'预测完整EXPNO'] = database.loc[database['EXPNO'].apply(lambda x:str(x)[:2]).isin(o1+o2+o3+o4),'EXPNO'].dropna().sample(1).values.tolist()\n            except:\n                try:\n                    test_data.loc[idx,'预测完整EXPNO'] = database.loc[database['EXPNO'].apply(lambda x:str(x)[:2]).isin([ensemble_output]),'EXPNO'].dropna().sample(1).values.tolist()\n                except:\n                    test_data.loc[idx,'预测完整EXPNO'] = None\n        # 当\"预测EXPNO\"是空、且\"预测完整EXPNO\"非空,用\"预测完整EXPNO\"前两码去补空的\"预测EXPNO\"\n        #if 
str(test_data.loc[idx,'预测EXPNO']) == 'nan':\n #test_data.loc[idx,'预测EXPNO'] = test_data.loc[idx,'预测完整EXPNO'].apply(lambda x:str(x)[:2]).values.tolist()[0]\n if (test_data.loc[idx,'预测EXPNO'] == None and test_data.loc[idx,'预测完整EXPNO'] != None):\n test_data.loc[idx,'预测EXPNO'] = test_data.loc[idx,'预测完整EXPNO'][:2]\n # # 用预测完整EXPNO去补预测EXPNO\n # test_data['预测EXPNO'] = test_data['预测EXPNO'].astype(str)\n # test_data['预测完整EXPNO'] = test_data['预测完整EXPNO'].astype(str)\n # error_idx = (test_data['预测EXPNO'].apply(lambda x:x[:1]) != test_data['预测完整EXPNO'].apply(lambda x:x[:1])).values\n # test_data.loc[error_idx,'预测EXPNO'] = test_data.loc[error_idx,'预测完整EXPNO'].apply(lambda x:x[:2])\n # 计算正确率\n if ensemble_output == test_data['推荐公司事业部'][idx]:\n correct.append(True)\n test_data.loc[idx,'正确'] = True\n else:\n correct.append(False)\n test_data.loc[idx,'正确'] = False\n return np.mean(correct),test_data\n\n\nacc,test_data = 目标函数(database_size=len(database),database=database,品名宝典=品名宝典,公司宝典=公司宝典,test_data=test_data,test_n=len(test_data))#test_n=len(test_data))\nprint('正确率:',acc)\n\n\n最终所有必须字段 = 输入字段+输出字段+['相似度距离','from','20','预测EXPNO','预测完整EXPNO','推荐公司事业部',\n '最相似前案时间','前案产品名','前案开状人','前案受益人','前案开状银行']\n# test_data.head(20).loc[test_data['预测EXPNO']!=test_data['推荐公司事业部'],最终所有必须字段]\n\n\n\n#输出\ntest_data[最终所有必须字段].to_excel('predict_result/预测结果.xlsx')\n\n\n\n\n#错误确认\ntest_data.loc[test_data['正确']==False,最终所有必须字段].to_excel('predict_result/错误预测结果.xlsx')\ntest_data.loc[test_data['正确']==False,最终所有必须字段]\n", "repo_name": "skywalker0803r/Letter-of-Credit-Intelligent-Auxiliary-Semantic-Analysis-System", "sub_path": "LC_AD.py", "file_name": "LC_AD.py", "file_ext": "py", "file_size_in_byte": 22005, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "random.seed", "line_number": 23, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 25, "usage_type": "attribute"}, {"api_name": "simhash.Simhash", "line_number": 50, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 104, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 105, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 123, "usage_type": "call"}, {"api_name": "tqdm.tqdm_notebook", "line_number": 131, "usage_type": "call"}, {"api_name": "editdistance.eval", "line_number": 166, "usage_type": "call"}, {"api_name": "editdistance.eval", "line_number": 173, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 182, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 184, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 188, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 193, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 197, "usage_type": "call"}, {"api_name": "tqdm.tqdm_notebook", "line_number": 413, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 434, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 434, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 512, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 545, "usage_type": "call"}]} +{"seq_id": "7408814451", "text": "import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name='biggmodules',\n 
version='0.0.3',\n author='Michele Cantoni',\n author_email='mcantoni81@gmail.com',\n description='Testing installation of Package',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url='https://github.com/misterkilgore/test_repo',\n project_urls = {\n \"Bug Tracker\": \"https://github.com/misterkilgore/test_repo/issues\"\n },\n license='MIT',\n packages=['biggmodules'],\n install_requires=['numpy'],\n)\n", "repo_name": "misterkilgore/test_repo", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 621, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "32439528243", "text": "from tkinter import *\nimport numpy as np\nfrom functools import partial\nimport torch\nfrom PIL import ImageTk, Image\nfrom models import Generator\nimport click\n\nclass LatentCodePanel(Frame):\n def __init__(self, parent, model_path):\n Frame.__init__(self, parent)\n self.parent = parent\n self.model_path = model_path\n self.scales = []\n self.latent = []\n self.labels = []\n self.device = torch.device(\"cuda:0\" if (torch.cuda.is_available()) else \"cpu\")\n print(self.device)\n self.photo = None\n self.initialize()\n\n def initialize(self):\n \"\"\"\n Draw GUI\n \"\"\"\n\n # Load Model\n checkpoint = torch.load(\"\".join([self.model_path, \".pt\"]), map_location=self.device)\n self.latent_vector_size = 100\n netG_params = {\n \"latent_vector_size\": 100,\n \"feature_maps_size\": 64,\n \"channels_num\": 3\n }\n # Crate and load generator\n self.netG = Generator(**netG_params).to(self.device)\n self.netG.load_state_dict(checkpoint[\"netG_state_dict\"])\n print(self.netG)\n self.netG.eval()\n self.latent_code = torch.randn(1, self.latent_vector_size, 1, 1, device=self.device)\n\n self.frame = Frame(self.parent)\n self.frame.grid(row=0, column=0)\n self.frame2 = Frame(self.parent)\n self.frame2.grid(row=0, column=1)\n latent_code_label = Label(self.frame, text=\"Latent Code\")\n latent_code_label.grid(row=0, column=0)\n\n # Create a latent code panel\n for i in range(0, 10):\n var = DoubleVar(value=self.latent_code[0][i])\n self.latent.append(var)\n label = Label(self.frame, textvariable=var)\n self.labels.append(label)\n scale_manipulation_partial = partial(self.scale_manipulation, index=i)\n s = Scale(self.frame, variable=self.latent[i], orient=HORIZONTAL, from_=-1.0, to=1.0, resolution=0.01, showvalue=False, command=scale_manipulation_partial)\n self.scales.append(s)\n self.scales[i].grid(row=i+1, column=0)\n self.labels[i].grid(row=i+1, column=1)\n\n\n def generate_image(self):\n img = self.netG(self.latent_code).detach().numpy().squeeze()\n\n img = np.transpose(img, (1, 2, 0))\n\n canvas = Canvas(self.frame2, width = 256, height = 256) \n canvas.grid(row=0, column=1)\n img *= 255\n img += 127.5\n img = img.astype(\"uint8\")\n img = Image.fromarray(img)\n img = img.resize((256, 256))\n self.photo = ImageTk.PhotoImage(image=img)\n canvas.create_image(20, 20, anchor=NW, image=self.photo)\n \n def scale_manipulation(self, selection, index):\n print(\"Index: {}, selection: {}\".format(index, selection))\n self.latent_code[0][index] = float(selection)\n self.generate_image()\n\n@click.command()\n@click.option(\"--mn\", default=\"first_model\", help=\"Give the name of the model\")\n@click.option(\"--mv\", default=\"0\", help=\"Give the name of the model\")\ndef main(mn, mv):\n root = Tk()\n 
root.title(\"Manipulate GAN's generated image\")\n root.geometry(\"600x600\")\n root.grid_rowconfigure(1, weight=1)\n root.grid_columnconfigure(1, weight=1)\n\n model_path = \"\".join([\"./models/\", mn, \"_\", mv])\n print(f\"Model path: {model_path}\")\n _ = LatentCodePanel(root, model_path)\n\n root.mainloop()\n\nif __name__ == \"__main__\":\n main()", "repo_name": "mindfigment222/synesthesian-dreams", "sub_path": "jupyter_notebooks/graphical_interface.py", "file_name": "graphical_interface.py", "file_ext": "py", "file_size_in_byte": 3446, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "torch.device", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 28, "usage_type": "call"}, {"api_name": "models.Generator", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 40, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 65, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 72, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 72, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 74, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 74, "usage_type": "name"}, {"api_name": "click.command", "line_number": 82, "usage_type": "call"}, {"api_name": "click.option", "line_number": 83, "usage_type": "call"}, {"api_name": "click.option", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "35394627572", "text": "\"\"\"Get Detailed info about any message\nSyntax: .json\"\"\"\nimport io\n\nfrom telebot import CMD_HELP\nfrom telebot.utils import admin_cmd\n\n\n@telebot.on(admin_cmd(pattern=\"json\"))\n@telebot.on(sudo_cmd(pattern=\"json\", allow_sudo=True))\nasync def _(event):\n if event.fwd_from:\n return\n the_real_message = None\n reply_to_id = None\n if event.reply_to_msg_id:\n previous_message = await event.get_reply_message()\n the_real_message = previous_message.stringify()\n reply_to_id = event.reply_to_msg_id\n else:\n the_real_message = event.stringify()\n reply_to_id = event.message.id\n if len(the_real_message) > Config.MAX_MESSAGE_SIZE_LIMIT:\n with io.BytesIO(str.encode(the_real_message)) as out_file:\n out_file.name = \"json.text\"\n await borg.send_file(\n event.chat_id,\n out_file,\n force_document=True,\n allow_cache=False,\n reply_to=reply_to_id,\n )\n await event.delete()\n else:\n await eor(event, \"`{}`\".format(the_real_message))\n\n\nCMD_HELP.update({\"json\": \".json \\nUse - Get json encoding of the mssg.\"})\n", "repo_name": "xditya/TeleBot", "sub_path": "telebot/plugins/json.py", "file_name": "json.py", "file_ext": "py", "file_size_in_byte": 1188, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 197, "dataset": "github-code", "pt": "21", "api": [{"api_name": "io.BytesIO", "line_number": 24, "usage_type": "call"}, {"api_name": "telebot.on", "line_number": 9, "usage_type": "call"}, {"api_name": "telebot.utils.admin_cmd", "line_number": 9, "usage_type": "call"}, {"api_name": "telebot.on", "line_number": 10, "usage_type": "call"}, {"api_name": "telebot.CMD_HELP.update", "line_number": 38, "usage_type": "call"}, {"api_name": "telebot.CMD_HELP", "line_number": 38, "usage_type": "name"}]} 
+{"seq_id": "21498321771", "text": "from typing import List\n\nfrom fastapi import APIRouter, Security\nfrom fastapi.security import HTTPAuthorizationCredentials, HTTPBearer\n\nfrom app.users.schemas import (\n ExceptionResponseSchema,\n GetUserListResponseSchema,\n CreateUserRequestSchema,\n CreateUserResponseSchema,\n)\nfrom app.users.usecases import CreateUserUsecase, GetUserListUsecase\nfrom core.utils import extract_payload_from_token\nfrom core.transaction import Transaction\n\nuser_router = APIRouter()\nsecurity = HTTPBearer()\n\n\n@user_router.get(\n '/',\n response_model=List[GetUserListResponseSchema],\n responses={\n '400': {'model': ExceptionResponseSchema},\n },\n)\nasync def get_user_list(\n limit: int = 10,\n prev: int = None,\n authorization: HTTPAuthorizationCredentials = Security(HTTPBearer()),\n):\n extract_payload_from_token(token=authorization.credentials)\n return await GetUserListUsecase().execute(limit=limit, prev=prev)\n\n\n@user_router.post(\n '/',\n response_model=CreateUserResponseSchema,\n responses={\n '400': {'model': ExceptionResponseSchema},\n },\n)\nasync def create_user(\n request: CreateUserRequestSchema,\n authorization: HTTPAuthorizationCredentials = Security(HTTPBearer()),\n):\n extract_payload_from_token(token=authorization.credentials)\n\n with Transaction():\n user = await CreateUserUsecase().execute(**request.dict())\n return user\n", "repo_name": "MahirMahbub/communication", "sub_path": "communication/backend/app/api/v1/user_view.py", "file_name": "user_view.py", "file_ext": "py", "file_size_in_byte": 1397, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "fastapi.APIRouter", "line_number": 16, "usage_type": "call"}, {"api_name": "fastapi.security.HTTPBearer", "line_number": 17, "usage_type": "call"}, {"api_name": "fastapi.security.HTTPAuthorizationCredentials", "line_number": 30, "usage_type": "name"}, {"api_name": "fastapi.Security", "line_number": 30, "usage_type": "call"}, {"api_name": "fastapi.security.HTTPBearer", "line_number": 30, "usage_type": "call"}, {"api_name": "core.utils.extract_payload_from_token", "line_number": 32, "usage_type": "call"}, {"api_name": "app.users.usecases.GetUserListUsecase", "line_number": 33, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "app.users.schemas.GetUserListResponseSchema", "line_number": 22, "usage_type": "name"}, {"api_name": "app.users.schemas.ExceptionResponseSchema", "line_number": 24, "usage_type": "name"}, {"api_name": "app.users.schemas.CreateUserRequestSchema", "line_number": 44, "usage_type": "name"}, {"api_name": "fastapi.security.HTTPAuthorizationCredentials", "line_number": 45, "usage_type": "name"}, {"api_name": "fastapi.Security", "line_number": 45, "usage_type": "call"}, {"api_name": "fastapi.security.HTTPBearer", "line_number": 45, "usage_type": "call"}, {"api_name": "core.utils.extract_payload_from_token", "line_number": 47, "usage_type": "call"}, {"api_name": "core.transaction.Transaction", "line_number": 49, "usage_type": "call"}, {"api_name": "app.users.usecases.CreateUserUsecase", "line_number": 50, "usage_type": "call"}, {"api_name": "app.users.schemas.CreateUserResponseSchema", "line_number": 38, "usage_type": "name"}, {"api_name": "app.users.schemas.ExceptionResponseSchema", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "8415097718", "text": "from __future__ import print_function\n\nimport os\nimport re\nimport subprocess\nimport 
sys\nimport argparse\nimport errno\n\n\ndef hip(**kwargs):\n return kwargs\n\n\ndef _get_hip_info(dir):\n \"\"\"Search for HIP file upwards starting from dir. \"\"\"\n while dir:\n f = os.path.join(dir, 'HIP')\n if os.path.isfile(f):\n return eval(open(f).read(), globals(), None)\n dir = os.path.dirname(dir)\n\n return { }\n\n\ndef parse():\n # Local compiler may be prefixed by ccache/distcc\n for i, arg in enumerate(sys.argv):\n if arg.startswith('-'):\n break\n compiler = sys.argv[i - 1]\n args = sys.argv[i:]\n\n parser = argparse.ArgumentParser(prog = 'hipcc')\n parser.add_argument('input',\n help = 'the input source file')\n parser.add_argument('-o', dest = 'output',\n help = 'the output object file')\n parser.add_argument('-I', dest = 'include_directories', action = 'append',\n help = 'include directories used for header search')\n parser.add_argument('-MMD', dest = 'MMD', action='store_true', default=False,\n help = 'The gcc -MMD')\n parser.add_argument('-MF', dest = 'MF', type=str,\n help = 'the output dependency file')\n parser.add_argument('-H', dest = 'H', action='store_true', default=False,\n help = 'output inclusion stack')\n compile_info, options = parser.parse_known_args(args)\n\n source = compile_info.input\n target = compile_info.output\n # Here we determine whether to invoke hipcc according\n # to the path of source:\n #\n # 1. If source is located in the source directory, then\n # compile directly with the compiler and args passed in\n #\n # 2. If source is located in the build directory, which\n # means that it's a placeholder generated by blade\n parts = os.path.normpath(target).split('/')\n if not source.startswith(parts[0]):\n return False, sys.argv[1:]\n\n hip_source = source\n parts = os.path.normpath(source).split('/')\n source = '/'.join(parts[1:])\n # Search for HIP in source directory\n hip_info = _get_hip_info(os.path.dirname(source))\n version = hip_info.get('version')\n if not version:\n print(source, 'version not found')\n sys.exit(1)\n # For now ignore absolute directories\n include_directories = ':'.join([d for d in compile_info.include_directories\n if not os.path.isabs(d)])\n options = ' '.join(options)\n return (True,\n (compile_info, source, hip_source, version, target, include_directories, options, compiler))\n\n\ndef makedirs(dir):\n \"\"\"Make directory tree, ignore existance errors.\"\"\"\n try:\n if not os.path.isdir(dir):\n os.makedirs(dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef remove_file(filename):\n try:\n os.remove(filename)\n except OSError:\n pass\n\n\ndef compile(compile_info,\n source,\n hip_source,\n version,\n target,\n include_directories,\n options,\n local_compiler):\n hipcc = os.path.join(os.path.dirname(__file__), 'hipcc')\n hipcc_conf = os.path.join(os.path.dirname(__file__), 'hip_cc.conf')\n log_dir = os.path.join('blade-bin', 'hipcc')\n makedirs(log_dir)\n\n # The hipcc executable need this directory to obtain complete inclusion information.\n makedirs(os.path.dirname(hip_source))\n\n output = subprocess.check_output('%s -dumpversion' % local_compiler,\n stderr = subprocess.STDOUT,\n shell = True)\n compiler_version = output.strip()\n hip_rpc_timeout = 240000 # ms\n cmd = ('%s --source=%s --hip_source=%s --source_version=%s --target=%s '\n '--include_paths=\"%s\" --compile_options=\"%s\" '\n '--compiler=%s --compiler_version=\"%s\" --hip_rpc_timeout=%s '\n '--hip_cc_conf=%s --log_dir=%s --stderrthreshold=3' % (\n hipcc, source, hip_source, version, target,\n include_directories, 
rewrite_options(compile_info, options),\n local_compiler, compiler_version, hip_rpc_timeout,\n hipcc_conf, log_dir))\n p = subprocess.Popen(cmd, shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n process_stderr(stderr, compile_info, target, hip_source)\n return p.returncode\n\n\ndef rewrite_options(compile_info, options):\n # Force add a `-H` option to obtain inclusion stack output\n if compile_info.H or compile_info.MF:\n return options + ' -H'\n return options\n\n\n_INCLUSION_STACK_RE = re.compile(r'\\.+ ')\n\n\ndef process_stderr(stderr, compile_info, target, hip_source):\n headers = []\n new_stderr = []\n for raw_line in stderr.splitlines():\n line = raw_line.rstrip()\n if _INCLUSION_STACK_RE.match(line):\n pos = line.find(' ')\n header = line[line.find(' ') + 1:]\n if not os.path.exists(header): # Remote only, ignore\n continue\n headers.append(header)\n if compile_info.H:\n new_stderr.append(line)\n continue\n if line.startswith('Multiple include guards may be useful for:') and not compile_info.H:\n break\n new_stderr.append(line)\n\n # Reprint the filtered stderr\n sys.stderr.write('\\n'.join(new_stderr))\n\n if not compile_info.MF:\n return\n # Generate a `xxx.o.d` file for the `-MF` option\n with open(compile_info.MF, 'w') as mf:\n first_line = \"%s: %s\" % (target, hip_source)\n if headers:\n first_line += ' \\\\'\n print(first_line, file=mf)\n for index, header in enumerate(headers):\n if compile_info.MMD and header.startswith('/'):\n continue\n if index == len(headers) - 1:\n print(' %s' % header, file=mf)\n else:\n print(' %s \\\\' % header, file=mf)\n\n\nif __name__ == \"__main__\":\n hip, args = parse()\n if hip:\n result = compile(*args)\n else:\n result = subprocess.call(' '.join(args), shell = True)\n sys.exit(result)\n\n", "repo_name": "Tencent/flare", "sub_path": "thirdparty/blade/hip/hip_cc.py", "file_name": "hip_cc.py", "file_ext": "py", "file_size_in_byte": 6274, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1199, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 31, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 32, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.isabs", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": 
"os.path.isdir", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 84, "usage_type": "call"}, {"api_name": "errno.EEXIST", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 113, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 114, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 126, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 126, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 161, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 161, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 185, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 186, "usage_type": "call"}]} +{"seq_id": "30688472651", "text": "#https://adventofcode.com/2022/day/11\r\n#from __future__ import annotations\r\n\r\nfrom typing import Iterator, Callable\r\nimport itertools_recipes as ir\r\nfrom functools import partial\r\nimport operator\r\nimport re\r\nfrom dataclasses import dataclass, field\r\nfrom collections import Counter\r\nfrom math import prod\r\n\r\n\r\n\r\ntest_input=\"\"\"\r\nMonkey 0:\r\n Starting items: 79, 98\r\n Operation: new = old * 19\r\n Test: divisible by 23\r\n If true: throw to monkey 2\r\n If false: throw to monkey 3\r\n\r\nMonkey 1:\r\n Starting items: 54, 65, 75, 74\r\n Operation: new = old + 6\r\n Test: divisible by 19\r\n If true: throw to monkey 2\r\n If false: throw to monkey 0\r\n\r\nMonkey 2:\r\n Starting items: 79, 60, 97\r\n Operation: new = old * old\r\n Test: divisible by 13\r\n If true: throw to monkey 1\r\n If false: throw to monkey 3\r\n\r\nMonkey 3:\r\n Starting items: 74\r\n Operation: new = old + 3\r\n Test: divisible by 17\r\n If true: throw to monkey 0\r\n If false: throw to monkey 1\r\n\"\"\"\r\n\r\ndef make_operation(text:str) -> Callable[[int],int]:\r\n def fun1(x,fun):\r\n return fun(x,x)\r\n for char,op in zip(\"+*\",[operator.add, operator.mul]):\r\n if char in text:\r\n x,y = map(str.strip, text.split(char))\r\n if x==y:\r\n return partial(fun1,fun=op)\r\n return partial(op,int(y))\r\n raise ValueError(f\"Unexpedted token: {text!r}\")\r\n\r\n\r\n@dataclass\r\nclass Monkey:\r\n m_id:int\r\n items:list[int]\r\n operation:Callable[[int],int]\r\n test:int\r\n t_true:int\r\n t_false:int\r\n\r\n @classmethod\r\n def from_strs(cls, m_id, items, operation, test, t_true, 
t_false):\r\n args = [\r\n int(re.match(\"Monkey (\\d+):\", m_id).groups()[0]),\r\n list(map(int,items.split(\":\")[-1].split(\",\"))),\r\n make_operation(operation.split(\"=\")[-1]),\r\n int(test.split()[-1]),\r\n int(t_true.split()[-1]),\r\n int(t_false.split()[-1]),\r\n ]\r\n return cls(*args)\r\n \r\n@dataclass \r\nclass Game:\r\n monkeys:list[Monkey]\r\n inspect:dict[int,int] = field(default_factory=Counter, init=False)\r\n worry_divider:Callable[[int],int] = lambda x:x//3\r\n\r\n\r\n def turn(self, monkey_id):\r\n monkeys = self.monkeys\r\n monkey = monkeys[monkey_id]\r\n worry_divider = self.worry_divider\r\n test = monkey.test\r\n t_true = monkeys[monkey.t_true].items.append\r\n t_false = monkeys[monkey.t_false].items.append\r\n inspect = self.inspect\r\n operation = monkey.operation\r\n for worry_level in monkey.items:\r\n new = worry_divider( operation(worry_level) )\r\n if new%test == 0:\r\n t_true(new)\r\n else:\r\n t_false(new)\r\n inspect[monkey_id] += 1\r\n monkey.items.clear()\r\n\r\n def round(self):\r\n for n in range(len(self.monkeys)):\r\n self.turn(n)\r\n\r\n def play(self, n_round:int):\r\n for _ in range(n_round):\r\n self.round()\r\n\r\n def monkey_business(self, n_monkey:int) -> int:\r\n return prod( v for _,v in self.inspect.most_common(n_monkey))\r\n pass\r\n \r\n \r\n\r\n def pprint(self):\r\n for m in self.monkeys:\r\n print(m)\r\n print()\r\n \r\n \r\n\r\ndef process_data(data:str) -> Iterator[Monkey]:\r\n \"\"\"transform the raw data into a procesable form\"\"\"\r\n for monkey in ir.isplit(data.splitlines()):\r\n yield Monkey.from_strs(*map(str.strip, monkey))\r\n \r\n \r\ndef get_raw_data(path:str=\"./input.txt\") -> str:\r\n with open(path) as file:\r\n return file.read()\r\n\r\ndef part1(data:str) -> int:\r\n \"\"\"part 1 of the puzzle \"\"\"\r\n game = Game(list(process_data(data)))\r\n game.play(20)\r\n return game.monkey_business(2)\r\n\r\n\r\ndef part2(data:str, on:str=\"#\", off:str=\".\" ) -> str:\r\n \"\"\"part 2 of the puzzle \"\"\"\r\n game = Game(list(process_data(data)))\r\n mod = prod( m.test for m in game.monkeys )\r\n game.worry_divider = lambda x : x%mod\r\n game.play(10000)\r\n return game.monkey_business(2)\r\n \r\n \r\n \r\n \r\n \r\ndef test1() -> bool:\r\n return part1(test_input) == 10605\r\n\r\ndef test2() -> bool:\r\n return part2(test_input) == 2713310158\r\n\r\n\r\n\r\ndata = get_raw_data()\r\nassert test1(),\"fail test 1\"\r\nprint(\"solution part1:\", part1(data)) # \r\nassert test2(),\"fail test 2\"\r\nprint(\"solution part2:\", part2(data)) #\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "repo_name": "copperfield42/Advent-of-Code-2022", "sub_path": "day11/day11.py", "file_name": "day11.py", "file_ext": "py", "file_size_in_byte": 4413, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "operator.add", "line_number": 48, "usage_type": "attribute"}, {"api_name": "operator.mul", "line_number": 48, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 52, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 53, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 61, "usage_type": "name"}, {"api_name": "re.match", "line_number": 69, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 57, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 81, "usage_type": "call"}, 
{"api_name": "collections.Counter", "line_number": 81, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 82, "usage_type": "name"}, {"api_name": "math.prod", "line_number": 112, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 78, "usage_type": "name"}, {"api_name": "itertools_recipes.isplit", "line_number": 126, "usage_type": "call"}, {"api_name": "typing.Iterator", "line_number": 124, "usage_type": "name"}, {"api_name": "math.prod", "line_number": 144, "usage_type": "call"}]} +{"seq_id": "26158515260", "text": "from collections import OrderedDict, defaultdict\nfrom typing import Union, List, ValuesView, Optional, Dict, Any\n\nimport networkx\nimport numpy as np\nfrom numpy.random import RandomState\n\nfrom textworld import g_rng\nfrom textworld.utils import uniquify\nfrom textworld.generator.data import KnowledgeBase\nfrom textworld.generator.vtypes import get_new\n\nfrom textworld.generator.graph_networks import direction\nfrom textworld.generator.graph_networks import DIRECTIONS, reverse_direction\nfrom textworld.logic import Proposition, State, Variable\n\n\nclass NoFreeExitError(Exception):\n pass\n\n\ndef connect(room1: Variable, direction: str, room2: Variable,\n door: Optional[Variable] = None) -> List[Proposition]:\n \"\"\" Generate predicates that connect two rooms.\n\n Args:\n room1: A room variable.\n direction: Direction that we need to travel to go from\n room1 to room2.\n room2: A room variable.\n door: The door separating the two rooms. If `None`, there is no\n door between the rooms.\n \"\"\"\n r_direction = reverse_direction(direction) + \"_of\"\n direction += \"_of\"\n facts = [Proposition(direction, [room2, room1]),\n Proposition(r_direction, [room1, room2]),\n Proposition(\"free\", [room1, room2]),\n Proposition(\"free\", [room2, room1])]\n\n if door is not None:\n facts += [Proposition(\"link\", [room1, door, room2]),\n Proposition(\"link\", [room2, door, room1])]\n\n return facts\n\n\ndef graph2state(G: networkx.Graph, rooms: Dict[str, Variable]) -> List[Proposition]:\n \"\"\" Convert Graph object to a list of `Proposition`.\n\n Args:\n G: Graph defining the structure of the world.\n rooms: information about the rooms in the world.\n \"\"\"\n state = []\n for src, dest in G.edges():\n d = direction(src, dest)\n\n d_r = direction(dest, src)\n e = G[src][dest]\n\n room_src = rooms[src]\n room_dest = rooms[dest]\n if e[\"has_door\"]:\n door = Variable(e['door_name'], \"d\")\n pred1 = Proposition(\"{}_of\".format(d), [room_dest, room_src])\n pred2 = Proposition(\"{}_of\".format(d_r), [room_src, room_dest])\n state.append(Proposition(e[\"door_state\"], [door]))\n state.append(Proposition(\"link\", [room_src, door, room_dest]))\n state.append(Proposition(\"link\", [room_dest, door, room_src]))\n if e[\"door_state\"] == \"open\":\n state.append(Proposition(\"free\", [room_dest, room_src]))\n state.append(Proposition(\"free\", [room_src, room_dest]))\n else:\n pred1 = Proposition(\"{}_of\".format(d), [room_dest, room_src])\n pred2 = Proposition(\"{}_of\".format(d_r), [room_src, room_dest])\n state.append(Proposition(\"free\", [room_dest, room_src]))\n state.append(Proposition(\"free\", [room_src, room_dest]))\n\n state.append(pred1)\n state.append(pred2)\n\n return state\n\n\nclass WorldEntity(Variable):\n \"\"\"\n A WorldEntity is an abstract concept representing anything with a name and a type.\n \"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.content = []\n 
self.related_facts = []\n self.properties = []\n self.matching_entity_id = None\n\n @classmethod\n def create(cls, var: Variable) -> Union[\"WorldRoom\", \"WorldObject\"]:\n # TODO: make a small factory instead of classmethod.\n if var.type == \"r\":\n return WorldRoom(var.name, var.type)\n\n return WorldObject(var.name, var.type)\n\n @property\n def id(self) -> str:\n return self.name\n\n def add_related_fact(self, fact: Proposition) -> None:\n if len(fact.arguments) == 1:\n # Fact considered as an object's property.\n self.properties.append(fact.name)\n\n self.related_facts.append(fact)\n\n def get_attributes(self) -> List[Proposition]:\n return self.related_facts\n\n\nclass WorldObject(WorldEntity):\n \"\"\"\n A WorldObject is anything we can directly interact with.\n \"\"\"\n pass\n\n\nclass WorldRoom(WorldEntity):\n \"\"\"\n WorldRooms can be linked with each other through exits.\n \"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.exits = OrderedDict()\n self.doors = OrderedDict()\n\n\nclass World:\n def __init__(self, kb: Optional[KnowledgeBase] = None) -> None:\n self.kb = kb or KnowledgeBase.default()\n self._state = State(self.kb.logic)\n self._entities = OrderedDict()\n self._rooms = []\n self._objects = []\n self._update()\n self._player_room = None\n\n @classmethod\n def from_facts(cls, facts: List[Proposition], kb: Optional[KnowledgeBase] = None) -> \"World\":\n world = cls(kb=kb)\n world.add_facts(facts)\n return world\n\n @classmethod\n def deserialize(cls, serialized_facts: List, kb: Optional[KnowledgeBase] = None) -> \"World\":\n return cls.from_facts([Proposition.deserialize(f) for f in serialized_facts],\n kb=kb)\n\n def serialize(self) -> List:\n return [f.serialize() for f in self.facts]\n\n @classmethod\n def from_map(cls, map: networkx.Graph, kb: Optional[KnowledgeBase] = None) -> \"World\":\n \"\"\"\n Args:\n map: Graph defining the structure of the world.\n \"\"\"\n world = cls(kb=kb)\n names = [d.get(\"name\", \"r_{}\".format(i)) for i, (n, d) in enumerate(map.nodes.items())]\n rooms = OrderedDict((n, Variable(names[i], \"r\")) for i, n in enumerate(map.nodes()))\n world.add_facts(graph2state(map, rooms))\n return world\n\n @property\n def player_room(self) -> WorldRoom:\n return self._player_room\n\n @property\n def rooms(self) -> List[WorldRoom]:\n return self._rooms\n\n @property\n def objects(self) -> List[WorldObject]:\n return self._objects\n\n @property\n def entities(self) -> ValuesView[WorldEntity]:\n return self._entities.values()\n\n @property\n def state(self) -> State:\n return self._state\n\n @state.setter\n def state(self, state: State) -> None:\n self._state = State(self.kb.logic)\n self.add_facts(state.facts)\n\n @property\n def facts(self) -> List[Proposition]:\n # Sort the facts for deterministic world generation\n return sorted(self._state.facts)\n\n def add_fact(self, fact: Proposition) -> None:\n self.add_facts([fact])\n\n def add_facts(self, facts: List[Proposition]) -> None:\n self._state.add_facts(facts)\n self._update() # Update the internal representation of the world.\n\n def _get_entity(self, var: Variable) -> WorldEntity:\n if var.name not in self._entities:\n self._entities[var.name] = WorldEntity.create(var)\n\n return self._entities[var.name]\n\n def _get_room(self, var: Variable) -> WorldRoom:\n entity = self._get_entity(var)\n assert isinstance(entity, WorldRoom)\n return entity\n\n def _get_object(self, var: Variable) -> WorldObject:\n entity = self._get_entity(var)\n assert 
isinstance(entity, WorldObject)\n return entity\n\n def _update(self) -> None:\n \"\"\" Update the internal representation of the world.\n\n This method will create new entities based on facts. It should be called whenever\n backing facts are changed.\n \"\"\"\n self._entities = OrderedDict() # Clear entities.\n self.player = self._get_entity(Variable(\"P\"))\n self.inventory = self._get_entity(Variable(\"I\"))\n self._player_room = None\n self._process_rooms()\n self._process_objects()\n self._rooms = [entity for entity in self._entities.values() if isinstance(entity, WorldRoom)]\n self._objects = [entity for entity in self._entities.values() if isinstance(entity, WorldObject)]\n\n self._entities_per_type = defaultdict(list)\n for entity in self._entities.values():\n self._entities_per_type[entity.type].append(entity)\n\n def _process_rooms(self) -> None:\n for fact in self.facts:\n if not self.kb.types.is_descendant_of(fact.arguments[0].type, 'r'):\n continue # Skip non room facts.\n\n room = self._get_room(fact.arguments[0])\n room.add_related_fact(fact)\n\n if fact.name.endswith(\"_of\"):\n # Handle room positioning facts.\n exit = reverse_direction(fact.name.split(\"_of\")[0])\n dest = self._get_room(fact.arguments[1])\n dest.add_related_fact(fact)\n assert exit not in room.exits\n room.exits[exit] = dest\n\n # Handle door link facts.\n for fact in self.facts:\n if fact.name != \"link\":\n continue\n\n src = self._get_room(fact.arguments[0])\n door = self._get_object(fact.arguments[1])\n dest = self._get_room(fact.arguments[2])\n door.add_related_fact(fact)\n src.content.append(door)\n\n exit_found = False\n for exit, room in src.exits.items():\n if dest == room:\n src.doors[exit] = door\n exit_found = True\n break\n\n if not exit_found:\n # Need to position both rooms w.r.t. each other.\n src_free_exits = [exit for exit in DIRECTIONS if exit not in src.exits]\n for exit in src_free_exits:\n r_exit = reverse_direction(exit)\n if r_exit not in dest.exits:\n src.exits[exit] = dest\n dest.exits[r_exit] = src\n src.doors[exit] = door\n exit_found = True\n break\n\n # Relax the Cartesian grid constraint.\n if not exit_found:\n # Need to position both rooms w.r.t. 
each other.\n src_free_exits = [exit for exit in DIRECTIONS if exit not in src.exits]\n dest_free_exits = [exit for exit in DIRECTIONS if exit not in dest.exits]\n if len(src_free_exits) > 0 and len(dest_free_exits) > 0:\n exit = src_free_exits[0]\n r_exit = dest_free_exits[0]\n src.exits[exit] = dest\n dest.exits[r_exit] = src\n src.doors[exit] = door\n exit_found = True\n\n if not exit_found: # If there is still no exit found.\n raise NoFreeExitError(\"Cannot connect {} and {}.\".format(src, dest))\n\n def _process_objects(self) -> None:\n for fact in self.facts:\n if self.kb.types.is_descendant_of(fact.arguments[0].type, 'r'):\n continue # Skip room facts.\n\n obj = self._get_entity(fact.arguments[0])\n obj.add_related_fact(fact)\n\n if fact.name == \"match\":\n other_obj = self._get_entity(fact.arguments[1])\n obj.matching_entity_id = fact.arguments[1].name\n other_obj.matching_entity_id = fact.arguments[0].name\n\n if fact.name in [\"in\", \"on\", \"at\"]:\n holder = self._get_entity(fact.arguments[1])\n holder.content.append(obj)\n\n if fact.arguments[0].type == \"P\":\n self._player_room = holder\n\n def get_facts_in_scope(self) -> List[Proposition]:\n facts = []\n facts += [fact for exit in self.player_room.exits.values() for fact in exit.related_facts]\n facts += [fact for door in self.player_room.doors.values() for fact in door.related_facts]\n facts += [fact for obj in self.get_visible_objects_in(self.player_room) for fact in obj.related_facts]\n facts += [fact for obj in self.get_objects_in_inventory() for fact in obj.related_facts]\n\n return uniquify(facts)\n\n def get_visible_objects_in(self, obj: WorldObject) -> List[WorldObject]:\n if \"locked\" in obj.properties or \"closed\" in obj.properties:\n return []\n\n objects = list(obj.content)\n for obj in obj.content:\n objects += self.get_visible_objects_in(obj)\n\n return objects\n\n def get_all_objects_in(self, obj: WorldObject) -> List[WorldObject]:\n objects = list(obj.content)\n for obj in obj.content:\n objects += self.get_all_objects_in(obj)\n\n return objects\n\n def get_objects_in_inventory(self) -> List[WorldObject]:\n return self.inventory.content\n\n def get_entities_per_type(self, type: str) -> List[WorldEntity]:\n \"\"\" Get all entities of a certain type. 
\"\"\"\n return self._entities_per_type.get(type, [])\n\n def find_object_by_id(self, id: str) -> Optional[WorldObject]:\n return self._entities.get(id)\n\n def find_room_by_id(self, id: str) -> Optional[WorldRoom]:\n return self._entities.get(id)\n\n def set_player_room(self, start_room: Union[None, WorldRoom, str] = None) -> Proposition:\n if start_room is None:\n if len(self.rooms) == 0:\n start_room = WorldRoom(\"r_0\", \"r\")\n else:\n start_room = self.rooms[0]\n\n elif start_room in self._entities:\n start_room = self._entities[start_room]\n elif isinstance(start_room, Variable) and start_room.name in self._entities:\n start_room = self._entities[start_room.name]\n else:\n raise ValueError(\"Unknown room: {}\".format(start_room))\n\n fact = Proposition(\"at\", [self.player, start_room])\n self.add_fact(fact)\n return fact\n\n def populate_room(self, nb_objects: int, room: Variable,\n rng: Optional[RandomState] = None,\n object_types_probs: Optional[Dict[str, float]] = None) -> List[Proposition]:\n rng = g_rng.next() if rng is None else rng\n state = []\n types_counts = self.kb.types.count(self.state)\n\n inventory = Variable(\"I\", \"I\")\n objects_holder = [inventory, room]\n\n locked_or_closed_objects = []\n lockable_objects = []\n for s in self.facts:\n # Look for containers and supporters to put stuff in/on them.\n if s.name == \"at\" and s.arguments[0].type in [\"c\", \"s\"] and s.arguments[1].name == room.name:\n objects_holder.append(s.arguments[0])\n\n # Look for containers and doors without a matching key.\n if s.name == \"at\" and s.arguments[0].type in [\"c\", \"d\"] and s.arguments[1].name == room.name:\n obj_propositions = [p.name for p in self.facts if s.arguments[0].name in p.names]\n if \"match\" not in obj_propositions and s.arguments[0] not in lockable_objects:\n lockable_objects.append(s.arguments[0])\n\n if \"locked\" in obj_propositions or \"closed\" in obj_propositions:\n locked_or_closed_objects.append(s.arguments[0])\n\n object_id = 0\n while object_id < nb_objects:\n if len(locked_or_closed_objects) > 0:\n # Prioritize adding key if there are locked or closed things in the room.\n obj_type = \"k\"\n else:\n obj_type = self.kb.types.sample(parent_type='t', rng=rng, exceptions=[\"d\", \"r\"],\n include_parent=False, probs=object_types_probs)\n\n if self.kb.types.is_descendant_of(obj_type, \"o\"):\n obj_name = get_new(obj_type, types_counts)\n obj = Variable(obj_name, obj_type)\n allowed_objects_holder = list(objects_holder)\n\n if obj_type == \"k\":\n if len(locked_or_closed_objects) > 0:\n # Look for a *locked* container or a door.\n rng.shuffle(locked_or_closed_objects)\n locked_or_closed_obj = locked_or_closed_objects.pop()\n state.append(Proposition(\"match\", [obj, locked_or_closed_obj]))\n lockable_objects.remove(locked_or_closed_obj)\n\n # Do not place the key in its own matching container.\n if locked_or_closed_obj in allowed_objects_holder:\n allowed_objects_holder.remove(locked_or_closed_obj)\n\n elif len(lockable_objects) > 0:\n # Look for a container or a door.\n rng.shuffle(lockable_objects)\n lockable_obj = lockable_objects.pop()\n state.append(Proposition(\"match\", [obj, lockable_obj]))\n else:\n continue # Unuseful key is not allowed.\n\n elif obj_type == \"f\":\n # HACK: manually add the edible property to food items.\n state.append(Proposition(\"edible\", [obj]))\n\n # Place the object somewhere.\n obj_holder = rng.choice(allowed_objects_holder)\n if self.kb.types.is_descendant_of(obj_holder.type, \"s\"):\n state.append(Proposition(\"on\", 
[obj, obj_holder]))\n elif self.kb.types.is_descendant_of(obj_holder.type, \"c\"):\n state.append(Proposition(\"in\", [obj, obj_holder]))\n elif self.kb.types.is_descendant_of(obj_holder.type, \"I\"):\n state.append(Proposition(\"in\", [obj, obj_holder]))\n elif self.kb.types.is_descendant_of(obj_holder.type, \"r\"):\n state.append(Proposition(\"at\", [obj, obj_holder]))\n else:\n raise ValueError(\"Unknown type for object holder: {}\".format(obj_holder))\n\n elif self.kb.types.is_descendant_of(obj_type, \"s\"):\n supporter_name = get_new(obj_type, types_counts)\n supporter = Variable(supporter_name, obj_type)\n state.append(Proposition(\"at\", [supporter, room]))\n objects_holder.append(supporter)\n\n elif self.kb.types.is_descendant_of(obj_type, \"c\"):\n container_name = get_new(obj_type, types_counts)\n container = Variable(container_name, obj_type)\n state.append(Proposition(\"at\", [container, room]))\n objects_holder.append(container)\n\n container_state = rng.choice([\"open\", \"closed\", \"locked\"])\n state.append(Proposition(container_state, [container]))\n\n lockable_objects.append(container)\n if container_state in [\"locked\", \"closed\"]:\n locked_or_closed_objects.append(container)\n\n else:\n raise ValueError(\"Unknown object type: {}\".format(obj_type))\n\n object_id += 1\n\n self.add_facts(state)\n return state\n\n def populate(self, nb_objects: int,\n rng: Optional[RandomState] = None,\n object_types_probs: Optional[Dict[str, float]] = None) -> List[Proposition]:\n rng = g_rng.next() if rng is None else rng\n room_names = [room.id for room in self.rooms]\n nb_objects_per_room = {room_name: 0 for room_name in room_names}\n indices = np.arange(len(room_names))\n for _ in range(nb_objects):\n idx = rng.choice(indices)\n nb_objects_per_room[room_names[idx]] += 1\n\n state = []\n for room in self.rooms:\n state += self.populate_room(nb_objects_per_room[room.id], room, rng, object_types_probs)\n\n return state\n\n def populate_room_with(self, objects: WorldObject, room: WorldRoom,\n rng: Optional[RandomState] = None) -> List[Proposition]:\n rng = g_rng.next() if rng is None else rng\n state = []\n\n objects_holder = [room]\n\n locked_or_closed_objects = []\n lockable_objects = []\n for s in self.facts:\n # Look for containers and supporters to put stuff in/on them.\n if s.name == \"at\" and s.arguments[0].type in [\"c\", \"s\"] and s.arguments[1].name == room.name:\n objects_holder.append(s.arguments[0])\n\n # Look for containers and doors without a matching key.\n if s.name == \"at\" and s.arguments[0].type in [\"c\", \"d\"] and s.arguments[1].name == room.name:\n obj_propositions = [p.name for p in self.facts if s.arguments[0].name in p.names]\n if \"match\" not in obj_propositions and s.arguments[0] not in lockable_objects:\n lockable_objects.append(s.arguments[0])\n\n if \"locked\" in obj_propositions or \"closed\" in obj_propositions:\n locked_or_closed_objects.append(s.arguments[0])\n\n remaining_objects_id = list(range(len(objects)))\n rng.shuffle(remaining_objects_id)\n for idx in remaining_objects_id:\n obj = objects[idx]\n obj_type = obj.type\n\n if self.kb.types.is_descendant_of(obj_type, \"o\"):\n allowed_objects_holder = list(objects_holder)\n\n # Place the object somewhere.\n obj_holder = rng.choice(allowed_objects_holder)\n if self.kb.types.is_descendant_of(obj_holder.type, \"s\"):\n state.append(Proposition(\"on\", [obj, obj_holder]))\n elif self.kb.types.is_descendant_of(obj_holder.type, \"c\"):\n state.append(Proposition(\"in\", [obj, obj_holder]))\n elif 
self.kb.types.is_descendant_of(obj_holder.type, \"r\"):\n state.append(Proposition(\"at\", [obj, obj_holder]))\n else:\n raise ValueError(\"Unknown type for object holder: {}\".format(obj_holder))\n\n elif self.kb.types.is_descendant_of(obj_type, \"s\"):\n supporter = obj\n state.append(Proposition(\"at\", [supporter, room]))\n objects_holder.append(supporter)\n\n elif self.kb.types.is_descendant_of(obj_type, \"c\"):\n container = obj\n state.append(Proposition(\"at\", [container, room]))\n objects_holder.append(container)\n\n container_state = rng.choice([\"open\", \"closed\", \"locked\"])\n state.append(Proposition(container_state, [container]))\n\n lockable_objects.append(container)\n if container_state in [\"locked\", \"closed\"]:\n locked_or_closed_objects.append(container)\n\n else:\n raise ValueError(\"Unknown object type: {}\".format(obj_type))\n\n self.add_facts(state)\n return state\n\n def populate_with(self, objects: List[WorldObject],\n rng: Optional[RandomState] = None) -> List[Proposition]:\n rng = g_rng.next() if rng is None else rng\n room_names = [room.id for room in self.rooms]\n nb_objects_per_room = {room_name: 0 for room_name in room_names}\n indices = np.arange(len(room_names))\n for _ in range(len(objects)):\n idx = rng.choice(indices)\n nb_objects_per_room[room_names[idx]] += 1\n\n state = []\n for room in self.rooms:\n state += self.populate_room_with(objects[:nb_objects_per_room[room.id]], room, rng)\n objects = objects[nb_objects_per_room[room.id]:]\n\n self.add_facts(state)\n return state\n\n def __eq__(self, other: Any) -> bool:\n return (isinstance(other, World) and self.state == other.state)\n\n def __hash__(self) -> int:\n return hash(frozenset(self.facts))\n", "repo_name": "microsoft/TextWorld", "sub_path": "textworld/generator/world.py", "file_name": "world.py", "file_ext": "py", "file_size_in_byte": 23659, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1102, "dataset": "github-code", "pt": "21", "api": [{"api_name": "textworld.logic.Variable", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 23, "usage_type": "name"}, {"api_name": "textworld.logic.Variable", "line_number": 23, "usage_type": "name"}, {"api_name": "textworld.generator.graph_networks.reverse_direction", "line_number": 34, "usage_type": "call"}, {"api_name": "textworld.generator.graph_networks.direction", "line_number": 34, "usage_type": "argument"}, {"api_name": "textworld.generator.graph_networks.direction", "line_number": 35, "usage_type": "name"}, {"api_name": "textworld.logic.Proposition", "line_number": 36, "usage_type": "call"}, {"api_name": "textworld.generator.graph_networks.direction", "line_number": 36, "usage_type": "argument"}, {"api_name": "textworld.logic.Proposition", "line_number": 37, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 38, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 39, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 42, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 43, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 23, "usage_type": "name"}, {"api_name": "textworld.logic.Proposition", "line_number": 23, "usage_type": "name"}, {"api_name": "networkx.Graph", "line_number": 48, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 48, "usage_type": "name"}, {"api_name": "textworld.logic.Variable", "line_number": 48, 
"usage_type": "name"}, {"api_name": "textworld.generator.graph_networks.direction", "line_number": 57, "usage_type": "call"}, {"api_name": "textworld.generator.graph_networks.direction", "line_number": 59, "usage_type": "call"}, {"api_name": "textworld.logic.Variable", "line_number": 65, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 66, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 67, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 68, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 69, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 70, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 72, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 73, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 75, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 76, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 77, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 78, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 48, "usage_type": "name"}, {"api_name": "textworld.logic.Proposition", "line_number": 48, "usage_type": "name"}, {"api_name": "textworld.logic.Variable", "line_number": 86, "usage_type": "name"}, {"api_name": "textworld.logic.Variable", "line_number": 99, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 99, "usage_type": "name"}, {"api_name": "textworld.logic.Proposition", "line_number": 110, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 117, "usage_type": "name"}, {"api_name": "textworld.logic.Proposition", "line_number": 117, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 135, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 136, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 140, "usage_type": "name"}, {"api_name": "textworld.generator.data.KnowledgeBase", "line_number": 140, "usage_type": "name"}, {"api_name": "textworld.generator.data.KnowledgeBase.default", "line_number": 141, "usage_type": "call"}, {"api_name": "textworld.generator.data.KnowledgeBase", "line_number": 141, "usage_type": "name"}, {"api_name": "textworld.logic.State", "line_number": 142, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 143, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 150, "usage_type": "name"}, {"api_name": "textworld.logic.Proposition", "line_number": 150, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 150, "usage_type": "name"}, {"api_name": "textworld.generator.data.KnowledgeBase", "line_number": 150, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 156, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 156, "usage_type": "name"}, {"api_name": "textworld.generator.data.KnowledgeBase", "line_number": 156, "usage_type": "name"}, {"api_name": "textworld.logic.Proposition.deserialize", "line_number": 157, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 157, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 160, "usage_type": "name"}, {"api_name": "networkx.Graph", "line_number": 164, "usage_type": "attribute"}, {"api_name": "typing.Optional", 
"line_number": 164, "usage_type": "name"}, {"api_name": "textworld.generator.data.KnowledgeBase", "line_number": 164, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 171, "usage_type": "call"}, {"api_name": "textworld.logic.Variable", "line_number": 171, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 180, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 184, "usage_type": "name"}, {"api_name": "typing.ValuesView", "line_number": 188, "usage_type": "name"}, {"api_name": "textworld.logic.State", "line_number": 192, "usage_type": "name"}, {"api_name": "textworld.logic.State", "line_number": 196, "usage_type": "name"}, {"api_name": "textworld.logic.State", "line_number": 197, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 201, "usage_type": "name"}, {"api_name": "textworld.logic.Proposition", "line_number": 201, "usage_type": "name"}, {"api_name": "textworld.logic.Proposition", "line_number": 205, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 208, "usage_type": "name"}, {"api_name": "textworld.logic.Proposition", "line_number": 208, "usage_type": "name"}, {"api_name": "textworld.logic.Variable", "line_number": 212, "usage_type": "name"}, {"api_name": "textworld.logic.Variable", "line_number": 218, "usage_type": "name"}, {"api_name": "textworld.logic.Variable", "line_number": 223, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 234, "usage_type": "call"}, {"api_name": "textworld.logic.Variable", "line_number": 235, "usage_type": "call"}, {"api_name": "textworld.logic.Variable", "line_number": 236, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 243, "usage_type": "call"}, {"api_name": "textworld.generator.graph_networks.reverse_direction", "line_number": 257, "usage_type": "call"}, {"api_name": "textworld.generator.graph_networks.DIRECTIONS", "line_number": 283, "usage_type": "name"}, {"api_name": "textworld.generator.graph_networks.reverse_direction", "line_number": 285, "usage_type": "call"}, {"api_name": "textworld.generator.graph_networks.DIRECTIONS", "line_number": 296, "usage_type": "name"}, {"api_name": "textworld.generator.graph_networks.DIRECTIONS", "line_number": 297, "usage_type": "name"}, {"api_name": "textworld.utils.uniquify", "line_number": 336, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 329, "usage_type": "name"}, {"api_name": "textworld.logic.Proposition", "line_number": 329, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 338, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 348, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 355, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 358, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 362, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 365, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 368, "usage_type": "name"}, {"api_name": "textworld.logic.Variable", "line_number": 377, "usage_type": "argument"}, {"api_name": "textworld.logic.Proposition", "line_number": 382, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 368, "usage_type": "name"}, {"api_name": "textworld.logic.Variable", "line_number": 386, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 387, "usage_type": "name"}, {"api_name": "numpy.random.RandomState", "line_number": 387, "usage_type": "name"}, 
{"api_name": "typing.Optional", "line_number": 388, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 388, "usage_type": "name"}, {"api_name": "textworld.g_rng.next", "line_number": 389, "usage_type": "call"}, {"api_name": "textworld.g_rng", "line_number": 389, "usage_type": "name"}, {"api_name": "textworld.logic.Variable", "line_number": 393, "usage_type": "call"}, {"api_name": "textworld.generator.vtypes.get_new", "line_number": 422, "usage_type": "call"}, {"api_name": "textworld.logic.Variable", "line_number": 423, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 431, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 442, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 448, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 453, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 455, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 457, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 459, "usage_type": "call"}, {"api_name": "textworld.generator.vtypes.get_new", "line_number": 464, "usage_type": "call"}, {"api_name": "textworld.logic.Variable", "line_number": 465, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 466, "usage_type": "call"}, {"api_name": "textworld.generator.vtypes.get_new", "line_number": 470, "usage_type": "call"}, {"api_name": "textworld.logic.Variable", "line_number": 471, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 472, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 476, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 388, "usage_type": "name"}, {"api_name": "textworld.logic.Proposition", "line_number": 388, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 491, "usage_type": "name"}, {"api_name": "numpy.random.RandomState", "line_number": 491, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 492, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 492, "usage_type": "name"}, {"api_name": "textworld.g_rng.next", "line_number": 493, "usage_type": "call"}, {"api_name": "textworld.g_rng", "line_number": 493, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 496, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 492, "usage_type": "name"}, {"api_name": "textworld.logic.Proposition", "line_number": 492, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 508, "usage_type": "name"}, {"api_name": "numpy.random.RandomState", "line_number": 508, "usage_type": "name"}, {"api_name": "textworld.g_rng.next", "line_number": 509, "usage_type": "call"}, {"api_name": "textworld.g_rng", "line_number": 509, "usage_type": "name"}, {"api_name": "textworld.logic.Proposition", "line_number": 542, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 544, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 546, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 552, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 557, "usage_type": "call"}, {"api_name": "textworld.logic.Proposition", "line_number": 561, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 508, "usage_type": "name"}, 
{"api_name": "textworld.logic.Proposition", "line_number": 508, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 573, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 574, "usage_type": "name"}, {"api_name": "numpy.random.RandomState", "line_number": 574, "usage_type": "name"}, {"api_name": "textworld.g_rng.next", "line_number": 575, "usage_type": "call"}, {"api_name": "textworld.g_rng", "line_number": 575, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 578, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 574, "usage_type": "name"}, {"api_name": "textworld.logic.Proposition", "line_number": 574, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 591, "usage_type": "name"}]} +{"seq_id": "72654565492", "text": "\"\"\"Command line interface\"\"\"\nimport click\nfrom .project import Project\n\n\n@click.command()\n@click.argument(\"path\", type=click.Path(exists=True))\ndef cli(path):\n \"\"\"Quick and simple Python project statistics\"\"\"\n click.echo(f\"path: {path}\")\n\n project = Project(path)\n project.find_files()\n project.analyse_files()\n project.make_report()\n print(project.report.to_text())\n print(project.report.to_pandas())\n\n\nif __name__ == \"__main__\":\n cli()\n", "repo_name": "cdeil/pyprojectstats", "sub_path": "pyprojectstats/__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 465, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "click.echo", "line_number": 10, "usage_type": "call"}, {"api_name": "project.Project", "line_number": 12, "usage_type": "call"}, {"api_name": "project.find_files", "line_number": 13, "usage_type": "call"}, {"api_name": "project.analyse_files", "line_number": 14, "usage_type": "call"}, {"api_name": "project.make_report", "line_number": 15, "usage_type": "call"}, {"api_name": "project.report.to_text", "line_number": 16, "usage_type": "call"}, {"api_name": "project.report", "line_number": 16, "usage_type": "attribute"}, {"api_name": "project.report.to_pandas", "line_number": 17, "usage_type": "call"}, {"api_name": "project.report", "line_number": 17, "usage_type": "attribute"}, {"api_name": "click.command", "line_number": 6, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 7, "usage_type": "call"}, {"api_name": "click.Path", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "6620531647", "text": "# 라이브러리 설치\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n# !pip install seaborn\r\nimport seaborn as sns\r\n\r\n# 데이터 로딩\r\ndf = pd.read_csv('데이터 경로.csv')\r\n\r\n# 데이터 구성 확인\r\ndf.describe()\r\ndf.head()\r\ndf.tail()\r\ndf.info()\r\n\r\n# 시각화 - 상관관계 분석\r\ncorr = df.corr()\r\nsns.heatmap(corr, annot=True)\r\n\r\n# 필요없는 데이터 제거/ 결측치(null) 제거 또는 채우기\r\n\r\n# replace : 다른 값으로 대체\r\ndf['Total'].replace([' '], ['0'], inplace=True)\r\ndf['age_itg_cd'].replace(np.nan, 48, inplace=True)\r\n\r\n# drop : 지우기 axis=1 : 열, axis=0 : 행\r\ndf.drop('CustomerID', axis=1, inplace=True)\r\ndf.drop(columns=['new_date', 'opn_nfl_chg_date'], inplace=True)\r\n\r\n# 데이터타입 변경\r\ndf['Total'] = df['Total'].astype(float)\r\n\r\n# 결측치 개수\r\ndf.isnull().sum()\r\ndf['Total'].isnull().sum()\r\n\r\n# 데이터 전처리\r\n\r\n# X(Feature)와 Y(Label) 지정\r\nX = df.drop('Total', axis=1).values\r\nY = df['Total'].values\r\n\r\n# 7:3으로 Data 분리(Train:Test)\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_valid, Y_train, Y_valid 
= train_test_split(X, Y, test_size=0.3, \r\n stratify=Y, random_state=42)\r\n\r\n# AI 모델링\r\n# 로지스틱 회귀분석 모델링\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nrf = RandomForestClassifier(n_estimators=20, max_depth=5, random_state=42)\r\nrf.fit(X_train, Y_train) # 학습\r\npredicted = rf.predict(X_valid)\r\naccuracy_score(Y_valid, predicted)\r\n\r\n# 딥러닝 모델링\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense, Activation, Dropout\r\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\r\nfrom tensorflow.keras.utils import to_categorical\r\nepochs=50\r\nbatch_size=10\r\n\r\nmodel = Sequential()\r\nmodel.add(Dense(128, activation='relu', input_shape=(29.)))\r\nmodel.add(Dropout(0.3))\r\nmodel.add(Dense(64, activation='relu'))\r\nmodel.add(Dropout(0.3))\r\nmodel.add(Dense(64, activation='relu'))\r\nmodel.add(Dropout(0.3))\r\nmodel.add(Dense(32, activation='relu'))\r\nmodel.add(Dropout(0.3))\r\nmodel.add(Dense(1, activation='sigmoid')) # sigmoid : 이진분류, softmax : 다중분류\r\n\r\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc']) # metrics : 평가기준\r\nes = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)\r\nmc = ModelCheckpoint('best_model.h5', monitor='val_loss', mode='min', save_best_only=True)\r\n\r\nhistory = model.fit(x=X_train, y=Y_train,\r\n epochs=epochs,\r\n batch_size=batch_size,\r\n validation_data=(X_valid, Y_valid),\r\n callbacks=[es, mc],\r\n verbose=1)\r\n\r\n\r\n# 정확도(acc, loss) 그래프 그리기\r\nhistory = model.fit(X_train, Y_train, validation_split=0.25, epochs=10, verbose=1)\r\nplt.plot(history.history['acc'])\r\nplt.plot(history.history['val_acc'])\r\nplt.plot(history.history['loss'])\r\nplt.plot(history.history['val_loss'])\r\nplt.title('Accuracy')\r\nplt.xlabel('Epochs')\r\nplt.ylabel('Acc')\r\nplt.legend(['acc', 'val_acc', 'loss', 'val_loss']) # 범례\r\nplt.show()\r\n\r\n# 모델 성능평가\r\nnp.mean((y_pred - y_test) ** 2) ** 0.5 # RMSE\r\n\r\n", "repo_name": "cnjxkdrk/AIFB", "sub_path": "AIFB.py", "file_name": "AIFB.py", "file_ext": "py", "file_size_in_byte": 3264, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 76, "usage_type": "call"}, {"api_name": 
"tensorflow.keras.layers.Dropout", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.EarlyStopping", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.ModelCheckpoint", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "75213433973", "text": "import os\nimport yaml\nimport pandas as pd\nfrom tqdm import tqdm\nfrom utils import display\n\nif __name__ == '__main__':\n tqdm.pandas()\n display.configure_pandas()\n display.configure_logging()\n # configure the working directory to the project root path\n with open(\"../../config.yaml\", \"r\", encoding=\"utf8\") as f:\n conf = yaml.load(f, Loader=yaml.FullLoader)\n os.chdir(conf[\"project_path\"])\n\n # Load data\n trajectories = pd.read_parquet('data/trajectory/statuses')\n # print(\"If ce begin_time conflict with od?\", (trajectories['charging'] & trajectories['occupied']).any())\n # print(\"If ce start_charging conflict with od?\", (trajectories['actual_charging'] & trajectories['occupied']).any())\n # print(\"If ce arrival_time conflict with od?\", (trajectories['queuing'] & trajectories['occupied']).any())\n print(trajectories.head(3))\n\n # Make a special trajectory that switch timestamp to last one if point is end of a big interval.\n trajectories['last_timestamp'] = trajectories['timestamp'].shift()\n trajectories.loc[trajectories['plate'] != trajectories['plate'].shift(), 'last_timestamp'] = None\n trajectories['begin_time'] = trajectories['timestamp']\n trajectories.loc[trajectories['big_dur'] & trajectories['valid'], 'begin_time'] = \\\n trajectories.loc[trajectories['big_dur'] & trajectories['valid'], 'last_timestamp']\n # Group to stay trajectories\n trajectories['grp'] = ((trajectories['stop'] != trajectories['stop'].shift())\n | (trajectories['plate'] != trajectories['plate'].shift())\n | (trajectories['occupied'] != trajectories['occupied'].shift())\n | (trajectories['charging'] != 
trajectories['charging'].shift())).cumsum()\n trajectories['day'] = trajectories['timestamp'].dt.day\n stay_trajectory = pd.DataFrame({\n 'license': trajectories.groupby('grp')['plate'].first(),\n 'start_time': trajectories.groupby('grp')['begin_time'].first(),\n 'end_time': trajectories.groupby('grp')['timestamp'].last(),\n 'duration': (trajectories.groupby('grp')['timestamp'].last() -\n trajectories.groupby('grp')['begin_time'].first()).dt.total_seconds(),\n 'Longitude': trajectories.groupby('grp')['longitude'].mean(),\n 'Latitude': trajectories.groupby('grp')['latitude'].mean(),\n 'occupied': trajectories.groupby('grp')['occupied'].first(),\n 'stop': trajectories.groupby('grp')['stop'].first(),\n 'charging': trajectories.groupby('grp')['charging'].first(),\n 'day': trajectories.groupby('grp')['day'].first(),\n 'have_big_dur': trajectories.groupby('grp')['big_dur'].any(),\n })\n rest_events = stay_trajectory.loc[\n ~stay_trajectory['occupied'] & stay_trajectory['stop'] & ~stay_trajectory['charging']\n & (stay_trajectory['duration'] > 1800)]\n\n rest_events.to_csv('data/rest/rest_events.csv', index=False)\n rest_events.to_parquet('data/rest/rest_events.parquet')\n", "repo_name": "easysam/electric-taxi-mobility", "sub_path": "s1_preprocessing/trajectory_segmentation/s2_rest_event_extraction.py", "file_name": "s2_rest_event_extraction.py", "file_ext": "py", "file_size_in_byte": 3024, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "tqdm.tqdm.pandas", "line_number": 8, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 8, "usage_type": "name"}, {"api_name": "utils.display.configure_pandas", "line_number": 9, "usage_type": "call"}, {"api_name": "utils.display", "line_number": 9, "usage_type": "name"}, {"api_name": "utils.display.configure_logging", "line_number": 10, "usage_type": "call"}, {"api_name": "utils.display", "line_number": 10, "usage_type": "name"}, {"api_name": "yaml.load", "line_number": 13, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_parquet", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "13198313239", "text": "__author__ = \"Joerg Hoettges\"\n__date__ = \"September 2016\"\n__copyright__ = \"(C) 2016, Joerg Hoettges\"\n\n# This will get replaced with a git SHA1 when you do a git archive\n\n__revision__ = \":%H$\"\n\n# import tempfile\nimport logging\nimport os\n\nfrom qgis.core import QgsDataSourceUri, QgsProject, QgsVectorLayer\nfrom qgis.utils import pluginDirectory\n\nfrom qkan import QKan\nfrom qkan.database.dbfunc import DBConnection\nfrom qkan.database.qkan_utils import fehlermeldung, get_database_QKan\n\nlogger = logging.getLogger(\"QKan\")\n\n\nclass ResultsTask:\n def __init__(self):\n pass\n\n def run(self) -> bool:\n\n database_QKan, epsg = get_database_QKan()\n\n # Attach SQLite-Database with HE8 Data\n sql = f'ATTACH DATABASE \"{QKan.config.he8.results_file}\" AS he'\n\n with DBConnection(dbname=database_QKan) as db_qkan:\n if not db_qkan.connected:\n return False\n\n if dbQK is None:\n fehlermeldung(\n \"Fehler in QKan_Import_from_HE\",\n \"QKan-Datenbank {:s} wurde nicht gefunden!\\nAbbruch!\".format(\n database_QKan\n ),\n )\n return None\n\n if not dbQK.sql(sql, \"He8Porter.run_export_to_he8 Attach HE8\"):\n logger.error(\n f\"Fehler in 
He8Porter._doexport(): Attach fehlgeschlagen: {QKan.config.he8.results_file}\"\n )\n return False\n\n # Vorbereiten der temporären Ergebnistabellen\n sqllist = [\n \"\"\"CREATE TABLE IF NOT EXISTS ResultsSch(\n pk INTEGER PRIMARY KEY AUTOINCREMENT,\n schnam TEXT,\n uebstauhaeuf REAL,\n uebstauanz REAL, \n maxuebstauvol REAL,\n kommentar TEXT,\n createdat TEXT DEFAULT CURRENT_DATE)\"\"\",\n \"\"\"SELECT AddGeometryColumn('ResultsSch','geom',{},'POINT',2)\"\"\".format(\n epsg\n ),\n \"\"\"DELETE FROM ResultsSch\"\"\",\n ]\n # , '''CREATE TABLE IF NOT EXISTS ResultsHal(\n # pk INTEGER PRIMARY KEY AUTOINCREMENT,\n # haltnam TEXT,\n # uebstauhaeuf REAL,\n # uebstauanz REAL,\n # maxuebstauvol REAL,\n # kommentar TEXT,\n # createdat TEXT DEFAULT CURRENT_DATE)''',\n # \"\"\"SELECT AddGeometryColumn('ResultsHal','geom',{},'LINESTRING',2)\"\"\".format(epsg)\n # '''DELETE FROM ResultsHal''']\n\n for sql in sqllist:\n if not dbQK.sql(sql, \"QKan_Import_Results (1)\"):\n return False\n\n # Die folgende Abfrage gilt sowohl bei Einzel- als auch bei Seriensimulationen:\n sql = f\"\"\"INSERT INTO ResultsSch\n (schnam, uebstauhaeuf, uebstauanz, maxuebstauvol, geom, kommentar)\n SELECT \n MR.KNOTEN, LZ.HAEUFIGKEITUEBERSTAU, \n LZ.ANZAHLUEBERSTAU, MR.UEBERSTAUVOLUMEN, SC.geop, '{QKan.config.he8.results_file}'\n FROM he.LAU_MAX_S AS MR\n LEFT JOIN LANGZEITKNOTEN AS LZ\n ON MR.KNOTEN = LZ.KNOTEN\n JOIN schaechte AS SC\n ON SC.schnam = MR.KNOTEN\n \"\"\"\n\n if not dbQK.sql(sql, stmt_category=\"QKan_Import_Results (4)\"):\n return False\n\n dbQK.commit()\n\n # Einfügen der Ergebnistabelle in die Layerliste, wenn nicht schon geladen\n project = QgsProject.instance()\n if not project.mapLayersByName(\"Überstau Schächte\"):\n\n uri = QgsDataSourceUri()\n uri.setDatabase(database_QKan)\n logger.debug(f\"database_QKan (1): {database_QKan}\")\n uri.setDataSource(\"\", \"ResultsSch\", \"geom\")\n logger.debug(f\"(2) uri.database(): {uri.database()}\")\n vlayer = QgsVectorLayer(uri.uri(), \"Überstau Schächte\", \"spatialite\")\n\n root = project.layerTreeRoot()\n group = root.addGroup(\"Ergebnisse\")\n project.addMapLayer(vlayer, False)\n group.addLayer(vlayer)\n\n # Stilvorlage nach Benutzerwahl laden\n templatepath = os.path.join(pluginDirectory(\"qkan\"), \"templates\")\n if QKan.config.he8.qml_choice == \"uebh\":\n template = os.path.join(templatepath, 'qml', \"ueberstauhaeufigkeit.qml\")\n try:\n vlayer.loadNamedStyle(template)\n except:\n fehlermeldung(\n \"Fehler in QKan_Results_from_HE\",\n 'Stildatei \"Überstauhäufigkeit.qml\" wurde nicht gefunden!\\nAbbruch!',\n )\n elif QKan.config.he8.qml_choice == \"uebvol\":\n template = os.path.join(templatepath, 'qml', \"ueberstauvolumen.qml\")\n try:\n vlayer.loadNamedStyle(template)\n except:\n fehlermeldung(\n \"Fehler in QKan_Results_from_HE\",\n 'Stildatei \"Überstauvolumen.qml\" wurde nicht gefunden!\\nAbbruch!',\n )\n elif QKan.config.he8.qml_choice == \"userqml\":\n try:\n vlayer.loadNamedStyle(QKan.config.he8.qml_file_results)\n except:\n fehlermeldung(\n \"Fehler in QKan_Results_from_HE\",\n f\"Benutzerdefinierte Stildatei {QKan.config.he8.qml_choice} \"\n \"wurde nicht gefunden!\\nAbbruch!\",\n )\n\n del dbQK\n", "repo_name": "hoettges/QKan", "sub_path": "qkan/he8porter/_results.py", "file_name": "_results.py", "file_ext": "py", "file_size_in_byte": 5478, "program_lang": "python", "lang": "de", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": 
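The QKan snippet above attaches the HE8 results file to the project database and fills a results table with a single INSERT ... SELECT across the two schemas (as written it also mixes the names db_qkan and dbQK for the same connection, which would fail at runtime). A stripped-down, runnable sketch of the ATTACH pattern with plain sqlite3, using in-memory databases as stand-ins for the two files:

import sqlite3

con = sqlite3.connect(":memory:")                 # stand-in for the QKan DB
con.execute("ATTACH DATABASE ':memory:' AS he")   # stand-in for the HE8 results file
con.execute("CREATE TABLE he.LAU_MAX_S (KNOTEN TEXT, UEBERSTAUVOLUMEN REAL)")
con.execute("INSERT INTO he.LAU_MAX_S VALUES ('S1', 12.5)")
con.execute("CREATE TABLE ResultsSch (schnam TEXT, maxuebstauvol REAL)")
con.execute("INSERT INTO ResultsSch SELECT KNOTEN, UEBERSTAUVOLUMEN FROM he.LAU_MAX_S")
print(con.execute("SELECT * FROM ResultsSch").fetchall())  # [('S1', 12.5)]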
"qkan.database.qkan_utils.get_database_QKan", "line_number": 29, "usage_type": "call"}, {"api_name": "qkan.QKan.config", "line_number": 32, "usage_type": "attribute"}, {"api_name": "qkan.QKan", "line_number": 32, "usage_type": "name"}, {"api_name": "qkan.database.dbfunc.DBConnection", "line_number": 34, "usage_type": "call"}, {"api_name": "qkan.database.qkan_utils.fehlermeldung", "line_number": 39, "usage_type": "call"}, {"api_name": "qkan.QKan.config", "line_number": 49, "usage_type": "attribute"}, {"api_name": "qkan.QKan", "line_number": 49, "usage_type": "name"}, {"api_name": "qkan.QKan.config", "line_number": 88, "usage_type": "attribute"}, {"api_name": "qkan.QKan", "line_number": 88, "usage_type": "name"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 102, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 102, "usage_type": "name"}, {"api_name": "qgis.core.QgsDataSourceUri", "line_number": 105, "usage_type": "call"}, {"api_name": "qgis.core.QgsVectorLayer", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "qgis.utils.pluginDirectory", "line_number": 118, "usage_type": "call"}, {"api_name": "qkan.QKan.config", "line_number": 119, "usage_type": "attribute"}, {"api_name": "qkan.QKan", "line_number": 119, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "qkan.database.qkan_utils.fehlermeldung", "line_number": 124, "usage_type": "call"}, {"api_name": "qkan.QKan.config", "line_number": 128, "usage_type": "attribute"}, {"api_name": "qkan.QKan", "line_number": 128, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "qkan.database.qkan_utils.fehlermeldung", "line_number": 133, "usage_type": "call"}, {"api_name": "qkan.QKan.config", "line_number": 137, "usage_type": "attribute"}, {"api_name": "qkan.QKan", "line_number": 137, "usage_type": "name"}, {"api_name": "qkan.QKan.config", "line_number": 139, "usage_type": "attribute"}, {"api_name": "qkan.QKan", "line_number": 139, "usage_type": "name"}, {"api_name": "qkan.database.qkan_utils.fehlermeldung", "line_number": 141, "usage_type": "call"}, {"api_name": "qkan.QKan.config", "line_number": 143, "usage_type": "attribute"}, {"api_name": "qkan.QKan", "line_number": 143, "usage_type": "name"}]} +{"seq_id": "22227134232", "text": "from django.urls import path\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"create\", views.create, name=\"create\"),\n path(\"categories\", views.categories, name=\"categories\"),\n path(\"categories/\", views.category, name=\"category\"),\n path(\"item/\", views.item_view, name=\"listing\"),\n path(\"watchlist\", views.watchlist, name=\"watchlist\"),\n path(\n \"watchlist/add_to_watchlist/\",\n views.add_to_watchlist,\n name=\"user_wishlist\",\n ),\n path(\"bid_to_listing/\", views.bid_to_listing, name=\"bid_to_listing\"),\n path(\"delete_product/\", views.delete_product, name=\"delete_product\"),\n path(\"close_listing/\", views.close_listing, name=\"close_listing\"),\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "repo_name": "Muxsidov/E-commerce-django", "sub_path": "auctions/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1097, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.static.static", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 26, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 26, "usage_type": "attribute"}]} +{"seq_id": "30378062797", "text": "import ast\nimport hashlib\n\nimport pandas as pd\nimport streamlit as st\nfrom streamlit_extras.switch_page_button import switch_page\nfrom st_aggrid import AgGrid, GridUpdateMode\nfrom st_aggrid.grid_options_builder import GridOptionsBuilder\n\nimport plotly.express as px\nimport plotly.graph_objects as go\n\nfrom wordcloud import WordCloud\n\nfrom utils import (\n streamlit_header_and_footer_setup,\n top_menu,\n switch_menu,\n process_clusterdf,\n rename_clusters,\n show_wordcloud,\n compute_pca,\n join_pca_cluster_dfs,\n get_topk_clusters,\n)\nfrom config import config\n\nCURRENT_PAGE = \"Import\"\nst.set_page_config(\n page_title=\"Import/compute Cohere clusters\",\n page_icon=\"📈\",\n initial_sidebar_state=config[\"sidebar\"],\n layout=config[\"layout\"],\n)\n# Render header/footer\nstreamlit_header_and_footer_setup()\n# Set menu index\nmenu = top_menu(1)\nst.session_state[\"current_page\"] = CURRENT_PAGE\nif menu:\n switch_menu(menu)\n\n\nst.title(\"Import cluster 
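In the urls.py record above, the angle-bracket path converters appear to have been stripped by the extraction (routes such as item/ and categories/ clearly expect a captured segment, since the views receive arguments). A minimal sketch of how a converter-captured segment reaches a Django view; the converter and argument names here are hypothetical:

from django.http import HttpResponse
from django.urls import path

def item_view(request, item_id):
    # item_id arrives already converted to int by the <int:item_id> segment.
    return HttpResponse(f"listing #{item_id}")

urlpatterns = [
    path("item/<int:item_id>", item_view, name="listing"),
]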
data\")\nst.write(\n \"\"\"\n You've precomputed your text embeddings and clusters and want to visualize them!\n Select how you'd like to upload your data and then hit **Next** to visualize the output.\n \"\"\"\n)\nuploaded_file, path_to_gcs = None, None\ntext_column = \"text\"\nst.session_state[\"processed_df\"] = None\nst.session_state[\"cluster_df\"] = None\nst.session_state[\"co_cluster_output\"] = None\nst.session_state[\"embed_df\"] = None\n# st.session_state[\"pca_df\"] = None\n\nCLUSTER_COLUMNS = [\"description\", \"keywords\", \"elements\", \"cluster_id\", \"text_ids\"]\n\n# give user the choice to upload from filesystem or a bucket\nupload_options = [\"file-system\", \"cloud-bucket\"]\nupload_choice = st.radio(\"Upload type\", upload_options)\nif upload_choice == \"file-system\":\n embed_file = st.file_uploader(\"Choose an embeddings output file\", type=[\"jsonl\"])\n if embed_file is not None:\n embed_df = pd.read_json(embed_file, lines=True)\n st.session_state[\"embed_df\"] = embed_df\n\n uploaded_file = st.file_uploader(\n \"Choose a cluster output file\", type=[\"csv\", \"jsonl\"]\n )\n co_cluster_output = False\n if uploaded_file is not None:\n # JSONL file support - This would be an output of co.cluster\n if \".jsonl\" in uploaded_file.name:\n co_cluster_output = True\n # Can be used wherever a \"file-like\" object is accepted:\n df = pd.read_json(uploaded_file, lines=True)\n assert set(CLUSTER_COLUMNS).issubset(\n set(df.columns)\n ), f\"Expected file to contain columns: {CLUSTER_COLUMNS}\"\n df = df[CLUSTER_COLUMNS]\n df = process_clusterdf(df)\n\n st.session_state[\"processed_df\"] = df\n st.session_state[\"text_column\"] = text_column\n else:\n df = pd.read_csv(uploaded_file)\n file_name = uploaded_file.name\n file_id = uploaded_file.id\n file_size = uploaded_file.size\n\nelse:\n path_to_gcs = st.text_input(\"Insert a path to the output of `co.cluster()`.\")\n if path_to_gcs:\n if \".jsonl\" in path_to_gcs:\n co_cluster_output = True\n # Can be used wherever a \"file-like\" object is accepted:\n df = pd.read_json(path_to_gcs, lines=True)\n assert set(CLUSTER_COLUMNS).issubset(\n set(df.columns)\n ), f\"Expected file to contain columns: {CLUSTER_COLUMNS}\"\n df = df[CLUSTER_COLUMNS]\n df = process_clusterdf(df)\n st.session_state[\"processed_df\"] = df\n st.session_state[\"text_column\"] = \"text\"\n else:\n df = pd.read_csv(path_to_gcs)\n file_name = path_to_gcs\n file_id = (\n int(hashlib.sha256(file_name.encode(\"utf-8\")).hexdigest(), 16) % 10**8\n )\n file_size = len(df)\n\n# if uploaded_file is not None or path_to_gcs is not None:\nif st.session_state[\"processed_df\"] is not None:\n # Create a file UUID to be used for file specific caching\n file_uuid = f'{file_name.split(\".\")[0]}_{file_id}_{file_size}'\n\n st.session_state[\"df\"] = df\n st.session_state[\"import_done\"] = True\n # st.session_state[\"co_cluster_output\"] = co_cluster_output\n # st.session_state[\"file_uuid\"] = file_uuid\n if \"pca_df\" not in st.session_state:\n with st.spinner(\"Reducing dimensionality of embeddings\"):\n pca_df = compute_pca(embed_df)\n st.session_state[\"pca_df\"] = pca_df\n st.success(\"Success\")\n st.write(df)\n next = st.button(\"Next\")\n # Delete state of text column on each new dataset import\n # if \"text_column\" in st.session_state and not co_cluster_output:\n # del st.session_state[\"text_column\"]\n if next:\n switch_page(\"Visualize\")\n # else:\n # switch_page(\"Cluster\")\n", "repo_name": "nabsabraham/co-demo-public", "sub_path": "pages/Import.py", 
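The Import page above guards the PCA step with an st.session_state membership test so the expensive computation survives Streamlit's rerun-on-every-interaction model. The guard pattern in isolation; compute_pca below is a placeholder, not the project's helper:

import pandas as pd
import streamlit as st

def compute_pca(df: pd.DataFrame) -> pd.DataFrame:
    return df  # placeholder for the real dimensionality reduction

embed_df = pd.DataFrame({"dim0": [0.1, 0.2], "dim1": [0.3, 0.4]})
# Streamlit reruns the whole script on each widget event; session_state persists.
if "pca_df" not in st.session_state:
    with st.spinner("Reducing dimensionality of embeddings"):
        st.session_state["pca_df"] = compute_pca(embed_df)
st.write(st.session_state["pca_df"])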
"file_name": "Import.py", "file_ext": "py", "file_size_in_byte": 4744, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "streamlit.set_page_config", "line_number": 29, "usage_type": "call"}, {"api_name": "config.config", "line_number": 32, "usage_type": "name"}, {"api_name": "config.config", "line_number": 33, "usage_type": "name"}, {"api_name": "utils.streamlit_header_and_footer_setup", "line_number": 36, "usage_type": "call"}, {"api_name": "utils.top_menu", "line_number": 38, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 39, "usage_type": "attribute"}, {"api_name": "utils.switch_menu", "line_number": 41, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 44, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 45, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 53, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 54, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 55, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 56, "usage_type": "attribute"}, {"api_name": "streamlit.radio", "line_number": 63, "usage_type": "call"}, {"api_name": "streamlit.file_uploader", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 67, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 68, "usage_type": "attribute"}, {"api_name": "streamlit.file_uploader", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 79, "usage_type": "call"}, {"api_name": "utils.process_clusterdf", "line_number": 84, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 86, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 89, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 100, "usage_type": "call"}, {"api_name": "utils.process_clusterdf", "line_number": 105, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 106, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 109, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 112, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 117, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 121, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 122, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 125, "usage_type": "attribute"}, {"api_name": "streamlit.spinner", "line_number": 126, "usage_type": "call"}, {"api_name": "utils.compute_pca", "line_number": 127, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 128, "usage_type": "attribute"}, {"api_name": "streamlit.success", "line_number": 129, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 130, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 131, "usage_type": "call"}, {"api_name": "streamlit_extras.switch_page_button.switch_page", "line_number": 136, "usage_type": "call"}]} 
+{"seq_id": "69901481012", "text": "# -*- coding: utf-8 -*-\r\n\r\nfrom pathlib import Path\r\nimport requests\r\nfrom flask import Flask, request, Response\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/')\r\ndef top_page():\r\n res = requests.get('https://ipinfo.io')\r\n return res.content\r\n\r\n\r\n@app.route('/torrc')\r\ndef torrc():\r\n return Response(Path('torrc').read_bytes(), mimetype='text/plain')\r\n\r\n\r\n@app.route('/test/')\r\ndef test():\r\n url = request.args.get('url', 'https://ipinfo.io')\r\n headers = {\r\n 'User-Agent': request.headers.get('User-Agent')\r\n }\r\n if 'tor' in request.args:\r\n proxies = {\r\n 'http': 'socks5://127.0.0.1:9050',\r\n 'https': 'socks5://127.0.0.1:9050'\r\n }\r\n else:\r\n proxies = {}\r\n res = requests.get(url, headers=headers, proxies=proxies)\r\n return res.content\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(host='0.0.0.0', port=5000, debug=True)\r\n", "repo_name": "kairi003/tor_test", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 897, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 18, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "70534927414", "text": "from flask import Flask\nfrom flask import request\nimport trim\nimport get\n\napp = Flask(__name__)\n\ndef convert_to_dict(converting_list):\n texts = {}\n for i, text in enumerate(converting_list):\n texts[i] = text\n return texts\n\n@app.route('/trim/text')\ndef route_trim_text():\n text = request.args.get('text')\n trimed = trim.trim_text(text)\n return convert_to_dict(trimed)\n\n@app.route('/trim/url')\ndef route_trim_url():\n url = request.args.get('url')\n print(url)\n text = get.get_text_of_webpage(url)\n print(text[0:100])\n trimed = trim.trim_text(text)\n print(trimed)\n return convert_to_dict(trimed)", "repo_name": "thearian/litex-engine", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 637, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "trim.trim_text", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.request.args", 
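The /test/ route in the Tor snippet above switches requests onto the local SOCKS5 listener just by passing a proxies mapping; this needs the PySocks extra (pip install requests[socks]), and socks5h:// instead of socks5:// would also push DNS resolution through Tor. A standalone check of the same mechanism, assuming a Tor daemon on the default port:

import requests

proxies = {
    "http": "socks5://127.0.0.1:9050",
    "https": "socks5://127.0.0.1:9050",
}
direct = requests.get("https://ipinfo.io", timeout=10).json()
via_tor = requests.get("https://ipinfo.io", proxies=proxies, timeout=10).json()
print(direct.get("ip"), "->", via_tor.get("ip"))  # the two exit IPs should differ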
"line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "get.get_text_of_webpage", "line_number": 24, "usage_type": "call"}, {"api_name": "trim.trim_text", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "7415900081", "text": "'''\nTests the text files listed in list.json to make sure that all characters\nare readable\n'''\nimport sys\nimport json\nimport string\n\n\ndef test():\n with open('list.json', 'r') as data_file:\n fileArray = json.load(data_file)\n\n print(fileArray)\n\n for file_name in fileArray:\n print(\"Reading \", file_name)\n f = open(file_name, 'rU')\n raw = f.read()\n\n print(\"Read all\")\n\nif __name__ == \"__main__\":\n test()\n", "repo_name": "jenjohnson7/NounVerbAssociator", "sub_path": "python/test_charmap.py", "file_name": "test_charmap.py", "file_ext": "py", "file_size_in_byte": 444, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "json.load", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "6533224916", "text": "import datetime as dt\n\ndata_siswa1 = {\n'name' : 'ujang',\n'nis' : 1021213382,\n'sks' : 35,\n'lahir' : dt.datetime(2006,5,31)\n}\ndata_siswa2 = {\n'name' : 'ujanang',\n'nis' : 1021213382,\n'sks' : 35,\n'lahir' : dt.datetime(2076,5,31)\n}\ndata_siswa3 = {\n'name' : 'ujasjaiang',\n'nis' : 1021213382,\n'sks' : 35,\n'lahir' : dt.datetime(2012,5,31)\n}\n\ndata = {\n'uhuy':data_siswa1,\n'ulala':data_siswa2,\n'ukek':data_siswa3,\n}\n\nprint(f'{\"KEY\":<7} {\"NAME\":<20} {\"NIS\":<13} {\"SKS\":<6} {\"LAHIR\":<8}')\nprint(''.center(60,\"\\'\"))\nfor siswa in data.keys():\n\tKEY = siswa\n\tNAME = data[KEY]['name']\n\tNIS = data[KEY]['nis']\n\tSKS = data[KEY]['sks']\n\tLAHIR = data[KEY]['lahir'].strftime('%x')\n\t\n\tprint(f'{KEY:<7} {NAME:<20} {NIS:<13} {SKS:<6} {LAHIR:<8}')\n\t", "repo_name": "faridanang/hasil-belajar-python", "sub_path": "Python Dasar/Belajar Dictionary Dasar/42. multi keys & nesting dict.py", "file_name": "42. 
multi keys & nesting dict.py", "file_ext": "py", "file_size_in_byte": 723, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "datetime.datetime", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "40113535859", "text": "import sys\nfrom typing import List\n\ndef sieveOfEratosthenes(n:int) -> List[int]:\n isPrime = [False] * 2 + [True] * (n-1)\n for x in range(2, int(n**0.5)+1):\n if isPrime[x]:\n for i in range(x**2, n+1, x):\n isPrime[i] = False\n return [i for i, b in enumerate(isPrime) if b]\n\nif __name__ == '__main__':\n n = int(sys.argv[1])\n print(sieveOfEratosthenes(n))\n", "repo_name": "yoonseonchoi/ProblemSolving", "sub_path": "Practice13/sieveOfEratosthenes.py", "file_name": "sieveOfEratosthenes.py", "file_ext": "py", "file_size_in_byte": 400, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.List", "line_number": 4, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}]} +{"seq_id": "72063110773", "text": "# -*- coding: UTF-8 -*-\nimport os\nimport re\nimport pytz\nimport json\nimport traceback\nfrom datetime import datetime\nfrom ..utils.EmailUtil import send_email\nfrom pyinotify import WatchManager, Notifier, ProcessEvent, IN_MODIFY\n\n\npattern = '''(?P[\\d\\.]{7,}) - - (?:\\[(?P[^\\[\\]]+)\\]) \"(?P[^\"]+)\" (?P\\d+) (?P\\d+) \"(?P[^\"]+)\" \"(?P[^\"]+)\" \"(?:[^\"]+)\"'''\nlog_path = '/var/log/nginx/access.log'\nfile = None\ntz = pytz.timezone('Asia/Shanghai')\npic_name = 'px1.gif'\naccess_log = 'access.log'\nemail_subject = 'nginx ' + pic_name\n\n\n'''\n183.206.18.237 - - [13/Apr/2019:12:49:16 -0400] \"GET /tm.jpg HTTP/1.1\" 304 0 \"https://mail.qq.com/\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36\" \"-\"\n'''\n\nops = {\n 'datetime': lambda timestr: datetime.strptime(timestr, \"%d/%b/%Y:%H:%M:%S %z\").astimezone(tz).strftime('%Y-%m-%d:%H:%M:%S'),\n 'status': int,\n 'size': int\n}\n\n\ndef extract(line):\n regex = re.compile(pattern)\n matcher = regex.match(line)\n if matcher:\n return {k: ops.get(k, lambda x: x)(v) for k, v in matcher.groupdict().items()}\n else:\n raise Exception('No match')\n\n\nclass ProcessTransientFile(ProcessEvent):\n def process_IN_MODIFY(self, event):\n print(\"Modify file: %s \" % os.path.join(event.path, event.name))\n global file\n line = file.readline()\n print(line)\n if line and line.find(pic_name) != -1:\n try:\n info = extract(line)\n json_str = json.dumps(info)\n send_email(email_subject, json.dumps(info, indent=4))\n with open(access_log, 'a') as f:\n f.write(json_str + '\\n')\n except Exception as e:\n err = traceback.format_exc()\n send_email(email_subject, err)\n\n\ndef monitor(file_name='.'):\n global file\n file = open(file_name, 'r')\n st_results = os.stat(file_name)\n st_size = st_results[6]\n file.seek(st_size)\n wm = WatchManager()\n notifier = Notifier(wm)\n wm.watch_transient_file(file_name, IN_MODIFY, ProcessTransientFile)\n print('now starting monitor %s' % file_name)\n notifier.loop()\n\n\nif __name__ == \"__main__\":\n monitor(log_path)\n", "repo_name": "luohaoGit/pybots", "sub_path": "nginxLog/NgxLog.py", "file_name": "NgxLog.py", "file_ext": "py", "file_size_in_byte": 2309, 
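The sieve record above strikes out multiples starting from x**2 inside a Python loop; with NumPy the inner loop collapses into one boolean slice assignment. A sketch of that vectorized variant:

import numpy as np

def sieve(n: int) -> np.ndarray:
    is_prime = np.ones(n + 1, dtype=bool)
    is_prime[:2] = False                  # 0 and 1 are not prime
    for x in range(2, int(n ** 0.5) + 1):
        if is_prime[x]:
            is_prime[x * x :: x] = False  # strike every multiple of x at once
    return np.flatnonzero(is_prime)

print(sieve(30))  # [ 2  3  5  7 11 13 17 19 23 29]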
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pytz.timezone", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 33, "usage_type": "call"}, {"api_name": "pyinotify.ProcessEvent", "line_number": 41, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.EmailUtil.send_email", "line_number": 51, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 51, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 55, "usage_type": "call"}, {"api_name": "utils.EmailUtil.send_email", "line_number": 56, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 62, "usage_type": "call"}, {"api_name": "pyinotify.WatchManager", "line_number": 65, "usage_type": "call"}, {"api_name": "pyinotify.Notifier", "line_number": 66, "usage_type": "call"}, {"api_name": "pyinotify.IN_MODIFY", "line_number": 67, "usage_type": "argument"}]} +{"seq_id": "37205748252", "text": "from asyncio.windows_events import NULL\nfrom base64 import encode\nfrom datetime import datetime\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport os \n\n\noptions = webdriver.EdgeOptions()\noptions.add_experimental_option(\"debuggerAddress\", \"127.0.0.1:9222\")\ndriver = webdriver.Edge( options=options)\n\nfile_list = os.listdir(\"D:\\code\\official-images\\library\")\n\nfor name in file_list:\n file = open('D:\\code\\official-images\\library\\\\'+name)\n lines = file.readlines()\n for line in lines:\n if line.startswith(\"GitRepo:\"):\n uri = line.replace(\"GitRepo: https://github.com/\",\"\").strip(\"\\n\").strip(\"\\t\").strip(\".git\")\n repo = uri.split(\"/\")[-1]\n if not os.path.exists('dockerfile\\\\'+repo):\n os.mkdir('dockerfile\\\\'+repo)\n else:\n continue\n driver.get(\"https://github.com/search?q=&type=Code\")\n text = driver.find_element_by_name(\"q\")\n text.send_keys(\"repo:\"+uri+\" language:Dockerfile\")\n submit = driver.find_element_by_xpath('/html/body/div[5]/main/div[1]/div[1]/form/div/button')\n submit.click()\n time.sleep(1)\n code = driver.find_element_by_xpath('/html/body/div[5]/main/div[1]/div[2]/nav[1]/a[2]')\n code.click()\n time.sleep(1.5)\n dockerfiles = driver.find_elements_by_xpath('/html/body/div[5]/main/div[2]/div[3]/div[1]/div[2]/div[1]/div')\n print(len(dockerfiles))\n origin_window = driver.current_window_handle\n for i in range(len(dockerfiles)):\n print(i)\n dockerfile = driver.find_element_by_xpath('/html/body/div[5]/main/div[2]/div[3]/div[1]/div[2]/div[1]/div['+str(i+1)+']/div[1]/div[2]/a')\n href = dockerfile.get_attribute('href')\n print(href)\n driver.execute_script(f'window.open(\"{href}\");')\n driver.switch_to.window(driver.window_handles[-1])\n time.sleep(1)\n raw = driver.find_element_by_xpath('/html/body/div[5]/div[1]/main/div[2]/div[1]/div[1]/div[4]/div[1]/div[2]/div[1]/a[1]')\n raw.click()\n time.sleep(1)\n dockerfile_raw = driver.find_element_by_xpath('html/body/pre')\n text = dockerfile_raw.text\n out_file = 
open(\"dockerfile\\\\\"+repo+\"\\\\\"+str(i)+\".dockerfile\",\"w+\",encoding=\"utf-8\")\n print(text,file=out_file)\n out_file.close()\n driver.close()\n driver.switch_to.window(origin_window)\n # repo:TimWolla/docker-adminer language:Dockerfile\n", "repo_name": "copyrightpoiiiii/dockerfile-archive", "sub_path": "script.py", "file_name": "script.py", "file_ext": "py", "file_size_in_byte": 2784, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "selenium.webdriver.EdgeOptions", "line_number": 12, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 12, "usage_type": "name"}, {"api_name": "selenium.webdriver.Edge", "line_number": 14, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 14, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 26, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 34, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 37, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "36882003973", "text": "import json\nimport linecache\nimport os\nimport re\n\nimport jieba\nimport numpy as np\nfrom acora import AcoraBuilder\n\nfrom emotion_cla.emo_cls import classify\nfrom emotion_cla.separate import separate\n\nin_dir = 'data/tweet'\nout_dir = 'data/tweet_emo'\nbuilder = AcoraBuilder([line.strip() for line in open('data/emoji.txt')])\nac = builder.build()\n\n\ndef load_labelled():\n lines = set()\n for i in range(5):\n for line in open('data/content_3000/{}.txt'.format(i)):\n lines.add(line.strip())\n return lines\n# have_lines = load_labelled()\n\n\ndef random_ids(in_name, out_name, lens):\n '''\n 随机选择文本的行\n '''\n global have_lines\n out_file = open(out_name, 'a')\n ids = set()\n _max = len(open(in_name).readlines())\n while len(ids) < lens:\n num = int(_max * np.random.random())\n if num in ids:\n continue\n line = linecache.getline(in_name, num)\n y, X = line.strip().split('\\t')\n if line not in have_lines:\n ids.add(num)\n out_file.write(X + '\\n')\n out_file.close()\n return ids\n\n\ndef pre_label():\n '''\n 打预标签\n '''\n for i, in_name in enumerate(os.listdir(in_dir)):\n print(i)\n stock_name = in_name\n in_name = os.path.join(in_dir, in_name)\n for j, line in enumerate(open(in_name)):\n d = json.loads(line)\n d['content_pre_emo'] = classify(separate(d['content']))\n d['title_pre_emo'] = classify(separate(d['title']))\n with open('{}/{}'.format(out_dir, stock_name), 'a') as f:\n f.write(json.dumps(d, ensure_ascii=False) + '\\n')\n\n\ndef get_train_data(in_name):\n for line in open(in_name):\n d = json.loads(line.strip())\n content = d['content']\n # title = d['title']\n # t_emo = d['title_pre_emo']\n c_emo = d['content_pre_emo']\n\n # 标题和内容中要有一个有表情符\n # if not (re.search('\\\\[\\\\S+\\\\]', title) or re.search('\\\\[\\\\S+\\\\]', content)):\n\n # bingo = False\n # for kw, pos in ac.finditer(content):\n # bingo = True\n # break\n\n # if not re.search('\\\\[\\\\S+\\\\]', content):\n # print('不满足要求 ...')\n # continue\n\n bingo = True\n if bingo:\n # 内容长度5到200\n if 10 < len(content) < 200:\n with open('data/content/{}.txt'.format(c_emo), 'a') as f:\n f.write(str(c_emo) + '\\t' + 
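The NgxLog watcher above pairs a named-group regex over the access-log line with a per-field ops table, so matching and type conversion stay in one dict comprehension. The core of that pattern, runnable against the sample line quoted in the snippet:

import re
from datetime import datetime

pattern = (r'(?P<remote>[\d\.]+) - - \[(?P<datetime>[^\]]+)\] '
           r'"(?P<request>[^"]+)" (?P<status>\d+) (?P<size>\d+)')
ops = {"status": int, "size": int,
       "datetime": lambda s: datetime.strptime(s, "%d/%b/%Y:%H:%M:%S %z")}

line = ('183.206.18.237 - - [13/Apr/2019:12:49:16 -0400] '
        '"GET /tm.jpg HTTP/1.1" 304 0')
m = re.match(pattern, line)
info = {k: ops.get(k, lambda v: v)(v) for k, v in m.groupdict().items()}
print(info)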
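The Dockerfile crawler above opens every search hit in a fresh tab, switches the driver to it, reads the raw file, then hops back; note the find_element_by_* helpers it uses were removed in Selenium 4 in favour of find_element(By...). The tab-juggling idiom in isolation, with an illustrative URL:

from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Edge()
driver.get("https://example.com/")
origin = driver.current_window_handle
driver.execute_script('window.open("https://example.com/");')  # second tab
driver.switch_to.window(driver.window_handles[-1])  # newest handle is last
print(driver.find_element(By.TAG_NAME, "h1").text)
driver.close()                            # close the extra tab...
driver.switch_to.window(origin)           # ...and return to the results tab
driver.quit()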
content + '\\n')\n\n # with open('data/title/{}.txt'.format(t_emo), 'a') as f:\n # f.write(str(t_emo) + '\\t' + title + '\\n')\n\n\ndef label_split(in_name):\n \"\"\"\n 分割数据,用于数据标注划分\n \"\"\"\n index = 0\n for line in open(in_name):\n with open(in_name[:-4] + '-({}).txt'.format(int(index / 500 + 1)), 'a') as f:\n f.write(line)\n print(index, int(index / 500 + 1))\n index += 1\n\n\ndef what_the_fuck():\n \"\"\"\n 将已经标注的数据按情绪分类\n \"\"\"\n labels = []\n in_dir = 'data/labelled'\n\n for in_name in os.listdir(in_dir):\n _in = os.path.join(in_dir, in_name)\n # print(_in)\n for i, line in enumerate(open(_in)):\n if line.strip() == '':\n continue\n label = line.split('\\t')[0]\n s= line.split('\\t')[1]\n # 1234:四种情绪,-:没有情绪,x:不确定\n if label in ['1', '2', '3', '4', '-']:\n if label == '-':\n label = '0'\n with open('data/labelled_split/{}.txt'.format(label), 'a') as f:\n f.write(line)\n\n\nif __name__ == '__main__':\n # for line in open('data/random_ids.txt'):\n # # for line in open('data/_id.txt'):\n # line = line.strip().split(',')[0]\n # print(line)\n # in_name = 'data/tweet_emo/' + line.strip() + '.txt'\n # get_train_data(in_name)\n\n\n # random_ids('data/_id.txt', 100)\n # get_train_data('data/002446.txt')\n\n # for i in range(5):\n # random_ids('data/content/{}.txt'.format(i), 'data/content_sample_3000/{}.txt'.format(i), 3000)\n\n\n # for i in range(1, 5):\n # label_split('data/content_3000/{}.txt'.format(i))\n\n what_the_fuck()\n", "repo_name": "kayzhou/Guba_emotion", "sub_path": "pre_emotion.py", "file_name": "pre_emotion.py", "file_ext": "py", "file_size_in_byte": 4250, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "21", "api": [{"api_name": "acora.AcoraBuilder", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 37, "usage_type": "attribute"}, {"api_name": "linecache.getline", "line_number": 40, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 58, "usage_type": "call"}, {"api_name": "emotion_cla.emo_cls.classify", "line_number": 59, "usage_type": "call"}, {"api_name": "emotion_cla.separate.separate", "line_number": 59, "usage_type": "call"}, {"api_name": "emotion_cla.emo_cls.classify", "line_number": 60, "usage_type": "call"}, {"api_name": "emotion_cla.separate.separate", "line_number": 60, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 62, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 67, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}]} +{"seq_id": "7259794002", "text": "import sys\nfrom argparse import ArgumentParser\nfrom traceback import print_exc\n\nfrom .lexer import BaseLexer, FileLexer, StringLexer\nfrom .exception import KoiLangError\nfrom .lib import load_library, main_class_from_module\nfrom .lib.debugger import KoiLangRunner, CommandDebugger\n\nfrom . 
import __version__\n\n\ndef _read_stdin() -> str:\n try:\n sys.stdout.write(\"$kola: \")\n sys.stdout.flush()\n s = sys.stdin.readline()\n while s.endswith(\"\\\\\\n\"):\n sys.stdout.write(\"$...: \")\n sys.stdout.flush()\n s += sys.stdin.readline()\n except KeyboardInterrupt:\n sys.exit()\n return s\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(\"kola\")\n parser.add_argument(\"file\", default=None, nargs=\"?\")\n parser.add_argument(\"-i\", \"-c\", \"--inline\", help=\"parse inline string\")\n parser.add_argument(\"-s\", \"--script\", help=\"parser script\")\n parser.add_argument(\"-d\", \"--debug\", help=\"dubugger type\", choices=[\"token\", \"command\"])\n parser.add_argument(\"--encoding\", help=\"file encoding\", default=\"utf-8\")\n\n namespace = parser.parse_args()\n\n encoding = namespace.encoding\n if namespace.file:\n lexer = FileLexer(namespace.file, encoding=encoding)\n elif namespace.inline:\n lexer = StringLexer(namespace.inline, encoding=encoding)\n elif not sys.stdin.isatty():\n lexer = BaseLexer(encoding=encoding)\n else:\n lexer = None\n\n runner_type = \"Runner\"\n if namespace.debug == \"token\":\n runner_type = \"Token Debugger\"\n if lexer is None:\n print(f\"KoiLang {runner_type} {__version__} on Python {sys.version}\")\n lexer = BaseLexer()\n while True:\n try:\n for i in lexer:\n print(i)\n break\n except KeyboardInterrupt:\n break\n except KoiLangError:\n print_exc()\n else:\n if namespace.debug == \"command\":\n command_cls = CommandDebugger\n runner_type = \"Command Debugger\"\n elif namespace.script:\n command_cls = main_class_from_module(load_library(namespace.script))\n else:\n command_cls = KoiLangRunner\n\n with command_cls().exec_block() as command_set:\n if lexer:\n command_set.parse(lexer)\n else:\n print(f\"KoiLang {runner_type} {__version__} on Python {sys.version}\")\n while True:\n command_set.parse(_read_stdin())\n", "repo_name": "Ovizro/Kola", "sub_path": "kola/__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 2525, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.stdout.write", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 23, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 28, "usage_type": "call"}, {"api_name": "lexer.FileLexer", "line_number": 39, "usage_type": "call"}, {"api_name": "lexer.StringLexer", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.stdin.isatty", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 42, "usage_type": "attribute"}, {"api_name": "lexer.BaseLexer", "line_number": 43, 
"usage_type": "call"}, {"api_name": "sys.version", "line_number": 51, "usage_type": "attribute"}, {"api_name": "lexer.BaseLexer", "line_number": 52, "usage_type": "call"}, {"api_name": "exception.KoiLangError", "line_number": 60, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 61, "usage_type": "call"}, {"api_name": "lib.debugger.CommandDebugger", "line_number": 64, "usage_type": "name"}, {"api_name": "lib.main_class_from_module", "line_number": 67, "usage_type": "call"}, {"api_name": "lib.load_library", "line_number": 67, "usage_type": "call"}, {"api_name": "lib.debugger.KoiLangRunner", "line_number": 69, "usage_type": "name"}, {"api_name": "sys.version", "line_number": 75, "usage_type": "attribute"}]} +{"seq_id": "28591055536", "text": "# Import required packages\nimport cv2\nimport pytesseract\nimport os\nimport re\nimport numpy as np\n\nclass TikTokTextDetector:\n \"\"\"\n A class used to detect text in tiktok videos and save them in a file\n\n ...\n\n Attributes\n ----------\n path_mp4 : str\n path where the mp4 video is stored\n path_text : int\n path where to store the resulting text\n\n Methods\n -------\n prepare_video_and_file(self, mp4)\n Load the video and prepare the text file.\n Returns the video and the name of the text file\n\n pre_processing(self, frame)\n Pre-processing of a frame before the text detection\n Returns the processed frame\n\n clean_and_save_text(self, text, name_mp4_file)\n Clean the text detected by pytesseract.\n Removes newlines, trailing spaces etc...\n TODO later : Remove the garbage words that means nothing\n Returns the cleaned text\n\n print_every_N_frames(self, framenbr, N= 100)\n Print the evolution of the dectection to see the advancement\n\n detect_text(self, mp4)\n Detect the texts in a video\n Load the video, read each frame, pre-process them, detect the text\n Save detected text in the text folder (defined in the initialization)\n \"\"\"\n\n def __init__(self, path_mp4= \"video\", path_text= \"detected_text\") -> None:\n # Mention the installed location of Tesseract-OCR in your system\n # If on windows :\n # 1. Install tesseract using windows installer available at: https://github.com/UB-Mannheim/tesseract/wiki\n\n # 2. Note the tesseract path from the installation. Default installation path at the time of this edit was: C:\\Users\\USER\\AppData\\Local\\Tesseract-OCR\\\\tesseract.exe. It may change so please check the installation path.\n\n # 3. pip install pytesseract \n\n # 4. Set the tesseract path in the script\n # If on Mac/Linux :\n # Install it and set the path to \"/usr/local/bin/tesseract\" or whatever directory it got installed\n # You can find the directory with the shell command \"where tesseract\"\n pytesseract.pytesseract.tesseract_cmd = '/usr/local/bin/tesseract'\n\n # Path the mp4 video we want to process\n self.path_mp4 = path_mp4\n self.path_text = path_text\n\n \n def prepare_video_and_file(self, mp4) :\n \"\"\"\n Load the video and prepare the text file.\n It will delete the content of the previous file if it exists\n\n Parameters :\n ----------\n mp4 : str\n The name of the file, with the .mp4 extension\n\n Returns :\n ----------\n vidcap : VideoCapture \n VideoCapture instance of cv2. 
Will be used to read the frames\n \n name_file : the name of the text file in its directory, without the .txt extension\n \"\"\"\n\n vidcap = cv2.VideoCapture(f\"{self.path_mp4}/{mp4}\")\n\n name_file = os.path.splitext(mp4)[0] # name of file without the .mp4 extension\n\n # Save text into file\n # create a directory to store the text if it doesn't exist\n if not os.path.isdir(self.path_text):\n os.mkdir(self.path_text)\n f = open(f\"{self.path_text}/{name_file}.txt\", \"w+\")\n f.write(\"\")\n f.close()\n\n return vidcap, name_file\n\n def pre_processing(self, frame) :\n \"\"\"\n Pre-process the frame before the contour detection\n You can choose which pre-processing to do here\n Definitely a good way to improve the performance \n\n Parameters :\n ----------\n frame : OutputArray\n Current frame\n\n Returns :\n ----------\n dilation : OutputArray \n Pre-processed frame\n \"\"\"\n\n # Preprocessing the image starts\n invert = cv2.bitwise_not(frame)\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Performing OTSU threshold\n ret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)\n\n # Specify structure shape and kernel size.\n # Kernel size increases or decreases the area\n # of the rectangle to be detected.\n # A smaller value like (10, 10) will detect\n # each word instead of a sentence.\n rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (18, 18))\n\n\n\n cv2.imshow(\"ntm\", thresh1)\n cv2.waitKey(0)\n\n # Applying dilation on the threshold image\n dilation = cv2.dilate(thresh1, rect_kernel, iterations = 1)\n\n return dilation\n\n def clean_and_save_text(self, text, name_txt_file):\n \"\"\"\n Clean the detected text and ignore the newlines or trailing spaces\n Improves the visualization of the results in the .txt files \n Saves the cleaned text in the .txt file \n\n Parameters :\n ----------\n text : str\n Detected text \n \n name_mp4_file : str\n name of the txt file in its directory, without the .txt extension\n \"\"\"\n \n # Post-processing the text : We remove the blanks and newlines\n if text != '\\x0c' or text != ' \\x0c' :\n # Remove unnecessary \\n\n text = re.sub(r'( )?\\n( )?(( )?\\n( )?)*( )?' 
, r' ',text)\n # Remove unnecessary new lines\n text = re.sub(r'( )?\\x0c( )?(( )?\\x0c( )?)*( )?', r' ', text) \n # Remove trailing spaces\n text = text.strip()\n if text != '':\n # Open the file in append mode\n file = open(f\"{self.path_text}/{name_txt_file}.txt\", \"a\")\n file.write(text)\n file.write(\"\\n\")\n \n # Close the file\n file.close\n\n def print_every_N_frames(self, framenbr, N= 100) :\n \"\"\"\n Print the evolution of the detection every N frames\n\n Parameters :\n ----------\n framenbr : int\n Current count of frames processed \n \n N : int\n Number of frames on which we print the evolution\n \"\"\"\n\n if framenbr % N == 0 :\n if framenbr != 0 :\n print(f\"Processed frames : {framenbr - 100 + 1} to {framenbr}\")\n\n def detect_text(self, mp4) :\n \"\"\"\n Detects texts on a video\n Loads the video, Reads each frame, Pre-process them,\n Detects the texts on each of them and saves them in a file\n\n Parameters :\n ----------\n mp4 : str\n path to the mp4 video, with the .mp4 extension\n Must be present in self.path_mp4 directory\n \"\"\"\n\n vidcap, name_file = self.prepare_video_and_file(mp4)\n\n success,frame = vidcap.read()\n framenbr = 0\n while success:\n\n framenbr += 1\n self.print_every_N_frames(framenbr, N=100)\n\n success,frame = vidcap.read()\n if not success: # Means we reached the end of video\n break\n # We process only 1 every 10 frames to avoid redundacies and save time\n if framenbr % 10 != 1 :\n continue\n \n pre_proc_frame = self.pre_processing(frame)\n\n contours, hierarchy = cv2.findContours(pre_proc_frame, cv2.RETR_EXTERNAL,\n\t\t\t\t\t\t\t\t\t\t\t\tcv2.CHAIN_APPROX_NONE)\n # Creating a copy of image\n im2 = frame.copy()\n\n # Looping through the identified contours\n # Then rectangular part is cropped and passed on\n # to pytesseract for extracting text from it\n # Extracted text is then written into the text file\n for cnt in contours:\n x, y, w, h = cv2.boundingRect(cnt)\n \n # Drawing a rectangle on copied image\n rect = cv2.rectangle(im2, (x, y), (x + w, y + h), (0, 255, 0), 2)\n \n # Cropping the text block for giving input to OCR\n cropped = im2[y:y + h, x:x + w]\n \n\n\n # Apply OCR on the cropped image\n text = pytesseract.image_to_string(cropped)\n self.clean_and_save_text(text, name_file)\n \ndef main() :\n tiktokTD = TikTokTextDetector(path_mp4= \"video\", path_text= \"detected_text\")\n tiktokTD.detect_text(\"sketch-subtitles-tiktok.mp4\")\n\nif __name__ == '__main__':\n main()", "repo_name": "Aydin-ab/hate-speech-tiktok", "sub_path": "Transcription/text_detection.py", "file_name": "text_detection.py", "file_ext": "py", "file_size_in_byte": 8330, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pytesseract.pytesseract", "line_number": 59, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 91, "usage_type": "call"}, {"api_name": "cv2.bitwise_not", "line_number": 116, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 118, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 118, "usage_type": "attribute"}, {"api_name": "cv2.threshold", 
"line_number": 121, "usage_type": "call"}, {"api_name": "cv2.THRESH_OTSU", "line_number": 121, "usage_type": "attribute"}, {"api_name": "cv2.THRESH_BINARY_INV", "line_number": 121, "usage_type": "attribute"}, {"api_name": "cv2.getStructuringElement", "line_number": 128, "usage_type": "call"}, {"api_name": "cv2.MORPH_RECT", "line_number": 128, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 132, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 133, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 136, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 158, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 160, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 220, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 220, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_NONE", "line_number": 221, "usage_type": "attribute"}, {"api_name": "cv2.boundingRect", "line_number": 230, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 233, "usage_type": "call"}, {"api_name": "pytesseract.image_to_string", "line_number": 241, "usage_type": "call"}]} +{"seq_id": "19368839590", "text": "import numpy as np\nimport os\nimport cohere_core.utilities.utils as ut\nimport math\nfrom typing import Union\n\n# tensorflow will use cpu\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n# import tensorflow for trained model\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.models import Model, load_model\nfrom tensorflow.keras.activations import sigmoid, tanh\n\n\nclass Mymodel:\n __model = None\n __amp_layer_model = None\n __ph_layer_model = None\n\n @staticmethod\n def get_model(model_file):\n \"\"\" Static access method. \"\"\"\n if Mymodel.__model == None:\n Mymodel(model_file)\n return Mymodel.__amp_layer_model, Mymodel.__ph_layer_model\n\n def __init__(self, model_file):\n \"\"\" Virtually private constructor. 
\"\"\"\n if Mymodel.__model != None:\n raise Exception(\"This class is a singleton!\")\n else:\n # load trained network\n Mymodel.__model = load_model(\n model_file,\n custom_objects={\n 'tf': tf,\n 'loss_comb2_scale': loss_comb2_scale,\n 'sigmoid': sigmoid,\n 'tanh': tanh,\n 'math': math,\n 'combine_complex': combine_complex,\n 'get_mask': get_mask,\n 'ff_propagation': ff_propagation\n })\n model = Mymodel.__model\n # get the outputs from amplitude and phase layers\n Mymodel.__amp_layer_model = Model(inputs=model.input,\n outputs=model.get_layer('amp').output)\n Mymodel.__ph_layer_model = Model(inputs=model.input,\n outputs=model.get_layer('phi').output)\n\n\ndef threshold_by_edge(fp: np.ndarray) -> np.ndarray:\n # threshold by left edge value\n mask = np.ones_like(fp, dtype=bool)\n mask[tuple([slice(1, None)] * fp.ndim)] = 0\n zero = 1e-6\n cut = np.max(fp[mask])\n binary = np.zeros_like(fp)\n binary[(np.abs(fp) > zero) & (fp > cut)] = 1\n return binary\n\n\ndef select_central_object(fp: np.ndarray) -> np.ndarray:\n import scipy.ndimage as ndimage\n zero = 1e-6\n binary = np.abs(fp)\n binary[binary > zero] = 1\n binary[binary <= zero] = 0\n\n # cluster by connectivity\n struct = ndimage.morphology.generate_binary_structure(fp.ndim,\n 1).astype(\"uint8\")\n label, nlabel = ndimage.label(binary, structure=struct)\n\n # select largest cluster\n select = np.argmax(np.bincount(np.ravel(label))[1:]) + 1\n\n binary[label != select] = 0\n\n fp[binary == 0] = 0\n return fp\n\n\ndef get_central_object_extent(fp: np.ndarray) -> list:\n fp_cut = threshold_by_edge(np.abs(fp))\n need = select_central_object(fp_cut)\n\n # get extend of cluster\n extent = [np.max(s) + 1 - np.min(s) for s in np.nonzero(need)]\n return extent\n\n\ndef get_oversample_ratio(fp: np.ndarray) -> np.ndarray:\n \"\"\" get oversample ratio\n\t\tfp = diffraction pattern\n\t\"\"\"\n # autocorrelation\n acp = np.fft.fftshift(np.fft.ifftn(np.abs(fp)**2.))\n aacp = np.abs(acp)\n\n # get extent\n blob = get_central_object_extent(aacp)\n\n # correct for underestimation due to thresholding\n correction = [0.025, 0.025, 0.0729][:fp.ndim]\n\n extent = [\n min(m, s + int(round(f * aacp.shape[i], 1)))\n for i, (s, f, m) in enumerate(zip(blob, correction, aacp.shape))\n ]\n\n # oversample ratio\n oversample = [\n 2. 
* s / (e + (1 - s % 2)) for s, e in zip(aacp.shape, extent)\n ]\n return np.round(oversample, 3)\n\n\ndef Resize(IN, dim):\n ft = np.fft.fftshift(np.fft.fftn(IN)) / np.prod(IN.shape)\n\n pad_value = np.array(dim) // 2 - np.array(ft.shape) // 2\n pad = [[pad_value[0], pad_value[0]], [pad_value[1], pad_value[1]],\n [pad_value[2], pad_value[2]]]\n ft_resize = ut.adjust_dimensions(ft, pad)\n output = np.fft.ifftn(np.fft.ifftshift(ft_resize)) * np.prod(dim)\n return output\n\n\ndef match_oversample_diff(\n diff: np.ndarray,\n fr: Union[list, np.ndarray, None] = None,\n to: Union[list, np.ndarray, None] = None,\n shape: Union[list, np.ndarray, None] = [64, 64, 64],\n):\n \"\"\" resize diff to match oversample ratios \n diff = diffraction pattern\n fr = from oversample ratio\n to = to oversample ratio\n shape = output shape\n \"\"\"\n # adjustment needed to match oversample ratio\n change = [np.round(f / t).astype('int32') for f, t in zip(fr, to)]\n change = [np.max([1, c]) for c in change]\n\n diff = ut.binning(diff, change)\n # crop diff to match output shape\n shape_arr = np.array(shape)\n diff_shape_arr = np.array(diff.shape)\n pad_value1 = shape_arr // 2 - diff_shape_arr // 2\n pad_value2 = shape_arr - diff_shape_arr -pad_value1\n pad = [[pad_value1[0], pad_value2[0]], [pad_value1[1], pad_value2[1]],\n [pad_value1[2], pad_value2[2]]]\n\n output = ut.adjust_dimensions(diff, pad)\n return output, diff.shape\n\n\ndef shift_com(amp, phi):\n from scipy.ndimage.measurements import center_of_mass as com\n from scipy.ndimage.interpolation import shift\n\n h, w, t = 64, 64, 64\n coms = com(amp)\n deltas = (int(round(h / 2 - coms[0])), int(round(w / 2 - coms[1])),\n int(round(t / 2 - coms[2])))\n amp_shift = shift(amp, shift=deltas, mode='wrap')\n phi_shift = shift(phi, shift=deltas, mode='wrap')\n return amp_shift, phi_shift\n\n\ndef post_process(amp, phi, th=0.1, uw=0):\n if uw == 1:\n # phi = np.unwrap(np.unwrap(np.unwrap(phi,0),1),2)\n phi = unwrap_phase(phi)\n\n mask = np.where(amp > th, 1, 0)\n amp_out = mask * amp\n phi_out = mask * phi\n\n mean_phi = np.sum(phi_out) / np.sum(mask)\n phi_out = phi_out - mean_phi\n\n amp_out, phi_out = shift_com(amp_out, phi_out)\n\n mask = np.where(amp_out > th, 1, 0)\n amp_out = mask * amp_out\n phi_out = mask * phi_out\n return amp_out, phi_out\n\n\n# funcions needed in tensorflow model\n@tf.function\ndef combine_complex(amp, phi):\n import tensorflow as tf\n output = tf.cast(amp, tf.complex64) * tf.exp(\n 1j * tf.cast(phi, tf.complex64))\n return output\n\n\n@tf.function\ndef get_mask(input):\n import tensorflow as tf\n\n mask = tf.where(input >= 0.1, tf.ones_like(input), tf.zeros_like(input))\n return mask\n\n\n@tf.function\ndef loss_comb2_scale(Y_true, Y_pred):\n Y_pred = Y_pred / (\n tf.math.reduce_max(Y_pred, axis=(1, 2, 3), keepdims=True) +\n 1e-6) * tf.math.reduce_max(Y_true, axis=(1, 2, 3), keepdims=True)\n loss_1 = tf.math.sqrt(loss_sq(Y_true, Y_pred))\n loss_2 = loss_pcc(Y_true, Y_pred)\n a1 = 1\n a2 = 1\n loss_value = (a1 * loss_1 + a2 * loss_2) / (a1 + a2)\n return loss_value\n\n\n@tf.function\ndef loss_sq(Y_true, Y_pred):\n top = tf.reduce_sum(tf.math.square(Y_pred - Y_true))\n bottom = tf.reduce_sum(tf.math.square(Y_true))\n loss_value = tf.sqrt(top / bottom)\n return loss_value\n\n\n@tf.function\ndef loss_pcc(Y_true, Y_pred):\n pred = Y_pred - tf.reduce_mean(Y_pred)\n true = Y_true - tf.reduce_mean(Y_true)\n\n top = tf.reduce_sum(pred * true)\n bottom = tf.math.sqrt(tf.reduce_sum(pred**2) * tf.reduce_sum(true**2))\n loss_value = 1 - top / 
bottom\n    return loss_value\n\n\n@tf.function\ndef ff_propagation(data):\n    '''\n    diffraction. Assume same x and y lengths and uniform sampling\n    data: source plane field\n    \n    '''\n    diff = _fourier_transform(data)\n\n    # far-field amplitude\n    intensity = tf.math.abs(diff)\n    intensity = tf.cast(intensity, tf.float32)\n    return intensity\n\n\n@tf.function\n# 3D Fourier transform\ndef _fourier_transform(input):\n    import tensorflow as tf\n    # fft3d transform with channel unequal to 1\n    perm_input = K.permute_dimensions(input, pattern=[4, 0, 1, 2, 3])\n    perm_Fr = tf.signal.fftshift(tf.signal.fft3d(\n        tf.signal.ifftshift(tf.cast(perm_input, tf.complex64),\n                            axes=[-3, -2, -1])),\n                                 axes=[-3, -2, -1])\n    Fr = K.permute_dimensions(perm_Fr, pattern=[1, 2, 3, 4, 0])\n    return Fr\n\n\ndef run_AI(data, model_file, dir):\n    \"\"\"\n    Runs AI process.\n\n    Parameters\n    ----------\n    data : ndarray\n        data array\n\n    model_file : str\n        file name containing training model\n\n    dir : str\n        a parent directory that holds the reconstructions. It can be an experiment directory or a scan directory.\n        Result of AI will be saved in dir/results_AI.\n\n    Returns\n    -------\n    nothing\n    \"\"\"\n    print('AI guess')\n\n    # prepare data to make the oversampling ratio ~3\n    wos = 3.0\n    orig_os = get_oversample_ratio(data)\n    # match oversampling to wos\n    wanted_os = [wos, wos, wos]\n    # match diff os\n    new_data, inshape = match_oversample_diff(data, orig_os, wanted_os)\n    new_data = new_data[np.newaxis]\n\n    amp_layer_model, ph_layer_model = Mymodel.get_model(model_file)\n\n    preds_amp = amp_layer_model.predict(new_data, verbose=1)\n\n    preds_phi = ph_layer_model.predict(new_data, verbose=1)\n\n    preds_amp, preds_phi = post_process(preds_amp[0, ..., 0],\n                                        preds_phi[0, ..., 0],\n                                        th=0.1,\n                                        uw=0)\n\n    pred_obj = preds_amp * np.exp(1j * preds_phi)\n\n    # match object size with the input data\n    pred_obj = Resize(pred_obj, inshape)\n\n    pad_value = np.array(data.shape) // 2 - np.array(pred_obj.shape) // 2\n    pad = [[pad_value[0], pad_value[0]], [pad_value[1], pad_value[1]],\n           [pad_value[2], pad_value[2]]]\n    guess = ut.adjust_dimensions(pred_obj, pad)\n\n    np.save(dir + '/image.npy', guess)\n\n    if \"CUDA_VISIBLE_DEVICES\" in os.environ:\n        del os.environ[\"CUDA_VISIBLE_DEVICES\"]\n\n\ndef start_AI(pars, datafile, dir):\n    \"\"\"\n    Starts AI process if all conditions are met.\n\n    Parameters\n    ----------\n    pars : dict\n        parameters for reconstruction\n\n    datafile : str\n        file name containing data for reconstruction\n\n    dir : str\n        a parent directory that holds the reconstructions. 
It can be an experiment directory or a scan directory.\n        Result of AI will be saved in dir/results_AI.\n\n    Returns\n    -------\n    ai_dir : str\n        directory where results were saved\n    \"\"\"\n    if 'AI_trained_model' not in pars:\n        print('no AI_trained_model in config')\n        return None\n    if not os.path.isfile(pars['AI_trained_model']):\n        print('there is no file', pars['AI_trained_model'])\n        return None\n\n    if datafile.endswith('tif') or datafile.endswith('tiff'):\n        try:\n            data = ut.read_tif(datafile)\n        except Exception:\n            print('could not load data file', datafile)\n            return None\n    elif datafile.endswith('npy'):\n        try:\n            data = np.load(datafile)\n        except Exception:\n            print('could not load data file', datafile)\n            return None\n    else:\n        print('no data file found')\n        return None\n\n    # The results will be stored in the directory /results_AI\n    ai_dir = dir + '/results_AI'\n    if os.path.exists(ai_dir):\n        pass\n    else:\n        os.makedirs(ai_dir)\n\n    run_AI(data, pars['AI_trained_model'], ai_dir)\n    return ai_dir", "repo_name": "AdvancedPhotonSource/cohere", "sub_path": "cohere_core/controller/AI_guess.py", "file_name": "AI_guess.py", "file_ext": "py", "file_size_in_byte": 11294, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.environ", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.keras.activations.sigmoid", "line_number": 39, "usage_type": "name"}, {"api_name": "tensorflow.keras.activations.tanh", "line_number": 40, "usage_type": "name"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.ones_like", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 68, "usage_type": "call"}, {"api_name": "scipy.ndimage.morphology.generate_binary_structure", "line_number": 73, "usage_type": "call"}, {"api_name": "scipy.ndimage.morphology", "line_number": 73, "usage_type": "attribute"}, {"api_name": "scipy.ndimage", "line_number": 73, "usage_type": "name"}, {"api_name": "scipy.ndimage.label", "line_number": 75, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 75, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.fft.fftshift", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 100, "usage_type": "attribute"}, {"api_name": "numpy.fft.ifftn", 
"line_number": 100, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.fft.fftshift", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.fft.fftn", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 124, "usage_type": "call"}, {"api_name": "cohere_core.utilities.utils.adjust_dimensions", "line_number": 127, "usage_type": "call"}, {"api_name": "cohere_core.utilities.utils", "line_number": 127, "usage_type": "name"}, {"api_name": "numpy.fft.ifftn", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.fft.ifftshift", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 133, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 134, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 134, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 135, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 135, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 136, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 146, "usage_type": "call"}, {"api_name": "cohere_core.utilities.utils.binning", "line_number": 148, "usage_type": "call"}, {"api_name": "cohere_core.utilities.utils", "line_number": 148, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "cohere_core.utilities.utils.adjust_dimensions", "line_number": 157, "usage_type": "call"}, {"api_name": "cohere_core.utilities.utils", "line_number": 157, "usage_type": "name"}, {"api_name": "scipy.ndimage.measurements.center_of_mass", "line_number": 166, "usage_type": "call"}, {"api_name": "scipy.ndimage.interpolation.shift", "line_number": 169, "usage_type": "call"}, {"api_name": "scipy.ndimage.interpolation.shift", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 188, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 198, "usage_type": "call"}, {"api_name": "tensorflow.complex64", "line_number": 198, "usage_type": "attribute"}, {"api_name": "tensorflow.exp", "line_number": 198, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 199, "usage_type": "call"}, {"api_name": "tensorflow.complex64", "line_number": 199, "usage_type": "attribute"}, {"api_name": "tensorflow.function", "line_number": 195, "usage_type": "attribute"}, {"api_name": "tensorflow.where", "line_number": 207, "usage_type": "call"}, {"api_name": "tensorflow.ones_like", "line_number": 207, "usage_type": "call"}, {"api_name": "tensorflow.zeros_like", "line_number": 207, "usage_type": "call"}, {"api_name": "tensorflow.function", 
"line_number": 203, "usage_type": "attribute"}, {"api_name": "tensorflow.math.reduce_max", "line_number": 214, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 214, "usage_type": "attribute"}, {"api_name": "tensorflow.math.reduce_max", "line_number": 215, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 215, "usage_type": "attribute"}, {"api_name": "tensorflow.math.sqrt", "line_number": 216, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 216, "usage_type": "attribute"}, {"api_name": "tensorflow.function", "line_number": 211, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 226, "usage_type": "call"}, {"api_name": "tensorflow.math.square", "line_number": 226, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 226, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 227, "usage_type": "call"}, {"api_name": "tensorflow.math.square", "line_number": 227, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 227, "usage_type": "attribute"}, {"api_name": "tensorflow.sqrt", "line_number": 228, "usage_type": "call"}, {"api_name": "tensorflow.function", "line_number": 224, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 234, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 235, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 237, "usage_type": "call"}, {"api_name": "tensorflow.math.sqrt", "line_number": 238, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 238, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 238, "usage_type": "call"}, {"api_name": "tensorflow.function", "line_number": 232, "usage_type": "attribute"}, {"api_name": "tensorflow.math.abs", "line_number": 253, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 253, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 254, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 254, "usage_type": "attribute"}, {"api_name": "tensorflow.function", "line_number": 243, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.permute_dimensions", "line_number": 263, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 263, "usage_type": "name"}, {"api_name": "tensorflow.signal.fftshift", "line_number": 264, "usage_type": "call"}, {"api_name": "tensorflow.signal", "line_number": 264, "usage_type": "attribute"}, {"api_name": "tensorflow.signal.fft3d", "line_number": 264, "usage_type": "call"}, {"api_name": "tensorflow.signal.ifftshift", "line_number": 265, "usage_type": "call"}, {"api_name": "tensorflow.signal", "line_number": 265, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 265, "usage_type": "call"}, {"api_name": "tensorflow.complex64", "line_number": 265, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.permute_dimensions", "line_number": 268, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 268, "usage_type": "name"}, {"api_name": "tensorflow.function", "line_number": 258, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 301, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 319, "usage_type": "call"}, {"api_name": 
"cohere_core.utilities.utils.adjust_dimensions", "line_number": 322, "usage_type": "call"}, {"api_name": "cohere_core.utilities.utils", "line_number": 322, "usage_type": "name"}, {"api_name": "numpy.save", "line_number": 324, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 326, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 327, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 354, "usage_type": "call"}, {"api_name": "os.path", "line_number": 354, "usage_type": "attribute"}, {"api_name": "cohere_core.utilities.utils.read_tif", "line_number": 360, "usage_type": "call"}, {"api_name": "cohere_core.utilities.utils", "line_number": 360, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 366, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 376, "usage_type": "call"}, {"api_name": "os.path", "line_number": 376, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 379, "usage_type": "call"}]} +{"seq_id": "27604393033", "text": "import logging\nimport re\nfrom typing import Optional\n\nimport attr\n\n\n@attr.s(auto_attribs=True)\nclass Attachment:\n \"\"\"This object represents a Signal message attachment.\n\n The attributes have a 1 to 1 correspondance to the signald JsonAttachment class\n https://signald.org/protocol/structures/v1/JsonAttachment/\n \"\"\"\n filename: Optional[str] = attr.ib()\n blurhash: str = attr.ib(default=None)\n caption: str = attr.ib(default=None)\n content_type: str = attr.ib(default=None)\n custom_filename: str = attr.ib(default=None)\n digest: str = attr.ib(default=None)\n height: int = attr.ib(default=None)\n id: str = attr.ib(default=None)\n key: str = attr.ib(default=None)\n stored_filename: str = attr.ib(default=None)\n size: int = attr.ib(default=None)\n voice_note: bool = attr.ib(default=None)\n width: int = attr.ib(default=None)\n\n @staticmethod\n def _snake_to_camel(attr_name: str) -> str:\n attr_name = re.sub(r\"(_|-)+\", \" \", attr_name).title().replace(\" \", \"\")\n return ''.join([attr_name[0].lower(), attr_name[1:]])\n\n def to_send_dict(self) -> dict:\n attachment_data = attr.asdict(self)\n send_data = {}\n for attr_name, value in attachment_data.items():\n if value is not None and attr_name != \"stored_filename\":\n send_data[self._snake_to_camel(attr_name)] = value\n\n # Make sure the filename field is populated,\n # because the received attachment don't have the filename field.\n if self.filename is None:\n if self.stored_filename is None:\n raise ValueError(\"Filename or stored_filename must be provided.\")\n send_data[\"filename\"] = self.stored_filename\n\n return send_data\n\n @staticmethod\n def create_from_receive_dict(data: dict) -> 'Attachment':\n log = logging.getLogger(__name__)\n\n attachment = Attachment(\"\")\n attachment_attr_names = attr.asdict(attachment)\n\n processed_data_attrs = set()\n for attr_name in attachment_attr_names:\n data_name = Attachment._snake_to_camel(attr_name)\n setattr(attachment, attr_name, data.get(data_name))\n processed_data_attrs.add(data_name)\n\n for attr_name in data:\n if attr_name not in processed_data_attrs:\n log.warning(f\"Attribute {attr_name} in data was ignored\")\n\n return attachment\n", "repo_name": "lwesterhof/semaphore", "sub_path": "semaphore/attachment.py", "file_name": "attachment.py", "file_ext": "py", "file_size_in_byte": 2409, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 123, "dataset": "github-code", "pt": "21", "api": [{"api_name": 
"typing.Optional", "line_number": 15, "usage_type": "name"}, {"api_name": "attr.ib", "line_number": 15, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 16, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 17, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 18, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 19, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 20, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 21, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 22, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 23, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 24, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 25, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 26, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 27, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 31, "usage_type": "call"}, {"api_name": "attr.asdict", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 52, "usage_type": "call"}, {"api_name": "attr.asdict", "line_number": 55, "usage_type": "call"}, {"api_name": "attr.s", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "20079486651", "text": "import pygame.font\nfrom pygame.sprite import Group\nfrom jet import Jet\n\nclass Scoreboard:\n \"\"\"A class to report scoring information.\"\"\"\n def __init__(self, ai_game):\n \"\"\"Initialize scorekeeping attributes.\"\"\"\n self.aiGame = ai_game\n self.screen = ai_game.screen\n self.screenRect = self.screen.get_rect()\n self.settings = ai_game.settings\n self.stats = ai_game.stats\n\n # Font settings for scoring information.\n self.textColor = (30, 30, 30)\n self.font = pygame.font.SysFont(None, 48)\n\n # Prepare the initial score image.\n self.prepScore()\n self.prepHighScore()\n self.prepLevel()\n self.prepJets()\n\n def prepScore(self):\n \"\"\"Turn the score into a rendered image.\"\"\"\n roundedScore = round(self.stats.score, - 1)\n scoreStr = \"{:,}\".format(roundedScore)\n self.scoreImage = self.font.render(scoreStr, True, self.textColor, self.settings.bgColor)\n\n # Display the score at the top right of the screen.\n self.scoreRect = self.scoreImage.get_rect()\n self.scoreRect.right = self.screenRect.right - 20\n self.scoreRect.top = 20\n\n def prepHighScore(self):\n \"\"\"Turn the high score into a rendered image.\"\"\"\n highScore = round(self.stats.highScore, -1)\n highScoreStr = \"{:,}\".format(highScore)\n self.highScoreImage = self.font.render(highScoreStr, True, self.textColor, self.settings.bgColor)\n\n # Center the high score at the top of the screen.\n self.highScoreRect = self.highScoreImage.get_rect()\n self.highScoreRect.centerx = self.screenRect.centerx\n self.highScoreRect.top = self.scoreRect.top\n\n def prepLevel(self):\n \"\"\"Turn the level into a rendered image.\"\"\"\n levelStr = str(self.stats.level)\n self.levelImage = self.font.render(levelStr, True,\n self.textColor, self.settings.bgColor)\n\n # Position the level below the score.\n self.levelRect = self.levelImage.get_rect()\n self.levelRect.right = self.scoreRect.right\n self.levelRect.top = self.scoreRect.bottom + 10\n\n def prepJets(self):\n \"\"\"Show how many jets are left.\"\"\"\n self.jets = Group()\n for jetNumber in range(self.stats.jetsLeft):\n jet = Jet(self.aiGame)\n jet.rect.x = 10 + jetNumber * jet.rect.width\n jet.rect.y = 10\n self.jets.add(jet)\n\n def checkHighScore(self):\n \"\"\"Check to see if there's a 
new high score.\"\"\"\n if self.stats.score > self.stats.highScore:\n self.stats.highScore = self.stats.score\n self.prepHighScore() \n \n def showScore(self):\n \"\"\" draw score and level on screen \"\"\"\n self.screen.blit(self.scoreImage, self.scoreRect)\n self.screen.blit(self.highScoreImage, self.highScoreRect)\n self.screen.blit(self.levelImage, self.levelRect)\n self.jets.draw(self.screen)\n\n ", "repo_name": "bd3young/pygame", "sub_path": "scoreboard.py", "file_name": "scoreboard.py", "file_ext": "py", "file_size_in_byte": 2962, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pygame.font.font.SysFont", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.font.font", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.font", "line_number": 17, "usage_type": "name"}, {"api_name": "pygame.sprite.Group", "line_number": 60, "usage_type": "call"}, {"api_name": "jet.Jet", "line_number": 62, "usage_type": "call"}, {"api_name": "jet.rect", "line_number": 63, "usage_type": "attribute"}, {"api_name": "jet.rect", "line_number": 64, "usage_type": "attribute"}]} +{"seq_id": "35682643354", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport pprint\nimport MySQLdb\nimport re\nimport json\nimport csv\nimport gzip # Using gzip file\n\npp = pprint.PrettyPrinter(indent=4)\n\nparser = argparse.ArgumentParser(description=\"\"\"Script for parsing authorities from dumps and DB\"\"\")\nparser.add_argument(\"-dump\",help=\"\"\"Path to Dump file\"\"\")\nparser.add_argument(\"-authorities\",help=\"\"\"Path to Authorities file\"\"\")\nparser.add_argument(\"-config\",help=\"\"\"Path to a JSON file with configuration options!\"\"\")\nargs = parser.parse_args()\n\ndata = {}\nauthorities = {}\nmysqlmode = False\n\nconn = None\n\nif \"config\" in args:\n if args.config is not None:\n with open(args.config) as json_data_file:\n data = json.load(json_data_file)\n\nif \"mysql\" in data:\n dbfile = None\n mysqlmode = True\n conn = MySQLdb.connect(host=data[\"mysql\"][\"host\"], user=data[\"mysql\"][\"user\"], passwd=data[\"mysql\"][\"password\"], db=data[\"mysql\"][\"database\"], use_unicode=True, charset='utf8', init_command='SET NAMES UTF8')\n\n\nif conn is None:\n print(\"NO CONNECTION\")\n exit()\n\n\ndef addToDb( id, props, conn, iter ):\n\n c = conn.cursor()\n records = []\n\n for prop in props:\n records.append( ( id, prop ) )\n\n c.executemany( \"INSERT INTO `authorities` (`id`, `authority`) VALUES ( %s, %s )\", records )\n\n if iter > 10000 :\n conn.commit()\n iter = 0\n else :\n iter = iter + 1\n\n return iter\n\n\nif \"authorities\" in args:\n if args.authorities is not None:\n with open(args.authorities) as authorities_file:\n csvreader = csv.reader(authorities_file, delimiter='\\t')\n for row in csvreader:\n authorities[row[2]] = 1\n\n#pp.pprint( authorities )\n\nif \"dump\" in args:\n if args.dump is not None:\n\n cur = conn.cursor()\n cur.execute(\"DROP TABLE IF EXISTS `authorities`;\")\n cur.execute(\"CREATE TABLE IF NOT EXISTS `authorities` ( `id` VARCHAR(25), `authority` VARCHAR(25), PRIMARY KEY (`id`, `authority`) ) ;\")\n cur.execute(\"CREATE INDEX idx_id ON authorities (id);\")\n cur.execute(\"CREATE INDEX idx_authorities ON authorities (authority);\")\n\n iter = 0\n with gzip.open(args.dump,'rt') as f:\n for line in f:\n detectid = re.findall( r'\\\"id\\\":\\\"(Q\\d+)\\\"', line )\n if len( detectid ) > 0:\n id = detectid[0]\n # print( id )\n listp = 
re.findall( r'\\\"(P\\d+)\\\"', line )\n authp = []\n for prop in listp:\n if prop in authorities:\n authp.append( prop )\n # pp.pprint( authp )\n if len( authp ) > 0:\n iter = addToDb( id, list(set(authp)), conn, iter )\n\n conn.commit()\n", "repo_name": "WikimediaCAT/wikidata-pylisting", "sub_path": "autoritiesCheck.py", "file_name": "autoritiesCheck.py", "file_ext": "py", "file_size_in_byte": 2831, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pprint.PrettyPrinter", "line_number": 12, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "json.load", "line_number": 29, "usage_type": "call"}, {"api_name": "MySQLdb.connect", "line_number": 34, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 64, "usage_type": "call"}, {"api_name": "gzip.open", "line_number": 80, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 82, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "71099167412", "text": "import json\nfrom flask import Blueprint, Response, request, render_template\n\nfrom app import db\nfrom app.modules.auth import validate_jwt, get_auth_info\nfrom app.modules.auth.jwt_parser import encode_jwt\nfrom app.modules.external.bitrix24.user import get_users_per_department\nfrom app.modules.settings import get_settings\nfrom app.models import UserZipAssociation\n\n\nblueprint = Blueprint(\"users\", __name__, template_folder='templates')\n\n\n@blueprint.route(\"sales_users\", methods=['GET'])\ndef sales_users():\n auth_data = validate_jwt()\n if auth_data is None or \"user\" not in auth_data or auth_data[\"user\"] is None:\n return \"forbidden\", 401\n users = []\n defaults = {\n \"data\": [],\n \"user_type\": \"Handelsvertreter 2022\"\n }\n departments = [\n 5, # Vertrieb\n 23, # VK Profis E360\n 57, # HV Profis E360\n 43, # ---\n 248, # Team POWER-PLAY\n 272, # ---\n 270, # ---\n 41, # Extern IT Unterstützung\n 390, # Team Power Play 2022\n 434, # Team Dura\n 432, # Team Gode\n 436, # Team Herzberg\n 392, # Team Hoffmann\n 428, # Team Kraft\n 386, # Team Maier\n 430, # Team Miele\n 426, # Team Niggemann\n 384, # Team Schuster\n 458, # Team Oftring\n 464, # Team Rieger\n 462, # Spba-Finanz*\n 489, # Solar B\n 503 # Team Ellrich\n ]\n for department_id in departments:\n response = get_users_per_department(department_id) # Verkauf/Außendienst\n if response is None:\n continue\n for user in response:\n existing_user = next((item for item in users if str(item[\"ID\"]) == str(user[\"ID\"])), None)\n if existing_user is not None:\n continue\n if user[\"NAME\"] is None or user[\"NAME\"] == \"\":\n user[\"NAME\"] = user[\"EMAIL\"]\n association = UserZipAssociation.query.filter(UserZipAssociation.user_id == user[\"ID\"]).first()\n if association is None:\n user[\"association\"] = defaults\n else:\n if association.user_type in [None, \"\"]:\n association.user_type = defaults[\"user_type\"]\n user[\"association\"] = {\n \"data\": association.data,\n \"last_assigned\": str(association.last_assigned),\n \"max_leads\": association.max_leads,\n \"current_cycle_index\": association.current_cycle_index,\n \"current_cycle_count\": association.current_cycle_count,\n \"supervisor_id\": association.supervisor_id,\n \"user_type\": association.user_type\n }\n if user[\"association\"][\"last_assigned\"] == \"None\":\n user[\"association\"][\"last_assigned\"] = None\n if user[\"ACTIVE\"] 
is True or user[\"ACTIVE\"] == \"true\" or user[\"association\"].get(\"max_leads\", 0) > 0:\n users.append(user)\n users.sort(key=lambda x: x[\"NAME\"], reverse=True)\n users.reverse()\n return Response(\n json.dumps({\"status\": \"success\", \"data\": users}),\n status=200,\n mimetype='application/json')\n\n\n@blueprint.route(\"sales_users\", methods=['POST'])\ndef sales_users_store():\n auth_data = validate_jwt()\n if auth_data is None or \"user\" not in auth_data or auth_data[\"user\"] is None:\n return \"forbidden\", 401\n data = request.json\n association = UserZipAssociation.query.filter(UserZipAssociation.user_id == data[\"ID\"]).first()\n if association is None:\n association = UserZipAssociation(\n user_id=int(data[\"ID\"]),\n current_cycle_count=0,\n comment=f\"{data['NAME']} {data['LAST_NAME']}\"\n )\n db.session.add(association)\n association.data = data[\"association\"][\"data\"]\n if \"max_leads\" not in data[\"association\"] or data[\"association\"][\"max_leads\"] is None or data[\"association\"][\"max_leads\"] == \"\":\n data[\"association\"][\"max_leads\"] = 0\n association.max_leads = int(data[\"association\"][\"max_leads\"])\n if \"supervisor_id\" in data[\"association\"] and data[\"association\"][\"supervisor_id\"] is not None:\n association.supervisor_id = int(data[\"association\"][\"supervisor_id\"])\n association.user_type = data[\"association\"].get(\"user_type\")\n db.session.commit()\n return Response(\n json.dumps({\"status\": \"success\", \"data\": data}),\n status=200,\n mimetype='application/json')\n\n\n@blueprint.route(\"supervisors\", methods=['GET'])\ndef supervisors():\n auth_data = validate_jwt()\n if auth_data is None or \"user\" not in auth_data or auth_data[\"user\"] is None:\n return \"forbidden\", 401\n users = []\n departments = [\n 5, # Vertrieb\n 23, # VK Profis E360\n 57, # HV Profis E360\n 43, # ---\n 248, # Team POWER-PLAY\n 272, # ---\n 270, # ---\n 41, # Extern IT Unterstützung\n 390, # Team Power Play 2022\n 434, # Team Dura\n 432, # Team Gode\n 436, # Team Herzberg\n 392, # Team Hoffmann\n 428, # Team Kraft\n 386, # Team Maier\n 430, # Team Miele\n 426, # Team Niggemann\n 384, # Team Schuster\n 458, # Team Oftring\n 464, # Team Rieger\n 462, # Spba-Finanz*\n 489, # Solar B\n 503 # Team Ellrich\n ]\n for department_id in departments:\n response = get_users_per_department(department_id) # Verkauf/Außendienst\n if response is None:\n continue\n for user in response:\n existing_user = next((item for item in users if str(item[\"ID\"]) == str(user[\"ID\"])), None)\n if existing_user is not None:\n continue\n association = UserZipAssociation.query.filter(UserZipAssociation.user_id == user[\"ID\"]).first()\n if association is None:\n continue\n if association.user_type not in [\"Ausbilder\", \"Teamleiter HV\", \"Teamleiter Angestellt\"]:\n continue\n user[\"fullname\"] = user[\"NAME\"] + \" \" + user[\"LAST_NAME\"]\n user[\"ID\"] = int(user[\"ID\"])\n if user[\"ACTIVE\"] is True or user[\"ACTIVE\"] == \"true\":\n users.append(user)\n users.sort(key=lambda x: x[\"NAME\"], reverse=True)\n users.append({\n \"fullname\": \"Kein Teamleiter\",\n \"ID\": 0\n })\n users.reverse()\n return Response(\n json.dumps({\"status\": \"success\", \"data\": users}),\n status=200,\n mimetype='application/json')\n\n\n@blueprint.route(\"sales_settings\", methods=['GET', 'POST'])\ndef sales_users_settings_app():\n config = get_settings(section=\"external/bitrix24\")\n auth_info = get_auth_info()\n if auth_info[\"user\"] is None:\n return \"Forbidden\"\n token = 
encode_jwt(auth_info, expire_minutes=600)\n return render_template(\"sales_settings/sales_settings.html\", token=token)\n", "repo_name": "vrcompugo/EV-Manager-Data-API", "sub_path": "app/modules/user/user_routes.py", "file_name": "user_routes.py", "file_ext": "py", "file_size_in_byte": 6984, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "flask.Blueprint", "line_number": 12, "usage_type": "call"}, {"api_name": "app.modules.auth.validate_jwt", "line_number": 17, "usage_type": "call"}, {"api_name": "app.modules.external.bitrix24.user.get_users_per_department", "line_number": 51, "usage_type": "call"}, {"api_name": "app.models.UserZipAssociation.query.filter", "line_number": 60, "usage_type": "call"}, {"api_name": "app.models.UserZipAssociation.query", "line_number": 60, "usage_type": "attribute"}, {"api_name": "app.models.UserZipAssociation", "line_number": 60, "usage_type": "name"}, {"api_name": "app.models.UserZipAssociation.user_id", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.Response", "line_number": 81, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 82, "usage_type": "call"}, {"api_name": "app.modules.auth.validate_jwt", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 92, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 92, "usage_type": "name"}, {"api_name": "app.models.UserZipAssociation.query.filter", "line_number": 93, "usage_type": "call"}, {"api_name": "app.models.UserZipAssociation.query", "line_number": 93, "usage_type": "attribute"}, {"api_name": "app.models.UserZipAssociation", "line_number": 93, "usage_type": "name"}, {"api_name": "app.models.UserZipAssociation.user_id", "line_number": 93, "usage_type": "attribute"}, {"api_name": "app.models.UserZipAssociation", "line_number": 95, "usage_type": "call"}, {"api_name": "app.db.session.add", "line_number": 100, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 100, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 100, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 108, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 108, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 108, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 109, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 110, "usage_type": "call"}, {"api_name": "app.modules.auth.validate_jwt", "line_number": 117, "usage_type": "call"}, {"api_name": "app.modules.external.bitrix24.user.get_users_per_department", "line_number": 147, "usage_type": "call"}, {"api_name": "app.models.UserZipAssociation.query.filter", "line_number": 154, "usage_type": "call"}, {"api_name": "app.models.UserZipAssociation.query", "line_number": 154, "usage_type": "attribute"}, {"api_name": "app.models.UserZipAssociation", "line_number": 154, "usage_type": "name"}, {"api_name": "app.models.UserZipAssociation.user_id", "line_number": 154, "usage_type": "attribute"}, {"api_name": "flask.Response", "line_number": 169, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 170, "usage_type": "call"}, {"api_name": "app.modules.settings.get_settings", "line_number": 177, "usage_type": "call"}, {"api_name": "app.modules.auth.get_auth_info", "line_number": 178, "usage_type": "call"}, {"api_name": "app.modules.auth.jwt_parser.encode_jwt", "line_number": 181, "usage_type": 
"call"}, {"api_name": "flask.render_template", "line_number": 182, "usage_type": "call"}]} +{"seq_id": "41616038130", "text": "import datetime\nimport uuid\nfrom collections import defaultdict\nimport pandas as pd\n\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nfrom rq.exceptions import NoSuchJobError\nfrom rq.job import Job\n\nfrom .core import app, conn, db, queue\nfrom .models import Result\nfrom .tasks import slow_loop\nfrom .view import navbar, body\nfrom .model import scraper\n\napp.layout = html.Div(\n [\n dcc.Store(id=\"store\"),\n dcc.Interval(id=\"interval\", interval=500),\n navbar,\n body\n ]\n)\n\n\n@app.callback(\n Output(\"store\", \"data\"),\n [Input(\"letsgo\", \"n_clicks\")],\n [State(\"transaction_type\",\"value\"),\n State(\"housing_type\",\"value\"),\n State(\"rental_period\",\"value\")]\n)\ndef submit(n_clicks, transaction_type, housing_type, rental_period):\n if n_clicks:\n id_ = uuid.uuid4()\n\n scraper_init = scraper(transaction_type, housing_type, rental_period)\n\n # queue the task\n queue.enqueue(slow_loop, scraper_init, id_, job_id=str(id_))\n\n # record queuing in the database\n result = Result(id=id_, queued=datetime.datetime.now())\n db.session.add(result)\n db.session.commit()\n\n # log process id in dcc.Store\n return {\"id\": str(id_)}\n return {}\n\n\n@app.callback(\n [\n Output(\"table\", \"data\"),\n Output(\"progress\", \"value\"),\n Output(\"progress\", \"children\"),\n Output(\"collapse\", \"is_open\"),\n ],\n [Input(\"interval\", \"n_intervals\")],\n [State(\"store\", \"data\")],\n)\ndef retrieve_output(n, data):\n if n and data:\n try:\n job = Job.fetch(data[\"id\"], connection=conn)\n if job.get_status() == \"finished\":\n return pd.read_json(job.result, orient='index').to_dict('records'), 100, \"100%\", False\n progress = job.meta.get(\"progress\", 0)\n total = job.meta.get(\"total\",1)\n return [], progress*100/total,\"%d%%\" %(progress*100/total), True\n except NoSuchJobError:\n # if job no longer exists, retrive result from database\n result = Result.query.filter_by(id=data[\"id\"]).first()\n if result and result.result:\n return [], 100,\"100%\", False\n return [], None,None, False\n", "repo_name": "mdylan2/propertyfinderscraper", "sub_path": "dash_rq_demo/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 2321, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "core.app.layout", "line_number": 19, "usage_type": "attribute"}, {"api_name": "core.app", "line_number": 19, "usage_type": "name"}, {"api_name": "dash_html_components.Div", "line_number": 19, "usage_type": "call"}, {"api_name": "dash_core_components.Store", "line_number": 21, "usage_type": "call"}, {"api_name": "dash_core_components.Interval", "line_number": 22, "usage_type": "call"}, {"api_name": "view.navbar", "line_number": 23, "usage_type": "name"}, {"api_name": "view.body", "line_number": 24, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 38, "usage_type": "call"}, {"api_name": "model.scraper", "line_number": 40, "usage_type": "call"}, {"api_name": "core.queue.enqueue", "line_number": 43, "usage_type": "call"}, {"api_name": "tasks.slow_loop", "line_number": 43, "usage_type": "argument"}, {"api_name": "core.queue", "line_number": 43, "usage_type": "name"}, {"api_name": "models.Result", "line_number": 46, "usage_type": "call"}, 
{"api_name": "datetime.datetime.now", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 46, "usage_type": "attribute"}, {"api_name": "core.db.session.add", "line_number": 47, "usage_type": "call"}, {"api_name": "core.db.session", "line_number": 47, "usage_type": "attribute"}, {"api_name": "core.db", "line_number": 47, "usage_type": "name"}, {"api_name": "core.db.session.commit", "line_number": 48, "usage_type": "call"}, {"api_name": "core.db.session", "line_number": 48, "usage_type": "attribute"}, {"api_name": "core.db", "line_number": 48, "usage_type": "name"}, {"api_name": "core.app.callback", "line_number": 29, "usage_type": "call"}, {"api_name": "core.app", "line_number": 29, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 30, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 31, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 32, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 33, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 34, "usage_type": "call"}, {"api_name": "rq.job.Job.fetch", "line_number": 68, "usage_type": "call"}, {"api_name": "rq.job.Job", "line_number": 68, "usage_type": "name"}, {"api_name": "core.conn", "line_number": 68, "usage_type": "name"}, {"api_name": "pandas.read_json", "line_number": 70, "usage_type": "call"}, {"api_name": "rq.exceptions.NoSuchJobError", "line_number": 74, "usage_type": "name"}, {"api_name": "models.Result.query.filter_by", "line_number": 76, "usage_type": "call"}, {"api_name": "models.Result.query", "line_number": 76, "usage_type": "attribute"}, {"api_name": "models.Result", "line_number": 76, "usage_type": "name"}, {"api_name": "core.app.callback", "line_number": 55, "usage_type": "call"}, {"api_name": "core.app", "line_number": 55, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 57, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 58, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 59, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 60, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 62, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "32441653169", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom vk_api.longpoll import VkLongPoll, VkEventType\nfrom vk_api.keyboard import VkKeyboard, VkKeyboardColor\nimport vk_api\nfrom datetime import datetime\nimport random\nimport time\nimport requests\nimport json\n\n# Токены:\ntoken = 'Токен ВК здесь'\n\nvk_session = vk_api.VkApi(token=token)\nsession_api = vk_session.get_api()\nlongpoll = VkLongPoll(vk_session)\n\ndef search(sea):\n send_message(vk_session, 'user_id', event.user_id, message='🎓 Пожалуйста подождите, я ищу информацию в сети интернет 🌎')\n\n url = (\"https://google-search3.p.rapidapi.com/api/v1/search/q=\" + sea)\n\n headers = {\n 'x-rapidapi-key': \"Токен от Google-Search\",\n 'x-rapidapi-host': \"google-search3.p.rapidapi.com\"\n }\n\n response = requests.request(\"GET\", url, headers=headers)\n\n js = response.json()\n title1 = []\n description1 = []\n link1 = []\n title1 = js.get('results')[0].get('title')\n description1 = js.get('results')[0].get('description')\n link1 = js.get('results')[0].get('link')\n\n title2 = []\n description2 = []\n link2 = []\n title2 = 
js.get('results')[1].get('title')\n    description2 = js.get('results')[1].get('description')\n    link2 = js.get('results')[1].get('link')\n\n    title3 = []\n    description3 = []\n    link3 = []\n    title3 = js.get('results')[2].get('title')\n    description3 = js.get('results')[2].get('description')\n    link3 = js.get('results')[2].get('link')\n\n    # print(f'Название: {title}\\nОписание:\\n{description}')\n\n    send_message(vk_session, 'user_id', event.user_id, message=f'🔥 Название: \\n{title1}\\nОписание:\\n{description1}\\n'\\\n                                                               f'🔗 ссылка на источник: {link1}\\n{\"-\" * 30}')\n    send_message(vk_session, 'user_id', event.user_id, message=f'🔥 Название: \\n{title2}\\nОписание:\\n{description2}\\n' \\\n                                                               f'🔗 ссылка на источник: {link2}\\n{\"-\" * 30}')\n    send_message(vk_session, 'user_id', event.user_id, message=f'🔥 Название: \\n{title3}\\nОписание:\\n{description3}\\n' \\\n                                                               f'🔗 ссылка на источник: {link3}')\n\n\ndef create_keyboard(response):\n    keyboard = VkKeyboard(one_time=False)\n\n    if response == 'меню':\n        keyboard.add_button('Инструкция', color=VkKeyboardColor.POSITIVE)\n        keyboard.add_line()  # Separator\n        keyboard.add_button('Поиск', color=VkKeyboardColor.PRIMARY)\n        keyboard.add_line()  # Separator\n        keyboard.add_button('Закрыть', color=VkKeyboardColor.NEGATIVE)\n\n    elif response == 'начать':\n        keyboard.add_button('Разработчик', color=VkKeyboardColor.POSITIVE)\n        keyboard.add_button('Меню', color=VkKeyboardColor.POSITIVE)\n\n    elif response == 'привет':\n        keyboard.add_button('Меню', color=VkKeyboardColor.POSITIVE)\n\n    elif response == 'поиск':\n        keyboard.add_button('поиск', color=VkKeyboardColor.PRIMARY)\n        keyboard.add_line()  # Separator\n        keyboard.add_button('Меню', color=VkKeyboardColor.POSITIVE)\n\n    elif response == 'закрыть':\n        print('закрываем клаву')\n        return keyboard.get_empty_keyboard()\n\n    keyboard = keyboard.get_keyboard()\n    return keyboard\n\n\ndef send_message(vk_session, id_type, id, message=None, attachment=None, keyboard=None):\n    vk_session.method('messages.send',{id_type: id, 'message': message, 'random_id': random.randint(-2147483648, +2147483648), \"attachment\": attachment, 'keyboard': keyboard})\n\nrule_list = '''\n👋🏻 Привет, данный бот поможет тебе найти любой контент со всего мира\nПросто введи на клавиатуре, или воспользуйся кнопками активности, чтобы взаимодействовать с ботом.\nЕсли пропала клавиатура, напиши мне \"Привет\", тогда все снова заработает\n'''\n\nwaiting_place_users = set()\n\n\ndef input_search():\n    send_message(vk_session, 'user_id', event.user_id, message=\"Что вы хотите найти?\",\n                 keyboard=keyboard)\n    if event.type == VkEventType.MESSAGE_NEW and event.to_me and event.text:\n        waiting_place_users.add(event.user_id)\n\n# Uploading an image to VK\ndef upload_photo():\n    uploader = vk_api.upload.VkUpload(vk_session)\n    img = uploader.photo_messages(\"img/pony.png\")  # Path to the photo to upload\n    media_id = str(img[0]['id'])\n    owner_id = str(img[0]['owner_id'])\n    print(\"photo\" + owner_id + \"_\" + media_id)\n    print(img)\n\n# Uncomment the function call below to upload the image\n# upload_photo()\n\nwhile True:\n    for event in longpoll.listen():\n        if event.type == VkEventType.MESSAGE_NEW:\n            print('Сообщение пришло в: ' + str(datetime.strftime(datetime.now(), \"%H:%M:%S\")))\n            print('Текст сообщения: ' + str(event.text))\n            print(f\"Пользовательский ID: {event.user_id}\")\n            print('-'*50)\n            response = event.text.lower()\n            keyboard = create_keyboard(response)\n\n            if event.from_user and not event.from_me:\n                if event.type == VkEventType.MESSAGE_NEW and event.user_id 
in waiting_place_users and event.text:\n waiting_place_users.remove(event.user_id)\n try:\n search(event.text)\n except:\n if event.text.lower() == 'меню' or event.text.lower() == 'поиск':\n break;\n else:\n send_message(vk_session, 'user_id', event.user_id, message=\"Город не найден, проверьте правильность ввода и повторите попытку\")\n\n elif response == \"поиск\":\n input_search()\n elif response == \"привет\":\n send_message(vk_session, 'user_id', event.user_id, message='И тебе привет 👋🏻\\nЧтобы воспользоваться моим функционалом откройте меню',keyboard=keyboard)\n elif response == \"меню\":\n send_message(vk_session, 'user_id', event.user_id, message='Хе-хей, ты решил зайти в меню?\\nЧем я могу тебе помочь?',keyboard=keyboard)\n elif response== 'начать':\n send_message(vk_session, 'user_id', event.user_id, message=f'{rule_list}',keyboard=keyboard)\n elif response == 'инструкция':\n send_message(vk_session, 'user_id', event.user_id, message=f'{rule_list}')\n elif response=='разработчик':\n send_message(vk_session, 'user_id', event.user_id, message='Мой разработчик пока-что студент, зовут его @id186019886 (Сергей)')\n elif response=='закрыть':\n send_message(vk_session, 'user_id', event.user_id, message='Меню закрыто, чтобы вернуть кнопки, напиши \"Меню\"',keyboard=keyboard)\n\n else:\n send_message(vk_session, 'user_id', event.user_id, message='Неизвестная команда, или сообщение, человек я тебя не понимаю, так как понимают тебя другие люди =(', attachment=\"photo-199806584_457239036\")", "repo_name": "SergioStrangeS/VkSearchBot", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8020, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "vk_api.VkApi", "line_number": 15, "usage_type": "call"}, {"api_name": "vk_api.longpoll.VkLongPoll", "line_number": 17, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 29, "usage_type": "call"}, {"api_name": "vk_api.keyboard.VkKeyboard", "line_number": 64, "usage_type": "call"}, {"api_name": "vk_api.keyboard.VkKeyboardColor.POSITIVE", "line_number": 67, "usage_type": "attribute"}, {"api_name": "vk_api.keyboard.VkKeyboardColor", "line_number": 67, "usage_type": "name"}, {"api_name": "vk_api.keyboard.VkKeyboardColor.PRIMARY", "line_number": 69, "usage_type": "attribute"}, {"api_name": "vk_api.keyboard.VkKeyboardColor", "line_number": 69, "usage_type": "name"}, {"api_name": "vk_api.keyboard.VkKeyboardColor.NEGATIVE", "line_number": 71, "usage_type": "attribute"}, {"api_name": "vk_api.keyboard.VkKeyboardColor", "line_number": 71, "usage_type": "name"}, {"api_name": "vk_api.keyboard.VkKeyboardColor.POSITIVE", "line_number": 74, "usage_type": "attribute"}, {"api_name": "vk_api.keyboard.VkKeyboardColor", "line_number": 74, "usage_type": "name"}, {"api_name": "vk_api.keyboard.VkKeyboardColor.POSITIVE", "line_number": 75, "usage_type": "attribute"}, {"api_name": "vk_api.keyboard.VkKeyboardColor", "line_number": 75, "usage_type": "name"}, {"api_name": "vk_api.keyboard.VkKeyboardColor.POSITIVE", "line_number": 78, "usage_type": "attribute"}, {"api_name": "vk_api.keyboard.VkKeyboardColor", "line_number": 78, "usage_type": "name"}, {"api_name": "vk_api.keyboard.VkKeyboardColor.PRIMARY", "line_number": 81, "usage_type": "attribute"}, {"api_name": "vk_api.keyboard.VkKeyboardColor", "line_number": 81, "usage_type": "name"}, {"api_name": "vk_api.keyboard.VkKeyboardColor.POSITIVE", "line_number": 83, "usage_type": "attribute"}, 
{"api_name": "vk_api.keyboard.VkKeyboardColor", "line_number": 83, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 94, "usage_type": "call"}, {"api_name": "vk_api.longpoll.VkEventType.MESSAGE_NEW", "line_number": 108, "usage_type": "attribute"}, {"api_name": "vk_api.longpoll.VkEventType", "line_number": 108, "usage_type": "name"}, {"api_name": "vk_api.upload.VkUpload", "line_number": 113, "usage_type": "call"}, {"api_name": "vk_api.upload", "line_number": 113, "usage_type": "attribute"}, {"api_name": "vk_api.longpoll.VkEventType.MESSAGE_NEW", "line_number": 125, "usage_type": "attribute"}, {"api_name": "vk_api.longpoll.VkEventType", "line_number": 125, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 126, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 126, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 126, "usage_type": "call"}, {"api_name": "vk_api.longpoll.VkEventType.MESSAGE_NEW", "line_number": 134, "usage_type": "attribute"}, {"api_name": "vk_api.longpoll.VkEventType", "line_number": 134, "usage_type": "name"}]} +{"seq_id": "33786949244", "text": "\"\"\"Turnos\n\nRevision ID: 023\nRevises: 022\nCreate Date: 2014-06-23 07:45:11.330105\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '023'\ndown_revision = '022'\n\nfrom alembic import op\nimport sqlalchemy as db\n\n\ndef upgrade():\n op.create_table('turno',\n db.Column('id', db.Integer, primary_key=True),\n db.Column('id_lugar', db.Integer, db.ForeignKey('lugar.id'), nullable=False),\n db.Column('id_dia_semana', db.Integer, db.ForeignKey('dia_semana.id'), nullable=False),\n db.Column('horario_inicio', db.Time, nullable=False),\n db.Column('horario_fin', db.Time, nullable=False),\n )\n\n\ndef downgrade():\n op.drop_table('turno')\n", "repo_name": "tzulberti/entrenamiento-arqueria", "sub_path": "alembic/versions/023_turno.py", "file_name": "023_turno.py", "file_ext": "py", "file_size_in_byte": 671, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "alembic.op.create_table", "line_number": 18, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 18, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Time", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Time", "line_number": 23, "usage_type": "attribute"}, {"api_name": "alembic.op.drop_table", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "23757356782", "text": "import copy\nfrom re import I\nimport time\nimport cv2\nimport numpy as np\nimport open3d as o3d\nimport time\nfrom sklearn.neighbors import 
KDTree\nimport transforms3d as transform\n\n# Question 4: deal with point_to_plane = True\n\ndef fit_rigid(src, tgt, normal_tgt, point_to_plane):\n    # Question 2: Rigid Transform Fitting\n    # Implement this function\n    # -------------------------\n    T = np.identity(4)\n    # assume each row represents the coordinates of a point\n    if point_to_plane == False:\n        centroid_src = np.mean(src, axis=0)\n        centroid_tgt = np.mean(tgt, axis=0)\n        centered_src = src - centroid_src\n        centered_tgt = tgt - centroid_tgt\n        # print(\"centroid_src:\", centroid_src.shape)\n        # covariance_matrix = np.dot(centered_src.T, centered_tgt)\n        covariance_matrix = np.dot(centered_tgt.T, centered_src)\n        U, _, V = np.linalg.svd(covariance_matrix)\n\n        # R = np.dot(U,V.T)\n        R = np.dot(U, V)\n        # R = np.dot(V.T, U.T)\n        t = centroid_tgt - np.dot(R, centroid_src)\n        # RtR: [[1.00000000e+00 - 1.64452575e-16 - 3.07641091e-16]\n        # [-1.64452575e-16 1.00000000e+00 - 2.29237585e-16]\n        # [-3.07641091e-16 - 2.29237585e-16 1.00000000e+00]]\n        # t: [1.18285614 0.99242399 1.66882667]\n        T[:3,:3] = R\n        T[:3,3] = t.T\n    else:\n        C = np.matrix(np.zeros((6, 6)))\n        b = np.matrix(np.zeros((6, 1)))\n        for i in range(src.shape[0]):\n            p = np.matrix(src[i, :])\n            q = np.matrix(tgt[i, :])\n            n = np.matrix(normal_tgt[i, :])\n            c = np.cross(p, n).T\n            C[:3, :3] += c @ c.T\n            C[:3, 3:] += c @ n\n            C[3:, :3] += n.T @ c.T\n            C[3:, 3:] += n.T @ n\n            b[:3, 0] += c @ (p-q) @ n.T\n            b[3:, 0] += n.T @ (p-q) @ n.T\n        x = -np.dot(np.linalg.pinv(C), b)\n        T[:3,:3] = transform.euler.euler2mat(x[0], x[1], x[2])\n        T[0, 3] = x[3]\n        T[1, 3] = x[4]\n        T[2, 3] = x[5]\n    # -------------------------\n    return T\n\n\n# Question 4: deal with point_to_plane = True\ndef icp(source, target, init_pose=np.eye(4), max_iter=20, point_to_plane=False):\n    src = np.asarray(source.points)#.T\n    tgt = np.asarray(target.points)#.T\n    tgt_normals = np.asarray(target.normals)\n\n    # ---------------------------------------------------\n    T = init_pose\n    transforms = []\n    delta_Ts = []\n    inlier_ratio = 0\n    print(\"iter %d: inlier ratio: %.2f\" % (0, inlier_ratio))\n\n    target_tree = KDTree(tgt)\n\n    for i in range(max_iter):\n\n        distances, indices = target_tree.query(src, k=1)\n        indices = np.squeeze(indices.reshape(1,-1))\n\n        T_delta = fit_rigid(src, tgt[indices], tgt_normals[indices], point_to_plane)\n        src = np.dot(src, T_delta[:3, :3].T) + T_delta[:3, 3]\n        T = T @ T_delta\n        # T = T_delta @ T\n        threshold = 0.05\n        inliers = np.count_nonzero(distances < threshold)\n        inlier_ratio = np.sum(inliers) / distances.shape[0]\n        # ---------------------------------------------------\n\n        print(\"iter %d: inlier ratio: %.2f\" % (i + 1, inlier_ratio))\n        # relative update from each iteration\n        delta_Ts.append(T_delta.copy())\n        # pose estimation after each iteration\n        transforms.append(T.copy())\n\n        if inlier_ratio > 0.999:\n            break\n\n    return transforms, delta_Ts\n\ndef rgbd2pts(color_im, depth_im, K):\n    # Question 1: unproject rgbd to color point cloud, provide visualization in your document\n    # Your implementation between the lines\n    # ---------------------------\n    # N = 0 # todo\n    # color = np.zeros((N, 3))\n    # xyz = np.zeros((N, 3))\n\n    height, width = depth_im.shape[0], depth_im.shape[1]\n    x, y = np.meshgrid(np.arange(width), np.arange(height))\n\n    # Compute the normalized image coordinates\n    normalized_x = (x - K[0, 2]) / K[0, 0]\n    normalized_y = (y - K[1, 2]) / K[1, 1]\n\n    # Unproject the depth image to 3D points\n    points_3d = np.dstack((normalized_x * depth_im, normalized_y * depth_im, depth_im))\n\n    # Reshape the 3D points and the 
RGB image to create the colored point cloud\n    xyz = points_3d.reshape(-1, 3)\n    color = color_im.reshape(-1, 3)\n\n    # ---------------------------\n\n    pcd = o3d.geometry.PointCloud()\n    pcd.points = o3d.utility.Vector3dVector(xyz)\n    pcd.colors = o3d.utility.Vector3dVector(color)\n    return pcd\n\ndef pose_error(estimated_pose, gt_pose):\n    # Question 5: Translation and Rotation Error\n    # Use equations 5-6 in https://cmp.felk.cvut.cz/~hodanto2/data/hodan2016evaluation.pdf\n    # Your implementation between the lines\n    # ---------------------------\n    error = np.zeros(2)\n    estimated_R = estimated_pose[:3, :3]\n    estimated_t = estimated_pose[:3, 3]\n    gt_R = gt_pose[:3, :3]\n    gt_t = gt_pose[:3, 3]\n    error[1] = np.sqrt(sum((gt_t - estimated_t) ** 2))\n    error[0] = np.arccos((np.trace(np.dot(estimated_R, gt_R.T)) - 1) / 2)\n    # ---------------------------\n    return error\n\ndef read_data(ind = 0):\n    K = np.loadtxt(\"data/camera-intrinsics.txt\", delimiter=' ')\n    depth_im = cv2.imread(\"data/frame-%06d.depth.png\"%(ind),-1).astype(float)\n    depth_im /= 1000.  # depth is saved in 16-bit PNG in millimeters\n    depth_im[depth_im == 65.535] = 0  # set invalid depth to 0 (specific to 7-scenes dataset)\n    T = np.loadtxt(\"data/frame-%06d.pose.txt\"%(ind))  # 4x4 rigid transformation matrix\n    color_im = cv2.imread(\"data/frame-%06d.color.jpg\"%(ind),-1)\n    color_im = cv2.cvtColor(color_im, cv2.COLOR_BGR2RGB) / 255.0\n    return color_im, depth_im, K, T\n\nif __name__ == \"__main__\":\n\n    # pairwise ICP\n\n    # read color, depth data and the ground-truth, converting to point cloud\n    color_im, depth_im, K, T_tgt = read_data(0)\n    target = rgbd2pts(color_im, depth_im, K)\n    color_im, depth_im, K, T_src = read_data(40)\n    source = rgbd2pts(color_im, depth_im, K)\n\n    # downsampling and normal estimation\n    source = source.voxel_down_sample(voxel_size=0.02)\n    target = target.voxel_down_sample(voxel_size=0.02)\n    source.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))\n    target.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))\n\n\n    # # print(\"source:\", np.asarray(source.points))\n    # T = fit_rigid(np.asarray(source.points)[:10000,:], np.asarray(target.points)[:10000,:], point_to_plane=False)\n    # print(\"T:\", T)\n    # # print(\"source:\", np.asarray(source.points).shape) #source: (18732, 3)\n    # # print(\"target:\", np.asarray(target.points).shape) #target: (20784, 3)\n    # # ValueError: shapes (3,18732) and (20784,3) not aligned: 18732 (dim 1) != 20784 (dim 0)\n    # # source number and target number are not same\n\n\n    # conduct ICP (your code)\n    final_Ts, delta_Ts = icp(source, target, point_to_plane=False)\n\n    # visualization\n    vis = o3d.visualization.Visualizer()\n    vis.create_window()\n    ctr = vis.get_view_control()\n    ctr.set_front([ -0.11651295252277051, -0.047982289143896774, -0.99202945108647766 ])\n    ctr.set_lookat([ 0.023592929264511786, 0.051808635289583765, 1.7903649529102956 ])\n    ctr.set_up([ 0.097655832648056065, -0.9860023571949631, -0.13513952033284915 ])\n    ctr.set_zoom(0.42199999999999971)\n    vis.add_geometry(source)\n    vis.add_geometry(target)\n\n    save_image = False\n\n    # update source images\n    for i in range(len(delta_Ts)):\n        source.transform(delta_Ts[i])\n        vis.update_geometry(source)\n        vis.poll_events()\n        vis.update_renderer()\n        time.sleep(0.2)\n        if save_image:\n            vis.capture_screen_image(\"temp_%04d.jpg\" % i)\n\n    # visualize camera\n    h, w, c = color_im.shape\n    tgt_cam = o3d.geometry.LineSet.create_camera_visualization(w, h, K, np.eye(4), scale = 
0.2)\n src_cam = o3d.geometry.LineSet.create_camera_visualization(w, h, K, np.linalg.inv(T_src) @ T_tgt, scale = 0.2)\n pred_cam = o3d.geometry.LineSet.create_camera_visualization(w, h, K, np.linalg.inv(final_Ts[-1]), scale = 0.2)\n\n gt_pose = np.linalg.inv(T_src) @ T_tgt\n pred_pose = np.linalg.inv(final_Ts[-1])\n p_error = pose_error(pred_pose, gt_pose)\n print(\"Ground truth pose:\", gt_pose)\n print(\"Estimated pose:\", pred_pose)\n print(\"Rotation/Translation Error\", p_error)\n\n tgt_cam.paint_uniform_color((1, 0, 0))\n src_cam.paint_uniform_color((0, 1, 0))\n pred_cam.paint_uniform_color((0, 0.5, 0.5))\n vis.add_geometry(src_cam)\n vis.add_geometry(tgt_cam)\n vis.add_geometry(pred_cam)\n\n vis.run()\n vis.destroy_window()\n\n # Provide visualization of alignment with camera poses in write-up.\n # Print pred pose vs gt pose in write-up.", "repo_name": "HongqingThomas/CS498-machine-perception-Assignment", "sub_path": "HW3/code/icp.py", "file_name": "icp.py", "file_ext": "py", "file_size_in_byte": 8658, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.identity", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.linalg.svd", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 53, "usage_type": "attribute"}, {"api_name": "transforms3d.euler.euler2mat", "line_number": 54, "usage_type": "call"}, {"api_name": "transforms3d.euler", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KDTree", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 118, "usage_type": "call"}, {"api_name": 
"open3d.geometry.PointCloud", "line_number": 126, "usage_type": "call"}, {"api_name": "open3d.geometry", "line_number": 126, "usage_type": "attribute"}, {"api_name": "open3d.utility.Vector3dVector", "line_number": 127, "usage_type": "call"}, {"api_name": "open3d.utility", "line_number": 127, "usage_type": "attribute"}, {"api_name": "open3d.utility.Vector3dVector", "line_number": 128, "usage_type": "call"}, {"api_name": "open3d.utility", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.trace", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 147, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 151, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 152, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 153, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 153, "usage_type": "attribute"}, {"api_name": "open3d.geometry.KDTreeSearchParamHybrid", "line_number": 169, "usage_type": "call"}, {"api_name": "open3d.geometry", "line_number": 169, "usage_type": "attribute"}, {"api_name": "open3d.geometry.KDTreeSearchParamHybrid", "line_number": 170, "usage_type": "call"}, {"api_name": "open3d.geometry", "line_number": 170, "usage_type": "attribute"}, {"api_name": "open3d.visualization.Visualizer", "line_number": 186, "usage_type": "call"}, {"api_name": "open3d.visualization", "line_number": 186, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 204, "usage_type": "call"}, {"api_name": "open3d.geometry.LineSet.create_camera_visualization", "line_number": 210, "usage_type": "call"}, {"api_name": "open3d.geometry", "line_number": 210, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 210, "usage_type": "call"}, {"api_name": "open3d.geometry.LineSet.create_camera_visualization", "line_number": 211, "usage_type": "call"}, {"api_name": "open3d.geometry", "line_number": 211, "usage_type": "attribute"}, {"api_name": "numpy.linalg.inv", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 211, "usage_type": "attribute"}, {"api_name": "open3d.geometry.LineSet.create_camera_visualization", "line_number": 212, "usage_type": "call"}, {"api_name": "open3d.geometry", "line_number": 212, "usage_type": "attribute"}, {"api_name": "numpy.linalg.inv", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 212, "usage_type": "attribute"}, {"api_name": "numpy.linalg.inv", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 214, "usage_type": "attribute"}, {"api_name": "numpy.linalg.inv", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 215, "usage_type": "attribute"}]} +{"seq_id": "17303471124", "text": "from pathlib import Path\nfrom datetime import datetime\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Input\nfrom keras.layers import Conv2D, MaxPooling2D, BatchNormalization\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import Model\nfrom keras.utils.multi_gpu_utils import multi_gpu_model\nfrom 
classification_models import Classifiers\nfrom keras import backend as K\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler\nimport pandas as pd\n\nfrom utils.callbacks.plotter_callback import PlotterCallback\nfrom utils.callbacks.classification_callback import ClassificationCallback\nfrom utils.generators.new_image_builder import EmgImageGenerator, permute_axes_subtract\nfrom utils.losses.numpy_losses import *\nfrom utils.preprocessing.data_preprocessor import DataPreprocessor\n\nnum_imfs = 4\n\n\ndef get_gpu_model(input_size=None, activation=None, initial_weights=None, is_corruption=False):\n ResNet50v2, preprocess_input = Classifiers.get('resnet50v2')\n model = ResNet50v2(input_shape=input_size, weights='imagenet', classes=1, include_top=False, pooling='avg')\n model_inputs = model.inputs\n model_outputs = model.output\n model_outputs = Dense(128, activation='relu')(model_outputs)\n model_outputs = Dense(32, activation='relu')(model_outputs)\n model_outputs = Dense(1, activation=activation)(model_outputs)\n model = Model(model_inputs, model_outputs)\n model = multi_gpu_model(model, gpus=2)\n model.compile(loss=keras.losses.mean_squared_error,\n optimizer=keras.optimizers.Adam()\n )\n return model\n\ndef get_cpu_model_no_overfit(input_size=None, activation=None, initial_weights=None, is_corruption=False):\n model = Sequential()\n model.add(Conv2D(16, kernel_size=(3, 3), activation='relu', input_shape=(8, 8, num_imfs)))\n model.add(Conv2D(4, (3, 3), activation='relu'))\n #model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(64, activation='relu'))\n model.add(Dense(32, activation='relu'))\n model.add(Dense(16, activation='relu'))\n model.add(Dense(8, activation='relu'))\n #model.add(Dropout(0.5))\n model.add(Dense(1, activation=keras.activations.selu))\n if is_corruption:\n loss = keras.losses.binary_crossentropy\n else:\n loss = keras.losses.mean_squared_error\n model.compile(loss=loss,\n optimizer=keras.optimizers.Adam(1e-3)\n )\n if initial_weights:\n model.load_weights(initial_weights)\n return model\n\ndef __get_cpu_model(input_size=None, activation=None, initial_weights=None, is_corruption=False):\n model = Sequential()\n model.add(Conv2D(16, kernel_size=(3, 3), activation='relu', input_shape=(8, 8, num_imfs), padding='same'))\n model.add(Conv2D(8, (3, 3), activation='relu', padding='same'))\n model.add(Conv2D(4, (3, 3), activation='relu', padding='same'))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(8, activation='relu'))\n model.add(Dropout(0.25))\n model.add(Dense(1, activation=None)) # , bias_initializer=0.5\n if is_corruption:\n loss = keras.losses.binary_crossentropy\n else:\n loss = keras.losses.mean_squared_error\n model.compile(loss=loss,\n optimizer=keras.optimizers.SGD(1e-5)\n )\n if initial_weights:\n model.load_weights(initial_weights)\n return model\n\ndef get_cpu_model(input_size=None, activation=None, initial_weights=None, is_corruption=False):\n model = Sequential()\n #model.add(Conv2D(16, kernel_size=(6, 6), activation='relu', input_shape=(8, 8, num_imfs), padding='same'))\n #model.add(Conv2D(4, (4, 4), activation='relu', padding='same'))\n model.add(Flatten(input_shape=(8, 8, num_imfs)))\n model.add(Dense(512, activation='relu'))\n model.add(Dense(256, activation='relu'))\n model.add(Dense(128, activation='relu'))\n model.add(Dense(64, activation='relu'))\n model.add(Dense(32, activation='relu'))\n model.add(Dense(16, activation='relu'))\n model.add(Dense(8, activation='relu'))\n 
model.add(Dense(1, activation=None))\n if is_corruption:\n loss = keras.losses.binary_crossentropy\n print('bce')\n else:\n loss = keras.losses.mean_squared_error\n print('mse')\n model.compile(loss=loss,\n optimizer=keras.optimizers.Adam(1e-4)\n )\n if initial_weights:\n model.load_weights(initial_weights)\n return model\n\n\nnum_gpus = len(K.tensorflow_backend._get_available_gpus())\nget_model = get_gpu_model if num_gpus else get_cpu_model\n\ndef get_model_checkpoint(experiment_dir):\n weights_path = experiment_dir.joinpath('weights', '{epoch:02d}.hdf5')\n if not weights_path.parents[0].exists():\n weights_path.parents[0].mkdir(parents=True)\n weights_path = str(weights_path)\n model_checkpoint = ModelCheckpoint(weights_path)\n return model_checkpoint\n\nif __name__ == '__main__':\n # todo ideal would be to define the train metric as tf metrics in order to avoid reiterating over the train data\n train_path = Path(__file__, '..', 'files', 'emd_dl_train_annotations.csv').resolve()\n val_path = Path(__file__, '..', 'files', 'emd_dl_val_annotations.csv').resolve()\n train_preprocessor = DataPreprocessor()\n #val_preprocessor = DataPreprocessor()\n train_annotations = pd.read_csv(train_path)\n val_annotations = pd.read_csv(val_path)\n train_annotations = train_annotations.fillna(0)\n val_annotations = val_annotations.fillna(0)\n\n\n date_id = datetime.now().strftime('%Y%m%d%H%M')\n #date_id ='202007192001'\n experiment_dir = Path(__file__, '..', 'files', 'deep_learning', date_id).resolve()\n initial_weights = None\n #initial_weights = experiment_dir.joinpath('weights', '82.hdf5').resolve()\n #initial_weights = initial_weights if num_gpus else None\n activation = None\n\n summary_path = experiment_dir.joinpath('summaries', 'summary.json')\n batch_size = 64 if num_gpus else 256\n input_size = (224, 224, 3) if num_gpus else [None]\n is_corruption = False\n\n train_emg_gen = EmgImageGenerator(train_annotations.copy(), batch_size, scaler=None, input_size=input_size[0], num_imfs=num_imfs)\n train_scaler = train_emg_gen.scaler\n callback_train_emg_gen = EmgImageGenerator(train_annotations.copy(), batch_size, scaler=train_scaler, input_size=input_size[0], num_imfs=num_imfs)\n callback_val_emg_gen = EmgImageGenerator(val_annotations, batch_size, scaler=train_scaler, input_size=input_size[0], num_imfs=num_imfs)\n\n\n model = get_model(activation=activation, initial_weights=initial_weights, input_size=input_size)\n if is_corruption:\n train_gen = train_emg_gen.corruption_train_generator()\n loss = numpy_bce\n p = ClassificationCallback(callback_train_emg_gen, callback_val_emg_gen, summary_path, loss)\n else:\n train_gen = train_emg_gen.train_generator()\n loss = numpy_mse\n p = PlotterCallback(callback_train_emg_gen, callback_val_emg_gen, summary_path, loss)\n model_checkpoint = get_model_checkpoint(experiment_dir)\n\n callbacks = [p, model_checkpoint]\n model.fit_generator(train_gen,\n steps_per_epoch=train_emg_gen.num_samples // train_emg_gen.batch_size,\n epochs=2000,\n callbacks=callbacks,\n verbose=1,\n initial_epoch=0)\n", "repo_name": "bfialkoff/thesis-dl-stuff", "sub_path": "train_model.py", "file_name": "train_model.py", "file_ext": "py", "file_size_in_byte": 7459, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "classification_models.Classifiers.get", "line_number": 25, "usage_type": "call"}, {"api_name": "classification_models.Classifiers", "line_number": 25, "usage_type": "name"}, {"api_name": 
"keras.layers.Dense", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.Model", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.utils.multi_gpu_utils.multi_gpu_model", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.losses", "line_number": 34, "usage_type": "attribute"}, {"api_name": "keras.optimizers.Adam", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 35, "usage_type": "attribute"}, {"api_name": "keras.models.Sequential", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.activations", "line_number": 50, "usage_type": "attribute"}, {"api_name": "keras.losses", "line_number": 52, "usage_type": "attribute"}, {"api_name": "keras.losses", "line_number": 54, "usage_type": "attribute"}, {"api_name": "keras.optimizers.Adam", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 56, "usage_type": "attribute"}, {"api_name": "keras.models.Sequential", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 70, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 71, "usage_type": "call"}, {"api_name": "keras.losses", "line_number": 73, "usage_type": "attribute"}, {"api_name": "keras.losses", "line_number": 75, "usage_type": "attribute"}, {"api_name": "keras.optimizers.SGD", "line_number": 77, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 77, "usage_type": "attribute"}, {"api_name": "keras.models.Sequential", "line_number": 84, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 87, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 89, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 90, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 91, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 92, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 93, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 94, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 95, "usage_type": "call"}, {"api_name": "keras.losses", 
"line_number": 97, "usage_type": "attribute"}, {"api_name": "keras.losses", "line_number": 100, "usage_type": "attribute"}, {"api_name": "keras.optimizers.Adam", "line_number": 103, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 103, "usage_type": "attribute"}, {"api_name": "keras.backend.tensorflow_backend._get_available_gpus", "line_number": 110, "usage_type": "call"}, {"api_name": "keras.backend.tensorflow_backend", "line_number": 110, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 110, "usage_type": "name"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 118, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 123, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 124, "usage_type": "call"}, {"api_name": "utils.preprocessing.data_preprocessor.DataPreprocessor", "line_number": 125, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 128, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 133, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 133, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 135, "usage_type": "call"}, {"api_name": "utils.generators.new_image_builder.EmgImageGenerator", "line_number": 146, "usage_type": "call"}, {"api_name": "utils.generators.new_image_builder.EmgImageGenerator", "line_number": 148, "usage_type": "call"}, {"api_name": "utils.generators.new_image_builder.EmgImageGenerator", "line_number": 149, "usage_type": "call"}, {"api_name": "utils.callbacks.classification_callback.ClassificationCallback", "line_number": 156, "usage_type": "call"}, {"api_name": "utils.callbacks.plotter_callback.PlotterCallback", "line_number": 160, "usage_type": "call"}]} +{"seq_id": "18337832612", "text": "\"\"\"\nResources module ot help load templates in\n\"\"\"\nimport io\nimport logging\nimport os\nimport shlex\nimport shutil\n\nimport jinja2\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef run(var_dir: str, config: dict, do_exec: bool, cmd: str = None):\n \"\"\"Execute Apache given the config.\"\"\"\n\n jinja_env = jinja2.Environment(\n loader=jinja2.PackageLoader(\"forevd\", \"apache\"), autoescape=jinja2.select_autoescape()\n )\n jinja_env.add_extension(\"jinja2.ext.do\")\n\n template = jinja_env.get_template(\"httpd.conf\")\n\n apache_config = template.render(**config)\n _LOGGER.debug(f\"apache_config: {apache_config}\")\n\n config_file = os.path.join(var_dir, \"httpd.conf\")\n _LOGGER.debug(f\"config_file: {config_file}\")\n\n try:\n os.makedirs(var_dir, mode=0o700)\n except FileExistsError:\n pass\n\n with io.open(config_file, \"w\", encoding=\"utf-8\") as fh:\n fh.write(apache_config)\n\n _LOGGER.debug(f\"cmd: {cmd!r}\")\n if not cmd:\n httpd = shutil.which(\"httpd\")\n _LOGGER.debug(f\"httpd: {httpd}\")\n\n cmd = [\n os.path.basename(httpd),\n \"-D\",\n \"FOREGROUND\",\n \"-f\",\n config_file,\n \"-d\",\n \"/opt/homebrew/Cellar/httpd/2.4.55/lib/httpd\",\n ]\n else:\n cmd = shlex.split(cmd)\n httpd = shutil.which(cmd[0])\n\n if not do_exec:\n _LOGGER.info(f\"Not executing command: {shlex.join(cmd)}\")\n return\n\n _LOGGER.info(f\"Executing: {httpd} {shlex.join(cmd)}\")\n\n os.execve(httpd, cmd, os.environ)\n", "repo_name": "firestoned/forevd", "sub_path": "forevd/apache/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1543, "program_lang": "python", "lang": "en", "doc_type": "code", 
"stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "jinja2.Environment", "line_number": 18, "usage_type": "call"}, {"api_name": "jinja2.PackageLoader", "line_number": 19, "usage_type": "call"}, {"api_name": "jinja2.select_autoescape", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 32, "usage_type": "call"}, {"api_name": "io.open", "line_number": 36, "usage_type": "call"}, {"api_name": "shutil.which", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "shlex.split", "line_number": 54, "usage_type": "call"}, {"api_name": "shutil.which", "line_number": 55, "usage_type": "call"}, {"api_name": "shlex.join", "line_number": 58, "usage_type": "call"}, {"api_name": "shlex.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.execve", "line_number": 63, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 63, "usage_type": "attribute"}]} +{"seq_id": "18271725957", "text": "\"\"\"Exercise 12: Simple hierarchical model\n\nThe file pew research center june elect wknd data.dta3 has data from Pew\nResearch Center polls taken during the 2008 election campaign. You can read\nthese data into R using the read.dta() function (after first loading the\nforeign package into R).\n\nYour task is to estimate the percentage of the (adult) population in each state\n(excluding Alaska, Hawaii, and the District of Columbia) who label themselves\nas ‘very liberal,’ following the general procedure that was used in Section 2.7\nto estimate cancer rates, but using the binomial and beta rather than Poisson\nand gamma distributions.\n\nBut you do not need to make maps; it will be enough to make scatterplots,\nplotting the estimate vs. Barack Obama’s vote share in 2008 (data available at\n2008ElectionResult.csv, readable in R using read.csv()).\n\nMake the following four graphs on a single page:\n• Graph proportion very liberal among the survey respondents in each state vs.\n Obama vote share—that is, a scatterplot using the two-letter state\n abbreviations (see state.abb() in R).\n• Graph the Bayes posterior mean in each state vs. 
Obama vote share.\n• Repeat graphs (a) and (b) using the number of respondents in the state on the\n x-axis.\n\nThis exercise has four challenges: first, manipulating the data in order to get\nthe totals by state; second, estimating the parameters of the prior\ndistribution; third, doing the Bayesian analysis by state; and fourth, making\nthe graphs.\n\"\"\"\nimport os\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom matplotlib import pyplot as plt\n\n\n# Alaska, Hawaii, and the District of Columbia\n\nDATA_DIR = \"../../data\"\nSTATES_TO_EXCLUDE = [\"alaska\", \"hawaii\", \"district of columbia\", \"washington dc\"]\n\n\ndef _exclude_states(df, state_col=\"state\"):\n df[state_col] = df[state_col].str.lower()\n df = df.loc[~df[state_col].isin(STATES_TO_EXCLUDE), :]\n return df\n\n\ndef load_poll_data():\n df = pd.read_stata(\n os.path.join(DATA_DIR, \"pew_research_center_june_elect_wknd_data.dta\")\n )\n df = _exclude_states(df)\n return df\n\n\ndef load_election_data():\n df = pd.read_csv(os.path.join(DATA_DIR, \"2008ElectionResult.csv\"))\n df = _exclude_states(df)\n return df\n\n\ndef posterior_mean_binom_beta(alpha, beta, y, n):\n \"\"\"Posterior mean of binomial model with beta distribution prior\"\"\"\n return (alpha + y) / (alpha + beta + n)\n\n\ndef ideo_by_state(df):\n ideo_count = df.groupby([\"state\", \"ideo\"]).size()\n state_count = ideo_count.groupby(\"state\").transform(\"sum\")\n ideo_prop = ideo_count / state_count\n\n return pd.concat(\n [\n ideo_count.rename(\"count\"),\n ideo_prop.rename(\"prop\"),\n state_count.rename(\"total\"),\n ],\n axis=1,\n )\n\n\ndef main():\n df_poll = load_poll_data()\n df_elec = load_election_data()\n\n ideo_prop = ideo_by_state(df_poll)\n\n very_liberal_prop = ideo_prop.loc[\n ideo_prop.index.get_level_values(\"ideo\") == \"very liberal\"\n ]\n\n # estimate the prior\n df_elec[\"vote_Obama_frac\"] = df_elec[\"vote_Obama_pct\"] / 100.0\n\n # plt.hist(very_liberal_prop[\"prop\"])\n\n lnspc = np.linspace(0, 0.25, 100)\n\n alpha, beta, loc, scale = stats.beta.fit(\n very_liberal_prop[\"prop\"], floc=0, fscale=1\n )\n pdf_beta = stats.beta.pdf(lnspc, alpha, beta, loc=0, scale=1)\n # plt.plot(lnspc, pdf_beta, label=\"Beta\")\n #\n # plt.show()\n\n def row_apply(row, alpha=alpha, beta=beta):\n return posterior_mean_binom_beta(\n alpha=alpha, beta=beta, y=row[\"count\"], n=row[\"total\"],\n )\n\n posterior_mean = very_liberal_prop.apply(row_apply, axis=1,)\n\n df = pd.merge(\n df_elec, posterior_mean.reset_index(), left_on=\"state\", right_on=\"state\"\n )\n\n from IPython import embed\n\n embed()\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "CDonnerer/notes", "sub_path": "bda/exercises/chapter2/exercise_21.py", "file_name": "exercise_21.py", "file_ext": "py", "file_size_in_byte": 3858, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pandas.read_stata", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 100, "usage_type": "call"}, {"api_name": "scipy.stats.beta.fit", 
"line_number": 102, "usage_type": "call"}, {"api_name": "scipy.stats.beta", "line_number": 102, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 102, "usage_type": "name"}, {"api_name": "scipy.stats.beta.pdf", "line_number": 105, "usage_type": "call"}, {"api_name": "scipy.stats.beta", "line_number": 105, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 105, "usage_type": "name"}, {"api_name": "pandas.merge", "line_number": 117, "usage_type": "call"}, {"api_name": "IPython.embed", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "71850803253", "text": "import json\nimport time\nfrom kafka import KafkaProducer\nfrom random import *\n\n'''\nis an object attached by debezium\n'''\n\n\nclass Source:\n def __init__(self, table):\n self.version = '0.9.3.Final'\n self.connector = 'mysql'\n self.name = 'edutec'\n self.server_id = 0\n self.ts_sec = 0\n self.gtid = None\n self.file = 'mysql-bin.000006'\n self.pos = 154\n self.row = 0\n self.snapshot = True\n self.thread = None\n self.db = \"moodle\"\n self.table = table\n self.query = None\n\n\n'''\nthe wraper object constructed by debezium\ncaptures the former and the current state from binary log\n'''\n\n\nclass Cdc:\n def __init__(self, value, source, topicname):\n self.before = None\n self.after = value\n self.topic = topicname\n self.source = source\n self.op = 'c'\n self.ts_ms = time.time()\n\n\n'''\nthe Moodle User entity\n'''\n\n\nclass Mdl_User:\n def __init__(self, user_id, username, firstname, lastname, email):\n self.email = 'emailadress'\n self.name = 'firstname'\n self.id = user_id\n self.auth = \"manual\"\n self.confirmed = 1\n self.policyagreed = 0\n self.deleted = 0\n self.suspended = 0\n self.mnethostid = 1\n self.username = username\n self.password = \"$2y$10$dWTO17/bcvTMAqF7SSLRzuv9S59rc45X8lQADpeU.aNOCwQP2khHu\"\n self.idnumber = \"someidnumber\"\n self.firstname = firstname\n self.lastname = lastname\n self.email = email\n self.emailstop = 0\n self.icq = \"icq\"\n self.skype = \"skype\"\n self.yahoo = \"yahoo\"\n self.aim = \"aim\"\n self.msn = \"msn\"\n self.phone1 = \"01234\"\n self.phone2 = \"40123\"\n self.institution = \"an institution\"\n self.department = \"a department\"\n self.address = \"an adress\"\n self.city = \"a city\"\n self.country = \"de\"\n self.lang = \"en\"\n self.calendartype = \"gregorian\"\n self.theme = \"dark\"\n self.timezone = \"99\"\n self.firstaccess = 1560043501\n self.lastaccess = 1560048856\n self.lastlogin = 1560043501\n self.currentlogin = 1560043646\n self.lastip = \"172.18.0.1\"\n self.secret = \"asecret\"\n self.picture = 0\n self.url = \"\"\n self.description = \"some url\"\n self.descriptionformat = 1\n self.mailformat = 1\n self.maildigest = 0\n self.maildisplay = 1\n self.autosubscribe = 1\n self.trackforums = 0\n self.timecreated = 1560043574\n self.timemodified = 1560043574\n self.trustbitmask = 10\n self.imagealt = \"someimagealt\"\n self.lastnamephonetic = \"phonetics\"\n self.firstnamephonetic = \"phonetics\"\n self.middlename = \"not catholic\"\n self.alternatename = \"not in the net\"\n\n\n'''\nMoodle Forum User\n'''\n\n\nclass Mdl_ForumPost:\n def __init__(self, mdlForumPostID, discussionID, parentID, userID, t0):\n self.id = mdlForumPostID\n self.discussion = discussionID\n self.parent = parentID\n self.userid = userID\n self.created = t0\n self.modified = self.created\n self.mailed = 0\n self.subject = \"some subject\"\n self.message = \"some message \" + str(self.id)\n self.messageformat = 0\n self.messagetrust = 0\n 
self.attachment = \"nonetoattach\"\n self.totalscore = 0\n self.mailnow = 0\n self.deleted = 0\n\n\n'''\nMoodle Forum\n'''\n\n\nclass Mdl_Forum:\n\n def __init__(self, id):\n self.id = id\n self.course = id\n self.type = \"type\"\n self.name = \"name \" + str(id)\n self.intro = \"intro \" + str(id)\n self.introformat = 0\n self.assessed = False\n self.assesstimestart = 0\n self.assesstimefinish = 0\n self.scale = 0\n self.maxbytes = 0\n self.maxattachments = 0\n self.forcesubscribe = 0\n self.trackingtype = 0\n self.rsstype = 0\n self.rssarticles = 0\n self.timemodified = time.time()\n self.warnafter = 0\n self.blockafter = 0\n self.blockperiod = 0\n self.completiondiscussions = 0\n self.completionreplies = 0\n self.completionposts = 0\n self.displaywordcount = False\n self.lockdiscussionafter = False\n\n\n'''\n Moodle Quiz\n'''\nclass Mdl_Quiz:\n def __init__(self, course, name):\n\n self.course = course\n self.name = name\n self.intro = \"some intro of \" + name\n self.introformat = 1\n self.timeopen = 0\n self.timeclose = 0\n self.timelimit = 0\n self.overduehandling = \"autosubmit\"\n self.graceperiod = 0\n self.preferredbehaviour = \"deferredfeedback\"\n self.canredoquestions = 0\n self.attempts = 0\n self.attemptonlast = 0\n self.grademethod = 1\n self.decimalpoints = 2\n self.questiondecimalpoints = -1\n self.reviewattempt = 69888\n self.reviewcorrectness = 4352\n self.reviewmarks = 4352\n self.reviewspecificfeedback = 4352\n self.reviewgeneralfeedback = 4352\n self.reviewrightanswer = 4352\n self.reviewoverallfeedback = 4352\n self.questionsperpage = 1\n self.navmethod = \"free\"\n self.shuffleanswers = 1\n self.sumgrades = 0.00000\n self.grade = 10.00000\n self.timecreated = time.time()\n self.timemodified = time.time()\n self.password = \"\"\n self.subnet = \"\"\n self.browsersecurity = \"-\"\n self.delay1 = 0\n self.delay2 = 0\n self.showuserpicture = 0\n self.showblocks = 0\n self.completionattemptsexhausted = 0\n self.completionpass = 0\n self.allowofflineattempts = 0\n\n\nclass Mdl_Quiz_Attempt:\n def __init__(self, quizid, userid):\n self.quiz = quizid\n self.userid = userid\n self.attempt = 1\n self.uniqueid = quizid * userid * 100000\n self.layout = \"smol\"\n self.currentpage = 1\n self.preview = 1\n self.state = \"started\"\n self.timestart = time.time()\n self.timefinish = time.time() + 10000\n self.timemodified = time.time()\n self.timemodifiedoffline = time.time()\n self.timecheckstate = 1\n self.sumgrades = 1\n\ndef connect():\n producer = None\n try:\n producer = KafkaProducer(bootstrap_servers='localhost:9092', client_id='python-leap-motion-mock-data-producer')\n print('Successful connection wit Kafka ...')\n except Exception as ex:\n print('Failure:')\n print(str(ex))\n finally:\n return producer\n\n\ndef send(producer, message):\n try:\n m = json.dumps(message, default=lambda o: o.__dict__).encode('utf-8')\n producer.send(message.topic, value=m)\n producer.flush()\n except Exception as ex:\n print('Failure:')\n print(str(ex))\n\ndef sendJson(producer, message, topic, key):\n try:\n producer.send(topic = topic, value=json.dumps(message).encode('utf-8'), key = bytes(key, 'utf-8'))\n producer.flush()\n except Exception as ex:\n print('Failure:')\n print(str(ex))\n\n\n\n\ndef constructKafkaObject(value, source, topic):\n source = Source(source)\n cdc = Cdc(value, source, topic)\n return cdc\n", "repo_name": "toschio/bachelor-thesis", "sub_path": "software/scripts/testdata-generators/in-stream/moodle_mock_data_base.py", "file_name": "moodle_mock_data_base.py", 
"file_ext": "py", "file_size_in_byte": 7291, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "21", "api": [{"api_name": "time.time", "line_number": 42, "usage_type": "call"}, {"api_name": "time.time", "line_number": 157, "usage_type": "call"}, {"api_name": "time.time", "line_number": 202, "usage_type": "call"}, {"api_name": "time.time", "line_number": 203, "usage_type": "call"}, {"api_name": "time.time", "line_number": 226, "usage_type": "call"}, {"api_name": "time.time", "line_number": 227, "usage_type": "call"}, {"api_name": "time.time", "line_number": 228, "usage_type": "call"}, {"api_name": "time.time", "line_number": 229, "usage_type": "call"}, {"api_name": "kafka.KafkaProducer", "line_number": 236, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 247, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 256, "usage_type": "call"}]} +{"seq_id": "40180473818", "text": "#! /usr/bin/env python\nimport os\nimport time\nimport argparse\nimport numpy as np\nimport pickle\nfrom sklearn import datasets\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nimport tensorflow.compat.v1 as tf\nfrom model import Model\nimport pdb\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Regular training and robust training of the pdf malware classification model.')\n parser.add_argument('--method', type=str, default=\"A\", help=\"d1, i1, d2, i41, mono\")\n parser.add_argument('--model', type=str, default=\"baseline\", help=\"baseline\")\n parser.add_argument('--iters', type=int, default=\"4000\", help=\"Number of iterations for gradient attacks\")\n parser.add_argument('--gpu', type=str, default=\"0\")\n return parser.parse_args()\n\ndef eval(x, y, sess, model):\n y_p = sess.run(model.y_pred,\\\n feed_dict={model.x_input:x,\\\n model.y_input:y\n })\n\n try:\n tn, fp, fn, tp = confusion_matrix(y, y_p).ravel()\n #print tn, fp, fn, tp\n acc = (tp+tn)/float(tp+tn+fp+fn)\n if((tn+fp)!=0):\n fpr = fp/float(fp+tn)\n return acc, fpr\n else:\n return acc\n except ValueError:\n return accuracy_score(y, y_p), None\n\n\ndef find_model_path(args):\n model_path = \"../models/adv_trained/\"\n if args.model == \"baseline\":\n PATH = model_path + \"baseline_checkpoint.ckpt\"\n elif args.model == \"TA\":\n PATH = model_path + \"baseline_adv_delete_one.ckpt\"\n elif args.model == \"TB\":\n PATH = model_path + \"baseline_adv_insert_one.ckpt\"\n elif args.model == \"TC\":\n PATH = model_path + \"baseline_adv_delete_two.ckpt\"\n elif args.model == \"TD\":\n PATH = model_path + \"baseline_adv_insert_rootallbutone.ckpt\"\n elif args.model == \"ATAB\":\n PATH = model_path + \"baseline_adv_combine_two.ckpt\"\n elif args.model == \"EAB\":\n PATH = model_path + \"adv_del_twocls.ckpt\"\n elif args.model == \"ED\":\n PATH = model_path + \"adv_keep_twocls.ckpt\"\n elif args.model == \"RA\":\n PATH = model_path + \"robust_delete_one.ckpt\"\n elif args.model == \"RB\":\n PATH = model_path + \"robust_insert_one.ckpt\"\n elif args.model == \"RC\":\n PATH = model_path + \"robust_delete_two.ckpt\"\n elif args.model == \"RD\":\n PATH = model_path + \"robust_insert_allbutone.ckpt\"\n elif args.model == \"RAB\":\n PATH = model_path + \"robust_combine_two_v2_e18.ckpt\"\n elif args.model == \"RABE\":\n PATH = model_path + \"robust_combine_three_e17.ckpt\"\n elif args.model == \"mono\":\n PATH = model_path + \"robust_monotonic.ckpt\"\n else:\n print(\"no such model!\")\n exit()\n return 
PATH\n\n\ndef cal_grad(sess, model, benign_gradient, attack_loss, x, y):\n g, l, acc = sess.run([benign_gradient, model.y_pred, model.accuracy], feed_dict={model.x_input:x, model.y_input:y})\n return g, l, acc\n\n\ndef find_index(sess, model, benign_gradient, attack_loss, inds_tensor, index_tensor, x, y, inds):\n g, l, acc = sess.run([benign_gradient, model.pre_softmax, model.accuracy], feed_dict={model.x_input:x, model.y_input:y, inds_tensor:inds})\n return g, l, acc\n\n\ndef unrestricted_gradient_attack(sess, model, benign_gradient, attack_loss, x, y, feat_trie):\n\n adv_acc = 0\n is_adv = np.zeros(x.shape[0]).astype(bool)\n new_is_adv = np.zeros(x.shape[0]).astype(bool)\n adv_accs = []\n\n adv_acc = 1.-float(np.sum(is_adv))/float(x.shape[0])\n stuck_acc = -1\n\n inds = list(range(x.shape[1]))\n x_adv = np.copy(x)\n y_adv = np.copy(y)\n steps = 0\n distances = np.ones(x.shape[0])*(-1.)\n samples = np.copy(x)\n\n #while np.sum(x_adv[:, inds]==0.):\n for steps in range(200000):\n #start_time = time.time()\n x_adv = x_adv[new_is_adv==0]\n y_adv = y_adv[new_is_adv==0]\n\n g_adv, l_adv, acc_adv = cal_grad(sess, model, benign_gradient, attack_loss, x_adv, y_adv)\n new_is_adv = np.not_equal(l_adv, y_adv)\n\n distance = np.sum(np.absolute(x_adv-x[is_adv==0]), axis=1)\n #print(is_adv.shape, distances.shape, distance.shape)\n distances[is_adv==0] = distance\n samples[is_adv==0] = x_adv\n\n is_adv[is_adv==0] = np.logical_or(is_adv[is_adv==0], new_is_adv)\n adv_acc = 1.-float(np.sum(is_adv))/float(x.shape[0])\n\n print(\"steps:\", steps, \"adv_acc:\", adv_acc, \"distance:\", np.mean(distances))\n adv_accs.append([steps, adv_acc])\n\n if adv_acc == 0.:\n print(\"acc break\")\n break\n\n for i in range(g_adv.shape[0]):\n\n zero_inds = list(np.nonzero(1.-x_adv[i])[0])\n one_inds = list(np.nonzero(x_adv[i])[0])\n r = np.random.rand()\n if (r>0.5) and one_inds:\n zero_inds = []\n\n if zero_inds and one_inds:\n # nature greedy\n\n index_zero = np.argmax(g_adv[i, zero_inds])\n index_one = np.argmax(-g_adv[i, one_inds])\n\n if (g_adv[i, zero_inds][index_zero]) >= (-g_adv[i, one_inds][index_one]):\n assert x_adv[i][zero_inds[index_zero]] == 0., \"insert wrong\"\n x_adv[i][zero_inds[index_zero]] = 1\n else:\n assert x_adv[i][one_inds[index_one]] == 1., \"del wrong\"\n x_adv[i][one_inds[index_one]] = 0\n\n\n elif zero_inds:\n index_zero = np.argmax(g_adv[i, zero_inds])\n assert x_adv[i][zero_inds[index_zero]] == 0., \"insert wrong\"\n x_adv[i][zero_inds[index_zero]] = 1\n\n elif one_inds:\n index_one = np.argmax(-g_adv[i, one_inds])\n assert x_adv[i][one_inds[index_one]] == 1., \"del wrong\"\n x_adv[i][one_inds[index_one]] = 0\n else:\n break\n\n #print(time.time()-start_time)\n g_adv, l_adv, sample_acc = cal_grad(sess, model, benign_gradient, attack_loss, samples, y)\n print(\"samples acc\", sample_acc)\n adv_acc = 1.-float(np.sum(is_adv))/float(x.shape[0])\n #assert sample_acc == adv_acc, \"wrong adversarial samples \"+str(sample_acc)+\"/\"+str(adv_acc)\n return adv_accs, distances, samples, is_adv\n\n\ndef unrestricted_delete_gradient_attack(sess, model, benign_gradient, attack_loss, x, y, feat_trie):\n x_adv = np.copy(x)\n for t in range(args.iters):\n g, l, acc = cal_grad(sess, model, benign_gradient, attack_loss, x_adv, y)\n index = np.argmax(x_adv*(-g), axis=-1)\n for i in range(index.shape[0]):\n x_adv[i][index[i]] = 0\n adv_acc = eval(x_adv, y, sess, model)\n print (\"iters:\", t, \"adv_acc\", adv_acc)\n return adv_acc\n\n\ndef unrestricted_insert_gradient_attack(sess, model, benign_gradient, 
attack_loss, x, y, feat_trie):\n\n adv_acc = 0\n is_adv = np.zeros(x.shape[0]).astype(bool)\n new_is_adv = np.zeros(x.shape[0]).astype(bool)\n adv_accs = []\n\n adv_acc = 1.-float(np.sum(is_adv))/float(x.shape[0])\n stuck_acc = -1\n\n inds = list(range(x.shape[1]))\n x_adv = np.copy(x)\n y_adv = np.copy(y)\n steps = 0\n distances = np.ones(x.shape[0])*(-1.)\n samples = np.copy(x)\n\n #while np.sum(x_adv[:, inds]==0.):\n for steps in range(x.shape[1]):\n #start_time = time.time()\n x_adv = x_adv[new_is_adv==0]\n y_adv = y_adv[new_is_adv==0]\n\n if np.sum(x_adv[:, inds]==0.)==0.:\n break\n\n g_adv, l_adv, acc_adv = cal_grad(sess, model, benign_gradient, attack_loss, x_adv, y_adv)\n new_is_adv = np.not_equal(l_adv, y_adv)\n\n distance = np.sum(np.absolute(x_adv-x[is_adv==0]), axis=1)\n distances[is_adv==0] = distance\n samples[is_adv==0] = x_adv\n\n is_adv[is_adv==0] = np.logical_or(is_adv[is_adv==0], new_is_adv)\n adv_acc = 1.-float(np.sum(is_adv))/float(x.shape[0])\n\n print(\"steps:\", steps, \"adv_acc:\", adv_acc, \"distance:\", np.mean(distances))\n adv_accs.append([steps, adv_acc])\n\n if adv_acc == 0.:\n print(\"acc break\")\n break\n\n for i in range(g_adv.shape[0]):\n zero_inds = list(np.nonzero(1.-x_adv[i])[0])\n if not zero_inds:\n break\n\n index = np.argmax(g_adv[i, zero_inds])\n\n assert x_adv[i][zero_inds[index]] == 0., \"insert wrong\"\n x_adv[i][zero_inds[index]] = 1\n #print(time.time()-start_time)\n\n g_adv, l_adv, sample_acc = cal_grad(sess, model, benign_gradient, attack_loss, samples, y)\n print(\"samples acc\", sample_acc)\n adv_acc = 1.-float(np.sum(is_adv))/float(x.shape[0])\n #assert sample_acc == adv_acc, \"wrong adversarial samples \"+str(sample_acc)+\"/\"+str(adv_acc)\n return adv_accs, distances, samples, is_adv\n\n\ndef delete1_gradient_attack(sess, model, benign_gradient, attack_loss, x, y, feat_trie):\n adv_acc = 0\n break_samples = []\n for i in range(x.shape[0]):\n xi_adv = np.copy(x[i:i+1])\n yi = y[i:i+1]\n is_adv = False\n #print(\"sample\", i)\n for key, value in feat_trie._root.children.iteritems():\n xi_adv = np.copy(x[i:i+1])\n min_idx, max_idx = feat_trie[key]\n #print(i, key, min_idx, max_idx)\n inds = []\n for ind in range(min_idx-1, max_idx):\n if xi_adv[0,ind]==1.:\n inds.append(ind)\n #if inds: print(key, inds)\n #for t in range(args.iters):\n while inds:\n gi, li, acci = cal_grad(sess, model, benign_gradient, attack_loss, xi_adv, yi)\n if acci == 0.:\n is_adv = True\n break\n #x_adv[:, inds]*(-g[:, inds])\n index = np.argmax(-gi[0][inds])\n xi_adv[0,inds[index]] = 0.\n inds.pop(index)\n\n if is_adv:\n #print(\"break sample\", i, \"distance:\", np.sum(x[i:i+1] - xi_adv))\n adv_acc += 1\n break_samples.append(i)\n break\n\n adv_acc = 1.-float(adv_acc)/float(x.shape[0])\n #print(\"adv_acc: %.2f\" % (adv_acc*100.))\n #print(break_samples)\n\n return adv_acc\n\n\n\ndef delete2_gradient_attack(sess, model, benign_gradient, attack_loss, x, y, feat_trie):\n adv_acc = 0\n keys = []\n break_samples = []\n\n for key, value in feat_trie._root.children.iteritems():\n keys.append(key)\n\n for i in range(x.shape[0]):\n xi_adv = np.copy(x[i:i+1])\n yi = y[i:i+1]\n is_adv = False\n #print(\"sample\", i)\n for k1 in range(len(keys)-1):\n key1 = keys[k1]\n for k2 in range(k1, len(keys)):\n xi_adv = np.copy(x[i:i+1])\n key2 = keys[k2]\n min_idx1, max_idx1 = feat_trie[key1]\n min_idx2, max_idx2 = feat_trie[key2]\n inds = []\n for ind in range(min_idx1-1, max_idx1):\n if xi_adv[0,ind]==1.:\n #print(ind, xi_adv[0,ind])\n inds.append(ind)\n\n for ind in 
range(min_idx2-1, max_idx2):\n if (xi_adv[0,ind]==1.) and (ind not in inds):\n #print(ind, xi_adv[0,ind])\n inds.append(ind)\n\n while inds:\n gi, li, acci = cal_grad(sess, model, benign_gradient, attack_loss, xi_adv, yi)\n if acci == 0.:\n is_adv = True\n break\n index = np.argmax(-gi[0][inds])\n xi_adv[0,inds[index]] = 0.\n\n #print(inds[index], gi[0][inds])\n #exit()\n inds.pop(index)\n\n if is_adv:\n break\n\n if is_adv:\n adv_acc += 1\n #print(\"break sample\", i, \"distance:\", np.sum(x[i:i+1] - xi_adv))\n break_samples.append(i)\n break\n\n adv_acc = 1.-float(adv_acc)/float(x.shape[0])\n #print(break_samples)\n\n return adv_acc\n\n\ndef insert1_gradient_attack(sess, model, benign_gradient, attack_loss, x, y, feat_trie):\n adv_acc = 0\n for i in range(x.shape[0]):\n xi_adv = np.copy(x[i:i+1])\n yi = y[i:i+1]\n is_adv = False\n #print(\"sample\", i)\n for key, value in feat_trie._root.children.iteritems():\n xi_adv = np.copy(x[i:i+1])\n min_idx, max_idx = feat_trie[key]\n #print(i, key, min_idx, max_idx)\n inds = []\n for ind in range(min_idx-1, max_idx):\n if xi_adv[0,ind]==0.:\n inds.append(ind)\n\n #for t in range(args.iters):\n while inds:\n gi, li, acci = cal_grad(sess, model, benign_gradient, attack_loss, xi_adv, yi)\n #print(\"acci\", acci)\n if acci == 0.:\n #print(\"evade\", i)\n is_adv = True\n break\n index = np.argmax(gi[0][inds])\n xi_adv[0,inds[index]] = 1.\n inds.pop(index)\n\n if is_adv:\n adv_acc += 1\n #print(\"break sample\", i, \"distance:\", np.sum(xi_adv - x[i:i+1]))\n break\n adv_acc = 1.-float(adv_acc)/float(x.shape[0])\n #print(\"adv_acc: %.2f\" % (adv_acc*100.))\n\n return adv_acc\n\n\ndef insert41_gradient_attack(sess, model, benign_gradient, attack_loss, x, y, feat_trie):\n adv_acc = 0\n is_adv = np.zeros(x.shape[0]).astype(bool)\n new_is_adv = np.zeros(x.shape[0]).astype(bool)\n keys = []\n values = []\n for key, value in feat_trie._root.children.iteritems():\n min_idx, max_idx = feat_trie[key]\n keys.append(key)\n values.append(max_idx-min_idx)\n\n keys = np.array(keys)[np.argsort(np.array(values))]\n\n\n for key in keys:\n print(key)\n adv_acc = 1.-float(np.sum(is_adv))/float(x.shape[0])\n if adv_acc==0.: break\n stuck_acc = -1\n min_idx, max_idx = feat_trie[key]\n inds = list(range(0, min_idx-1))+list(range(max_idx, x.shape[1]))\n x_adv = np.copy(x[is_adv==0])\n y_adv = np.copy(y[is_adv==0])\n steps = 0\n total_zero = np.sum(x_adv[:,inds]==0.)\n\n while total_zero>0.:\n total_zero = 0\n total_cut = 0\n #start_time = time.time()\n #if steps >= 100:\n #break\n x_adv = np.copy(x_adv[new_is_adv==0])\n y_adv = np.copy(y_adv[new_is_adv==0])\n\n min_idx, max_idx = feat_trie[key]\n\n g_adv, l_adv, acc_adv = cal_grad(sess, model, benign_gradient, attack_loss, x_adv, y_adv)\n new_is_adv = np.not_equal(l_adv, y_adv)\n is_adv[is_adv==0] = np.logical_or(is_adv[is_adv==0], new_is_adv)\n adv_acc = 1.-float(np.sum(is_adv))/float(x.shape[0])\n print(\"steps:\", steps, \"adv_acc:\", adv_acc)\n\n if adv_acc == stuck_acc:\n steps += 1\n else:\n stuck_acc = adv_acc\n steps = 0\n\n if adv_acc == 0.: break\n\n for i in range(g_adv.shape[0]):\n zero_inds = []\n for ind in inds:\n if x_adv[i, ind] == 0.:\n zero_inds.append(ind)\n if not zero_inds:\n break\n total_zero += len(zero_inds)\n\n index = np.argmax(g_adv[i, zero_inds])\n\n assert x_adv[i][zero_inds[index]] == 0., \"insert wrong\"\n x_adv[i][zero_inds[index]] = 1\n total_cut += 1\n #print(time.time()-start_time)\n\n #print(total_zero, total_cut, total_zero-total_cut, g_adv.shape[0])\n\n\n adv_acc = 
1.-float(np.sum(is_adv))/float(x.shape[0])\n\n return adv_acc\n\n\n\ndef gradient_attack(sess, model, benign_gradient, attack_loss, x, y):\n g, l = sess.run([benign_gradient, model.pre_softmax], feed_dict={model.x_input:x, model.y_input:y})\n\n index = np.argmax((1.0-x)*g, axis=-1)\n\n for i in range(index.shape[0]):\n x[i][index[i]] = 1.0\n\n #g, l = sess.run([benign_gradient, model.pre_softmax], feed_dict={model.x_input:x, model.y_input:y})\n #print l[:10]\n\n return x\n\n\ndef attack(args, saver, sess, model, benign_gradient, attack_loss, x, y, feat_trie):\n\n PATH = find_model_path(args)\n saver.restore(sess, PATH)\n print (\"load model from:\", PATH)\n\n acc = eval(x, y, sess, model)\n print( \"clean acc\", acc)\n\n import time\n start_time = time.time()\n\n if args.method == \"un\":\n print(\"unstricted attack\")\n adv_accs, distances, samples, is_adv = unrestricted_gradient_attack(sess, model, benign_gradient, attack_loss, x, y, feat_trie)\n return adv_accs, distances, samples, is_adv\n elif args.method == \"uni\":\n print(\"unrestricted insert attack\")\n adv_accs, distances, samples, is_adv = unrestricted_insert_gradient_attack(sess, model, benign_gradient, attack_loss, x, y, feat_trie)\n return adv_accs, distances, samples, is_adv\n elif args.method == \"und\":\n print(\"unstricted attack\")\n adv_acc = unrestricted_delete_gradient_attack(sess, model, benign_gradient, attack_loss, x, y, feat_trie)\n elif args.method == \"A\":\n print(\"delete1 attack\")\n adv_acc = delete1_gradient_attack(sess, model, benign_gradient, attack_loss, x, y, feat_trie)\n elif args.method == \"B\":\n print(\"insert1 attack\")\n adv_acc = insert1_gradient_attack(sess, model, benign_gradient, attack_loss, x, y, feat_trie)\n elif args.method == \"C\":\n print(\"delete2 attack\")\n adv_acc = delete2_gradient_attack(sess, model, benign_gradient, attack_loss, x, y, feat_trie)\n elif args.method == \"D\":\n print(\"insert41 attack\")\n adv_acc = insert41_gradient_attack(sess, model, benign_gradient, attack_loss, x, y, feat_trie)\n else:\n print(\"no such attack method!\")\n exit()\n\n if args.method not in [\"un\", \"uni\"]:\n print(\"time: %.2f, adv_acc: %.2f\" % (time.time()-start_time, adv_acc*100.))\n\n return adv_acc\n\n\n\ndef main(args):\n\n # Initialize the model\n model = Model()\n\n seed_path = \"../train/robustness_spec/seed_test_malicious/seed_feature_3416.csv\"\n trie_path = \"../train/robustness_spec/feature_spec/pathtrie_filled.pickle\"\n\n feat_trie = pickle.load(open(trie_path, 'rb'))\n\n #x_input_test = pickle.load(f)\n x_input_test = np.genfromtxt(seed_path, delimiter=',')\n #x_input_test = np.array([item[0] for key, item in x_input_test.iteritems()])\n print(\"input shape:\", x_input_test.shape)\n y_input_test = np.ones(x_input_test.shape[0])\n\n #attack_loss = model.pre_softmax[:, 0]\n attack_loss = model.xent\n benign_gradient = tf.gradients(attack_loss, model.x_input)[0]\n\n saver = tf.train.Saver()\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n\n if args.model == \"all\":\n adv_accs = []\n distances = []\n samples = []\n all_is_adv = []\n if args.method in [\"A\", \"B\", \"C\", \"D\"]:\n model_names = [\"baseline\", \"TA\", \"TB\", \"TC\", \"TD\", \"ATAB\",\\\n \"EAB\", \"ED\", \"RA\", \"RB\", \"RC\", \"RD\", \"RAB\", \"RABE\", \"mono\"]\n if args.method == \"D\":\n model_names = [\"TB\", \"ATAB\", \"RD\", \"RAB\", \"RABE\"]\n for model_name in model_names:\n args.model = model_name\n adv_acc = attack(args, 
saver, sess, model, benign_gradient,\\\n attack_loss, x_input_test, y_input_test, feat_trie)\n adv_accs.append(adv_acc)\n np.save(args.method+\"_adv_accs.npy\", adv_accs)\n #np.save(args.method+\"_adv_accs.npy\", adv_accs)\n if args.method in [\"un\", \"uni\"]:\n model_names = [\"baseline\", \"TA\", \"TB\", \"TC\", \"TD\", \"ATAB\",\\\n \"EAB\", \"ED\", \"RA\", \"RB\", \"RC\", \"RD\", \"RAB\", \"RABE\", \"mono\"]\n if args.method == \"un\":\n model_names = [\"ATAB\", \"RAB\", \"RD\"]\n for model_name in model_names:\n args.model = model_name\n adv_acc, distance, sample, is_adv = attack(args, saver, sess, model, benign_gradient,\\\n attack_loss, x_input_test, y_input_test, feat_trie)\n adv_accs.append(adv_acc)\n distances.append(distance)\n samples.append(sample)\n all_is_adv.append(is_adv)\n np.save(args.method+\"_adv_accs_200K.npy\", np.array(adv_accs))\n np.save(args.method+\"_adv_distances_200K.npy\", np.array(distances))\n np.save(args.method+\"_adv_samples_200K.npy\", np.array(samples))\n np.save(args.method+\"_is_adv_200K.npy\", np.array(all_is_adv))\n\n else:\n adv_accs = attack(args, saver, sess, model, benign_gradient,\\\n attack_loss, x_input_test, y_input_test, feat_trie)\n\n np.save(args.method+\"_\"+args.model+\"_adv_accs.npy\", adv_accs)\n\n\n\nif __name__=='__main__':\n args = parse_args()\n #if(args.method not in [\"d2\", \"i\", \"b1\", \"c2\"]):\n #print (\"--method: d2, i, b1, c2\")\n #exit(1)\n if(args.iters<=0):\n print( \"--iters: Number of iterations for gradient attacks should not be less than 0!\")\n exit(1)\n\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" # see issue #152\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n\n main(args)\n", "repo_name": "surrealyz/pdfclassifier", "sub_path": "attack/gradient_attack.py", "file_name": "gradient_attack.py", "file_ext": "py", "file_size_in_byte": 21623, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 27, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "model.y_pred", "line_number": 24, "usage_type": "attribute"}, {"api_name": "model.x_input", "line_number": 25, "usage_type": "attribute"}, {"api_name": "model.y_input", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 39, "usage_type": "call"}, {"api_name": "model.y_pred", "line_number": 81, "usage_type": "attribute"}, {"api_name": "model.accuracy", "line_number": 81, "usage_type": "attribute"}, {"api_name": "model.x_input", "line_number": 81, "usage_type": "attribute"}, {"api_name": "model.y_input", "line_number": 81, "usage_type": "attribute"}, {"api_name": "model.pre_softmax", "line_number": 86, "usage_type": "attribute"}, {"api_name": "model.accuracy", "line_number": 86, "usage_type": "attribute"}, {"api_name": "model.x_input", "line_number": 86, "usage_type": "attribute"}, {"api_name": "model.y_input", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.ones", 
"line_number": 104, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.logical_or", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 135, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.logical_or", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 324, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 349, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 386, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 387, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 400, "usage_type": "call"}, 
{"api_name": "numpy.copy", "line_number": 405, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 406, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 408, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 416, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 417, "usage_type": "call"}, {"api_name": "numpy.not_equal", "line_number": 422, "usage_type": "call"}, {"api_name": "numpy.logical_or", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 444, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 454, "usage_type": "call"}, {"api_name": "model.pre_softmax", "line_number": 461, "usage_type": "attribute"}, {"api_name": "model.x_input", "line_number": 461, "usage_type": "attribute"}, {"api_name": "model.y_input", "line_number": 461, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 463, "usage_type": "call"}, {"api_name": "time.time", "line_number": 484, "usage_type": "call"}, {"api_name": "time.time", "line_number": 514, "usage_type": "call"}, {"api_name": "model.Model", "line_number": 523, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 528, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 531, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 534, "usage_type": "call"}, {"api_name": "model.xent", "line_number": 537, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.gradients", "line_number": 538, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 538, "usage_type": "name"}, {"api_name": "model.x_input", "line_number": 538, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.train.Saver", "line_number": 540, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.train", "line_number": 540, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 540, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.Session", "line_number": 541, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 541, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.global_variables_initializer", "line_number": 542, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 542, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.local_variables_initializer", "line_number": 543, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 543, "usage_type": "name"}, {"api_name": "numpy.save", "line_number": 560, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 575, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 575, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 576, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 576, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 577, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 577, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 578, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 578, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 584, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 597, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 598, "usage_type": "attribute"}]} +{"seq_id": "7476161375", "text": "import requests\nimport os \nimport zipfile\nimport shutil\n\ndef unzip(dir, 
zip_name):\n    \"\"\"\n    dir: Download directory\n    zip_name: Zip file name\n    \"\"\"\n    zip_name = f'{dir}/{zip_name}'\n    unzip_file = type('obj', (object,), {'file_size': 0})\n    with zipfile.ZipFile(zip_name, 'r') as zf:\n        for zinfo in zf.filelist:\n            if zinfo.file_size > unzip_file.file_size:\n                unzip_file = zinfo\n        zf.extract(unzip_file.filename, dir)\n    \n    file_name = os.path.join(dir, unzip_file.filename.split('/')[-1])\n    print(file_name)\n    if not os.path.exists(file_name):\n        os.rename(f'{dir}/{unzip_file.filename}', file_name)\n        extract_dir = os.path.join(dir, unzip_file.filename.split('/')[0])\n        if os.path.isdir(extract_dir):\n            shutil.rmtree(extract_dir)\n    os.remove(zip_name)\n\ndef unzip_all(dir):\n    \"\"\"\n    dir: directory containing the zipped files\n\n    \"\"\"\n    print(\"------------------------------ Unzipping files --------------------------------\")\n    files = os.listdir(dir)\n    for i in files:\n        if i.split('.')[-1] == 'zip':\n            unzip(dir, i)\n            print(f'unzipped {i}')\ndef clean_string(text):\n    weird_chars = ['/', '\\\\', ':', '?', '\"', '<', '>', '|', '=']\n    s = ''\n    for i in text:\n        if i in weird_chars:\n            continue  # drop characters that are unsafe in file names\n        else:\n            s += i\n    return s\n\ndef download(url):\n    dir = 'downloads'\n    if not os.path.exists(dir):\n        os.mkdir(dir)\n    name = url.split('/')[-1]\n    name = clean_string(name)\n    file_name = f'{dir}/{name}'\n    if not os.path.exists(file_name):\n        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'}\n        file = requests.get(url, headers=headers).content\n        with open(file_name, 'wb') as f:\n            f.write(file)\n        print(f'Downloaded {name}')\n    else:\n        print(f'Skip {name}')\n\ndef check_downloaded(dir, urls):\n    files = os.listdir(dir)\n    not_downloaded = []\n    for url in urls:\n        b = True\n        for i in files:\n            if i in url:\n                b = False\n                break\n        if b:\n            not_downloaded.append(url)\n    return not_downloaded\n\ndef read_urls(file):\n    if not os.path.exists(file):\n        print(f'{file} does not exist')\n        return\n    with open(file, 'r') as f:\n        urls = f.read().splitlines()\n    return urls", "repo_name": "rogerramosruiz/hdri-downloader", "sub_path": "download.py", "file_name": "download.py", "file_ext": "py", "file_size_in_byte": 2395, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "zipfile.ZipFile", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.rename", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 25, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 26, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56,
"usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 58, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}]} +{"seq_id": "28259927295", "text": "import folium\n\nfrom el_mundo.models import *\n\n\nm = folium.Map(location=[49.0, 32.0],\n zoom_start=5,\n max_zoom=6,\n min_zoom=3,\n zoom_control=True,\n tiles='cartodb positron'\n )\n\n\ncountries = Countries.objects.all()\nfor i in range(len(countries) - 1):\n\n countries[i].population = '{:,}'.format(countries[i].population)\n languages_by_country = Languages.objects.filter(languagesbycountries__country=countries[i])\n\n lang_str = f\"
    \"\n for lang in languages_by_country:\n lang_str += f\"
  • {lang.name}
  • \"\n lang_str += \"
\"\n\n popup_html = f\"{countries[i].name}

\"\\\n f\"Population: {countries[i].population}

\"\\\n f\"Language:
\"\\\n f\"{lang_str}\"\n\n marker = folium.CircleMarker(location=(countries[i].lat, countries[i].long),\n color='#696969',\n fill_color='#006400',\n radius=5,\n fill_opacity=0.8,\n popup=folium.Popup(popup_html),\n parse_html=True).add_to(m)\n\n\nm.save('templates/map.html')\n\n\n\n\n\n\n\n", "repo_name": "Igorjano/Languages-of-the-World", "sub_path": "el_mundo/create_map.py", "file_name": "create_map.py", "file_ext": "py", "file_size_in_byte": 1348, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "folium.Map", "line_number": 6, "usage_type": "call"}, {"api_name": "folium.CircleMarker", "line_number": 31, "usage_type": "call"}, {"api_name": "folium.Popup", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "9940518708", "text": "import os\nfrom googleapiclient.discovery import build\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport gspread\n\nclass googlesheet:\n\n def cred(self):\n scope = [\"https://spreadsheets.google.com/feeds\", \"https://www.googleapis.com/auth/spreadsheets\",\n \"https://www.googleapis.com/auth/drive.file\", \"https://www.googleapis.com/auth/drive\"]\n creds = ServiceAccountCredentials.from_json_keyfile_name(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"csvfile/creds.json\"), scope)\n return creds\n\n def writegooglesheet(self,reader_values):\n creds=self.cred()\n service = build('sheets', 'v4', credentials=creds)\n spreadsheet = {\n 'properties': {\n 'title': 'Bank'\n }\n }\n spreadsheet = service.spreadsheets().create(body=spreadsheet,fields='spreadsheetId').execute()\n spreadsheetid=spreadsheet.get('spreadsheetId')\n Response_date = service.spreadsheets().values().update(\n spreadsheetId=spreadsheetid,\n valueInputOption='RAW',\n range='A1:'+'Z'+str(len(reader_values)),\n body=dict(\n majorDimension='ROWS',\n values=reader_values)).execute()\n\n def readgooglesheet(self):\n creds=self.cred()\n client=gspread.authorize(creds)\n sheet= client.open(\"Bank\").sheet1\n #print(sheet.get_all_records())\n return (sheet.get_all_records())\n", "repo_name": "h96Coder/Bank_API", "sub_path": "Bank_App/googlesheet.py", "file_name": "googlesheet.py", "file_ext": "py", "file_size_in_byte": 1525, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name", "line_number": 11, "usage_type": "call"}, {"api_name": "oauth2client.service_account.ServiceAccountCredentials", "line_number": 11, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 11, "usage_type": "call"}, {"api_name": "googleapiclient.discovery.build", "line_number": 16, "usage_type": "call"}, {"api_name": "gspread.authorize", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "37346026198", "text": "#!/usr/bin/env python\n\nimport os\nimport json\nimport logging\nimport argparse\nimport torch\nimport numpy as np\nimport random\n\nfrom util.corpus import Corpus\nimport paddle.nn as nn\nimport paddle\nfrom paddlenlp.transformers import ErnieTokenizer, LinearDecayWithWarmup, ErnieGramTokenizer, BertTokenizer\nfrom ernie_gen import ErnieForGeneration, ErnieModel\nfrom engine import Trainer\nfrom model 
import CRS\n\n\ndef model_config():\n \"\"\"\n model_config\n \"\"\"\n parser = argparse.ArgumentParser()\n\n # Data\n data_arg = parser.add_argument_group(\"Data\")\n data_arg.add_argument(\"--data_dir\", type=str, default=\"./data/DuRecDial\")\n data_arg.add_argument(\"--data_prefix\", type=str, default=\"demo.DuRecDial\")\n data_arg.add_argument(\"--save_dir\", type=str, default=\"./outputs/\")\n data_arg.add_argument(\"--max_src_len\", type=int, default=150)\n data_arg.add_argument(\"--max_tgt_len\", type=int, default=100)\n data_arg.add_argument(\"--max_cue0_len\", type=int, default=30)\n data_arg.add_argument(\"--max_cue1_len\", type=int, default=80)\n data_arg.add_argument(\"--cover_ratio\", type=float, default=0.8)\n\n\n # Network\n net_arg = parser.add_argument_group(\"Model\")\n net_arg.add_argument(\"--model_name_or_path\", type=str, default=\"macbert-base-chinese\")\n\n # Training / Testing\n train_arg = parser.add_argument_group(\"Training\")\n train_arg.add_argument(\"--optimizer\", type=str, default=\"Adam\")\n train_arg.add_argument(\"--lr\", type=float, default=1e-4)\n train_arg.add_argument(\"--weight_decay\", type=float, default=0.01)\n train_arg.add_argument(\"--adam_epsilon\", type=float, default=1e-8)\n train_arg.add_argument(\"--warmup_proportion\", type=float, default=0.1)\n train_arg.add_argument(\"--num_epochs\", type=int, default=10)\n\n\n # MISC\n misc_arg = parser.add_argument_group(\"Misc\")\n misc_arg.add_argument(\"--seed\", type=int, default=2011)\n misc_arg.add_argument(\"--log_steps\", type=int, default=4000)\n misc_arg.add_argument(\"--valid_steps\", type=int, default=24000)\n misc_arg.add_argument(\"--batch_size\", type=int, default=2)\n misc_arg.add_argument(\"--accumulate_batchs_num\", type=int, default=4)\n misc_arg.add_argument(\"--test\", action=\"store_true\")\n\n config = parser.parse_args()\n\n return config\n\n\ndef main():\n \"\"\"\n main\n \"\"\"\n config = model_config()\n paddle.set_device('gpu')\n\n paddle.seed(config.seed)\n np.random.seed(config.seed)\n random.seed(config.seed)\n\n # gen\n tokenizer = BertTokenizer.from_pretrained(config.model_name_or_path, do_lower_case=True)\n\n # Data definition\n corpus = Corpus(config=config, tokenizer=tokenizer)\n\n train_iter = corpus.create_batches(config.batch_size, \"train\")\n valid_iter = corpus.create_batches(config.batch_size, \"valid\")\n test_iter = corpus.create_batches(config.batch_size, \"test\")\n\n gen_model = ErnieForGeneration.from_pretrained(config.model_name_or_path)\n model = CRS(gen_model)\n\n max_steps = len(train_iter) * config.num_epochs / config.accumulate_batchs_num\n\n lr_scheduler = LinearDecayWithWarmup(config.lr, max_steps,\n config.warmup_proportion)\n \n # Generate parameter names needed to perform weight decay.\n # All bias and LayerNorm parameters are excluded.\n decay_params = [\n p.name for n, p in model.named_parameters()\n if not any(nd in n for nd in [\"bias\", \"norm\"])\n ]\n optimizer = paddle.optimizer.AdamW(\n learning_rate=lr_scheduler,\n epsilon=config.adam_epsilon,\n parameters=model.parameters(),\n weight_decay=config.weight_decay,\n grad_clip=nn.ClipGradByGlobalNorm(1.0),\n apply_decay_param_fun=lambda x: x in decay_params)\n\n # Save directory\n if not os.path.exists(config.save_dir):\n os.makedirs(config.save_dir)\n\n # Logger definition\n logger = logging.getLogger(__name__)\n logging.basicConfig(level=logging.DEBUG, format=\"%(message)s\")\n if config.test == 0:\n fh = logging.FileHandler(os.path.join(config.save_dir, \"train.log\"))\n else:\n fh 
= logging.FileHandler(os.path.join(config.save_dir, \"test.log\"))\n    logger.addHandler(fh)\n\n    logger.info(config)\n    logger.info(model)\n\n    trainer = Trainer(model=model, tokenizer=tokenizer, optimizer=optimizer, train_iter=train_iter,\n                      valid_iter=valid_iter, logger=logger, config=config, lr_scheduler=lr_scheduler)\n\n\n    if config.test:\n        logger.info(\"\")\n        trainer.load(os.path.join(config.save_dir, \"best\"))\n        logger.info(\"Testing starts ...\")\n        metrics = trainer.evaluate(test_iter)\n        logger.info(metrics.report_val())\n        logger.info(\"Generation starts ...\")\n        test_gen_file = os.path.join(config.save_dir, \"test.result\")\n        metrics = trainer.evaluate_generation(test_iter, save_file=test_gen_file)\n        logger.info(metrics.report_val())\n    else:\n        trainer.train()\n        logger.info(\"Training done!\")\n\n\nif __name__ == '__main__':\n    try:\n        main()\n    except KeyboardInterrupt:\n        print(\"\\nExited from the program earlier!\")", "repo_name": "apprivoiser/GoCVAE", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5106, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "paddle.set_device", "line_number": 71, "usage_type": "call"}, {"api_name": "paddle.seed", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 74, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 75, "usage_type": "call"}, {"api_name": "paddlenlp.transformers.BertTokenizer.from_pretrained", "line_number": 78, "usage_type": "call"}, {"api_name": "paddlenlp.transformers.BertTokenizer", "line_number": 78, "usage_type": "name"}, {"api_name": "util.corpus.Corpus", "line_number": 81, "usage_type": "call"}, {"api_name": "ernie_gen.ErnieForGeneration.from_pretrained", "line_number": 87, "usage_type": "call"}, {"api_name": "ernie_gen.ErnieForGeneration", "line_number": 87, "usage_type": "name"}, {"api_name": "model.CRS", "line_number": 88, "usage_type": "call"}, {"api_name": "paddlenlp.transformers.LinearDecayWithWarmup", "line_number": 92, "usage_type": "call"}, {"api_name": "model.named_parameters", "line_number": 98, "usage_type": "call"}, {"api_name": "paddle.optimizer.AdamW", "line_number": 101, "usage_type": "call"}, {"api_name": "paddle.optimizer", "line_number": 101, "usage_type": "attribute"}, {"api_name": "model.parameters", "line_number": 104, "usage_type": "call"}, {"api_name": "paddle.nn.ClipGradByGlobalNorm", "line_number": 106, "usage_type": "call"}, {"api_name": "paddle.nn", "line_number": 106, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 111, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 114, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 115, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 115, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 119,
"usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "engine.Trainer", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}]} +{"seq_id": "86752156176", "text": "from __future__ import absolute_import\nimport etcd\nimport logging\nimport os\nimport random\nimport requests\nimport socket\nimport time\n\nfrom dns.exception import DNSException\nfrom dns import resolver\nfrom patroni.dcs import AbstractDCS, Cluster, Failover, Leader, Member\nfrom patroni.exceptions import DCSError\nfrom patroni.utils import Retry, RetryFailedError, sleep\nfrom urllib3.exceptions import HTTPError, ReadTimeoutError\nfrom requests.exceptions import RequestException\nfrom six.moves.http_client import HTTPException\n\nlogger = logging.getLogger(__name__)\n\n\nclass EtcdError(DCSError):\n pass\n\n\nclass Client(etcd.Client):\n\n def __init__(self, config):\n super(Client, self).__init__(read_timeout=5)\n self._config = config\n if 'protocol' in self._config:\n self._protocol = self._config['protocol']\n\n self._load_machines_cache()\n self._allow_reconnect = True\n\n @property\n def machines(self):\n \"\"\"Original `machines` method(property) of `etcd.Client` class raise exception\n when it failed to get list of etcd cluster members. This method is being called\n only when request failed on one of the etcd members during `api_execute` call.\n For us it's more important to execute original request rather then get new\n topology of etcd cluster. So we will catch this exception and return valid list\n of machines with setting flag `self._update_machines_cache` to `!True`.\n Later, during next `api_execute` call we will forcefully update machines_cache\"\"\"\n try:\n ret = super(Client, self).machines\n random.shuffle(ret)\n return ret\n except etcd.EtcdException:\n if self._update_machines_cache: # We are updating machines_cache\n raise # This exception is fatal, we should re-raise it.\n self._update_machines_cache = True\n return [self._base_uri]\n\n def _do_http_request(self, request_executor, method, url, fields=None, **kwargs):\n try:\n response = request_executor(method, url, fields=fields, **kwargs)\n response.data.decode('utf-8')\n self._check_cluster_id(response)\n except (HTTPError, HTTPException, socket.error, socket.timeout) as e:\n if (isinstance(fields, dict) and fields.get(\"wait\") == \"true\" and\n isinstance(e, ReadTimeoutError)):\n logger.debug(\"Watch timed out.\")\n raise etcd.EtcdWatchTimedOut(\"Watch timed out: {0}\".format(e), cause=e)\n logger.error(\"Request to server %s failed: %r\", self._base_uri, e)\n logger.info(\"Reconnection allowed, looking for another server.\")\n self._base_uri = self._next_server(cause=e)\n response = False\n return response\n\n def api_execute(self, path, method, params=None, timeout=None):\n if not path.startswith('/'):\n raise ValueError('Path does not start with /')\n\n if timeout is None:\n timeout = self.read_timeout\n\n if timeout == 0:\n timeout = None\n\n kwargs = {'timeout': timeout, 'fields': params, 'redirect': self.allow_redirect,\n 'headers': self._get_headers(), 'preload_content': False}\n\n if method in [self._MGET, self._MDELETE]:\n request_executor = self.http.request\n elif method in [self._MPUT, self._MPOST]:\n 
request_executor = self.http.request_encode_body\n kwargs['encode_multipart'] = False\n else:\n raise etcd.EtcdException('HTTP method {0} not supported'.format(method))\n\n # Update machines_cache if previous attempt of update has failed\n if self._update_machines_cache:\n self._load_machines_cache()\n\n response = False\n\n try:\n while not response:\n response = self._do_http_request(request_executor, method, self._base_uri + path, **kwargs)\n\n if response is False and not self._use_proxies:\n self._machines_cache = self.machines\n self._machines_cache.remove(self._base_uri)\n return self._handle_server_response(response)\n except etcd.EtcdConnectionFailed:\n self._update_machines_cache = True\n raise\n\n @staticmethod\n def get_srv_record(host):\n try:\n return [(str(r.target).rstrip('.'), r.port) for r in resolver.query('_etcd-server._tcp.' + host, 'SRV')]\n except DNSException:\n logger.exception('Can not resolve SRV for %s', host)\n return []\n\n def _get_machines_cache_from_srv(self, discovery_srv):\n \"\"\"Fetch list of etcd-cluster member by resolving _etcd-server._tcp. SRV record.\n This record should contain list of host and peer ports which could be used to run\n 'GET http://{host}:{port}/members' request (peer protocol)\"\"\"\n\n ret = []\n for host, port in self.get_srv_record(discovery_srv):\n url = '{0}://{1}:{2}/members'.format(self._protocol, host, port)\n try:\n response = requests.get(url, timeout=5)\n if response.ok:\n for member in response.json():\n ret.extend(member['clientURLs'])\n break\n except RequestException:\n logger.exception('GET %s', url)\n return list(set(ret))\n\n def _get_machines_cache_from_dns(self, addr):\n \"\"\"One host might be resolved into multiple ip addresses. We will make list out of it\"\"\"\n\n ret = []\n host, port = addr.split(':')\n try:\n for r in set(socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)):\n ret.append('{0}://{1}:{2}'.format(self._protocol, r[4][0], r[4][1]))\n except socket.error:\n logger.exception('Can not resolve %s', host)\n return list(set(ret)) if ret else ['{0}://{1}:{2}'.format(self._protocol, host, port)]\n\n def _load_machines_cache(self):\n \"\"\"This method should fill up `_machines_cache` from scratch.\n It could happen only in two cases:\n 1. During class initialization\n 2. 
When all etcd members failed\"\"\"\n\n self._update_machines_cache = True\n\n if 'discovery_srv' not in self._config and 'host' not in self._config:\n raise Exception('Neither discovery_srv nor host are defined in etcd section of config')\n\n self._machines_cache = []\n\n if 'discovery_srv' in self._config:\n self._machines_cache = self._get_machines_cache_from_srv(self._config['discovery_srv'])\n\n if not self._machines_cache and 'host' in self._config:\n self._machines_cache = self._get_machines_cache_from_dns(self._config['host'])\n\n # Can not bootstrap list of etcd-cluster members, giving up\n if not self._machines_cache:\n raise etcd.EtcdException\n\n # After filling up initial list of machines_cache we should ask etcd-cluster about actual list\n self._base_uri = self._machines_cache.pop(0)\n self._machines_cache = self.machines\n\n if self._base_uri in self._machines_cache:\n self._machines_cache.remove(self._base_uri)\n\n self._update_machines_cache = False\n\n\ndef catch_etcd_errors(func):\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs) is not None\n except (RetryFailedError, etcd.EtcdException):\n return False\n except:\n logger.exception(\"\")\n raise EtcdError(\"unexpected error\")\n\n return wrapper\n\n\nclass Etcd(AbstractDCS):\n\n def __init__(self, name, config):\n super(Etcd, self).__init__(name, config)\n self.ttl = config.get('ttl', 30)\n self._retry = Retry(deadline=10, max_delay=1, max_tries=-1,\n retry_exceptions=(etcd.EtcdConnectionFailed,\n etcd.EtcdLeaderElectionInProgress,\n etcd.EtcdWatcherCleared,\n etcd.EtcdEventIndexCleared))\n self._client = self.get_etcd_client(config)\n\n def retry(self, *args, **kwargs):\n return self._retry.copy()(*args, **kwargs)\n\n @staticmethod\n def get_etcd_client(config):\n client = None\n while not client:\n try:\n client = Client(config)\n except etcd.EtcdException:\n logger.info('waiting on etcd')\n sleep(5)\n return client\n\n @staticmethod\n def member(node):\n return Member.from_node(node.modifiedIndex, os.path.basename(node.key), node.ttl, node.value)\n\n def _load_cluster(self):\n try:\n result = self.retry(self._client.read, self.client_path(''), recursive=True)\n nodes = {os.path.relpath(node.key, result.key): node for node in result.leaves}\n\n # get initialize flag\n initialize = nodes.get(self._INITIALIZE)\n initialize = initialize and initialize.value\n\n # get last leader operation\n last_leader_operation = nodes.get(self._LEADER_OPTIME)\n last_leader_operation = 0 if last_leader_operation is None else int(last_leader_operation.value)\n\n # get list of members\n members = [self.member(n) for k, n in nodes.items() if k.startswith(self._MEMBERS) and k.count('/') == 1]\n\n # get leader\n leader = nodes.get(self._LEADER)\n if leader:\n member = Member(-1, leader.value, None, {})\n member = ([m for m in members if m.name == leader.value] or [member])[0]\n leader = Leader(leader.modifiedIndex, leader.ttl, member)\n\n # failover key\n failover = nodes.get(self._FAILOVER)\n if failover:\n failover = Failover.from_node(failover.modifiedIndex, failover.value)\n\n self._cluster = Cluster(initialize, leader, last_leader_operation, members, failover)\n except etcd.EtcdKeyNotFound:\n self._cluster = Cluster(False, None, None, [], None)\n except:\n logger.exception('get_cluster')\n raise EtcdError('Etcd is not responding properly')\n\n @catch_etcd_errors\n def touch_member(self, connection_string, ttl=None):\n return self.retry(self._client.set, self.member_path, connection_string, ttl or self.ttl)\n\n 
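# Illustrative aside (added comments, not part of the original patroni file):\n    # the catch_etcd_errors decorator used above and below collapses etcd/retry\n    # failures inside these helpers into a plain boolean. A hypothetical session,\n    # assuming a local etcd member on 127.0.0.1:2379:\n    #   dcs = Etcd('postgresql0', {'host': '127.0.0.1:2379', 'ttl': 30})\n    #   dcs.touch_member('postgres://127.0.0.1:5432/postgres')  # -> True or False\n    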
@catch_etcd_errors\n def take_leader(self):\n return self.retry(self._client.set, self.leader_path, self._name, self.ttl)\n\n def attempt_to_acquire_leader(self):\n try:\n return bool(self.retry(self._client.write, self.leader_path, self._name, ttl=self.ttl, prevExist=False))\n except etcd.EtcdAlreadyExist:\n logger.info('Could not take out TTL lock')\n except (RetryFailedError, etcd.EtcdException):\n pass\n return False\n\n @catch_etcd_errors\n def set_failover_value(self, value, index=None):\n return self._client.write(self.failover_path, value, prevIndex=index or 0)\n\n @catch_etcd_errors\n def write_leader_optime(self, last_operation):\n return self._client.set(self.leader_optime_path, last_operation)\n\n @catch_etcd_errors\n def update_leader(self):\n return self.retry(self._client.test_and_set, self.leader_path, self._name, self._name, self.ttl)\n\n @catch_etcd_errors\n def initialize(self, create_new=True, sysid=\"\"):\n return self.retry(self._client.write, self.initialize_path, sysid, prevExist=(not create_new))\n\n @catch_etcd_errors\n def delete_leader(self):\n return self._client.delete(self.leader_path, prevValue=self._name)\n\n @catch_etcd_errors\n def cancel_initialization(self):\n return self.retry(self._client.delete, self.initialize_path)\n\n @catch_etcd_errors\n def delete_cluster(self):\n return self.retry(self._client.delete, self.client_path(''), recursive=True)\n\n def watch(self, timeout):\n cluster = self.cluster\n # watch on leader key changes if it is defined and current node is not lock owner\n if cluster and cluster.leader and cluster.leader.name != self._name and cluster.leader.index:\n end_time = time.time() + timeout\n\n while timeout >= 1: # when timeout is too small urllib3 doesn't have enough time to connect\n try:\n self._client.watch(self.leader_path, index=cluster.leader.index + 1, timeout=timeout + 0.5)\n # Synchronous work of all cluster members with etcd is less expensive\n # than reestablishing http connection every time from every replica.\n return True\n except etcd.EtcdWatchTimedOut:\n self._client.http.clear()\n return False\n except etcd.EtcdException:\n logging.exception('watch')\n\n timeout = end_time - time.time()\n\n try:\n return super(Etcd, self).watch(timeout)\n finally:\n self.event.clear()\n", "repo_name": "UKHomeOffice/docker-postgresql-patroni", "sub_path": "patroni/dcs/etcd.py", "file_name": "etcd.py", "file_ext": "py", "file_size_in_byte": 13143, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "patroni.exceptions.DCSError", "line_number": 22, "usage_type": "name"}, {"api_name": "etcd.Client", "line_number": 26, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 48, "usage_type": "call"}, {"api_name": "etcd.EtcdException", "line_number": 50, "usage_type": "attribute"}, {"api_name": "urllib3.exceptions.HTTPError", "line_number": 61, "usage_type": "name"}, {"api_name": "six.moves.http_client.HTTPException", "line_number": 61, "usage_type": "name"}, {"api_name": "socket.error", "line_number": 61, "usage_type": "attribute"}, {"api_name": "socket.timeout", "line_number": 61, "usage_type": "attribute"}, {"api_name": "urllib3.exceptions.ReadTimeoutError", "line_number": 63, "usage_type": "argument"}, {"api_name": "etcd.EtcdWatchTimedOut", "line_number": 65, "usage_type": "call"}, {"api_name": "etcd.EtcdException", "line_number": 91, "usage_type": "call"}, 
{"api_name": "etcd.EtcdConnectionFailed", "line_number": 107, "usage_type": "attribute"}, {"api_name": "dns.resolver.query", "line_number": 114, "usage_type": "call"}, {"api_name": "dns.resolver", "line_number": 114, "usage_type": "name"}, {"api_name": "dns.exception.DNSException", "line_number": 115, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 128, "usage_type": "call"}, {"api_name": "requests.exceptions.RequestException", "line_number": 133, "usage_type": "name"}, {"api_name": "socket.getaddrinfo", "line_number": 143, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 143, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 143, "usage_type": "attribute"}, {"api_name": "socket.IPPROTO_TCP", "line_number": 143, "usage_type": "attribute"}, {"api_name": "socket.error", "line_number": 145, "usage_type": "attribute"}, {"api_name": "etcd.EtcdException", "line_number": 170, "usage_type": "attribute"}, {"api_name": "patroni.utils.RetryFailedError", "line_number": 186, "usage_type": "name"}, {"api_name": "etcd.EtcdException", "line_number": 186, "usage_type": "attribute"}, {"api_name": "patroni.dcs.AbstractDCS", "line_number": 195, "usage_type": "name"}, {"api_name": "patroni.utils.Retry", "line_number": 200, "usage_type": "call"}, {"api_name": "etcd.EtcdConnectionFailed", "line_number": 201, "usage_type": "attribute"}, {"api_name": "etcd.EtcdLeaderElectionInProgress", "line_number": 202, "usage_type": "attribute"}, {"api_name": "etcd.EtcdWatcherCleared", "line_number": 203, "usage_type": "attribute"}, {"api_name": "etcd.EtcdEventIndexCleared", "line_number": 204, "usage_type": "attribute"}, {"api_name": "etcd.EtcdException", "line_number": 216, "usage_type": "attribute"}, {"api_name": "patroni.utils.sleep", "line_number": 218, "usage_type": "call"}, {"api_name": "patroni.dcs.Member.from_node", "line_number": 223, "usage_type": "call"}, {"api_name": "patroni.dcs.Member", "line_number": 223, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path", "line_number": 223, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path", "line_number": 228, "usage_type": "attribute"}, {"api_name": "patroni.dcs.Member", "line_number": 244, "usage_type": "call"}, {"api_name": "patroni.dcs.Leader", "line_number": 246, "usage_type": "call"}, {"api_name": "patroni.dcs.Failover.from_node", "line_number": 251, "usage_type": "call"}, {"api_name": "patroni.dcs.Failover", "line_number": 251, "usage_type": "name"}, {"api_name": "patroni.dcs.Cluster", "line_number": 253, "usage_type": "call"}, {"api_name": "etcd.EtcdKeyNotFound", "line_number": 254, "usage_type": "attribute"}, {"api_name": "patroni.dcs.Cluster", "line_number": 255, "usage_type": "call"}, {"api_name": "etcd.EtcdAlreadyExist", "line_number": 271, "usage_type": "attribute"}, {"api_name": "patroni.utils.RetryFailedError", "line_number": 273, "usage_type": "name"}, {"api_name": "etcd.EtcdException", "line_number": 273, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 309, "usage_type": "call"}, {"api_name": "etcd.EtcdWatchTimedOut", "line_number": 317, "usage_type": "attribute"}, {"api_name": "etcd.EtcdException", "line_number": 320, "usage_type": "attribute"}, {"api_name": "logging.exception", "line_number": 321, "usage_type": "call"}, {"api_name": "time.time", "line_number": 323, "usage_type": "call"}]} +{"seq_id": "5575342817", "text": 
"from typing import Any, Dict, List, Type, TypeVar, Union\n\nimport attr\n\nfrom ..models.search_volume_history import SearchVolumeHistory\nfrom ..types import UNSET, Unset\n\nT = TypeVar(\"T\", bound=\"Tag\")\n\n@attr.s(auto_attribs=True)\nclass Tag:\n \"\"\"\n Attributes:\n tag (Union[Unset, str]):\n history (Union[Unset, List[SearchVolumeHistory]]):\n \"\"\"\n\n tag: Union[Unset, str] = UNSET\n history: Union[Unset, List[SearchVolumeHistory]] = UNSET\n additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n\n def to_dict(self) -> Dict[str, Any]:\n tag = self.tag\n history: Union[Unset, List[Dict[str, Any]]] = UNSET\n if not isinstance(self.history, Unset):\n history = []\n for history_item_data in self.history:\n history_item = history_item_data.to_dict()\n\n history.append(history_item)\n\n\n\n\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update({\n })\n if tag is not UNSET:\n field_dict[\"tag\"] = tag\n if history is not UNSET:\n field_dict[\"history\"] = history\n\n return field_dict\n\n\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy() if src_dict else dict(src_dict.copy())\n tag = d.pop(\"tag\", UNSET)\n\n history = []\n _history = d.pop(\"history\", UNSET)\n for history_item_data in (_history or []):\n history_item = SearchVolumeHistory.from_dict(history_item_data)\n\n\n\n history.append(history_item)\n\n\n tag = cls(\n tag=tag,\n history=history,\n )\n\n tag.additional_properties = d\n return tag\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties\n", "repo_name": "lboaccu/accuranker-api-client", "sub_path": "accuranker_api_client/models/tag.py", "file_name": "tag.py", "file_ext": "py", "file_size_in_byte": 2256, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.TypeVar", "line_number": 8, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 18, "usage_type": "name"}, {"api_name": "types.Unset", "line_number": 18, "usage_type": "name"}, {"api_name": "types.UNSET", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 19, "usage_type": "name"}, {"api_name": "types.Unset", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 19, "usage_type": "name"}, {"api_name": "models.search_volume_history.SearchVolumeHistory", "line_number": 19, "usage_type": "name"}, {"api_name": "types.UNSET", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 20, "usage_type": "name"}, {"api_name": "attr.ib", "line_number": 20, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 25, "usage_type": "name"}, {"api_name": "types.Unset", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 25, "usage_type": "name"}, {"api_name": 
"types.UNSET", "line_number": 25, "usage_type": "name"}, {"api_name": "types.Unset", "line_number": 26, "usage_type": "argument"}, {"api_name": "typing.Dict", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 37, "usage_type": "name"}, {"api_name": "types.UNSET", "line_number": 41, "usage_type": "name"}, {"api_name": "types.UNSET", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 51, "usage_type": "name"}, {"api_name": "types.UNSET", "line_number": 53, "usage_type": "argument"}, {"api_name": "types.UNSET", "line_number": 56, "usage_type": "argument"}, {"api_name": "models.search_volume_history.SearchVolumeHistory.from_dict", "line_number": 58, "usage_type": "call"}, {"api_name": "models.search_volume_history.SearchVolumeHistory", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 74, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 80, "usage_type": "name"}, {"api_name": "attr.s", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "73830500534", "text": "import json\r\nimport readPortfolio as rp\r\nimport readMajorIndices as rm\r\n\r\ndef graphic():\r\n majorIndicesComprador, majorIndicesNeutro, majorIndicesVendedor = rm.majorIndices()\r\n portfolioComprador, portfolioNeutro, portfolioVendedor = rp.portfolio()\r\n\r\n comprador = majorIndicesComprador + portfolioComprador\r\n vendedor = majorIndicesVendedor + portfolioVendedor\r\n neutro = majorIndicesNeutro + portfolioNeutro\r\n\r\n dadosGraphic = {'Comprador': comprador, 'Vendedor': vendedor, 'Neutro': neutro}\r\n with open('data/dadosGraphic.json', 'w') as arq:\r\n json.dump(dadosGraphic, arq, indent=4)", "repo_name": "Cawarinny/Compass", "sub_path": "graphic.py", "file_name": "graphic.py", "file_ext": "py", "file_size_in_byte": 613, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "readMajorIndices.majorIndices", "line_number": 6, "usage_type": "call"}, {"api_name": "readPortfolio.portfolio", "line_number": 7, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "34953704466", "text": "__author__ = \"Vanessa Sochat\"\n__copyright__ = \"Copyright 2020-2021, Vanessa Sochat\"\n__license__ = \"MPL 2.0\"\n\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom snakeface import settings\n\n\ndef globals(request):\n \"\"\"Returns a dict of defaults to be used by templates, if configured\n correcty in the settings.py file.\"\"\"\n return {\n \"DOMAIN\": settings.DOMAIN_NAME,\n \"WORKFLOW_UPDATE_SECONDS\": settings.cfg.WORKFLOW_UPDATE_SECONDS,\n \"NOTEBOOK\": settings.cfg.NOTEBOOK,\n \"TWITTER_USERNAME\": settings.cfg.TWITTER_USERNAME,\n \"GITHUB_REPOSITORY\": settings.cfg.GITHUB_REPOSITORY,\n \"GITHUB_DOCUMENTATION\": settings.cfg.GITHUB_DOCUMENTATION,\n \"SITE_NAME\": get_current_site(request).name,\n \"GOOGLE_ANALYTICS_ID\": settings.cfg.GOOGLE_ANALYTICS_ID,\n \"GOOGLE_ANALYTICS_SITE\": settings.cfg.GOOGLE_ANALYTICS_SITE,\n }\n", "repo_name": "snakemake/snakeface", "sub_path": "snakeface/context_processors.py", "file_name": 
"context_processors.py", "file_ext": "py", "file_size_in_byte": 887, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "21", "api": [{"api_name": "snakeface.settings.DOMAIN_NAME", "line_number": 13, "usage_type": "attribute"}, {"api_name": "snakeface.settings", "line_number": 13, "usage_type": "name"}, {"api_name": "snakeface.settings.cfg", "line_number": 14, "usage_type": "attribute"}, {"api_name": "snakeface.settings", "line_number": 14, "usage_type": "name"}, {"api_name": "snakeface.settings.cfg", "line_number": 15, "usage_type": "attribute"}, {"api_name": "snakeface.settings", "line_number": 15, "usage_type": "name"}, {"api_name": "snakeface.settings.cfg", "line_number": 16, "usage_type": "attribute"}, {"api_name": "snakeface.settings", "line_number": 16, "usage_type": "name"}, {"api_name": "snakeface.settings.cfg", "line_number": 17, "usage_type": "attribute"}, {"api_name": "snakeface.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "snakeface.settings.cfg", "line_number": 18, "usage_type": "attribute"}, {"api_name": "snakeface.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "django.contrib.sites.shortcuts.get_current_site", "line_number": 19, "usage_type": "call"}, {"api_name": "snakeface.settings.cfg", "line_number": 20, "usage_type": "attribute"}, {"api_name": "snakeface.settings", "line_number": 20, "usage_type": "name"}, {"api_name": "snakeface.settings.cfg", "line_number": 21, "usage_type": "attribute"}, {"api_name": "snakeface.settings", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "3583295506", "text": "from time import sleep\nfrom random import randint\n\nfrom utils import display_title, display_greeting, display_commands, get_cmd, display_incorrect_command, check_money\nfrom constants import CHEAP_ROOM_LUCK_MODIFIER, WELL_MAINTAINED_ROOM_LUCK_MODIFIER, LUXURY_ROOM_LUCK_MODIFIER\nfrom constants import CHEAP_ROOM_ROB_PERCENTAGE, WELL_MAINTAINED_ROOM_ROB_PERCENTAGE, LUXURY_ROOM_ROB_PERCENTAGE\n\n\ndef pay_for_service(hero, cost):\n hero.gold = round(hero.gold - cost, 3)\n\n\ndef _be_robbed(hero, rob_percentage):\n stolen_gold = (hero.gold * rob_percentage) / 100\n\n hero.gold = round(hero.gold - stolen_gold, 3)\n print('\\nYou were robbed while you slept.\\nYou lost {0} gold'.format(stolen_gold))\n\n\ndef _limit_health_and_energy(hero):\n if hero.health > hero.max_health:\n hero.health = hero.max_health\n\n if hero.energy > hero.max_energy:\n hero.energy = hero.max_energy\n\n\ndef _sleep(hero, room_level):\n print('\\nYou fell asleep right in the equipment.\\nSleeping...')\n\n count = 0\n modifier = 1\n\n if room_level == 'low':\n modifier = 5\n\n hero.health += 10\n hero.energy += 10\n elif room_level == 'middle':\n modifier = 7\n\n hero.health += 20\n hero.energy += 20\n elif modifier == 'high':\n modifier = 10\n\n hero.health += 30\n hero.energy += 30\n while count <= 5:\n hero.health += hero.hp_regen * modifier\n hero.energy += hero.energy_regen * modifier\n count += 1\n sleep(1)\n\n hero.health = round(hero.health, 3)\n hero.energy = round(hero.energy, 3)\n\n _limit_health_and_energy(hero)\n\n\ndef _event(hero, luck_modifier, rob_percentage):\n chance = hero.luck * luck_modifier\n\n if chance < randint(1, 100):\n _be_robbed(hero=hero, rob_percentage=rob_percentage)\n return True\n else:\n return False\n\n\ndef _luxury_room(hero):\n _sleep(hero=hero, room_level='high')\n if _event(hero=hero, luck_modifier=LUXURY_ROOM_LUCK_MODIFIER,\n 
def _luxury_room(hero):\n    _sleep(hero=hero, room_level='high')\n    if _event(hero=hero, luck_modifier=LUXURY_ROOM_LUCK_MODIFIER,\n              rob_percentage=LUXURY_ROOM_ROB_PERCENTAGE):\n        print(\"\\nYou slept well, but discovering the loss makes you sad\")\n    else:\n        print(\"\\nYou feel well rested\")\n\n\ndef _well_maintained_room(hero):\n    _sleep(hero=hero, room_level='middle')\n    if _event(hero=hero, luck_modifier=WELL_MAINTAINED_ROOM_LUCK_MODIFIER,\n              rob_percentage=WELL_MAINTAINED_ROOM_ROB_PERCENTAGE):\n        print('\\nYou slept almost well, but losing money makes you angry')\n    else:\n        print(\"\\nYou're quite well rested\")\n\n\ndef _cheap_room(hero):\n    _sleep(hero=hero, room_level='low')\n    if _event(hero=hero, luck_modifier=CHEAP_ROOM_LUCK_MODIFIER,\n              rob_percentage=CHEAP_ROOM_ROB_PERCENTAGE):\n        print(\"\\nHeck! Not only did I sleep on a hard bed, I was robbed too\")\n    else:\n        print('\\nI need to fight hard so I never come back here again...')\n\n\ndef _have_a_drink(hero):\n    pass\n\n\ndef _rent_room(hero):\n    cheap_room_cost = 5\n    well_maintained_room_cost = 12\n    luxury_room_cost = 30\n\n    if hero.gold > 0:\n        while True:\n            print('\\nYour health: {0}/{1}\\nYour energy: {2}/{3}\\nYou have {4} gold\\n'.format(\n                hero.health, hero.max_health,\n                hero.energy, hero.max_energy,\n                hero.gold\n            ))\n\n            display_commands(commands=[\n                'Rent cheap room ({0} gold)'.format(cheap_room_cost),\n                'Rent well-maintained room ({0} gold)'.format(well_maintained_room_cost),\n                'Rent luxury room ({0} gold)'.format(luxury_room_cost),\n                'Back to the previous menu'\n            ])\n\n            cmd = get_cmd(message='What room would you rent?\\n')\n\n            if cmd == '1':\n                if check_money(hero_money=hero.gold, cost=cheap_room_cost):\n                    pay_for_service(hero=hero, cost=cheap_room_cost)\n                    _cheap_room(hero)\n                else:\n                    print('It seems like you do not have enough gold')\n            elif cmd == '2':\n                if check_money(hero_money=hero.gold, cost=well_maintained_room_cost):\n                    pay_for_service(hero=hero, cost=well_maintained_room_cost)\n                    _well_maintained_room(hero)\n                else:\n                    print(\"Take a look at a cheaper room, because you don't have enough gold\")\n            elif cmd == '3':\n                if check_money(hero_money=hero.gold, cost=luxury_room_cost):\n                    pay_for_service(hero=hero, cost=luxury_room_cost)\n                    _luxury_room(hero)\n                else:\n                    print(\"Hahaha, son, are you kidding me? Thou dost not have enough gold!\")\n            elif cmd == '4':\n                return\n            else:\n                display_incorrect_command()\n    else:\n        print('\\nWhat are you doing here without money?! Get out!')\n\n
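# Usage sketch (added for illustration; every value below is invented):\n# tavern() only reads a handful of attributes, so a duck-typed hero is\n# enough for a quick manual run:\n#   class Hero: pass\n#   h = Hero(); h.gold = 40.0; h.luck = 10\n#   h.health = h.max_health = 100; h.energy = h.max_energy = 50\n#   h.hp_regen = h.energy_regen = 1\n#   tavern(h)\n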
def tavern(hero):\n    display_title('Tavern')\n    display_greeting()\n\n    while True:\n        display_commands(commands=[\n            'Rent the room',\n            \"Wet one's whistle\",\n            'Back to the previous menu'\n        ])\n\n        cmd = get_cmd(message='What do you want?\\n')\n\n        if cmd == '1':\n            _rent_room(hero)\n        elif cmd == '2':\n            print(\"\\nAre you kidding me? I wanna sleep! Let's rent a room...\")\n        elif cmd == '3':\n            return\n        else:\n            display_incorrect_command()\n", "repo_name": "bldaj/Knights", "sub_path": "tavern.py", "file_name": "tavern.py", "file_ext": "py", "file_size_in_byte": 5419, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "time.sleep", "line_number": 53, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 64, "usage_type": "call"}, {"api_name": "constants.LUXURY_ROOM_LUCK_MODIFIER", "line_number": 73, "usage_type": "name"}, {"api_name": "constants.LUXURY_ROOM_ROB_PERCENTAGE", "line_number": 74, "usage_type": "name"}, {"api_name": "constants.WELL_MAINTAINED_ROOM_LUCK_MODIFIER", "line_number": 82, "usage_type": "name"}, {"api_name": "constants.WELL_MAINTAINED_ROOM_ROB_PERCENTAGE", "line_number": 83, "usage_type": "name"}, {"api_name": "constants.CHEAP_ROOM_LUCK_MODIFIER", "line_number": 91, "usage_type": "name"}, {"api_name": "constants.CHEAP_ROOM_ROB_PERCENTAGE", "line_number": 92, "usage_type": "name"}, {"api_name": "utils.display_commands", "line_number": 115, "usage_type": "call"}, {"api_name": "utils.get_cmd", "line_number": 122, "usage_type": "call"}, {"api_name": "utils.check_money", "line_number": 125, "usage_type": "call"}, {"api_name": "utils.check_money", "line_number": 131, "usage_type": "call"}, {"api_name": "utils.check_money", "line_number": 137, "usage_type": "call"}, {"api_name": "utils.display_incorrect_command", "line_number": 145, "usage_type": "call"}, {"api_name": "utils.display_title", "line_number": 151, "usage_type": "call"}, {"api_name": "utils.display_greeting", "line_number": 152, "usage_type": "call"}, {"api_name": "utils.display_commands", "line_number": 155, "usage_type": "call"}, {"api_name": "utils.get_cmd", "line_number": 161, "usage_type": "call"}, {"api_name": "utils.display_incorrect_command", "line_number": 170, "usage_type": "call"}]} +{"seq_id": "41090078488", "text": "import pandas as pd\nimport json\nimport requests\nimport csv\nimport json\nimport datetime\n\n\ndf = pd.read_csv('planning_area_list_2022.csv')\n# Remove the first row as it is not relevant\ndf = df.tail(-1)\n\n# insert your own API key\nAuthorization_key = \"Insert your API key\"\n\nheaders = {'Content-Type': 'application/json',\n           'Authorization': Authorization_key,\n           'User-Agent': 'Mozilla/5.0'}\n\n\ndef get_data(url):\n\n    response = requests.request(\"GET\", url, headers=headers)\n\n    if response.status_code == 200:\n        return json.loads(response.content.decode('utf-8'))\n    else:\n        return None\n\n
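# Hedged example (added; the planning area here is just a sample value): a\n# successful call returns the decoded JSON payload, a failed one returns None:\n#   info = get_data('https://www.onemap.gov.sg/api/public/popapi/'\n#                   'getHouseholdMonthlyIncomeWork?planningArea=BEDOK&year=2020')\n#   if info is None:\n#       ...  # handle the failed request before flattening\n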
# loop to call the OneMap API with the planning areas obtained above \n# note that the OneMap API is updated every five years, hence the 2020 data\nfor planning_area in df['pln_area_n']:\n    url = f\"https://www.onemap.gov.sg/api/public/popapi/getHouseholdMonthlyIncomeWork?planningArea={planning_area}&year=2020\"\n    data_info = get_data(url)\n    if data_info is not None:\n        print(\"Success! JSON dataset to convert to CSV is embedded in data_info['Result']\")\n    else:\n        print('[!] Request Failed')\n\n    # flatten nested data in the JSON payload\n    from pandas.io.json import json_normalize\n    data = data_info\n    print(data_info)\n    flattendata = json_normalize(data)\n    flattendata.to_csv('household_monthly_income_' + '2020' + '.csv', mode='a', index=False)", "repo_name": "wjang96/onemap-sg", "sub_path": "Code/OneMap_get_household_monthly_income.py", "file_name": "OneMap_get_household_monthly_income.py", "file_ext": "py", "file_size_in_byte": 1417, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 23, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.io.json.json_normalize", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "72149799733", "text": "from sklearn.ensemble import AdaBoostClassifier\n\nfrom DTLearner import DTLearner\nfrom ClassifierType import ClassifierType\nfrom ClassifierType import plot_valid_curve\nfrom sklearn.model_selection import validation_curve\nimport numpy as np\n\n# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostClassifier.html\n# Boosted version of the decision trees, so it needs the same parameters as the DTLearner\nclass BoostLearner(ClassifierType):\n    def __init__(self, data, target, data_name, clz_name):\n        super().__init__(data, target, data_name)\n        self.clz_name = clz_name\n\n    def init_classifier(self, optimal_values):\n        optimal_values['random_state'] = 3\n        self.classifier = AdaBoostClassifier(**optimal_values)\n        return self.classifier\n\n    def gen_validation_curves(self, possible_range_dict, cv_folds=3):\n        self.gen_validation_curve(AdaBoostClassifier(base_estimator=DTLearner.get_regular_classifier(self.data_name)), AdaBoostClassifier(base_estimator=DTLearner.get_pruned_classifier(self.data_name)), param_name=\"n_estimators\", param_value=possible_range_dict['n_estimators'])\n        self.gen_validation_curve(AdaBoostClassifier(base_estimator=DTLearner.get_regular_classifier(self.data_name)), AdaBoostClassifier(base_estimator=DTLearner.get_pruned_classifier(self.data_name)), param_name=\"learning_rate\", param_value=possible_range_dict['learning_rate'])\n\n    def gen_validation_curve(self, clf, clf_prune, param_name, param_value, cv_folds=5):\n        file_name = \"valid_curve_{data_name}_{clz_name}_{param_name}_noprune\".format(data_name=self.data_name, clz_name=self.clz_name, param_name=param_name)\n        train_scores, valid_scores = validation_curve(clf, self.X_train, self.y_train, param_name, param_value, cv=cv_folds, verbose=1, n_jobs=20)\n        np.savez('./output/results/{}/{}.npz'.format(self.data_name, file_name), train_scores=train_scores, valid_scores=valid_scores)\n        print('file_name: ' + file_name)\n        data = 
np.load('./output/results/{}/{}.npz'.format(self.data_name, file_name))\n pruned_train_scores, pruned_valid_scores = data['train_scores'], data['valid_scores']\n\n plot_valid_curve(param_name, param_value, train_scores, valid_scores, self.data_name, file_name, pruned_train_scores, pruned_valid_scores)", "repo_name": "sengopal/classic-ml-review-paper", "sub_path": "BoostLearner.py", "file_name": "BoostLearner.py", "file_ext": "py", "file_size_in_byte": 2997, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "ClassifierType.ClassifierType", "line_number": 11, "usage_type": "name"}, {"api_name": "sklearn.ensemble.AdaBoostClassifier", "line_number": 18, "usage_type": "call"}, {"api_name": "sklearn.ensemble.AdaBoostClassifier", "line_number": 22, "usage_type": "call"}, {"api_name": "DTLearner.DTLearner.get_regular_classifier", "line_number": 22, "usage_type": "call"}, {"api_name": "DTLearner.DTLearner", "line_number": 22, "usage_type": "name"}, {"api_name": "DTLearner.DTLearner.get_pruned_classifier", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.ensemble.AdaBoostClassifier", "line_number": 23, "usage_type": "call"}, {"api_name": "DTLearner.DTLearner.get_regular_classifier", "line_number": 23, "usage_type": "call"}, {"api_name": "DTLearner.DTLearner", "line_number": 23, "usage_type": "name"}, {"api_name": "DTLearner.DTLearner.get_pruned_classifier", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.model_selection.validation_curve", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.savez", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.model_selection.validation_curve", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.savez", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 37, "usage_type": "call"}, {"api_name": "ClassifierType.plot_valid_curve", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "3622230574", "text": "# %%\nfrom sklearn.linear_model import LogisticRegression\nimport torch\nimport numpy as np\nfrom torch import distributions\nimport sys\nsys.path.append(\"..\")\nfrom functions.objective_funcs_torch import get_dat_vals_multidim # noqa:E402\nfrom functions.estimators_torch import kliep_multi_dim_sep_wrap # noqa:E402\nfrom functions.np_classifier_torch import cutoff_bin, power_alpha_calc # noqa:E402\nunif = distributions.Uniform(0, 1)\n# %%\n\n\ndef get_ci(vec, verbose=True):\n \"\"\"Get mean and CI for mean from vector\n\n Args:\n vec (Tensor): The vector of values you want the C.I. for the mean from\n verbose (bool, optional): Whether or not to print the CI. 
Defaults to True.\n    \"\"\"\n    n = vec.shape[0]\n    mean = torch.mean(vec, 0)\n    se = torch.std(vec, 0)/(n**0.5)\n    ci_up = mean+1.96*se\n    ci_low = mean-1.96*se\n    if verbose:\n        print(f\"Our Estimated Expected Power is: {mean:4.3f}\")\n        print(f\"With ci({ci_low:4.3f}, {ci_up:4.3f})\")\n    return([mean, ci_low, ci_up])\n\n\ndef progress(percent=0, width=40):\n    left = width * percent // 100\n    right = width - left\n\n    tags = \"#\" * left\n    spaces = \" \" * right\n    percents = f\"{percent:.0f}%\"\n\n    print(\"\\r[\", tags, spaces, \"]\", percents, sep=\"\", end=\"\", flush=True)\n\n\ndef corrupt_func(data, missing_funcs, seed=None):\n    \"\"\"Corrupts data along each feature according to pre-specified missing functions\n\n    Args:\n        data (Tensor): The data to corrupt\n        missing_funcs (List): A list of missing functions for each feature.\n            If no missingness should be applied, put None.\n\n    Returns:\n        Tensor: Corrupted Data\n    \"\"\"\n    if seed is not None:\n        # print(f\"Missing seed is {seed}\")\n        torch.manual_seed(seed)\n    # Get uniform samples to select missing points\n    us = unif.sample(data.shape)\n    # Set up missing probabilities for each point\n    misses = torch.zeros(data.shape)\n    for j, miss_func in enumerate(missing_funcs):\n        if miss_func is not None:\n            # If missing function for column available\n            # use it to calculate missing prob\n            misses[:, j] = miss_func(data[:, j])\n    # Replace all points which should be missing with NaN\n    nan = torch.nan\n    out_data = torch.where(\n        us < misses, nan, data.double()).float()\n\n    return out_data\n\n\ndef miss_func_creator(theta):\n    \"\"\"Create logistic regression functions from parameters\n\n    Args:\n        theta (Tensor): A 2-element tensor containing the intercept and slope term.\n    \"\"\"\n    def miss_func(x):\n        return 1/(1+torch.exp((-theta[0]-theta[1]*x)))\n    return miss_func\n\n\ndef create_standard_miss_func(mean, sd, sign, shift=0.):\n    def miss_func(x):\n        return(1/(1+torch.exp(sign*((x-mean)/sd)-shift)))\n    return miss_func\n\n\ndef func_adj(m, std, f):\n    \"\"\"Change missing functions to take in normalised values\n\n    Args:\n        m (float): mean of original data\n        std (float): The standard deviation of original data\n        f (function): The function to normalise\n\n    Returns:\n        function: The equivalent function to f for taking in normalised data.\n    \"\"\"\n    if f is None:\n        return None\n    else:\n        def miss_func(x):\n            return f(x*std+m)\n        return miss_func\n\n\ndef learn_missing_func(corrupted_data, clean_data, nlearn=50, seed=None):\n    \"\"\"A function to learn the missingness structure of the data\n    by revealing the value of a few key points\n\n    Args:\n        corrupted_data (Tensor): Data with corruptions\n        clean_data (Tensor): Fully observed data (should be identical to non-missing parts of corrupted data)\n        nlearn (int, optional): The number of missing points to learn. 
Defaults to 50.\n\n    Returns:\n        List: List of estimated missing functions.\n    \"\"\"\n    y = torch.isnan(corrupted_data).bool()\n    est_miss_params = []\n    # Set seed if given\n    if seed is not None:\n        torch.manual_seed(seed)\n    for j in range(corrupted_data.shape[1]):\n        # Get missing indices to learn\n        which_miss = torch.nonzero(y[:, j]).reshape(-1)\n        nmiss = which_miss.shape[0]\n        x_adj = (corrupted_data[:, j]).clone().detach()\n        # Learn nlearn of those values\n        to_learn = which_miss[\n            torch.multinomial(torch.zeros(which_miss.shape[0])+1, nlearn)]\n        x_adj[to_learn] = clean_data[to_learn, j]\n        no_miss2 = ~torch.isnan(x_adj)\n\n        # ####Check if data completely separable####\n        # non-missing data\n        null_dat = x_adj[~y[:, j]]\n        # learned missing data\n        alt_dat = x_adj[to_learn]\n        # Check if separated\n        if torch.max(null_dat) < torch.min(alt_dat):\n            midpoint = (torch.max(null_dat) + torch.min(alt_dat))/2\n            params = torch.tensor([-100.*midpoint, 100.])\n        elif torch.max(alt_dat) < torch.min(null_dat):\n            midpoint = (torch.max(null_dat) + torch.min(alt_dat))/2\n            params = torch.tensor([100.*midpoint, -100.])\n        else:\n            # Set up logistic regression\n            model = LogisticRegression(\n                solver='lbfgs', penalty='none')\n            # Fit logistic regression\n            model.fit((x_adj[no_miss2]).numpy().reshape(-1, 1),\n                      (y[no_miss2, j]).numpy())\n            # Adjust intercept\n            a_0 = model.intercept_-np.log(nlearn/nmiss)\n            # Bind parameters\n            params = torch.tensor(\n                [a_0[0], model.coef_[0, 0]])\n\n        est_miss_params.append(params)\n    # Create estimated missingness functions\n    est_miss_funcs = [miss_func_creator(miss_param)\n                      for miss_param in est_miss_params]\n    return est_miss_funcs\n\n\ndef missing_pipeline(data_dict, missing_funcs, dr_proc=kliep_multi_dim_sep_wrap,\n                     dat_val_fun=get_dat_vals_multidim,\n                     norm=True, est_miss=True, lr=1, alpha=0.1, f=lambda x: x,\n                     delta=0.1, nlearn=50, miss_seed=None, learn_seed=None, **kwargs):\n    \"\"\"Perform the entire DRE pipeline: corruption, normalisation, learn missingness, DRE, NP classification\n\n    Args:\n        data_dict (dictionary): A dictionary containing all the partitions of the data. These are:\n            alt_tr,null_tr,alt_test, null_cal.\n        missing_funcs (list): list of missingness functions for data. If None, no corruption is performed.\n        norm (bool, optional): Should normalisation be done. Defaults to True.\n        est_miss (bool, optional): Should missing functions be estimated. Defaults to True.\n        lr (float, optional): learning rate for DRE. Defaults to 1.\n        alpha (float/list, optional): List of alphas for NP classification. Defaults to 0.1.\n        delta (float/list, optional): List of deltas for NP classification. Defaults to 0.1.\n        nlearn (int, optional): Number of points used to learn missingness funcs. 
Defaults to 50.\n\n    Returns:\n        dictionary: contains power and dr fit\n    \"\"\"\n    out_dict = {}\n    model_dat_dict = data_dict.copy()\n\n    # Convert alpha\n    if type(alpha) == float:\n        alpha = [alpha]\n    if type(delta) == float:\n        delta = [delta]\n\n    # ### Optionally perform corruption ###\n    if missing_funcs is not None:\n        model_dat_dict[\"alt_tr\"] = corrupt_func(\n            model_dat_dict[\"alt_tr\"], missing_funcs, seed=miss_seed)\n\n    # ### Optionally Perform normalisation ###\n    if norm:\n        # Re-assign full_tr\n        model_dat_dict[\"full_tr\"] = torch.cat(\n            (model_dat_dict[\"null_tr\"], model_dat_dict[\"alt_tr\"]),\n            dim=0)\n        # Get mean and sd\n        miss_std = torch.tensor(\n            np.nanstd(model_dat_dict[\"full_tr\"], axis=0))\n        miss_mean = torch.nanmean(model_dat_dict[\"full_tr\"], dim=0)\n        # Normalise data\n        model_dat_dict = {key: (value-miss_mean)/miss_std for\n                          key, value in model_dat_dict.items()}\n        # Normalise\n        if (not est_miss) & (missing_funcs is not None):\n            missing_funcs = [\n                func_adj(m, std, f) for f, m, std in\n                zip(missing_funcs, miss_mean, miss_std)]\n        # If estimated, normalise non-missing data as well\n        else:\n            data_dict = {key: (value-miss_mean)/miss_std for\n                         key, value in data_dict.items()}\n        out_dict[\"norm\"] = {\"mean\": miss_mean, \"std\": miss_std}\n    # ### Optionally learn missingness functions ###\n    if est_miss:\n        miss_funcs_to_use = learn_missing_func(\n            corrupted_data=model_dat_dict[\"alt_tr\"], clean_data=data_dict[\"alt_tr\"],\n            nlearn=nlearn, seed=learn_seed)\n    else:\n        miss_funcs_to_use = missing_funcs\n    out_dict[\"est_miss_funcs\"] = miss_funcs_to_use\n    # ### Perform DRE ###\n    # Do initial data summaries\n    gen_dat_miss = dat_val_fun(\n        x_plus=model_dat_dict[\"alt_tr\"], x_minus=model_dat_dict[\"null_tr\"],\n        varphi_plus=miss_funcs_to_use, varphi_minus=None, f=f, **kwargs\n    )\n    # Do actual DRE\n    miss_result = dr_proc(\n        gen_dat_miss, norm_fl=True, lr=lr, f=f, **kwargs)\n    # Extract estimated r\n    miss_est_dr = miss_result[\"r\"]\n    out_dict[\"dr\"] = miss_result\n\n    out_dict[\"power_res\"] = []\n    # ###NP Classification### #\n    # For each alpha-delta combination construct classifier.\n    for j in range(len(alpha)):\n        # Get NP classifier\n        threshold, classif = cutoff_bin(\n            class_func=miss_est_dr, alpha=alpha[j], delta=delta[j],\n            newdata=model_dat_dict[\"null_cal\"])\n\n        # Assess classifier\n        power_res = power_alpha_calc(\n            classif=classif, data_0=model_dat_dict[\"null_cal\"],\n            data_1=model_dat_dict[\"alt_test\"])\n        out_dict[\"power_res\"].append(power_res)\n\n    out_dict[\"alphas\"] = alpha\n    out_dict[\"deltas\"] = delta\n    out_dict[\"prop_miss\"] = (\n        torch.sum(torch.isnan(model_dat_dict[\"alt_tr\"]))\n        / torch.numel(model_dat_dict[\"alt_tr\"]))\n\n    return out_dict\n\n\ndef full_pipeline(null_df, alt_df, missing_funcs, n_altte, n_nulltr,\n                  dr_proc=kliep_multi_dim_sep_wrap,\n                  dat_val_fun=get_dat_vals_multidim,\n                  norm=True, est_miss=True, lr=1, alpha=0.1, f=lambda x: x,\n                  delta=0.1, nlearn=50, miss_seed=None, learn_seed=None,\n                  split_seed=None, **kwargs):\n    split_dict = {}\n    # Split these into test, train, and calibration\n    split_dict[\"null_tr\"] = null_df.sample(n_nulltr, random_state=split_seed)\n    split_dict[\"null_cal\"] = null_df.drop(\n        split_dict[\"null_tr\"].index)\n\n    split_dict[\"alt_test\"] = alt_df.sample(n_altte, random_state=split_seed)\n    split_dict[\"alt_tr\"] = alt_df.drop(\n        split_dict[\"alt_test\"].index)\n\n    # Get tensor versions\n    # null_tens = torch.tensor(null_df[]\n    split_tens = {}\n    for key, value in split_dict.items():\n        split_tens[key] = torch.tensor(\n            
(value).to_numpy().astype(np.float32))\n d = split_tens[\"null_tr\"].shape[1]\n # Impute each row of the tensor\n nanmeans = {}\n # Do original imputation to have only our own missingness\n for key, value in split_tens.items():\n nanmeans[key] = torch.nanmean(value, dim=0)\n for i in range(d):\n split_tens[key][torch.isnan(value[:, i]), i] = nanmeans[key][i]\n\n out = missing_pipeline(\n split_tens, missing_funcs, dr_proc=dr_proc, dat_val_fun=dat_val_fun,\n est_miss=est_miss, lr=lr, alpha=alpha, delta=delta, f=f, nlearn=nlearn,\n norm=norm, miss_seed=miss_seed, learn_seed=learn_seed, **kwargs)\n return out\n", "repo_name": "joshgivens/DRE-NP-MissingData", "sub_path": "functions/pipeline_funcs.py", "file_name": "pipeline_funcs.py", "file_ext": "py", "file_size_in_byte": 11510, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.distributions.Uniform", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.distributions", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.std", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nan", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.where", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.isnan", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.nonzero", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.multinomial", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.isnan", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 150, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 161, "usage_type": "call"}, {"api_name": "functions.estimators_torch.kliep_multi_dim_sep_wrap", "line_number": 171, "usage_type": "name"}, {"api_name": "functions.objective_funcs_torch.get_dat_vals_multidim", "line_number": 172, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.nanstd", 
"line_number": 213, "usage_type": "call"}, {"api_name": "torch.nanmean", "line_number": 214, "usage_type": "call"}, {"api_name": "functions.np_classifier_torch.cutoff_bin", "line_number": 254, "usage_type": "call"}, {"api_name": "functions.np_classifier_torch.power_alpha_calc", "line_number": 259, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 267, "usage_type": "call"}, {"api_name": "torch.isnan", "line_number": 267, "usage_type": "call"}, {"api_name": "torch.numel", "line_number": 268, "usage_type": "call"}, {"api_name": "functions.estimators_torch.kliep_multi_dim_sep_wrap", "line_number": 274, "usage_type": "name"}, {"api_name": "functions.objective_funcs_torch.get_dat_vals_multidim", "line_number": 275, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 294, "usage_type": "attribute"}, {"api_name": "torch.nanmean", "line_number": 300, "usage_type": "call"}, {"api_name": "torch.isnan", "line_number": 302, "usage_type": "call"}]} +{"seq_id": "38229957684", "text": "from handlers.base import BaseHandler\nfrom models.models import Post\nfrom google.appengine.api import users, memcache\n\nimport cgi\nimport uuid\n\nclass AddPostHandler(BaseHandler):\n def get(self):\n params = {\n 'csrf_token': str(uuid.uuid4())\n }\n\n memcache.add(params['csrf_token'], True, 60 * 10)\n return self.render_template('add_post.html', params)\n\n def post(self):\n csrf_value = self.request.get('csrf-token')\n\n if not memcache.get(csrf_value):\n return self.write('CSRF attack detected!!')\n\n title = cgi.escape(self.request.get('title'))\n content = cgi.escape(self.request.get('text'))\n user = users.get_current_user()\n email = user.email()\n new_post = Post(title=title,\n content=content,\n user_email=email)\n\n new_post.put()\n return self.redirect_to('post', post_id=new_post.key.id())\n", "repo_name": "Cene93/Web-Dev-2", "sub_path": "NinjaTech-Forum/handlers/post_handler.py", "file_name": "post_handler.py", "file_ext": "py", "file_size_in_byte": 947, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "handlers.base.BaseHandler", "line_number": 8, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 11, "usage_type": "call"}, {"api_name": "google.appengine.api.memcache.add", "line_number": 14, "usage_type": "call"}, {"api_name": "google.appengine.api.memcache", "line_number": 14, "usage_type": "name"}, {"api_name": "google.appengine.api.memcache.get", "line_number": 20, "usage_type": "call"}, {"api_name": "google.appengine.api.memcache", "line_number": 20, "usage_type": "name"}, {"api_name": "cgi.escape", "line_number": 23, "usage_type": "call"}, {"api_name": "cgi.escape", "line_number": 24, "usage_type": "call"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 25, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 25, "usage_type": "name"}, {"api_name": "models.models.Post", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "31226948875", "text": "#!/usr/bin/env python\n# Name:\tMilou Nederstigt\n# Student number: 11022914\n\n\"\"\"\nThis script outputs a JSON file with the content of a csv.\nConverts a CSV file with three variables to a JSON file containing a dictioniary with three lists (one per variable).\n\"\"\"\n\nimport csv\nimport json\nimport sys\n\nif len (sys.argv) != 2 :\n print(\"Usage: convertCSV2JSON.py inputfile.csv\")\n sys.exit 
(1)\n\nelse:\n\tinputfile = sys.argv[1]\n\tif not inputfile.endswith('.csv'):\n\t\tprint(\"Usage: convertCSV2JSON.py inputfile.csv\")\n\t\tsys.exit(1)\n\n\toutputfile = inputfile.replace(\".csv\",\".json\")\n\n\twith open(inputfile) as csvfile:\n\t\treader = csv.DictReader(csvfile)\n\t\tx = []\n\t\ty = []\n\t\tz = []\n\t\ta = []\n\t\tdl = []\n\n\t\tfor row in reader:\n\t\t\tx.append(row[list(row.keys())[0]])\n\t\t\ty.append(row[list(row.keys())[1]])\n\t\t\tz.append(row[list(row.keys())[2]])\n\t\t\ta.append(row[list(row.keys())[3]])\n\n\t\tdl.append({list(row.keys())[0]: x, list(row.keys())[1]: y, list(row.keys())[2]: z, list(row.keys())[3]: a})\n\n\twith open(outputfile, 'w') as output:\n\t\tjson.dump(dl, output)\n", "repo_name": "mhmnederstigt/DataProcessing", "sub_path": "Homework/Week5/helper_scripts/convertCSV2JSON_dl.py", "file_name": "convertCSV2JSON_dl.py", "file_ext": "py", "file_size_in_byte": 1047, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 22, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 27, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 43, "usage_type": "call"}]}
+{"seq_id": "22415479961", "text": "# Pickling iris\r\n#uci ml repository --> datasets --> iris --> iris.data\r\n# download(using link) --> text --> parsing --> list of list --> pickle --> reading code\r\nimport requests\r\nimport pickle\r\n\r\ndata = requests.get(\"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\").text\r\n# print(data)\r\nlines = data.split(\"\\n\")\r\n# print(lines)\r\nlist2 = [item.split(\",\") for item in lines if len(item)!=0]\r\nprint(list2)\r\nwith open(\"myiris.pkl\",\"wb\") as 
f:\r\n    pickle.dump(list2,f)\r\n\r\n# ---------------------------Code to read pickle file -----------------------------------\r\nwith open(\"myiris.pkl\",\"rb\") as f:\r\n    print(pickle.load(f))\r\n\r\n", "repo_name": "akashbagwan2308/python_codes", "sub_path": "Exercise10(pickling iris).py", "file_name": "Exercise10(pickling iris).py", "file_ext": "py", "file_size_in_byte": 648, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 14, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 18, "usage_type": "call"}]}
+{"seq_id": "2702253437", "text": "from bottle import post, request\nfrom datetime import datetime\nimport re\nimport pdb\nimport json\n@post('/otzivi', method='post')\ndef reviews():\n    #store the user-submitted data in variables\n    revv=request.forms.get('REV')\n    nickk=request.forms.get('NICK')\n    markk=request.forms.get('MRK')\n    #validate the submitted data\n    if (revv!='' and nickk!='' and markk!='' and len(markk)==1 and (markk=='1' or markk=='2' or markk=='3' or markk=='4' or markk=='5')):\n        f = open('revstxt.txt', 'a') #mode 'a' appends to the file instead of overwriting the existing entries\n        #append the review to the text file\n        f.write(str(nickk) + '\\n' + str(markk) + '/5 (' + str(datetime.now().date()) + ')\\n' + str(revv) + '\\n---\\n')\n        f.close()\n        return \"Thank you!\"\n    else:\n        return \"Data entered incorrectly\"\n\n", "repo_name": "xsuperblyx/lb5-6", "sub_path": "Site/modulereview.py", "file_name": "modulereview.py", "file_ext": "py", "file_size_in_byte": 1017, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "bottle.request.forms.get", "line_number": 9, "usage_type": "call"}, {"api_name": "bottle.request.forms", "line_number": 9, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 9, "usage_type": "name"}, {"api_name": "bottle.request.forms.get", "line_number": 10, "usage_type": "call"}, {"api_name": "bottle.request.forms", "line_number": 10, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 10, "usage_type": "name"}, {"api_name": "bottle.request.forms.get", "line_number": 11, "usage_type": "call"}, {"api_name": "bottle.request.forms", "line_number": 11, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "name"}, {"api_name": "bottle.post", "line_number": 6, "usage_type": "call"}]}
+{"seq_id": "73014654773", "text": "import sqlite3\nfrom typing import List, Set, Dict, Tuple, Optional, AnyStr\n\n\nclass UserModel:\n\n    db_path = './src/db/datacart.db'\n\n    def __init__(self, id, email, name):\n        self.id = id\n        self.email = email\n        self.name = name\n\n    def __repr__(self):\n        return str(self.id) + \", \" + self.email + \", \" + self.name\n\n    def json(self):\n        return {'id': self.id,\n                'email': self.email,\n                'name': self.name}\n\n    @classmethod\n    def FindById(cls, id):\n        try:\n            ConnectionSqlite = sqlite3.connect(UserModel.db_path)\n            CursorSqlite = ConnectionSqlite.cursor()\n            ResultUsersById = CursorSqlite.execute(\n                'SELECT * FROM users WHERE id=?', (id,))\n            RowsUsersByIdAll = ResultUsersById.fetchall()\n            if not RowsUsersByIdAll:\n                return {'message': 
\"User id not found\"}\n for Row in RowsUsersByIdAll:\n UsersByIdAll = UserModel(Row[0], Row[1], Row[2])\n ConnectionSqlite.close()\n return UsersByIdAll.json(), 200\n return {'message': 'User Invalid ID supplied'}, 400\n except sqlite3.Error as er:\n return {'message': 'User not edited'}, 404\n\n @classmethod\n def DeleteById(cls, id):\n try:\n ConnectionSqlite = sqlite3.connect(UserModel.db_path)\n CursorSqlite = ConnectionSqlite.cursor()\n ResultDeleteUser = CursorSqlite.execute(\n 'DELETE FROM users WHERE id=?;', (id,))\n ConnectionSqlite.commit()\n ConnectionSqlite.close()\n if ResultDeleteUser.rowcount:\n return {'message': 'User successfully delete'}, 200\n return {'message': 'User Invalid ID supplied'}, 400\n except sqlite3.Error as er:\n return {'message': 'User not delete'}, 404\n\n @classmethod\n def UpdateById(cls, id, body):\n try:\n ConnectionSqlite = sqlite3.connect(UserModel.db_path)\n CursorSqlite = ConnectionSqlite.cursor()\n ResultUpdateUser = CursorSqlite.execute(\n 'UPDATE users SET email = ?, name = ? WHERE id=?;', (str(body['email']), str(body['name']), id))\n ConnectionSqlite.commit()\n ConnectionSqlite.close()\n if ResultUpdateUser.rowcount:\n return {'message': 'User successfully edited'}, 200\n return {'message': 'User Invalid ID supplied'}, 400\n except sqlite3.Error as er:\n return {'message': 'User not edited'}, 404\n\n @classmethod\n def InsertData(cls, body):\n try:\n ConnectionSqlite = sqlite3.connect(UserModel.db_path)\n CursorSqlite = ConnectionSqlite.cursor()\n ResultInsertUser = CursorSqlite.execute(\n 'INSERT INTO users VALUES(NULL, ?, ?)', (str(body['email']), str(body['name'])))\n ConnectionSqlite.commit()\n ConnectionSqlite.close()\n if ResultInsertUser.rowcount:\n return {'message': 'User successfully created'}, 201\n return {'message': 'User not created'}, 400\n except sqlite3.Error as er:\n return {'message': 'User not created'}, 404\n\n @classmethod\n def FindAll(cls):\n try:\n UsersList = list()\n ConnectionSqlite = sqlite3.connect(UserModel.db_path)\n CursorSqlite = ConnectionSqlite.cursor()\n ResultUsersAll = CursorSqlite.execute('SELECT * FROM users')\n RowsUsersAll = ResultUsersAll.fetchall()\n for Row in RowsUsersAll:\n UsersList.append(UserModel(Row[0], Row[1], Row[2]))\n ConnectionSqlite.close()\n if len(UsersList) >= 0:\n return [user.json() for user in UsersList], 200\n return {'message': 'User not found'}, 400\n except sqlite3.Error as er:\n return {'message': 'User not found'}, 404\n", "repo_name": "Danil0ws/challenge-01", "sub_path": "src/models/user.py", "file_name": "user.py", "file_ext": "py", "file_size_in_byte": 3953, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sqlite3.connect", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 67, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 73, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 82, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 89, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 99, "usage_type": "attribute"}]} +{"seq_id": "9378674549", "text": "import json, 
requests\nimport datetime\n\nmobileUrl=\"http://14.53.49.163:9000/accident\"\nwebUrl=\"http://223.131.2.220:1818/accidentLocation\"\ndate_string = str(datetime.datetime.now())\ndate_string=datetime.datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S.%f')\ndate_string= date_string.strftime(\"%Y-%m-%d %H:%M:%S\")\nappData = {\"alarm\": \"True\",\"mapX\":37.4763121,\"mapY\":126.9021}\ndef myconverter(o):\n    if isinstance(o, datetime.datetime):\n        return o.__str__()\nwebData = {'Content-Type': 'application/json; charset=utf-8'}\nparam={\"mapX\":\"37.4763121\",\"mapY\":\"126.9021\",}\njsonData = json.dumps(param)\n\ndef mobileSend():\n    res=requests.post(mobileUrl,json=appData)\n    print(\"Status Code = \",res.status_code)\ndef webSend():\n    res=requests.post(webUrl,headers=webData,data=jsonData)\n    print(\"Status Code = \",res.status_code)\n", "repo_name": "sjh50200/embedded", "sub_path": "embedded_raspberrypi/alarm.py", "file_name": "alarm.py", "file_ext": "py", "file_size_in_byte": 825, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "datetime.datetime.now", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 6, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 7, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "6324279079", "text": "#!/usr/bin/env python3\n\"\"\"\nDemo script for pick&place task sequence. 
Compatible with fake, simulated and real robot.\n\"\"\"\n\n\nimport random\nimport sys\nfrom copy import deepcopy\nfrom math import pi\nfrom typing import Tuple\n\nimport geometry_msgs.msg\nimport moveit_commander\nimport rospy\nimport std_msgs.msg\n\n\ndef setup_planning_scene(\n scene: moveit_commander.planning_scene_interface.PlanningSceneInterface,\n object_pose: geometry_msgs.msg.PoseStamped,\n object_size: Tuple[float, float, float],\n):\n \"\"\"\n Setup the planning scene of MoveIt for collision checking.\n \"\"\"\n\n ## 0 (cleanup): Remove all scene objects from previous run\n scene.remove_attached_object()\n for scene_object_name in scene.get_known_object_names():\n scene.remove_world_object(scene_object_name)\n\n ## 1: Add `module_cover` to the planning scene\n MODULE_COVER_FRAME_ID = \"phantomx_pincher_task_board_module_link\"\n MODULE_COVER_BOX_SIZE = (0.148, 0.148, 0.003)\n MODULE_COVER_POSITION = geometry_msgs.msg.Point(z=MODULE_COVER_BOX_SIZE[2] / 2.0)\n module_cover_pose = geometry_msgs.msg.PoseStamped(\n header=std_msgs.msg.Header(\n frame_id=MODULE_COVER_FRAME_ID, stamp=rospy.Time.from_sec(rospy.get_time())\n ),\n pose=geometry_msgs.msg.Pose(position=MODULE_COVER_POSITION),\n )\n scene.add_box(\"module_cover\", module_cover_pose, MODULE_COVER_BOX_SIZE)\n\n ## Optional: Add `object` to the planning scene\n scene.add_box(\"object\", object_pose, object_size)\n\n\ndef pick(\n arm: moveit_commander.move_group.MoveGroupCommander,\n gripper: moveit_commander.move_group.MoveGroupCommander,\n scene: moveit_commander.planning_scene_interface.PlanningSceneInterface,\n object_pose: geometry_msgs.msg.PoseStamped,\n):\n \"\"\"\n Execute a subroutine for picking an object.\n \"\"\"\n\n ## 1: Increase velocity & accel. scaling (to 75%)\n arm.set_max_acceleration_scaling_factor(0.75)\n arm.set_max_velocity_scaling_factor(0.75)\n\n ## 2: Move arm to a pose goal above object (5 cm above the object centre)\n pre_grasp_pose = deepcopy(object_pose.pose)\n pre_grasp_pose.position.z += 0.05\n arm.set_pose_target(pre_grasp_pose)\n arm.go(wait=True)\n\n ## 3: Open the gripper\n gripper.go(gripper.get_named_target_values(\"open\"), wait=True)\n rospy.sleep(rospy.Duration(secs=2))\n\n ## 4: Reduce Cartesian speed (2 cm/s)\n arm.limit_max_cartesian_link_speed(0.02)\n\n ## 5: Descend arm via a Cartesian path (with TCP 2 cm above the object centre)\n grasp_pose = deepcopy(object_pose.pose)\n grasp_pose.position.z += 0.02\n (plan, fraction) = arm.compute_cartesian_path([grasp_pose], 0.005, 0.0)\n if fraction > 0.8:\n arm.execute(plan, wait=True)\n else:\n print(\n f\"Error: grasp - Unable to plan the full Cartesian path! Only {100*fraction}% of the path is feasible.\",\n file=sys.stderr,\n )\n exit(1)\n\n ## Optional: Attach object\n ## Note: Attaching the object enables to specify touch links, i.e. links that won't be checked for collisions. 
For this reason, it is beneficial to attach the object before closing the gripper\n scene.attach_box(\n link=\"phantomx_pincher_end_effector\",\n name=\"object\",\n touch_links=[\n \"phantomx_pincher_gripper_finger1_link\",\n \"phantomx_pincher_gripper_finger2_link\",\n ],\n )\n\n ## 6: Close the gripper (almost closed)\n gripper.go(gripper.get_named_target_values(\"closed\"), wait=True)\n\n ## 7: Ascend arm via a Cartesian path (re-use the pre-grasp pose)\n post_grasp_pose = pre_grasp_pose\n (plan, fraction) = arm.compute_cartesian_path([post_grasp_pose], 0.005, 0.0)\n if fraction > 0.8:\n arm.execute(plan, wait=True)\n else:\n print(\n f\"Error: post_grasp - Unable to plan the full Cartesian path! Only {100*fraction}% of the path is feasible.\",\n file=sys.stderr,\n )\n exit(1)\n\n\ndef place(\n arm: moveit_commander.move_group.MoveGroupCommander,\n gripper: moveit_commander.move_group.MoveGroupCommander,\n scene: moveit_commander.planning_scene_interface.PlanningSceneInterface,\n object_pose: geometry_msgs.msg.PoseStamped,\n):\n \"\"\"\n Execute a subroutine for placing an object.\n \"\"\"\n\n ## 1: Decrease velocity & accel. scaling (to 50%)\n arm.set_max_acceleration_scaling_factor(0.5)\n arm.set_max_velocity_scaling_factor(0.5)\n\n ## 2: Move arm to a joint config. for placing (here we sample a random joint configuration)\n elbow_angle = random.uniform(pi / 32, pi / 6)\n pre_place_joint_configuration = [\n random.uniform(-pi / 6, pi / 6),\n elbow_angle,\n pi / 2 - elbow_angle,\n pi / 2,\n ]\n arm.go(pre_place_joint_configuration, wait=True)\n\n ## 3: Descend arm via a Cartesian path (use the current pose and the same Z coordinate that was used during the grasp)\n place_pose = arm.get_current_pose().pose\n place_pose.position.z = object_pose.pose.position.z + 0.02\n (plan, fraction) = arm.compute_cartesian_path([place_pose], 0.005, 0.0)\n if fraction > 0.8:\n arm.execute(plan, wait=True)\n else:\n print(\n f\"Error: place - Unable to plan the full Cartesian path! Only {100*fraction}% of the path is feasible.\",\n file=sys.stderr,\n )\n exit(1)\n\n ## 4: Open the gripper\n gripper.go(gripper.get_named_target_values(\"open\"), wait=True)\n\n ## Optional: Detach object (planning scene)\n scene.remove_attached_object()\n\n ## 5: Increase Cartesian speed (4 cm/s)\n arm.limit_max_cartesian_link_speed(0.04)\n\n ## 6: Ascend arm via a Cartesian path (10 cm along +Z axis)\n post_place_pose = arm.get_current_pose().pose\n post_place_pose.position.z += 0.1\n # Replan the motion multiple times if it fails (for simulation, where the gripper can take longer to open)\n (plan, fraction) = arm.compute_cartesian_path([post_place_pose], 0.005, 0.0)\n if fraction > 0.8:\n arm.execute(plan, wait=True)\n else:\n print(\n f\"Error: post_place - Unable to plan the full Cartesian path! 
Only {100*fraction}% of the path is feasible.\",\n file=sys.stderr,\n )\n exit(1)\n\n ## 7: Close the gripper (partially)\n gripper.go([0.01] * 2, wait=True)\n\n\ndef main():\n\n # Initialize MoveIt Commander\n moveit_commander.roscpp_initialize(sys.argv)\n\n # Create node for this example\n rospy.init_node(\"phantomx_pincher_template\", anonymous=True)\n\n # Instantiate RobotCommander (outer-level interface to the robot)\n robot = moveit_commander.robot.RobotCommander()\n\n # Instantiate MoveGroupCommander (interface to one group of joints) for arm and gripper\n arm = moveit_commander.move_group.MoveGroupCommander(\"arm\", wait_for_servers=300.0)\n gripper = moveit_commander.move_group.MoveGroupCommander(\"gripper\")\n\n # Instantiate PlanningSceneInterface (interface to the world surrounding the robot)\n scene = moveit_commander.planning_scene_interface.PlanningSceneInterface(\n synchronous=True\n )\n\n ### Setup the Planning Scene ###\n object_relative_to_frame_id = arm.get_planning_frame()\n object_size = (0.015, 0.015, 0.015)\n # Offset of the task board surface from the ground plane (2cm profile + 4mm task board plate + 3mm module cover)\n TASK_BOARD_SURFACE_OFFSET = 0.02 + 0.004 + 0.003\n object_pose = geometry_msgs.msg.PoseStamped(\n header=std_msgs.msg.Header(\n frame_id=object_relative_to_frame_id,\n stamp=rospy.Time.from_sec(rospy.get_time()),\n ),\n pose=geometry_msgs.msg.Pose(\n position=geometry_msgs.msg.Point(\n x=0.1,\n z=TASK_BOARD_SURFACE_OFFSET\n + object_size[2] / 2.0\n + 0.0025, # +2.5mm tolerance to account for robot shakiness\n ),\n orientation=geometry_msgs.msg.Quaternion(x=1.0, y=0.0, z=0.0, w=0.0),\n ),\n )\n setup_planning_scene(scene=scene, object_pose=object_pose, object_size=object_size)\n\n # Increase gripper acceleration and velocity\n gripper.set_max_acceleration_scaling_factor(1.0)\n gripper.set_max_velocity_scaling_factor(1.0)\n\n while not rospy.is_shutdown():\n ### Pick ###\n pick(arm=arm, gripper=gripper, scene=scene, object_pose=object_pose)\n\n ### Place ###\n place(arm=arm, gripper=gripper, scene=scene, object_pose=object_pose)\n\n # Update the object pose (for repeated pick&place)\n __tmp_object_pos_z = object_pose.pose.position.z\n object_pose.pose = arm.get_current_pose().pose\n object_pose.pose.position.z = __tmp_object_pos_z\n\n # Ensure that there is no residual movement\n if not rospy.is_shutdown():\n arm.stop()\n\n # Shutdown MoveIt Commander and exit\n moveit_commander.roscpp_shutdown()\n exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "snt-spacer/phantomx_pincher", "sub_path": "phantomx_pincher_demos/examples/demo_pick_and_place.py", "file_name": "demo_pick_and_place.py", "file_ext": "py", "file_size_in_byte": 8893, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "21", "api": [{"api_name": "moveit_commander.planning_scene_interface", "line_number": 20, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg.msg", "line_number": 21, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 22, "usage_type": "name"}, {"api_name": "geometry_msgs.msg.msg.Point", "line_number": 36, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.msg", "line_number": 36, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg", "line_number": 36, "usage_type": "name"}, {"api_name": "geometry_msgs.msg.msg.PoseStamped", "line_number": 37, "usage_type": "call"}, {"api_name": 
"geometry_msgs.msg.msg", "line_number": 37, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg", "line_number": 37, "usage_type": "name"}, {"api_name": "std_msgs.msg.msg.Header", "line_number": 38, "usage_type": "call"}, {"api_name": "std_msgs.msg.msg", "line_number": 38, "usage_type": "attribute"}, {"api_name": "std_msgs.msg", "line_number": 38, "usage_type": "name"}, {"api_name": "rospy.Time.from_sec", "line_number": 39, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 39, "usage_type": "attribute"}, {"api_name": "rospy.get_time", "line_number": 39, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.msg.Pose", "line_number": 41, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.msg", "line_number": 41, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg", "line_number": 41, "usage_type": "name"}, {"api_name": "moveit_commander.move_group", "line_number": 50, "usage_type": "attribute"}, {"api_name": "moveit_commander.move_group", "line_number": 51, "usage_type": "attribute"}, {"api_name": "moveit_commander.planning_scene_interface", "line_number": 52, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg.msg", "line_number": 53, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg", "line_number": 53, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 64, "usage_type": "call"}, {"api_name": "rospy.sleep", "line_number": 71, "usage_type": "call"}, {"api_name": "rospy.Duration", "line_number": 71, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 77, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 85, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 111, "usage_type": "attribute"}, {"api_name": "moveit_commander.move_group", "line_number": 117, "usage_type": "attribute"}, {"api_name": "moveit_commander.move_group", "line_number": 118, "usage_type": "attribute"}, {"api_name": "moveit_commander.planning_scene_interface", "line_number": 119, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg.msg", "line_number": 120, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg", "line_number": 120, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 131, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 131, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 133, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 133, "usage_type": "name"}, {"api_name": "math.pi", "line_number": 135, "usage_type": "name"}, {"api_name": "math.pi", "line_number": 136, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 149, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 172, "usage_type": "attribute"}, {"api_name": "moveit_commander.roscpp_initialize", "line_number": 183, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 183, "usage_type": "attribute"}, {"api_name": "rospy.init_node", "line_number": 186, "usage_type": "call"}, {"api_name": "moveit_commander.robot.RobotCommander", "line_number": 189, "usage_type": "call"}, {"api_name": "moveit_commander.robot", "line_number": 189, "usage_type": "attribute"}, {"api_name": "moveit_commander.move_group.MoveGroupCommander", "line_number": 192, "usage_type": "call"}, {"api_name": "moveit_commander.move_group", "line_number": 192, "usage_type": "attribute"}, {"api_name": "moveit_commander.move_group.MoveGroupCommander", "line_number": 193, "usage_type": "call"}, {"api_name": "moveit_commander.move_group", "line_number": 193, 
"usage_type": "attribute"}, {"api_name": "moveit_commander.planning_scene_interface.PlanningSceneInterface", "line_number": 196, "usage_type": "call"}, {"api_name": "moveit_commander.planning_scene_interface", "line_number": 196, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg.msg.PoseStamped", "line_number": 205, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.msg", "line_number": 205, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg", "line_number": 205, "usage_type": "name"}, {"api_name": "std_msgs.msg.msg.Header", "line_number": 206, "usage_type": "call"}, {"api_name": "std_msgs.msg.msg", "line_number": 206, "usage_type": "attribute"}, {"api_name": "std_msgs.msg", "line_number": 206, "usage_type": "name"}, {"api_name": "rospy.Time.from_sec", "line_number": 208, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 208, "usage_type": "attribute"}, {"api_name": "rospy.get_time", "line_number": 208, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.msg.Pose", "line_number": 210, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.msg", "line_number": 210, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg", "line_number": 210, "usage_type": "name"}, {"api_name": "geometry_msgs.msg.msg.Point", "line_number": 211, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.msg", "line_number": 211, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg", "line_number": 211, "usage_type": "name"}, {"api_name": "geometry_msgs.msg.msg.Quaternion", "line_number": 217, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.msg", "line_number": 217, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg", "line_number": 217, "usage_type": "name"}, {"api_name": "rospy.is_shutdown", "line_number": 226, "usage_type": "call"}, {"api_name": "rospy.is_shutdown", "line_number": 239, "usage_type": "call"}, {"api_name": "moveit_commander.roscpp_shutdown", "line_number": 243, "usage_type": "call"}]} +{"seq_id": "33235270507", "text": "import gphoto2 as gp\nimport logging\nimport os\nimport shutil\nimport getpass\n\nfrom datetime import datetime\n\nglobal camera\n\nIMAGE_PATH = '/home/'+getpass.getuser()+'/Pictures/Canon_700D/'\n\nif not os.path.isdir(IMAGE_PATH):\n os.makedirs(IMAGE_PATH)\n\n\ndef init_camera():\n os.system(\"pkill vfsd-gphoto2\")\n logging.basicConfig(\n format='%(levelname)s: %(name)s: %(message)s', level=logging.WARNING)\n camera = gp.Camera()\n camera.init()\n\n\ndef set_shutter_speed(sh):\n config = camera.get_config()\n\n shutterspeed_config = gp.gp_widget_get_child_by_name(config, 'shutterspeed')\n shutterspeed_config.set_value(sh)\n\n camera.set_config(config)\n\n\ndef set_iso(iso):\n config = camera.get_config()\n\n shutterspeed_config = gp.gp_widget_get_child_by_name(config, 'iso')\n shutterspeed_config.set_value(iso)\n\n camera.set_config(config)\n\n\ndef capture_image(shutterspeed, iso):\n os.system('gphoto2 --set-config /main/capturesettings/shutterspeed=' + str(shutterspeed))\n os.system('gphoto2 --set-config /main/imgsettings/iso=' + str(iso))\n\n logging.basicConfig(\n format='%(levelname)s: %(name)s: %(message)s', level=logging.WARNING)\n camera = gp.Camera()\n camera.init()\n # config = camera.get_config()\n #\n # shutterspeed_config = gp.gp_widget_get_child_by_name(config, 'shutterspeed')\n # shutterspeed_config.set_value(shutter_speed)\n #\n # shutterspeed_config = gp.gp_widget_get_child_by_name(config, 'iso')\n # shutterspeed_config.set_value(iso)\n #\n # camera.set_config(config)\n\n file_path = 
camera.capture(gp.GP_CAPTURE_IMAGE)\n\n # SET FILE NAME\n data = datetime.now()\n data_str = 'PIC_' + data.strftime(\"%d-%m-%y--%H:%M:%S\") + \".jpg\"\n\n target = os.path.join('/tmp', file_path.name)\n\n camera_file = camera.file_get(file_path.folder, file_path.name, gp.GP_FILE_TYPE_NORMAL)\n camera_file.save(target)\n if not os.path.exists(IMAGE_PATH):\n os.mkdir(IMAGE_PATH)\n shutil.move(target, IMAGE_PATH)\n os.rename(IMAGE_PATH + file_path.name, IMAGE_PATH + data_str)\n camera.exit()\n return IMAGE_PATH + data_str\n", "repo_name": "MaksMacioszczyk/cameraControllGui", "sub_path": "cameraControll.py", "file_name": "cameraControll.py", "file_ext": "py", "file_size_in_byte": 2115, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "getpass.getuser", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 14, "usage_type": "call"}, {"api_name": "os.system", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 20, "usage_type": "attribute"}, {"api_name": "gphoto2.Camera", "line_number": 21, "usage_type": "call"}, {"api_name": "gphoto2.gp_widget_get_child_by_name", "line_number": 28, "usage_type": "call"}, {"api_name": "gphoto2.gp_widget_get_child_by_name", "line_number": 37, "usage_type": "call"}, {"api_name": "os.system", "line_number": 44, "usage_type": "call"}, {"api_name": "os.system", "line_number": 45, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 48, "usage_type": "attribute"}, {"api_name": "gphoto2.Camera", "line_number": 49, "usage_type": "call"}, {"api_name": "gphoto2.GP_CAPTURE_IMAGE", "line_number": 61, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 64, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "gphoto2.GP_FILE_TYPE_NORMAL", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 72, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 73, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "39741748266", "text": "\"\"\"elisa URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom rest_framework.routers import SimpleRouter\nfrom rest_framework_extensions.routers import ExtendedSimpleRouter\nfrom rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token\nfrom rest_framework_swagger.views import get_swagger_view\n\nimport school.views as school\nimport timetables.views as timetables\n\nrouter = SimpleRouter()\nrouter.register(r'groups', school.GroupViewSet)\nrouter.register(r'departments', school.DepartmentViewSet)\nrouter.register(r'courses', school.CourseViewSet)\nrouter.register(r'equipment', school.EquipmentViewSet)\nrouter.register(r'room-categories', school.RoomCategoryViewSet)\nrouter.register(r'rooms', school.RoomViewSet)\nrouter.register(r'activity-categories', school.ActivityCategoryViewSet)\nrouter.register(r'activities', school.ActivityViewSet)\n\ntimetables_router = ExtendedSimpleRouter()\ntimetables_router.register(\n r'timetables', timetables.TimetableViewSet,\n base_name='timetable').register(\n r'events',\n timetables.EventViewSet,\n base_name='timetables-event',\n parents_query_lookups=['timetable'])\n\nschema_view = get_swagger_view(title='Elisa API')\n\nurlpatterns = [\n url(r'^$', schema_view),\n url(r'^', include(router.urls)),\n url(r'^', include(timetables_router.urls)),\n url(r'^api-auth/', include('rest_framework.urls')),\n url(r'^api-token-auth/', obtain_jwt_token),\n url(r'^api-token-refresh/', refresh_jwt_token),\n url(r'^admin/', admin.site.urls),\n]\n", "repo_name": "matusjokay/Elisa", "sub_path": "docs/2016_racak_martin/cd/elisa/elisa-server/elisa/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2172, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "rest_framework.routers.SimpleRouter", "line_number": 26, "usage_type": "call"}, {"api_name": "school.views.GroupViewSet", "line_number": 27, "usage_type": "attribute"}, {"api_name": "school.views", "line_number": 27, "usage_type": "name"}, {"api_name": "school.views.DepartmentViewSet", "line_number": 28, "usage_type": "attribute"}, {"api_name": "school.views", "line_number": 28, "usage_type": "name"}, {"api_name": "school.views.CourseViewSet", "line_number": 29, "usage_type": "attribute"}, {"api_name": "school.views", "line_number": 29, "usage_type": "name"}, {"api_name": "school.views.EquipmentViewSet", "line_number": 30, "usage_type": "attribute"}, {"api_name": "school.views", "line_number": 30, "usage_type": "name"}, {"api_name": "school.views.RoomCategoryViewSet", "line_number": 31, "usage_type": "attribute"}, {"api_name": "school.views", "line_number": 31, "usage_type": "name"}, {"api_name": "school.views.RoomViewSet", "line_number": 32, "usage_type": "attribute"}, {"api_name": "school.views", "line_number": 32, "usage_type": "name"}, {"api_name": "school.views.ActivityCategoryViewSet", "line_number": 33, "usage_type": "attribute"}, {"api_name": "school.views", "line_number": 33, "usage_type": "name"}, {"api_name": "school.views.ActivityViewSet", "line_number": 34, "usage_type": "attribute"}, {"api_name": "school.views", "line_number": 34, "usage_type": "name"}, {"api_name": "rest_framework_extensions.routers.ExtendedSimpleRouter", "line_number": 36, "usage_type": "call"}, {"api_name": "timetables.views.TimetableViewSet", "line_number": 38, "usage_type": "attribute"}, {"api_name": "timetables.views", "line_number": 38, 
"usage_type": "name"}, {"api_name": "timetables.views.EventViewSet", "line_number": 41, "usage_type": "attribute"}, {"api_name": "timetables.views", "line_number": 41, "usage_type": "name"}, {"api_name": "rest_framework_swagger.views.get_swagger_view", "line_number": 45, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 48, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 49, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 49, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 50, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 50, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 51, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 51, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 52, "usage_type": "call"}, {"api_name": "rest_framework_jwt.views.obtain_jwt_token", "line_number": 52, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 53, "usage_type": "call"}, {"api_name": "rest_framework_jwt.views.refresh_jwt_token", "line_number": 53, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 54, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "29861391718", "text": "# MIDN 1/C Abby McGinn\n# Lab08\n\n# Professor Dias: I worked with my partner (1/C Berroteran) for several\n# hours on this lab. We attempted it first without looking at your code,\n# as neither of us even realized it was available. When we got stuck,\n# we looked at your code for guidance. I commented specifically which\n# portions we used from your code, and commented everything to show I\n# understand the code. We spent a lot of time looking things up to fully\n# understand what you did, and to try to do it differently.\n\nimport os\nimport hashlib\nimport datetime\nimport csv\n\nrootDirectory = \"/\" # so it walks through every file in the system\n# the list \"unhashable\" includes libraries we don't want to hash\n# I also added directories that continued to cause errors when I was testing\nunhashable = [\"vmlinuz\",\"bin\",\"dev\",\"proc\",\"run\",\"sys\",\"tmp\",\"var/lib\",\"var/run\"]\n\n# checks if the hash file already exists\nif os.path.isfile(\"hashedData.csv\"):\n print(\"Hash file already exists. 
Comparing against current data.\")\n\n    # initializes a list to hold items that represent changes from the old hash data\n    updateList = []\n\n    # open the old hash file\n    with open(\"hashedData.csv\") as old_file:\n        # read the lines so we can easily handle it later when comparing\n        old_hash_data = old_file.readlines()\n\n    # open the csv file for writing\n    f_new = open(\"hashedData.csv\",\"w\")\n    # walk through ALL the files\n    for root, dirs, files in os.walk(rootDirectory):\n        # skip the unhashable directories (clear dirs in place so os.walk does not descend into them)\n        if root.strip('/') in unhashable:\n            dirs[:] = []\n            files = []\n        # walking through every file we want to hash\n        for name in files:\n            strAppend = os.path.join(root,name)\n            hash = hashlib.sha256()\n            try:\n                f_file = open(strAppend,'rb')\n            except:\n                continue\n            # we used your code as an example of using the buffer\n            while True:\n                # we read in 4096-byte chunks, which matches the typical\n                # Linux filesystem block size\n                holder = f_file.read(4096)\n                if not holder:\n                    break\n                hash.update(holder)\n            f_file.close()\n\n            # grab the current time (using imported lib)\n            time = str(datetime.datetime.now())\n            hash_update = hash.hexdigest()\n            # strAppend is what we are building to append to the file\n            strAppend += str(\" \"+hash_update)\n            strAppend += str(\" \"+time)\n            strAppend += \"\\n\"\n\n            # here we check to see if the hash exists in old hash data\n            # if it is not in any line of the old data, that means this\n            # file HAS been altered, so we want to add it to the updateList\n            # (checking all lines at once avoids duplicate entries)\n            if not any(hash_update in line for line in old_hash_data):\n                updateList.append(strAppend)\n            f_new.write(strAppend)\n    f_new.close()\n    print(\"The following items were changed: \\n\")\n    for update in updateList:\n        print(update+\"\\n\")\n\nelse:\n    # else, we do the same process, but do not check for updates\n    print(\"Hash file does not exist. 
Creating hash file.\")\n    f_new = open(\"hashedData.csv\",\"w\")\n    for root, dirs, files in os.walk(rootDirectory):\n        if root.strip('/') in unhashable:\n            dirs[:] = []\n            files = []\n        for name in files:\n            strAppend = os.path.join(root,name)\n            hash = hashlib.sha256()\n            try:\n                f_file = open(strAppend,'rb')\n            except:\n                #f_file.close()\n                continue\n            while True:\n                holder = f_file.read(4096)\n                if not holder:\n                    break\n                hash.update(holder)\n            f_file.close()\n            time = str(datetime.datetime.now())\n            strAppend += str(\" \"+hash.hexdigest())\n            strAppend += str(\" \"+time)\n            strAppend += \"\\n\"\n            f_new.write(strAppend)\nf_new.close()\n", "repo_name": "m214386/sy402_lab08", "sub_path": "hash.py", "file_name": "hash.py", "file_ext": "py", "file_size_in_byte": 4076, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "21", "api": [{"api_name": "glob.glob", "line_number": 10, "usage_type": "call"}, {"api_name": "json.load", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "19969538194", "text": "import random\nfrom enum import Enum\nfrom typing import Dict, List, Union\n\nfrom faker import Faker\n\nfake = Faker()\n\n\nclass WeightClass(Enum):\n STRAWWEIGHT = 0\n FLYWEIGHT = 1\n BANTAMWEIGHT = 2\n FEATHERWEIGHT = 3\n LIGHTWEIGHT = 4\n WELTERWEIGHT = 5\n MIDDLEWEIGHT = 6\n LIGHTHEAVYWEIGHT = 7\n HEAVYWEIGHT = 8\n\n\nclass GenderType(Enum):\n MALE = \"M\"\n FEMALE = \"F\"\n\n\nclass Age(Enum):\n MIN = 18\n MAX = 40\n\n\nclass Height:\n MIN = [155, 165, 168, 168, 170, 175, 175, 180, 180]\n MAX = [170, 175, 180, 183, 190, 190, 193, 198, 201]\n\n\nclass Reach:\n MEAN = [165, 170, 171, 179, 184, 186, 190, 198, 200]\n SIGMA = [5] * 9\n\n\nclass Weight:\n MIN = [155, 115, 125, 135, 145, 155, 170, 185, 205]\n MAX = [115, 125, 135, 145, 155, 170, 185, 205, 265]\n\n\nclass Fighter:\n def __init__(self, weight_class: WeightClass, gender: GenderType) -> None:\n self.weight_class = weight_class.value\n self.gender = gender.value\n self.record = [0, 0, 0]\n\n @property\n def name(self) -> str:\n return (\n fake.name_male()\n if self.gender == GenderType.MALE.value\n else fake.name_female()\n )\n\n @property\n def age(self) -> int:\n return random.randint(Age.MIN.value, Age.MAX.value)\n\n @property\n def height(self) -> int:\n return random.randint(\n Height.MIN[self.weight_class], Height.MAX[self.weight_class]\n )\n\n @property\n def reach(self) -> int:\n return round(\n random.normalvariate(\n Reach.MEAN[self.weight_class], Reach.SIGMA[self.weight_class]\n ),\n 0,\n )\n\n @property\n def weight(self) -> float:\n return round(\n random.uniform(\n Weight.MIN[self.weight_class], Weight.MAX[self.weight_class]\n ),\n 1,\n )\n\n def add_win(self) -> List[int]:\n self.record[0] += 1\n return self.record\n\n def add_loss(self) -> List[int]:\n self.record[1] += 1\n return self.record\n\n def add_draw(self) -> List[int]:\n self.record[2] += 1\n return self.record\n\n def get_data(self) -> Dict[str, Union[int, float, str]]:\n map_weight_class = {\n 0: \"Strawweight\",\n 1: \"Flyweight\",\n 2: \"Bantamweight\",\n 3: \"Featherweight\",\n 4: \"Lightweight\",\n 5: \"Welterweight\",\n 6: \"Middleweight\",\n 7: \"Light heavyweight\",\n 8: \"Heavyweight\",\n }\n\n data = {\n \"name\": self.name,\n \"gender\": self.gender,\n \"age\": self.age,\n \"height\": self.height,\n \"reach\": self.reach,\n \"weight\": self.weight,\n \"record\": self.record,\n \"weight_class\": map_weight_class[self.weight_class],\n }\n\n return data\n\n\nif __name__ == \"__main__\":\n fighter = Fighter(WeightClass.BANTAMWEIGHT, GenderType.FEMALE)\n print(fighter.get_data())\n", "repo_name": "viniap/ufc-fights-generator", "sub_path": "src/fighter.py", "file_name": "fighter.py", "file_ext": "py", "file_size_in_byte": 2996, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "faker.Faker", "line_number": 7, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 10, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 22, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 27, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 63, "usage_type": "call"}, {"api_name": 
"random.randint", "line_number": 67, "usage_type": "call"}, {"api_name": "random.normalvariate", "line_number": 74, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 83, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 89, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 101, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 101, "usage_type": "name"}]} +{"seq_id": "43184047292", "text": "import json \nfrom dsat.data import create_rrd_if , open_rrd, write_rrd\nfrom dsat.state import Connector\nfrom time import time\nimport sys\ncfg = json.load(open(\"rrd.json\"))\nrrd_l = dict()\n\nfor rrd_db in cfg.keys():\n if rrd_db.startswith(\"_\"):\n continue\n else:\n this_cfg = cfg[rrd_db]\n if isinstance(this_cfg, dict) and cfg[rrd_db].get(\"source\"):\n try:\n create_rrd_if(cfg, rrd_db)\n except IOError:\n pass\n finally:\n rrd_l[rrd_db] = open_rrd(cfg, rrd_db)\n\n\nlast_seen=dict()\n\ndef rrd(cnx, payload, msg):\n now = time()\n if not msg[\"type\"] in last_seen:\n last_seen[msg[\"type\"]] = now\n if now - last_seen[msg[\"type\"]] >= 1:\n write_rrd(rrd_l[msg[\"type\"]], cfg, msg[\"type\"], payload)\n last_seen[msg[\"type\"]] = now\n\nConnector(rrd).turbine()\n", "repo_name": "jul/dsat", "sub_path": "odprobe/rrd.py", "file_name": "rrd.py", "file_ext": "py", "file_size_in_byte": 863, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "json.load", "line_number": 6, "usage_type": "call"}, {"api_name": "dsat.data.create_rrd_if", "line_number": 16, "usage_type": "call"}, {"api_name": "dsat.data.open_rrd", "line_number": 20, "usage_type": "call"}, {"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "dsat.data.write_rrd", "line_number": 30, "usage_type": "call"}, {"api_name": "dsat.state.Connector", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "16932587464", "text": "# -*- coding: utf-8 -*-\n# @Time : 2022/7/22 21:27\n# @Author : Bingshuai Liu\nimport torch\nfrom torch.utils.data import Dataset\n\n\nclass SentimentDataSet(Dataset):\n def __init__(self, features, mode, device):\n self.len = len(features)\n self.input_ids = [torch.tensor(feature.input_ids).long().to(device) for feature in features]\n self.input_mask = [torch.tensor(feature.input_mask).float().to(device) for feature in features]\n self.segment_ids = [torch.tensor(feature.segment_ids).float().to(device) for feature in features]\n self.label_id = None\n if mode == 'train' or 'test':\n self.label_id = [torch.tensor(feature.label_id).to(device) for feature in features]\n\n def __len__(self):\n return self.len\n\n def __getitem__(self, item):\n data = {'input_ids': self.input_ids[item],\n 'input_mask': self.input_mask[item],\n 'segment_ids': self.segment_ids[item]}\n if self.label_id is not None:\n data['label_id'] = self.label_id[item]\n return data\n", "repo_name": "bingshuailiu/BERT_SENTIMENT_CLASSIFICATION", "sub_path": "preprocess/SentimentDataSet.py", "file_name": "SentimentDataSet.py", "file_ext": "py", "file_size_in_byte": 1066, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 11, "usage_type": "call"}, {"api_name": 
"torch.tensor", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "32852284361", "text": "from django.utils.unittest.case import TestCase\nfrom corehq.apps.users.models import WebUser\nfrom corehq.apps.domain.shortcuts import create_domain\nfrom django.test.client import Client\nfrom django.core.urlresolvers import reverse\nimport os\nfrom StringIO import StringIO\nfrom dimagi.utils.post import tmpfile\nfrom couchforms.models import SubmissionErrorLog, XFormInstance\nfrom couchforms.signals import xform_saved\n\ndef _clear_all_forms(domain):\n for item in SubmissionErrorLog.view(\"couchforms/all_submissions_by_domain\",\n reduce=False,\n include_docs=True,\n startkey=[domain, \"by_type\"],\n endkey=[domain, \"by_type\", {}],\n wrapper=lambda row: SubmissionErrorLog.wrap(row['doc'])).all():\n\n item.delete()\n\nclass SubmissionErrorTest(TestCase):\n \n def setUp(self):\n self.domain = create_domain(\"submit-errors\")\n self.couch_user = WebUser.create(None, \"test\", \"foobar\")\n self.couch_user.add_domain_membership(self.domain.name, is_admin=True)\n self.couch_user.save()\n self.client = Client()\n self.client.login(**{'username': 'test', 'password': 'foobar'})\n self.url = reverse(\"receiver_post\", args=[self.domain])\n _clear_all_forms(self.domain.name)\n \n def tearDown(self):\n self.couch_user.delete()\n self.domain.delete()\n _clear_all_forms(self.domain.name)\n \n def _submit(self, formname):\n file_path = os.path.join(os.path.dirname(__file__), \"data\", formname)\n with open(file_path, \"rb\") as f:\n return self.client.post(self.url, {\n \"xml_submission_file\": f\n })\n \n def testSubmitBadAttachmentType(self):\n res = self.client.post(self.url, {\n \"xml_submission_file\": \"this isn't a file\"\n })\n self.assertEqual(400, res.status_code)\n self.assertTrue(\"xml_submission_file\" in res.content)\n \n def testSubmitDuplicate(self):\n file = os.path.join(os.path.dirname(__file__), \"data\", \"simple_form.xml\")\n with open(file) as f:\n res = self.client.post(self.url, {\n \"xml_submission_file\": f\n })\n self.assertEqual(201, res.status_code)\n self.assertTrue(\"Thanks for submitting\" in res.content)\n \n with open(file) as f:\n res = self.client.post(self.url, {\n \"xml_submission_file\": f\n })\n self.assertEqual(201, res.status_code)\n self.assertTrue(\"Form is a duplicate\" in res.content)\n \n # make sure we logged it\n log = SubmissionErrorLog.view(\"couchforms/all_submissions_by_domain\",\n reduce=False,\n include_docs=True,\n startkey=[self.domain.name, \"by_type\", \"XFormDuplicate\"],\n endkey=[self.domain.name, \"by_type\", \"XFormDuplicate\", {}],\n classes={'XFormDuplicate': SubmissionErrorLog}).one()\n \n self.assertTrue(log is not None)\n self.assertTrue(\"Form is a duplicate\" in log.problem)\n with open(file) as f:\n self.assertEqual(f.read(), log.get_xml())\n \n \n def testSubmissionError(self):\n evil_laugh = \"mwa ha ha!\"\n \n def fail(sender, xform, **kwargs):\n raise Exception(evil_laugh)\n \n xform_saved.connect(fail)\n \n try: \n file = os.path.join(os.path.dirname(__file__), \"data\", \"simple_form.xml\")\n with open(file) as f:\n res = self.client.post(self.url, {\n \"xml_submission_file\": f\n })\n self.assertEqual(201, res.status_code)\n self.assertTrue(evil_laugh in res.content)\n \n # make sure we logged it\n log = SubmissionErrorLog.view(\"couchforms/all_submissions_by_domain\",\n reduce=False,\n 
include_docs=True,\n startkey=[self.domain.name, \"by_type\", \"XFormError\"],\n endkey=[self.domain.name, \"by_type\", \"XFormError\", {}],\n classes={'XFormError': SubmissionErrorLog}).one()\n \n self.assertTrue(log is not None)\n self.assertTrue(evil_laugh in log.problem)\n with open(file) as f:\n self.assertEqual(f.read(), log.get_xml())\n \n finally:\n xform_saved.disconnect(fail)\n \n def testSubmitBadXML(self):\n f, path = tmpfile()\n with f:\n f.write(\"this isn't even close to xml\")\n with open(path) as f:\n res = self.client.post(self.url, {\n \"xml_submission_file\": f\n })\n self.assertEqual(500, res.status_code)\n self.assertIn('Invalid XML', res.content)\n \n # make sure we logged it\n log = SubmissionErrorLog.view(\"couchforms/all_submissions_by_domain\",\n reduce=False,\n include_docs=True,\n startkey=[self.domain.name, \"by_type\", \"SubmissionErrorLog\"],\n endkey=[self.domain.name, \"by_type\", \"SubmissionErrorLog\", {}]).one()\n \n self.assertTrue(log is not None)\n self.assertIn('Invalid XML', log.problem)\n self.assertEqual(\"this isn't even close to xml\", log.get_xml())\n \n", "repo_name": "gmimano/commcaretest", "sub_path": "corehq/apps/receiverwrapper/tests/test_submit_errors.py", "file_name": "test_submit_errors.py", "file_ext": "py", "file_size_in_byte": 5827, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "couchforms.models.SubmissionErrorLog.view", "line_number": 13, "usage_type": "call"}, {"api_name": "couchforms.models.SubmissionErrorLog", "line_number": 13, "usage_type": "name"}, {"api_name": "couchforms.models.SubmissionErrorLog.wrap", "line_number": 18, "usage_type": "call"}, {"api_name": "couchforms.models.SubmissionErrorLog", "line_number": 18, "usage_type": "name"}, {"api_name": "django.utils.unittest.case.TestCase", "line_number": 22, "usage_type": "name"}, {"api_name": "corehq.apps.domain.shortcuts.create_domain", "line_number": 25, "usage_type": "call"}, {"api_name": "corehq.apps.users.models.WebUser.create", "line_number": 26, "usage_type": "call"}, {"api_name": "corehq.apps.users.models.WebUser", "line_number": 26, "usage_type": "name"}, {"api_name": "django.test.client.Client", "line_number": 29, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 54, "usage_type": "call"}, {"api_name": "couchforms.models.SubmissionErrorLog.view", "line_number": 70, "usage_type": "call"}, {"api_name": "couchforms.models.SubmissionErrorLog", "line_number": 70, "usage_type": "name"}, {"api_name": "couchforms.models.SubmissionErrorLog", "line_number": 75, "usage_type": "name"}, {"api_name": "couchforms.signals.xform_saved.connect", "line_number": 89, "usage_type": "call"}, {"api_name": "couchforms.signals.xform_saved", "line_number": 89, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 92, "usage_type": "call"}, {"api_name": 
"couchforms.models.SubmissionErrorLog.view", "line_number": 101, "usage_type": "call"}, {"api_name": "couchforms.models.SubmissionErrorLog", "line_number": 101, "usage_type": "name"}, {"api_name": "couchforms.models.SubmissionErrorLog", "line_number": 106, "usage_type": "name"}, {"api_name": "couchforms.signals.xform_saved.disconnect", "line_number": 114, "usage_type": "call"}, {"api_name": "couchforms.signals.xform_saved", "line_number": 114, "usage_type": "name"}, {"api_name": "dimagi.utils.post.tmpfile", "line_number": 117, "usage_type": "call"}, {"api_name": "couchforms.models.SubmissionErrorLog.view", "line_number": 128, "usage_type": "call"}, {"api_name": "couchforms.models.SubmissionErrorLog", "line_number": 128, "usage_type": "name"}]} +{"seq_id": "16264913326", "text": "from django.shortcuts import render\nfrom Webapp import forms\nfrom Webapp.models import Religion \nfrom django.http import HttpResponse\n\n\n# Create your views here.\ndef religion_view(request):\n print(request.method)\n if request.method==\"POST\":\n form= forms.ReligionForm(request.POST,request.FILES)\n print(request.POST)\n print(request.FILES)\n if form.is_valid():\n form.save()\n return HttpResponse(\"Data has been inserted\")\n return render(request,'MyApp/base.html',{'form':form})\n form=forms.ReligionForm()\n\n return render(request,'MyApp/base.html',{'form':form})\ndef display_view(request):\n if request.method==\"POST\":\n\n form=forms.InputForm(request.POST)\n # print(request.POST)\n if form.is_valid():\n bars= request.POST['bars']\n stripes= request.POST['stripes']\n colours= request.POST['colours']\n red= request.POST['red']\n green= request.POST['green']\n blue= request.POST['blue']\n gold= request.POST['gold']\n white= request.POST['white']\n black= request.POST['black']\n orange= request.POST['orange']\n circles= request.POST['circles']\n crosses= request.POST['crosses']\n saltires= request.POST['saltires']\n quarters= request.POST['quarters']\n sunstars= request.POST['sunstars']\n crescent= request.POST['crescent']\n triangle= request.POST['triangle']\n icon= request.POST['icon']\n animate= request.POST['animate']\n text= request.POST['text']\n import numpy as np\n pre_data = [bars,stripes,colours,red , green , blue, gold , white , black , orange ,circles,crosses,\n saltires,quarters,sunstars,crescent,triangle,icon,animate,text ]\n test_data = np.array([[int(i) for i in pre_data]])\n \n op=random_algorithm(test_data)\n var=Religion.objects.get(religion_id=1)\n print(op)\n return render(request,'MyApp/base.html',{'form':form, 'op':var})\n form=forms.InputForm()\n return render(request,'MyApp/base.html',{'form':form})\n\n\n\ndef random_algorithm(test1):\n print(test1)\n import numpy as np \n \n import pandas as pd \n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data\"\n names = ['name','landmass','zone','area','population','language','religion', 'bars','stripes',\n 'colours','red','green','blue','gold','white','black','orange','mainhue','circles','crosses','saltires','quarters',\n 'sunstars','crescent','triangle','icon','animate','text','topleft','botright']\n dataset=pd.read_csv(url,names=names)\n # print(dataset.head(10))\n dataset.head(10)\n # X = dataset.drop(['name','landmass','zone','area','population','language','religion','mainhue','topleft','botright'],axis=1)\n # y = dataset[['religion']]\n feature_cols=['bars' , 'stripes', 'colours' , 'red' , 'green', 'blue' , 'gold' , 'white' , 'black' , 'orange' ,'circles', 'crosses' , 'saltires' ,'quarters' , 'sunstars', 
'crescent' , 'triangle' , 'icon','animate', 'text' ]\n X=dataset[feature_cols]\n y = dataset[['religion']]\n from sklearn.model_selection import train_test_split\n X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.03 , random_state=0)\n from sklearn.preprocessing import StandardScaler\n sc = StandardScaler()\n X_train = sc.fit_transform(X_train)\n X_test = sc.transform(X_test)\n from sklearn.ensemble import RandomForestClassifier\n\n clf = RandomForestClassifier(n_estimators=194, random_state=4)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(sc.transform(test1))\n # print(y_pred)\n # y_pred = clf.predict([[0,0,2,1,0,0,1,0,0,0,0,0,0,0,5,0,0,0,0,0]])\n # print(X_test.shape)\n # print(X_test)\n # print(y_pred.shape)\n # print(y_pred)\n return y_pred\n\n", "repo_name": "SreekanthTeja/Identifying_religion_flags", "sub_path": "Webapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3963, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "Webapp.forms.ReligionForm", "line_number": 11, "usage_type": "call"}, {"api_name": "Webapp.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 17, "usage_type": "call"}, {"api_name": "Webapp.forms.ReligionForm", "line_number": 18, "usage_type": "call"}, {"api_name": "Webapp.forms", "line_number": 18, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "Webapp.forms.InputForm", "line_number": 24, "usage_type": "call"}, {"api_name": "Webapp.forms", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "Webapp.models.Religion.objects.get", "line_number": 53, "usage_type": "call"}, {"api_name": "Webapp.models.Religion.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "Webapp.models.Religion", "line_number": 53, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "Webapp.forms.InputForm", "line_number": 56, "usage_type": "call"}, {"api_name": "Webapp.forms", "line_number": 56, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 70, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 79, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 81, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "13242141496", "text": "import pathlib\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Generator, List\n\nimport pytest\n\nfrom .factories import DicomDirFactory, DicomFactory\n\n\n@dataclass(frozen=True)\nclass DicomFolder:\n path: pathlib.Path\n files: List[pathlib.Path]\n\n\nDicomFolderGenerator = Callable[[int, bool], DicomFolder]\n\n\n@pytest.fixture\ndef dicom_folder(tmp_path: pathlib.Path) -> DicomFolderGenerator:\n # Number of images to create\n def generate(\n number: int = 5,\n dicomdir: bool = False,\n ) -> DicomFolder:\n files = []\n\n for k in range(number):\n filename = tmp_path.joinpath(\"Image%04d\" % k)\n dicom = DicomFactory.build_with_defaults()\n dicom.save_as(filename, write_like_original=False)\n 
files.append(filename)\n\n # Add DicomDir file\n if dicomdir:\n d = DicomDirFactory.build()\n filename = tmp_path.joinpath(\"DICOMDIR\")\n d.save_as(filename, write_like_original=False)\n files.append(filename)\n\n return DicomFolder(path=tmp_path, files=files)\n\n return generate\n\n\n@pytest.fixture\ndef dicom_file(\n tmp_path: pathlib.Path, **kwargs: Any\n) -> Generator[pathlib.Path, None, None]:\n filename = tmp_path.joinpath(\"dicom\")\n\n dicom = DicomFactory.build_with_defaults(**kwargs)\n dicom.save_as(filename, write_like_original=False)\n\n yield filename\n", "repo_name": "dicomsort/dicomsorter", "sub_path": "tests/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 1408, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pathlib.Path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 13, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "dataclasses.dataclass", "line_number": 10, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 16, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "factories.DicomFactory.build_with_defaults", "line_number": 30, "usage_type": "call"}, {"api_name": "factories.DicomFactory", "line_number": 30, "usage_type": "name"}, {"api_name": "factories.DicomDirFactory.build", "line_number": 36, "usage_type": "call"}, {"api_name": "factories.DicomDirFactory", "line_number": 36, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 48, "usage_type": "name"}, {"api_name": "factories.DicomFactory.build_with_defaults", "line_number": 52, "usage_type": "call"}, {"api_name": "factories.DicomFactory", "line_number": 52, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 46, "usage_type": "attribute"}, {"api_name": "typing.Generator", "line_number": 49, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 49, "usage_type": "attribute"}]} +{"seq_id": "73889717172", "text": "import numpy as np\nfrom sklearn.metrics import accuracy_score\n\nfrom data.utils import load_data, normalize_adj\n\nclass LGCLP:\n def __init__(self, alpha=0.98, iter=20):\n self.f = None\n self.alpha = alpha\n self.iter = iter\n\n def fit(self, adj_m, Y):\n self.f = np.zeros(Y.shape)\n L = normalize_adj(adj_m)\n for _ in range(self.iter):\n self.f = (self.alpha*L).dot(self.f) + (1 - self.alpha)*Y\n \n def predict(self, x_indexes):\n return np.argmax(self.f[x_indexes], axis=1)\n\n def evaluate(self, Y_test, test_mask):\n test_index = np.nonzero(test_mask)\n y_true = np.argmax(Y_test[test_mask, :], axis=1)\n y_predicted = self.predict(test_index)\n return accuracy_score(y_true, y_predicted)\n\n\nif __name__ == \"__main__\":\n adj, _, Y_train, Y_val, Y_test, train_mask, val_mask, test_mask = load_data(\"citeseer\")\n model = LGCLP()\n model.fit(adj, Y_train)\n print(model.evaluate(Y_test, test_mask))\n\n\n\n\n\n ", "repo_name": "CoderPat/CAM-MPHIL-ACS", "sub_path": "MLADM-17-18/proj2/lgclp.py", "file_name": "lgclp.py", "file_ext": "py", "file_size_in_byte": 1006, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.zeros", 
"line_number": 13, "usage_type": "call"}, {"api_name": "data.utils.normalize_adj", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 25, "usage_type": "call"}, {"api_name": "data.utils.load_data", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "27965162473", "text": "from flask import Flask, jsonify, request, send_from_directory\r\nfrom flask_cors import CORS\r\n\r\nfrom wallet import Wallet\r\nfrom blockchain import Blockchain\r\n\r\napp = Flask(__name__)\r\nCORS(app)\r\n\r\n\r\n@app.route('/', methods=['GET'])\r\ndef get_node_ui():\r\n return send_from_directory('ui', 'node.html')\r\n\r\n\r\n@app.route('/network', methods=['GET'])\r\ndef get_network_ui():\r\n return send_from_directory('ui', 'network.html')\r\n\r\n\r\n@app.route('/wallet', methods=['POST'])\r\ndef create_keys():\r\n wallet.create_keys()\r\n if wallet.save_keys():\r\n # Create our blockchain using a newly created public key\r\n # Use global blockchain, don't create a new local variable\r\n global blockchain\r\n blockchain = Blockchain(wallet.public_key, port)\r\n response = {\r\n 'public_key': wallet.public_key,\r\n 'private_key': wallet.private_key,\r\n 'funds': blockchain.get_balance()\r\n }\r\n return jsonify(response), 201\r\n else:\r\n reponse = {\r\n 'message': 'Saving the keys failed.'\r\n }\r\n return jsonify(response), 500\r\n\r\n\r\n@app.route('/wallet', methods=['GET'])\r\ndef load_keys():\r\n if wallet.load_keys():\r\n # Create our blockchain using a newly created public key\r\n # Use global blockchain, don't create a new local variable\r\n global blockchain\r\n blockchain = Blockchain(wallet.public_key, port)\r\n response = {\r\n 'public_key': wallet.public_key,\r\n 'private_key': wallet.private_key,\r\n 'funds': blockchain.get_balance()\r\n }\r\n return jsonify(response), 201\r\n else:\r\n reponse = {\r\n 'message': 'Loading the keys failed.'\r\n }\r\n return jsonify(response), 500\r\n\r\n\r\n@app.route('/balance', methods=['GET'])\r\ndef get_balance():\r\n balance = blockchain.get_balance()\r\n if balance != None:\r\n response = {\r\n 'message': 'Fetched balance successfully.',\r\n 'funds': balance\r\n }\r\n return jsonify(response), 201\r\n else:\r\n response = {\r\n 'message': 'Loading balance failed.',\r\n 'wallet_set_up': wallet.public_key != None\r\n }\r\n return jsonify(response), 500\r\n\r\n\r\n@app.route('/transaction', methods=['POST'])\r\ndef add_transaction():\r\n # Check to make sure we have a wallet to begin with\r\n if wallet.public_key == None:\r\n response = {\r\n 'message': 'No wallet setup.'\r\n }\r\n return jsonify(response), 400\r\n # Function given by the Flask package that gives us data attached to the incoming\r\n # request if it is in the json format. Creates a dict\r\n values = request.get_json()\r\n if not values:\r\n response = {\r\n 'message': 'No data found.'\r\n }\r\n return jsonify(response), 400\r\n required_fields = ['recipient', 'amount']\r\n # Check to make sure all required fields are apart of the incoming values. 
If it\r\n # does not contain all the required fields then return error\r\n if not all(field in values for field in required_fields):\r\n response = {\r\n 'message': 'Required data is missing.'\r\n }\r\n return jsonify(response), 400\r\n recipient = values['recipient']\r\n amount = values['amount']\r\n # Create signature using request data\r\n signature = wallet.sign_transaction(wallet.public_key, recipient, amount)\r\n # Add new transaction\r\n success = blockchain.add_transaction(recipient, wallet.public_key, signature, amount)\r\n if success:\r\n response = {\r\n 'message': 'Successfully added transaction.',\r\n 'transaction': {\r\n 'sender': wallet.public_key,\r\n 'recipient': recipient,\r\n 'amount': amount,\r\n 'signature': signature\r\n },\r\n 'funds': blockchain.get_balance()\r\n }\r\n return jsonify(response), 201\r\n else:\r\n response = {\r\n 'message': 'Creating a transaction failed.'\r\n }\r\n return jsonify(response), 500\r\n\r\n\r\n@app.route('/mine', methods=['POST'])\r\ndef mine():\r\n # Don't mine a block if there are conflicts we need to resolve\r\n if blockchain.resolve_conflicts:\r\n response = {'message': 'Resolve conflicts first, block not added!'}\r\n return jsonify(response), 409\r\n block = blockchain.mine_block()\r\n if block != None:\r\n dict_block = block.__dict__.copy()\r\n dict_block['transactions'] = [tx.__dict__ for tx in dict_block['transactions']]\r\n response = {\r\n 'message': 'Block added successfully.',\r\n 'block': dict_block,\r\n 'funds': blockchain.get_balance()\r\n }\r\n return jsonify(response), 201\r\n else:\r\n response = {\r\n 'message': 'Adding a block failed.',\r\n 'wallet_set_up': wallet.public_key != None\r\n }\r\n return jsonify(response), 500\r\n\r\n\r\n@app.route('/resolve-conflicts', methods=['POST'])\r\ndef resolve_conflicts():\r\n replaced = blockchain.resolve()\r\n if replaced:\r\n response = {'message': 'Chain was replaced!'}\r\n else:\r\n response = {'message': 'Local chain kept!'}\r\n return jsonify(response), 200\r\n\r\n\r\n@app.route('/broadcast-transaction', methods=['POST'])\r\ndef broadcast_transaction():\r\n # Extract values\r\n values = request.get_json()\r\n if not values:\r\n response = {'message': 'No data found.'}\r\n return jsonify(response), 400\r\n required = ['sender', 'recipient', 'amount', 'signature']\r\n if not all(key in values for key in required):\r\n response = {'message': 'Some data is missing.'}\r\n return jsonify(response), 400\r\n success = blockchain.add_transaction(values['recipient'], values['sender'], values['signature'], values['amount'], is_receiving = True)\r\n if success:\r\n response = {\r\n 'message': 'Successfully added transaction.',\r\n 'transaction': {\r\n 'sender': values['sender'],\r\n 'recipient': values['recipient'],\r\n 'amount': values['amount'],\r\n 'signature': values['signature']\r\n }\r\n }\r\n return jsonify(response), 201\r\n else:\r\n response = {\r\n 'message': 'Creating a transaction failed.'\r\n }\r\n return jsonify(response), 500\r\n\r\n\r\n@app.route('/broadcast-block', methods=['POST'])\r\ndef broadcast_block():\r\n # Extract json data to dictionary\r\n values = request.get_json()\r\n if not values:\r\n response = {'message': 'No data found.'}\r\n return jsonify(response), 400\r\n if 'block' not in values:\r\n response = {'message': 'Some data is missing.'}\r\n return jsonify(response), 400\r\n block = values['block']\r\n # Check to see if the index we receive is equal to our local blockchain's\r\n # last block index + 1\r\n if block['index'] == blockchain.chain[-1].index + 
1:\r\n if blockchain.add_block(block):\r\n response = {'message': 'Block added'}\r\n return jsonify(response), 201\r\n else:\r\n response = {'message': 'Block seems invalid.'}\r\n # Http 409 - conflict\r\n return jsonify(response), 409\r\n # Blockchain we are receiving is longer than ours and we need to catch up\r\n elif block['index'] > blockchain.chain[-1].index:\r\n response = {'message': 'Blockchain seems to differ from local blockchain'}\r\n blockchain.resolve_conflicts = True\r\n # Still send a success code because it is an issue with our node, the request was successful\r\n return jsonify(response), 200\r\n # Blockchain we are receiving is shorter than ours and therefore insignificant \r\n else:\r\n response = {'message': 'Blockchain seems to be shorter, block not added'}\r\n # HTTP 409 = data sent invalid\r\n return jsonify(response), 409\r\n\r\n\r\n\r\n@app.route('/transactions', methods=['GET'])\r\ndef get_open_transaction():\r\n # Return the list of transaction objects\r\n transactions = blockchain.get_open_transactions()\r\n # Convert transaction objects to dictionary representation\r\n dict_transactions = [tx.__dict__ for tx in transactions]\r\n return jsonify(dict_transactions), 200\r\n\r\n\r\n@app.route('/chain', methods=['GET'])\r\ndef get_chain():\r\n chain_snapshot = blockchain.chain\r\n dict_chain = [block.__dict__.copy() for block in chain_snapshot]\r\n for dict_block in dict_chain:\r\n dict_block['transactions'] = [tx.__dict__ for tx in dict_block['transactions']]\r\n return jsonify(dict_chain), 200\r\n\r\n\r\n@app.route('/node', methods=['POST'])\r\ndef add_node():\r\n values = request.get_json()\r\n # Checking for values\r\n if not values:\r\n response = {\r\n 'message': 'No data attached.'\r\n }\r\n return jsonify(response), 400\r\n # Checking for node. 
Remember: get_json() yields a dictionary, therefore we are looking for keys\r\n if 'node' not in values:\r\n response = {\r\n 'message': 'No node data found.'\r\n }\r\n return jsonify(response), 400\r\n node = values['node']\r\n blockchain.add_peer_node(node)\r\n response = {\r\n 'message': 'Node added successfully.',\r\n 'all_nodes': blockchain.get_peer_nodes()\r\n }\r\n return jsonify(response), 201\r\n\r\n\r\n@app.route('/node/<node_url>', methods=['DELETE'])\r\ndef remove_node(node_url):\r\n # Check to see if there is a node to remove\r\n if node_url == '' or node_url == None:\r\n response = {\r\n 'message': 'No node found.'\r\n }\r\n return jsonify(response), 400\r\n blockchain.remove_peer_nodes(node_url)\r\n response = {\r\n 'message': 'Node removed',\r\n 'all_nodes': blockchain.get_peer_nodes()\r\n }\r\n return jsonify(response), 200\r\n\r\n\r\n@app.route('/nodes', methods=['GET'])\r\ndef get_nodes():\r\n nodes = blockchain.get_peer_nodes()\r\n response = {\r\n 'all_nodes': nodes\r\n }\r\n return jsonify(response), 200\r\n\r\n\r\nif __name__ == '__main__':\r\n from argparse import ArgumentParser\r\n parser = ArgumentParser()\r\n parser.add_argument('-p', '--port', type=int, default=5000)\r\n # Give list of parsed in arguments\r\n args = parser.parse_args()\r\n port = args.port\r\n # Initialize the wallet as none\r\n wallet = Wallet(port)\r\n # Create the blockchain with the initialized 'none' wallet\r\n blockchain = Blockchain(wallet.public_key, port)\r\n app.run(host='0.0.0.0', port=port)", "repo_name": "wallacetyler/blockchain-project", "sub_path": "node.py", "file_name": "node.py", "file_ext": "py", "file_size_in_byte": 10447, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 18, "usage_type": "call"}, {"api_name": "wallet.create_keys", "line_number": 23, "usage_type": "call"}, {"api_name": "wallet.save_keys", "line_number": 24, "usage_type": "call"}, {"api_name": "blockchain.Blockchain", "line_number": 28, "usage_type": "call"}, {"api_name": "wallet.public_key", "line_number": 28, "usage_type": "attribute"}, {"api_name": "wallet.public_key", "line_number": 30, "usage_type": "attribute"}, {"api_name": "wallet.private_key", "line_number": 31, "usage_type": "attribute"}, {"api_name": "blockchain.get_balance", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 39, "usage_type": "call"}, {"api_name": "wallet.load_keys", "line_number": 44, "usage_type": "call"}, {"api_name": "blockchain.Blockchain", "line_number": 48, "usage_type": "call"}, {"api_name": "wallet.public_key", "line_number": 48, "usage_type": "attribute"}, {"api_name": "wallet.public_key", "line_number": 50, "usage_type": "attribute"}, {"api_name": "wallet.private_key", "line_number": 51, "usage_type": "attribute"}, {"api_name": "blockchain.get_balance", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 59, "usage_type": "call"}, {"api_name": "blockchain.get_balance", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 70, "usage_type": 
"call"}, {"api_name": "wallet.public_key", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 76, "usage_type": "call"}, {"api_name": "wallet.public_key", "line_number": 82, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 102, "usage_type": "call"}, {"api_name": "wallet.sign_transaction", "line_number": 106, "usage_type": "call"}, {"api_name": "wallet.public_key", "line_number": 106, "usage_type": "attribute"}, {"api_name": "blockchain.add_transaction", "line_number": 108, "usage_type": "call"}, {"api_name": "wallet.public_key", "line_number": 108, "usage_type": "attribute"}, {"api_name": "wallet.public_key", "line_number": 113, "usage_type": "attribute"}, {"api_name": "blockchain.get_balance", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 125, "usage_type": "call"}, {"api_name": "blockchain.resolve_conflicts", "line_number": 131, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 133, "usage_type": "call"}, {"api_name": "blockchain.mine_block", "line_number": 134, "usage_type": "call"}, {"api_name": "blockchain.get_balance", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 143, "usage_type": "call"}, {"api_name": "wallet.public_key", "line_number": 147, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 149, "usage_type": "call"}, {"api_name": "blockchain.resolve", "line_number": 154, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 159, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 165, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 165, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 172, "usage_type": "call"}, {"api_name": "blockchain.add_transaction", "line_number": 173, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 184, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 189, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 195, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 195, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 198, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 201, "usage_type": "call"}, {"api_name": "blockchain.chain", "line_number": 205, "usage_type": "attribute"}, {"api_name": "blockchain.add_block", "line_number": 206, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 208, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 212, "usage_type": "call"}, {"api_name": "blockchain.chain", "line_number": 214, "usage_type": "attribute"}, {"api_name": "blockchain.resolve_conflicts", "line_number": 216, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 218, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 223, "usage_type": "call"}, {"api_name": "blockchain.get_open_transactions", "line_number": 230, "usage_type": "call"}, {"api_name": "flask.jsonify", 
"line_number": 233, "usage_type": "call"}, {"api_name": "blockchain.chain", "line_number": 238, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 242, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 247, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 247, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 253, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 259, "usage_type": "call"}, {"api_name": "blockchain.add_peer_node", "line_number": 261, "usage_type": "call"}, {"api_name": "blockchain.get_peer_nodes", "line_number": 264, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 266, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 276, "usage_type": "call"}, {"api_name": "blockchain.remove_peer_nodes", "line_number": 277, "usage_type": "call"}, {"api_name": "blockchain.get_peer_nodes", "line_number": 280, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 282, "usage_type": "call"}, {"api_name": "blockchain.get_peer_nodes", "line_number": 287, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 291, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 296, "usage_type": "call"}, {"api_name": "wallet.Wallet", "line_number": 302, "usage_type": "call"}, {"api_name": "blockchain.Blockchain", "line_number": 304, "usage_type": "call"}, {"api_name": "wallet.public_key", "line_number": 304, "usage_type": "attribute"}]} +{"seq_id": "28000078677", "text": "import sqlalchemy as sa\nimport sqlalchemy_utils\nfrom alembic import op\nfrom sqlalchemy.engine.reflection import Inspector\n\n# revision identifiers, used by Alembic.\nrevision = '9848d0149abd'\ndown_revision = '843bc79c426f'\nbranch_labels = ()\ndepends_on = None\n\n\ndef upgrade():\n \"\"\"Upgrade database.\"\"\"\n op.create_table(\n 'accounts_role',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=80), nullable=True),\n sa.Column('description', sa.String(length=255), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table(\n 'accounts_user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('email', sa.String(length=255), nullable=True),\n sa.Column('password', sa.String(length=255), nullable=True),\n sa.Column('active', sa.Boolean(name='active'), nullable=True),\n sa.Column('confirmed_at', sa.DateTime(), nullable=True),\n sa.Column('last_login_at', sa.DateTime(), nullable=True),\n sa.Column('current_login_at', sa.DateTime(), nullable=True),\n sa.Column('last_login_ip',\n sqlalchemy_utils.types.ip_address.IPAddressType(),\n nullable=True),\n sa.Column('current_login_ip',\n sqlalchemy_utils.types.ip_address.IPAddressType(),\n nullable=True),\n sa.Column('login_count', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email')\n )\n op.create_table(\n 'accounts_user_session_activity',\n sa.Column('created', sa.DateTime(), nullable=False),\n sa.Column('updated', sa.DateTime(), nullable=False),\n sa.Column('sid_s', sa.String(length=255), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(\n ['user_id'], [u'accounts_user.id'],\n name='fk_accounts_session_activity_user_id',\n ),\n sa.PrimaryKeyConstraint('sid_s')\n )\n op.create_table(\n 'accounts_userrole',\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('role_id', sa.Integer(), nullable=True),\n 
sa.ForeignKeyConstraint(\n ['role_id'], ['accounts_role.id'],\n name='fk_accounts_userrole_role_id',\n ),\n sa.ForeignKeyConstraint(\n ['user_id'], ['accounts_user.id'],\n name='fk_accounts_userrole_user_id',\n ),\n )\n with op.batch_alter_table('transaction') as batch_op:\n batch_op.add_column(sa.Column(\n 'user_id',\n sa.Integer(),\n sa.ForeignKey(\n 'accounts_user.id', name='fk_transaction_accounts_user_id'),\n nullable=True,\n ))\n batch_op.create_index(\n op.f('ix_transaction_user_id'), ['user_id'], unique=False\n )\n\n\ndef downgrade():\n \"\"\"Downgrade database.\"\"\"\n ctx = op.get_context()\n insp = Inspector.from_engine(ctx.connection.engine)\n\n for fk in insp.get_foreign_keys('transaction'):\n if fk['referred_table'] == 'accounts_user':\n op.drop_constraint(\n op.f(fk['name']), 'transaction', type_='foreignkey'\n )\n\n with op.batch_alter_table('transaction') as batch_op:\n batch_op.drop_index(op.f('ix_transaction_user_id'))\n batch_op.drop_column('user_id')\n op.drop_table('accounts_userrole')\n op.drop_table('accounts_user_session_activity')\n op.drop_table('accounts_user')\n op.drop_table('accounts_role')\n", "repo_name": "N03/invenio", "sub_path": ".virtualenvs/invenio/lib/python2.7/site-packages/invenio_accounts/alembic/9848d0149abd_create_accounts_tables.py", "file_name": "9848d0149abd_create_accounts_tables.py", "file_ext": "py", "file_size_in_byte": 3583, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "alembic.op.create_table", "line_number": 15, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 15, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 31, "usage_type": "call"}, {"api_name": 
"sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy_utils.types.ip_address.IPAddressType", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy_utils.types", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy_utils.types.ip_address.IPAddressType", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy_utils.types", "line_number": 36, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 40, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 42, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 42, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 44, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 44, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 45, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 45, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 46, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 46, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 47, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 47, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 52, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 54, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 54, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 56, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 56, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 57, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 57, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 62, "usage_type": "call"}, {"api_name": "alembic.op.batch_alter_table", "line_number": 67, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 67, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 68, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 70, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 71, "usage_type": "call"}, {"api_name": "alembic.op.f", "line_number": 76, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 76, "usage_type": "name"}, {"api_name": "alembic.op.get_context", "line_number": 82, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 82, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.reflection.Inspector.from_engine", "line_number": 83, "usage_type": "call"}, {"api_name": "sqlalchemy.engine.reflection.Inspector", "line_number": 83, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 87, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 87, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 88, "usage_type": "call"}, 
{"api_name": "alembic.op", "line_number": 88, "usage_type": "name"}, {"api_name": "alembic.op.batch_alter_table", "line_number": 91, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 91, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 92, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 92, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 94, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 94, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 95, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 95, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 96, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 96, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 97, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 97, "usage_type": "name"}]} +{"seq_id": "72287186613", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\nwith open('putty.log') as f:\n data = f.readlines()\n data = [i for i in data if not (i.startswith('=') or i.startswith('COM'))]\n\non_hex = [i.rstrip() for i in data]\ndata = [int(i, 16) for i in on_hex]\n\nlenght = len(data)\n\n\nx = range(len(data[:lenght]))\ny = data[:lenght]\n\nplt.stem(x, y, use_line_collection=True)\nplt.show()\n\n_data = np.array(data[:lenght])\n\nnew_data = _data - (max(_data)-min(_data)) /2\ny = new_data\n\nplt.stem(x, y, use_line_collection=True)\nplt.show()\n\nfrom scipy.fftpack import fft\n# Number of sample points\nN = lenght\n# sample spacing\nT = 1.0 / 500.0\nx = np.linspace(0.0, N*T, N)\ny = y #np.sin(50.0 * 2.0*np.pi*x) + 0.5*np.sin(80.0 * 2.0*np.pi*x)\nyf = fft(y)\nxf = np.linspace(0.0, 1.0/(2.0*T), N//20)\n\nplt.plot(xf, 2.0/N * np.abs(yf[0:N//20]))\nplt.grid()\nplt.show()\n\npass", "repo_name": "ericksc/PSOC", "sub_path": "process_log.py", "file_name": "process_log.py", "file_ext": "py", "file_size_in_byte": 849, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "matplotlib.pyplot.stem", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.stem", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 33, "usage_type": "call"}, {"api_name": "scipy.fftpack.fft", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 40, "usage_type": "name"}]} +{"seq_id": "24436162057", "text": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport datetime\nimport json\nimport csv\nimport StringIO\nfrom collections import OrderedDict\n\nfrom .exceptions import *\nfrom .bundle import Bundle\nfrom .utils import *\n\n\nclass Serializer(object):\n \"\"\"\n A swappable class for serialization.\n\n This handles most types of data as well as the following output formats::\n\n * json\n * jsonp\n * xml\n * yaml\n * html\n * plist (see http://explorapp.com/biplist/)\n\n It was designed to make changing behavior easy, either by overridding the\n various format methods (i.e. ``to_json``), by changing the\n ``formats/content_types`` options or by altering the other hook methods.\n \"\"\"\n formats = ['json', 'html', 'csv' ]\n content_types = {\n 'json': 'application/json',\n 'html': 'text/html',\n 'csv': 'text/csv',\n }\n\n def __init__(self, formats=None, content_types=None, datetime_formatting=None):\n self.supported_formats = []\n self.datetime_formatting = 'iso-8601'\n\n if formats is not None:\n self.formats = formats\n\n if content_types is not None:\n self.content_types = content_types\n\n if datetime_formatting is not None:\n self.datetime_formatting = datetime_formatting\n\n for format in self.formats:\n try:\n self.supported_formats.append(self.content_types[format])\n except KeyError:\n raise ConfigurationError(\"Content type for specified type '{}' not found. Please provide it at either the class level or via the arguments.\".format( format ) )\n\n def get_mime_for_format(self, format):\n \"\"\"\n Given a format, attempts to determine the correct MIME type.\n\n If not available on the current ``Serializer``, returns\n ``application/json`` by default.\n \"\"\"\n try:\n return self.content_types[format]\n except KeyError:\n return 'application/json'\n\n def format_datetime(self, data):\n \"\"\"\n A hook to control how datetimes are formatted.\n\n Default is ``iso-8601``, which looks like \"2010-12-16T03:02:14\".\n \"\"\"\n data = make_aware( data )\n return data.isoformat()\n\n def format_date(self, data):\n \"\"\"\n A hook to control how dates are formatted.\n\n Default is ``iso-8601``, which looks like \"2010-12-16\".\n \"\"\"\n return data.isoformat()\n\n def format_time(self, data):\n \"\"\"\n A hook to control how times are formatted.\n\n Default is ``iso-8601``, which looks like \"03:02:14\".\n \"\"\"\n data = make_aware( data )\n return data.isoformat()\n\n def serialize( self, bundle, format='application/json', options=None ):\n \"\"\"\n Given some data and a format, calls the correct method to serialize\n the data and returns the result.\n \"\"\"\n desired_format = None\n\n for short_format, long_format in self.content_types.items():\n if format == long_format:\n if hasattr(self, \"to_%s\" % short_format):\n desired_format = short_format\n break\n\n if desired_format is None:\n raise UnsupportedFormat(\"The format indicated '{}' had no available serialization method. 
Please check your ``formats`` and ``content_types`` on your Serializer.\".format( format ) )\n\n        serialized = getattr(self, \"to_{}\".format( desired_format ) )( bundle, options )\n        return serialized\n\n    def deserialize(self, content, format='application/json'):\n        \"\"\"\n        Given some data and a format, calls the correct method to deserialize\n        the data and returns the result.\n        \"\"\"\n        desired_format = None\n\n        format = format.split(';')[0]\n\n        for short_format, long_format in self.content_types.items():\n            if format == long_format:\n                if hasattr(self, \"from_%s\" % short_format):\n                    desired_format = short_format\n                    break\n\n        if desired_format is None:\n            raise UnsupportedFormat(\"The format indicated '{}' had no available deserialization method. Please check your ``formats`` and ``content_types`` on your Serializer.\".format( format ) )\n\n        deserialized = getattr(self, \"from_%s\" % desired_format)(content)\n        return deserialized\n\n    def to_simple( self, data ):\n        \"\"\"\n        For a piece of data, attempts to recognize it and provide a simplified\n        form of something complex.\n\n        This brings complex Python data structures down to native types of the\n        serialization format(s).\n        \"\"\"\n        if isinstance( data, ( list, tuple ) ):\n            return [ self.to_simple( item ) for item in data ]\n        if isinstance(data, dict):\n            return dict( ( key, self.to_simple( val ) ) for ( key, val ) in data.iteritems() )\n        elif isinstance(data, Bundle):\n            return dict( ( key, self.to_simple( val ) ) for ( key, val ) in data.data.iteritems() )\n        elif isinstance( data, datetime.datetime ):\n            return self.format_datetime( data )\n        elif isinstance( data, datetime.date ):\n            return self.format_date( data )\n        elif isinstance( data, datetime.time ):\n            return self.format_time( data )\n        elif isinstance( data, bool ):\n            return data\n        elif type( data ) in ( long, int, float ):\n            return data\n        elif data is None:\n            return None\n        else:\n            return unicode( data )\n\n    def to_json( self, data, options ):\n        \"\"\"\n        Given some Python data, produces JSON output.\n        \"\"\"\n        options = options or {}\n        data = self.to_simple( data )\n        return json.dumps( data, sort_keys=True )\n\n    def from_json( self, content ):\n        \"\"\"\n        Given some JSON data, returns a Python dictionary of the decoded data.\n        \"\"\"\n        return json.loads(content)\n\n    def to_jsonp( self, data, options ):\n        \"\"\"\n        Given some Python data, produces JSON output wrapped in the provided\n        callback.\n        \"\"\"\n        options = options or {}\n        return '{}({})'.format( options['callback'], self.to_json( data, options ) )\n\n    def to_html( self, data, options ):\n        \"\"\"\n        Reserved for future usage.\n\n        Provide HTML output of a resource, making an API available to a browser.\n        \"\"\"\n        options = options or {}\n        data = self.to_simple( data )\n        js = json.dumps( data, sort_keys=True, indent=4 )\n        html = '{}
'.format( js )\n return html\n\n def from_html( self, content ):\n \"\"\"\n Reserved for future usage.\n\n The desire is to handle form-based (maybe Javascript?) input, making an\n API available to a browser. This is on the TODO list but not currently\n implemented.\n \"\"\"\n pass\n\n def to_csv( self, data, options ):\n data = self.to_simple( data )\n raw_data = StringIO.StringIO()\n rows = []\n\n def getByDotNotation( obj, ref ):\n val = obj\n for key in ref.split( '.' ):\n if val and key in val:\n val = val[ key ]\n else:\n val = None\n break\n\n return val\n\n # Transform the data to the format specified in `options`. If `options` is not specified, just take the objects.\n if 'objects' in data:\n if isinstance( options, OrderedDict ):\n for row in data[ 'objects' ]:\n item = OrderedDict()\n\n for name, field in options.items():\n value = getByDotNotation( row, field )\n if isinstance( value, basestring ):\n value = value.encode( 'utf-8' )\n item[ name ] = value\n\n rows.append( item )\n else:\n rows = data[ 'objects' ]\n\n if rows and isinstance( rows, list ) and len( rows ):\n writer = csv.DictWriter( raw_data, rows[0].keys(), dialect='excel', extrasaction='ignore' )\n writer.writeheader()\n writer.writerows( rows )\n elif options:\n writer = csv.DictWriter( raw_data, options.keys(), extrasaction='ignore', quoting=csv.QUOTE_NONNUMERIC )\n writer.writeheader()\n\n return raw_data.getvalue()\n\n def from_csv( self, content ):\n raw_data = StringIO.StringIO(content)\n data = []\n for item in csv.DictReader(raw_data):\n data.append(item)\n return data\n\n\ndef get_type_string(data):\n \"\"\"\n Translates a Python data type into a string format.\n \"\"\"\n data_type = type(data)\n\n if data_type in (int, long):\n return 'integer'\n elif data_type == float:\n return 'float'\n elif data_type == bool:\n return 'boolean'\n elif data_type in (list, tuple):\n return 'list'\n elif data_type == dict:\n return 'hash'\n elif data is None:\n return 'null'\n elif isinstance(data, basestring):\n return 'string'\n", "repo_name": "gpd-today/TastyMongo", "sub_path": "tastymongo/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 9244, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "bundle.Bundle", "line_number": 148, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 150, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 152, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 154, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 171, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 177, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 195, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 211, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 227, "usage_type": "argument"}, {"api_name": "collections.OrderedDict", "line_number": 229, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 242, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 246, "usage_type": "call"}, {"api_name": "csv.QUOTE_NONNUMERIC", "line_number": 246, "usage_type": "attribute"}, {"api_name": "StringIO.StringIO", "line_number": 252, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 254, "usage_type": "call"}]} +{"seq_id": "32684649026", "text": 
"########################################################################\n# File name: list_presence.py\n# This file is part of: aioxmpp\n#\n# LICENSE\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program. If not, see\n# .\n#\n########################################################################\nimport asyncio\n\nfrom datetime import timedelta\n\nimport aioxmpp.presence\n\nfrom framework import Example, exec_example\n\n\nclass PresenceCollector:\n def __init__(self, done_timeout=timedelta(seconds=1)):\n self.presences = []\n self.done_future = asyncio.Future()\n self.done_timeout = done_timeout\n self._reset_timer()\n\n def _reset_timer(self):\n self._done_task = asyncio.ensure_future(\n asyncio.sleep(self.done_timeout.total_seconds())\n )\n self._done_task.add_done_callback(self._sleep_done)\n\n def _sleep_done(self, task):\n try:\n task.result()\n except asyncio.CancelledError:\n return\n self.done_future.set_result(self.presences)\n\n def add_presence(self, pres):\n self.presences.append(pres)\n self._done_task.cancel()\n self._reset_timer()\n\n\nclass ListPresence(Example):\n def make_simple_client(self):\n client = super().make_simple_client()\n self.collector = PresenceCollector()\n client.stream.register_presence_callback(\n aioxmpp.PresenceType.AVAILABLE,\n None,\n self.collector.add_presence,\n )\n client.stream.register_presence_callback(\n aioxmpp.PresenceType.UNAVAILABLE,\n None,\n self.collector.add_presence,\n )\n\n return client\n\n async def run_simple_example(self):\n print(\"collecting presences... 
\")\n self.presences = await self.collector.done_future\n\n async def run_example(self):\n await super().run_example()\n\n print(\"found presences:\")\n for i, pres in enumerate(self.presences):\n print(\"presence {}\".format(i))\n print(\" peer: {}\".format(pres.from_))\n print(\" type: {}\".format(pres.type_))\n print(\" show: {}\".format(pres.show))\n print(\" status: \")\n for lang, text in pres.status.items():\n print(\" (lang={}) {!r}\".format(\n lang,\n text))\n\n\nif __name__ == \"__main__\":\n exec_example(ListPresence())\n", "repo_name": "horazont/aioxmpp", "sub_path": "examples/list_presence.py", "file_name": "list_presence.py", "file_ext": "py", "file_size_in_byte": 2996, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 215, "dataset": "github-code", "pt": "21", "api": [{"api_name": "datetime.timedelta", "line_number": 32, "usage_type": "call"}, {"api_name": "asyncio.Future", "line_number": 34, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 39, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "asyncio.CancelledError", "line_number": 47, "usage_type": "attribute"}, {"api_name": "framework.Example", "line_number": 57, "usage_type": "name"}, {"api_name": "aioxmpp.presence.PresenceType", "line_number": 62, "usage_type": "attribute"}, {"api_name": "aioxmpp.presence", "line_number": 62, "usage_type": "name"}, {"api_name": "aioxmpp.presence.PresenceType", "line_number": 67, "usage_type": "attribute"}, {"api_name": "aioxmpp.presence", "line_number": 67, "usage_type": "name"}, {"api_name": "framework.exec_example", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "26835443002", "text": "import glob\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport os\nfrom sklearn.model_selection import train_test_split\n\n\ndef data_show(data_dir):\n\n x_plt = []\n y_plt = []\n for directory in os.listdir(data_dir):\n x_plt.append(directory)\n y_plt.append(len(os.listdir(os.path.join(data_dir, directory))))\n\n # creating the bar plot\n flg, ax = plt.subplots(figsize=(16, 16))\n plt.barh(x_plt, y_plt, color='maroon')\n\n # remove x,y ticks\n ax.xaxis.set_ticks_position('none')\n ax.yaxis.set_ticks_position('none')\n\n # add padding between axis and labels\n ax.xaxis.set_tick_params(pad=5)\n ax.yaxis.set_tick_params(pad=10)\n\n # show top values\n ax.invert_yaxis()\n\n plt.ylabel('Bark types')\n plt.xlabel('Num of images')\n plt.title('Bark texture dataset')\n plt.show()\n\n\ndef data_split(data_dir):\n each_path = []\n label_list = os.listdir(data_dir)\n\n for i in range(len(label_list)):\n # each_path[i]에 각 라벨의 전체 이미지 경로 저장\n each_label = label_list[i]\n each_path.append(glob.glob(os.path.join(data_dir, each_label, '*.jpg')))\n # print(len(each_path[i])) # check each image amount\n\n each_train_data, each_val_list = train_test_split(each_path[i], test_size=0.2, random_state=99)\n each_val_data, each_test_data = train_test_split(each_val_list, test_size=0.5, random_state=99)\n\n count = 1\n for j in range(len(each_train_data)):\n train_image = Image.open(each_train_data[j])\n image_name = each_label + f'_{count}'\n os.makedirs('./data/train/' + each_label, exist_ok=True)\n train_image.save('./data/train/' + each_label + f'/{image_name}.png')\n count += 1\n\n for j in range(len(each_val_data)):\n val_image = Image.open(each_val_data[j])\n image_name = each_label + f'_{count}'\n os.makedirs('./data/val/' + each_label, exist_ok=True)\n val_image.save('./data/val/' + each_label + 
f'/{image_name}.png')\n            count += 1\n\n        for j in range(len(each_test_data)):\n            test_image = Image.open(each_test_data[j])\n            image_name = each_label + f'_{count}'\n            os.makedirs('./data/test/' + each_label, exist_ok=True)\n            test_image.save('./data/test/' + each_label + f'/{image_name}.png')\n            count += 1\n\n\ndata_dir = 'D:/dataset'\n# data_show(data_dir)\n# data_split(data_dir)\n", "repo_name": "NoirCade/MS-AI-School", "sub_path": "65일차/data_split.py", "file_name": "data_split.py", "file_ext": "py", "file_size_in_byte": 2468, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.listdir", "line_number": 12, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.barh", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 39, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 48, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 52, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 52, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 54, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 59, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 59, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 61, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 66, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 66, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "38190188549", "text": "import sys\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel, QPushButton, QLineEdit, QHBoxLayout, QVBoxLayout\r\nfrom PyQt5.QtCore import Qt\r\n\r\nclass Calculator(QWidget):\r\n    def __init__(self):\r\n        super().__init__()\r\n        \r\n        # Set the window title\r\n        self.setWindowTitle(\"Calculator\")\r\n        \r\n        # Create a label to display the result\r\n        self.result_label = QLabel(\"0\")\r\n        self.result_label.setObjectName(\"result_label\")\r\n        self.result_label.setAlignment(Qt.AlignRight)\r\n        \r\n        # Create the number 
and operator buttons\r\n        self.buttons = []\r\n        for i in range(10):\r\n            button = QPushButton(str(i))\r\n            button.setObjectName(\"num_button\")\r\n            button.clicked.connect(self.num_click)\r\n            self.buttons.append(button)\r\n        \r\n        self.add_button = QPushButton(\"+\")\r\n        self.add_button.setObjectName(\"op_button\")\r\n        self.add_button.clicked.connect(self.op_click)\r\n        \r\n        self.subtract_button = QPushButton(\"-\")\r\n        self.subtract_button.setObjectName(\"op_button\")\r\n        self.subtract_button.clicked.connect(self.op_click)\r\n        \r\n        self.multiply_button = QPushButton(\"x\")\r\n        self.multiply_button.setObjectName(\"op_button\")\r\n        self.multiply_button.clicked.connect(self.op_click)\r\n        \r\n        self.divide_button = QPushButton(\"/\")\r\n        self.divide_button.setObjectName(\"op_button\")\r\n        self.divide_button.clicked.connect(self.op_click)\r\n        \r\n        self.clear_button = QPushButton(\"Clear\")\r\n        self.clear_button.setObjectName(\"clear_button\")\r\n        self.clear_button.clicked.connect(self.clear_click)\r\n        \r\n        self.equal_button = QPushButton(\"=\")\r\n        self.equal_button.setObjectName(\"equal_button\")\r\n        self.equal_button.clicked.connect(self.equal_click)\r\n        \r\n        # Create the layout that holds the buttons\r\n        num_layout = QVBoxLayout()\r\n        row1 = QHBoxLayout()\r\n        row1.addWidget(self.buttons[7])\r\n        row1.addWidget(self.buttons[8])\r\n        row1.addWidget(self.buttons[9])\r\n        num_layout.addLayout(row1)\r\n        row2 = QHBoxLayout()\r\n        row2.addWidget(self.buttons[4])\r\n        row2.addWidget(self.buttons[5])\r\n        row2.addWidget(self.buttons[6])\r\n        num_layout.addLayout(row2)\r\n        row3 = QHBoxLayout()\r\n        row3.addWidget(self.buttons[1])\r\n        row3.addWidget(self.buttons[2])\r\n        row3.addWidget(self.buttons[3])\r\n        num_layout.addLayout(row3)\r\n        row4 = QHBoxLayout()\r\n        row4.addWidget(self.buttons[0])\r\n        row4.addWidget(self.clear_button)\r\n        num_layout.addLayout(row4)\r\n        \r\n        op_layout = QVBoxLayout()\r\n        op_layout.addWidget(self.add_button)\r\n        op_layout.addWidget(self.subtract_button)\r\n        op_layout.addWidget(self.multiply_button)\r\n        op_layout.addWidget(self.divide_button)\r\n        op_layout.addWidget(self.equal_button)\r\n        \r\n        # Set up the main layout\r\n        main_layout = QVBoxLayout()\r\n        main_layout.addWidget(self.result_label)\r\n        main_layout.addLayout(num_layout)\r\n        main_layout.addLayout(op_layout)\r\n        \r\n        self.setLayout(main_layout)\r\n        \r\n        # Set the stylesheet for the buttons and label\r\n        self.setStyleSheet(\"\"\"\r\n            #num_button {\r\n                background-color: #f0f0f0;\r\n                border: none;\r\n                font-size: 20px;\r\n                height: 50px;\r\n                width: 50px;\r\n            }\r\n            \r\n            #op_button, #equal_button {\r\n                background-color: #f0f0f0;\r\n                border: none;\r\n                font-size: 20px;\r\n                height: 50px;\r\n                width: 50px;\r\n                margin-top: 10px;\r\n            }\r\n\"\"\")\r\n\r\n", "repo_name": "ajitular/vigilant-system", "sub_path": "Calculator/Calculator.py", "file_name": "Calculator.py", "file_ext": "py", "file_size_in_byte": 3825, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 5, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 13, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignRight", "line_number": 15, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 15, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 25, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", 
"line_number": 29, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 33, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 37, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 41, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 45, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 50, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 51, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 56, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 61, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 66, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 71, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "21329042590", "text": "import requests\nfrom lxml import etree\nimport json\nimport re\nimport math\nimport time\nfrom datetime import datetime\nfrom datetime import timedelta\nimport logging\nimport traceback\nrequests.adapters.DEFAULT_RETRIES = 5\n\n# 设置日志记录\nLOG_FORMAT = \"%(asctime)s %(filename)s %(levelname)s %(lineno)d %(message)s \" # 配置输出日志格式\nDATE_FORMAT = '%Y-%m-%d %H:%M:%S ' # 配置输出时间的格式,注意月份和天数不要搞乱了\nfile_name = r\"./../jiemian/jiemian-{}.log\".format(str(datetime.now()).split(' ')[0])\nlogging.basicConfig(level=logging.DEBUG,\n format=LOG_FORMAT,\n datefmt=DATE_FORMAT,\n filename=file_name, # 有了filename参数就不会直接输出显示到控制台,而是直接写入文件\n )\nheadle = logging.FileHandler(filename=file_name, encoding='utf-8')\nlogger = logging.getLogger()\nlogger.addHandler(headle)\nnow_time = str(datetime.now()).split(' ')[0].replace('-', '_')\n\n\nclass JieMianSpider():\n def __init__(self):\n # 爬取新闻列表的headers\n self.headers_one = {\n 'Accept':'*/*',\n 'Accept-Encoding':'gzip, deflate, sdch',\n 'Accept-Language':'zh-CN,zh;q=0.8',\n # 'Cookie':'pgv_pvi=8618249216; pgv_si=s6958286848; SERVERID=10.70.50.21',\n 'Host':'a.jiemian.com',\n 'Referer':'https://www.jiemian.com/lists/51.html',\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36'\n }\n # 爬取评论的headers\n self.headers_two = {\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n # 'Cookie': 'pgv_pvi=8618249216; pgv_si=s6958286848; SERVERID=10.70.50.21',\n 'Host': 'a.jiemian.com',\n 'Referer': 'https://www.jiemian.com/article/2598846.html',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36'\n }\n # 获取新闻页的headers\n self.headers_three = {\n 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Encoding':'gzip, deflate, sdch',\n 'Accept-Language':'zh-CN,zh;q=0.8',\n 'Cache-Control':'max-age=0',\n # 'Cookie':'__jsluid=e2383ce96a600d8b7af7367fb52adaaf; pgv_pvi=8618249216; pgv_si=s6958286848; _tb_sess_r=https%3A//www.jiemian.com/lists/51.html; _tb_t_ppg=https%3A//www.jiemian.com/article/2596911.html; trc_cookie_storage=taboola%2520global%253Auser-id%3D89521ea2-baa4-4a77-b1bd-dafd704907a8-tuct2dc33bb',\n 'Host':'www.jiemian.com',\n 'Upgrade-Insecure-Requests':'1',\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36'''\n }\n\n self.start_url = 
'http://a.jiemian.com/index.php?m=lists&a=cLists&notid=2600447,2603510,2600141&id=194&type=card&callback=jQuery110209049534785792808_1541642080659&page={}&_=1541642080661'\n        self.news_page_num = 1\n        # counter used for the crawler's time checks\n        self.time_out_num = 0\n        # flag used to decide whether the crawler has stopped\n        self.stop_spider = False\n        # comment url template\n        self.comment_port_url = 'http://a.jiemian.com/index.php?m=comment&a=getlistCommentP&aid={}&page={}&comment_type=1&per_page=5&callback=jQuery110208603319204711195_1541662025237&_=1541662025244'\n        # compute the time window automatically from the system time\n        date = datetime.now() - timedelta(days=3)  # the time seven days ago, not including today\n        str_time = str(date).split(' ')[0]\n\n        yesterday = datetime.now() - timedelta(days=1)  # yesterday's date\n        yesterday = str(yesterday).split(' ')[0]\n\n        print('Crawl period: {} to {}'.format(str_time, yesterday))\n\n        logging.info('Crawl period: {} to {}'.format(str_time, yesterday))\n        # start time, y-m-d, the end farther from now\n        self.start_time = str_time\n        # end time, y-m-d, the end closer to now\n        self.end_time = yesterday\n        # error counter\n        self.error_num = 0\n        # error urls\n        self.error_url_list = []\n        # total news pages\n        self.all_news_num = 0\n\n    # get the urls from the news list\n    def get_news_url(self, url):\n        print(url, '%%%%%%%%%')\n        s = requests.session()\n        s.keep_alive = False\n        news_url_json_data = requests.get(url, headers=self.headers_one)\n        content = news_url_json_data.text\n        content = content.split('(')[1].split(')')[0]\n        content = json.loads(content)\n        content_str = content['rst']\n        url_list = re.findall(r'https://www.jiemian.com.*?.html', content_str)\n        try:\n            data = re.search(r'\">\\d{2}/\\d{2}', content_str).group(0).split('>')[1]\n        except AttributeError as e:\n            print(e)\n            data = time.strftime(\"%m/%d\")\n\n        print(data)\n        print(url_list)\n        url_list_new = []\n        for url in url_list:\n            url_split = url.split('/')\n            if 'article' in url_split:\n                url_list_new.append(url)\n        url_list_new = set(url_list_new)\n        url_list_new = list(url_list_new)\n        print(url_list_new)\n        return url_list_new, data\n\n    # fetch the news page\n    def get_news_page(self, url):\n        print(url)\n        # self.headers_two['Referer'] = str(url)\n        s = requests.session()\n        s.keep_alive = False\n        response = requests.get(url, headers=self.headers_three)\n        code = response.status_code\n        news_page_content = etree.HTML(response.content.decode())\n        title = news_page_content.xpath('.//div[@class=\"article-header\"]/h1/text()')[0]\n        # get the article's publication time, used for the time check\n        try:\n            data = news_page_content.xpath('.//div[4]/p/span[2]/text()')[0].split(' ')[0]\n        except:\n            data = news_page_content.xpath('.//div[2]/p/span[2]/text()')[0].split(' ')[0]\n        data = re.sub('/', '-', data)\n        print(code)\n        print(title, data)\n        return news_page_content, code, data\n\n    # parse the news html page\n    def parse_page(self, news_page_content, news_page_url):\n        item = dict()\n        # site platform\n        item['paltform'] = '汽车_界面新闻'\n\n        # article title (title)\n        title = news_page_content.xpath('.//div[@class=\"article-header\"]/h1/text()')[0]\n        item['title'] = title\n        # article body (content)\n        content = news_page_content.xpath('.//div[5]/div[@class=\"article-content\"]/p/text()')\n        item['content'] = ''.join(content)\n        # get the first node's data\n        first_node = news_page_content.xpath('.//div[4]/p/span[1]/text()')[0]\n        # first_node = first_node.split(' ')\n        # get the third node's data\n        third_node = news_page_content.xpath('.//div[4]/p/span[3]/text()')[0]\n\n        # check whether the fourth node's data can be fetched\n        if news_page_content.xpath('.//div[4]/p/span[4]/text()'):\n            print(1111111111111111111111111)\n            # publication date (data)\n            data_tiem = news_page_content.xpath('.//div[4]/p/span[2]/text()')[0].split(' ')[0]\n            times = news_page_content.xpath('.//div[4]/p/span[2]/text()')[0].split(' ')[1]\n            data = re.sub('/', '-', data_tiem)\n            item['data'] = data\n            # publication time (time)\n            item['time'] = 
times\n            # source/author (source_author)\n            author = news_page_content.xpath('.//div[4]/p/span[1]/a/text()')[0]\n            source = news_page_content.xpath('.//div[4]/p/span[4]/text()')[0]\n            # view count\n            views = news_page_content.xpath('.//div[2]/div[4]/p/span[3]/text()')[0]\n        elif re.search('/', first_node):\n            print(2222222222222222222222222)\n            # publication date (data)\n            data_tiem = news_page_content.xpath('.//div[4]/p/span[1]/text()')[0].split(' ')[0]\n            times = news_page_content.xpath('.//div[4]/p/span[1]/text()')[0].split(' ')[1]\n            data = re.sub('/', '-', data_tiem)\n            item['data'] = data\n            # publication time (time)\n            item['time'] = times\n            # source/author (source_author)\n            author = ''\n            source = news_page_content.xpath('.//div[4]/p/span[3]/text()')[0]\n            # view count\n            views = news_page_content.xpath('.//div[2]/div[4]/p/span[2]/text()')[0]\n\n        elif '浏览' in third_node and news_page_content.xpath('.//div[@class=\"article-author\"]/div'):\n            print(33333333333333333333333333333333333)\n            # publication date (data)\n            data_tiem = news_page_content.xpath('.//div[4]/p/span[2]/text()')[0].split(' ')[0]\n            times = news_page_content.xpath('.//div[4]/p/span[2]/text()')[0].split(' ')[1]\n            data = re.sub('/', '-', data_tiem)\n            item['data'] = data\n            # publication time (time)\n            item['time'] = times\n            # source/author (source_author)\n            author = news_page_content.xpath('.//div[4]/p/span[1]/a/text()')[0]\n            source = ''\n            # view count\n            views = news_page_content.xpath('.//div[4]/p/span[3]/text()')[0]\n        elif '浏览' in third_node and not news_page_content.xpath('.//div[@class=\"article-author\"]/div'):\n            print(44444444444444444444444444444)\n            # publication date (data)\n            data_tiem = news_page_content.xpath('.//div[4]/p/span[2]/text()')[0].split(' ')[0]\n            times = news_page_content.xpath('.//div[4]/p/span[2]/text()')[0].split(' ')[1]\n            data = re.sub('/', '-', data_tiem)\n            item['data'] = data\n            # publication time (time)\n            item['time'] = times\n            # source/author (source_author)\n            author = ''\n            source = news_page_content.xpath('.//div[4]/p/span[1]/a/text()')[0]\n            # view count\n            views = news_page_content.xpath('.//div[4]/p/span[3]/text()')[0]\n        else:\n            print(5555555555555555555555555555555555)\n            # publication date (data)\n            data_tiem = news_page_content.xpath('.//div[4]/p/span[2]/text()')[0].split(' ')[0]\n            times = news_page_content.xpath('.//div[4]/p/span[2]/text()')[0].split(' ')[1]\n            data = re.sub('/', '-', data_tiem)\n            item['data'] = data\n            # publication time (time)\n            item['time'] = times\n            # source/author (source_author)\n            author = news_page_content.xpath('.//div[4]/p/span[1]/a/text()')[0]\n            source = news_page_content.xpath('.//div[4]/p/span[3]/text()')[0]\n            # view count\n            views = ''\n\n        item['source_author'] = source + '/' + author\n        # click count (clicks)\n        item['clicks'] = ''\n        # view count (views)\n        item['views'] = views\n        # comment count (comments_count)\n        comments_count = news_page_content.xpath('.//p[@class=\"title-box\"]/span/text()')[0]\n        item['comments_count'] = comments_count\n        # like count (likes)\n        item['likes'] = news_page_content.xpath('.//span[@class=\"ding_count\"]/text()')[0]\n        # keyword\n        item['keyword'] = ''\n        # url\n        item['url'] = news_page_url\n        self.write_news_into_jsonfile(item)\n        # crawl the article's comment/reply content\n        if int(comments_count) != 0:\n            self.parse_comment_info(news_page_url, int(comments_count), data,times, title, news_page_url)\n\n        else:\n            pass\n\n        return item\n\n    # process the comment info fetched from the comment API\n    def parse_comment_page(self, comment_port_url, source_date, source_time, source_title, source_url, floor_num):\n        print(comment_port_url)\n        s = requests.session()\n        s.keep_alive = False\n        response = requests.get(comment_port_url, headers=self.headers_two)\n        data = response.content.decode()\n        data = re.sub(r'\\\\\"', '\\\"', data)\n        print('*******',data)\n        data = re.findall(r'
', data)\n        print(data)\n        comment_item = dict()\n\n        if data:\n            print('---------------------')\n            for data in data:\n                data = etree.HTML(data)\n                data_list = data.xpath('.//dd[@class=\"comment-post\"]')\n                for data in data_list:\n                    # site\n                    comment_item['platform'] = '界面新闻'\n                    # publication date of the source article\n                    comment_item['source_date'] = source_date\n                    # publication time of the source article\n                    comment_item['source_time'] = source_time\n                    # article title\n                    comment_item['title'] = source_title\n                    # source article url\n                    comment_item['source_url'] = source_url\n                    # commenter's name\n                    author = data.xpath('.//div[@class=\"comment-body\"]/a/text()')[0].encode('utf-8').decode('unicode_escape')\n                    comment_item['author'] = author\n                    # comment content\n                    text = data.xpath('.//div[1]/p/text()')[0].encode('utf-8').decode('unicode_escape')\n                    comment_item['content'] = text\n                    try:\n                        data_all = data.xpath('.//div[@class=\"comment-footer\"]/span[1]/text()')[0]\n                        print(data_all)\n                        # comment date\n                        day_date = data_all.split(' ')[0]\n                        day_date = re.sub(r'\\\\/', '-', day_date)\n                        comment_item['date'] = day_date\n                        # comment time\n                        comment_time = data_all.split(' ')[1]\n                        comment_item['time'] = comment_time\n                    except:\n                        comment_item['date'] = ''\n                        comment_item['time'] = ''\n                    # like count\n                    likes = data.xpath('.//em/text()')[0]\n                    likes = re.search('\\d', likes).group(0)\n                    comment_item['likes'] = likes\n                    # reply count\n                    comments_count = '0'\n                    comment_item['comments_count'] = comments_count\n                    # comment url\n                    comment_url = comment_port_url\n                    comment_item['comment_url'] = comment_url\n                    # view count\n                    views = ''\n                    comment_item['views'] = views\n                    # keyword\n                    keyword = ''\n                    comment_item['keyword'] = keyword\n                    # floor number\n                    floor = floor_num\n                    comment_item['floor'] = floor\n                    floor_num += 1\n                    self.write_comment_into_jsonfile(comment_item)\n\n        return floor_num\n\n    # crawl comment info\n    def parse_comment_info(self, url, comments_count, data, times, title, source_url):\n        all_page_num = math.ceil(comments_count/5)\n        url_num = url.split('/')[-1].split('.')[0]\n        floor_num = 1\n        for port_page_num in range(1, int(all_page_num + 1)):\n\n            comment_port_url = self.comment_port_url.format(str(url_num), str(port_page_num))\n            floor_num = self.parse_comment_page(comment_port_url, data, times, title, source_url, floor_num)\n\n    # write the news info to a json file\n    def write_news_into_jsonfile(self, news_item):\n        news_item = json.dumps(dict(news_item), ensure_ascii=False) + '\\n'\n        # try:\n        print('Writing to file.....')\n        with open('./../jiemian/27_{}_jiemian_news.json'.format(str(now_time)), 'ab') as f:\n            f.write(news_item.encode(\"utf-8\"))\n        # except:\n        #     pass\n\n    # write the article comment info to a json file\n    def write_comment_into_jsonfile(self, comment_item):\n        comment_item = json.dumps(dict(comment_item), ensure_ascii=False) + '\\n'\n        try:\n            with open('./../jiemian/41_{}_jiemian_comment.json'.format(str(now_time)), 'ab') as f:\n                f.write(comment_item.encode(\"utf-8\"))\n        except:\n            pass\n\n    def run(self):\n        while self.news_page_num < 100:\n            url_list_new, data = self.get_news_url(self.start_url.format(str(self.news_page_num)))\n            # time check\n            data = '2019-' + re.sub('/', '-', data)\n            get_time = time.mktime(time.strptime(data, \"%Y-%m-%d\"))\n            end_time = time.mktime(time.strptime(self.end_time, \"%Y-%m-%d\"))\n            # if self.start_time != '':\n            start_time = time.mktime(time.strptime(self.start_time, \"%Y-%m-%d\"))\n            # else:\n            #     start_time = time.mktime(time.strptime('2100-1-1', \"%Y-%m-%d\"))\n            if float(get_time) < float(start_time):\n                # self.crawler.engine.close_spider(self, 'spider terminated')\n                print(data,111)\n                # break_flag = True\n                break\n            if float(start_time) <= float(get_time) <= float(end_time):\n                print(data)\n\n                for news_page_url in url_list_new:\n                    self.all_news_num += 1\n                    
news_page_content, code, data = self.get_news_page(news_page_url)\n                    try:\n                        item = self.parse_page(news_page_content, news_page_url)\n                        print(item)\n                    except:\n                        self.error_num += 1\n                        self.error_url_list.append(news_page_url)\n                        print('error')\n            self.news_page_num += 1\n        print(self.error_num)\n        print(self.error_url_list)\n        print(self.all_news_num)\n        logger.info('Crawling finished......')\n\nif __name__ == \"__main__\":\n    jiemian_spider = JieMianSpider()\n    try:\n        jiemian_spider.run()\n    except:\n        logger.error(traceback.format_exc())", "repo_name": "daiguobinit/project", "sub_path": "jiemian/jiemian_spider.py", "file_name": "jiemian_spider.py", "file_ext": "py", "file_size_in_byte": 17971, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "requests.adapters", "line_number": 11, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 17, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 71, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 74, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 74, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 79, "usage_type": "call"}, {"api_name": "requests.session", "line_number": 94, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 96, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 99, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 101, "usage_type": "call"}, {"api_name": "re.search", "line_number": 103, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 106, "usage_type": "call"}, {"api_name": "requests.session", "line_number": 124, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 126, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 128, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 128, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 135, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 164, "usage_type": "call"}, {"api_name": "re.search", "line_number": 173, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 178, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 193, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 207, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 221, "usage_type": "call"}, {"api_name": "requests.session", "line_number": 258, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 260, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 262, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 264, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 
271, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 271, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 295, "usage_type": "call"}, {"api_name": "re.search", "line_number": 305, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 329, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 339, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 349, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 360, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 361, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 361, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 362, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 362, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 364, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 364, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 396, "usage_type": "call"}]} +{"seq_id": "16305459693", "text": "import pathlib\nfrom setuptools import setup\n\n# The directory containing this file\nHERE = pathlib.Path(__file__).parent\n\n# The text of the README file\nREADME = (HERE / \"README.md\").read_text()\n\n# This call to setup() does all the work\nsetup(\n name=\"pyrpc-discord\",\n version=\"1.1.3\",\n python_requires='>=3.5',\n description=\"A Small Project To Automate Discord Rich Presence using CLI\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/bartick/pyrpc-discord\",\n author=\"Bartick Maiti\",\n platforms=['Windows', 'Linux', 'OSX'],\n author_email=\"blackmumba890@gmail.com\",\n license=\"MIT\",\n classifiers=[\n \t\"Framework :: AsyncIO\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n packages=[\"pyrpc\"],\n include_package_data=True,\n install_requires=[\n \t\"pypresence>=4.2.0\"\n ],\n entry_points={\n \"console_scripts\": [\n \"pyrpc=pyrpc.__main__:main\",\n ]\n },\n)", "repo_name": "bartick/pyrpc-discord", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1325, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pathlib.Path", "line_number": 5, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "31806799716", "text": "from .struct2 import struct_be, parse, parse_array\nfrom grope import BlobIO\nimport six, grope\n\n@struct_be\nclass OTF_tag_hdr:\n '''\n H:count\n '''\n\n@struct_be\nclass OTF_tag_offset:\n '''\n 4s:tag\n H:offset\n '''\n\n @classmethod\n def pair(cls, fin):\n obj = cls.parse(fin)\n return obj.tag, obj.offset\n\ndef load_taglist(fin):\n hdr = OTF_tag_hdr.parse(fin)\n return [OTF_tag_offset.pair(fin) for i in six.moves.range(hdr.count)]\n\n@struct_be\nclass GSUB_hdr:\n '''\n H:majorVersion\n H:minorVersion\n H:scriptListOffset\n H:featureListOffset\n H:lookupListOffset\n '''\n\n@struct_be\nclass OTF_script_table:\n '''\n H:defaultLangSys\n '''\n\n@struct_be\nclass OTF_langsys:\n '''\n H:lookupOrder\n H:requiredFeatureIndex\n H:featureIndexCount\n '''\n\n@struct_be\nclass 
OTF_feature_index:\n '''\n H:index\n '''\n\nclass LangSys:\n def __init__(self, lookup_order, req_feature, features):\n self.lookup_order = lookup_order\n self.req_feature = req_feature\n self.features = features\n\nclass Script:\n def __init__(self, default, langs):\n self.default = default\n self.langs = langs\n\nclass Feature:\n def __init__(self, tag, params, lookups):\n self.tag = tag\n self.params = params\n self.lookups = lookups\n\ndef parse_langsys(blob, features):\n fin = BlobIO(blob)\n hdr = OTF_langsys.parse(fin)\n selected_features = [features[OTF_feature_index.parse(fin).index] for i in six.moves.range(hdr.featureIndexCount)]\n return LangSys(hdr.lookupOrder, hdr.requiredFeatureIndex, selected_features)\n\ndef parse_script(blob, features):\n fin = BlobIO(blob)\n hdr = OTF_script_table.parse(fin)\n langsys_list = load_taglist(fin)\n\n langs = {}\n if hdr.defaultLangSys != 0:\n langs[None] = parse_langsys(blob[hdr.defaultLangSys:], features)\n\n for lang, offset in langsys_list:\n langs[lang] = parse_langsys(blob[offset:], features)\n return Script(hdr.defaultLangSys, langs)\n\ndef parse_scriptlist(blob, features):\n scripts = load_taglist(BlobIO(blob))\n return { tag: parse_script(blob[offset:], features) for tag, offset in scripts }\n\ndef parse_feature(blob, tag, lookups):\n fin = BlobIO(blob)\n feature_params, lookup_index_count = parse(fin, '>HH')\n lookup_indices = parse_array(fin, '>H', lookup_index_count)\n\n return Feature(tag, feature_params, [lookup for idx in lookup_indices for lookup in lookups[idx]])\n\ndef parse_feature_list(blob, lookups):\n fin = BlobIO(blob)\n features = load_taglist(fin)\n return [parse_feature(blob[offs:], tag, lookups) for tag, offs in features]\n\n@struct_be\nclass _cov_range_rec:\n '''\n H:start_gid\n H:end_gid\n H:start_covidx\n '''\n\ndef parse_coverage(blob):\n fin = BlobIO(blob)\n format, = parse(fin, '>H')\n if format == 1:\n glyph_count, = parse(fin, '>H')\n glyph_array = parse_array(fin, '>H', glyph_count)\n def cov1(gids, idx):\n try:\n return glyph_array.index(gids[idx])\n except ValueError:\n return None\n return cov1\n\n if format == 2:\n range_count, = parse(fin, '>H')\n ranges = [_cov_range_rec.parse(fin) for i in six.moves.range(range_count)]\n def cov2(gids, idx):\n gid = gids[idx]\n for range in ranges:\n if range.start_gid <= gid <= range.end_gid:\n return range.start_covidx + gid - range.start_gid\n return None\n return cov2\n\n raise RuntimeError('unknown coverage format')\n\ndef parse_gsub_lookup1(blob):\n fin = BlobIO(blob)\n format, = parse(fin, '>H')\n if format == 1:\n coverage_offs, delta_glyph_id = parse(fin, '>Hh')\n coverage = parse_coverage(blob[coverage_offs:])\n def sub1(gids, idx):\n if coverage(gids, idx) is not None:\n gids[idx] += delta_glyph_id\n return sub1\n elif format == 2:\n coverage_offs, glyph_count = parse(fin, '>HH')\n substitute_gids = parse_array(fin, '>H', glyph_count)\n coverage = parse_coverage(blob[coverage_offs:])\n def sub2(gids, idx):\n cov_idx = coverage(gids, idx)\n if cov_idx is not None:\n gids[idx] = substitute_gids[cov_idx]\n return sub2\n\n else:\n raise RuntimeError('unknown subtable format')\n\ndef parse_liga(blob):\n fin = BlobIO(blob)\n target_gid, component_count = parse(fin, '>HH')\n components = parse_array(fin, '>H', component_count - 1)\n return components, target_gid\n\ndef parse_ligaset(blob):\n fin = BlobIO(blob)\n count, = parse(fin, '>H')\n liga_offsets = parse_array(fin, '>H', count)\n return [parse_liga(blob[offs:]) for offs in liga_offsets]\n\ndef 
parse_gsub_lookup4(blob):\n fin = BlobIO(blob)\n format, cov_offset, ligaset_count = parse(fin, '>HHH')\n if format != 1:\n raise RuntimeError('unknown ligature format')\n coverage = parse_coverage(blob[cov_offset:])\n ligasets = [parse_ligaset(blob[offs:]) for offs in parse_array(fin, '>H', ligaset_count)]\n\n def sub_liga(gids, idx):\n cov_idx = coverage(gids, idx)\n if cov_idx is None:\n return\n for components, target in ligasets[cov_idx]:\n if gids[idx+1:idx+1+len(components)] == components:\n gids[idx:idx+1+len(components)] = [target]\n break\n\n return sub_liga\n\n_gsub_lookups = {\n 1: parse_gsub_lookup1,\n 4: parse_gsub_lookup4,\n }\n\ndef parse_lookup(blob):\n fin = BlobIO(blob)\n lookup_type, lookup_flag, subtable_count = parse(fin, '>HHH')\n subtable_offsets = parse_array(fin, '>H', subtable_count)\n mark_filtering_set = parse(fin, '>H')\n\n assert lookup_type in (1, 3, 4, 6)\n\n parse_fn = _gsub_lookups.get(lookup_type)\n if parse_fn:\n subbers = [parse_fn(blob[offs:]) for offs in subtable_offsets]\n else:\n subbers = []\n\n return subbers\n\ndef parse_lookup_list(blob):\n fin = BlobIO(blob)\n count, = parse(fin, '>H')\n lookup_offsets = parse_array(fin, '>H', count)\n\n lookups = []\n for offs in lookup_offsets:\n lookups.append(parse_lookup(blob[offs:]))\n\n return lookups\n\nclass _Subber:\n def __init__(self, lookups):\n self._lookups = lookups\n\n def sub(self, gids):\n gids = list(gids)\n\n i = 0\n while i < len(gids):\n for lookup in self._lookups:\n new_i = lookup(gids, i)\n if new_i is not None:\n i = new_i\n break\n else:\n i += 1\n \n return gids\n\nclass OtfGsubTable:\n def __init__(self, name, blob):\n hdr = GSUB_hdr.parse_blob(blob)\n\n if (hdr.majorVersion, hdr.minorVersion) != (1, 0):\n raise RuntimeError('unknown GSUB table version')\n\n lookups = parse_lookup_list(blob[hdr.lookupListOffset:])\n features = parse_feature_list(blob[hdr.featureListOffset:], lookups)\n scripts = parse_scriptlist(blob[hdr.scriptListOffset:], features)\n\n self.name = name\n self._scripts = scripts\n\n def make_subber(self, enabled_features, script=b'DFLT', langsys=None):\n lookups = []\n for feature in self._scripts[script].langs[langsys].features:\n if not enabled_features(feature.tag):\n continue\n lookups.extend(feature.lookups)\n\n return _Subber(lookups)\n", "repo_name": "avakar/font_codify", "sub_path": "otf_tools/adv_typo.py", "file_name": "adv_typo.py", "file_ext": "py", "file_size_in_byte": 7432, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "struct2.struct_be", "line_number": 5, "usage_type": "name"}, {"api_name": "struct2.struct_be", "line_number": 11, "usage_type": "name"}, {"api_name": "six.moves.range", "line_number": 25, "usage_type": "call"}, {"api_name": "six.moves", "line_number": 25, "usage_type": "attribute"}, {"api_name": "struct2.struct_be", "line_number": 27, "usage_type": "name"}, {"api_name": "struct2.struct_be", "line_number": 37, "usage_type": "name"}, {"api_name": "struct2.struct_be", "line_number": 43, "usage_type": "name"}, {"api_name": "struct2.struct_be", "line_number": 51, "usage_type": "name"}, {"api_name": "grope.BlobIO", "line_number": 75, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 77, "usage_type": "call"}, {"api_name": "six.moves", "line_number": 77, "usage_type": "attribute"}, {"api_name": "grope.BlobIO", "line_number": 81, "usage_type": "call"}, {"api_name": "grope.BlobIO", "line_number": 94, "usage_type": "call"}, {"api_name": 
"grope.BlobIO", "line_number": 98, "usage_type": "call"}, {"api_name": "struct2.parse", "line_number": 99, "usage_type": "call"}, {"api_name": "struct2.parse_array", "line_number": 100, "usage_type": "call"}, {"api_name": "grope.BlobIO", "line_number": 105, "usage_type": "call"}, {"api_name": "struct2.struct_be", "line_number": 109, "usage_type": "name"}, {"api_name": "grope.BlobIO", "line_number": 118, "usage_type": "call"}, {"api_name": "struct2.parse", "line_number": 119, "usage_type": "call"}, {"api_name": "struct2.parse", "line_number": 121, "usage_type": "call"}, {"api_name": "struct2.parse_array", "line_number": 122, "usage_type": "call"}, {"api_name": "struct2.parse", "line_number": 131, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 132, "usage_type": "call"}, {"api_name": "six.moves", "line_number": 132, "usage_type": "attribute"}, {"api_name": "grope.BlobIO", "line_number": 144, "usage_type": "call"}, {"api_name": "struct2.parse", "line_number": 145, "usage_type": "call"}, {"api_name": "struct2.parse", "line_number": 147, "usage_type": "call"}, {"api_name": "struct2.parse", "line_number": 154, "usage_type": "call"}, {"api_name": "struct2.parse_array", "line_number": 155, "usage_type": "call"}, {"api_name": "grope.BlobIO", "line_number": 167, "usage_type": "call"}, {"api_name": "struct2.parse", "line_number": 168, "usage_type": "call"}, {"api_name": "struct2.parse_array", "line_number": 169, "usage_type": "call"}, {"api_name": "grope.BlobIO", "line_number": 173, "usage_type": "call"}, {"api_name": "struct2.parse", "line_number": 174, "usage_type": "call"}, {"api_name": "struct2.parse_array", "line_number": 175, "usage_type": "call"}, {"api_name": "grope.BlobIO", "line_number": 179, "usage_type": "call"}, {"api_name": "struct2.parse", "line_number": 180, "usage_type": "call"}, {"api_name": "struct2.parse_array", "line_number": 184, "usage_type": "call"}, {"api_name": "grope.BlobIO", "line_number": 203, "usage_type": "call"}, {"api_name": "struct2.parse", "line_number": 204, "usage_type": "call"}, {"api_name": "struct2.parse_array", "line_number": 205, "usage_type": "call"}, {"api_name": "struct2.parse", "line_number": 206, "usage_type": "call"}, {"api_name": "grope.BlobIO", "line_number": 219, "usage_type": "call"}, {"api_name": "struct2.parse", "line_number": 220, "usage_type": "call"}, {"api_name": "struct2.parse_array", "line_number": 221, "usage_type": "call"}]} +{"seq_id": "34646632804", "text": "from functools import reduce\r\nn, x = [int(a) for a in input().split()]\r\nif n == 1:\r\n if x == 1:\r\n print(1)\r\n else:\r\n print(0)\r\nelif x == 1:\r\n print(1)\r\nelse:\r\n total = 0\r\n def factors(n): \r\n return set(reduce(list.__add__, \r\n ([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))\r\n factorsX = sorted(factors(x))\r\n limit = len(factorsX)//2-1\r\n if len(factorsX)%2:\r\n limit += 1\r\n if factorsX[(len(factorsX))//2] <= n:\r\n total += 1\r\n for factor in range(len(factorsX)-1, limit, -1):\r\n if factorsX[factor] <= n:\r\n total += 2\r\n print(total)\r\n", "repo_name": "Chacon-Miguel/CodeForces-Solutions", "sub_path": "multiplicationTable.py", "file_name": "multiplicationTable.py", "file_ext": "py", "file_size_in_byte": 662, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "functools.reduce", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "1784740078", "text": "def External_use(mid, ticks):\n\n import m5_1_D_midi\n import 
m5_2_mid_time2real_time\n\n process_mid = m5_1_D_midi.External_use(mid)\n middle_mid = m5_2_mid_time2real_time.External_use(process_mid, ticks)\n\n return middle_mid\n\nif __name__ == \"__main__\":\n\n dir = \"C:\\\\Users\\\\rkrp1\\\\Desktop\\\\Music\\\\practice_file\\\\\"\n mid_filename = dir + \"Grand_1.0_0.5.mid\"\n wav_filename = dir + \"Grand_1.0_0.5.mid\"\n\n import mido\n mid = mido.MidiFile(mid_filename)\n middle_mid = External_use(mid, mid.ticks_per_beat)\n print(middle_mid)\n print(middle_mid[0][0])", "repo_name": "momongss/pianist_wav2mid", "sub_path": "pianist/m4_1_middle_preprocessing.py", "file_name": "m4_1_middle_preprocessing.py", "file_ext": "py", "file_size_in_byte": 575, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "21", "api": [{"api_name": "m5_1_D_midi.External_use", "line_number": 6, "usage_type": "call"}, {"api_name": "m5_2_mid_time2real_time.External_use", "line_number": 7, "usage_type": "call"}, {"api_name": "mido.MidiFile", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "38899923533", "text": "import allauth\nfrom django.dispatch import receiver, Signal\nfrom utils.response import get_mail_text_on_order_creation, get_mail_text_on_sing_up\n\nfrom tasks.tasks import send_mail\n\nnew_user_registration = Signal(\n providing_args=['user_id']\n)\n\nnew_order_confirmation = Signal(\n providing_args=[\n 'order_number', 'order_url', 'to_address', 'to_address', 'last_name', 'first_name'\n ]\n)\n\n\n@receiver(new_order_confirmation)\ndef send_order_confirmation_mail(order_url, order_number, to_address, last_name, first_name, **kwargs):\n mail_text, mail_header = get_mail_text_on_order_creation(order_url, order_number, last_name, first_name)\n send_mail.delay(to_address, mail_header, mail_text)\n\n\n@receiver(allauth.account.signals.user_signed_up)\ndef send_email_on_registration_confirmation(request, user, **kwargs):\n mail_text, mail_header = get_mail_text_on_sing_up(user.email)\n send_mail.delay(user.email, mail_header, mail_text)\n", "repo_name": "Zippelin/DemoShop", "sub_path": "docker/web/app/shop/utils/signals.py", "file_name": "signals.py", "file_ext": "py", "file_size_in_byte": 949, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.dispatch.Signal", "line_number": 7, "usage_type": "call"}, {"api_name": "django.dispatch.Signal", "line_number": 11, "usage_type": "call"}, {"api_name": "utils.response.get_mail_text_on_order_creation", "line_number": 20, "usage_type": "call"}, {"api_name": "tasks.tasks.send_mail.delay", "line_number": 21, "usage_type": "call"}, {"api_name": "tasks.tasks.send_mail", "line_number": 21, "usage_type": "name"}, {"api_name": "django.dispatch.receiver", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.response.get_mail_text_on_sing_up", "line_number": 26, "usage_type": "call"}, {"api_name": "tasks.tasks.send_mail.delay", "line_number": 27, "usage_type": "call"}, {"api_name": "tasks.tasks.send_mail", "line_number": 27, "usage_type": "name"}, {"api_name": "django.dispatch.receiver", "line_number": 24, "usage_type": "call"}, {"api_name": "allauth.account", "line_number": 24, "usage_type": "attribute"}]} +{"seq_id": "25083833531", "text": "#!/usr/bin/env python3\nfrom mpi4py import MPI\n#import multiprocessing as mp\nimport datetime as dt\nimport functools\nfrom glob import glob\nimport toml\nconfig = toml.load('../config.toml')\n\nimport os, sys, inspect\ncurrentdir = 
os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, os.path.join(parentdir, 'embed'))\nsys.path.insert(0, parentdir)\n\nimport embed.clean_and_token_text as ctt\n\ndef slow_fun(mach, jobnum):\n \"\"\"\n Takes about 4 sec on rpi\n \"\"\"\n N = dt.datetime.now()\n #proc = mp.current_process()\n proc = 0\n print(\"At {}, jobnum = {} machine {} started on proc {}\"\\\n .format((N.hour,N.minute,N.second), jobnum, mach, proc))\n sum([i*i for i in range(10_000_000)])\n return 0\n\n\ndef main():\n comm = MPI.COMM_WORLD\n rank = comm.rank\n size = comm.Get_size()\n\n\n for i in range(5):\n if i%size == rank:\n slow_fun(rank, i)\n\ndef main_xml2xml():\n comm = MPI.COMM_WORLD\n rank = comm.rank\n size = comm.Get_size()\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('in_files', type=str, nargs='+',\n help='one or more tar.gz files of Latexmled file (from promath)')\n parser.add_argument('out_dir', type=str,\n help='replicate the in_files dir struct in this directory with the clean files')\n #parser.add_argument('--phrases_file', default=None, type=str, nargs='+',\n # help='XML file with the phrases to be joined (from the glossary dir)')\n parser.add_argument('--norm_args', nargs='?', default=['rm_punct'],\n help='arguments for the tokenization function')\n parser.add_argument('--num_phrases', type=int, default=0,\n help='Max number of phrases to use')\n #parser.add_argument('--skip_n', default=1, type=int)\n args = parser.parse_args()\n\n #phrase_lst = ReadGlossary(args.phrases_file).common_phrases_lst(args.num_phrases)\n #join_fun = lambda s: token_phrases3(s, phrase_lst=phrase_lst)\n\n if rank == 0:\n RG = ctt.ReadGlossary(os.path.join(config['paths']['data'], 'glossary/v3/math*/*.xml.gz'),\n os.path.join(config['paths']['data'], 'glossary/NN.v1/math*/*.xml.gz'))\n ph_dict = RG.first_word_dict(intersect = 'relative', max_phrases=args.num_phrases)\n print(f'Using {len(ph_dict)} phrases')\n else:\n ph_dict = None\n ph_dict = comm.bcast(ph_dict, root=0)\n join_fun = functools.partial(ctt.join_phrases, phrase_dict=ph_dict)\n \n N = dt.datetime.now()\n\n #for j,gz_file in enumerate(args.in_files):\n for j,gz_file in enumerate(glob(args.in_files[0])):\n if j%size == rank:\n tdelta = dt.datetime.now() - N\n print(\"At {}, jobnum = {} machine {} started\"\\\n .format((tdelta.seconds//60, tdelta.seconds%60), j, rank))\n ctt.join_xml_para_and_write(gz_file, args.out_dir, join_fun)\n\n #with mp.Pool(processes=2, maxtasksperchild=1, initializer=worker_init, initargs=(join_fun,)) as pool:\n # pool.starmap(join_xml_para_and_write, [(f, args.out_dir, _func) for f in args.in_files])\n\nif __name__ == \"__main__\":\n main_xml2xml()\n", "repo_name": "lab156/arxivDownload", "sub_path": "MP_scripts/mpi_only_loop.py", "file_name": "mpi_only_loop.py", "file_ext": "py", "file_size_in_byte": 3206, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "21", "api": [{"api_name": "toml.load", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 11, "usage_type": "call"}, {"api_name": "inspect.getfile", "line_number": 11, "usage_type": "call"}, {"api_name": "inspect.currentframe", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI.COMM_WORLD", "line_number": 32, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 32, "usage_type": "name"}, {"api_name": "mpi4py.MPI.COMM_WORLD", "line_number": 42, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 42, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 46, "usage_type": "call"}, {"api_name": "embed.clean_and_token_text.ReadGlossary", "line_number": 64, "usage_type": "call"}, {"api_name": "embed.clean_and_token_text", "line_number": 64, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 71, "usage_type": "call"}, {"api_name": "embed.clean_and_token_text.join_phrases", "line_number": 71, "usage_type": "attribute"}, {"api_name": "embed.clean_and_token_text", "line_number": 71, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 73, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 73, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 78, "usage_type": "attribute"}, {"api_name": "embed.clean_and_token_text.join_xml_para_and_write", "line_number": 81, "usage_type": "call"}, {"api_name": "embed.clean_and_token_text", "line_number": 81, "usage_type": "name"}]} +{"seq_id": "1057261854", "text": "# 作者:Tom\r\n# 开发时间:2022/9/27 17:06\r\n\r\n# plt.plot(x, y, fmt='xxx', linestyle(线的样式)=, marker(点的样式)=, color(点、线的颜色)=, linewidth(线的粗细)=, markersize(点的大小)=, label=, ) 函数介绍\r\n# axis('off') #关闭坐标显示\r\n# plot(x, y) #默认为蓝色实线\r\n# plot(x, y, 'r*') # 红色星状标记\r\n# plot(x, y, 'go-') #带有圆圈标记的绿线\r\n# plot(x, y, 'ks:') #带有正方形标记的黑色虚线\r\nfrom PIL import Image\r\nfrom pylab import *\r\n\r\nim = array(Image.open('../image/empire.jpg'))\r\n\r\nimshow(im)\r\n\r\nx = [100, 100, 400, 400]\r\ny = [200, 500, 200, 500]\r\n\r\nplot(x, y, 'r*')\r\n\r\nplot(x[:2], y[:2], color='red')\r\n\r\ntitle('Plotting: \"empire.jpg\"')\r\n\r\nshow()\r\n", "repo_name": "Tom-HZJ/test", "sub_path": "123/Test_Matplotlib.py", "file_name": "Test_Matplotlib.py", "file_ext": "py", "file_size_in_byte": 744, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "PIL.Image.open", "line_number": 13, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "71534332534", "text": "import argparse\nimport time\n\nimport pandas as pd\nfrom 
sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score, f1_score\nfrom sklearn.preprocessing import StandardScaler\n\nimport config\nimport wandb\n\n\ndef run(fold):\n # WANDB\n wandb.init(project=\"wesad\", entity='berkegocmen',\n name=f\"lr_default_extracted_cf_{fold}\",\n tags=[\"3class\", \"default_params\", 'lr', '4HZ', 'extracted_feat'],\n group=\"3class_4hz\")\n\n # Get training file and generate features\n df_folds = pd.read_csv(config.THREE_CLASS_EXTRACTED_FOLDS_v2)\n df_test = pd.read_csv(config.THREE_CLASS_EXTRACTED_TEST_v2)\n\n df_train = df_folds[df_folds['kfold'] != fold]\n df_valid = df_folds[df_folds['kfold'] == fold]\n\n # # Get unique labels\n # labels = df_train['label'].unique()\n # labels_map = {1: 'baseline', 2: 'stress', 3: 'amusement', 4: 'meditation'}\n\n # get the feature names\n features = [f for f in df_train.columns if f not in ['label', 'kfold']]\n\n # Scale the features\n\n scalers = [StandardScaler() for i in range(len(features))]\n for i, scaler in enumerate(scalers):\n scaler.fit(df_train[features[i]].values.reshape(-1, 1))\n df_train.loc[:, features[i]] = scaler.transform(df_train[features[i]].values.reshape(-1, 1))\n df_valid.loc[:, features[i]] = scaler.transform(df_valid[features[i]].values.reshape(-1, 1))\n df_test.loc[:, features[i]] = scaler.transform(df_test[features[i]].values.reshape(-1, 1))\n # initiate a Logistic Regression\n clf = LogisticRegression()\n\n # fit the model\n print('Training started')\n start = time.time()\n clf.fit(df_train[features].values, df_train.label.values)\n finish = time.time() - start\n\n # get the training results\n print('Making predictions')\n\n training_pred = clf.predict(df_train[features].values)\n training_acc = accuracy_score(df_train.label.values, training_pred)\n training_f1 = f1_score(df_train.label.values, training_pred, average='weighted')\n\n # get the validation results\n validation_pred = clf.predict(df_valid[features].values)\n validation_acc = accuracy_score(df_valid.label.values, validation_pred)\n validation_f1 = f1_score(df_valid.label.values, validation_pred, average='weighted')\n\n # get the test predictions\n test_pred = clf.predict(df_test[features].values)\n test_acc = accuracy_score(df_test.label.values, test_pred)\n test_f1 = f1_score(df_test.label.values, test_pred, average='weighted')\n\n # # ROC AUC Score\n # training_acc_prob = clf.predict_proba(df_train[features].values)\n # validation_acc_prob = clf.predict_proba(df_valid[features].values)\n # test_acc_prob = clf.predict_proba(df_test[features].values)\n #\n # # training_auc = roc_auc_score(df_train.label.values, training_acc_prob, multi_class='ovr')\n # validation_auc = roc_auc_score(df_valid.label.values, validation_acc_prob, multi_class='ovr')\n # test_auc = roc_auc_score(df_test.label.values, test_acc_prob, multi_class='ovr')\n\n print(\n f'Fold:{fold}, Training Accuracy:{training_acc}, Validation Accuracy:{validation_acc}, Test Accuracy:{test_acc}'\n # f'\\nTraining AUC:{training_auc}, Validation_auc:{validation_auc}'\n f'\\nTraining F1:{training_f1}, Validation F1:{validation_f1}, Test F1:{test_f1}'\n f'\\nTraining time:{finish} seconds')\n\n # log the results to wandb\n wandb.log({'Training Accuracy': training_acc, 'Validation Accuracy': validation_acc, 'Test Accuracy': test_acc,\n 'Training F1': training_f1, 'Validation F1': validation_f1, 'Test F1': test_f1,\n 'Confusion Matrix': wandb.plot.confusion_matrix(y_true=df_valid.label.values, preds=validation_pred,\n class_names=[None, 'baseline', 
'stress', 'amusement'])})\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--fold',\n type=int\n )\n\n args = parser.parse_args()\n print(args.fold)\n run(args.fold)\n", "repo_name": "berkegocmen/wesad", "sub_path": "src/lr.py", "file_name": "lr.py", "file_ext": "py", "file_size_in_byte": 4073, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "wandb.init", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "config.THREE_CLASS_EXTRACTED_FOLDS_v2", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "config.THREE_CLASS_EXTRACTED_TEST_v2", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 43, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 60, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 66, "usage_type": "call"}, {"api_name": "wandb.log", "line_number": 84, "usage_type": "call"}, {"api_name": "wandb.plot.confusion_matrix", "line_number": 86, "usage_type": "call"}, {"api_name": "wandb.plot", "line_number": 86, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "20727284292", "text": "import io\nfrom operator import mod\nimport sys\n\n\n_INPUT = \"\"\"\\\n4\n1 5 4\n7 8\n6\n\"\"\"\nsys.stdin = io.StringIO(_INPUT)\n# ---------------------------------\n\n\nN = int(input())\nvecs = [list(map(int, input().split())) for _ in range(N - 1)]\n\ndp = [0] * (2**N)\n\nfor b in range(2**N):\n l = -1\n for i in range(N - 1):\n if not (b >> i) & 1:\n l = i\n break\n if l >= 0:\n for j in range(l + 1, N):\n if not (b >> j) & 1:\n nb = b | (1 << l) | (1 << j)\n dp[nb] = max(dp[nb], dp[b] + vecs[l][j - l - 1])\n\n\nprint(dp[-1])\n", "repo_name": "makima333/Atcoder-ganbaru", "sub_path": "contest/abc318/D.py", "file_name": "D.py", "file_ext": "py", "file_size_in_byte": 583, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.stdin", "line_number": 12, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "21616996331", "text": "from flask import Flask, render_template, request, flash, redirect, url_for, session, abort\nimport sqlite3 as sql\nfrom bs4 import BeautifulSoup\nimport requests\nimport lxml\n\nDEBUG = True\napp = Flask(__name__)\napp.config.from_object(__name__)\napp.config['SECRET_KEY'] = 'MN76MN$^6MTmt6@#m6jj0h%65tj^h6nht65NHrn75tj6T^'\n\n\ndef createUsersDb():\n con = sql.connect('users.db')\n cur = con.cursor()\n\n cur.execute('''\n CREATE TABLE IF NOT EXISTS users \n (name TEXT, psw TEXT, avatarPath TEXT, 
countOfPosts BIGINT)\n ''')\n con.commit()\n\n\ndef createPostsDb():\n con = sql.connect('posts.db')\n cur = con.cursor()\n\n cur.execute('''\n CREATE TABLE IF NOT EXISTS posts\n (title TEXT, content TEXT, author TEXT)\n ''')\n\n con.commit()\n\n\ncreateUsersDb()\ncreatePostsDb()\n\n\n@app.route('/', methods=['POST', 'GET'])\n@app.route('/index', methods=['POST', 'GET'])\ndef index():\n con = sql.connect('users.db')\n cur = con.cursor()\n usernames = cur.execute('''SELECT name FROM users''').fetchall()\n avatarPaths = cur.execute('''SELECT avatarPath FROM users''').fetchall()\n countOfPosts = cur.execute('''SELECT countOfPosts FROM users''').fetchall()\n\n return render_template('index.html',\n usernames=usernames,\n avatarPaths=avatarPaths,\n countOfPosts=countOfPosts,\n usersCount=len(usernames))\n\n\n@app.route('/about', methods=['POST', 'GET'])\ndef about():\n return render_template('about.html')\n\n\n@app.route('/reg', methods=['POST', 'GET'])\ndef reg():\n if 'userLogged' in session:\n return redirect(url_for('acc', name=session['userLogged']))\n elif request.method == 'POST':\n\n name = request.form['name']\n psw = request.form['psw']\n\n if len(name) >= 4 and len(psw) >= 4 and ('\"' not in name) and ('\"' not in psw):\n\n if not checkUserExistence(name=name) and name != 'NONE':\n\n con = sql.connect('users.db')\n cur = con.cursor()\n cur.execute(f'''INSERT INTO users \n (name, psw, avatarPath, countOfPosts) \n VALUES (\"{name}\", \"{psw}\", \"NONE.jpg\", 0) ''')\n con.commit()\n session['userLogged'] = request.form['name']\n return redirect(url_for('acc', name=session['userLogged']))\n else:\n session.clear()\n flash('This name is already occupied')\n else:\n flash('Psw and name must be 4 or more chars length, and not include \\' or \"')\n\n return render_template('reg.html')\n\n\n@app.route('/acc/', methods=['POST', 'GET'])\ndef acc(name):\n # preventing user from authorize without password\n if 'userLogged' not in session:\n abort(401)\n if name != session['userLogged']:\n abort(401)\n name = session['userLogged']\n\n # get images from db\n con = sql.connect('users.db')\n cur = con.cursor()\n avatarPath = cur.execute(f'''SELECT avatarPath FROM users WHERE name = \"{name}\"''').fetchone()[0]\n countOfPosts = cur.execute(f'''SELECT countOfPosts FROM users WHERE name = \"{name}\"''').fetchone()[0]\n\n if request.method == 'POST':\n try:\n # get images from GitHub account\n response = requests.get(request.form['git-url'])\n soup = BeautifulSoup(response.text, 'lxml')\n url = str(soup.find(\n 'img', {'class': 'rounded-2 avatar-user'})\n ).split('src=\"')[1].split('\"')[0].split('?s=')[0]\n resp = requests.get(url)\n out = open(f\"static/media/avatars/{name}.jpg\", \"wb\")\n out.write(resp.content)\n out.close()\n\n # Update avatar path in db\n con = sql.connect('users.db')\n cur = con.cursor()\n cur.execute(f'UPDATE users SET avatarPath = \"{name}.jpg\" WHERE name = \"{name}\"')\n avatarPath = cur.execute(f'''SELECT avatarPath FROM users WHERE name = \"{name}\"''').fetchone()[0]\n con.commit()\n\n\n return render_template('acc.html', name=name, avatarPath=avatarPath, countOfPosts=countOfPosts)\n\n except ValueError:\n # flash about unfounded GitHub account\n flash(\"Such GitHub account doesn't exists\")\n\n return render_template('acc.html', name=name, avatarPath=avatarPath, countOfPosts=countOfPosts)\n\n\n@app.route('/logout', methods=['POST', 'GET'])\ndef logout():\n session.clear()\n return redirect(url_for('reg'))\n\n\n@app.route('/auth', methods=['POST', 'GET'])\ndef auth():\n if 
request.method == 'POST':\n name = request.form['name']\n psw = request.form['psw']\n\n if not checkUserExistence(name=name):\n session.clear()\n flash(\"This account doesn't exists\")\n else:\n if checkAuth(name=name, psw=psw):\n session['userLogged'] = request.form['name']\n return redirect(url_for('acc', name=session['userLogged']))\n else:\n session.clear()\n flash('Incorrect login or password')\n return render_template('auth.html')\n\n\n@app.route('/acc//add-post', methods=['POST', 'GET'])\ndef addPost(name):\n\n if 'userLogged' not in session:\n abort(401)\n if name != session['userLogged']:\n abort(401)\n\n name = session['userLogged']\n\n if request.method == 'POST':\n\n title = request.form['title']\n content = request.form['content']\n\n if ('\"' not in title) and (\"'\" not in title) and (\"'\" not in content) and ('\"' not in content):\n\n con = sql.connect('posts.db')\n cur = con.cursor()\n\n cur.execute(f'''INSERT INTO posts (title, content, author) VALUES (\"{title}\", \"{content}\", \"{name}\")''')\n con.commit()\n\n con = sql.connect('users.db')\n cur = con.cursor()\n\n countOfPosts = cur.execute(f'''SELECT countOfPosts FROM users WHERE name = \"{name}\"''').fetchone()[0]\n cur.execute(f'''UPDATE users SET countOfPosts = {countOfPosts + 1} WHERE name = \"{name}\"''')\n con.commit()\n\n else:\n flash('content and title must be 4 or more chars length, and not include \\' or \"')\n\n return render_template('add-post.html')\n\n\n@app.route('//posts')\ndef posts(name):\n if checkUserExistence(name=name):\n if checkPostsExistence(name=name):\n # get images from db\n con = sql.connect('users.db')\n cur = con.cursor()\n avatarPath = cur.execute(f'''SELECT avatarPath FROM users WHERE name = \"{name}\"''').fetchone()[0]\n\n con = sql.connect('posts.db')\n cur = con.cursor()\n titles = cur.execute(f'''SELECT title FROM posts WHERE author = \"{name}\"''').fetchall()\n contents = cur.execute(f'''SELECT content FROM posts WHERE author = \"{name}\"''').fetchall()\n postsCount = len(contents)\n print(postsCount)\n\n if 'userLogged' in session:\n username = session['userLogged']\n else:\n username = None\n\n return render_template('posts.html',\n name=name,\n avatarPath=avatarPath,\n titles=titles, contents=contents,\n postsCount=postsCount,\n username=username)\n else:\n return redirect(url_for('postsNotFound', name=name))\n else:\n return redirect(url_for('userNotFound'))\n\n\n@app.route('//profile')\ndef profile(name):\n if checkUserExistence(name=name):\n # get images from db\n con = sql.connect('users.db')\n cur = con.cursor()\n avatarPath = cur.execute(f'''SELECT avatarPath FROM users WHERE name = \"{name}\"''').fetchone()[0]\n countOfPosts = cur.execute(f'''SELECT countOfPosts FROM users WHERE name = \"{name}\"''').fetchone()[0]\n\n return render_template('profile.html', name=name, avatarPath=avatarPath, countOfPosts=countOfPosts)\n else:\n return redirect(url_for('userNotFound'))\n\n\n@app.route('/error:user-not-found')\ndef userNotFound():\n return render_template('userNotFound.html')\n\n\n@app.route('//posts-not-found')\ndef postsNotFound(name):\n return render_template('postsNotFound.html', name=name)\n\n\n@app.errorhandler(401)\ndef error401redir(error):\n return redirect(url_for('error401page'))\n\n\n@app.route('/error:401')\ndef error401page():\n return render_template('error401.html')\n\n\n@app.errorhandler(404)\ndef error404redir(error):\n return redirect(url_for('error404page'))\n\n\n@app.route('/error:404')\ndef error404page():\n return 
render_template('error404.html')\n\n\ndef checkUserExistence(name):\n con = sql.connect('users.db')\n cur = con.cursor()\n checker = False\n\n for userName in cur.execute('''SELECT name FROM users'''):\n if name == userName[0]:\n checker = True\n break\n else:\n checker = False\n return checker\n\n\ndef checkPostsExistence(name):\n con = sql.connect('posts.db')\n cur = con.cursor()\n checker = False\n\n for userName in cur.execute('''SELECT author FROM posts'''):\n if name == userName[0]:\n checker = True\n break\n else:\n checker = False\n return checker\n\n\ndef checkPostExistence(name, title):\n con = sql.connect('posts.db')\n cur = con.cursor()\n checker = False\n\n for userName in cur.execute(f'''SELECT author FROM posts WHERE title = \"{title}\"'''):\n if name == userName[0]:\n checker = True\n break\n else:\n checker = False\n print(checker)\n return checker\n\n\ndef checkAuth(name, psw):\n con = sql.connect('users.db')\n cur = con.cursor()\n\n if cur.execute(f'''SELECT psw FROM users WHERE name = \"{name}\"''').fetchall()[0][0] == psw:\n return True\n else:\n return False\n\n\n@app.route('/deletepost/:')\ndef deletepost(name, title):\n\n if checkPostsExistence(name=name):\n if checkPostExistence(name=name, title=title):\n\n if 'userLogged' in session:\n if session['userLogged'] == name:\n\n print(f'post {title} deleted')\n\n con = sql.connect('posts.db')\n cur = con.cursor()\n\n cur.execute(f'''DELETE FROM posts WHERE title = \"{title}\" and author = \"{name}\" ''')\n con.commit()\n\n con = sql.connect('users.db')\n cur = con.cursor()\n countOfPosts = cur.execute(f'''SELECT countOfPosts FROM users \n WHERE name = \"{name}\"''').fetchone()[0]\n\n cur.execute(f'''UPDATE users SET countOfPosts = {countOfPosts - 1} WHERE name = \"{name}\"''')\n con.commit()\n\n return redirect(url_for('posts', name=name))\n else:\n abort(401)\n else:\n abort(401)\n\n else:\n abort(404)\n return redirect(url_for('posts', name=name))\n else:\n return redirect(url_for('userNotFound'))\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000)\n", "repo_name": "ImCocos/MoreLore", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 11299, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": 
"sqlite3.connect", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.session.clear", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 94, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 96, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 98, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 106, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 106, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 109, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 109, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 109, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 110, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 114, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 127, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 131, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.session.clear", "line_number": 138, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 138, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 144, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 144, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 145, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 145, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 146, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 146, "usage_type": "name"}, {"api_name": "flask.session.clear", "line_number": 149, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 149, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 150, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 153, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 153, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 153, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 154, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 154, "usage_type": "call"}, {"api_name": "flask.session", 
"line_number": 154, "usage_type": "name"}, {"api_name": "flask.session.clear", "line_number": 156, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 156, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 157, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 158, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 164, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 165, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 166, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 167, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 169, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 171, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 171, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 173, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 173, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 174, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 174, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 178, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 184, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 192, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 194, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 202, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 206, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 213, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 214, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 218, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 225, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 225, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 227, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 227, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 234, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 239, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 241, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 241, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 246, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 251, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 256, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 256, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 261, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 266, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 266, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 271, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 275, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 289, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 303, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 318, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 333, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 334, "usage_type": "name"}, {"api_name": "sqlite3.connect", 
"line_number": 338, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 344, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 352, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 352, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 354, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 356, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 359, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 360, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 360, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 362, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 362, "usage_type": "call"}]} +{"seq_id": "32360921060", "text": "import datetime\nfrom facebook_business.adobjects.adaccount import AdAccount\nfrom facebook_business.adobjects.adset import AdSet\nfrom facebook_business.api import FacebookAdsApi\nfrom flask import request\nfrom classes.interests import Interest\nfrom functions.check_adset import check_adset\nfrom classes.genders import Gender\nfrom classes.positions import Position\n\nfrom classes.regions import Region\nfrom classes.segmentations import Segmentations\nfrom functions.adset import create_adset_model\nfrom functions.campaign import create_campaign_model\nfrom functions.ad import create_ads\nfrom functions.creative_images import create_hash_images\nfrom functions.dynamic_ad import create_dynamic_ads, create_story_ads\nfrom functions.story_adset import create_story_adset\n\n\ndef main(files_dst: str):\n\n FacebookAdsApi.init(access_token=request.form['access_token'])\n\n ad_account_id = 'act_' + request.form['ad_account_id']\n instagram_actor_id = request.form['instagram_actor_id']\n page_id = request.form['facebook_page_id']\n\n campaign_result = AdAccount(\n ad_account_id).create_campaign(params=create_campaign_model(name=request.form['campaign_name'], objective=request.form['objective'],\n status=request.form['status'], daily_budget=request.form['daily_budget']))\n\n today = datetime.date.today()\n start_time = str(today)\n\n segmentations = Segmentations()\n region = Region()\n gender = Gender()\n position = Position()\n interest = Interest()\n\n genders = [request.form[x]\n for x in gender.genders if x in request.form]\n\n regions = [region.region_keys[x]\n for x in region.region_keys if x in request.form]\n\n interests = [interest.interests[x]\n for x in interest.interests if x in request.form]\n\n titulos = [request.form['titulo_1'], request.form['titulo_2']]\n descricoes = [request.form['descricao_1'], request.form['descricao_2']]\n\n if 'feed' in request.form:\n feed_face = position.facebook\n feed_insta = position.instagram\n else:\n feed_face = []\n feed_insta = []\n\n adset_ids = []\n for seg in segmentations.check_list:\n if seg in request.form:\n adset = AdSet(parent_id=ad_account_id)\n\n adset.update(create_adset_model(adset_name=request.form[seg], campaign_id=campaign_result['id'],\n genders=genders, age_min=request.form['age_min'], start_time=start_time,\n age_max=request.form['age_max'], feed_face=feed_face, feed_insta=feed_insta, pixel_id=request.form['pixel_id']))\n\n adset = check_adset(\n adset=adset, segmentation=seg, broad_regions=regions, interests=interests, is_dynamic_adset=request.form['is_dynamic_campaign'])\n\n adset.remote_create()\n\n adset_ids.append(adset['id'])\n\n if request.files:\n hash_files = create_hash_images(\n request.files, files_dst, 
ad_account_id=ad_account_id)\n\n campaign_name = request.form['campaign_name'].replace(' ', '_').lower()\n\n if 'story' in request.form:\n adset_story = AdSet(parent_id=ad_account_id)\n\n adset_story.update(create_story_adset(\n adset_name='Stories', campaign_id=campaign_result['id'], genders=genders, age_min=request.form['age_min'],\n age_max=request.form['age_max'], start_time=start_time, pixel_id=request.form['pixel_id']\n ))\n adset_story.remote_create()\n\n create_story_ads(adset_story['id'], ad_account_id=ad_account_id, call_to_action=request.form['call_to_action'],\n img_files=[hash_files[2], hash_files[3]\n ], link=request.form['link'], campaign_name=campaign_name,\n page_id=page_id, instagram_actor_id=instagram_actor_id, status=request.form['status'], pixel_id=request.form['pixel_id'])\n\n if request.form['is_dynamic_campaign'] == 'true':\n create_dynamic_ads(descricoes, titulos, adset_ids, ad_account_id,\n campaign_name, page_id, instagram_actor_id, [\n hash_files[0], hash_files[1]],\n request.form['call_to_action'], request.form['link'],\n request.form['status'], request.form['pixel_id'])\n else:\n create_ads(descricoes, titulos, adset_ids, ad_account_id,\n campaign_name, page_id, instagram_actor_id, [\n hash_files[0], hash_files[1]],\n request.form['call_to_action'], request.form['link'],\n request.form['status'], request.form['pixel_id'])\n", "repo_name": "PedroCruzADS/oxygen-campaign-creator", "sub_path": "main/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4697, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "facebook_business.api.FacebookAdsApi.init", "line_number": 23, "usage_type": "call"}, {"api_name": "facebook_business.api.FacebookAdsApi", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "facebook_business.adobjects.adaccount.AdAccount", "line_number": 29, "usage_type": "call"}, {"api_name": "functions.campaign.create_campaign_model", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 33, "usage_type": "attribute"}, {"api_name": "classes.segmentations.Segmentations", "line_number": 36, "usage_type": "call"}, {"api_name": "classes.regions.Region", "line_number": 37, "usage_type": "call"}, {"api_name": "classes.genders.Gender", "line_number": 38, "usage_type": "call"}, {"api_name": "classes.positions.Position", "line_number": 39, "usage_type": "call"}, {"api_name": "classes.interests.Interest", "line_number": 40, "usage_type": "call"}, 
{"api_name": "flask.request.form", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "facebook_business.adobjects.adset.AdSet", "line_number": 64, "usage_type": "call"}, {"api_name": "functions.adset.create_adset_model", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "functions.check_adset.check_adset", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 71, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 71, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 77, "usage_type": "name"}, {"api_name": "functions.creative_images.create_hash_images", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 79, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 81, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "facebook_business.adobjects.adset.AdSet", "line_number": 84, "usage_type": "call"}, {"api_name": "functions.story_adset.create_story_adset", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "functions.dynamic_ad.create_story_ads", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 92, "usage_type": "attribute"}, {"api_name": 
"flask.request", "line_number": 92, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 94, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 94, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 95, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 97, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 97, "usage_type": "name"}, {"api_name": "functions.dynamic_ad.create_dynamic_ads", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 101, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 101, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 102, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 102, "usage_type": "name"}, {"api_name": "functions.ad.create_ads", "line_number": 104, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 107, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 108, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 108, "usage_type": "name"}]} +{"seq_id": "5849074443", "text": "\r\n\r\n# Create your views here.\r\nfrom django.shortcuts import render\r\nfrom core.cart import Cart\r\nfrom core.form import FormularioPro, FormularioCat,CreacionDeUsuario,Login,User\r\nfrom .models import Producto,Categoria\r\nfrom rest_AmigosP.viewslogin import login\r\nfrom django.contrib.auth import authenticate, login\r\nfrom django.contrib import messages\r\nfrom email import message\r\nfrom pyexpat.errors import messages\r\nfrom django.shortcuts import redirect, render\r\nfrom rest_framework.decorators import permission_classes\r\nfrom rest_framework.permissions import IsAuthenticated, IsAdminUser\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.contrib.auth.decorators import permission_required #indica nivel de permisos\r\nfrom django.contrib.auth.mixins import PermissionRequiredMixin \r\nfrom django.views.generic.base import TemplateView\r\n\r\n# Create your views here.\r\n\r\ndef home(request):\r\n\r\n return render(request, 'core/index.html')\r\n \r\n@permission_classes((IsAuthenticated, ))\r\ndef Productos(request):\r\n #Se define objeto para obtener los productos\r\n #Se puede utilizar Producto.object.all() o 'select * from Producto'\r\n productos = Producto.objects.all()\r\n\r\n #Se cargan los objetos obtenidos en la variable\r\n contexto = {\r\n 'producto' : productos\r\n }\r\n return render(request, 'core/productos.html', contexto)\r\n\r\n@login_required\r\n@permission_classes((IsAdminUser,))\r\n#Agregar Productos\r\ndef AgregarPro(request):\r\n \r\n contexto ={\r\n 'producto' : FormularioPro()\r\n }\r\n\r\n #Se verifican los datos de producto\r\n if request.method == 'POST':\r\n #S recuperan los datos\r\n producto = FormularioPro(request.POST, request.FILES)\r\n #validacion de formulario\r\n if producto.is_valid:\r\n producto.save()\r\n contexto['mensaje'] = \"Creado correctamente\"\r\n return redirect(to='Productos')\r\n return render(request, 'core/AgregarPro.html', contexto)\r\n\r\n #eliminacion de producto\r\n@login_required\r\ndef del_producto(request, id):\r\n #se usara la id para identificar el producto borrado\r\n producto = Producto.objects.get(id = id)\r\n #se elimina el producto que coincida con 
su id\r\n producto.delete()\r\n #redireccion a la pagina de productos\r\n return redirect(to= 'Productos')\r\n\r\n@login_required\r\n@permission_required((IsAuthenticated,IsAdminUser))\r\n#Agregar una categoria\r\ndef AgregarCat(request):\r\n\r\n contexto ={\r\n 'categoria': FormularioCat()\r\n }\r\n\r\n #Se verifican los datos de categoeia\r\n if request.method == 'POST':\r\n #S recuperan los datos\r\n categoria = FormularioCat(request.POST)\r\n #validacion de formulario\r\n if categoria.is_valid:\r\n categoria.save()\r\n contexto['mensaje'] = \"Creado correctamente\"\r\n return redirect(to='Productos')\r\n return render(request, 'core/agregarCat.html', contexto)\r\n\r\n@login_required\r\n#Modificar un producto\r\ndef ModificarPro(request, id):\r\n #rescatar producto por id\r\n producto = Producto.objects.get(id = id)\r\n #Se agrega al contexto\r\n contexto = {\r\n 'form': FormularioPro(instance=producto)\r\n }\r\n #Se verifica que el metodo sea post \r\n if request.method == 'POST':\r\n formulario = FormularioPro(data=request.POST, instance=producto)\r\n #validamos\r\n if formulario.is_valid():\r\n formulario.save()\r\n contexto['mensaje'] =\"Producto Modificado\"\r\n return redirect(to='Productos')\r\n return render(request, 'core/ModificarPro.html', contexto)\r\n \r\n#para el registro de usuario\r\ndef Registro(request):\r\n data = {\r\n 'form': CreacionDeUsuario()\r\n }\r\n if request.method == 'POST':\r\n formulario = CreacionDeUsuario(data=request.POST)\r\n if formulario.is_valid():\r\n formulario.save()\r\n user = authenticate(username = formulario.cleaned_data[\"username\"], password = formulario.cleaned_data[\"password1\"])\r\n login(request, user)\r\n messages.success(request, \"Registro completado con exito\")\r\n #redireccion\r\n return redirect(to='iniSesion')\r\n data[\"form\"] = formulario\r\n return render(request , 'core/Registro.html', data)\r\n@login_required\r\n@permission_required('Vendedores')\r\ndef Admin(request):\r\n #Se define objeto para obtener los productos\r\n #Se puede utilizar Producto.object.all() o 'select * from Producto'\r\n usuarios = User.objects.all()\r\n\r\n #Se cargan los objetos obtenidos en la variable\r\n contexto = {\r\n 'form' : usuarios\r\n }\r\n\r\n return render(request, 'core/VistaAdmin', contexto)\r\n\r\n#definir view carro\r\ndef Carro(request):\r\n #obtiene los productos\r\n productos = Producto.objects.all()\r\n contexto = {\r\n 'producto' : productos\r\n }\r\n return render(request, 'core/Carro.html', contexto )\r\n\r\n#agregar al carrito\r\ndef agrega_carro(request, id):\r\n cart = Cart(request)\r\n producto = Producto.objects.get(id = id)\r\n cart.add(producto)\r\n return redirect(to = \"Carro\")\r\n\r\n#elimiar del carro\r\ndef del_carro(request, id):\r\n cart = Cart(request)\r\n producto = Producto.objects.get(id = id)\r\n cart.remove(producto)\r\n return redirect(to = \"Carro\")\r\n\r\n#restar\r\ndef res_carro(request, id):\r\n cart = Cart(request)\r\n producto = Producto.objects.get(id = id)\r\n cart.decrement(producto)\r\n return redirect(to = \"Carro\")\r\n\r\n#limpiar carro\r\ndef limpiar_carro(request):\r\n cart = Cart(request)\r\n cart.clear()\r\n return redirect(\"Carro\")\r\n\r\n\r\ndef Suscribete(request):\r\n\r\n return render(request , 'core/Suscribete.html')\r\n", "repo_name": "Johny-ctrl/Examen", "sub_path": "Examen/AmigosPeludos/core/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5681, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": 
"21", "api": [{"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Producto.objects.all", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Producto.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.Producto", "line_number": 31, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 37, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 27, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 27, "usage_type": "name"}, {"api_name": "core.form.FormularioPro", "line_number": 45, "usage_type": "call"}, {"api_name": "core.form.FormularioPro", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 56, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 57, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 39, "usage_type": "name"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 40, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAdminUser", "line_number": 40, "usage_type": "name"}, {"api_name": "models.Producto.objects.get", "line_number": 63, "usage_type": "call"}, {"api_name": "models.Producto.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "models.Producto", "line_number": 63, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 60, "usage_type": "name"}, {"api_name": "core.form.FormularioCat", "line_number": 75, "usage_type": "call"}, {"api_name": "core.form.FormularioCat", "line_number": 81, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 86, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 87, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 69, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 70, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 70, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAdminUser", "line_number": 70, "usage_type": "name"}, {"api_name": "models.Producto.objects.get", "line_number": 93, "usage_type": "call"}, {"api_name": "models.Producto.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "models.Producto", "line_number": 93, "usage_type": "name"}, {"api_name": "core.form.FormularioPro", "line_number": 96, "usage_type": "call"}, {"api_name": "core.form.FormularioPro", "line_number": 100, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 105, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 106, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 89, "usage_type": "name"}, {"api_name": "core.form.CreacionDeUsuario", "line_number": 111, "usage_type": "call"}, {"api_name": "core.form.CreacionDeUsuario", "line_number": 114, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 117, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 118, "usage_type": "call"}, {"api_name": "pyexpat.errors.messages.success", "line_number": 119, 
"usage_type": "call"}, {"api_name": "pyexpat.errors.messages", "line_number": 119, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 121, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 123, "usage_type": "call"}, {"api_name": "core.form.User.objects.all", "line_number": 129, "usage_type": "call"}, {"api_name": "core.form.User.objects", "line_number": 129, "usage_type": "attribute"}, {"api_name": "core.form.User", "line_number": 129, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 136, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 124, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 125, "usage_type": "call"}, {"api_name": "models.Producto.objects.all", "line_number": 141, "usage_type": "call"}, {"api_name": "models.Producto.objects", "line_number": 141, "usage_type": "attribute"}, {"api_name": "models.Producto", "line_number": 141, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 145, "usage_type": "call"}, {"api_name": "core.cart.Cart", "line_number": 149, "usage_type": "call"}, {"api_name": "models.Producto.objects.get", "line_number": 150, "usage_type": "call"}, {"api_name": "models.Producto.objects", "line_number": 150, "usage_type": "attribute"}, {"api_name": "models.Producto", "line_number": 150, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 152, "usage_type": "call"}, {"api_name": "core.cart.Cart", "line_number": 156, "usage_type": "call"}, {"api_name": "models.Producto.objects.get", "line_number": 157, "usage_type": "call"}, {"api_name": "models.Producto.objects", "line_number": 157, "usage_type": "attribute"}, {"api_name": "models.Producto", "line_number": 157, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 159, "usage_type": "call"}, {"api_name": "core.cart.Cart", "line_number": 163, "usage_type": "call"}, {"api_name": "models.Producto.objects.get", "line_number": 164, "usage_type": "call"}, {"api_name": "models.Producto.objects", "line_number": 164, "usage_type": "attribute"}, {"api_name": "models.Producto", "line_number": 164, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 166, "usage_type": "call"}, {"api_name": "core.cart.Cart", "line_number": 170, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 172, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 177, "usage_type": "call"}]} +{"seq_id": "32412383278", "text": "#!/usr/bin/env python3\n#\n# Author:\n# Tamas Jos (@skelsec)\n#\n\nimport os\nimport io\nimport datetime\nimport glob\nimport hashlib\n\nfrom minikerberos.protocol.asn1_structs import Ticket, EncryptedData, \\\n\tkrb5_pvno, KrbCredInfo, EncryptionKey, KRBCRED, TicketFlags, EncKrbCredPart\nfrom minikerberos.common.utils import dt_to_kerbtime, TGSTicket2hashcat\nfrom minikerberos.protocol.constants import EncryptionType, MESSAGE_TYPE\nfrom minikerberos import logger\nfrom asn1crypto import core\n\n\n\n# http://repo.or.cz/w/krb5dissect.git/blob_plain/HEAD:/ccache.txt\nclass Header:\n\tdef __init__(self):\n\t\tself.tag = None\n\t\tself.taglen = None\n\t\tself.tagdata = None\n\t\t\n\t@staticmethod\n\tdef parse(data):\n\t\t\"\"\"\n\t\treturns a list of header tags\n\t\t\"\"\"\n\t\treader = io.BytesIO(data)\n\t\theaders = []\n\t\twhile reader.tell() < len(data):\n\t\t\th = 
Header()\n\t\t\th.tag = int.from_bytes(reader.read(2), byteorder='big', signed=False)\n\t\t\th.taglen = int.from_bytes(reader.read(2), byteorder='big', signed=False)\n\t\t\th.tagdata = reader.read(h.taglen)\n\t\t\theaders.append(h)\n\t\treturn headers\n\t\t\n\tdef to_bytes(self):\n\t\tt = self.tag.to_bytes(2, byteorder='big', signed=False)\n\t\tt += len(self.tagdata).to_bytes(2, byteorder='big', signed=False)\n\t\tt += self.tagdata\n\t\treturn t\n\t\t\n\tdef __str__(self):\n\t\tt = 'tag: %s\\n' % self.tag\n\t\tt += 'taglen: %s\\n' % self.taglen\n\t\tt += 'tagdata: %s\\n' % self.tagdata\n\t\treturn t\n\nclass DateTime:\n\tdef __init__(self):\n\t\tself.time_offset = None\n\t\tself.usec_offset = None\n\t\n\t@staticmethod\n\tdef parse(reader):\n\t\td = DateTime()\n\t\td.time_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False)\n\t\td.usec_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False)\n\t\treturn d\n\t\t\n\tdef to_bytes(self):\n\t\tt = self.time_offset.to_bytes(4, byteorder='big', signed=False)\n\t\tt += self.usec_offset.to_bytes(4, byteorder='big', signed=False)\n\t\treturn t\n\t\t\n\n\t\t\nclass Credential:\n\tdef __init__(self):\n\t\tself.client = None\n\t\tself.server = None\n\t\tself.key = None\n\t\tself.time = None\n\t\tself.is_skey = None\n\t\tself.tktflags = None\n\t\tself.num_address = None\n\t\tself.addrs = []\n\t\tself.num_authdata = None\n\t\tself.authdata = []\n\t\tself.ticket = None\n\t\tself.second_ticket = None\n\n\tdef to_hash(self):\n\t\tres = Ticket.load(self.ticket.to_asn1()).native\n\t\ttgs_encryption_type = int(res['enc-part']['etype'])\n\t\tt = len(res['sname']['name-string'])\n\t\tif t == 1:\n\t\t\ttgs_name_string = res['sname']['name-string'][0]\n\t\telse:\n\t\t\ttgs_name_string = res['sname']['name-string'][1]\n\t\ttgs_realm = res['realm']\n\t\tif tgs_encryption_type == EncryptionType.AES256_CTS_HMAC_SHA1_96.value:\n\t\t\ttgs_checksum = res['enc-part']['cipher'][-12:]\n\t\t\ttgs_encrypted_data2 = res['enc-part']['cipher'][:-12:]\n\t\t\treturn '$krb5tgs$%s$%s$%s$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() )\n\t\telse:\n\t\t\ttgs_checksum = res['enc-part']['cipher'][:16]\n\t\t\ttgs_encrypted_data2 = res['enc-part']['cipher'][16:]\n\t\t\treturn '$krb5tgs$%s$*%s$%s$spn*$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() )\n\n\tdef to_tgt(self):\n\t\t\"\"\"\n\t\tReturns the native format of an AS_REP message and the sessionkey in EncryptionKey native format\n\t\t\"\"\"\n\t\tenc_part = EncryptedData({'etype': 1, 'cipher': b''})\n\t\t\n\t\ttgt_rep = {}\n\t\ttgt_rep['pvno'] = krb5_pvno\n\t\ttgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value\n\t\ttgt_rep['crealm'] = self.server.realm.to_string()\n\t\ttgt_rep['cname'] = self.client.to_asn1()[0]\n\t\ttgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native\n\t\ttgt_rep['enc-part'] = enc_part.native\n\n\t\tt = EncryptionKey(self.key.to_asn1()).native\n\t\t\n\t\treturn tgt_rep, t\n\t\t\n\tdef to_kirbi(self):\n\t\tfilename = '%s@%s_%s' % (self.client.to_string() , self.server.to_string(), hashlib.sha1(self.ticket.to_asn1()).hexdigest()[:8])\n\t\tkrbcredinfo = {}\n\t\tkrbcredinfo['key'] = EncryptionKey(self.key.to_asn1())\n\t\tkrbcredinfo['prealm'] = self.client.realm.to_string()\n\t\tkrbcredinfo['pname'] = self.client.to_asn1()[0]\n\t\tkrbcredinfo['flags'] = core.IntegerBitString(self.tktflags).cast(TicketFlags)\n\t\tif self.time.authtime != 0: #this parameter is not 
mandatory, and most of the time not present\n\t\t\tkrbcredinfo['authtime'] = datetime.datetime.fromtimestamp(self.time.authtime)\n\t\tkrbcredinfo['starttime'] = datetime.datetime.fromtimestamp(self.time.starttime)\n\t\tkrbcredinfo['endtime'] = datetime.datetime.fromtimestamp(self.time.endtime)\n\t\tif self.time.renew_till != 0: #this parameter is not mandatory, and sometimes it's not present\n\t\t\tkrbcredinfo['renew-till'] = datetime.datetime.fromtimestamp(self.time.authtime)\n\t\tkrbcredinfo['srealm'] = self.server.realm.to_string()\n\t\tkrbcredinfo['sname'] = self.server.to_asn1()[0]\n\t\t\n\t\tenc_krbcred = {}\n\t\tenc_krbcred['ticket-info'] = [KrbCredInfo(krbcredinfo)]\n\t\t\n\t\tkrbcred = {}\n\t\tkrbcred['pvno'] = krb5_pvno\n\t\tkrbcred['msg-type'] = MESSAGE_TYPE.KRB_CRED.value\n\t\tkrbcred['tickets'] = [Ticket.load(self.ticket.to_asn1())]\n\t\tkrbcred['enc-part'] = EncryptedData({'etype': EncryptionType.NULL.value, 'cipher': EncKrbCredPart(enc_krbcred).dump()})\n\t\n\t\n\t\n\t\tkirbi = KRBCRED(krbcred)\n\t\treturn kirbi, filename\n\n\t@staticmethod\n\tdef from_asn1(ticket, data):\n\t\t###\n\t\t# data = KrbCredInfo \n\t\t###\n\t\tc = Credential()\n\t\tc.client = CCACHEPrincipal.from_asn1(data['pname'], data['prealm'])\n\t\tc.server = CCACHEPrincipal.from_asn1(data['sname'], data['srealm'])\n\t\tc.key = Keyblock.from_asn1(data['key'])\n\t\tc.is_skey = 0 #not sure!\n\t\t\n\t\tc.tktflags = TicketFlags(data['flags']).cast(core.IntegerBitString).native\n\t\tc.num_address = 0\n\t\tc.num_authdata = 0\n\t\tc.ticket = CCACHEOctetString.from_asn1(ticket['enc-part']['cipher'])\n\t\tc.second_ticket = CCACHEOctetString.empty()\n\t\treturn c\n\t\n\t@staticmethod\n\tdef parse(reader):\n\t\tc = Credential()\n\t\tc.client = CCACHEPrincipal.parse(reader)\n\t\tc.server = CCACHEPrincipal.parse(reader)\n\t\tc.key = Keyblock.parse(reader)\n\t\tc.time = Times.parse(reader)\n\t\tc.is_skey = int.from_bytes(reader.read(1), byteorder='big', signed=False)\n\t\tc.tktflags = int.from_bytes(reader.read(4), byteorder='little', signed=False)\n\t\tc.num_address = int.from_bytes(reader.read(4), byteorder='big', signed=False)\n\t\tfor _ in range(c.num_address):\n\t\t\tc.addrs.append(Address.parse(reader))\n\t\tc.num_authdata = int.from_bytes(reader.read(4), byteorder='big', signed=False)\n\t\tfor i in range(c.num_authdata):\n\t\t\tc.authdata.append(Authdata.parse(reader))\n\t\tc.ticket = CCACHEOctetString.parse(reader)\n\t\tc.second_ticket = CCACHEOctetString.parse(reader)\n\t\treturn c\n\t\n\t@staticmethod\n\tdef summary_header():\n\t\treturn ['client','server','starttime','endtime','renew-till']\n\t\t\n\tdef summary(self):\n\t\treturn [ \n\t\t\t'%s@%s' % \t(self.client.to_string(),self.client.realm.to_string()), \n\t\t\t'%s@%s' % \t(self.server.to_string(), self.server.realm.to_string()),\n\t\t\tdatetime.datetime.fromtimestamp(self.time.starttime).isoformat() if self.time.starttime != 0 else 'N/A',\n\t\t\tdatetime.datetime.fromtimestamp(self.time.endtime).isoformat() if self.time.endtime != 0 else 'N/A',\n\t\t\tdatetime.datetime.fromtimestamp(self.time.renew_till).isoformat() if self.time.renew_till != 0 else 'N/A',\n\t\t\n\t\t]\n\t\t\n\tdef to_bytes(self):\n\t\tt = self.client.to_bytes()\n\t\tt += self.server.to_bytes()\n\t\tt += self.key.to_bytes()\n\t\tt += self.time.to_bytes()\n\t\tt += self.is_skey.to_bytes(1, byteorder='big', signed=False)\n\t\tt += self.tktflags.to_bytes(4, byteorder='little', signed=False)\n\t\tt += self.num_address.to_bytes(4, byteorder='big', signed=False)\n\t\tfor addr in 
self.addrs:\n\t\t\tt += addr.to_bytes()\n\t\tt += self.num_authdata.to_bytes(4, byteorder='big', signed=False)\n\t\tfor ad in self.authdata:\n\t\t\tt += ad.to_bytes()\n\t\tt += self.ticket.to_bytes()\n\t\tt += self.second_ticket.to_bytes()\n\t\treturn t\n\t\t\nclass Keyblock:\n\tdef __init__(self):\n\t\tself.keytype = None\n\t\tself.etype = None\n\t\tself.keylen = None\n\t\tself.keyvalue = None\n\t\n\t@staticmethod\n\tdef from_asn1(data):\n\t\tk = Keyblock()\n\t\tk.keytype = data['keytype']\n\t\tk.etype = 0 # not sure\n\t\tk.keylen = len(data['keyvalue'])\n\t\tk.keyvalue = data['keyvalue']\n\t\t\n\t\treturn k\n\t\t\n\tdef to_asn1(self):\n\t\tt = {}\n\t\tt['keytype'] = self.keytype\n\t\tt['keyvalue'] = self.keyvalue\n\t\t\n\t\treturn t\n\t\n\t@staticmethod\n\tdef parse(reader):\n\t\tk = Keyblock()\n\t\tk.keytype = int.from_bytes(reader.read(2), byteorder='big', signed=False)\n\t\tk.etype = int.from_bytes(reader.read(2), byteorder='big', signed=False)\n\t\tk.keylen = int.from_bytes(reader.read(2), byteorder='big', signed=False)\n\t\tk.keyvalue = reader.read(k.keylen)\n\t\treturn k\n\t\t\n\tdef to_bytes(self):\n\t\tt = self.keytype.to_bytes(2, byteorder='big', signed=False)\n\t\tt += self.etype.to_bytes(2, byteorder='big', signed=False)\n\t\tt += self.keylen.to_bytes(2, byteorder='big', signed=False)\n\t\tt += self.keyvalue\n\t\treturn t\n\t\t\n\t\t\nclass Times:\n\tdef __init__(self):\n\t\tself.authtime = None\n\t\tself.starttime = None\n\t\tself.endtime = None\n\t\tself.renew_till = None\n\t\n\t@staticmethod\n\tdef from_asn1(enc_as_rep_part):\n\t\tt = Times()\n\t\tt.authtime = dt_to_kerbtime(enc_as_rep_part['authtime']) \\\n\t\t\tif 'authtime' in enc_as_rep_part and enc_as_rep_part['authtime'] else 0\n\t\tt.starttime = dt_to_kerbtime(enc_as_rep_part['starttime']) \\\n\t\t\tif 'starttime' in enc_as_rep_part and enc_as_rep_part['starttime'] else 0\n\t\tt.endtime = dt_to_kerbtime(enc_as_rep_part['endtime']) \\\n\t\t\tif 'endtime' in enc_as_rep_part and enc_as_rep_part['endtime'] else 0\n\t\tt.renew_till = dt_to_kerbtime(enc_as_rep_part['renew_till']) \\\n\t\t\tif 'renew_till' in enc_as_rep_part and enc_as_rep_part['renew_till'] else 0\n\t\treturn t\n\t\n\t@staticmethod\n\tdef dummy_time(start= datetime.datetime.now(datetime.timezone.utc)):\n\t\tt = Times()\n\t\tt.authtime = dt_to_kerbtime(start)\n\t\tt.starttime = dt_to_kerbtime(start )\n\t\tt.endtime = dt_to_kerbtime(start + datetime.timedelta(days=1))\n\t\tt.renew_till = dt_to_kerbtime(start + datetime.timedelta(days=2))\n\t\treturn t\n\t\n\t@staticmethod\n\tdef parse(reader):\n\t\tt = Times()\n\t\tt.authtime = int.from_bytes(reader.read(4), byteorder='big', signed=False)\n\t\tt.starttime = int.from_bytes(reader.read(4), byteorder='big', signed=False)\n\t\tt.endtime = int.from_bytes(reader.read(4), byteorder='big', signed=False)\n\t\tt.renew_till = int.from_bytes(reader.read(4), byteorder='big', signed=False)\n\t\treturn t\n\t\t\n\tdef to_bytes(self):\n\t\tt = self.authtime.to_bytes(4, byteorder='big', signed=False)\n\t\tt += self.starttime.to_bytes(4, byteorder='big', signed=False)\n\t\tt += self.endtime.to_bytes(4, byteorder='big', signed=False)\n\t\tt += self.renew_till.to_bytes(4, byteorder='big', signed=False)\n\t\treturn t\n\t\t\nclass Address:\n\tdef __init__(self):\n\t\tself.addrtype = None\n\t\tself.addrdata = None\n\t\n\t@staticmethod\n\tdef parse(reader):\n\t\ta = Address()\n\t\ta.addrtype = int.from_bytes(reader.read(2), byteorder='big', signed=False)\n\t\ta.addrdata = CCACHEOctetString.parse(reader)\n\t\treturn a\n\t\t\n\tdef 
to_bytes(self):\n\t\tt = self.addrtype.to_bytes(2, byteorder='big', signed=False)\n\t\tt += self.addrdata.to_bytes()\n\t\treturn t\n\t\t\nclass Authdata:\n\tdef __init__(self):\n\t\tself.authtype = None\n\t\tself.authdata = None\n\t\n\t@staticmethod\n\tdef parse(reader):\n\t\ta = Authdata()\n\t\ta.authtype = int.from_bytes(reader.read(2), byteorder='big', signed=False)\n\t\ta.authdata = CCACHEOctetString.parse(reader)\n\t\treturn a\n\t\t\n\tdef to_bytes(self):\n\t\tt = self.authtype.to_bytes(2, byteorder='big', signed=False)\n\t\tt += self.authdata.to_bytes()\n\t\treturn t\n\t\t\nclass CCACHEPrincipal:\n\tdef __init__(self):\n\t\tself.name_type = None\n\t\tself.num_components = None\n\t\tself.realm = None\n\t\tself.components = []\n\t\n\t@staticmethod\n\tdef from_asn1(principal, realm):\n\t\tp = CCACHEPrincipal()\n\t\tp.name_type = principal['name-type']\n\t\tp.num_components = len(principal['name-string'])\n\t\tp.realm = CCACHEOctetString.from_string(realm)\n\t\tfor comp in principal['name-string']:\n\t\t\tp.components.append(CCACHEOctetString.from_asn1(comp))\n\t\t\t\n\t\treturn p\n\t\n\t@staticmethod\n\tdef dummy():\n\t\tp = CCACHEPrincipal()\n\t\tp.name_type = 1\n\t\tp.num_components = 1\n\t\tp.realm = CCACHEOctetString.from_string('kerbi.corp')\n\t\tfor _ in range(1):\n\t\t\tp.components.append(CCACHEOctetString.from_string('kerbi'))\n\t\t\t\n\t\treturn p\n\t\t\n\tdef to_string(self):\n\t\treturn '-'.join([c.to_string() for c in self.components])\n\t\t\n\tdef to_asn1(self):\n\t\tt = {'name-type': self.name_type, 'name-string': [name.to_string() for name in self.components]}\n\t\treturn t, self.realm.to_string()\t\t\n\t\n\t@staticmethod\n\tdef parse(reader):\n\t\tp = CCACHEPrincipal()\n\t\tp.name_type = int.from_bytes(reader.read(4), byteorder='big', signed=False)\n\t\tp.num_components = int.from_bytes(reader.read(4), byteorder='big', signed=False)\n\t\tp.realm = CCACHEOctetString.parse(reader)\n\t\tfor _ in range(p.num_components):\n\t\t\tp.components.append(CCACHEOctetString.parse(reader))\n\t\treturn p\n\t\t\n\tdef to_bytes(self):\n\t\tt = self.name_type.to_bytes(4, byteorder='big', signed=False)\n\t\tt += len(self.components).to_bytes(4, byteorder='big', signed=False)\n\t\tt += self.realm.to_bytes()\n\t\tfor com in self.components:\n\t\t\tt += com.to_bytes()\n\t\treturn t\n\t\t\nclass CCACHEOctetString:\n\tdef __init__(self):\n\t\tself.length = None\n\t\tself.data = None\n\t\n\t@staticmethod\n\tdef empty():\n\t\to = CCACHEOctetString()\n\t\to.length = 0\n\t\to.data = b''\n\t\treturn o\n\t\t\n\tdef to_asn1(self):\n\t\treturn self.data\n\t\t\n\tdef to_string(self):\n\t\treturn self.data.decode()\n\t\n\t@staticmethod\n\tdef from_string(data):\n\t\to = CCACHEOctetString()\n\t\to.data = data.encode()\n\t\to.length = len(o.data)\n\t\treturn o\n\t\n\t@staticmethod\n\tdef from_asn1(data):\n\t\to = CCACHEOctetString()\n\t\to.length = len(data)\n\t\tif isinstance(data,str):\n\t\t\to.data = data.encode()\n\t\telse:\n\t\t\to.data = data\n\t\treturn o\n\t\n\t@staticmethod\n\tdef parse(reader):\n\t\to = CCACHEOctetString()\n\t\to.length = int.from_bytes(reader.read(4), byteorder='big', signed=False)\n\t\to.data = reader.read(o.length)\n\t\treturn o\n\t\t\n\tdef to_bytes(self):\n\t\tif isinstance(self.data,str):\n\t\t\tself.data = self.data.encode()\n\t\t\tself.length = len(self.data)\n\t\tt = len(self.data).to_bytes(4, byteorder='big', signed=False)\n\t\tt += self.data\n\t\treturn t\n\t\t\n\t\t\nclass CCACHE:\n\t\"\"\"\n\tAs the header is rarely used -mostly static- you'd need to init this 
object with empty = True to get an object without header already present\n\t\"\"\"\n\tdef __init__(self, empty = False):\n\t\tself.file_format_version = None #0x0504\n\t\tself.headers = []\n\t\tself.primary_principal = None\n\t\tself.credentials = []\n\t\t\n\t\tif empty == False:\n\t\t\tself.__setup()\n\t\t\n\tdef __setup(self):\n\t\tself.file_format_version = 0x0504\n\t\t\n\t\theader = Header()\n\t\theader.tag = 1\n\t\theader.taglen = 8\n\t\t#header.tagdata = b'\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00'\n\t\theader.tagdata = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n\t\tself.headers.append(header)\n\t\t\n\t\t#t_hdr = b''\n\t\t#for header in self.headers:\n\t\t#\tt_hdr += header.to_bytes()\n\t\t#self.headerlen = 1 #size of the entire header in bytes, encoded in 2 byte big-endian unsigned int\n\t\t\n\t\tself.primary_principal = CCACHEPrincipal.dummy()\n\t\t\n\tdef __str__(self):\n\t\tt = '== CCACHE ==\\n'\n\t\tt+= 'file_format_version : %s\\n' % self.file_format_version\n\t\tfor header in self.headers:\n\t\t\tt+= '%s\\n' % header\n\t\tt+= 'primary_principal : %s\\n' % self.primary_principal\n\t\treturn t\n\t\t\n\tdef add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP\n\t\t\"\"\"\n\t\tCreates a credential object from the TGT and adds it to the ccache file\n\t\tThe TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a successful TGT request.\n\t\t\n\t\tThis function doesn't do decryption of the encrypted part of the as_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part\n\t\t\n\t\toverride_pp: bool to determine if client principal should be used as the primary principal for the ccache file\n\t\t\"\"\"\n\t\tc = Credential()\n\t\tc.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm'])\n\t\tif override_pp == True:\n\t\t\tself.primary_principal = c.client\n\t\tc.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm'])\n\t\tc.time = Times.from_asn1(enc_as_rep_part)\n\t\tc.key = Keyblock.from_asn1(enc_as_rep_part['key'])\n\t\tc.is_skey = 0 #not sure!\n\t\t\n\t\tc.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native\n\t\tc.num_address = 0\n\t\tc.num_authdata = 0\n\t\tc.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump())\n\t\tc.second_ticket = CCACHEOctetString.empty()\n\t\t\n\t\tself.credentials.append(c)\n\t\t\n\tdef add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from TGS_REP\n\t\t\"\"\"\n\t\tCreates a credential object from the TGS and adds it to the ccache file\n\t\tThe TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT\n\t\t\n\t\tThis function doesn't do decryption of the encrypted part of the tgs_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part\n\t\t\n\t\toverride_pp: bool to determine if client principal should be used as the primary principal for the ccache file\n\t\t\"\"\"\n\t\tc = Credential()\n\t\tc.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])\n\t\tif override_pp == True:\n\t\t\tself.primary_principal = c.client\n\t\tc.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])\n\t\tc.time = Times.from_asn1(enc_tgs_rep_part)\n\t\tc.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])\n\t\tc.is_skey = 0 #not sure!\n\t\t\n\t\tc.tktflags = 
TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native\n\t\tc.num_address = 0\n\t\tc.num_authdata = 0\n\t\tc.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())\n\t\tc.second_ticket = CCACHEOctetString.empty()\n\t\t\n\t\tself.credentials.append(c)\n\t\n\t\t\n\tdef add_kirbi(self, krbcred, override_pp = True, include_expired = False):\n\t\tc = Credential()\n\t\tenc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native\n\t\tticket_info = enc_credinfo['ticket-info'][0]\n\t\t\n\t\t\"\"\"\n\t\tif ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):\n\t\t\tif include_expired == True:\n\t\t\t\tlogging.debug('This ticket has most likely expired, but include_expired is forcing me to add it to cache! This can cause problems!')\n\t\t\telse:\n\t\t\t\tlogging.debug('This ticket has most likely expired, skipping')\n\t\t\t\treturn\n\t\t\"\"\"\n\t\t\n\t\tc.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])\n\t\tif override_pp == True:\n\t\t\tself.primary_principal = c.client\n\t\t\n\t\t#yaaaaay 4 additional weirdness!!!!\n\t\t#if sname name-string contains a realm as well then impacket will crash miserably :(\n\t\tif len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():\n\t\t\tlogger.debug('SNAME contains the realm as well, trimming it')\n\t\t\tt = ticket_info['sname']\n\t\t\tt['name-string'] = t['name-string'][:-1]\n\t\t\tc.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])\n\t\telse:\n\t\t\tc.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])\n\t\t\n\t\t\n\t\tc.time = Times.from_asn1(ticket_info)\n\t\tc.key = Keyblock.from_asn1(ticket_info['key'])\n\t\tc.is_skey = 0 #not sure!\n\t\t\n\t\tc.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native\n\t\tc.num_address = 0\n\t\tc.num_authdata = 0\n\t\tc.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file\n\t\tc.second_ticket = CCACHEOctetString.empty()\n\t\t\n\t\tself.credentials.append(c)\n\t\t\n\t@staticmethod\n\tdef from_kirbi(kirbidata):\n\t\tkirbi = KRBCRED.load(kirbidata).native\n\t\tcc = CCACHE()\n\t\tcc.add_kirbi(kirbi)\t\t\n\t\treturn cc\n\n\tdef get_all_tgt(self):\n\t\t\"\"\"\n\t\tReturns a list of AS_REP tickets in native format (dict). 
\n\t\tTo determine which tickets are AS_REP we check for the server principal to be the kerberos service\n\t\t\"\"\"\n\t\ttgts = []\n\t\tfor cred in self.credentials:\n\t\t\tif cred.server.to_string().lower().find('krbtgt') != -1:\n\t\t\t\ttgts.append(cred.to_tgt())\n\n\t\treturn tgts\n\n\tdef get_hashes(self, all_hashes = False):\n\t\t\"\"\"\n\t\tReturns a list of hashes in hashcat-friendly format for tickets with encryption type 23 (which is RC4)\n\t\tall_hashes: overrides the encryption type filtering and returns hashes for all tickets\n\n\t\t\"\"\"\n\t\thashes = []\n\t\tfor cred in self.credentials:\n\t\t\tres = Ticket.load(cred.ticket.to_asn1()).native\n\t\t\tif int(res['enc-part']['etype']) == 23 or all_hashes == True:\n\t\t\t\thashes.append(cred.to_hash())\n\n\t\treturn hashes\n\t\t\n\t@staticmethod\n\tdef parse(reader):\n\t\tc = CCACHE(True)\n\t\tc.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False)\n\t\t\n\t\thdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False)\n\t\tc.headers = Header.parse(reader.read(hdr_size))\n\t\t\n\t\t#c.headerlen = \n\t\t#for i in range(c.headerlen):\n\t\t#\tc.headers.append(Header.parse(reader))\n\t\t\n\t\t\n\t\tc.primary_principal = CCACHEPrincipal.parse(reader)\n\t\tpos = reader.tell()\n\t\treader.seek(-1,2)\n\t\teof = reader.tell()\n\t\treader.seek(pos,0)\n\t\twhile reader.tell() < eof:\n\t\t\tc.credentials.append(Credential.parse(reader))\n\t\t\n\t\treturn c\n\t\t\n\tdef to_bytes(self):\n\t\tt = self.file_format_version.to_bytes(2, byteorder='big', signed=False)\n\t\t\n\t\tt_hdr = b''\n\t\tfor header in self.headers:\n\t\t\tt_hdr += header.to_bytes()\n\t\t\n\t\tt += len(t_hdr).to_bytes(2, byteorder='big', signed=False)\n\t\tt += t_hdr\n\t\t\n\t\tt += self.primary_principal.to_bytes()\n\t\tfor cred in self.credentials:\n\t\t\tt += cred.to_bytes()\n\t\treturn t\n\t\n\t@staticmethod\n\tdef from_kirbifile(kirbi_filename):\n\t\tkf_abs = os.path.abspath(kirbi_filename)\n\t\tkirbidata = None\n\t\twith open(kf_abs, 'rb') as f:\n\t\t\tkirbidata = f.read()\n\t\t\t\n\t\treturn CCACHE.from_kirbi(kirbidata)\n\t\n\t@staticmethod\n\tdef from_kirbidir(directory_path):\n\t\t\"\"\"\n\t\tIterates through all .kirbi files in a given directory and converts all of them into one CCACHE object\n\t\t\"\"\"\n\t\tcc = CCACHE()\n\t\tdir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi')\n\t\tfor filename in glob.glob(dir_path):\n\t\t\twith open(filename, 'rb') as f:\n\t\t\t\tkirbidata = f.read()\n\t\t\t\tkirbi = KRBCRED.load(kirbidata).native\n\t\t\t\tcc.add_kirbi(kirbi)\n\t\t\n\t\treturn cc\n\t\t\n\tdef to_kirbidir(self, directory_path):\n\t\t\"\"\"\n\t\tConverts all credential objects in the CCACHE object to the kirbi file format used by mimikatz.\n\t\tThe kirbi file format supports one credential per file, so prepare for a lot of files being generated.\n\t\t\n\t\tdirectory_path: str the directory to write the kirbi files to\n\t\t\"\"\"\n\t\tkf_abs = os.path.abspath(directory_path)\n\t\tfor cred in self.credentials:\n\t\t\tkirbi, filename = cred.to_kirbi()\n\t\t\tfilename = '%s.kirbi' % filename.replace('..','!')\n\t\t\tfilepath = os.path.join(kf_abs, filename)\n\t\t\twith open(filepath, 'wb') as o:\n\t\t\t\to.write(kirbi.dump())\n\t\n\t@staticmethod\n\tdef from_file(filename):\n\t\t\"\"\"\n\t\tParses the ccache file and returns a CCACHE object\n\t\t\"\"\"\n\t\twith open(filename, 'rb') as f:\n\t\t\treturn CCACHE.parse(f)\n\t\t\t\n\tdef to_file(self, filename):\n\t\t\"\"\"\n\t\tWrites the contents of the CCACHE object 
to a file\n\t\t\"\"\"\n\t\twith open(filename, 'wb') as f:\n\t\t\tf.write(self.to_bytes())\n\t\t\n", "repo_name": "ryanmrestivo/red-team", "sub_path": "Exploitation-Tools/CrackMapExec/site-packages/minikerberos/common/ccache.py", "file_name": "ccache.py", "file_ext": "py", "file_size_in_byte": 22559, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 91, "dataset": "github-code", "pt": "21", "api": [{"api_name": "io.BytesIO", "line_number": 34, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.Ticket.load", "line_number": 91, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.Ticket", "line_number": 91, "usage_type": "name"}, {"api_name": "minikerberos.protocol.constants.EncryptionType.AES256_CTS_HMAC_SHA1_96", "line_number": 99, "usage_type": "attribute"}, {"api_name": "minikerberos.protocol.constants.EncryptionType", "line_number": 99, "usage_type": "name"}, {"api_name": "minikerberos.protocol.asn1_structs.EncryptedData", "line_number": 112, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.krb5_pvno", "line_number": 115, "usage_type": "name"}, {"api_name": "minikerberos.protocol.constants.MESSAGE_TYPE.KRB_AS_REP", "line_number": 116, "usage_type": "attribute"}, {"api_name": "minikerberos.protocol.constants.MESSAGE_TYPE", "line_number": 116, "usage_type": "name"}, {"api_name": "minikerberos.protocol.asn1_structs.Ticket.load", "line_number": 119, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.Ticket", "line_number": 119, "usage_type": "name"}, {"api_name": "minikerberos.protocol.asn1_structs.EncryptionKey", "line_number": 122, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 127, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.EncryptionKey", "line_number": 129, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.TicketFlags", "line_number": 132, "usage_type": "argument"}, {"api_name": "asn1crypto.core.IntegerBitString", "line_number": 132, "usage_type": "call"}, {"api_name": "asn1crypto.core", "line_number": 132, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 134, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 135, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 136, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 138, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 138, "usage_type": "attribute"}, {"api_name": "minikerberos.protocol.asn1_structs.KrbCredInfo", "line_number": 143, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.krb5_pvno", "line_number": 146, "usage_type": "name"}, {"api_name": "minikerberos.protocol.constants.MESSAGE_TYPE.KRB_CRED", "line_number": 147, "usage_type": "attribute"}, {"api_name": "minikerberos.protocol.constants.MESSAGE_TYPE", "line_number": 147, "usage_type": "name"}, {"api_name": "minikerberos.protocol.asn1_structs.Ticket.load", "line_number": 148, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.Ticket", "line_number": 148, "usage_type": "name"}, {"api_name": "minikerberos.protocol.asn1_structs.EncryptedData", "line_number": 
149, "usage_type": "call"}, {"api_name": "minikerberos.protocol.constants.EncryptionType.NULL", "line_number": 149, "usage_type": "attribute"}, {"api_name": "minikerberos.protocol.constants.EncryptionType", "line_number": 149, "usage_type": "name"}, {"api_name": "minikerberos.protocol.asn1_structs.EncKrbCredPart", "line_number": 149, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.KRBCRED", "line_number": 153, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.TicketFlags", "line_number": 167, "usage_type": "call"}, {"api_name": "asn1crypto.core.IntegerBitString", "line_number": 167, "usage_type": "attribute"}, {"api_name": "asn1crypto.core", "line_number": 167, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 201, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 201, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 202, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 202, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 203, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 203, "usage_type": "attribute"}, {"api_name": "minikerberos.common.utils.dt_to_kerbtime", "line_number": 275, "usage_type": "call"}, {"api_name": "minikerberos.common.utils.dt_to_kerbtime", "line_number": 277, "usage_type": "call"}, {"api_name": "minikerberos.common.utils.dt_to_kerbtime", "line_number": 279, "usage_type": "call"}, {"api_name": "minikerberos.common.utils.dt_to_kerbtime", "line_number": 281, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 286, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 286, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 286, "usage_type": "attribute"}, {"api_name": "minikerberos.common.utils.dt_to_kerbtime", "line_number": 288, "usage_type": "call"}, {"api_name": "minikerberos.common.utils.dt_to_kerbtime", "line_number": 289, "usage_type": "call"}, {"api_name": "minikerberos.common.utils.dt_to_kerbtime", "line_number": 290, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 290, "usage_type": "call"}, {"api_name": "minikerberos.common.utils.dt_to_kerbtime", "line_number": 291, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 291, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.TicketFlags", "line_number": 505, "usage_type": "call"}, {"api_name": "asn1crypto.core.IntegerBitString", "line_number": 505, "usage_type": "attribute"}, {"api_name": "asn1crypto.core", "line_number": 505, "usage_type": "name"}, {"api_name": "minikerberos.protocol.asn1_structs.Ticket", "line_number": 508, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.TicketFlags", "line_number": 531, "usage_type": "call"}, {"api_name": "asn1crypto.core.IntegerBitString", "line_number": 531, "usage_type": "attribute"}, {"api_name": "asn1crypto.core", "line_number": 531, "usage_type": "name"}, {"api_name": "minikerberos.protocol.asn1_structs.Ticket", "line_number": 534, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.EncKrbCredPart.load", "line_number": 542, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.EncKrbCredPart", "line_number": 542, "usage_type": "name"}, {"api_name": "minikerberos.logger.debug", "line_number": 561, "usage_type": "call"}, {"api_name": 
"minikerberos.logger", "line_number": 561, "usage_type": "name"}, {"api_name": "minikerberos.protocol.asn1_structs.TicketFlags", "line_number": 573, "usage_type": "call"}, {"api_name": "asn1crypto.core.IntegerBitString", "line_number": 573, "usage_type": "attribute"}, {"api_name": "asn1crypto.core", "line_number": 573, "usage_type": "name"}, {"api_name": "minikerberos.protocol.asn1_structs.Ticket", "line_number": 576, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.KRBCRED.load", "line_number": 583, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.KRBCRED", "line_number": 583, "usage_type": "name"}, {"api_name": "minikerberos.protocol.asn1_structs.Ticket.load", "line_number": 608, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.Ticket", "line_number": 608, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 654, "usage_type": "call"}, {"api_name": "os.path", "line_number": 654, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 667, "usage_type": "call"}, {"api_name": "os.path", "line_number": 667, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 667, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 668, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.KRBCRED.load", "line_number": 671, "usage_type": "call"}, {"api_name": "minikerberos.protocol.asn1_structs.KRBCRED", "line_number": 671, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 683, "usage_type": "call"}, {"api_name": "os.path", "line_number": 683, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 687, "usage_type": "call"}, {"api_name": "os.path", "line_number": 687, "usage_type": "attribute"}]} +{"seq_id": "72225165174", "text": "import os\r\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\r\n\r\nspecial_chars = '%_&#$~^'\r\nr_constant = 185\r\n\r\nwith open('cvs.txt', 'r', encoding='utf8') as f:\r\n arr = [x[:-1] for x in f.readlines()]\r\n\r\ndata = []\r\nprev = 0\r\nfor i in range(len(arr)):\r\n if arr[i]=='%%%-----%%%':\r\n data.append(arr[prev:i])\r\n prev = i+1\r\n\r\nfor i in range(len(data)//2):\r\n with open(f'cv_{i+r_constant}.txt', 'w', encoding='utf8') as f:\r\n for line in data[2*i]:\r\n step = 0\r\n for j in range(len(line)):\r\n if line[j+step] in special_chars:\r\n line = line[:j+step]+'\\\\'+line[j+step:]\r\n step += 1\r\n elif line[j+step]==\"´\":\r\n line = line[:j+step]+\"'\"+line[j+step+1:]\r\n elif 'I will go to ^^' in line:\r\n k = line.find('^^')\r\n line = line[:k]+' '+line[k+2:]\r\n if 'part_time' in line:\r\n k = line.find('part_time')\r\n l = line[:k+4]+' '+line[k+5:]\r\n elif 'full_time' in line:\r\n k = line.find('full_time')\r\n l = line[:k+4]+' '+line[k+5:]\r\n else:\r\n l = line\r\n f.write(l+'\\n')\r\n os.rename(f'cv_{i+r_constant}.txt', f'cv_{i+r_constant}.tex')\r\n os.system(f'pdflatex cv_{i+r_constant}.tex')\r\n\r\n r = data[2*i+1][0]\r\n s = r[r.find('cv=')+3:r.find('=cv')]\r\n\r\n print(i+r_constant)\r\n if s[-4:]=='.pdf':\r\n secure = True\r\n elif s:\r\n secure = False\r\n print(f'CV {i+r_constant} has wrong suffix')\r\n print(s[-4:])\r\n\r\n if s and secure:\r\n output = PdfFileWriter()\r\n\r\n pdfOne = PdfFileReader(open(f'cv_{i+r_constant}.pdf', 'rb'))\r\n pdfTwo = PdfFileReader(open(s, 'rb'))\r\n\r\n output.addPage(pdfOne.getPage(0))\r\n output.addPage(pdfTwo.getPage(0))\r\n\r\n outputStream = open(r'hacker_{}.pdf'.format(i+r_constant), 'wb')\r\n 
output.write(outputStream)\r\n outputStream.close()\r\n else:\r\n os.rename(f'cv_{i+r_constant}.pdf', f'hacker_{i+r_constant}.pdf')\r\n\r\n for p in ['aux', 'log', 'out', 'tex']:\r\n try:\r\n os.remove(f'cv_{i+r_constant}.{p}')\r\n except(FileNotFoundError):\r\n pass\r\n", "repo_name": "hackkosice/util-scripts", "sub_path": "cv-generator/cv_generator.py", "file_name": "cv_generator.py", "file_ext": "py", "file_size_in_byte": 2284, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.rename", "line_number": 39, "usage_type": "call"}, {"api_name": "os.system", "line_number": 40, "usage_type": "call"}, {"api_name": "PyPDF2.PdfFileWriter", "line_number": 54, "usage_type": "call"}, {"api_name": "PyPDF2.PdfFileReader", "line_number": 56, "usage_type": "call"}, {"api_name": "PyPDF2.PdfFileReader", "line_number": 57, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 66, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "21176455025", "text": "#!/usr/bin/python\nimport sys\nimport qrcode\n\npy = sys.argv[1]\nif \"min\" in py:\n fn, qual = \"py-qr-reg.png\", qrcode.constants.ERROR_CORRECT_L\nelse:\n fn, qual = \"py-qr-min.png\", qrcode.constants.ERROR_CORRECT_Q\n\nSCHEME_DATA = open(py).read()\n\nqr = qrcode.QRCode(border=1, error_correction=qual)\nqr.add_data(SCHEME_DATA)\nimg = qr.make_image()\nimg.save(fn)\n", "repo_name": "Mehvix/MSE", "sub_path": "gen-py-qr.py", "file_name": "gen-py-qr.py", "file_ext": "py", "file_size_in_byte": 357, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.argv", "line_number": 5, "usage_type": "attribute"}, {"api_name": "qrcode.constants", "line_number": 7, "usage_type": "attribute"}, {"api_name": "qrcode.constants", "line_number": 9, "usage_type": "attribute"}, {"api_name": "qrcode.QRCode", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "11430025512", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nsize=50\nx1=np.linspace(0,10,size)+np.random.rand(size)*5\ny1=2*x1+1+np.random.rand(size)*5\n\n\nx2=np.linspace(0,10,size)+np.random.randn(size)*1\ny2=2*x2+10+np.random.randn(size)*1\n\nplt.figure()\nplt.scatter(x1,y1,color='r',marker='s')\nplt.scatter(x2,y2,color='b',marker='o')\nplt.show()\n", "repo_name": "Carmon-Lee/python", "sub_path": "python_cookbook/algorithm/sigmoid.py", "file_name": "sigmoid.py", "file_ext": "py", "file_size_in_byte": 334, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.linspace", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 5, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 6, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 10, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "18203537056", "text": "from scipy.ndimage import gaussian_filter1d\nfrom scipy.signal import savgol_filter\nimport numpy as np\nimport pandas as pd\nimport nlopt\nimport ipywidgets as widgets\n\nDX = 0.01\n\ndef load_file_data(CSV_COLUMN_SEPARATOR, ENERGY_AXIS):\n # Load your experimental data\n experimental_data = './input/experimental_data.csv'\n csv_panda = pd.read_csv(experimental_data, sep=CSV_COLUMN_SEPARATOR, header=None)\n\n # Normalize experimental data\n spectrum_e = csv_panda.iloc[:,0].to_numpy()\n spectrum = csv_panda.iloc[:,1].to_numpy()\n spectrum = spectrum - np.min(spectrum)\n spectrum = spectrum /np.max(spectrum) \n\n if np.sum(np.diff(spectrum_e)) < 0:\n spectrum_e = np.flip(spectrum_e)\n spectrum = np.flip(spectrum)\n \n y_interpolated = np.interp(ENERGY_AXIS,spectrum_e, spectrum)\n \n return y_interpolated, ENERGY_AXIS\n\n\n# FIT LOSS FUNCTION\ndef fit_loss(x, y_true, model, energy_axis, atomic_positions):\n y_spectrum = CalculatedSpectrum(model, x, energy_axis, atomic_positions)\n background, difference = y_spectrum.infer_background(y_true)\n deviation = np.square(difference - background)\n back_error = np.sum(deviation)\n mae = np.sum(np.abs(difference))\n\n loss = 0.6*mae + 0.4*back_error\n \n print(f\"mae: {mae} bkg_error: {back_error}\")\n return float(loss)\n\n\n\ndef calculate_spectra(model, x, gaussian=1, offset=0, shift=0, num_atoms=1, input_size=7):\n y_pred = None\n lim = input_size + 1 # X contains NN input plus one parameter for scale factor\n \n for i in range(0, num_atoms):\n weight_i = x[i*lim + input_size]\n x_tensor = np.expand_dims(x[i*lim:i*lim+input_size],0)\n if y_pred is None:\n y_pred = weight_i * model(x_tensor)[0].numpy()[:,0]\n else:\n y_pred += weight_i * model(x_tensor)[0].numpy()[:,0]\n \n y_pred = shift_spectrum_energy(y_pred,shift)\n y_pred = y_pred + offset\n y_pred = gaussian_broadening(y_pred, gaussian)\n return y_pred \n\n\ndef gaussian_broadening(y, broad):\n FWHM = broad/DX\n sigma = FWHM/2.355\n return gaussian_filter1d(y, sigma)\n \n\n\ndef shift_spectrum_energy(spec, shift):\n dx = DX\n size = len(spec)\n if abs(shift) < dx:\n return spec \n pad = np.zeros(int(abs(shift)/dx))\n if shift < 0:\n b = np.concatenate((spec,pad))\n b = b[-size:]\n else:\n b = np.concatenate((pad, spec))\n b = b[0:size]\n return b\n\n\ndef parse_opt_input(x):\n NN_input = x[0:7]\n gs_broad= x[7]\n scale = x[8]\n offset = x[9]; \n shift = x[10] \n return NN_input, gs_broad, scale, offset, shift \n\n\ndef create_optimizer(S,fit_loss, num_atoms=1):\n \"\"\" Create nlopt optimizer for spectrum fit \"\"\"\n \n lower_state = [S['range_nox'][0],\n S['range_delta'][0],\n S['range_Udd'][0],\n S['range_Upd'][0],\n S['range_T2q'][0],\n S['range_Dt'][0],\n S['range_Ds'][0],\n S['range_scale'][0],\n \n ]\n \n lower_instrum = [S['range_broad'][0],\n S['range_offset'][0],\n S['range_shift'][0] \n ]\n \n lower = np.array(num_atoms*lower_state + lower_instrum)\n\n upper_state = [S['range_nox'][1],\n S['range_delta'][1],\n S['range_Udd'][1],\n 
S['range_Upd'][1],\n S['range_T2q'][1],\n S['range_Dt'][1],\n S['range_Ds'][1],\n S['range_scale'][1],\n ]\n \n upper_instrum = [S['range_broad'][1],\n S['range_offset'][1],\n S['range_shift'][1] \n ]\n \n upper = np.array(num_atoms*upper_state + upper_instrum) \n \n \n opt = nlopt.opt(nlopt.LN_BOBYQA, len(upper)) \n opt.set_lower_bounds(lower)\n opt.set_upper_bounds(upper)\n opt.set_ftol_rel(S['set_ftol_rel'])\n opt.set_xtol_rel(S['set_xtol_rel'])\n opt.verbose = 1\n # opt.maxeval = 1\n opt.set_min_objective(fit_loss)\n x0 = (upper + lower) / 2\n return opt, x0\n\n\nclass AtomicElement():\n def __init__(self, nox=2, delta=0, Udd=0, Upd=0, T2q=0,Dt=0, Ds=0, weight=1):\n self.nox = nox\n self.delta = delta\n self.Udd = Udd\n self.Upd = Upd\n self.T2q = T2q\n self.Dt = Dt\n self.Ds = Ds\n self.weight = weight\n\n \n \nclass CalculatedSpectrum():\n y_peaks = None\n y_background = None\n spectrum = None\n atoms = []\n \n def __init__(self, model, X, energy_range, atomic_positions=1, input_size=7):\n self.num_atoms = atomic_positions\n self.model = model\n self.energy_range = energy_range\n self.atoms= [] \n lim = input_size + 1 # NN_input parameters and Scale factor\n \n for i in range(0,atomic_positions):\n if len(X):\n self.atoms.append(AtomicElement(*X[i*lim:(i+1)*lim])) \n else:\n atom = AtomicElement()\n print(atom)\n self.atoms.append(atom) \n if len(X):\n self.broad = X[-3] \n self.offset = X[-2] \n self.shift = X[-1]\n else:\n self.broad = 0.05 \n self.offset = 0 \n self.shift = 0\n \n \n def __str__(self):\n inst_params = (f\"\\nINSTRUMENTAL PARAMETERS:\\n\"\n f\"broad: {self.broad}\\n\"\n f\"offset: {self.offset}\\n\"\n f\"shift: {self.shift}\\n\")\n ele_params = \"\"\n for i,atom in enumerate(self.atoms):\n ele_params += (f\"\\nELECTRONIC PARAMETERS Element:{i}\\n\"\n f\"Ox. 
state:{round(atom.nox,2)}\\n\" \n f\"delta: {atom.delta}\\n\"\n f\"Udd: {atom.Udd}\\n\"\n f\"Upd: {atom.Upd}\\n\"\n f\"\\nCRYSTAL PARAMS:\\n\"\n f\"T2q: {atom.T2q}\\n\"\n f\"Dt: {atom.Dt}\\n\"\n f\"Ds: {atom.Ds}\\n\"\n f\"Weight:{atom.weight}\\n\" \n )\n \n return inst_params + ele_params\n \n \n def get_model_input(self):\n nn_input = []\n for atom in self.atoms:\n nn_input += [atom.nox, \n atom.delta,\n atom.Udd,\n atom.Upd,\n atom.T2q,\n atom.Dt,\n atom.Ds,\n atom.weight\n ]\n \n return np.array(nn_input)\n \n def get_parameter_array(self): \n nn_input = self.get_model_input()\n instrum = np.array([self.broad, self.offset, self.shift]);\n parameters = np.concatenate((nn_input, instrum)) \n \n return parameters\n \n def calculate_peaks(self):\n model_input = self.get_model_input()\n self.y_peaks = calculate_spectra(self.model, model_input, self.broad, self.offset, self.shift, self.num_atoms)\n return self.y_peaks\n \n \n def infer_background(self, y_experiment):\n self.calculate_peaks()\n difference = (y_experiment - self.y_peaks)\n self.y_background = savgol_filter(difference,301,1)\n self.y_background = savgol_filter(self.y_background,301,1)\n return self.y_background, difference\n \n def calculate_spectra(self, y_experiment=None):\n self.spectrum = self.calculate_peaks()\n if y_experiment is not None:\n self.infer_background(y_experiment)\n self.spectrum = self.spectrum + self.y_background\n return self.spectrum\n \n \n def plot(self, plt,y_experiment=None):\n if self.y_background is None and y_experiment is None:\n plt.plot(self.energy_range, self.spectrum)\n if self.y_background is not None and y_experiment is None:\n plt.plot(self.energy_range, self.spectrum, self.energy_range, self.y_background)\n plt.legend(['Calculated spectrum', 'Background estimation'])\n \n if self.y_background is not None and y_experiment is not None:\n plt.plot(self.energy_range, self.spectrum, self.energy_range, self.y_background, self.energy_range, y_experiment) \n plt.legend(['Calculated spectrum', 'Background estimation', 'Experimental data'])\n \n plt.xlabel('energy (eV)')\n \n \n \n \nuploader = widgets.FileUpload(\n accept='.txt, .dsv, .csv', # Accepted file extension e.g. '.txt', '.pdf', 'image/*', 'image/*,.pdf'\n multiple=False \n)\n\ndef on_upload_change(change):\n global uploader\n try:\n input_file = list(uploader.value.values())[0]\n except:\n input_file = uploader.value[0]\n with open(\"./input/experimental_data.csv\", \"wb\") as fp:\n fp.write(input_file['content'])\n print(\"File select. 
Ready to load the file data\")\n\nuploader.observe(on_upload_change, names=['value'])\n\n\n\n ", "repo_name": "cviolbarbosa/HYSUCAP-DNN4XPS", "sub_path": "src/spect_tools.py", "file_name": "spect_tools.py", "file_ext": "py", "file_size_in_byte": 9144, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.ndimage.gaussian_filter1d", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "nlopt.opt", "line_number": 133, "usage_type": "call"}, {"api_name": "nlopt.LN_BOBYQA", "line_number": 133, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 228, "usage_type": "call"}, {"api_name": "scipy.signal.savgol_filter", "line_number": 241, "usage_type": "call"}, {"api_name": "scipy.signal.savgol_filter", "line_number": 242, "usage_type": "call"}, {"api_name": "ipywidgets.FileUpload", "line_number": 269, "usage_type": "call"}]} +{"seq_id": "14211641702", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\nimport tempfile\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nimport numpy as np \n\n\"\"\" ggg: Here we create a dataset, i.e. 
a class that contains\n    data: input to the net\n    labels: output\n\n    It gives part of the data every time by calling \"next_batch\"\n    Every time it goes over the whole data it shuffles it.\n    \"\"\"\nclass Dataset:\n\n    def __init__(self,data,labels):\n        self._index_in_epoch = 0\n        self._epochs_completed = 0\n        self._data = data\n        self._labels = labels\n        self._num_examples = data.shape[0]\n        pass\n\n\n    @property\n    def data(self):\n        return self._data\n\n    @property\n    def labels(self):\n        return self._labels\n\n    def next_batch(self,batch_size,shuffle = True):\n        start = self._index_in_epoch\n        if start == 0 and self._epochs_completed == 0:\n            idx = np.arange(0, self._num_examples) # get all possible indexes\n            np.random.shuffle(idx) # shuffle indexes\n            self._data = self.data[idx] # get list of `num` random samples\n            self._labels = self.labels[idx] # get list of `num` random samples\n\n        # go to the next batch\n        if start + batch_size > self._num_examples:\n            self._epochs_completed += 1\n            rest_num_examples = self._num_examples - start\n            data_rest_part = self.data[start:self._num_examples]\n            labels_rest_part = self.labels[start:self._num_examples]\n            idx0 = np.arange(0, self._num_examples) # get all possible indexes\n            np.random.shuffle(idx0) # shuffle indexes\n            self._data = self.data[idx0] # get list of `num` random samples\n            self._labels = self.labels[idx0] # get list of `num` random samples\n\n            start = 0\n            self._index_in_epoch = batch_size - rest_num_examples #avoid the case where the #sample != integer times of batch_size\n            end = self._index_in_epoch \n            data_new_part = self._data[start:end] \n            labels_new_part = self._labels[start:end] \n            return [np.concatenate((data_rest_part, data_new_part), axis=0),np.concatenate((labels_rest_part, labels_new_part), axis=0)]\n        else:\n            self._index_in_epoch += batch_size\n            end = self._index_in_epoch\n            return [self._data[start:end],self._labels[start:end]]\n\n\n\n\"\"\"ggg: Here we load the data from Matlab and put it in our dataset class\nIn matlab it was simply:\nData=rand(5000,28,28,2);\nLabels=rand(5000,28,28,2);\nsave('/home/a/tensorflowtest/MyData_Img.mat','Data','Labels')\n\"\"\"\n\nimport scipy.io\nFullData=scipy.io.loadmat('/home/a/tensorflowtest/MyData_Img.mat')\n\ndataset = Dataset(FullData['Data'],FullData['Labels'])\n\nSz=FullData['Data'].shape\nOutSz=FullData['Labels'].shape\n\nimport tensorflow as tf\n\nFLAGS = None\n\n\nnPE=Sz[1];\nnRO=Sz[2];\n\nnFeaturesInput=Sz[3]; # Should be 2: real and imag\nnFeaturesAfterFC=4;\nnFeaturesLayer1=32;\nnFeaturesLayer2=17;\nnFeaturesOutput=OutSz[3]; # Should be 2: real and imag\n\nKernelSizeLayer1PE=5;\nKernelSizeLayer1RO=7;\n\nKernelSizeLayer2PE=5;\nKernelSizeLayer2RO=5;\n\nKernelSizeLayer3PE=3;\nKernelSizeLayer3RO=5;\n\n\n\"\"\"ggg: Defining the neural network\"\"\"\n\ndef deepnn(x):\n  \"\"\"deepnn builds the graph for a deep net that maps complex input images to complex output images.\n\n  Args:\n    x: an input tensor with the dimensions (N_examples, nPE, nRO, nFeaturesInput), where the\n    last dimension holds the real and imaginary parts of each image.\n\n  Returns:\n    y_conv: a tensor of shape (N_examples, nPE, nRO, nFeaturesOutput), holding the 
real and imaginary parts of the predicted output image.\n  \"\"\"\n  # Reshape to use within a convolutional neural net.\n  # Last dimension is for \"features\" - here there are two (real and imag),\n  # it would be 1 for a grayscale image, 3 for RGB, 4 for RGBA, etc.\n  #with tf.name_scope('reshape'):\n  x_flatted = tf.reshape(x, [-1, nPE*nRO*nFeaturesInput])\n\n  with tf.name_scope('fc1'):\n    W_fc1 = weight_variable([nPE*nRO*nFeaturesInput, nPE*nRO*nFeaturesAfterFC])\n    b_fc1 = bias_variable([nPE*nRO*nFeaturesAfterFC])\n\n    h_fc1 = tf.nn.relu(tf.matmul(x_flatted, W_fc1) + b_fc1)\n    h_fc1_as_Img=tf.reshape(h_fc1, [-1, nPE,nRO,nFeaturesAfterFC])\n\n  # First convolutional layer - maps nFeaturesAfterFC feature maps to nFeaturesLayer1 feature maps.\n  with tf.name_scope('conv1'):\n    W_conv1 = weight_variable([KernelSizeLayer1PE, KernelSizeLayer1RO, nFeaturesAfterFC, nFeaturesLayer1])\n    b_conv1 = bias_variable([nFeaturesLayer1])\n    #h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n    \"\"\"ggg changed here to not do reshape\"\"\"\n    #h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)\n    h_conv1 = tf.nn.relu(conv2d(h_fc1_as_Img, W_conv1) + b_conv1)\n\n  # Second convolutional layer -- maps nFeaturesLayer1 feature maps to 17.\n  with tf.name_scope('conv2'):\n    W_conv2 = weight_variable([KernelSizeLayer2PE, KernelSizeLayer2RO, nFeaturesLayer1, nFeaturesLayer2])\n    b_conv2 = bias_variable([nFeaturesLayer2])\n    h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)\n\n  # Third convolutional layer -- maps 17 feature maps to 2.\n  with tf.name_scope('conv3'):\n    W_conv3 = weight_variable([KernelSizeLayer3PE, KernelSizeLayer3RO, nFeaturesLayer2, nFeaturesOutput])\n    b_conv3 = bias_variable([nFeaturesOutput])\n    #h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3) + b_conv3)\n    h_conv3 = conv2d(h_conv2, W_conv3) + b_conv3\n    y_conv = h_conv3;\n\n  return y_conv\n\ndef conv2d(x, W):\n  \"\"\"conv2d returns a 2d convolution layer with full stride.\"\"\"\n  return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef weight_variable(shape):\n  \"\"\"weight_variable generates a weight variable of a given shape.\"\"\"\n  initial = tf.truncated_normal(shape, stddev=0.1)\n  return tf.Variable(initial)\n\ndef bias_variable(shape):\n  \"\"\"bias_variable generates a bias variable of a given shape.\"\"\"\n  initial = tf.constant(0.1, shape=shape)\n  return tf.Variable(initial)\n\n\n# Create the model\nx = tf.placeholder(tf.float32, np.concatenate(([None],Sz[1:10])))\n# x = tf.placeholder(tf.float32, [None, 28,28,2])\n\n# Define loss and optimizer\ny_ = tf.placeholder(tf.float32, np.concatenate(([None],OutSz[1:10])))\n\n# Build the graph for the deep net\ny_conv = deepnn(x)\n\nwith tf.name_scope('loss'):\n  cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,\n                                                          logits=y_conv)\ncross_entropy = tf.reduce_mean(cross_entropy)\n\nwith tf.name_scope('adam_optimizer'):\n  train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\nwith tf.name_scope('accuracy'):\n  correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n  correct_prediction = tf.cast(correct_prediction, tf.float32)\naccuracy = tf.reduce_mean(correct_prediction)\n\n#graph_location = tempfile.mkdtemp()\n#print('Saving graph to: %s' % graph_location)\n#train_writer = tf.summary.FileWriter(graph_location)\n#train_writer.add_graph(tf.get_default_graph())\n\n# saver = tf.train.Saver()\n\n\"\"\"ggg: Running the training \"\"\"\nmy_batch_size=40;\nnIters=3;\n\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n\n    for i in 
range(nIters):\n batch=dataset.next_batch(my_batch_size)\n \n if i % 100 == 0:\n train_accuracy = accuracy.eval(feed_dict={\n x: batch[0], y_: batch[1] })\n print('step %d, training accuracy %g' % (i, train_accuracy))\n train_step.run(feed_dict={x: batch[0], y_: batch[1] })\n \n #save_path = saver.save(sess, \"/tmp/aaa/model.ckpt\")\n\n \"\"\"ggg: Printing how the current net is doing on the full data:\n Don't run that on the GPU\"\"\"\n print('test accuracy %g' % accuracy.eval(feed_dict={\n x: FullData['Data'], y_: FullData['Labels'] }))\n\n \"\"\"ggg: Accessing data from a layer of the net\n This is just to understand stuff. Don't run that on the GPU\"\"\"\n #LayerData=[v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,'conv1')]\n #Bias1=LayerData[-1].eval();\n #W1=LayerData[-2].eval();\n\n\nprint('OK finished')\n\n\"\"\" to run a TF command: \"\"\"\n# qq=tf.truncated_normal([3, 4],stddev=0.1)\n# ww=tf.InteractiveSession().run(qq)\n\n\"\"\" To run this file: \"\"\"\n# execfile('/home/a/tensorflowtest/deep_try6_OutImg_FCstart.py')\n\n\"\"\" To change directory #\"\"\"\n# import os\n# os.chdir('/home/a/tensorflowtest')\n# os.getcwd()\n\n\"\"\" to load data from matlab: \"\"\"\n# import scipy.io\n# A=scipy.io.loadmat('/home/a/tensorflowtest/a5kx784.mat')\n# A['a'].shape\n\n\"\"\" Manual for simple matlab stuff in Python:\n http://mathesaurus.sourceforge.net/matlab-python-xref.pdf \"\"\"\n\nimport numpy as np\na = tf.constant(np.arange(1, 25, dtype=np.int32), shape=[2, 2, 2, 3])\nb=tf.reshape(a,[ 2,1,4,3 ])\naa=tf.InteractiveSession().run(a)\nbb=tf.InteractiveSession().run(b)", "repo_name": "GiladMRI/TF", "sub_path": "deep_try6_OutImg_FCstart.py", "file_name": "deep_try6_OutImg_FCstart.py", "file_ext": "py", "file_size_in_byte": 8680, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.arange", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 63, "usage_type": "call"}, {"api_name": "scipy.io.io.loadmat", "line_number": 79, "usage_type": "call"}, {"api_name": "scipy.io.io", "line_number": 79, "usage_type": "attribute"}, {"api_name": "scipy.io", "line_number": 79, "usage_type": "name"}, {"api_name": "tensorflow.reshape", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 135, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 135, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 135, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 136, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 145, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 148, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 151, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 
151, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 154, "usage_type": "call"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 165, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 165, "usage_type": "attribute"}, {"api_name": "tensorflow.truncated_normal", "line_number": 169, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 170, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 174, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 175, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 179, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 179, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 179, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 183, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 183, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 183, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 188, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits", "line_number": 189, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 189, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 191, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 193, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 194, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 194, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 196, "usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 197, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 197, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 198, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 198, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 199, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 212, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 213, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 261, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 262, "usage_type": "call"}, {"api_name": "tensorflow.InteractiveSession", "line_number": 263, "usage_type": "call"}, {"api_name": "tensorflow.InteractiveSession", "line_number": 264, "usage_type": "call"}]} +{"seq_id": "24430158151", "text": "import sys\nimport xml.etree.ElementTree as ET\nimport requests\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom models import Record\nfrom config import conn_string\n\ncbr_url = 'https://www.cbr.ru/scripts/XML_daily.asp'\n# Create engine and DBSession instance\nengine = create_engine(conn_string)\nDBSession = sessionmaker(bind=engine)\n\n\nclass CurrencyUpdater:\n \"\"\"\n Class for getting and update quotes and prices.\n Pass the currency you want to get from the CBR to constructor.\n \"\"\"\n def __init__(self, currency):\n self.currency = currency\n\n def get_quote(self):\n \"\"\"Returns quote of stored currency\"\"\"\n response = 
requests.get(cbr_url)\n root = ET.fromstring(response.text)\n currency_list = root.findall('Valute')\n\n for currency in currency_list:\n if currency.find('CharCode').text == self.currency:\n quote = currency.find('Value').text\n quote = float(quote.replace(',', '.')) # Value is string with ',' separator\n return quote\n return 0\n\n def update_rub_currency(self, quote):\n \"\"\"Updates ruble cost in the database column 'rub_cost'\"\"\"\n db_session = DBSession()\n\n records = db_session.query(Record).all()\n for record in records:\n if record.usd_cost is not None:\n record.rub_cost = round(record.usd_cost * quote, 2)\n else:\n record.rub_cost = None\n try:\n db_session.commit() # Commit changes\n except Exception:\n print(sys.exc_info()[1])\n db_session.rollback()\n db_session.close()\n", "repo_name": "TimurIshankulov/kanal-service", "sub_path": "currency_updater.py", "file_name": "currency_updater.py", "file_ext": "py", "file_size_in_byte": 1692, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 13, "usage_type": "call"}, {"api_name": "config.conn_string", "line_number": 13, "usage_type": "argument"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 27, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 28, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 28, "usage_type": "name"}, {"api_name": "models.Record", "line_number": 42, "usage_type": "argument"}, {"api_name": "sys.exc_info", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "13415616905", "text": "\"\"\"Functions that enable reading/writing the config file.\"\"\"\nfrom abc import ABC\nfrom typing import Iterable, Mapping\n\nfrom vigorish.config.types.batch_job_settings import BatchJobSettings\nfrom vigorish.config.types.batch_scrape_delay import BatchScrapeDelay\nfrom vigorish.config.types.folder_path_setting import LocalFolderPathSetting, S3FolderPathSetting\nfrom vigorish.config.types.url_scrape_delay import UrlScrapeDelay\nfrom vigorish.enums import (\n CombinedDataStorageOption,\n ConfigType,\n DataSet,\n HtmlStorageOption,\n JsonStorageOption,\n ScrapeCondition,\n ScrapeTaskOption,\n StatusReport,\n)\nfrom vigorish.util.list_helpers import report_dict\n\n\ndef same_value_for_all_data_sets_is_required(setting_name: str) -> bool:\n return setting_name in {\n \"STATUS_REPORT\",\n \"S3_BUCKET\",\n \"SCRAPE_TASK_OPTION\",\n \"SCRAPED_DATA_COMBINE_CONDITION\",\n \"COMBINED_DATA_STORAGE\",\n \"COMBINED_DATA_LOCAL_FOLDER_PATH\",\n \"COMBINED_DATA_S3_FOLDER_PATH\",\n \"DB_BACKUP_FOLDER_PATH\",\n }\n\n\nEnumSetting = CombinedDataStorageOption | HtmlStorageOption | JsonStorageOption | ScrapeCondition | ScrapeTaskOption\nNumericSetting = BatchJobSettings | BatchScrapeDelay | UrlScrapeDelay\nPathSetting = LocalFolderPathSetting | S3FolderPathSetting\nConfigValue = EnumSetting | bool | int | str | None\nConfigDict = Mapping[str, None | bool | int | str]\nConfigSettingValue = EnumSetting | NumericSetting | PathSetting | str\n\n\nclass ConfigSetting(ABC):\n data_type: ConfigType\n setting_name: str\n config_dict: ConfigDict\n\n def __init__(self, setting_name: str, config_dict: ConfigDict):\n self.setting_name = setting_name\n self.config_dict = config_dict\n self.data_type = ConfigType.NONE\n\n @property\n def setting_name_title(self) -> str:\n return \" 
\".join(self.setting_name.split(\"_\")).title()\n\n @property\n def description(self) -> str:\n return self.config_dict.get(\"DESCRIPTION\", \"\")\n\n @property\n def possible_values(self) -> Iterable[EnumSetting]:\n return []\n\n @property\n def is_same_for_all_data_sets(self) -> bool:\n return (\n True\n if same_value_for_all_data_sets_is_required(self.setting_name)\n else self.config_dict.get(\"SAME_SETTING_FOR_ALL_DATA_SETS\")\n )\n\n @property\n def current_settings_report(self) -> str:\n if self.is_same_for_all_data_sets:\n settings_dict = {\"ALL_DATA_SETS\": self.current_setting(DataSet.ALL)}\n else:\n settings_dict = {ds.name: self.current_setting(ds) for ds in DataSet}\n return report_dict(settings_dict, title=\"\", title_prefix=\"\", title_suffix=\"\")\n\n def current_setting(self, data_set: DataSet) -> ConfigValue:\n return (\n self.config_dict.get(DataSet.ALL.name)\n if self.is_same_for_all_data_sets\n else self.config_dict.get(data_set.name)\n )\n\n\nclass StringConfigSetting(ConfigSetting):\n def __init__(self, setting_name: str, config_dict: ConfigDict):\n super().__init__(setting_name, config_dict)\n self.data_type = ConfigType.STRING\n\n def __repr__(self) -> str:\n return f\"<StringConfigSetting setting={self.setting_name}, value={self.current_settings_report}>\"\n\n\nclass PathConfigSetting(ConfigSetting):\n def __init__(self, setting_name: str, config_dict: ConfigDict):\n super().__init__(setting_name, config_dict)\n self.data_type = ConfigType.PATH\n\n def __repr__(self) -> str:\n return f\"<PathConfigSetting setting={self.setting_name}, value={self.current_settings_report}>\"\n\n @property\n def class_name(self) -> str:\n return self.config_dict.get(\"CLASS_NAME\")\n\n def current_setting(self, data_set: DataSet) -> PathSetting:\n current_setting = super().current_setting(data_set)\n if self.class_name == \"LocalFolderPathSetting\":\n return LocalFolderPathSetting(path=current_setting, data_set=data_set)\n if self.class_name == \"S3FolderPathSetting\":\n return S3FolderPathSetting(path=current_setting, data_set=data_set)\n\n\nclass EnumConfigSetting(ConfigSetting):\n def __init__(self, setting_name: str, config_dict: ConfigDict):\n super().__init__(setting_name, config_dict)\n self.data_type = ConfigType.ENUM\n\n def __repr__(self) -> str:\n return f\"<EnumConfigSetting setting={self.setting_name}, value={self.current_settings_report}>\"\n\n @property\n def enum_name(self) -> str:\n return self.config_dict.get(\"ENUM_NAME\")\n\n @property\n def possible_values(self) -> Iterable[EnumSetting]:\n if self.enum_name == \"ScrapeCondition\":\n return list(ScrapeCondition)\n if self.enum_name == \"ScrapeTaskOption\":\n return list(ScrapeTaskOption)\n if self.enum_name == \"HtmlStorageOption\":\n return list(HtmlStorageOption)\n if self.enum_name == \"JsonStorageOption\":\n return list(JsonStorageOption)\n if self.enum_name == \"CombinedDataStorageOption\":\n return list(CombinedDataStorageOption)\n if self.enum_name == \"StatusReport\":\n return list(StatusReport)\n return []\n\n def current_setting(self, data_set: DataSet) -> EnumSetting:\n current_setting = super().current_setting(data_set)\n if not current_setting:\n return None\n if self.enum_name == \"ScrapeCondition\":\n return ScrapeCondition[current_setting.upper()]\n if self.enum_name == \"ScrapeTaskOption\":\n return ScrapeTaskOption[current_setting.upper()]\n if self.enum_name == \"HtmlStorageOption\":\n return HtmlStorageOption[current_setting.upper()]\n if self.enum_name == \"JsonStorageOption\":\n return 
JsonStorageOption[current_setting.upper()]\n if self.enum_name == \"CombinedDataStorageOption\":\n return CombinedDataStorageOption[current_setting.upper()]\n if self.enum_name == \"StatusReport\":\n return StatusReport[current_setting.upper()]\n return None\n\n\nclass NumericConfigSetting(ConfigSetting):\n def __init__(self, setting_name: str, config_dict: ConfigDict):\n super().__init__(setting_name, config_dict)\n self.data_type = ConfigType.NUMERIC\n\n def __repr__(self) -> str:\n return f\"<NumericConfigSetting setting={self.setting_name}, value={self.current_settings_report}>\"\n\n @property\n def class_name(self) -> str:\n return self.config_dict.get(\"CLASS_NAME\")\n\n @property\n def cannot_be_disabled(self) -> bool:\n return self.class_name == \"UrlScrapeDelay\"\n\n def current_setting(self, data_set: DataSet) -> NumericSetting:\n current_setting = super().current_setting(data_set)\n if self.class_name == \"UrlScrapeDelay\":\n return UrlScrapeDelay.from_config(current_setting)\n if self.class_name == \"BatchJobSettings\":\n return BatchJobSettings.from_config(current_setting)\n if self.class_name == \"BatchScrapeDelay\":\n return BatchScrapeDelay.from_config(current_setting)\n return None\n", "repo_name": "a-luna/vigorish", "sub_path": "src/vigorish/config/config_setting.py", "file_name": "config_setting.py", "file_ext": "py", "file_size_in_byte": 7120, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "vigorish.enums.CombinedDataStorageOption", "line_number": 35, "usage_type": "name"}, {"api_name": "vigorish.enums.HtmlStorageOption", "line_number": 35, "usage_type": "name"}, {"api_name": "vigorish.enums.JsonStorageOption", "line_number": 35, "usage_type": "name"}, {"api_name": "vigorish.enums.ScrapeCondition", "line_number": 35, "usage_type": "name"}, {"api_name": "vigorish.enums.ScrapeTaskOption", "line_number": 35, "usage_type": "name"}, {"api_name": "vigorish.config.types.batch_job_settings.BatchJobSettings", "line_number": 36, "usage_type": "name"}, {"api_name": "vigorish.config.types.batch_scrape_delay.BatchScrapeDelay", "line_number": 36, "usage_type": "name"}, {"api_name": "vigorish.config.types.url_scrape_delay.UrlScrapeDelay", "line_number": 36, "usage_type": "name"}, {"api_name": "vigorish.config.types.folder_path_setting.LocalFolderPathSetting", "line_number": 37, "usage_type": "name"}, {"api_name": "vigorish.config.types.folder_path_setting.S3FolderPathSetting", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 39, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 43, "usage_type": "name"}, {"api_name": "vigorish.enums.ConfigType", "line_number": 44, "usage_type": "name"}, {"api_name": "vigorish.enums.ConfigType.NONE", "line_number": 51, "usage_type": "attribute"}, {"api_name": "vigorish.enums.ConfigType", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 62, "usage_type": "name"}, {"api_name": "vigorish.enums.DataSet.ALL", "line_number": 76, "usage_type": "attribute"}, {"api_name": "vigorish.enums.DataSet", "line_number": 76, "usage_type": "name"}, {"api_name": "vigorish.enums.DataSet", "line_number": 78, "usage_type": "name"}, {"api_name": "vigorish.util.list_helpers.report_dict", "line_number": 79, "usage_type": "call"}, {"api_name": "vigorish.enums.DataSet", "line_number": 81, "usage_type": "name"}, {"api_name": "vigorish.enums.DataSet.ALL", "line_number": 83, "usage_type": "attribute"}, 
{"api_name": "vigorish.enums.DataSet", "line_number": 83, "usage_type": "name"}, {"api_name": "vigorish.enums.ConfigType.STRING", "line_number": 92, "usage_type": "attribute"}, {"api_name": "vigorish.enums.ConfigType", "line_number": 92, "usage_type": "name"}, {"api_name": "vigorish.enums.ConfigType.PATH", "line_number": 101, "usage_type": "attribute"}, {"api_name": "vigorish.enums.ConfigType", "line_number": 101, "usage_type": "name"}, {"api_name": "vigorish.enums.DataSet", "line_number": 110, "usage_type": "name"}, {"api_name": "vigorish.config.types.folder_path_setting.LocalFolderPathSetting", "line_number": 113, "usage_type": "call"}, {"api_name": "vigorish.config.types.folder_path_setting.S3FolderPathSetting", "line_number": 115, "usage_type": "call"}, {"api_name": "vigorish.enums.ConfigType.ENUM", "line_number": 121, "usage_type": "attribute"}, {"api_name": "vigorish.enums.ConfigType", "line_number": 121, "usage_type": "name"}, {"api_name": "vigorish.enums.ScrapeCondition", "line_number": 133, "usage_type": "argument"}, {"api_name": "vigorish.enums.ScrapeTaskOption", "line_number": 135, "usage_type": "argument"}, {"api_name": "vigorish.enums.HtmlStorageOption", "line_number": 137, "usage_type": "argument"}, {"api_name": "vigorish.enums.JsonStorageOption", "line_number": 139, "usage_type": "argument"}, {"api_name": "vigorish.enums.CombinedDataStorageOption", "line_number": 141, "usage_type": "argument"}, {"api_name": "vigorish.enums.StatusReport", "line_number": 143, "usage_type": "argument"}, {"api_name": "typing.Iterable", "line_number": 131, "usage_type": "name"}, {"api_name": "vigorish.enums.DataSet", "line_number": 146, "usage_type": "name"}, {"api_name": "vigorish.enums.ScrapeCondition", "line_number": 151, "usage_type": "name"}, {"api_name": "vigorish.enums.ScrapeTaskOption", "line_number": 153, "usage_type": "name"}, {"api_name": "vigorish.enums.HtmlStorageOption", "line_number": 155, "usage_type": "name"}, {"api_name": "vigorish.enums.JsonStorageOption", "line_number": 157, "usage_type": "name"}, {"api_name": "vigorish.enums.CombinedDataStorageOption", "line_number": 159, "usage_type": "name"}, {"api_name": "vigorish.enums.StatusReport", "line_number": 161, "usage_type": "name"}, {"api_name": "vigorish.enums.ConfigType.NUMERIC", "line_number": 168, "usage_type": "attribute"}, {"api_name": "vigorish.enums.ConfigType", "line_number": 168, "usage_type": "name"}, {"api_name": "vigorish.enums.DataSet", "line_number": 181, "usage_type": "name"}, {"api_name": "vigorish.config.types.url_scrape_delay.UrlScrapeDelay.from_config", "line_number": 184, "usage_type": "call"}, {"api_name": "vigorish.config.types.url_scrape_delay.UrlScrapeDelay", "line_number": 184, "usage_type": "name"}, {"api_name": "vigorish.config.types.batch_job_settings.BatchJobSettings.from_config", "line_number": 186, "usage_type": "call"}, {"api_name": "vigorish.config.types.batch_job_settings.BatchJobSettings", "line_number": 186, "usage_type": "name"}, {"api_name": "vigorish.config.types.batch_scrape_delay.BatchScrapeDelay.from_config", "line_number": 188, "usage_type": "call"}, {"api_name": "vigorish.config.types.batch_scrape_delay.BatchScrapeDelay", "line_number": 188, "usage_type": "name"}]} +{"seq_id": "12028424081", "text": "from datetime import datetime,timedelta\n\nfrom django.db.models import Count, Q\n\nfrom calls.forms import CallsPostForm\nfrom calls.models import CallsPost, CallsCategory\nfrom mezzanine.generic.models import Keyword\nfrom mezzanine import template\nfrom mezzanine.utils.models import 
get_user_model\nfrom django.utils import timezone\n\nUser = get_user_model()\n\nregister = template.Library()\n\n\n@register.as_tag\ndef calls_months(*args):\n \"\"\"\n Put a list of dates for calls posts into the template context.\n \"\"\" \n dates = CallsPost.objects.published().values_list(\"publish_date\", flat=True)\n date_dicts = [{\"date\": datetime(d.year, d.month, 1)} for d in dates]\n month_dicts = []\n for date_dict in date_dicts:\n if date_dict not in month_dicts:\n month_dicts.append(date_dict)\n for i, date_dict in enumerate(month_dicts):\n month_dicts[i][\"post_count\"] = date_dicts.count(date_dict)\n return month_dicts\n\n\n@register.as_tag\ndef calls_categories(*args):\n \"\"\"\n Put a list of categories for calls posts into the template context.\n \"\"\"\n posts = CallsPost.objects.published()\n categories = CallsCategory.objects.filter(callsposts__in=posts)\n return list(categories.annotate(post_count=Count(\"callsposts\")))\n\n\n@register.as_tag\ndef calls_authors(*args):\n \"\"\"\n Put a list of authors (users) for calls posts into the template context.\n \"\"\"\n calls_posts = CallsPost.objects.published()\n authors = User.objects.filter(callsposts__in=calls_posts)\n return list(authors.annotate(post_count=Count(\"callsposts\")))\n\n\n@register.as_tag\ndef calls_recent_posts(limit=5, tag=None, username=None, category=None):\n \"\"\"\n Put a list of recently published calls posts into the template\n context. A tag title or slug, category title or slug or author's\n username can also be specified to filter the recent posts returned.\n\n Usage::\n\n {% calls_recent_posts 5 as recent_posts %}\n {% calls_recent_posts limit=5 tag=\"django\" as recent_posts %}\n {% calls_recent_posts limit=5 category=\"python\" as recent_posts %}\n {% calls_recent_posts 5 username=admin as recent_posts %}\n\n \"\"\"\n calls_posts = CallsPost.objects.published().select_related(\"user\")\n title_or_slug = lambda s: Q(title=s) | Q(slug=s)\n if tag is not None:\n try:\n tag = Keyword.objects.get(title_or_slug(tag))\n calls_posts = calls_posts.filter(keywords__in=tag.assignments.all())\n except Keyword.DoesNotExist:\n return []\n if category is not None:\n try:\n category = CallsCategory.objects.get(title_or_slug(category))\n calls_posts = calls_posts.filter(categories=category)\n except CallsCategory.DoesNotExist:\n return []\n if username is not None:\n try:\n author = User.objects.get(username=username)\n calls_posts = calls_posts.filter(user=author)\n except User.DoesNotExist:\n return []\n return list(calls_posts[:limit])\n\n@register.as_tag\ndef calls_deadline_recent_posts(limit=5, category=None):\n \"\"\"\n Put a list of recently published calls posts into the template\n context. 
A category title or slug can also be\n specified to filter the recent posts returned.\n\n Usage::\n\n {% calls_deadline_recent_posts 5 as recent_posts %}\n {% calls_deadline_recent_posts limit=5 category=\"python\" as recent_posts %}\n \n \"\"\"\n calls_posts = CallsPost.objects.published().select_related(\"user\")\n title_or_slug = lambda s: Q(title=s) | Q(slug=s)\n \n if category is not None:\n try:\n category = CallsCategory.objects.get(title_or_slug(category))\n calls_posts = calls_posts.filter(categories=category, deadline_date__range=(timezone.now(), '2016-12-31'))\n except CallsCategory.DoesNotExist:\n return []\n \n return list(calls_posts[:limit])\n\n@register.inclusion_tag(\"admin/includes/quick_calls.html\", takes_context=True)\ndef quick_calls(context):\n \"\"\"\n Admin dashboard tag for the quick calls form.\n \"\"\"\n context[\"form\"] = CallsPostForm()\n return context\n\n# https://gist.github.com/renyi/3596248\nfrom django import template\nfrom django.utils import translation\n\nfrom mezzanine.conf import settings\n\n\n@register.filter\ndef get_object_translation(obj):\n # get the current language\n lang = translation.get_language()\n\n try:\n # return the object with the current translation\n for i in obj.translation.all():\n if i.lang == lang:\n return i\n except Exception:\n pass\n\n # return the object without translation\n return obj", "repo_name": "psentinelli/ippc_it", "sub_path": "calls/templatetags/calls_tags.py", "file_name": "calls_tags.py", "file_ext": "py", "file_size_in_byte": 4679, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "mezzanine.utils.models.get_user_model", "line_number": 12, "usage_type": "call"}, {"api_name": "mezzanine.template.Library", "line_number": 14, "usage_type": "call"}, {"api_name": "mezzanine.template", "line_number": 14, "usage_type": "name"}, {"api_name": "calls.models.CallsPost.objects.published", "line_number": 22, "usage_type": "call"}, {"api_name": "calls.models.CallsPost.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "calls.models.CallsPost", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "call"}, {"api_name": "calls.models.CallsPost.objects.published", "line_number": 38, "usage_type": "call"}, {"api_name": "calls.models.CallsPost.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "calls.models.CallsPost", "line_number": 38, "usage_type": "name"}, {"api_name": "calls.models.CallsCategory.objects.filter", "line_number": 39, "usage_type": "call"}, {"api_name": "calls.models.CallsCategory.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "calls.models.CallsCategory", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 40, "usage_type": "call"}, {"api_name": "calls.models.CallsPost.objects.published", "line_number": 48, "usage_type": "call"}, {"api_name": "calls.models.CallsPost.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "calls.models.CallsPost", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 50, "usage_type": "call"}, {"api_name": "calls.models.CallsPost.objects.published", "line_number": 68, "usage_type": "call"}, {"api_name": "calls.models.CallsPost.objects", "line_number": 68, "usage_type": "attribute"}, {"api_name": "calls.models.CallsPost", "line_number": 68, "usage_type": "name"}, 
{"api_name": "django.db.models.Q", "line_number": 69, "usage_type": "call"}, {"api_name": "mezzanine.generic.models.Keyword.objects.get", "line_number": 72, "usage_type": "call"}, {"api_name": "mezzanine.generic.models.Keyword.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "mezzanine.generic.models.Keyword", "line_number": 72, "usage_type": "name"}, {"api_name": "mezzanine.generic.models.Keyword.DoesNotExist", "line_number": 74, "usage_type": "attribute"}, {"api_name": "mezzanine.generic.models.Keyword", "line_number": 74, "usage_type": "name"}, {"api_name": "calls.models.CallsCategory.objects.get", "line_number": 78, "usage_type": "call"}, {"api_name": "calls.models.CallsCategory.objects", "line_number": 78, "usage_type": "attribute"}, {"api_name": "calls.models.CallsCategory", "line_number": 78, "usage_type": "name"}, {"api_name": "calls.models.CallsCategory.DoesNotExist", "line_number": 80, "usage_type": "attribute"}, {"api_name": "calls.models.CallsCategory", "line_number": 80, "usage_type": "name"}, {"api_name": "calls.models.CallsPost.objects.published", "line_number": 103, "usage_type": "call"}, {"api_name": "calls.models.CallsPost.objects", "line_number": 103, "usage_type": "attribute"}, {"api_name": "calls.models.CallsPost", "line_number": 103, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 104, "usage_type": "call"}, {"api_name": "calls.models.CallsCategory.objects.get", "line_number": 108, "usage_type": "call"}, {"api_name": "calls.models.CallsCategory.objects", "line_number": 108, "usage_type": "attribute"}, {"api_name": "calls.models.CallsCategory", "line_number": 108, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 109, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 109, "usage_type": "name"}, {"api_name": "calls.models.CallsCategory.DoesNotExist", "line_number": 110, "usage_type": "attribute"}, {"api_name": "calls.models.CallsCategory", "line_number": 110, "usage_type": "name"}, {"api_name": "calls.forms.CallsPostForm", "line_number": 120, "usage_type": "call"}, {"api_name": "django.utils.translation.get_language", "line_number": 133, "usage_type": "call"}, {"api_name": "django.utils.translation", "line_number": 133, "usage_type": "name"}]} +{"seq_id": "14782852444", "text": "'''\nhttps://leetcode.com/problems/path-sum-iii/\n\nYou are given a binary tree in which each node contains an integer value.\n\nFind the number of paths that sum to a given value.\n\nThe path does not need to start or end at the root or a leaf, but it must go downwards (traveling only from parent nodes to child nodes).\n\nThe tree has no more than 1,000 nodes and the values are in the range -1,000,000 to 1,000,000.\n\nExample:\n\nroot = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8\n\n 10\n / \\\n 5 -3\n / \\ \\\n 3 2 11\n / \\ \\\n3 -2 1\n\nReturn 3. The paths that sum to 8 are:\n\n1. 5 -> 3\n2. 5 -> 2 -> 1\n3. 
-3 -> 11\n'''\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def pathSum(self, root, sum):\n \"\"\"\n :type root: TreeNode\n :type sum: int\n :rtype: int\n \"\"\"\n path = [0]\n ans = [0]\n\n def dfs(node, s):\n if not node:\n return\n s += node.val\n for x in path:\n if s - x == sum:\n ans[0] += 1\n path.append(s)\n dfs(node.left, s)\n dfs(node.right, s)\n path.pop()\n\n dfs(root, 0)\n return ans[0]\n\n\nif __name__ == '__main__':\n from utils import TreeNode\n f = Solution().pathSum\n root = TreeNode.make_tree([10,5,-3,3,2,None,11,3,-2,None,1])\n assert f(root, 8) == 3\n assert f(TreeNode.make_tree([5,4,8,11,None,13,4,7,2,None,None,5,1]), 22) == 3\n", "repo_name": "irachex/leetcode", "sub_path": "path-sum-iii.py", "file_name": "path-sum-iii.py", "file_ext": "py", "file_size_in_byte": 1603, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "21", "api": [{"api_name": "utils.TreeNode.make_tree", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.TreeNode", "line_number": 67, "usage_type": "name"}, {"api_name": "utils.TreeNode.make_tree", "line_number": 69, "usage_type": "call"}, {"api_name": "utils.TreeNode", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "20089697717", "text": "#!/bin/python3\nimport socket\nimport argparse\nimport sys\nimport random as rnd\nimport time\n\nheader = 64\nformat = 'utf-8'\ndisconnect_msg = 'DISCONNECT'\n\n\n\n\nLETTERS = \"abcdefghijklmnopqrstuvwxyz\"\n\naparse = argparse.ArgumentParser(\"This is a small test client application for K8s tests, which sends some data to a TCP address and port and gets the answers.\")\naparse.add_argument( \"--port\", type=int, required=True, help=\"TCP port for connecting\")\naparse.add_argument( \"--address\", type=str, required=True, help=\"Address for connecting.\")\naparse.add_argument( \"--sleep\", type=int, required=False, help=\"Sleep timer in seconds. 
No sleep by default.\")\n\nargs = aparse.parse_args()\n\naddress = args.address\nport = args.port\nslp = args.sleep\n\ndef data_processor(data):\n\t# do something with data\n\treturn data\n\ndef gen_message(n):\n out = ''\n for i in range(n):\n if rnd.choice([0,1]):\n out = out + LETTERS[rnd.randrange(len(LETTERS))].upper()\n else:\n out = out + LETTERS[rnd.randrange(len(LETTERS))].lower() \n return out\n\n\n\ndef send_msg(msg):\n\tmsg_len = str(len(msg)).encode(format)\n\tmsg_len += b' ' * (header - len(msg_len))\n\tclient.send(msg_len)\n\tclient.send(msg.encode(format))\n\tprint(client.recv(2048).decode(format))\n\n\n\ntry:\n\tclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tclient.connect((address, port))\n\n\n\twhile True:\n\t\tsend_msg(gen_message(32))\n\t\tif slp:\n\t\t\ttime.sleep(slp)\n\nexcept KeyboardInterrupt:\n\tsend_msg(disconnect_msg)\n\tclient.close()\n\tsys.exit(0)\nexcept Exception as exc:\n\tsend_msg(disconnect_msg)\n\tclient.close()\n\tprint( \"Error : \", exc)\n\tsys.exit(1)\n
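# Hedged usage sketch: the address and port below are placeholders, not\n# values from this repo. The peer server must speak the same framing\n# protocol: a 64-byte space-padded ASCII length field, then the UTF-8 payload.\n#   python3 client.py --address 127.0.0.1 --port 5050 --sleep 1\n", "repo_name": "dragun83/test_server_client", "sub_path": "client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 1597, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 35, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 36, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 38, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 53, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 53, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 53, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 65, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "33157435358", "text": "from select import select\nimport sqlite3\n\ndef create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except sqlite3.Error as e:\n print(e)\n return conn\n\n### CREATE\ndef dodaj_pracownika(conn, pracownik):\n sql = '''INSERT INTO pracownicy(imię, nazwisko, zawód)\n VALUES(?,?,?)'''\n cur = conn.cursor()\n cur.execute(sql, pracownik)\n conn.commit()\n return cur.lastrowid\ndef dodaj_zadanie(conn, zadanie):\n sql = '''INSERT INTO zadania(zadanie_id, opis_zadania, czas_wykonania_w_godz)\n VALUES(?,?,?)'''\n cur = conn.cursor()\n cur.execute(sql, zadanie)\n conn.commit()\n return cur.lastrowid\n\n### READ\ndef wybierz_wg_parametru(conn, zadanie_id):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM zadania WHERE zadanie_id=?\", (zadanie_id,))\n rows = cur.fetchall()\n return rows\n\ndef select_all(conn, table):\n cur = conn.cursor()\n cur.execute(f\"SELECT * From {table}\")\n rows = cur.fetchall()\n return rows\n \ndef select_where(conn, table, **query):\n cur = conn.cursor()\n qs = []\n values = ()\n for k, v in query.items():\n qs.append(f\"{k}=?\")\n values += (v,)\n q = \" AND \".join(qs)\n cur.execute(f\"SELECT * FROM {table} WHERE {q}\", values)\n rows = cur.fetchall()\n return rows\n\n### UPDATE\n\ndef update(conn, table, id, **kwargs):\n parameters = [f\"{k} = ?\" for k in kwargs]\n parameters = \", \".join(parameters)\n values = tuple(v for v in kwargs.values())\n values += (id, )\n sql = f\"\"\" UPDATE {table}\n SET 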
{parameters}\n WHERE id = ?\"\"\"\n try:\n cur = conn.cursor()\n cur.execute(sql, values)\n conn.commit()\n print(\"OK\")\n except sqlite3.OperationalError as e:\n print(e)\n\n###DELETE\ndef delete_where(conn, table, **kwargs):\n qs = []\n values = tuple()\n for k, v in kwargs.items():\n qs.append(f\"{k} = ?\")\n values += (v,)\n q = \" AND \".join(qs)\n sql = f\"DELETE FROM {table} WHERE {q}\"\n cur = conn.cursor()\n cur.execute(sql, values)\n conn.commit()\n print(\"Deleted\")\ndef delete_all(conn, table):\n sql = f\"DELETE FROM {table}\"\n cur = conn.cursor()\n cur.execute(sql)\n conn.commit()\n print(\"Deleted\")\n \n\n\nif __name__ == \"__main__\":\n conn = create_connection(\"harmonogram_pracy.db\")\n\n### add an employee and their task\n pracownik = (\"Jan\", \"Kowalski\", \"murarz\")\n pracownik_id = dodaj_pracownika(conn, pracownik)\n zadanie = (\n pracownik_id,\n \"wymurownie jednej kondygnacji\",\n 80\n )\n zadanie_id = dodaj_zadanie(conn, zadanie)\n\n print(pracownik_id, zadanie_id)\n conn.commit()\n\n### display the list of employees and the tasks\n print(select_all(conn, \"pracownicy\"))\n print(select_all(conn, \"zadania\"))\n\n### display a task by its id\n print(wybierz_wg_parametru(conn, 2))\n\n### display rows from a chosen table filtered by a parameter\n print(select_where(conn, \"pracownicy\", imię=\"Jan\"))\n print(\"---------\")\n# update the selected row\n update(conn, \"zadania\", id=2, opis_zadania=\"wykonanie stropu monolitycznego\" )\n print(wybierz_wg_parametru(conn, 2))
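\n\n# Hedged sketch of the delete helpers, which the demo above does not\n# exercise; left commented out so the demo data survives:\n# delete_where(conn, \"zadania\", zadanie_id=2)\n# delete_all(conn, \"pracownicy\")\n\n\n ", "repo_name": "mateuszuser/Module13", "sub_path": "ex_crud.py", "file_name": "ex_crud.py", "file_ext": "py", "file_size_in_byte": 3199, "program_lang": "python", "lang": "pl", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sqlite3.connect", "line_number": 7, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sqlite3.OperationalError", "line_number": 69, "usage_type": "attribute"}]} +{"seq_id": "74666991091", "text": "import random\nimport torch\nimport torch.nn.functional as F\n\nfrom replay_memory import ReplayMemory\nfrom model import DQNNET\nimport threading as th\n\n\nclass DQNAgent:\n def __init__(self, observation_space, action_space, device, epsilon_max,\n epsilon_min, epsilon_decay, memory_capacity, discount=.99, lr=1e-3):\n\n self.observation_space = observation_space\n self.action_space = action_space\n self.discount = discount\n self.device = device\n\n self.epsilon = epsilon_max\n self.epsilon_min = epsilon_min\n self.epsilon_decay = epsilon_decay\n\n self.replay_memory = ReplayMemory(memory_capacity)\n\n self.online_network = DQNNET(\n observation_space.shape[0], action_space.n, lr=lr).to(device)\n self.target_network = DQNNET(\n observation_space.shape[0], action_space.n, lr=lr).to(device)\n\n self.target_network.eval()\n self.update_target()\n self.isLearning = False\n\n def update_target(self):\n self.target_network.load_state_dict(self.online_network.state_dict())\n\n def select_action(self, state):\n if random.random() < self.epsilon:\n return self.action_space.sample()\n if not torch.is_tensor(state):\n state = torch.from_numpy(state).float().to(self.device)\n\n with torch.no_grad():\n action = torch.argmax(self.online_network(state))\n return action.item()\n\n def learn(self, batch_size):\n if self.isLearning:\n return\n else:\n self.isLearning = True\n self._learn(batch_size)\n\n def _learn(self, batch_size):\n try:\n if len(self.replay_memory) < batch_size:\n self.isLearning = 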
False\n return\n states, actions, next_states, rewards, dones = self.replay_memory.sample(\n batch_size, self.device)\n\n actions = actions.reshape((-1, 1))\n rewards = rewards.reshape((-1, 1))\n dones = dones.reshape((-1, 1))\n actions = actions.long()\n dones = dones.bool() # boolean mask of terminal transitions\n predicted_qs = self.online_network(states)\n predicted_qs = predicted_qs.gather(1, actions)\n\n target_qs = self.target_network(next_states)\n target_qs = torch.max(target_qs, dim=1).values\n target_qs = target_qs.reshape(-1, 1)\n target_qs[dones] = 0.0 # terminal states carry no bootstrapped future value\n\n y_js = rewards + self.discount * target_qs\n loss = F.smooth_l1_loss(predicted_qs, y_js)\n self.online_network.optimizer.zero_grad()\n loss.backward()\n self.online_network.optimizer.step()\n self.isLearning = False\n except Exception as e:\n print()\n print(e)\n self.isLearning = False\n self.update_target()\n\n def save(self, path):\n torch.save(self.online_network.state_dict(), path)\n\n def load(self, path):\n self.online_network.load_state_dict(torch.load(path))\n self.online_network.eval()\n", "repo_name": "leonardoigor/dqn_torch", "sub_path": "agent.py", "file_name": "agent.py", "file_ext": "py", "file_size_in_byte": 3056, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "replay_memory.ReplayMemory", "line_number": 23, "usage_type": "call"}, {"api_name": "model.DQNNET", "line_number": 25, "usage_type": "call"}, {"api_name": "model.DQNNET", "line_number": 27, "usage_type": "call"}, {"api_name": "random.random", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.is_tensor", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn.functional.smooth_l1_loss", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "28799882837", "text": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import svm\n\ndata = pd.read_csv(filepath_or_buffer = 'C:/Users/admin/Desktop/homework/6.2/watermelon3_0_En.csv')\n\n# Separate the data into samples and labels\ncolumns = data.columns\nattributes = columns[:-1]\nlabel = columns[-1]\nsamples_frame = data[attributes]\nlabel_series = data[label]\n# Up to now, we do not need the DataFrame any more; instead, an array is more suitable\nsamples = samples_frame.values\nlabels = label_series.tolist()\n\n#linear kernel\nclf = svm.SVC(kernel = 'linear')\nclf.fit(samples,labels)\n#Get support vectors\nprint(clf.support_vectors_)\n#Get indices of support vectors\nprint(clf.support_)\n#Get number of support vectors for each class\nprint('linear kernel',clf.n_support_)\n\n#gaussian kernel\nclf1 = svm.SVC(kernel = 'rbf')\nclf1.fit(samples,labels)\n#Get support vectors\nprint(clf1.support_vectors_)\n#Get indices of support vectors\nprint(clf1.support_)\n#Get number of support vectors for each class\nprint('gaussian kernel',clf1.n_support_)\n", "repo_name": "KlausEusford/Libsvm", "sub_path": "svm.py", "file_name": "svm.py", "file_ext": "py", "file_size_in_byte": 1032, "program_lang": "python", "lang": "en", "doc_type": 
"code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 20, "usage_type": "name"}, {"api_name": "sklearn.svm.SVC", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "8072663492", "text": "from vk_api import VkApi\nfrom vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType\nfrom vk_api.vk_api import VkApiMethod\n# всякая всячина api\n\nfrom random import randint\n# для случайного id\n# так как так требует api vk\nfrom time import sleep\n# для задержки перед ответом\nfrom typing import List, NoReturn, Optional\n# немного подсказок типов\n\nfrom config import config\n# самописный конфиг\nfrom generate_text import get_text_from_history\n# обёртка над чудо-библиотекой\nfrom data import db_session\nfrom data.messages import Message\n# базы данных\n\n\ninv_message = \"\"\"\nПривет! Я Балабол, я учусь человеческой речи, и для каждого чата\n\"обучен\" я буду в зависимости от ваших сообщений)\nА для работы мне нужно выдать доступ к переписке или права администратора.\nДля сброса базы данных этого чата используйте команды /сброс или /reset\n\"\"\".strip(' ').strip('\\n').strip(' ').replace('\\n', '. ')\n# сообщение при приглашении\n# временно не будет использоваться\ndb_session.global_init(\"db/messages.db\")\nprint(\"база данных успешно подключена\")\ndb_sess = db_session.create_session()\n# суета с базами данных\n\n\ndef send_on_invite(vk: VkApiMethod, peer_id: int) -> None:\n \"\"\"\n обёртка на приглашение бота в беседу\n :vk `VkApiMethod`\n :peer_id `int`\n return `None`\n \"\"\"\n vk.messages.send(\n peer_id=peer_id,\n massage=inv_message,\n random_id=randint(0, 2 ** 32)\n )\n\n\ndef add_history_to_db(history: List[str]) -> None:\n for message in history:\n msg = Message()\n msg.text = message\n # db_sess.add(msg) if message in map(lambda _msg: _msg.text, db_sess.query(Message).all()) else ...\n db_sess.add(msg)\n db_sess.commit()\n\n\ndef get_and_generate_message_from_db() -> str:\n messages: List[str] = list(map(\n lambda msg: msg.text, db_sess.query(Message).all()\n ))\n return get_text_from_history(messages)\n\n\ndef main() -> Optional[NoReturn]:\n vk_session = VkApi(\n token=config.token\n )\n longpoll = VkBotLongPoll(vk_session, config.ID)\n print(\"бот начал слушать сообщения\")\n vk = vk_session.get_api()\n for event in longpoll.listen():\n if event.type == VkBotEventType.MESSAGE_NEW:\n message: dict = dict(event.obj.message)\n if 'action' in message:\n if message['action']['type'] == 'chat_invite_user':\n # проверка на приглашение в чат\n if event.group_id == -message['action']['member_id']:\n print(\"пригласили в новый чат\")\n try:\n # send_on_invite(vk, message['peer_id'])\n # по какой-то причине вылетает ошибка api с кодом 100\n ...\n except Exception as e:\n print(e)\n finally:\n # следующий круг -- всё по новой\n continue\n\n\n print('Новое сообщение:')\n print('Для меня от:', message['from_id'])\n print('Текст:', message['text'])\n # информирование\n sleep(config.RESPONSE_DELAY)\n # сама задержка\n history = vk.messages.getHistory(\n peer_id=message['peer_id'],\n count=199\n )['items']\n history = list(\n map(\n lambda msg: msg[\"text\"],\n filter(\n lambda msg: msg[\"from_id\"] != -config.ID,\n history\n )\n )\n )\n add_history_to_db(history)\n # получение истории чата для обучения на лету позже\n answer 
= get_and_generate_message_from_db()\n print(\"my answer: \", answer)\n # the generated reply from the bot\n vk.messages.send(\n peer_id=message['peer_id'],\n message=answer,\n random_id=randint(0, 2 ** 32)\n )\n\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "StrangeArcturus/web-project-balabol", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4789, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "data.db_session.global_init", "line_number": 31, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 31, "usage_type": "name"}, {"api_name": "data.db_session.create_session", "line_number": 33, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 33, "usage_type": "name"}, {"api_name": "vk_api.vk_api.VkApiMethod", "line_number": 37, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 47, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 51, "usage_type": "name"}, {"api_name": "data.messages.Message", "line_number": 53, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 61, "usage_type": "name"}, {"api_name": "data.messages.Message", "line_number": 62, "usage_type": "argument"}, {"api_name": "generate_text.get_text_from_history", "line_number": 64, "usage_type": "call"}, {"api_name": "vk_api.VkApi", "line_number": 68, "usage_type": "call"}, {"api_name": "config.config.token", "line_number": 69, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 69, "usage_type": "name"}, {"api_name": "vk_api.bot_longpoll.VkBotLongPoll", "line_number": 71, "usage_type": "call"}, {"api_name": "config.config.ID", "line_number": 71, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 71, "usage_type": "name"}, {"api_name": "vk_api.bot_longpoll.VkBotEventType.MESSAGE_NEW", "line_number": 75, "usage_type": "attribute"}, {"api_name": "vk_api.bot_longpoll.VkBotEventType", "line_number": 75, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 97, "usage_type": "call"}, {"api_name": "config.config.RESPONSE_DELAY", "line_number": 97, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 97, "usage_type": "name"}, {"api_name": "config.config.ID", "line_number": 107, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 107, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 120, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.NoReturn", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "37452224793", "text": "import matplotlib.animation as animation\nfrom viz.util import *\nimport os\nfrom models.agent import *\n\ndef animate_results(models, limits, obstacles, Thetas, goals, ma_segs, name):\n agent_num = len(models)\n dim = len(limits)\n fig, axes = plot_env(limits, obstacles)\n plot_goals(goals)\n\n interval = 20\n total_frames = 500\n total_time = max([ma_segs[idx][-1][0][-1] for idx in range(agent_num)])\n\n if dim == 2:\n paths = extract_paths(models, Thetas, ma_segs)\n ref_patches = []\n tru_patches = []\n err_patches = []\n for idx in range(agent_num):\n ref_x, ref_y, _, tru_x, tru_y, _, times = paths[idx]\n ref_patch = plt.Circle((ref_x[0], ref_y[0]), 0, fc='k')\n tru_patch = plt.Circle((tru_x[0], tru_y[0]), models[idx].size, fc='navy', alpha = 0.7)\n err_patch = plt.Circle((ref_x[0], ref_y[0]), models[idx].size, 
fc='lightsteelblue', alpha = 0.4)\n ref_patches.append(ref_patch)\n tru_patches.append(tru_patch)\n err_patches.append(err_patch)\n # init\n def init():\n for idx in range(agent_num):\n ref_x, ref_y, _, tru_x, tru_y, _, times = paths[idx]\n ref_patches[idx].center = (ref_x[0], ref_y[0])\n tru_patches[idx].center = (tru_x[0], tru_y[0])\n err_patches[idx].center = (ref_x[0], ref_y[0])\n\n for patch in ref_patches + tru_patches + err_patches: axes.add_patch(patch)\n return ref_patches + tru_patches + err_patches\n # animate\n tpf = total_time / total_frames\n\n def animate(f):\n ref, tru = [], []\n for idx in range(agent_num):\n ref_x, ref_y, _, tru_x, tru_y, _, times = paths[idx]\n step = 0\n while (step < len(times) - 1) and (times[step] < tpf * f):\n step = step + 1\n ref_patches[idx].center = (ref_x[step], ref_y[step])\n tru_patches[idx].center = (tru_x[step], tru_y[step])\n err_patches[idx].center = (ref_x[step], ref_y[step])\n if step == len(ref_x) - 1: error = models[idx].size\n else: error = (models[idx].size + models[idx].bloating(step))\n err_patches[idx].width = 2 * error\n err_patches[idx].height = 2 * error\n return ref_patches + tru_patches + err_patches\n\n\n ani = animation.FuncAnimation(fig, animate, frames = total_frames, init_func=init,\n blit=True, interval = interval)\n\n # fig.subplots_adjust(left=0.01, bottom=0.01, right=0.99, top=0.99, wspace=None, hspace=None)\n path = os.path.abspath(\"results/plots/%s.mp4\" % (name))\n ani.save(path, writer='ffmpeg')\n", "repo_name": "jkchengh/s2m2", "sub_path": "viz/animate.py", "file_name": "animate.py", "file_ext": "py", "file_size_in_byte": 2794, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "21", "api": [{"api_name": "models.agent", "line_number": 7, "usage_type": "argument"}, {"api_name": "models.agent", "line_number": 17, "usage_type": "argument"}, {"api_name": "models.agent", "line_number": 24, "usage_type": "name"}, {"api_name": "models.agent", "line_number": 25, "usage_type": "name"}, {"api_name": "models.agent", "line_number": 52, "usage_type": "name"}, {"api_name": "models.agent", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 59, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}]} +{"seq_id": "22358823099", "text": "import base64\nimport json\nimport math\n\nimport frappe\nfrom frappe import _\nfrom frappe.utils import cstr, flt, get_link_to_form, rounded, time_diff_in_hours\nfrom frappe.utils.formatters import format_value\n\nfrom erpnext.setup.utils import insert_record\n\nfrom healthcare.healthcare.doctype.healthcare_settings.healthcare_settings import (\n\tget_income_account,\n)\nfrom healthcare.healthcare.doctype.lab_test.lab_test import create_multiple\nfrom healthcare.healthcare.doctype.observation.observation import add_observation\nfrom healthcare.healthcare.doctype.observation_template.observation_template import (\n\tget_observation_template_details,\n)\nfrom healthcare.setup import setup_healthcare\n\n\n@frappe.whitelist()\ndef get_healthcare_services_to_invoice(patient, company):\n\tpatient = frappe.get_doc(\"Patient\", patient)\n\titems_to_invoice = []\n\tif patient:\n\t\tvalidate_customer_created(patient)\n\t\t# Customer validated, build a list of billable services\n\t\titems_to_invoice += 
get_appointments_to_invoice(patient, company)\n\t\titems_to_invoice += get_encounters_to_invoice(patient, company)\n\t\titems_to_invoice += get_lab_tests_to_invoice(patient, company)\n\t\titems_to_invoice += get_clinical_procedures_to_invoice(patient, company)\n\t\titems_to_invoice += get_inpatient_services_to_invoice(patient, company)\n\t\titems_to_invoice += get_therapy_plans_to_invoice(patient, company)\n\t\titems_to_invoice += get_therapy_sessions_to_invoice(patient, company)\n\t\titems_to_invoice += get_service_requests_to_invoice(patient, company)\n\t\titems_to_invoice += get_observations_to_invoice(patient, company)\n\t\treturn items_to_invoice\n\n\ndef validate_customer_created(patient):\n\tif not frappe.db.get_value(\"Patient\", patient.name, \"customer\"):\n\t\tmsg = _(\"Please set a Customer linked to the Patient\")\n\t\tmsg += \" <b><a href='/app/Form/Patient/{0}'>{0}</a></b>\".format(patient.name)\n\t\tfrappe.throw(msg, title=_(\"Customer Not Found\"))\n\n\ndef get_appointments_to_invoice(patient, company):\n\tappointments_to_invoice = []\n\tpatient_appointments = frappe.get_list(\n\t\t\"Patient Appointment\",\n\t\tfields=\"*\",\n\t\tfilters={\n\t\t\t\"patient\": patient.name,\n\t\t\t\"company\": company,\n\t\t\t\"invoiced\": 0,\n\t\t\t\"status\": [\"!=\", \"Cancelled\"],\n\t\t},\n\t\torder_by=\"appointment_date\",\n\t)\n\n\tfor appointment in patient_appointments:\n\t\t# Procedure Appointments\n\t\tif appointment.procedure_template:\n\t\t\tif frappe.db.get_value(\n\t\t\t\t\"Clinical Procedure Template\", appointment.procedure_template, \"is_billable\"\n\t\t\t):\n\t\t\t\tappointments_to_invoice.append(\n\t\t\t\t\t{\n\t\t\t\t\t\t\"reference_type\": \"Patient Appointment\",\n\t\t\t\t\t\t\"reference_name\": appointment.name,\n\t\t\t\t\t\t\"service\": appointment.procedure_template,\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t# Consultation Appointments, should check fee validity\n\t\telse:\n\t\t\tif frappe.db.get_single_value(\n\t\t\t\t\"Healthcare Settings\", \"enable_free_follow_ups\"\n\t\t\t) and frappe.db.exists(\"Fee Validity Reference\", {\"appointment\": appointment.name}):\n\t\t\t\tcontinue # Skip invoicing, fee validty present\n\t\t\tpractitioner_charge = 0\n\t\t\tincome_account = None\n\t\t\tservice_item = None\n\t\t\tif appointment.practitioner:\n\t\t\t\tdetails = get_appointment_billing_item_and_rate(appointment)\n\t\t\t\tservice_item = details.get(\"service_item\")\n\t\t\t\tpractitioner_charge = details.get(\"practitioner_charge\")\n\t\t\t\tincome_account = get_income_account(appointment.practitioner, appointment.company)\n\t\t\tappointments_to_invoice.append(\n\t\t\t\t{\n\t\t\t\t\t\"reference_type\": \"Patient Appointment\",\n\t\t\t\t\t\"reference_name\": appointment.name,\n\t\t\t\t\t\"service\": service_item,\n\t\t\t\t\t\"rate\": practitioner_charge,\n\t\t\t\t\t\"income_account\": income_account,\n\t\t\t\t}\n\t\t\t)\n\n\treturn appointments_to_invoice\n\n\ndef get_encounters_to_invoice(patient, company):\n\tif not isinstance(patient, str):\n\t\tpatient = patient.name\n\tencounters_to_invoice = []\n\tencounters = frappe.get_list(\n\t\t\"Patient Encounter\",\n\t\tfields=[\"*\"],\n\t\tfilters={\"patient\": patient, \"company\": company, \"invoiced\": False, \"docstatus\": 1},\n\t)\n\tif encounters:\n\t\tfor encounter in encounters:\n\t\t\tif not encounter.appointment:\n\t\t\t\tpractitioner_charge = 0\n\t\t\t\tincome_account = None\n\t\t\t\tservice_item = None\n\t\t\t\tif encounter.practitioner:\n\t\t\t\t\tif encounter.inpatient_record and 
frappe.db.get_single_value(\n\t\t\t\t\t\t\"Healthcare Settings\", \"do_not_bill_inpatient_encounters\"\n\t\t\t\t\t):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tdetails = get_appointment_billing_item_and_rate(encounter)\n\t\t\t\t\tservice_item = details.get(\"service_item\")\n\t\t\t\t\tpractitioner_charge = details.get(\"practitioner_charge\")\n\t\t\t\t\tincome_account = get_income_account(encounter.practitioner, encounter.company)\n\n\t\t\t\tencounters_to_invoice.append(\n\t\t\t\t\t{\n\t\t\t\t\t\t\"reference_type\": \"Patient Encounter\",\n\t\t\t\t\t\t\"reference_name\": encounter.name,\n\t\t\t\t\t\t\"service\": service_item,\n\t\t\t\t\t\t\"rate\": practitioner_charge,\n\t\t\t\t\t\t\"income_account\": income_account,\n\t\t\t\t\t}\n\t\t\t\t)\n\n\treturn encounters_to_invoice\n\n\ndef get_lab_tests_to_invoice(patient, company):\n\tlab_tests_to_invoice = []\n\tlab_tests = frappe.get_list(\n\t\t\"Lab Test\",\n\t\tfields=[\"name\", \"template\"],\n\t\tfilters={\n\t\t\t\"patient\": patient.name,\n\t\t\t\"company\": company,\n\t\t\t\"invoiced\": False,\n\t\t\t\"docstatus\": 1,\n\t\t\t\"service_request\": \"\",\n\t\t},\n\t)\n\tfor lab_test in lab_tests:\n\t\titem, is_billable = frappe.get_cached_value(\n\t\t\t\"Lab Test Template\", lab_test.template, [\"item\", \"is_billable\"]\n\t\t)\n\t\tif is_billable:\n\t\t\tlab_tests_to_invoice.append(\n\t\t\t\t{\"reference_type\": \"Lab Test\", \"reference_name\": lab_test.name, \"service\": item}\n\t\t\t)\n\n\treturn lab_tests_to_invoice\n\n\ndef get_observations_to_invoice(patient, company):\n\tobservations_to_invoice = []\n\tobservations = frappe.get_list(\n\t\t\"Observation\",\n\t\tfields=[\"name\", \"observation_template\"],\n\t\tfilters={\n\t\t\t\"patient\": patient.name,\n\t\t\t\"company\": company,\n\t\t\t\"invoiced\": False,\n\t\t\t\"docstatus\": 1,\n\t\t\t\"service_request\": \"\",\n\t\t},\n\t)\n\tfor observation in observations:\n\t\titem, is_billable = frappe.get_cached_value(\n\t\t\t\"Observation Template\", observation.observation_template, [\"item\", \"is_billable\"]\n\t\t)\n\t\tif is_billable:\n\t\t\tobservations_to_invoice.append(\n\t\t\t\t{\"reference_type\": \"Observation\", \"reference_name\": observation.name, \"service\": item}\n\t\t\t)\n\n\treturn observations_to_invoice\n\n\ndef get_clinical_procedures_to_invoice(patient, company):\n\tclinical_procedures_to_invoice = []\n\tprocedures = frappe.get_list(\n\t\t\"Clinical Procedure\",\n\t\tfields=\"*\",\n\t\tfilters={\n\t\t\t\"patient\": patient.name,\n\t\t\t\"company\": company,\n\t\t\t\"invoiced\": False,\n\t\t\t\"docstatus\": 1,\n\t\t\t\"service_request\": \"\",\n\t\t},\n\t)\n\tfor procedure in procedures:\n\t\tif not procedure.appointment:\n\t\t\titem, is_billable = frappe.get_cached_value(\n\t\t\t\t\"Clinical Procedure Template\", procedure.procedure_template, [\"item\", \"is_billable\"]\n\t\t\t)\n\t\t\tif procedure.procedure_template and is_billable:\n\t\t\t\tclinical_procedures_to_invoice.append(\n\t\t\t\t\t{\"reference_type\": \"Clinical Procedure\", \"reference_name\": procedure.name, \"service\": item}\n\t\t\t\t)\n\n\t\t# consumables\n\t\tif (\n\t\t\tprocedure.invoice_separately_as_consumables\n\t\t\tand procedure.consume_stock\n\t\t\tand procedure.status == \"Completed\"\n\t\t\tand not procedure.consumption_invoiced\n\t\t):\n\t\t\tservice_item = frappe.db.get_single_value(\n\t\t\t\t\"Healthcare Settings\", \"clinical_procedure_consumable_item\"\n\t\t\t)\n\t\t\tif not service_item:\n\t\t\t\tmsg = _(\"Please Configure Clinical Procedure Consumable Item in 
{0}\").format(\n\t\t\t\t\tget_link_to_form(\"Healthcare Settings\", \"Healthcare Settings\")\n\t\t\t\t)\n\n\t\t\t\tfrappe.throw(msg, title=_(\"Missing Configuration\"))\n\n\t\t\tclinical_procedures_to_invoice.append(\n\t\t\t\t{\n\t\t\t\t\t\"reference_type\": \"Clinical Procedure\",\n\t\t\t\t\t\"reference_name\": procedure.name,\n\t\t\t\t\t\"service\": service_item,\n\t\t\t\t\t\"rate\": procedure.consumable_total_amount,\n\t\t\t\t\t\"description\": procedure.consumption_details,\n\t\t\t\t}\n\t\t\t)\n\n\treturn clinical_procedures_to_invoice\n\n\ndef get_inpatient_services_to_invoice(patient, company):\n\tservices_to_invoice = []\n\tinpatient_services = frappe.db.sql(\n\t\t\"\"\"\n\t\t\tSELECT\n\t\t\t\tio.*\n\t\t\tFROM\n\t\t\t\t`tabInpatient Record` ip, `tabInpatient Occupancy` io\n\t\t\tWHERE\n\t\t\t\tip.patient=%s\n\t\t\t\tand ip.company=%s\n\t\t\t\tand io.parent=ip.name\n\t\t\t\tand io.left=1\n\t\t\t\tand io.invoiced=0\n\t\t\"\"\",\n\t\t(patient.name, company),\n\t\tas_dict=1,\n\t)\n\n\tfor inpatient_occupancy in inpatient_services:\n\t\tservice_unit_type = frappe.db.get_value(\n\t\t\t\"Healthcare Service Unit\", inpatient_occupancy.service_unit, \"service_unit_type\"\n\t\t)\n\t\tservice_unit_type = frappe.get_cached_doc(\"Healthcare Service Unit Type\", service_unit_type)\n\t\tif service_unit_type and service_unit_type.is_billable:\n\t\t\thours_occupied = flt(\n\t\t\t\ttime_diff_in_hours(inpatient_occupancy.check_out, inpatient_occupancy.check_in), 2\n\t\t\t)\n\t\t\tqty = 0.5\n\t\t\tif hours_occupied > 0:\n\t\t\t\tactual_qty = hours_occupied / service_unit_type.no_of_hours\n\t\t\t\tfloor = math.floor(actual_qty)\n\t\t\t\tdecimal_part = actual_qty - floor\n\t\t\t\tif decimal_part > 0.5:\n\t\t\t\t\tqty = rounded(floor + 1, 1)\n\t\t\t\telif decimal_part < 0.5 and decimal_part > 0:\n\t\t\t\t\tqty = rounded(floor + 0.5, 1)\n\t\t\t\tif qty <= 0:\n\t\t\t\t\tqty = 0.5\n\t\t\tservices_to_invoice.append(\n\t\t\t\t{\n\t\t\t\t\t\"reference_type\": \"Inpatient Occupancy\",\n\t\t\t\t\t\"reference_name\": inpatient_occupancy.name,\n\t\t\t\t\t\"service\": service_unit_type.item,\n\t\t\t\t\t\"qty\": qty,\n\t\t\t\t}\n\t\t\t)\n\n\treturn services_to_invoice\n\n\ndef get_therapy_plans_to_invoice(patient, company):\n\ttherapy_plans_to_invoice = []\n\ttherapy_plans = frappe.get_list(\n\t\t\"Therapy Plan\",\n\t\tfields=[\"therapy_plan_template\", \"name\"],\n\t\tfilters={\n\t\t\t\"patient\": patient.name,\n\t\t\t\"invoiced\": 0,\n\t\t\t\"company\": company,\n\t\t\t\"therapy_plan_template\": (\"!=\", \"\"),\n\t\t\t\"docstatus\": 1,\n\t\t},\n\t)\n\tfor plan in therapy_plans:\n\t\ttherapy_plans_to_invoice.append(\n\t\t\t{\n\t\t\t\t\"reference_type\": \"Therapy Plan\",\n\t\t\t\t\"reference_name\": plan.name,\n\t\t\t\t\"service\": frappe.db.get_value(\n\t\t\t\t\t\"Therapy Plan Template\", plan.therapy_plan_template, \"linked_item\"\n\t\t\t\t),\n\t\t\t}\n\t\t)\n\n\treturn therapy_plans_to_invoice\n\n\ndef get_therapy_sessions_to_invoice(patient, company):\n\ttherapy_sessions_to_invoice = []\n\ttherapy_plans = frappe.db.get_all(\"Therapy Plan\", {\"therapy_plan_template\": (\"!=\", \"\")})\n\ttherapy_plans_created_from_template = []\n\tfor entry in therapy_plans:\n\t\ttherapy_plans_created_from_template.append(entry.name)\n\n\ttherapy_sessions = frappe.get_list(\n\t\t\"Therapy Session\",\n\t\tfields=\"*\",\n\t\tfilters={\n\t\t\t\"patient\": patient.name,\n\t\t\t\"invoiced\": 0,\n\t\t\t\"company\": company,\n\t\t\t\"therapy_plan\": (\"not in\", therapy_plans_created_from_template),\n\t\t\t\"docstatus\": 
1,\n\t\t\t\"service_request\": \"\",\n\t\t},\n\t)\n\tfor therapy in therapy_sessions:\n\t\tif not therapy.appointment:\n\t\t\tif therapy.therapy_type and frappe.db.get_value(\n\t\t\t\t\"Therapy Type\", therapy.therapy_type, \"is_billable\"\n\t\t\t):\n\t\t\t\ttherapy_sessions_to_invoice.append(\n\t\t\t\t\t{\n\t\t\t\t\t\t\"reference_type\": \"Therapy Session\",\n\t\t\t\t\t\t\"reference_name\": therapy.name,\n\t\t\t\t\t\t\"service\": frappe.db.get_value(\"Therapy Type\", therapy.therapy_type, \"item\"),\n\t\t\t\t\t}\n\t\t\t\t)\n\n\treturn therapy_sessions_to_invoice\n\n\ndef get_service_requests_to_invoice(patient, company):\n\torders_to_invoice = []\n\tservice_requests = frappe.get_list(\n\t\t\"Service Request\",\n\t\tfields=[\"*\"],\n\t\tfilters={\n\t\t\t\"patient\": patient.name,\n\t\t\t\"company\": company,\n\t\t\t\"billing_status\": [\"!=\", \"Invoiced\"],\n\t\t\t\"docstatus\": 1,\n\t\t},\n\t)\n\tfor service_request in service_requests:\n\t\titem, is_billable = frappe.get_cached_value(\n\t\t\tservice_request.template_dt, service_request.template_dn, [\"item\", \"is_billable\"]\n\t\t)\n\t\tprice_list, price_list_currency = frappe.db.get_values(\n\t\t\t\"Price List\", {\"selling\": 1}, [\"name\", \"currency\"]\n\t\t)[0]\n\t\targs = {\n\t\t\t\"doctype\": \"Sales Invoice\",\n\t\t\t\"item_code\": item,\n\t\t\t\"company\": service_request.get(\"company\"),\n\t\t\t\"customer\": frappe.db.get_value(\"Patient\", service_request.get(\"patient\"), \"customer\"),\n\t\t\t\"plc_conversion_rate\": 1.0,\n\t\t\t\"conversion_rate\": 1.0,\n\t\t}\n\t\tif is_billable:\n\t\t\torders_to_invoice.append(\n\t\t\t\t{\n\t\t\t\t\t\"reference_type\": \"Service Request\",\n\t\t\t\t\t\"reference_name\": service_request.name,\n\t\t\t\t\t\"service\": item,\n\t\t\t\t\t\"qty\": service_request.quantity if service_request.quantity else 1,\n\t\t\t\t}\n\t\t\t)\n\treturn orders_to_invoice\n\n\n@frappe.whitelist()\ndef get_appointment_billing_item_and_rate(doc):\n\tif isinstance(doc, str):\n\t\tdoc = json.loads(doc)\n\t\tdoc = frappe.get_doc(doc)\n\n\tservice_item = None\n\tpractitioner_charge = None\n\tdepartment = doc.medical_department if doc.doctype == \"Patient Encounter\" else doc.department\n\tservice_unit = doc.service_unit if doc.doctype == \"Patient Appointment\" else None\n\n\tis_inpatient = doc.inpatient_record\n\n\tif doc.get(\"practitioner\"):\n\t\tservice_item, practitioner_charge = get_practitioner_billing_details(\n\t\t\tdoc.practitioner, is_inpatient\n\t\t)\n\n\tif not service_item and doc.get(\"appointment_type\"):\n\t\tservice_item, appointment_charge = get_appointment_type_billing_details(\n\t\t\tdoc.appointment_type, department if department else service_unit, is_inpatient\n\t\t)\n\t\tif not practitioner_charge:\n\t\t\tpractitioner_charge = appointment_charge\n\n\tif not service_item:\n\t\tservice_item = get_healthcare_service_item(is_inpatient)\n\n\tif not service_item:\n\t\tthrow_config_service_item(is_inpatient)\n\n\tif not practitioner_charge and doc.get(\"practitioner\"):\n\t\tthrow_config_practitioner_charge(is_inpatient, doc.practitioner)\n\n\tif not practitioner_charge and not doc.get(\"practitioner\"):\n\t\tthrow_config_appointment_type_charge(is_inpatient, doc.appointment_type)\n\n\treturn {\"service_item\": service_item, \"practitioner_charge\": practitioner_charge}\n\n\ndef get_appointment_type_billing_details(appointment_type, dep_su, is_inpatient):\n\tfrom healthcare.healthcare.doctype.appointment_type.appointment_type import get_billing_details\n\n\tif not dep_su:\n\t\treturn None, 
None\n\n\titem_list = get_billing_details(appointment_type, dep_su)\n\tservice_item = None\n\tpractitioner_charge = None\n\n\tif item_list:\n\t\tif is_inpatient:\n\t\t\tservice_item = item_list.get(\"inpatient_visit_charge_item\")\n\t\t\tpractitioner_charge = item_list.get(\"inpatient_visit_charge\")\n\t\telse:\n\t\t\tservice_item = item_list.get(\"op_consulting_charge_item\")\n\t\t\tpractitioner_charge = item_list.get(\"op_consulting_charge\")\n\n\treturn service_item, practitioner_charge\n\n\ndef throw_config_service_item(is_inpatient):\n\tservice_item_label = (\n\t\t_(\"Inpatient Visit Charge Item\") if is_inpatient else _(\"Out Patient Consulting Charge Item\")\n\t)\n\n\tmsg = _(\n\t\t(\"Please Configure {0} in \").format(service_item_label)\n\t\t+ \"\"\"<b><a href='/app/Form/Healthcare Settings'>Healthcare Settings</a></b>\"\"\"\n\t)\n\tfrappe.throw(msg, title=_(\"Missing Configuration\"))\n\n\ndef throw_config_practitioner_charge(is_inpatient, practitioner):\n\tcharge_name = _(\"Inpatient Visit Charge\") if is_inpatient else _(\"OP Consulting Charge\")\n\n\tmsg = _(\n\t\t(\"Please Configure {0} for Healthcare Practitioner\").format(charge_name)\n\t\t+ \"\"\" <b><a href='/app/Form/Healthcare Practitioner/{0}'>{0}</a></b>\"\"\".format(practitioner)\n\t)\n\tfrappe.throw(msg, title=_(\"Missing Configuration\"))\n\n\ndef throw_config_appointment_type_charge(is_inpatient, appointment_type):\n\tcharge_name = _(\"Inpatient Visit Charge\") if is_inpatient else _(\"OP Consulting Charge\")\n\n\tmsg = _(\n\t\t(\"Please Configure {0} for Appointment Type\").format(charge_name)\n\t\t+ \"\"\" <b><a href='/app/Form/Appointment type/{0}'>{0}</a></b>\"\"\".format(appointment_type)\n\t)\n\tfrappe.throw(msg, title=_(\"Missing Configuration\"))\n\n\ndef get_practitioner_billing_details(practitioner, is_inpatient):\n\tservice_item = None\n\tpractitioner_charge = None\n\n\tif is_inpatient:\n\t\tfields = [\"inpatient_visit_charge_item\", \"inpatient_visit_charge\"]\n\telse:\n\t\tfields = [\"op_consulting_charge_item\", \"op_consulting_charge\"]\n\n\tif practitioner:\n\t\tservice_item, practitioner_charge = frappe.db.get_value(\n\t\t\t\"Healthcare Practitioner\", practitioner, fields\n\t\t)\n\n\treturn service_item, practitioner_charge\n\n\ndef get_healthcare_service_item(is_inpatient):\n\tservice_item = None\n\n\tif is_inpatient:\n\t\tservice_item = frappe.db.get_single_value(\"Healthcare Settings\", \"inpatient_visit_charge_item\")\n\telse:\n\t\tservice_item = frappe.db.get_single_value(\"Healthcare Settings\", \"op_consulting_charge_item\")\n\n\treturn service_item\n\n\ndef manage_invoice_validate(doc, method):\n\tif doc.service_unit and len(doc.items):\n\t\tfor item in doc.items:\n\t\t\tif not item.service_unit:\n\t\t\t\titem.service_unit = doc.service_unit\n\n\ndef manage_invoice_submit_cancel(doc, method):\n\tif not doc.patient:\n\t\treturn\n\n\tif doc.items:\n\t\tfor item in doc.items:\n\t\t\tif item.get(\"reference_dt\") and item.get(\"reference_dn\"):\n\t\t\t\t# TODO check\n\t\t\t\t# if frappe.get_meta(item.reference_dt).has_field(\"invoiced\"):\n\t\t\t\tset_invoiced(item, method, doc.name)\n\t\tif method == \"on_submit\" and frappe.db.get_single_value(\n\t\t\t\"Healthcare Settings\", \"create_observation_on_si_submit\"\n\t\t):\n\t\t\tcreate_sample_collection_and_observation(doc)\n\n\tif method == \"on_submit\":\n\t\tif frappe.db.get_single_value(\"Healthcare Settings\", \"create_lab_test_on_si_submit\"):\n\t\t\tcreate_multiple(\"Sales Invoice\", doc.name)\n\n\t\tif (\n\t\t\tnot 
frappe.db.get_single_value(\"Healthcare Settings\", \"show_payment_popup\")\n\t\t\tand frappe.db.get_single_value(\"Healthcare Settings\", \"enable_free_follow_ups\")\n\t\t\tand doc.items\n\t\t):\n\t\t\tfor item in doc.items:\n\t\t\t\tif item.reference_dt == \"Patient Appointment\":\n\t\t\t\t\tfee_validity = frappe.db.exists(\"Fee Validity\", {\"patient_appointment\": item.reference_dn})\n\t\t\t\t\tif fee_validity:\n\t\t\t\t\t\tfrappe.db.set_value(\"Fee Validity\", fee_validity, \"sales_invoice_ref\", doc.name)\n\n\ndef set_invoiced(item, method, ref_invoice=None):\n\tinvoiced = False\n\tif method == \"on_submit\":\n\t\tvalidate_invoiced_on_submit(item)\n\t\tinvoiced = True\n\n\tif item.reference_dt == \"Clinical Procedure\":\n\t\tservice_item = frappe.db.get_single_value(\n\t\t\t\"Healthcare Settings\", \"clinical_procedure_consumable_item\"\n\t\t)\n\t\tif service_item == item.item_code:\n\t\t\tfrappe.db.set_value(item.reference_dt, item.reference_dn, \"consumption_invoiced\", invoiced)\n\t\telse:\n\t\t\tfrappe.db.set_value(item.reference_dt, item.reference_dn, \"invoiced\", invoiced)\n\telse:\n\t\tif item.reference_dt not in [\"Service Request\", \"Medication Request\"]:\n\t\t\tfrappe.db.set_value(item.reference_dt, item.reference_dn, \"invoiced\", invoiced)\n\n\tif item.reference_dt == \"Patient Appointment\":\n\t\tif frappe.db.get_value(\"Patient Appointment\", item.reference_dn, \"procedure_template\"):\n\t\t\tdt_from_appointment = \"Clinical Procedure\"\n\t\telse:\n\t\t\tdt_from_appointment = \"Patient Encounter\"\n\t\tmanage_doc_for_appointment(dt_from_appointment, item.reference_dn, invoiced)\n\n\telif item.reference_dt == \"Lab Prescription\":\n\t\tmanage_prescriptions(\n\t\t\tinvoiced, item.reference_dt, item.reference_dn, \"Lab Test\", \"lab_test_created\"\n\t\t)\n\n\telif item.reference_dt == \"Procedure Prescription\":\n\t\tmanage_prescriptions(\n\t\t\tinvoiced, item.reference_dt, item.reference_dn, \"Clinical Procedure\", \"procedure_created\"\n\t\t)\n\telif item.reference_dt in [\"Service Request\", \"Medication Request\"]:\n\t\t# if order is invoiced, set both order and service transaction as invoiced\n\t\thso = frappe.get_doc(item.reference_dt, item.reference_dn)\n\t\tif invoiced:\n\t\t\thso.update_invoice_details(item.qty)\n\t\telse:\n\t\t\thso.update_invoice_details(item.qty * -1)\n\n\t\t# service transaction linking to HSO\n\t\tif item.reference_dt == \"Service Request\":\n\t\t\ttemplate_map = {\n\t\t\t\t\"Clinical Procedure Template\": \"Clinical Procedure\",\n\t\t\t\t\"Therapy Type\": \"Therapy Session\",\n\t\t\t\t\"Lab Test Template\": \"Lab Test\"\n\t\t\t\t# 'Healthcare Service Unit': 'Inpatient Occupancy'\n\t\t\t}\n\n\ndef validate_invoiced_on_submit(item):\n\tif (\n\t\titem.reference_dt == \"Clinical Procedure\"\n\t\tand frappe.db.get_single_value(\"Healthcare Settings\", \"clinical_procedure_consumable_item\")\n\t\t== item.item_code\n\t):\n\t\tis_invoiced = frappe.db.get_value(item.reference_dt, item.reference_dn, \"consumption_invoiced\")\n\n\telif item.reference_dt in [\"Service Request\", \"Medication Request\"]:\n\t\tbilling_status = frappe.db.get_value(item.reference_dt, item.reference_dn, \"billing_status\")\n\t\tis_invoiced = True if billing_status == \"Invoiced\" else False\n\n\telse:\n\t\tis_invoiced = frappe.db.get_value(item.reference_dt, item.reference_dn, \"invoiced\")\n\tif is_invoiced:\n\t\tfrappe.throw(\n\t\t\t_(\"The item referenced by {0} - {1} is already invoiced\").format(\n\t\t\t\titem.reference_dt, item.reference_dn\n\t\t\t)\n\t\t)\n\n\ndef 
manage_prescriptions(invoiced, ref_dt, ref_dn, dt, created_check_field):\n\tcreated = frappe.db.get_value(ref_dt, ref_dn, created_check_field)\n\tif created:\n\t\t# Fetch the doc created for the prescription\n\t\tdoc_created = frappe.db.get_value(dt, {\"prescription\": ref_dn})\n\t\tfrappe.db.set_value(dt, doc_created, \"invoiced\", invoiced)\n\n\ndef manage_doc_for_appointment(dt_from_appointment, appointment, invoiced):\n\tdn_from_appointment = frappe.db.get_value(\n\t\tdt_from_appointment, filters={\"appointment\": appointment}\n\t)\n\tif dn_from_appointment:\n\t\tfrappe.db.set_value(dt_from_appointment, dn_from_appointment, \"invoiced\", invoiced)\n\n\n@frappe.whitelist()\ndef get_drugs_to_invoice(encounter):\n\tencounter = frappe.get_doc(\"Patient Encounter\", encounter)\n\tif encounter:\n\t\tpatient = frappe.get_doc(\"Patient\", encounter.patient)\n\t\tif patient:\n\t\t\tif patient.customer:\n\t\t\t\torders_to_invoice = []\n\t\t\t\tmedication_requests = frappe.get_list(\n\t\t\t\t\t\"Medication Request\",\n\t\t\t\t\tfields=[\"*\"],\n\t\t\t\t\tfilters={\n\t\t\t\t\t\t\"patient\": patient.name,\n\t\t\t\t\t\t\"order_group\": encounter.name,\n\t\t\t\t\t\t\"billing_status\": [\"in\", [\"Pending\", \"Partly Invoiced\"]],\n\t\t\t\t\t\t\"docstatus\": 1,\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t\tfor medication_request in medication_requests:\n\t\t\t\t\tis_billable = frappe.get_cached_value(\n\t\t\t\t\t\t\"Medication\", medication_request.medication, [\"is_billable\"]\n\t\t\t\t\t)\n\n\t\t\t\t\tdescription = \"\"\n\t\t\t\t\tif medication_request.dosage and medication_request.period:\n\t\t\t\t\t\tdescription = _(\"{0} for {1}\").format(medication_request.dosage, medication_request.period)\n\n\t\t\t\t\tif medication_request.medicaiton_item and is_billable:\n\t\t\t\t\t\tbillable_order_qty = medication_request.get(\"quantity\", 1) - medication_request.get(\n\t\t\t\t\t\t\t\"qty_invoiced\", 0\n\t\t\t\t\t\t)\n\t\t\t\t\t\tif medication_request.number_of_repeats_allowed:\n\t\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t\tmedication_request.total_dispensable_quantity\n\t\t\t\t\t\t\t\t>= medication_request.quantity + medication_request.qty_invoiced\n\t\t\t\t\t\t\t):\n\t\t\t\t\t\t\t\tbillable_order_qty = medication_request.get(\"quantity\", 1)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tbillable_order_qty = (\n\t\t\t\t\t\t\t\t\tmedication_request.total_dispensable_quantity - medication_request.get(\"qty_invoiced\", 0)\n\t\t\t\t\t\t\t\t)\n\n\t\t\t\t\t\torders_to_invoice.append(\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\"reference_type\": \"Medication Request\",\n\t\t\t\t\t\t\t\t\"reference_name\": medication_request.name,\n\t\t\t\t\t\t\t\t\"drug_code\": medication_request.medicaiton_item,\n\t\t\t\t\t\t\t\t\"quantity\": billable_order_qty,\n\t\t\t\t\t\t\t\t\"description\": description,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t)\n\t\t\t\treturn orders_to_invoice\n\n\n@frappe.whitelist()\ndef get_children(doctype, parent=None, company=None, is_root=False):\n\tparent_fieldname = \"parent_\" + doctype.lower().replace(\" \", \"_\")\n\tfields = [\"name as value\", \"is_group as expandable\", \"lft\", \"rgt\"]\n\n\tfilters = [[\"ifnull(`{0}`,'')\".format(parent_fieldname), \"=\", \"\" if is_root else parent]]\n\n\tif is_root:\n\t\tfields += [\"service_unit_type\"] if doctype == \"Healthcare Service Unit\" else []\n\t\tfilters.append([\"company\", \"=\", company])\n\telse:\n\t\tfields += (\n\t\t\t[\"service_unit_type\", \"allow_appointments\", \"inpatient_occupancy\", \"occupancy_status\"]\n\t\t\tif doctype == \"Healthcare Service Unit\"\n\t\t\telse 
[]\n\t\t)\n\t\tfields += [parent_fieldname + \" as parent\"]\n\n\tservice_units = frappe.get_list(doctype, fields=fields, filters=filters)\n\tfor each in service_units:\n\t\tif each[\"expandable\"] != 1 or each[\"value\"].startswith(\"All Healthcare Service Units\"):\n\t\t\tcontinue\n\n\t\tavailable_count = frappe.db.count(\n\t\t\t\"Healthcare Service Unit\",\n\t\t\tfilters={\"parent_healthcare_service_unit\": each[\"value\"], \"inpatient_occupancy\": 1},\n\t\t)\n\n\t\tif available_count > 0:\n\t\t\toccupied_count = frappe.db.count(\n\t\t\t\t\"Healthcare Service Unit\",\n\t\t\t\tfilters={\n\t\t\t\t\t\"parent_healthcare_service_unit\": each[\"value\"],\n\t\t\t\t\t\"inpatient_occupancy\": 1,\n\t\t\t\t\t\"occupancy_status\": \"Occupied\",\n\t\t\t\t},\n\t\t\t)\n\t\t\t# set occupancy status of group node\n\t\t\teach[\"occupied_of_available\"] = f\"{str(occupied_count)} Occupied of {str(available_count)}\"\n\n\treturn service_units\n\n\n@frappe.whitelist()\ndef get_patient_vitals(patient, from_date=None, to_date=None):\n\tif not patient:\n\t\treturn\n\n\tvitals = frappe.db.get_all(\n\t\t\"Vital Signs\",\n\t\tfilters={\"docstatus\": 1, \"patient\": patient},\n\t\torder_by=\"signs_date, signs_time\",\n\t\tfields=[\"*\"],\n\t)\n\n\tif len(vitals):\n\t\treturn vitals\n\treturn False\n\n\n@frappe.whitelist()\ndef render_docs_as_html(docs):\n\t# docs: list of dicts, each a {doctype: docname} pair\n\tdocs_html = \"<div class='col-md-12 col-sm-12 text-muted'>\"\n\tfor doc in docs:\n\t\tdocs_html += render_doc_as_html(doc[\"doctype\"], doc[\"docname\"])[\"html\"] + \"<br/>\"\n\treturn {\"html\": docs_html}\n\n\n@frappe.whitelist()\ndef render_doc_as_html(doctype, docname, exclude_fields=None):\n\t\"\"\"\n\tRender document as HTML\n\t\"\"\"\n\texclude_fields = exclude_fields or []\n\tdoc = frappe.get_doc(doctype, docname)\n\tmeta = frappe.get_meta(doctype)\n\tdoc_html = section_html = section_label = html = \"\"\n\tsec_on = has_data = False\n\tcol_on = 0\n\n\tfor df in meta.fields:\n\t\t# on section break append previous section and html to doc html\n\t\tif df.fieldtype == \"Section Break\":\n\t\t\tif has_data and col_on and sec_on:\n\t\t\t\tdoc_html += section_html + html + \"</div>\"\n\n\t\t\telif has_data and not col_on and sec_on:\n\t\t\t\tdoc_html += \"\"\"\n\t\t\t\t\t<br>\n\t\t\t\t\t<div class='row'>\n\t\t\t\t\t\t<div class='col-md-12 col-sm-12'>\n\t\t\t\t\t\t\t<b>{0}</b>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t</div>\n\t\t\t\t\t<div class='row'>\n\t\t\t\t\t\t<div class='col-md-12 col-sm-12'>\n\t\t\t\t\t\t\t{1} {2}\n\t\t\t\t\t\t</div>\n\t\t\t\t\t</div>\n\t\t\t\t\"\"\".format(\n\t\t\t\t\tsection_label, section_html, html\n\t\t\t\t)\n\n\t\t\t# close divs for columns\n\t\t\twhile col_on:\n\t\t\t\tdoc_html += \"</div>\"\n\t\t\t\tcol_on -= 1\n\n\t\t\tsec_on = True\n\t\t\thas_data = False\n\t\t\tcol_on = 0\n\t\t\tsection_html = html = \"\"\n\n\t\t\tif df.label:\n\t\t\t\tsection_label = df.label\n\t\t\tcontinue\n\n\t\t# on column break append html to section html or doc html\n\t\tif df.fieldtype == \"Column Break\":\n\t\t\tif sec_on and not col_on and has_data:\n\t\t\t\tsection_html += \"\"\"\n\t\t\t\t\t<br>\n\t\t\t\t\t<div class='row'>\n\t\t\t\t\t\t<div class='col-md-12 col-sm-12'>\n\t\t\t\t\t\t\t<b>{0}</b>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t</div>\n\t\t\t\t\t<div class='row'>\n\t\t\t\t\t\t<div class='col-md-4 col-sm-4'>\n\t\t\t\t\t\t\t{1}\n\t\t\t\t\t\t</div>\n\t\t\t\t\"\"\".format(\n\t\t\t\t\tsection_label, html\n\t\t\t\t)\n\t\t\telif col_on == 1 and has_data:\n\t\t\t\tsection_html += \"<div class='col-md-4 col-sm-4'>\" + html 
+ \"</div>\"\n\t\t\telif col_on > 1 and has_data:\n\t\t\t\tdoc_html += \"<div class='col-md-4 col-sm-4'>\" + html + \"</div>\"\n\t\t\telse:\n\t\t\t\tdoc_html += \"\"\"\n\t\t\t\t\t<div class='row'>\n\t\t\t\t\t\t<div class='col-md-12 col-sm-12'>\n\t\t\t\t\t\t\t{0}\n\t\t\t\t\t\t</div>\n\t\t\t\t\t</div>\n\t\t\t\t\"\"\".format(\n\t\t\t\t\thtml\n\t\t\t\t)\n\n\t\t\thtml = \"\"\n\t\t\tcol_on += 1\n\n\t\t\tif df.label:\n\t\t\t\thtml += \"<br>\" + df.label\n\t\t\tcontinue\n\n\t\t# on table iterate through items and create table\n\t\t# based on the in_list_view property\n\t\t# append to section html or doc html\n\t\tif df.fieldtype == \"Table\":\n\t\t\titems = doc.get(df.fieldname)\n\t\t\tif not items:\n\t\t\t\tcontinue\n\t\t\tchild_meta = frappe.get_meta(df.options)\n\n\t\t\tif not has_data:\n\t\t\t\thas_data = True\n\t\t\ttable_head = table_row = \"\"\n\t\t\tcreate_head = True\n\n\t\t\tfor item in items:\n\t\t\t\ttable_row += \"<tr>\"\n\t\t\t\tfor cdf in child_meta.fields:\n\t\t\t\t\tif cdf.in_list_view:\n\t\t\t\t\t\tif create_head:\n\t\t\t\t\t\t\ttable_head += \"<th class='text-muted'>\" + cdf.label + \"</th>\"\n\t\t\t\t\t\tif item.get(cdf.fieldname):\n\t\t\t\t\t\t\ttable_row += \"<td>\" + cstr(item.get(cdf.fieldname)) + \"</td>\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttable_row += \"<td></td>\"\n\n\t\t\t\tcreate_head = False\n\t\t\t\ttable_row += \"</tr>\"\n\n\t\t\tif sec_on:\n\t\t\t\tsection_html += \"\"\"\n\t\t\t\t\t<table class='table table-condensed bordered'>\n\t\t\t\t\t\t{0} {1}\n\t\t\t\t\t</table>\n\t\t\t\t\"\"\".format(\n\t\t\t\t\ttable_head, table_row\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\thtml += \"\"\"\n\t\t\t\t\t<table class='table table-condensed table-bordered'>\n\t\t\t\t\t\t{0} {1}\n\t\t\t\t\t</table>\n\t\t\t\t\"\"\".format(\n\t\t\t\t\ttable_head, table_row\n\t\t\t\t)\n\t\t\tcontinue\n\n\t\t# on any other field type add label and value to html\n\t\tif (\n\t\t\tnot df.hidden\n\t\t\tand not df.print_hide\n\t\t\tand doc.get(df.fieldname)\n\t\t\tand df.fieldname not in exclude_fields\n\t\t):\n\t\t\tformatted_value = format_value(doc.get(df.fieldname), meta.get_field(df.fieldname), doc)\n\t\t\thtml += \"<br>{0} : {1}\".format(df.label or df.fieldname, formatted_value)\n\n\t\t\tif not has_data:\n\t\t\t\thas_data = True\n\n\tif sec_on and col_on and has_data:\n\t\tdoc_html += section_html + html + \"</div></div>\"\n\telif sec_on and not col_on and has_data:\n\t\tdoc_html += \"\"\"\n\t\t\t<div class='col-md-12 col-sm-12'>\n\t\t\t\t<div class='col-md-12 col-sm-12'>\n\t\t\t\t\t{0} {1}\n\t\t\t\t</div>\n\t\t\t</div>\n\t\t\"\"\".format(\n\t\t\tsection_html, html\n\t\t)\n\treturn {\"html\": doc_html}\n\n\ndef update_address_links(address, method):\n\t\"\"\"\n\tHook validate Address\n\tIf Patient is linked in Address, also link the associated Customer\n\t\"\"\"\n\tif \"Healthcare\" not in frappe.get_active_domains():\n\t\treturn\n\n\tpatient_links = list(filter(lambda link: link.get(\"link_doctype\") == \"Patient\", address.links))\n\n\tfor link in patient_links:\n\t\tcustomer = frappe.db.get_value(\"Patient\", link.get(\"link_name\"), \"customer\")\n\t\tif customer and not address.has_link(\"Customer\", customer):\n\t\t\taddress.append(\"links\", dict(link_doctype=\"Customer\", link_name=customer))\n\n\ndef update_patient_email_and_phone_numbers(contact, method):\n\t\"\"\"\n\tHook validate Contact\n\tUpdate linked Patients' primary mobile and phone numbers\n\t\"\"\"\n\tif \"Healthcare\" not in frappe.get_active_domains() or contact.flags.skip_patient_update:\n\t\treturn\n\n\tif contact.is_primary_contact 
and (contact.email_id or contact.mobile_no or contact.phone):\n\t\tpatient_links = list(filter(lambda link: link.get(\"link_doctype\") == \"Patient\", contact.links))\n\n\t\tfor link in patient_links:\n\t\t\tcontact_details = frappe.db.get_value(\n\t\t\t\t\"Patient\", link.get(\"link_name\"), [\"email\", \"mobile\", \"phone\"], as_dict=1\n\t\t\t)\n\t\t\tif contact.email_id and contact.email_id != contact_details.get(\"email\"):\n\t\t\t\tfrappe.db.set_value(\"Patient\", link.get(\"link_name\"), \"email\", contact.email_id)\n\t\t\tif contact.mobile_no and contact.mobile_no != contact_details.get(\"mobile\"):\n\t\t\t\tfrappe.db.set_value(\"Patient\", link.get(\"link_name\"), \"mobile\", contact.mobile_no)\n\t\t\tif contact.phone and contact.phone != contact_details.get(\"phone\"):\n\t\t\t\tfrappe.db.set_value(\"Patient\", link.get(\"link_name\"), \"phone\", contact.phone)\n\n\ndef before_tests():\n\t# complete setup if missing\n\tfrom frappe.desk.page.setup_wizard.setup_wizard import setup_complete\n\n\tcurrent_year = frappe.utils.now_datetime().year\n\n\tif not frappe.get_list(\"Company\"):\n\t\tsetup_complete(\n\t\t\t{\n\t\t\t\t\"currency\": \"INR\",\n\t\t\t\t\"full_name\": \"Test User\",\n\t\t\t\t\"company_name\": \"Frappe Care LLC\",\n\t\t\t\t\"timezone\": \"America/New_York\",\n\t\t\t\t\"company_abbr\": \"WP\",\n\t\t\t\t\"industry\": \"Healthcare\",\n\t\t\t\t\"country\": \"United States\",\n\t\t\t\t\"fy_start_date\": f\"{current_year}-01-01\",\n\t\t\t\t\"fy_end_date\": f\"{current_year}-12-31\",\n\t\t\t\t\"language\": \"english\",\n\t\t\t\t\"company_tagline\": \"Testing\",\n\t\t\t\t\"email\": \"test@erpnext.com\",\n\t\t\t\t\"password\": \"test\",\n\t\t\t\t\"chart_of_accounts\": \"Standard\",\n\t\t\t\t\"domains\": [\"Healthcare\"],\n\t\t\t}\n\t\t)\n\n\t\tsetup_healthcare()\n\n\ndef create_healthcare_service_unit_tree_root(doc, method=None):\n\trecord = [\n\t\t{\n\t\t\t\"doctype\": \"Healthcare Service Unit\",\n\t\t\t\"healthcare_service_unit_name\": \"All Healthcare Service Units\",\n\t\t\t\"is_group\": 1,\n\t\t\t\"company\": doc.name,\n\t\t}\n\t]\n\tinsert_record(record)\n\n\ndef validate_nursing_tasks(document):\n\tif not frappe.db.get_single_value(\"Healthcare Settings\", \"validate_nursing_checklists\"):\n\t\treturn True\n\n\tfilters = {\n\t\t\"reference_name\": document.name,\n\t\t\"mandatory\": 1,\n\t\t\"status\": [\"not in\", [\"Completed\", \"Cancelled\"]],\n\t}\n\ttasks = frappe.get_all(\"Nursing Task\", filters=filters)\n\tif not tasks:\n\t\treturn True\n\n\tfrappe.throw(\n\t\t_(\"Please complete linked Nursing Tasks before submission: {}\").format(\n\t\t\t\", \".join(get_link_to_form(\"Nursing Task\", task.name) for task in tasks)\n\t\t)\n\t)\n\n\n@frappe.whitelist()\ndef get_medical_codes(template_dt, template_dn, code_standard=None):\n\t\"\"\"returns codification table from templates\"\"\"\n\tfilters = {\"parent\": template_dn, \"parenttype\": template_dt}\n\n\tif code_standard:\n\t\tfilters[\"medical_code_standard\"] = code_standard\n\n\treturn frappe.db.get_all(\n\t\t\"Codification Table\",\n\t\tfilters=filters,\n\t\tfields=[\n\t\t\t\"medical_code\",\n\t\t\t\"code\",\n\t\t\t\"system\",\n\t\t\t\"description\",\n\t\t\t\"medical_code_standard\",\n\t\t],\n\t)\n\n\ndef company_on_trash(doc, method):\n\tfor su in frappe.get_all(\"Healthcare Service Unit\", {\"company\": doc.name}):\n\t\tservice_unit_doc = frappe.get_doc(\"Healthcare Service Unit\", su.get(\"name\"))\n\t\tservice_unit_doc.flags.on_trash_company = True\n\t\tservice_unit_doc.delete()\n\n\ndef 
create_sample_collection_and_observation(doc):\n\tmeta = frappe.get_meta(\"Sales Invoice Item\", cached=True)\n\tdiag_report_required = False\n\tdata = []\n\tfor item in doc.items:\n\t\t# to set patient in item table if not set\n\t\tif meta.has_field(\"patient\") and not item.patient:\n\t\t\titem.patient = doc.patient\n\n\t\t# ignore if already created from service request\n\t\tif item.get(\"reference_dt\") == \"Service Request\" and item.get(\"reference_dn\"):\n\t\t\tif frappe.db.exists(\n\t\t\t\t\"Observation Sample Collection\", {\"service_request\": item.get(\"reference_dn\")}\n\t\t\t) or frappe.db.exists(\n\t\t\t\t\"Sample Collection\", {\"service_request\": item.get(\"reference_dn\")}\n\t\t\t):\n\t\t\t\tcontinue\n\n\t\ttemplate_id = frappe.db.exists(\"Observation Template\", {\"item\": item.item_code})\n\t\tif template_id:\n\t\t\ttemp_dict = {}\n\t\t\ttemp_dict[\"name\"] = template_id\n\t\t\tif meta.has_field(\"patient\") and item.get(\"patient\"):\n\t\t\t\ttemp_dict[\"patient\"] = item.get(\"patient\")\n\t\t\t\ttemp_dict[\"child\"] = item.get(\"name\")\n\t\t\tdata.append(temp_dict)\n\n\tout_data = []\n\tfor d in data:\n\t\tobservation_template = frappe.get_value(\n\t\t\t\"Observation Template\",\n\t\t\td.get(\"name\"),\n\t\t\t[\n\t\t\t\t\"sample_type\",\n\t\t\t\t\"sample\",\n\t\t\t\t\"medical_department\",\n\t\t\t\t\"container_closure_color\",\n\t\t\t\t\"name\",\n\t\t\t\t\"sample_qty\",\n\t\t\t\t\"has_component\",\n\t\t\t\t\"sample_collection_required\",\n\t\t\t],\n\t\t\tas_dict=True,\n\t\t)\n\t\tif observation_template:\n\t\t\tobservation_template[\"patient\"] = d.get(\"patient\")\n\t\t\tobservation_template[\"child\"] = d.get(\"child\")\n\t\t\tout_data.append(observation_template)\n\tif not meta.has_field(\"patient\"):\n\t\tsample_collection = create_sample_collection(doc, doc.patient)\n\telse:\n\t\tgrouped = {}\n\t\tfor grp in out_data:\n\t\t\tgrouped.setdefault(grp.patient, []).append(grp)\n\t\tif grouped:\n\t\t\tout_data = grouped\n\n\tfor grp in out_data:\n\t\tpatient = doc.patient\n\t\tif meta.has_field(\"patient\") and grp:\n\t\t\tpatient = grp\n\t\tif meta.has_field(\"patient\"):\n\t\t\tsample_collection = create_sample_collection(doc, patient)\n\t\t\tfor obs in out_data[grp]:\n\t\t\t\t(sample_collection, diag_report_required,) = insert_observation_and_sample_collection(\n\t\t\t\t\tdoc, patient, obs, sample_collection, obs.get(\"child\")\n\t\t\t\t)\n\t\t\tif sample_collection and len(sample_collection.get(\"observation_sample_collection\")) > 0:\n\t\t\t\tsample_collection.save(ignore_permissions=True)\n\n\t\t\tif diag_report_required:\n\t\t\t\tinsert_diagnostic_report(doc, patient, sample_collection.name)\n\t\telse:\n\t\t\tsample_collection, diag_report_required = insert_observation_and_sample_collection(\n\t\t\t\tdoc, patient, grp, sample_collection\n\t\t\t)\n\n\tif not meta.has_field(\"patient\"):\n\t\tif sample_collection and len(sample_collection.get(\"observation_sample_collection\")) > 0:\n\t\t\tsample_collection.save(ignore_permissions=True)\n\n\t\tif diag_report_required:\n\t\t\tinsert_diagnostic_report(doc, patient, sample_collection.name)\n\n\ndef create_sample_collection(doc, patient):\n\tpatient = frappe.get_doc(\"Patient\", patient)\n\tsample_collection = frappe.new_doc(\"Sample Collection\")\n\tsample_collection.patient = patient.name\n\tsample_collection.patient_age = patient.get_age()\n\tsample_collection.patient_sex = patient.sex\n\tsample_collection.company = doc.company\n\tsample_collection.referring_practitioner = 
doc.ref_practitioner\n\tsample_collection.reference_doc = doc.doctype\n\tsample_collection.reference_name = doc.name\n\treturn sample_collection\n\n\ndef insert_diagnostic_report(doc, patient, sample_collection=None):\n\tdiagnostic_report = frappe.new_doc(\"Diagnostic Report\")\n\tdiagnostic_report.company = doc.company\n\tdiagnostic_report.patient = patient\n\tdiagnostic_report.ref_doctype = doc.doctype\n\tdiagnostic_report.docname = doc.name\n\tdiagnostic_report.practitioner = doc.ref_practitioner\n\tdiagnostic_report.sample_collection = sample_collection\n\tdiagnostic_report.save(ignore_permissions=True)\n\n\ndef insert_observation_and_sample_collection(doc, patient, grp, sample_collection, child=None):\n\tdiag_report_required = False\n\tif grp.get(\"has_component\"):\n\t\tdiag_report_required = True\n\t\t# parent observation\n\t\tparent_observation = add_observation(\n\t\t\tpatient=patient,\n\t\t\ttemplate=grp.get(\"name\"),\n\t\t\tpractitioner=doc.ref_practitioner,\n\t\t\tinvoice=doc.name,\n\t\t\tchild=child if child else \"\",\n\t\t)\n\n\t\tsample_reqd_component_obs, non_sample_reqd_component_obs = get_observation_template_details(\n\t\t\tgrp.get(\"name\")\n\t\t)\n\t\t# create observation for non sample_collection_reqd grouped templates\n\n\t\tif len(non_sample_reqd_component_obs) > 0:\n\t\t\tfor comp in non_sample_reqd_component_obs:\n\t\t\t\tadd_observation(\n\t\t\t\t\tpatient=patient,\n\t\t\t\t\ttemplate=comp,\n\t\t\t\t\tpractitioner=doc.ref_practitioner,\n\t\t\t\t\tparent=parent_observation,\n\t\t\t\t\tinvoice=doc.name,\n\t\t\t\t\tchild=child if child else \"\",\n\t\t\t\t)\n\t\t# create sample_collection child row for sample_collection_reqd grouped templates\n\t\tif len(sample_reqd_component_obs) > 0:\n\t\t\tsample_collection.append(\n\t\t\t\t\"observation_sample_collection\",\n\t\t\t\t{\n\t\t\t\t\t\"observation_template\": grp.get(\"name\"),\n\t\t\t\t\t\"container_closure_color\": grp.get(\"container_closure_color\"),\n\t\t\t\t\t\"sample\": grp.get(\"sample\"),\n\t\t\t\t\t\"sample_type\": grp.get(\"sample_type\"),\n\t\t\t\t\t\"component_observation_parent\": parent_observation,\n\t\t\t\t\t\"reference_child\": child if child else \"\",\n\t\t\t\t},\n\t\t\t)\n\n\telse:\n\t\tdiag_report_required = True\n\t\t# create observation for non sample_collection_reqd individual templates\n\t\tif not grp.get(\"sample_collection_required\"):\n\t\t\tadd_observation(\n\t\t\t\tpatient=patient,\n\t\t\t\ttemplate=grp.get(\"name\"),\n\t\t\t\tpractitioner=doc.ref_practitioner,\n\t\t\t\tinvoice=doc.name,\n\t\t\t\tchild=child if child else \"\",\n\t\t\t)\n\t\telse:\n\t\t\t# create sample_collection child row for sample_collection_reqd individual templates\n\t\t\tsample_collection.append(\n\t\t\t\t\"observation_sample_collection\",\n\t\t\t\t{\n\t\t\t\t\t\"observation_template\": grp.get(\"name\"),\n\t\t\t\t\t\"container_closure_color\": grp.get(\"container_closure_color\"),\n\t\t\t\t\t\"sample\": grp.get(\"sample\"),\n\t\t\t\t\t\"sample_type\": grp.get(\"sample_type\"),\n\t\t\t\t\t\"reference_child\": child if child else \"\",\n\t\t\t\t},\n\t\t\t)\n\treturn sample_collection, diag_report_required\n\n\n@frappe.whitelist()\ndef generate_barcodes(in_val):\n\tfrom io import BytesIO\n\n\tfrom barcode import Code128\n\tfrom barcode.writer import ImageWriter\n\n\tstream = BytesIO()\n\tCode128(str(in_val), writer=ImageWriter()).write(\n\t\tstream,\n\t\t{\n\t\t\t\"module_height\": 3,\n\t\t\t\"text_distance\": 0.9,\n\t\t\t\"write_text\": False,\n\t\t},\n\t)\n\tbarcode_base64 = 
base64.b64encode(stream.getbuffer()).decode()\n\tstream.close()\n\n\treturn barcode_base64\n", "repo_name": "frappe/health", "sub_path": "healthcare/healthcare/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 38164, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 190, "dataset": "github-code", "pt": "21", "api": [{"api_name": "frappe.get_doc", "line_number": 25, "usage_type": "call"}, {"api_name": "frappe.whitelist", "line_number": 23, "usage_type": "call"}, {"api_name": "frappe.db.get_value", "line_number": 43, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 43, "usage_type": "attribute"}, {"api_name": "frappe._", "line_number": 44, "usage_type": "call"}, {"api_name": "frappe.throw", "line_number": 46, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 46, "usage_type": "call"}, {"api_name": "frappe.get_list", "line_number": 51, "usage_type": "call"}, {"api_name": "frappe.db.get_value", "line_number": 66, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 66, "usage_type": "attribute"}, {"api_name": "frappe.db.get_single_value", "line_number": 78, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 78, "usage_type": "attribute"}, {"api_name": "frappe.db.exists", "line_number": 80, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 80, "usage_type": "attribute"}, {"api_name": "healthcare.healthcare.doctype.healthcare_settings.healthcare_settings.get_income_account", "line_number": 89, "usage_type": "call"}, {"api_name": "frappe.get_list", "line_number": 107, "usage_type": "call"}, {"api_name": "frappe.db.get_single_value", "line_number": 119, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 119, "usage_type": "attribute"}, {"api_name": "healthcare.healthcare.doctype.healthcare_settings.healthcare_settings.get_income_account", "line_number": 127, "usage_type": "call"}, {"api_name": "frappe.get_list", "line_number": 144, "usage_type": "call"}, {"api_name": "frappe.get_cached_value", "line_number": 156, "usage_type": "call"}, {"api_name": "frappe.get_list", "line_number": 169, "usage_type": "call"}, {"api_name": "frappe.get_cached_value", "line_number": 181, "usage_type": "call"}, {"api_name": "frappe.get_list", "line_number": 194, "usage_type": "call"}, {"api_name": "frappe.get_cached_value", "line_number": 207, "usage_type": "call"}, {"api_name": "frappe.db.get_single_value", "line_number": 222, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 222, "usage_type": "attribute"}, {"api_name": "frappe._", "line_number": 226, "usage_type": "call"}, {"api_name": "frappe.utils.get_link_to_form", "line_number": 227, "usage_type": "call"}, {"api_name": "frappe.throw", "line_number": 230, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 230, "usage_type": "call"}, {"api_name": "frappe.db.sql", "line_number": 247, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 247, "usage_type": "attribute"}, {"api_name": "frappe.db.get_value", "line_number": 265, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 265, "usage_type": "attribute"}, {"api_name": "frappe.get_cached_doc", "line_number": 268, "usage_type": "call"}, {"api_name": "frappe.utils.flt", "line_number": 270, "usage_type": "call"}, {"api_name": "frappe.utils.time_diff_in_hours", "line_number": 271, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 276, "usage_type": "call"}, {"api_name": "frappe.utils.rounded", "line_number": 279, "usage_type": 
"call"}, {"api_name": "frappe.utils.rounded", "line_number": 281, "usage_type": "call"}, {"api_name": "frappe.get_list", "line_number": 298, "usage_type": "call"}, {"api_name": "frappe.db.get_value", "line_number": 314, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 314, "usage_type": "attribute"}, {"api_name": "frappe.db.get_all", "line_number": 325, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 325, "usage_type": "attribute"}, {"api_name": "frappe.get_list", "line_number": 330, "usage_type": "call"}, {"api_name": "frappe.db.get_value", "line_number": 344, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 344, "usage_type": "attribute"}, {"api_name": "frappe.db.get_value", "line_number": 351, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 351, "usage_type": "attribute"}, {"api_name": "frappe.get_list", "line_number": 360, "usage_type": "call"}, {"api_name": "frappe.get_cached_value", "line_number": 371, "usage_type": "call"}, {"api_name": "frappe.db.get_values", "line_number": 374, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 374, "usage_type": "attribute"}, {"api_name": "frappe.db.get_value", "line_number": 381, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 381, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 400, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 401, "usage_type": "call"}, {"api_name": "frappe.whitelist", "line_number": 397, "usage_type": "call"}, {"api_name": "healthcare.healthcare.doctype.appointment_type.appointment_type.get_billing_details", "line_number": 443, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 460, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 463, "usage_type": "call"}, {"api_name": "frappe.throw", "line_number": 467, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 467, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 471, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 473, "usage_type": "call"}, {"api_name": "frappe.throw", "line_number": 477, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 477, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 481, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 483, "usage_type": "call"}, {"api_name": "frappe.throw", "line_number": 487, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 487, "usage_type": "call"}, {"api_name": "frappe.db.get_value", "line_number": 500, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 500, "usage_type": "attribute"}, {"api_name": "frappe.db.get_single_value", "line_number": 511, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 511, "usage_type": "attribute"}, {"api_name": "frappe.db.get_single_value", "line_number": 513, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 513, "usage_type": "attribute"}, {"api_name": "frappe.db.get_single_value", "line_number": 535, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 535, "usage_type": "attribute"}, {"api_name": "frappe.db.get_single_value", "line_number": 541, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 541, "usage_type": "attribute"}, {"api_name": "healthcare.healthcare.doctype.lab_test.lab_test.create_multiple", "line_number": 542, "usage_type": "call"}, {"api_name": "frappe.db.get_single_value", "line_number": 545, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 545, 
"usage_type": "attribute"}, {"api_name": "frappe.db.get_single_value", "line_number": 546, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 546, "usage_type": "attribute"}, {"api_name": "frappe.db.exists", "line_number": 551, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 551, "usage_type": "attribute"}, {"api_name": "frappe.db.set_value", "line_number": 553, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 553, "usage_type": "attribute"}, {"api_name": "frappe.db.get_single_value", "line_number": 563, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 563, "usage_type": "attribute"}, {"api_name": "frappe.db.set_value", "line_number": 567, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 567, "usage_type": "attribute"}, {"api_name": "frappe.db.set_value", "line_number": 569, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 569, "usage_type": "attribute"}, {"api_name": "frappe.db.set_value", "line_number": 572, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 572, "usage_type": "attribute"}, {"api_name": "frappe.db.get_value", "line_number": 575, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 575, "usage_type": "attribute"}, {"api_name": "frappe.get_doc", "line_number": 592, "usage_type": "call"}, {"api_name": "frappe.db.get_single_value", "line_number": 611, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 611, "usage_type": "attribute"}, {"api_name": "frappe.db.get_value", "line_number": 614, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 614, "usage_type": "attribute"}, {"api_name": "frappe.db.get_value", "line_number": 617, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 617, "usage_type": "attribute"}, {"api_name": "frappe.db.get_value", "line_number": 621, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 621, "usage_type": "attribute"}, {"api_name": "frappe.throw", "line_number": 623, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 624, "usage_type": "call"}, {"api_name": "frappe.db.get_value", "line_number": 631, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 631, "usage_type": "attribute"}, {"api_name": "frappe.db.get_value", "line_number": 634, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 634, "usage_type": "attribute"}, {"api_name": "frappe.db.set_value", "line_number": 635, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 635, "usage_type": "attribute"}, {"api_name": "frappe.db.get_value", "line_number": 639, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 639, "usage_type": "attribute"}, {"api_name": "frappe.db.set_value", "line_number": 643, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 643, "usage_type": "attribute"}, {"api_name": "frappe.get_doc", "line_number": 648, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 650, "usage_type": "call"}, {"api_name": "frappe.get_list", "line_number": 654, "usage_type": "call"}, {"api_name": "frappe.get_cached_value", "line_number": 665, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 671, "usage_type": "call"}, {"api_name": "frappe.whitelist", "line_number": 646, "usage_type": "call"}, {"api_name": "frappe.get_list", "line_number": 718, "usage_type": "call"}, {"api_name": "frappe.db.count", "line_number": 723, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 723, "usage_type": "attribute"}, {"api_name": 
"frappe.db.count", "line_number": 729, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 729, "usage_type": "attribute"}, {"api_name": "frappe.whitelist", "line_number": 700, "usage_type": "call"}, {"api_name": "frappe.db.get_all", "line_number": 748, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 748, "usage_type": "attribute"}, {"api_name": "frappe.whitelist", "line_number": 743, "usage_type": "call"}, {"api_name": "frappe.whitelist", "line_number": 760, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 775, "usage_type": "call"}, {"api_name": "frappe.get_meta", "line_number": 776, "usage_type": "call"}, {"api_name": "frappe.get_meta", "line_number": 864, "usage_type": "call"}, {"api_name": "frappe.utils.cstr", "line_number": 878, "usage_type": "call"}, {"api_name": "frappe.utils.formatters.format_value", "line_number": 910, "usage_type": "call"}, {"api_name": "frappe.whitelist", "line_number": 769, "usage_type": "call"}, {"api_name": "frappe.get_active_domains", "line_number": 936, "usage_type": "call"}, {"api_name": "frappe.db.get_value", "line_number": 942, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 942, "usage_type": "attribute"}, {"api_name": "frappe.get_active_domains", "line_number": 952, "usage_type": "call"}, {"api_name": "frappe.db.get_value", "line_number": 959, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 959, "usage_type": "attribute"}, {"api_name": "frappe.db.set_value", "line_number": 963, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 963, "usage_type": "attribute"}, {"api_name": "frappe.db.set_value", "line_number": 965, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 965, "usage_type": "attribute"}, {"api_name": "frappe.db.set_value", "line_number": 967, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 967, "usage_type": "attribute"}, {"api_name": "frappe.utils.now_datetime", "line_number": 974, "usage_type": "call"}, {"api_name": "frappe.utils", "line_number": 974, "usage_type": "attribute"}, {"api_name": "frappe.get_list", "line_number": 976, "usage_type": "call"}, {"api_name": "frappe.desk.page.setup_wizard.setup_wizard.setup_complete", "line_number": 977, "usage_type": "call"}, {"api_name": "healthcare.setup.setup_healthcare", "line_number": 997, "usage_type": "call"}, {"api_name": "erpnext.setup.utils.insert_record", "line_number": 1009, "usage_type": "call"}, {"api_name": "frappe.db.get_single_value", "line_number": 1013, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 1013, "usage_type": "attribute"}, {"api_name": "frappe.get_all", "line_number": 1021, "usage_type": "call"}, {"api_name": "frappe.throw", "line_number": 1025, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 1026, "usage_type": "call"}, {"api_name": "frappe.utils.get_link_to_form", "line_number": 1027, "usage_type": "call"}, {"api_name": "frappe.db.get_all", "line_number": 1040, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 1040, "usage_type": "attribute"}, {"api_name": "frappe.whitelist", "line_number": 1032, "usage_type": "call"}, {"api_name": "frappe.get_all", "line_number": 1054, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 1055, "usage_type": "call"}, {"api_name": "frappe.get_meta", "line_number": 1061, "usage_type": "call"}, {"api_name": "frappe.db.exists", "line_number": 1071, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 1071, "usage_type": "attribute"}, {"api_name": 
"frappe.db.exists", "line_number": 1073, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 1073, "usage_type": "attribute"}, {"api_name": "frappe.db.exists", "line_number": 1078, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 1078, "usage_type": "attribute"}, {"api_name": "frappe.get_value", "line_number": 1089, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 1146, "usage_type": "call"}, {"api_name": "frappe.new_doc", "line_number": 1147, "usage_type": "call"}, {"api_name": "frappe.new_doc", "line_number": 1159, "usage_type": "call"}, {"api_name": "healthcare.healthcare.doctype.observation.observation.add_observation", "line_number": 1174, "usage_type": "call"}, {"api_name": "healthcare.healthcare.doctype.observation_template.observation_template.get_observation_template_details", "line_number": 1182, "usage_type": "call"}, {"api_name": "healthcare.healthcare.doctype.observation.observation.add_observation", "line_number": 1189, "usage_type": "call"}, {"api_name": "healthcare.healthcare.doctype.observation.observation.add_observation", "line_number": 1215, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 1244, "usage_type": "call"}, {"api_name": "barcode.Code128", "line_number": 1245, "usage_type": "call"}, {"api_name": "barcode.writer.ImageWriter", "line_number": 1245, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 1253, "usage_type": "call"}, {"api_name": "frappe.whitelist", "line_number": 1237, "usage_type": "call"}]} +{"seq_id": "39134166038", "text": "import sys\nif \"pytest\" in sys.modules:\n import tests.testing_config as config\nelse:\n import config\nfrom constants import *\nfrom itertools import pairwise\n\n\nclass TapeSquare:\n \"\"\"\n represents a single square on the tape of a Turing machine\n basically implemented as a linked list node\n \"\"\"\n symbol_dict = config.SYMBOL_DICT\n\n def __init__(self, data=None, prev=None, nxt=None):\n \"\"\"\n previous : another TapeSquare\n reference to the next TS to the left\n next: another TapeSquare\n reference to the next TS to the right\n data: string\n symbol stored on the TSquare\n \"\"\"\n # setting data to symbol_dict[0] at the header doesn't re-evaluate the\n # argument if the dict changes\n if data is None:\n data = TapeSquare.symbol_dict[0]\n\n self.prev = prev\n self.next = nxt\n self.data = data\n\n def __str__(self):\n return \"(prev: {}, next: {}, data: {})\".format(id(self.prev),\n id(self.next),\n self.data)\n\n\nclass Tape:\n \"\"\"\n represents a Tape object\n\n This is essentially a neutered doubly-linked list, in the spirit of Turing!\n It also holds the current square on the tape, also in the spirit of Turing!\n \"\"\"\n\n def __init__(self, lst, pos):\n \"\"\"\n initializes a totally blank tape\n\n current: TapeSquare\n holds the current TapeSquare the TM is processing\n\n Fields:\n current: TapeSquare on the tape\n \"\"\"\n\n if not lst:\n self.current = TapeSquare()\n return\n\n if pos not in range(0, len(lst)):\n raise ValueError(\"Position is out of bounds of list (index starts at 0)\")\n\n tape_lst = [TapeSquare(val) for val in lst]\n\n for back, front in pairwise(tape_lst):\n back.next = front\n front.prev = back\n\n self.current = tape_lst[pos]\n\n def get_symbol(self):\n \"\"\"\n getter method for symbol on current TapeSquare\n purely for typing convenience\n \"\"\"\n\n return self.current.data\n\n def set_symbol(self, sym):\n \"\"\"\n setter method for symbol on current TapeSquare\n purely for typing convenience\n 
\"\"\"\n\n self.current.data = sym\n\n def add_left(self):\n \"\"\"\n adds a new TapeSquare to the left of the passed square\n can assume that this is also the leftmost square\n\n parent: TapeSquare we are attaching this new TS to on the left\n \"\"\"\n\n new_sqr = TapeSquare(nxt=self.current)\n self.current.prev = new_sqr\n\n def add_right(self):\n \"\"\"\n adds a new TapeSquare to the right of the passed square\n can assume that this is also the rightmost square\n\n parent: TapeSquare we are attaching this new TS to on the right\n \"\"\"\n\n new_sqr = TapeSquare(prev=self.current)\n self.current.next = new_sqr\n\n def move_left(self):\n \"\"\"\n moves the current TapeSquare left one square\n also handles if this is the leftmost square\n \"\"\"\n\n if self.current.prev is None:\n self.add_left()\n self.current = self.current.prev\n\n def move_right(self):\n \"\"\"\n moves the current TapeSquare right one square\n also handles if this is the rightmost square\n \"\"\"\n\n if self.current.next is None:\n self.add_right()\n self.current = self.current.next\n\n def move(self, instruct):\n \"\"\"\n moves the current square according to instruction passed\n\n instruct: string\n either \"L\", \"R\", or \"N\"\n will move left, right, or not at all accordingly\n \"\"\"\n\n if instruct == Move.STAY:\n pass\n elif instruct == Move.LEFT:\n self.move_left()\n elif instruct == Move.RIGHT:\n self.move_right()\n else:\n raise RuntimeError('Movement command is not \"N\", \"L\", or \"R\"')\n\n def square_check(self, sym):\n \"\"\"\n checks if current square symbol is same as passed symbol\n returns a Boolean\n symbol: string\n symbol we are checking\n \"\"\"\n\n return self.current.data == sym\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n \"\"\"\n creates and returns string presentation of entire Tape\n current square is indictated by underline under symbol\n \"\"\"\n\n to_right = []\n cur = self.current.next\n while cur is not None:\n to_right.append(f\"[{cur.data}]\")\n cur = cur.next\n\n to_left = []\n cur = self.current.prev\n while cur is not None:\n to_left.append(f\"[{cur.data}]\")\n cur = cur.prev\n\n right_str = \"\".join(to_right)\n left_str = \"\".join(to_left[::-1])\n\n # return (left_str +\n # \"\\033[4m\" +\n # f\"[{self.current.data}]\" +\n # \"\\033[0m\" +\n # right_str)\n\n return \"...]\" + left_str + f\"<<[{self.current.data}]>>\" + right_str + \"[...\"\n\n def __eq__(self, other):\n if not isinstance(other, Tape):\n return False\n\n # start checking if ahead list matches\n cur_this = self.current\n cur_other = other.current\n\n while cur_this and cur_other:\n if cur_this.data != cur_other.data:\n return False\n\n cur_this, cur_other = cur_this.next, cur_other.next\n\n if cur_this or cur_other: # both must be None\n return False\n\n # start checking if before list matches\n cur_this = self.current\n cur_other = other.current\n\n while cur_this and cur_other:\n if cur_this.data != cur_other.data:\n return False\n\n cur_this, cur_other = cur_this.prev, cur_other.prev\n\n if cur_this or cur_other:\n return False\n\n return True\n", "repo_name": "kh9sd/UniversalTuringMachine", "sub_path": "tape.py", "file_name": "tape.py", "file_ext": "py", "file_size_in_byte": 6056, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.modules", "line_number": 2, "usage_type": "attribute"}, {"api_name": "config.SYMBOL_DICT", "line_number": 15, "usage_type": "attribute"}, {"api_name": "itertools.pairwise", 
"line_number": 69, "usage_type": "call"}]} +{"seq_id": "9796004013", "text": "import typing\n\n\nclass Field:\n def __init__(\n self,\n verbose_name: typing.AnyStr = None,\n data_type: typing.Any = \"string\",\n required: bool = True,\n default: typing.Any = None,\n multi: bool = False,\n ):\n self.name = None\n self.fullname = None\n self.internal_name = None\n self.verbose_name: typing.AnyStr = verbose_name\n self.data_type: typing.AnyStr = data_type\n self.required: bool = required\n self.default: typing.Any = default\n self.multi: bool = multi\n\n def __get__(self, instance, owner):\n if instance is None:\n return self\n value = getattr(instance, self.internal_name, None)\n value = value if value else self.default\n return value\n\n def __set__(self, instance, value):\n setattr(instance, self.internal_name, value)\n", "repo_name": "tinylambda/keep", "sub_path": "blueprint/field.py", "file_name": "field.py", "file_ext": "py", "file_size_in_byte": 877, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.AnyStr", "line_number": 7, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 8, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 10, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 16, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 17, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 19, "usage_type": "attribute"}]} +{"seq_id": "23926356034", "text": "\"\"\"[Git](https://git-scm.com/): update all, extract subtree, rewrite history, ...\"\"\"\nfrom collections import defaultdict\nfrom configparser import ConfigParser\nfrom dataclasses import dataclass\nfrom functools import lru_cache\nfrom pathlib import Path\n\nimport typer\nfrom invoke import Context, Exit, UnexpectedExit, task\n\nfrom conjuring.colors import COLOR_LIGHT_RED, COLOR_NONE\nfrom conjuring.grimoire import (\n REGEX_JIRA,\n print_error,\n print_success,\n run_command,\n run_lines,\n run_multiple,\n run_stdout,\n run_with_fzf,\n)\nfrom conjuring.visibility import MagicTask, ShouldDisplayTasks, is_git_repo\n\nSHOULD_PREFIX = True\nshould_display_tasks: ShouldDisplayTasks = is_git_repo\nGLOBAL_GITCONFIG_PATH = Path(\"~/.gitconfig\").expanduser()\n\n\n@lru_cache\ndef global_config() -> ConfigParser:\n \"\"\"Global Git configuration.\"\"\"\n config = ConfigParser()\n config.read(GLOBAL_GITCONFIG_PATH)\n return config\n\n\nclass Git:\n \"\"\"Git helpers.\"\"\"\n\n # Use \"tail +2\" to remove the blank line at the top\n SHOW_ALL_FILE_HISTORY = 'git log --pretty=\"format:\" --name-only | sort -u | tail +2'\n\n def __init__(self, context: Context) -> None:\n self.context = context\n\n def current_branch(self) -> str:\n \"\"\"Return the current branch name.\"\"\"\n return run_stdout(self.context, \"git branch --show-current\")\n\n def default_branch(self) -> str:\n \"\"\"Return the default branch name (master/main/develop/development).\"\"\"\n return run_stdout(\n self.context,\n \"git branch -a | rg -o -e /master -e /develop.+ -e /main | sort -u | cut -b 2- | head -1\",\n )\n\n def checkout(self, *branches: str) -> str:\n \"\"\"Try checking out the specified branches in order.\"\"\"\n for branch in branches:\n try:\n self.context.run(f\"git checkout {branch}\")\n except UnexpectedExit:\n pass\n else:\n return branch\n return \"\"\n\n @property\n def github_username(self) -> str:\n \"\"\"The GitHub username configured in the global 
settings.\"\"\"\n return global_config()[\"github\"][\"user\"]\n\n def choose_local_branch(self, branch: str) -> str:\n \"\"\"Choose a local branch.\"\"\"\n return run_with_fzf(self.context, \"git branch --list | rg -v develop | cut -b 3-\", query=branch)\n\n\n@dataclass(frozen=True)\nclass PrefixBranch:\n \"\"\"Tuple of prefix and branch name.\"\"\"\n\n prefix: str\n branch: str\n\n\n@task(klass=MagicTask)\ndef update_all(c: Context, group: str = \"\") -> None:\n \"\"\"Run gita super to update and clean branches.\"\"\"\n parts = [\"gita\", \"super\"]\n if group:\n parts.append(group)\n gita_super = \" \".join(parts)\n c.run(f\"{gita_super} up && {gita_super} delete-merged-branches\")\n\n\n@task\ndef switch_url_to(c: Context, remote: str = \"origin\", https: bool = False) -> None:\n \"\"\"Set an SSH or HTTPS URL for a remote.\"\"\"\n regex = r\"'git@(.+\\.com):(.+/.+)\\.git\\s'\" if https else r\"'/([^/]+\\.com)/([^/]+/.+)\\s\\('\"\n replace = \"'$1/$2'\" if https else \"'$1:$2'\"\n\n result = c.run(f\"git remote -v | rg {remote} | head -1 | rg -o {regex} -r {replace}\", warn=True, pty=False)\n match = result.stdout.strip()\n if not match:\n typer.echo(f\"{COLOR_LIGHT_RED}Match not found{COLOR_NONE}\")\n else:\n repo = f\"https://{match}\" if https else f\"git@{match}\"\n if not repo.endswith(\".git\"):\n repo += \".git\"\n c.run(f\"git remote set-url {remote} {repo}\")\n\n c.run(\"git remote -v\")\n\n\n@task(\n help={\n \"new_project_dir\": \"Dir of the project to be created. The dir might exist or not\",\n \"reset\": \"Remove the new dir and start over\",\n \"keep\": \"Keep branches and remote after the extracting is done\",\n },\n)\ndef extract_subtree(c: Context, new_project_dir: str, reset: bool = False, keep: bool = False) -> None:\n \"\"\"Extract files from subdirectories of the current Git repo to another repo, using git subtree.\n\n The files will be moved to the root of the new repo.\n\n Solutions adapted from:\n - https://serebrov.github.io/html/2021-09-13-git-move-history-to-another-repository.html\n - https://stackoverflow.com/questions/25574407/git-subtree-split-two-directories/58253979#58253979\n \"\"\"\n new_project_path: Path = Path(new_project_dir).expanduser().absolute()\n if reset:\n c.run(f\"rm -rf {new_project_path}\")\n\n new_project_path.mkdir(parents=False, exist_ok=True)\n old_project_path = Path.cwd()\n\n all_files = set(run_lines(c, Git.SHOW_ALL_FILE_HISTORY, dry=False))\n chosen_files = set(\n run_with_fzf(\n c,\n Git.SHOW_ALL_FILE_HISTORY,\n dry=False,\n header=\"Use TAB to choose the files you want to KEEP\",\n multi=True,\n preview=\"test -f {} && head -20 {} || echo FILE NOT FOUND, IT EXISTS ONLY IN GIT HISTORY\",\n ),\n )\n sub_dirs = {part.rsplit(\"/\", 1)[0] for part in chosen_files}\n obliterate = set(all_files.difference(chosen_files))\n\n first_date = run_stdout(c, 'git log --format=\"%cI\" --root | sort -u | head -1')\n\n prefixes: list[str] = []\n for sub_dir in sorted(sub_dirs):\n absolute_subdir = Path(sub_dir).expanduser().absolute()\n # Add slash to the end\n prefixes.append(str(absolute_subdir.relative_to(Path.cwd())).rstrip(\"/\") + \"/\")\n\n with c.cd(new_project_dir):\n run_multiple(\n c,\n \"git init\",\n \"touch README.md\",\n \"git add README.md\",\n f'git commit -m \"chore: first commit\" --date {first_date}',\n f\"git remote add -f upstream {old_project_path}\",\n \"git checkout -b upstream_master upstream/master\",\n pty=False,\n )\n pairs: set[PrefixBranch] = set()\n for prefix in prefixes:\n if not Path(prefix).exists():\n 
print_error(f\"Skipping non-existent prefix {prefix}...\")\n continue\n\n clean = prefix.strip(\" /\").replace(\"/\", \"_\")\n branch = f\"upstream_subtree_{clean}\"\n local_obliterate = {f[len(prefix) :] for f in obliterate if f.startswith(prefix)}\n pairs.add(PrefixBranch(prefix, branch))\n\n run_multiple(\n c,\n \"git checkout upstream_master\",\n f\"git subtree split --prefix={prefix} -b {branch}\",\n f\"git checkout {branch}\",\n \"git obliterate \" + \" \".join(sorted(local_obliterate)) if obliterate else \"\",\n \"git checkout master\",\n # TODO: fix: deal with files that have the same name in different subdirs\n # The files are merged in the root, without prefix.\n # What happens if a file has the same name in multiple subdirs? e.g.: bin/file.py and src/file.py\n f\"git merge {branch} --allow-unrelated-histories -m 'refactor: merge subtree {prefix}'\",\n )\n\n if obliterate:\n c.run(\"git obliterate \" + \" \".join(sorted(obliterate)))\n if not keep:\n run_multiple(\n c,\n \"git branch -D upstream_master\",\n *[f\"git branch -D {pair.branch}\" for pair in pairs],\n \"git remote remove upstream\",\n )\n history(c, full=True)\n print_error(\"Don't forget to switch to the new repo:\", f\" cd {new_project_dir}\", nl=True)\n print_success(\n \"Next steps:\",\n \"- Run 'git obliterate' manually for files in Git history (listed above) you still want to remove\",\n \"- Run 'invoke git.rewrite' to fix dates and authors\",\n \"- Create a new empty repo on https://github.com/new without initializing it (no README/.gitignore/license)\",\n \"- Follow the instructions to add a remote (from 'push an existing repository from the command line')\",\n \"- Push files to the new repo with:\",\n \" git push -u origin master\",\n nl=True,\n )\n\n\n@task(\n help={\n \"full\": \"Display all info: files, authors, dates\",\n \"files\": \"Display all files in Git history, even the ones that were deleted and don't exist anymore\",\n \"author\": \"Display authors\",\n \"dates\": \"Display committer and author dates in different colors\",\n },\n)\ndef history(c: Context, full: bool = False, files: bool = False, author: bool = False, dates: bool = False) -> None:\n \"\"\"Grep the whole Git log and display information.\"\"\"\n option_chosen = False\n if full:\n option_chosen = True\n files = author = dates = True\n if files:\n option_chosen = True\n c.run(Git.SHOW_ALL_FILE_HISTORY)\n if author:\n option_chosen = True\n c.run(\"git log --name-only | rg author | sort -u\")\n if dates:\n option_chosen = True\n header = True\n for line in run_lines(c, 'git log --format=\"%H|%cI|%aI|%GK|%s\"', hide=False):\n if header:\n print_success(\"Green = dates are equal\")\n print_error(\"Red = dates are different\")\n typer.echo(\n \"Commit Committer Date \"\n \"Author Date GPG key Subject\",\n )\n header = False\n\n fields = line.split(\"|\")\n committer_date = fields[1]\n author_date = fields[2]\n func = print_success if committer_date == author_date else print_error\n func(*fields)\n if not option_chosen:\n msg = \"Choose at least one option: --full, --files, --author, --dates\"\n raise Exit(msg, 1)\n\n\n@task(\n help={\n \"commit\": \"Base commit to be used for the range (default: --root)\",\n \"gpg\": \"Sign the commit (default: True)\",\n \"author\": \"Set the current author (from 'git config') on the commit range\",\n },\n)\ndef rewrite(c: Context, commit: str = \"--root\", gpg: bool = True, author: bool = True) -> None:\n \"\"\"Rewrite a range of commits, signing with GPG and setting the author.\n\n 
https://git-scm.com/docs/git-commit\n https://git-scm.com/docs/git-rebase\n \"\"\"\n gpg_flag = \" --gpg-sign\" if gpg else \" --no-gpg-sign\"\n\n author_flag = \"\"\n if author:\n name = run_stdout(c, \"git config user.name\", dry=False)\n email = run_stdout(c, \"git config user.email\", dry=False)\n author_flag = f' --author \"{name} <{email}>\"'\n\n c.run(f'git log --format=\"%H %cI %aI %s\" {commit} > $TMPDIR/rebase_sign_hashlist')\n c.run(\n \"git rebase --committer-date-is-author-date --exec 'GIT_COMMITTER_DATE=\"\n '$(fgrep -m 1 \"$(git log -1 --format=\"%aI %s\" $GIT_COMMIT)\" $TMPDIR/rebase_sign_hashlist'\n f' | cut -d\" \" -f3) git commit --amend --no-edit -n{author_flag}{gpg_flag}\\' -i {commit}',\n )\n history(c, dates=True)\n typer.echo()\n typer.echo(\"NOTE: If commits were modified during the rebase above, their committer date will be the current date\")\n typer.echo(\"Rebase again with this command, without changing any commit, and all dates should be green\")\n\n\n@task\ndef tidy_up(c: Context) -> None:\n \"\"\"Prune remotes, update all branches of the repo, delete merged/squashed branches.\"\"\"\n c.run(\"gitup .\")\n c.run(\"git delete-merged-branches\")\n\n # warn=True is needed; apparently, this command fails when there is no branch, and execution is stopped\n c.run(\"git delete-squashed-branches\", warn=True)\n\n for remote in run_lines(c, \"git remote\", dry=False):\n c.run(f\"git remote prune {remote}\")\n\n\n@task(\n help={\n \"remote\": \"List remote branches (default: False)\",\n \"update\": \"Update the repo before merging (default: True)\",\n \"push\": \"Push the merge to the remote (default: True)\",\n \"rebase\": \"Rebase the default branch before merging (default: False)\",\n },\n)\ndef merge_default(\n c: Context,\n remote: bool = False,\n update: bool = True,\n push: bool = True,\n rebase: bool = False,\n) -> None:\n \"\"\"Merge the default branch of the repo. Also set it with \"git config\", if not already set.\"\"\"\n default_branch = set_default_branch(c, remote)\n\n if update:\n tidy_up(c)\n which_verb = \"rebase\" if rebase else \"merge\"\n run_command(c, f\"git {which_verb}\", f\"origin/{default_branch}\")\n if push:\n force_option = \"--force-with-lease\" if rebase else \"\"\n run_command(c, \"git push\", force_option)\n\n\ndef set_default_branch(c: Context, remote: bool = False) -> str:\n \"\"\"Set the default branch config on the repo, if not configured yet.\"\"\"\n cmd_read_default_branch = \"git config git-extras.default-branch\"\n default_branch = run_stdout(c, cmd_read_default_branch, warn=True, dry=False)\n if not default_branch:\n default_branch = run_with_fzf(\n c,\n \"git branch --list\",\n \"--all\" if remote else \"\",\n \"| cut -b 3- | grep -v HEAD | sed -E 's#remotes/[^/]+/##g' | sort -u\",\n )\n run_command(c, cmd_read_default_branch, default_branch)\n run_command(c, \"git config init.defaultBranch\", default_branch)\n run_command(c, \"git config --list | rg default.*branch\")\n return default_branch\n\n\n@task(\n help={\n \"tag\": \"Name of the tag to compare to (default: last created tag)\",\n \"files\": \"Display files instead of commits (default: false)\",\n \"verbose\": \"Files: display changes/insertions/deletion.\"\n \" Commits: display the full commit message, author... (default: False)\",\n \"by_author\": \"Group commits by author. Doesn't work with --files or --verbose. 
(default: False)\",\n },\n)\ndef changes_since_tag(\n c: Context,\n tag: str = \"\",\n files: bool = False,\n verbose: bool = False,\n by_author: bool = False,\n) -> None:\n \"\"\"Display changes (commits or files) since the last tag (or a chosen tag).\"\"\"\n if files:\n which_tag = tag or run_stdout(c, \"git tag --list --sort -creatordate | head -1\", hide=False, dry=False)\n default_branch = set_default_branch(c)\n option = \"\" if verbose else \" --name-only\"\n c.run(f\"git diff --stat {which_tag} origin/{default_branch}{option}\")\n else:\n which_tag = tag or \"$(git describe --tags --abbrev=0)\"\n option = \" --format='%aN|%s' | sort -u\" if by_author else \"\" if verbose else \" --oneline\"\n cmd = f\"git log {which_tag}..HEAD{option}\"\n if by_author:\n commits_by_author = defaultdict(list)\n for line in run_lines(c, cmd):\n author, commit = line.split(\"|\")\n commits_by_author[author].append(commit)\n for author, commits in commits_by_author.items():\n print(f\"\\n{author}:\") # noqa: T201\n for commit in commits:\n print(f\" {commit}\") # noqa: T201\n else:\n c.run(cmd)\n\n\n@task()\ndef watch(c: Context) -> None:\n \"\"\"Watch a build on GitHub Actions, then open a pull request or repo after the build is over.\"\"\"\n current_branch = Git(c).current_branch()\n print_success(f\"Current branch = {current_branch}\")\n\n c.run(\"gh run watch\", warn=True)\n out = c.run(f\"gh pr view {current_branch} --web\", warn=True).stdout.strip()\n if \"no pull requests found for branch\" in out:\n c.run(\"gh repo view --web\")\n\n\n@task(\n help={\n \"prefix\": \"Keep the Conventional Commits prefix\",\n \"original_order\": \"Don't sort bullets, keep them in original order\",\n },\n)\ndef body(c: Context, prefix: bool = False, original_order: bool = False) -> None:\n \"\"\"Prepare a commit body to be used on pull requests and squashed commits.\"\"\"\n default_branch = set_default_branch(c)\n bullets = []\n for line in run_lines(c, f\"git log {default_branch}..\", \"--format=%s%n%b\"):\n clean = line.strip(\" -\")\n if (\n \"Merge branch\" in clean\n or \"Merge remote-tracking branch\" in clean\n or \"Revert \" in clean\n or \"This reverts\" in clean\n or not clean\n ):\n continue\n\n # Remove Jira ticket with regex\n clean = REGEX_JIRA.sub(\"\", clean).replace(\"()\", \"\").replace(\"[]\", \"\").strip(\" -\")\n\n # Split on the Conventional Commit prefix\n if not prefix and \":\" in clean:\n clean = clean.split(\":\", 1)[1].strip()\n\n bullets.append(f\"- {clean}\")\n\n results = bullets if original_order else sorted(set(bullets))\n typer.echo(\"\\n\".join(results))\n", "repo_name": "andreoliwa/conjuring", "sub_path": "src/conjuring/spells/git.py", "file_name": "git.py", "file_ext": "py", "file_size_in_byte": 16370, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "conjuring.visibility.ShouldDisplayTasks", "line_number": 25, "usage_type": "name"}, {"api_name": "conjuring.visibility.is_git_repo", "line_number": 25, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 26, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 32, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 29, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 30, "usage_type": "name"}, {"api_name": "invoke.Context", "line_number": 43, "usage_type": "name"}, {"api_name": "conjuring.grimoire.run_stdout", "line_number": 48, "usage_type": "call"}, 
{"api_name": "conjuring.grimoire.run_stdout", "line_number": 52, "usage_type": "call"}, {"api_name": "invoke.UnexpectedExit", "line_number": 62, "usage_type": "name"}, {"api_name": "conjuring.grimoire.run_with_fzf", "line_number": 75, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 78, "usage_type": "call"}, {"api_name": "invoke.Context", "line_number": 87, "usage_type": "name"}, {"api_name": "invoke.task", "line_number": 86, "usage_type": "call"}, {"api_name": "conjuring.visibility.MagicTask", "line_number": 86, "usage_type": "name"}, {"api_name": "invoke.Context", "line_number": 97, "usage_type": "name"}, {"api_name": "typer.echo", "line_number": 105, "usage_type": "call"}, {"api_name": "conjuring.colors.COLOR_LIGHT_RED", "line_number": 105, "usage_type": "name"}, {"api_name": "conjuring.colors.COLOR_NONE", "line_number": 105, "usage_type": "name"}, {"api_name": "invoke.task", "line_number": 96, "usage_type": "name"}, {"api_name": "invoke.Context", "line_number": 122, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 131, "usage_type": "name"}, {"api_name": "pathlib.Path.cwd", "line_number": 136, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 136, "usage_type": "name"}, {"api_name": "conjuring.grimoire.run_lines", "line_number": 138, "usage_type": "call"}, {"api_name": "conjuring.grimoire.run_with_fzf", "line_number": 140, "usage_type": "call"}, {"api_name": "conjuring.grimoire.run_stdout", "line_number": 152, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 156, "usage_type": "call"}, {"api_name": "pathlib.Path.cwd", "line_number": 158, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 158, "usage_type": "name"}, {"api_name": "conjuring.grimoire.run_multiple", "line_number": 161, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 173, "usage_type": "call"}, {"api_name": "conjuring.grimoire.print_error", "line_number": 174, "usage_type": "call"}, {"api_name": "conjuring.grimoire.run_multiple", "line_number": 182, "usage_type": "call"}, {"api_name": "conjuring.grimoire.run_multiple", "line_number": 198, "usage_type": "call"}, {"api_name": "conjuring.grimoire.print_error", "line_number": 205, "usage_type": "call"}, {"api_name": "conjuring.grimoire.print_success", "line_number": 206, "usage_type": "call"}, {"api_name": "invoke.task", "line_number": 115, "usage_type": "call"}, {"api_name": "invoke.Context", "line_number": 226, "usage_type": "name"}, {"api_name": "conjuring.grimoire.run_lines", "line_number": 241, "usage_type": "call"}, {"api_name": "conjuring.grimoire.print_success", "line_number": 243, "usage_type": "call"}, {"api_name": "conjuring.grimoire.print_error", "line_number": 244, "usage_type": "call"}, {"api_name": "typer.echo", "line_number": 245, "usage_type": "call"}, {"api_name": "conjuring.grimoire.print_success", "line_number": 254, "usage_type": "name"}, {"api_name": "conjuring.grimoire.print_error", "line_number": 254, "usage_type": "name"}, {"api_name": "invoke.Exit", "line_number": 258, "usage_type": "call"}, {"api_name": "invoke.task", "line_number": 218, "usage_type": "call"}, {"api_name": "invoke.Context", "line_number": 268, "usage_type": "name"}, {"api_name": "conjuring.grimoire.run_stdout", "line_number": 278, "usage_type": "call"}, {"api_name": "conjuring.grimoire.run_stdout", "line_number": 279, "usage_type": "call"}, {"api_name": "typer.echo", "line_number": 289, "usage_type": "call"}, {"api_name": "typer.echo", "line_number": 290, "usage_type": 
"call"}, {"api_name": "typer.echo", "line_number": 291, "usage_type": "call"}, {"api_name": "invoke.task", "line_number": 261, "usage_type": "call"}, {"api_name": "invoke.Context", "line_number": 295, "usage_type": "name"}, {"api_name": "conjuring.grimoire.run_lines", "line_number": 303, "usage_type": "call"}, {"api_name": "invoke.task", "line_number": 294, "usage_type": "name"}, {"api_name": "invoke.Context", "line_number": 316, "usage_type": "name"}, {"api_name": "conjuring.grimoire.run_command", "line_number": 328, "usage_type": "call"}, {"api_name": "conjuring.grimoire.run_command", "line_number": 331, "usage_type": "call"}, {"api_name": "invoke.task", "line_number": 307, "usage_type": "call"}, {"api_name": "invoke.Context", "line_number": 334, "usage_type": "name"}, {"api_name": "conjuring.grimoire.run_stdout", "line_number": 337, "usage_type": "call"}, {"api_name": "conjuring.grimoire.run_with_fzf", "line_number": 339, "usage_type": "call"}, {"api_name": "conjuring.grimoire.run_command", "line_number": 345, "usage_type": "call"}, {"api_name": "conjuring.grimoire.run_command", "line_number": 346, "usage_type": "call"}, {"api_name": "conjuring.grimoire.run_command", "line_number": 347, "usage_type": "call"}, {"api_name": "invoke.Context", "line_number": 361, "usage_type": "name"}, {"api_name": "conjuring.grimoire.run_stdout", "line_number": 369, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 378, "usage_type": "call"}, {"api_name": "conjuring.grimoire.run_lines", "line_number": 379, "usage_type": "call"}, {"api_name": "invoke.task", "line_number": 351, "usage_type": "call"}, {"api_name": "invoke.Context", "line_number": 391, "usage_type": "name"}, {"api_name": "conjuring.grimoire.print_success", "line_number": 394, "usage_type": "call"}, {"api_name": "invoke.task", "line_number": 390, "usage_type": "call"}, {"api_name": "invoke.Context", "line_number": 408, "usage_type": "name"}, {"api_name": "conjuring.grimoire.run_lines", "line_number": 412, "usage_type": "call"}, {"api_name": "conjuring.grimoire.REGEX_JIRA.sub", "line_number": 424, "usage_type": "call"}, {"api_name": "conjuring.grimoire.REGEX_JIRA", "line_number": 424, "usage_type": "name"}, {"api_name": "typer.echo", "line_number": 433, "usage_type": "call"}, {"api_name": "invoke.task", "line_number": 402, "usage_type": "call"}]} +{"seq_id": "26380402682", "text": "from traits.api import Instance, Dict, HasTraits, Array, Float, on_trait_change, List, Int, Button, Bool, File\nfrom traitsui.api import Item, View, HGroup, VGroup, ArrayEditor, HSplit, TabularEditor\nfrom traitsui.tabular_adapter import TabularAdapter\nfrom chaco.api import ArrayPlotData, Plot\nfrom chaco.tools.api import ZoomTool, PanTool\nfrom enable.api import ComponentEditor\nfrom enable.savage.trait_defs.ui.svg_button import SVGButton\nfrom pyface.api import GUI\nfrom piksi_tools.console.gui_utils import plot_square_axes\nfrom piksi_tools.console.utils import determine_path, mode_dict, get_mode, color_dict, FLOAT_MODE, \\\n SPP_MODE, DGNSS_MODE, NO_FIX_MODE, FIXED_MODE, EMPTY_STR, \\\n sopen, log_time_strings, datetime_2_str, call_repeatedly \n\nimport math\nimport os\nimport numpy as np\nimport datetime\nimport time\n\nfrom sbp.piksi import *\nfrom sbp.navigation import *\n\nclass SimpleAdapter(TabularAdapter):\n columns = [('Item', 0), ('Value', 1)]\n width = 80\n\nclass BaselineView(HasTraits):\n\n # This mapping should match the flag definitions in libsbp for\n # the MsgBaselineNED message. 
While this isn't strictly necessary\n # it helps avoid confusion\n\n python_console_cmds = Dict()\n\n table = List()\n\n logging_b = Bool(False)\n directory_name_b = File\n\n plot = Instance(Plot)\n plot_data = Instance(ArrayPlotData)\n\n running = Bool(True)\n zoomall = Bool(False)\n position_centered = Bool(False)\n\n clear_button = SVGButton(\n label='', tooltip='Clear',\n filename=os.path.join(determine_path(), 'images', 'iconic', 'x.svg'),\n width=16, height=16\n )\n zoomall_button = SVGButton(\n label='', tooltip='Zoom All', toggle=True,\n filename=os.path.join(determine_path(), 'images', 'iconic', 'fullscreen.svg'),\n width=16, height=16\n )\n center_button = SVGButton(\n label='', tooltip='Center on Baseline', toggle=True,\n filename=os.path.join(determine_path(), 'images', 'iconic', 'target.svg'),\n width=16, height=16\n )\n paused_button = SVGButton(\n label='', tooltip='Pause', toggle_tooltip='Run', toggle=True,\n filename=os.path.join(determine_path(), 'images', 'iconic', 'pause.svg'),\n toggle_filename=os.path.join(determine_path(), 'images', 'iconic', 'play.svg'),\n width=16, height=16\n )\n\n reset_button = Button(label='Reset Filters')\n\n traits_view = View(\n HSplit(\n Item('table', style = 'readonly', editor = TabularEditor(adapter=SimpleAdapter()), show_label=False, width=0.3),\n VGroup(\n HGroup(\n Item('paused_button', show_label=False),\n Item('clear_button', show_label=False),\n Item('zoomall_button', show_label=False),\n Item('center_button', show_label=False),\n Item('reset_button', show_label=False),\n ),\n Item(\n 'plot',\n show_label = False,\n editor = ComponentEditor(bgcolor = (0.8,0.8,0.8)),\n )\n )\n )\n )\n\n def _zoomall_button_fired(self):\n self.zoomall = not self.zoomall\n\n def _center_button_fired(self):\n self.position_centered = not self.position_centered\n\n def _paused_button_fired(self):\n self.running = not self.running\n\n def _reset_button_fired(self):\n self.link(MsgResetFilters(filter=0))\n\n\n def _reset_remove_current(self):\n self.plot_data.set_data('cur_fixed_n', [])\n self.plot_data.set_data('cur_fixed_e', [])\n self.plot_data.set_data('cur_fixed_d', [])\n self.plot_data.set_data('cur_float_n', [])\n self.plot_data.set_data('cur_float_e', [])\n self.plot_data.set_data('cur_float_d', [])\n self.plot_data.set_data('cur_dgnss_n', [])\n self.plot_data.set_data('cur_dgnss_e', [])\n self.plot_data.set_data('cur_dgnss_d', [])\n\n def _clear_history(self):\n self.plot_data.set_data('n_fixed', [])\n self.plot_data.set_data('e_fixed', [])\n self.plot_data.set_data('d_fixed', [])\n self.plot_data.set_data('n_float', [])\n self.plot_data.set_data('e_float', [])\n self.plot_data.set_data('d_float', [])\n self.plot_data.set_data('n_dgnss', [])\n self.plot_data.set_data('e_dgnss', [])\n self.plot_data.set_data('d_dgnss', [])\n \n def _clear_button_fired(self):\n self.n[:] = np.NAN\n self.e[:] = np.NAN\n self.d[:] = np.NAN\n self.mode[:] = np.NAN\n self.plot_data.set_data('t', [])\n self._clear_history()\n self._reset_remove_current()\n \n\n def iar_state_callback(self, sbp_msg, **metadata):\n self.num_hyps = sbp_msg.num_hyps\n self.last_hyp_update = time.time()\n\n def age_corrections_callback(self, sbp_msg, **metadata):\n age_msg = MsgAgeCorrections(sbp_msg)\n if age_msg.age != 0xFFFF:\n self.age_corrections = age_msg.age/10.0\n else:\n self.age_corrections = None\n\n def gps_time_callback(self, sbp_msg, **metadata):\n if sbp_msg.msg_type == SBP_MSG_GPS_TIME_DEP_A:\n time_msg = MsgGPSTimeDepA(sbp_msg)\n flags = 1\n elif sbp_msg.msg_type == 
SBP_MSG_GPS_TIME:\n time_msg = MsgGPSTime(sbp_msg)\n flags = time_msg.flags\n if flags != 0:\n self.week = time_msg.wn\n self.nsec = time_msg.ns_residual\n \n def utc_time_callback(self, sbp_msg, **metadata):\n tmsg = MsgUtcTime(sbp_msg)\n seconds = math.floor(tmsg.seconds)\n microseconds = int(tmsg.ns/1000.00)\n if tmsg.flags&0x1 == 1:\n dt = datetime.datetime(tmsg.year, tmsg.month, tmsg.day, tmsg.hours,\n tmsg.minutes, tmsg.seconds, microseconds)\n self.utc_time = dt\n self.utc_time_flags = tmsg.flags\n if (tmsg.flags >> 3) & 0x3 == 0:\n self.utc_source = \"Factory Default\"\n elif (tmsg.flags >> 3) & 0x3 == 1:\n self.utc_source = \"Non Volatile Memory\"\n elif (tmsg.flags >> 3) & 0x3 == 2:\n self.utc_source = \"Decoded this Session\"\n else:\n self.utc_source = \"Unknown\"\n else:\n self.utc_time = None\n self.utc_source = None\n \n def baseline_heading_callback(self, sbp_msg, **metadata):\n headingMsg = MsgBaselineHeading(sbp_msg)\n if headingMsg.flags&0x7 != 0:\n self.heading = headingMsg.heading * 1e-3\n else:\n self.heading = None\n \n def baseline_callback(self, sbp_msg, **metadata):\n soln = MsgBaselineNEDDepA(sbp_msg)\n self.last_soln = soln\n table = []\n\n soln.n = soln.n * 1e-3\n soln.e = soln.e * 1e-3\n soln.d = soln.d * 1e-3\n soln.h_accuracy = soln.h_accuracy * 1e-3\n soln.v_accuracy = soln.v_accuracy * 1e-3\n\n dist = np.sqrt(soln.n**2 + soln.e**2 + soln.d**2)\n \n tow = soln.tow * 1e-3\n if self.nsec is not None:\n tow += self.nsec * 1e-9\n\n ((tloc, secloc), (tgps, secgps)) = log_time_strings(self.week, tow) \n \n if self.utc_time is not None:\n ((tutc, secutc)) = datetime_2_str(self.utc_time)\n \n if self.directory_name_b == '':\n filepath = time.strftime(\"baseline_log_%Y%m%d-%H%M%S.csv\")\n else:\n filepath = os.path.join(self.directory_name_b, time.strftime(\"baseline_log_%Y%m%d-%H%M%S.csv\"))\n\n if self.logging_b == False:\n self.log_file = None\n\n if self.logging_b:\n if self.log_file is None:\n self.log_file = sopen(filepath, 'w')\n self.log_file.write('pc_time,gps_time,tow(msec),north(meters),east(meters),down(meters),h_accuracy(meters),v_accuracy(meters),'\n 'distance(meters),num_sats,flags,num_hypothesis\\n')\n log_str_gps = ''\n if tgps != '' and secgps != 0:\n log_str_gps = \"{0}:{1:06.6f}\".format(tgps, float(secgps)) \n self.log_file.write('%s,%s,%.3f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%d,%d,%d\\n' % (\n \"{0}:{1:06.6f}\".format(tloc, float(secloc)), \n log_str_gps,\n tow, soln.n, soln.e, soln.d, \n soln.h_accuracy, soln.v_accuracy,\n dist,\n soln.n_sats,\n soln.flags,\n self.num_hyps)\n )\n self.log_file.flush()\n\n self.last_mode = get_mode(soln)\n\n if self.last_mode < 1:\n table.append(('GPS Week', EMPTY_STR))\n table.append(('GPS TOW', EMPTY_STR))\n table.append(('GPS Time', EMPTY_STR))\n table.append(('UTC Time', EMPTY_STR))\n table.append(('UTC Src', EMPTY_STR))\n table.append(('N', EMPTY_STR))\n table.append(('E', EMPTY_STR))\n table.append(('D', EMPTY_STR))\n table.append(('Horiz Acc', EMPTY_STR))\n table.append(('Vert Acc', EMPTY_STR))\n table.append(('Dist.', EMPTY_STR))\n table.append(('Sats Used', EMPTY_STR))\n table.append(('Flags', EMPTY_STR))\n table.append(('Mode', EMPTY_STR))\n else:\n self.last_btime_update = time.time()\n if self.week is not None:\n table.append(('GPS Week', str(self.week)))\n table.append(('GPS TOW', \"{:.3f}\".format(tow)))\n \n if self.week is not None:\n table.append(('GPS Time', \"{0}:{1:06.3f}\".format(tgps, float(secgps))))\n if self.utc_time is not None:\n table.append(('UTC Time', \"{0}:{1:06.3f}\".format(tutc, 
float(secutc))))\n table.append(('UTC Src', self.utc_source))\n \n table.append(('N', soln.n))\n table.append(('E', soln.e))\n table.append(('D', soln.d))\n table.append(('Horiz Acc', soln.h_accuracy))\n table.append(('Vert Acc', soln.v_accuracy))\n table.append(('Dist.', dist))\n table.append(('Sats Used', soln.n_sats))\n \n table.append(('Flags', '0x%02x' % soln.flags))\n table.append(('Mode', mode_dict[self.last_mode]))\n if self.heading != None:\n table.append(('Heading', self.heading)) \n if self.age_corrections != None:\n table.append(('Corr. Age [s]', self.age_corrections))\n self.table = table\n # Rotate array, deleting oldest entries to maintain\n # no more than N in plot\n self.n[1:] = self.n[:-1]\n self.e[1:] = self.e[:-1]\n self.d[1:] = self.d[:-1]\n self.mode[1:] = self.mode[:-1]\n\n # Insert latest position\n if self.last_mode > 1:\n self.n[0], self.e[0], self.d[0] = soln.n, soln.e, soln.d\n else:\n self.n[0], self.e[0], self.d[0] = [np.NAN, np.NAN, np.NAN]\n self.mode[0] = self.last_mode\n \n def solution_draw(self):\n if self.running:\n GUI.invoke_later(self._solution_draw)\n \n def _solution_draw(self): \n self._clear_history()\n soln = self.last_soln\n if np.any(self.mode):\n float_indexer = (self.mode == FLOAT_MODE)\n fixed_indexer = (self.mode == FIXED_MODE)\n dgnss_indexer = (self.mode == DGNSS_MODE)\n\n if np.any(fixed_indexer):\n self.plot_data.set_data('n_fixed', self.n[fixed_indexer])\n self.plot_data.set_data('e_fixed', self.e[fixed_indexer])\n self.plot_data.set_data('d_fixed', self.d[fixed_indexer])\n if np.any(float_indexer):\n self.plot_data.set_data('n_float', self.n[float_indexer])\n self.plot_data.set_data('e_float', self.e[float_indexer])\n self.plot_data.set_data('d_float', self.d[float_indexer])\n if np.any(dgnss_indexer):\n self.plot_data.set_data('n_dgnss', self.n[dgnss_indexer])\n self.plot_data.set_data('e_dgnss', self.e[dgnss_indexer])\n self.plot_data.set_data('d_dgnss', self.d[dgnss_indexer])\n \n # Update our last solution icon \n if self.last_mode == FIXED_MODE:\n self._reset_remove_current()\n self.plot_data.set_data('cur_fixed_n', [soln.n])\n self.plot_data.set_data('cur_fixed_e', [soln.e])\n self.plot_data.set_data('cur_fixed_d', [soln.d])\n elif self.last_mode == FLOAT_MODE:\n self._reset_remove_current()\n self.plot_data.set_data('cur_float_n', [soln.n])\n self.plot_data.set_data('cur_float_e', [soln.e])\n self.plot_data.set_data('cur_float_d', [soln.d])\n elif self.last_mode == DGNSS_MODE:\n self._reset_remove_current()\n self.plot_data.set_data('cur_dgnss_n', [soln.n])\n self.plot_data.set_data('cur_dgnss_e', [soln.e])\n self.plot_data.set_data('cur_dgnss_d', [soln.d])\n else:\n pass\n # make the zoomall win over the position centered button \n # position centered button has no effect when zoom all enabled \n\n if not self.zoomall and self.position_centered:\n d = (self.plot.index_range.high - self.plot.index_range.low) / 2.\n self.plot.index_range.set_bounds(soln.e - d, soln.e + d)\n d = (self.plot.value_range.high - self.plot.value_range.low) / 2.\n self.plot.value_range.set_bounds(soln.n - d, soln.n + d)\n\n if self.zoomall:\n plot_square_axes(self.plot, ('e_fixed', 'e_float', 'e_dgnss'), ('n_fixed', 'n_float', 'n_dgnss'))\n\n def __init__(self, link, plot_history_max=1000, dirname=''):\n super(BaselineView, self).__init__()\n self.log_file = None\n self.directory_name_b = dirname\n self.num_hyps = 0\n self.last_hyp_update = 0\n self.last_btime_update = 0\n self.last_soln = None\n self.last_mode = 0\n self.plot_data = 
ArrayPlotData(n_fixed=[0.0], e_fixed=[0.0], d_fixed=[0.0],\n n_float=[0.0], e_float=[0.0], d_float=[0.0],\n n_dgnss=[0.0], e_dgnss=[0.0], d_dgnss=[0.0],\n t=[0.0],\n ref_n=[0.0], ref_e=[0.0], ref_d=[0.0],\n cur_fixed_e=[], cur_fixed_n=[], cur_fixed_d=[],\n cur_float_e=[], cur_float_n=[], cur_float_d=[],\n cur_dgnss_e=[], cur_dgnss_n=[], cur_dgnss_d=[])\n\n self.plot_history_max = plot_history_max\n self.n = np.zeros(plot_history_max)\n self.e = np.zeros(plot_history_max)\n self.d = np.zeros(plot_history_max)\n self.mode = np.zeros(plot_history_max)\n\n self.plot = Plot(self.plot_data)\n pts_float = self.plot.plot(('e_float', 'n_float'),\n type='scatter',\n color=color_dict[FLOAT_MODE],\n marker='dot',\n line_width=0.0,\n marker_size=1.0)\n pts_fixed = self.plot.plot(('e_fixed', 'n_fixed'),\n type='scatter',\n color=color_dict[FIXED_MODE],\n marker='dot',\n line_width=0.0,\n marker_size=1.0)\n pts_dgnss = self.plot.plot(('e_dgnss', 'n_dgnss'),\n type='scatter',\n color=color_dict[DGNSS_MODE],\n marker='dot',\n line_width=0.0,\n marker_size=1.0)\n ref = self.plot.plot(('ref_e', 'ref_n'),\n type='scatter',\n color='red',\n marker='plus',\n marker_size=5,\n line_width=1.5)\n cur_fixed = self.plot.plot(('cur_fixed_e', 'cur_fixed_n'),\n type='scatter',\n color=color_dict[FIXED_MODE],\n marker='plus',\n marker_size=5,\n line_width=1.5)\n cur_float = self.plot.plot(('cur_float_e', 'cur_float_n'),\n type='scatter',\n color=color_dict[FLOAT_MODE],\n marker='plus',\n marker_size=5,\n line_width=1.5)\n cur_dgnss = self.plot.plot(('cur_dgnss_e', 'cur_dgnss_n'),\n type='scatter',\n color=color_dict[DGNSS_MODE],\n marker='plus',\n line_width=1.5,\n marker_size=5)\n plot_labels = [' Base Position','DGPS', 'RTK Float', 'RTK Fixed']\n plots_legend = dict(zip(plot_labels, [ref, cur_dgnss, cur_float, cur_fixed]))\n self.plot.legend.plots = plots_legend\n self.plot.legend.labels = plot_labels # sets order\n self.plot.legend.visible = True\n\n self.plot.index_axis.tick_label_position = 'inside'\n self.plot.index_axis.tick_label_color = 'gray'\n self.plot.index_axis.tick_color = 'gray'\n self.plot.index_axis.title='E (meters)'\n self.plot.index_axis.title_spacing = 5\n self.plot.value_axis.tick_label_position = 'inside'\n self.plot.value_axis.tick_label_color = 'gray'\n self.plot.value_axis.tick_color = 'gray'\n self.plot.value_axis.title='N (meters)'\n self.plot.value_axis.title_spacing = 5\n self.plot.padding = (25, 25, 25, 25)\n\n self.plot.tools.append(PanTool(self.plot))\n zt = ZoomTool(self.plot, zoom_factor=1.1, tool_mode=\"box\", always_on=False)\n self.plot.overlays.append(zt)\n\n self.week = None\n self.utc_time = None \n self.age_corrections = None\n self.heading = None\n self.nsec = 0\n \n\n self.link = link\n self.link.add_callback(self.baseline_callback, [SBP_MSG_BASELINE_NED, SBP_MSG_BASELINE_NED_DEP_A])\n self.link.add_callback(self.baseline_heading_callback, [SBP_MSG_BASELINE_HEADING])\n self.link.add_callback(self.iar_state_callback, SBP_MSG_IAR_STATE)\n self.link.add_callback(self.gps_time_callback, [SBP_MSG_GPS_TIME, SBP_MSG_GPS_TIME_DEP_A])\n self.link.add_callback(self.utc_time_callback, [SBP_MSG_UTC_TIME])\n self.link.add_callback(self.age_corrections_callback, SBP_MSG_AGE_CORRECTIONS)\n \n call_repeatedly(0.2, self.solution_draw)\n\n self.python_console_cmds = {\n 'baseline': self\n }\n", "repo_name": "wangjiefjj/GNSS-recive", "sub_path": "piksi_tools/console/baseline_view.py", "file_name": "baseline_view.py", "file_ext": "py", "file_size_in_byte": 16614, "program_lang": "python", "lang": 
"en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "traitsui.tabular_adapter.TabularAdapter", "line_number": 23, "usage_type": "name"}, {"api_name": "traits.api.HasTraits", "line_number": 27, "usage_type": "name"}, {"api_name": "traits.api.Dict", "line_number": 33, "usage_type": "call"}, {"api_name": "traits.api.List", "line_number": 35, "usage_type": "call"}, {"api_name": "traits.api.Bool", "line_number": 37, "usage_type": "call"}, {"api_name": "traits.api.File", "line_number": 38, "usage_type": "name"}, {"api_name": "traits.api.Instance", "line_number": 40, "usage_type": "call"}, {"api_name": "chaco.api.Plot", "line_number": 40, "usage_type": "argument"}, {"api_name": "traits.api.Instance", "line_number": 41, "usage_type": "call"}, {"api_name": "chaco.api.ArrayPlotData", "line_number": 41, "usage_type": "argument"}, {"api_name": "traits.api.Bool", "line_number": 43, "usage_type": "call"}, {"api_name": "traits.api.Bool", "line_number": 44, "usage_type": "call"}, {"api_name": "traits.api.Bool", "line_number": 45, "usage_type": "call"}, {"api_name": "enable.savage.trait_defs.ui.svg_button.SVGButton", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "piksi_tools.console.utils.determine_path", "line_number": 49, "usage_type": "call"}, {"api_name": "enable.savage.trait_defs.ui.svg_button.SVGButton", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "piksi_tools.console.utils.determine_path", "line_number": 54, "usage_type": "call"}, {"api_name": "enable.savage.trait_defs.ui.svg_button.SVGButton", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "piksi_tools.console.utils.determine_path", "line_number": 59, "usage_type": "call"}, {"api_name": "enable.savage.trait_defs.ui.svg_button.SVGButton", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "piksi_tools.console.utils.determine_path", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "piksi_tools.console.utils.determine_path", "line_number": 65, "usage_type": "call"}, {"api_name": "traits.api.Button", "line_number": 69, "usage_type": "call"}, {"api_name": "traitsui.api.View", "line_number": 71, "usage_type": "call"}, {"api_name": "traitsui.api.HSplit", "line_number": 72, "usage_type": "call"}, {"api_name": "traitsui.api.Item", "line_number": 73, "usage_type": "call"}, {"api_name": "traitsui.api.TabularEditor", "line_number": 73, "usage_type": "call"}, {"api_name": "traitsui.api.VGroup", "line_number": 74, "usage_type": "call"}, {"api_name": "traitsui.api.HGroup", "line_number": 75, "usage_type": "call"}, {"api_name": "traitsui.api.Item", "line_number": 76, "usage_type": "call"}, {"api_name": "traitsui.api.Item", "line_number": 77, "usage_type": "call"}, {"api_name": "traitsui.api.Item", "line_number": 78, "usage_type": "call"}, {"api_name": "traitsui.api.Item", "line_number": 
79, "usage_type": "call"}, {"api_name": "traitsui.api.Item", "line_number": 80, "usage_type": "call"}, {"api_name": "traitsui.api.Item", "line_number": 82, "usage_type": "call"}, {"api_name": "enable.api.ComponentEditor", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.NAN", "line_number": 127, "usage_type": "attribute"}, {"api_name": "numpy.NAN", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.NAN", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.NAN", "line_number": 130, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 138, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 160, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 197, "usage_type": "call"}, {"api_name": "piksi_tools.console.utils.log_time_strings", "line_number": 203, "usage_type": "call"}, {"api_name": "piksi_tools.console.utils.datetime_2_str", "line_number": 206, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 209, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 211, "usage_type": "call"}, {"api_name": "os.path", "line_number": 211, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 211, "usage_type": "call"}, {"api_name": "piksi_tools.console.utils.sopen", "line_number": 218, "usage_type": "call"}, {"api_name": "piksi_tools.console.utils.get_mode", "line_number": 236, "usage_type": "call"}, {"api_name": "piksi_tools.console.utils.EMPTY_STR", "line_number": 239, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.EMPTY_STR", "line_number": 240, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.EMPTY_STR", "line_number": 241, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.EMPTY_STR", "line_number": 242, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.EMPTY_STR", "line_number": 243, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.EMPTY_STR", "line_number": 244, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.EMPTY_STR", "line_number": 245, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.EMPTY_STR", "line_number": 246, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.EMPTY_STR", "line_number": 247, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.EMPTY_STR", "line_number": 248, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.EMPTY_STR", "line_number": 249, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.EMPTY_STR", "line_number": 250, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.EMPTY_STR", "line_number": 251, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.EMPTY_STR", "line_number": 252, "usage_type": "name"}, {"api_name": "time.time", "line_number": 254, "usage_type": "call"}, {"api_name": "piksi_tools.console.utils.mode_dict", "line_number": 274, "usage_type": "name"}, {"api_name": "numpy.NAN", "line_number": 291, "usage_type": "attribute"}, {"api_name": "pyface.api.GUI.invoke_later", "line_number": 296, "usage_type": "call"}, {"api_name": "pyface.api.GUI", "line_number": 296, "usage_type": "name"}, {"api_name": "numpy.any", "line_number": 301, "usage_type": "call"}, {"api_name": "piksi_tools.console.utils.FLOAT_MODE", "line_number": 302, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.FIXED_MODE", "line_number": 303, "usage_type": "name"}, {"api_name": 
"piksi_tools.console.utils.DGNSS_MODE", "line_number": 304, "usage_type": "name"}, {"api_name": "numpy.any", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 314, "usage_type": "call"}, {"api_name": "piksi_tools.console.utils.FIXED_MODE", "line_number": 320, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.FLOAT_MODE", "line_number": 325, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.DGNSS_MODE", "line_number": 330, "usage_type": "name"}, {"api_name": "piksi_tools.console.gui_utils.plot_square_axes", "line_number": 347, "usage_type": "call"}, {"api_name": "chaco.api.ArrayPlotData", "line_number": 358, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 369, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 371, "usage_type": "call"}, {"api_name": "chaco.api.Plot", "line_number": 373, "usage_type": "call"}, {"api_name": "piksi_tools.console.utils.color_dict", "line_number": 376, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.FLOAT_MODE", "line_number": 376, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.color_dict", "line_number": 382, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.FIXED_MODE", "line_number": 382, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.color_dict", "line_number": 388, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.DGNSS_MODE", "line_number": 388, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.color_dict", "line_number": 400, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.FIXED_MODE", "line_number": 400, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.color_dict", "line_number": 406, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.FLOAT_MODE", "line_number": 406, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.color_dict", "line_number": 412, "usage_type": "name"}, {"api_name": "piksi_tools.console.utils.DGNSS_MODE", "line_number": 412, "usage_type": "name"}, {"api_name": "chaco.tools.api.PanTool", "line_number": 434, "usage_type": "call"}, {"api_name": "chaco.tools.api.ZoomTool", "line_number": 435, "usage_type": "call"}, {"api_name": "piksi_tools.console.utils.call_repeatedly", "line_number": 453, "usage_type": "call"}]} +{"seq_id": "29908439900", "text": "# This autograder script leverages the 'grade' package, which has command-line utilities\n# to run the tests, assign scores, and print results. We're calling internal APIs\n# instead and custom-printing the output. If you need output in a more specific format,\n# such as for GradeScope, the grade command-line has output options in JSON (claimed\n# compatible with GradeScope) and Markdown.\n\n# Also, note that we're running mypy here and assigning it one point. Again, we're running\n# mypy using its internal APIs rather than from the command-line. We could ostensibly do\n# something more invasive, like calling `make lint`, which also does pylint and checks\n# indentation with black, and would normally be required if you were submitting a PR to\n# the \"real\" ElectionGuard repository. 
Probably unnecessary for pedagogic purposes.\n\n# https://github.com/thoward27/grade\n\nimport time\nimport unittest\nfrom datetime import datetime\nfrom io import StringIO\nfrom typing import List, Dict, Union\n\nimport mypy.main\nfrom grade.result import Result\nfrom grade.runners import GradedRunner\n\nfrom electionguard.logs import log_file_only\n\ngood_mark = \"✅ \"\nfail_mark = \"❌ \"\nblank_line_header = \"│\"  # unicode: \"BOX DRAWINGS LIGHT VERTICAL\"\n\ndivider_line = \"├\" + \"─\" * 78\nstart_divider_line = \"┌\" + \"─\" * 78\nend_divider_line = \"└\" + \"─\" * 78\n\n\ndef print_score_line(category: str, points: int, max_points: int) -> None:\n    print(\n        \"%s %-65s: %2d/%2d %s\"\n        % (\n            blank_line_header,\n            category,\n            points,\n            max_points,\n            good_mark if points == max_points else fail_mark,\n        )\n    )\n\n\ndef print_start_line() -> None:\n    print(start_divider_line)\n\n\ndef print_end_line() -> None:\n    print(end_divider_line)\n\n\ndef print_divider_line() -> None:\n    print(divider_line)\n\n\n# suppresses all logging to stdout; logs instead go to a file: electionguard.log\nlog_file_only()\n\nprint(\n    \"Autograder: starting at \"\n    + str(datetime.utcnow().replace(microsecond=0).isoformat())\n)\nprint()\nprint(\"Autograder: running mypy\")\nmypy_stdout = StringIO()\nmypy_stderr = StringIO()\nmypy.main.main(\n    script_path=\".\",\n    stdout=mypy_stdout,\n    stderr=mypy_stderr,\n    args=[\"src\", \"stubs\", \"tests\"],\n)\noutput_lines = mypy_stdout.getvalue().splitlines()\nmypy_success = len(output_lines) == 1 and output_lines[0].startswith(\"Success:\")\nmypy_points = 1 if mypy_success else 0\nmypy_maxpoints = 1\nmypy_str = \"Mypy, \" + (\"no errors\" if mypy_success else \"errors found\")\nprint(mypy_str)\nprint()\n\nprint(\"Autograder: running unit tests\")\nstart_time = time.perf_counter()\nsuite = unittest.defaultTestLoader.discover(start_dir=\".\", pattern=\"test*\")\nresults: Result = GradedRunner(visibility=\"visible\").run(suite)\nend_time = time.perf_counter()\n\nprint()\nprint_start_line()\n\nall_tests: List[Dict[str, Union[str, int]]] = results.data[\"tests\"]\nrelevant_tests = sorted(\n    [x for x in all_tests if int(x[\"max_score\"]) > 0], key=lambda x: x[\"name\"]\n)\n\nscore_sum: Dict[str, int] = {}\nscore_max_sum: Dict[str, int] = {}\n\nprev_test_part = \"\"\nfor t in relevant_tests:\n    name = str(t[\"name\"])\n    score = int(t[\"score\"])\n    max_score = int(t[\"max_score\"])\n    test_part = name.split(\".\")[0]\n    if prev_test_part != test_part:\n        if prev_test_part != \"\":\n            print_divider_line()\n        prev_test_part = test_part\n\n    print_score_line(name, score, max_score)\n    if test_part not in score_sum:\n        score_sum[test_part] = score\n        score_max_sum[test_part] = max_score\n    else:\n        score_sum[test_part] += score\n        score_max_sum[test_part] += max_score\n\nprint_divider_line()\nprint_score_line(mypy_str, mypy_points, mypy_maxpoints)\nfor k in sorted(score_sum.keys()):\n    print_score_line(f\"Subtotal: {k}\", score_sum[k], score_max_sum[k])\nprint_divider_line()\n\ntotal_score = sum(score_sum.values()) + mypy_points\ntotal_max_score = sum(score_max_sum.values()) + mypy_maxpoints\nprint_score_line(\"TOTALS\", total_score, total_max_score)\nprint_end_line()\n\nprint()\nprint(\"Running time: %.3f seconds\" % (end_time - start_time))\n\nif total_score == total_max_score:\n    # successful exit tells the CI system to give us a green checkmark\n    exit(0)\nelse:\n    # unsuccessful exit tells the CI system to give us a red X\n    print(\"To see full output from the unit tests, run `make test`.\")\n    exit(1)\n", 
"repo_name": "NBsyxx/UIUC-CS407-Cryptography", "sub_path": "mp4-electionguard/autograder.py", "file_name": "autograder.py", "file_ext": "py", "file_size_in_byte": 4350, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "electionguard.logs.log_file_only", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 66, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 70, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 71, "usage_type": "call"}, {"api_name": "mypy.main.main.main", "line_number": 72, "usage_type": "call"}, {"api_name": "mypy.main.main", "line_number": 72, "usage_type": "attribute"}, {"api_name": "mypy.main", "line_number": 72, "usage_type": "name"}, {"api_name": "time.perf_counter", "line_number": 87, "usage_type": "call"}, {"api_name": "unittest.defaultTestLoader.discover", "line_number": 88, "usage_type": "call"}, {"api_name": "unittest.defaultTestLoader", "line_number": 88, "usage_type": "attribute"}, {"api_name": "grade.result.Result", "line_number": 89, "usage_type": "name"}, {"api_name": "grade.runners.GradedRunner", "line_number": 89, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 90, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 100, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 101, "usage_type": "name"}]} +{"seq_id": "18781833167", "text": "import cv2\nimport argparse\nfrom pathlib import Path\nfrom PIL import Image\nfrom mtcnn import MTCNN\nfrom datetime import datetime\n\nfrom PIL import Image\nimport numpy as np\nfrom mtcnn_pytorch.src.align_trans import get_reference_facial_points, warp_and_crop_face\n\nparser = argparse.ArgumentParser(description='take a picture')\nparser.add_argument('--name','-n', default='unknown', type=str,help='input the name of the recording person')\nargs = parser.parse_args()\nfrom pathlib import Path\ndata_path = Path('data')\nsave_path = data_path/'facebank'/args.name\nif not save_path.exists():\n save_path.mkdir()\n\nmtcnn = MTCNN()\n\nframe = cv2.imread(\"face.png\")\np = Image.fromarray(frame[...,::-1])\nboxes ,faces = mtcnn.align_multi(p)\n\nif len(boxes)>0:\n scores = []\n for box in boxes:\n scores.append(box[4])\n scores = np.array(scores)\n best_face_index = np.argmax(scores)\n best_face_box = boxes[best_face_index]\n cv2.rectangle(frame, (int(best_face_box[0]),int(best_face_box[1])), (int(best_face_box[2]),int(best_face_box[3])), (0,0,255), 3)\n warped_face = faces[best_face_index]\n warped_face = np.array(warped_face)[...,::-1]\n \ncv2.imshow(\"face\",frame)\ncv2.waitKey(1000)\ntry: \n cv2.imwrite(str(save_path/'{}.jpg'.format(str(datetime.now())[:-7].replace(\":\",\"-\").replace(\" \",\"-\"))), warped_face)\nexcept:\n print('no face captured')\n \n", "repo_name": "Qidian213/Face_reID", "sub_path": "take_from_image.py", "file_name": "take_from_image.py", "file_ext": "py", "file_size_in_byte": 1378, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "pathlib.Path", 
"line_number": 16, "usage_type": "call"}, {"api_name": "mtcnn.MTCNN", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 23, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 24, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 24, "usage_type": "name"}, {"api_name": "mtcnn.align_multi", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "7698826040", "text": "from django.urls import path\r\nfrom news import views\r\n\r\nurlpatterns = [\r\n path('',views.index,name='index'),\r\n path('add_article',views.add_article_page,name='add_article_page'),\r\n path('save_article',views.save_article,name='save_article'),\r\n path('news_cat/<int:id>',views.news_cat,name='news_cat'),\r\n path('article_detail/<int:id>',views.article_detail,name='article_detail'),\r\n\r\n]\r\n", "repo_name": "sarangkkl/world_news", "sub_path": "news/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 401, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "news.views.index", "line_number": 5, "usage_type": "attribute"}, {"api_name": "news.views", "line_number": 5, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "news.views.add_article_page", "line_number": 6, "usage_type": "attribute"}, {"api_name": "news.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "news.views.save_article", "line_number": 7, "usage_type": "attribute"}, {"api_name": "news.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "news.views.news_cat", "line_number": 8, "usage_type": "attribute"}, {"api_name": "news.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "news.views.article_detail", "line_number": 9, "usage_type": "attribute"}, {"api_name": "news.views", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "32077343780", "text": "import pandas as pd\nimport numpy as np\nfrom binance.client import Client\nfrom huobi.linear_swap.rest.account import Account\nfrom huobi.linear_swap.rest.market import Market\nfrom huobi.linear_swap.rest.order import Order\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\npd.options.mode.chained_assignment = None \nimport os\nimport sys\n\n\nBinance = {\"API\": \"ojaWM7JF5nib3nY9hpiTIM9RZDTmVWIW5k5Z4HrY11CksrN18IJuJrP42WSed2X4\",\n \"Secret\": \"qkqY8QG9xiYQCvtctBBHhFqnUvz9mC1RiAxiSkU6zkTX3RisRwFc3KRINPWn91bz\"}\n\nHuobi = {\"API\": \"cc5417d1-d6f5a369-cdgs9k03f3-1916a\",\n \"Secret\": 
\"c3680fc5-19f9569d-1e6fcce3-c60a0\"}\n\nFTX = {\"API\": \"7DS8r5ipITuiR81i9Nov_Fi8jjxujDFaMWZvLW2_\",\n \"Secret\": \"gaHIllTYJEXqX8G3gUvgmIRE-WzL36rfHmBmpc-E\"}\n \nBybit = {\"API\": \"NUxyoWG4BHAb7LTnxD\", \n \"Secret\": \"ndCqe0JoQxXIyyZ5ptSuQjlKOpmLDxlJMobo\"}\n\n\nBinance_client = Client(Binance[\"API\"], Binance[\"Secret\"])\nHuobi_client = Account(Huobi[\"API\"], Huobi[\"Secret\"])\nHuobi_market = Market()\n\n\n# Bi_watch_list = [\"BTCUSDT\", \"ETHUSDT\", \"AVAXUSDT\", \"LTCUSDT\", \"ADAUSDT\"]\n# Bi_watch_list = [\"ETHUSDT\", \"BTCUSDT\", \"AVAXUSDT\", \"LTCUSDT\", \"ADAUSDT\"]\nBi_watch_list = [\"AVAXUSDT\"]\n# Huo_watch_list = [{\"contract_code\": \"btc-usdt\"}, {\"contract_code\": \"eth-usdt\"}, {\"contract_code\": \"avax-usdt\"}, {\"contract_code\": \"ltc-usdt\"}, {\"contract_code\": \"ada-usdt\"}]\n# Huo_watch_list = [{\"contract_code\": \"eth-usdt\"}, {\"contract_code\": \"btc-usdt\"}, {\"contract_code\": \"avax-usdt\"}, {\"contract_code\": \"ltc-usdt\"}, {\"contract_code\": \"ada-usdt\"}]\nHuo_watch_list = [{\"contract_code\": \"avax-usdt\"}]\nfor j in range(len(Bi_watch_list)):\n print(Bi_watch_list[j])\n count = 0\n profit = 0\n res = []\n i = 0\n while True:\n i += 1\n # print(i)\n # for r in res:\n # print(r)\n # sys.stdout.flush()\n Bi = Binance_client.get_orderbook_ticker(symbol = Bi_watch_list[j])\n Huo = Huobi_market.get_batch_merged(Huo_watch_list[j])\n Bi_bid = float(Bi[\"bidPrice\"])\n Huo_bid = Huo[\"ticks\"][0][\"bid\"][0]\n Bi_ask = float(Bi[\"askPrice\"])\n Huo_ask = Huo[\"ticks\"][0][\"ask\"][0]\n # if (Bi_bid - Huo_ask > 0):\n # print(\"Buy Huo, sell Bi: \", \"sell\", Bi_bid, \"buy\", Huo_ask)\n # if (Huo_bid - Bi_ask > 0):\n # print(\"Sell Huo, buy Bi: \", \"buy\", Bi_bid, \"sell\", Huo_ask)\n Huo_charge = 0.000\n Bi_charge = 0.000\n if (Bi_bid - Huo_ask > Bi_bid * Bi_charge + Huo_ask * Huo_charge):\n count += 1\n print(\">>\", i, \"<<\", \"Buy Huo, sell Bi: \", \"sell\", Bi_bid, \"buy\", Huo_ask, \"profit\", profit, \"volume\", float(Bi[\"bidQty\"]) * Bi_bid, Huo[\"ticks\"][0][\"ask\"][1])\n profit += Bi_bid - Huo_ask - Bi_bid * Bi_charge - Huo_ask * Huo_charge\n # res.append([\">> Buy Huo, sell Bi: \", \"sell\", Bi_bid, \"buy\", Huo_ask, \"profit\", profit])\n if (Huo_bid - Bi_ask > Bi_ask * Bi_charge + Huo_bid * Huo_charge):\n count += 1\n print(\">>\", i , \"<<\", \"Sell Huo, buy Bi: \", \"buy\", Bi_bid, \"sell\", Huo_ask, \"profit\", profit, \"volume\", float(Bi[\"askQty\"]) * Bi_ask, Huo[\"ticks\"][0][\"bid\"][1])\n profit += Huo_bid - Bi_ask - Bi_ask * Bi_charge - Huo_bid * Huo_charge \n # res.append([\">> Sell Huo, buy Bi: \", \"buy\", Bi_bid, \"sell\", Huo_ask, \"profit\", profit])\n # os.system(\"clear\")\n # print(\"Arbitrage count:\", count, \"profit:\", profit)\n", "repo_name": "LagerWu0501/Quantitative_Trading", "sub_path": "Arbitrage/test/Arbitrage.py", "file_name": "Arbitrage.py", "file_ext": "py", "file_size_in_byte": 3425, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "warnings.simplefilter", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.options", "line_number": 10, "usage_type": "attribute"}, {"api_name": "binance.client.Client", "line_number": 28, "usage_type": "call"}, {"api_name": "huobi.linear_swap.rest.account.Account", "line_number": 29, "usage_type": "call"}, {"api_name": "huobi.linear_swap.rest.market.Market", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "5940222489", "text": "from collections import deque\nimport 
sys\nsys.setrecursionlimit(10**7)\nmove = [[1, 0], [-1, 0], [0, 1], [0, -1]]\n\nt = int(input())\n\nposition = []\n\nd = deque()\nanswer = [0]\nfor _ in range(t):\n in_x, in_y = map(int, input().split())\n d.append([in_x, in_y])\n\n\ndef dfs(x, y, visit, cnt):\n if [x, y] == d[0]:\n return cnt\n\n for mx, my in move:\n nx, ny = x + mx, y + my\n\n if [nx, ny] not in visit and 0 <= nx < 1000000 and 0 <= ny < 1000000:\n visit.append([nx, ny])\n dfs(nx, ny, visit, cnt + 1)\n\n\nwhile d:\n start_x, start_y = d.popleft()\n dfs(start_x, start_y, [[start_x, start_y]], 0)\n", "repo_name": "Jeonghoon2/Coding-once-a-day", "sub_path": "파이썬 (Python)/백준/Platinum.4/체커.py", "file_name": "체커.py", "file_ext": "py", "file_size_in_byte": 639, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.setrecursionlimit", "line_number": 3, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "8127211400", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport Activity.models\nimport django.utils.timezone\nimport custom.models_template\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Activity',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100, verbose_name='\\u6d3b\\u52a8\\u540d\\u79f0')),\n ('description', models.TextField(verbose_name='\\u6d3b\\u52a8\\u63cf\\u8ff0')),\n ('max_attend', models.PositiveIntegerField(default=0, verbose_name='\\u4eba\\u6570\\u4e0a\\u9650')),\n ('start_at', models.DateTimeField(verbose_name='\\u5f00\\u59cb\\u65f6\\u95f4')),\n ('end_at', models.DateTimeField(verbose_name='\\u7ed3\\u675f\\u65f6\\u95f4')),\n ('poster', models.ImageField(upload_to=Activity.models.activity_poster, verbose_name='\\u6d3b\\u52a8\\u6d77\\u62a5')),\n ('closed', models.BooleanField(default=False, verbose_name=b'\\xe6\\xb4\\xbb\\xe5\\x8a\\xa8\\xe6\\x8a\\xa5\\xe5\\x90\\x8d\\xe6\\x98\\xaf\\xe5\\x90\\xa6\\xe5\\x85\\xb3\\xe9\\x97\\xad')),\n ('closed_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name=b'\\xe5\\x85\\xb3\\xe9\\x97\\xad\\xe6\\x8a\\xa5\\xe5\\x90\\x8d\\xe7\\x9a\\x84\\xe6\\x97\\xb6\\xe9\\x97\\xb4')),\n ('like_num', models.IntegerField(default=0)),\n ('comment_num', models.IntegerField(default=0)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n 'ordering': ('-created_at',),\n 'verbose_name': '\\u6d3b\\u52a8',\n 'verbose_name_plural': '\\u6d3b\\u52a8',\n },\n ),\n migrations.CreateModel(\n name='ActivityComment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('image', models.ImageField(upload_to=custom.models_template.comment_image_path, verbose_name=b'\\xe8\\xaf\\x84\\xe8\\xae\\xba\\xe5\\x9b\\xbe\\xe7\\x89\\x87')),\n ('content', models.CharField(max_length=255, verbose_name=b'\\xe8\\xaf\\x84\\xe8\\xae\\xba\\xe6\\xad\\xa3\\xe6\\x96\\x87')),\n ],\n options={\n 'verbose_name': '\\u6d3b\\u52a8\\u8bc4\\u8bba',\n 'verbose_name_plural': '\\u6d3b\\u52a8\\u8bc4\\u8bba\\u5185\\u5bb9',\n },\n ),\n migrations.CreateModel(\n name='ActivityInvitation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n 
('created_at', models.DateTimeField(auto_now_add=True)),\n ('responsed', models.BooleanField(default=False)),\n ('agree', models.BooleanField(default=False)),\n ],\n options={\n 'ordering': ('-created_at',),\n 'verbose_name': '\\u6d3b\\u52a8\\u9080\\u8bf7',\n 'verbose_name_plural': '\\u6d3b\\u52a8\\u9080\\u8bf7',\n },\n ),\n migrations.CreateModel(\n name='ActivityJoin',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('approved', models.BooleanField(default=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n 'ordering': ('-created_at',),\n },\n ),\n migrations.CreateModel(\n name='ActivityLikeThrough',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('activity', models.ForeignKey(verbose_name=b'\\xe6\\xb4\\xbb\\xe5\\x8a\\xa8', to='Activity.Activity')),\n ],\n options={\n 'ordering': ('-created_at',),\n },\n ),\n ]\n", "repo_name": "huangy10/SportscarStyle", "sub_path": "Activity/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 4253, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "Activity.models.models", "line_number": 25, "usage_type": "attribute"}, {"api_name": "Activity.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.utils", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.db", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 28, "usage_type": "call"}, 
{"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "custom.models_template.models_template", "line_number": 43, "usage_type": "attribute"}, {"api_name": "custom.models_template", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 57, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 65, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 68, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 68, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 70, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 70, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 76, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 76, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 79, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 79, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 80, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 80, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 81, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 81, "usage_type": "name"}]} +{"seq_id": 
"30024311654", "text": "from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\n\ndef create_app() -> FastAPI:\n\n app = FastAPI(\n title=\"User Service API\",\n )\n\n app.add_middleware(\n CORSMiddleware,\n allow_headers=[\"*\"],\n )\n\n # include all routes here\n from app.v1 import v1_router\n\n app.include_router(v1_router)\n\n return app\n", "repo_name": "rajatgupta310198/fastapi-backend-boilerplate", "sub_path": "app/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 369, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "fastapi.FastAPI", "line_number": 7, "usage_type": "call"}, {"api_name": "fastapi.middleware.cors.CORSMiddleware", "line_number": 12, "usage_type": "argument"}, {"api_name": "app.v1.include_router", "line_number": 19, "usage_type": "call"}, {"api_name": "app.v1.v1_router", "line_number": 19, "usage_type": "argument"}, {"api_name": "app.v1", "line_number": 19, "usage_type": "name"}, {"api_name": "app.v1", "line_number": 21, "usage_type": "name"}, {"api_name": "fastapi.FastAPI", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "21601504620", "text": "import pytest\nfrom fastapi.testclient import TestClient\nfrom forest_lite.server import main, config\n\n\nclient = TestClient(main.app)\n\n\ndef override(fn):\n main.app.dependency_overrides[config.get_settings] = fn\n\n\ndef get_settings(data):\n def fn():\n return config.Settings(**data)\n return fn\n\n\ndef test_viewport_endpoint_default():\n override(get_settings({}))\n response = client.get(\"/viewport\")\n result = response.json()\n expect = {\n \"longitude\": [-180, 180],\n \"latitude\": [-85, 85]\n }\n assert result == expect\n\n\ndef test_viewport_endpoint_from_config():\n data = {\n \"viewport\": {\n \"longitude\": [10, 20],\n \"latitude\": [30, 40]\n }\n }\n\n # Patch main.get_settings\n override(get_settings(data))\n\n response = client.get(\"/viewport\")\n result = response.json()\n expect = {\n \"longitude\": [10., 20.],\n \"latitude\": [30., 40.]\n }\n assert result == expect\n", "repo_name": "MetOffice/forest-lite", "sub_path": "forest_lite/test/test_viewport.py", "file_name": "test_viewport.py", "file_ext": "py", "file_size_in_byte": 966, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "21", "api": [{"api_name": "fastapi.testclient.TestClient", "line_number": 6, "usage_type": "call"}, {"api_name": "forest_lite.server.main.app", "line_number": 6, "usage_type": "attribute"}, {"api_name": "forest_lite.server.main", "line_number": 6, "usage_type": "name"}, {"api_name": "forest_lite.server.main.app", "line_number": 10, "usage_type": "attribute"}, {"api_name": "forest_lite.server.main", "line_number": 10, "usage_type": "name"}, {"api_name": "forest_lite.server.config.get_settings", "line_number": 10, "usage_type": "attribute"}, {"api_name": "forest_lite.server.config", "line_number": 10, "usage_type": "name"}, {"api_name": "forest_lite.server.config.Settings", "line_number": 15, "usage_type": "call"}, {"api_name": "forest_lite.server.config", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "23003882318", "text": "import sqlite3\n\n# Establish a connection to the SQLite database\nconn = sqlite3.connect('../db.sqlite3')\ncursor = conn.cursor()\n\n# SQL statement to insert a record\nsql = \"INSERT INTO dictionary_entry (word, definition, origin_language) VALUES (?, ?, ?)\"\nrecord = ('naija', 'Slang for Nigeria', 
'Pidgin') # Replace these sample values with your actual data\n\n# Execute the SQL statement\ncursor.execute(sql, record)\n\n# Commit the changes and close the connection\nconn.commit()\nconn.close()\n", "repo_name": "yeevon/Naija-Dictionary", "sub_path": "myproject/sqlscripts/insert_dict_words.py", "file_name": "insert_dict_words.py", "file_ext": "py", "file_size_in_byte": 489, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sqlite3.connect", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "2238766257", "text": "import warnings\nfrom unittest import TestCase\nfrom dotenv import load_dotenv\nimport pandas as pd\nfrom hana2py import utilities as u\n\nload_dotenv()\n\nIRIS = pd.read_csv('hana2py/data/iris.csv')\n\nclass Test(TestCase):\n    \"\"\"\n    Test utilities module\n    \"\"\"\n\n    def test_millify_float_return_string(self):\n        num = IRIS.sepal_length.tolist()[0]*1e3\n        millify_num = u.millify(num)\n        print(millify_num)\n        assert millify_num == '5k'\n\n    def test_get_hana_connection_details(self):\n        user, pwd, host, port = u.get_hana_connection_details('user',\n                                                              'password',\n                                                              'host',\n                                                              'port')\n        assert user == 'my_user'\n        assert pwd == 'mypwd'\n        assert host == '192.168.1.1'\n        assert port == '8080'\n\n    def test_get_sqlserver_connection_details(self):\n        server = u.get_sqlserver_connection_details('server')\n        assert server == 'my_server'\n\n    def test_get_sqlserver_none_detail(self):\n\n        with warnings.catch_warnings(record=True) as warn:\n            warnings.simplefilter(\"always\")\n            u.get_sqlserver_connection_details('incorrect_key')\n            assert len(warn) == 1\n            assert issubclass(warn[-1].category, UserWarning)\n            assert \".env\" in str(warn[-1].message)\n\n    def test_sqlserver_engine_exception(self):\n        server = None\n        with self.assertRaises(Exception):\n            u.create_sqlserver_engine(server, True)\n", "repo_name": "melio-consulting/hana2py", "sub_path": "hana2py/tests/utilities_tests.py", "file_name": "utilities_tests.py", "file_ext": "py", "file_size_in_byte": 1596, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 7, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "name"}, {"api_name": "hana2py.utilities.millify", "line_number": 18, "usage_type": "call"}, {"api_name": "hana2py.utilities", "line_number": 18, "usage_type": "name"}, {"api_name": "hana2py.utilities.get_hana_connection_details", "line_number": 23, "usage_type": "call"}, {"api_name": "hana2py.utilities", "line_number": 23, "usage_type": "name"}, {"api_name": "hana2py.utilities.get_sqlserver_connection_details", "line_number": 33, "usage_type": "call"}, {"api_name": "hana2py.utilities", "line_number": 33, "usage_type": "name"}, {"api_name": "warnings.catch_warnings", "line_number": 38, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 39, "usage_type": "call"}, {"api_name": "hana2py.utilities.get_sqlserver_connection_details", "line_number": 40, "usage_type": "call"}, {"api_name": "hana2py.utilities", "line_number": 40, "usage_type": "name"}, {"api_name": "hana2py.utilities.create_sqlserver_engine", "line_number": 48, "usage_type": "call"}, {"api_name": "hana2py.utilities", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "71399036852", "text": "import numpy as np\r\nimport scipy\r\nfrom scipy.sparse 
import coo_matrix,csc_matrix,csr_matrix\r\nfrom scipy.sparse.linalg import splu\r\nimport scipy.sparse as sparse\r\nimport matplotlib.pyplot as plt\r\n\r\nclass arpls():\r\n def __init__(self, data,lamda,ratio):\r\n self.data = data\r\n self.lamda = lamda\r\n self.ratio = ratio\r\n def speyediff(self, format='csc'):\r\n N=self.data.shape[0]\r\n shape = (N - 2, N)\r\n diagonals = np.zeros(5)\r\n diagonals[2] = 1.\r\n for i in range(2):\r\n diff = diagonals[:-1] - diagonals[1:]\r\n diagonals = diff\r\n offsets = np.arange(3)\r\n spmat = sparse.diags(diagonals, offsets, shape, format=format)\r\n return spmat\r\n def fit(self):\r\n N = self.data.shape[0]\r\n D = self.speyediff()\r\n H = D.T.dot(D) * self.lamda\r\n w = np.ones(N, dtype=float)\r\n while True:\r\n col = np.arange(N)\r\n row = np.arange(N)\r\n W = csc_matrix((w, (row, col)), shape=(N, N))\r\n sol = W + H\r\n z = splu(sol).solve(w * self.data)\r\n d = self.data - z\r\n dn = d[d < 0]\r\n m = np.mean(dn)\r\n s = np.std(dn)\r\n wt = 1 / (1 + np.exp((2 * (d - (2 * s - m)) / s)))\r\n if np.linalg.norm(w - wt) / np.linalg.norm(w) < self.ratio: break\r\n w = wt\r\n return z\r\n\r\ndata=np.loadtxt('system/data/pyexample/Data.txt')\r\nlamda=pow(15,10)\r\nratio=0.01\r\nz=arpls(data,lamda,ratio).fit()\r\n\r\nplt.close('all');\r\nprint (\"\\n\"*80)\r\n\r\nx = np.linspace(10.7,-1,32768)\r\nplt.gca().invert_xaxis() \r\nplt.plot(x,data,color='b')\r\nplt.plot(x,z,color='r')\r\nplt.xlabel(\"ppm\")\r\nplt.show()\r\n", "repo_name": "qonenmr/spinspj", "sub_path": "dataprocess/baseline/arpls.py", "file_name": "arpls.py", "file_ext": "py", "file_size_in_byte": 1699, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.sparse.diags", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.sparse.csc_matrix", "line_number": 32, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg.splu", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.xlabel", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "33994501573", "text": "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nfrom torch_geometric.data import Data, Batch\r\nfrom torch_geometric.nn import GCNConv\r\nfrom torch_geometric.nn import SAGEConv\r\nimport networkx as nx\r\nimport random\r\nimport numpy as np\r\nfrom torch_geometric.utils import add_self_loops\r\nimport matplotlib.pyplot as plt\r\nfrom itertools import product\r\ndef create_random_graph(num_nodes, prob):\r\n num_nodes = num_nodes\r\n G = nx.fast_gnp_random_graph(num_nodes, prob)\r\n return G\r\ndef solve_graph(G):\r\n node_colors, _ = welsh_powell_coloring(G)\r\n color_list = [node_colors.get(node, -1) for node in G.nodes()]\r\n return G, color_list\r\ndef welsh_powell_coloring(G):\r\n vertices = sorted(list(G.nodes()), key=lambda x: G.degree(x), reverse=True)\r\n colors = {}\r\n colors[vertices[0]] = 1\r\n for vertex in vertices[1:]:\r\n neighbor_colors = [colors.get(neigh) for neigh in G.neighbors(vertex)]\r\n available_colors = set(range(1, len(vertices) + 1)) - set(neighbor_colors)\r\n colors[vertex] = min(available_colors)\r\n num_colors = len(set(colors.values()))\r\n return colors, num_colors\r\ndef generate_dataset(num_iterations):\r\n adj_matrices_list = []\r\n node_feat_matrices_list = []\r\n labels_list = []\r\n for i in range(num_iterations):\r\n G = create_random_graph(15, 0.2)\r\n _, color_list = solve_graph(G)\r\n adj_matrix = nx.to_numpy_array(G)\r\n node_feat_matrix = np.array([G.degree(node) for node in G.nodes()]).reshape(-1, 1)\r\n adj_matrices_list.append(adj_matrix)\r\n node_feat_matrices_list.append(node_feat_matrix)\r\n labels_list.append(color_list)\r\n\r\n return adj_matrices_list, node_feat_matrices_list, labels_list\r\ndef predict_list(model, test_data, labels):\r\n labels = [torch.tensor(label, dtype=torch.long) for label in labels]\r\n model.eval()\r\n predicted_labels = []\r\n with torch.no_grad():\r\n for data in test_data:\r\n out = model(data.x, data.edge_index)\r\n predicted_labels.append(torch.argmax(out, dim=1))\r\n labels_tensor = torch.cat(labels, dim=0) \r\n predicted_labels = torch.cat(predicted_labels)\r\n correct = (predicted_labels == labels_tensor).sum().item()\r\n total = len(labels_tensor)\r\n accuracy = correct / total\r\n return predicted_labels, accuracy\r\ndef predict(model, data):\r\n model.eval()\r\n with torch.no_grad():\r\n out = model(data.x, data.edge_index)\r\n predicted_labels = torch.argmax(out, dim=1)\r\n return predicted_labels\r\ndef convert(test_graph):\r\n # Convert the test graph to PyTorch tensors\r\n test_adj_matrix = nx.to_numpy_array(test_graph)\r\n test_node_feat_matrix = np.array([test_graph.degree(node) for node in test_graph.nodes()]).reshape(-1, 1)\r\n test_adj_tensor = torch.tensor(test_adj_matrix, dtype=torch.float)\r\n test_node_feat_tensor = torch.tensor(test_node_feat_matrix, dtype=torch.float)\r\n\r\n # Create a PyTorch Geometric Data object for the test data\r\n test_edge_index = torch.tensor(add_self_loops(np.argwhere(test_adj_tensor)[:, [0, 1]].T)[0], dtype=torch.long)\r\n test_data = Data(x=test_node_feat_tensor, edge_index=test_edge_index)\r\n return test_data\r\nclass GraphSAGE(torch.nn.Module):\r\n 
def __init__(self, input_dim, hidden_dims, output_dim):\r\n super(GraphSAGE, self).__init__()\r\n\r\n self.hidden_layers = nn.ModuleList()\r\n self.hidden_layers.append(SAGEConv(input_dim, hidden_dims[0]))\r\n for i in range(1, len(hidden_dims)):\r\n self.hidden_layers.append(SAGEConv(hidden_dims[i-1], hidden_dims[i]))\r\n\r\n self.conv_final = SAGEConv(hidden_dims[-1], output_dim)\r\n\r\n def forward(self, x, edge_index):\r\n for conv in self.hidden_layers:\r\n x = F.relu(conv(x, edge_index))\r\n x = self.conv_final(x, edge_index)\r\n return F.log_softmax(x, dim=1)\r\n def l2_regularization(self):\r\n l2_reg = None\r\n for name, param in self.named_parameters():\r\n if 'weight' in name:\r\n if l2_reg is None:\r\n l2_reg = param.norm(2)\r\n else:\r\n l2_reg = l2_reg + param.norm(2)\r\n return l2_reg\r\nclass GCN(nn.Module):\r\n def __init__(self, input_dim, hidden_dims, output_dim):\r\n super(GCN, self).__init__()\r\n self.hidden_layers = nn.ModuleList()\r\n self.hidden_layers.append(GCNConv(input_dim, hidden_dims[0]))\r\n for i in range(1, len(hidden_dims)):\r\n self.hidden_layers.append(GCNConv(hidden_dims[i-1], hidden_dims[i]))\r\n self.output_layer = GCNConv(hidden_dims[-1], output_dim)\r\n\r\n def forward(self, x, edge_index):\r\n for layer in self.hidden_layers:\r\n x = F.relu(layer(x, edge_index))\r\n x = self.output_layer(x, edge_index)\r\n return F.log_softmax(x, dim=1)\r\n \r\n def l2_regularization(self):\r\n l2_reg = None\r\n for name, param in self.named_parameters():\r\n if 'weight' in name:\r\n if l2_reg is None:\r\n l2_reg = param.norm(2)\r\n else:\r\n l2_reg = l2_reg + param.norm(2)\r\n return l2_reg\r\n\r\ntest_graph = nx.Graph()\r\ntest_graph.add_edges_from([(0,7),(0,8),(0,10),(0,13),(1,6),(1,8),(1,11),(1,14),(2,6),(2,13),(3,5),(14,3),(8,5),(6,8),(7,9)\r\n,(7,13),(8,10),(8,14),(9,10),(11,12),(10,15),(5,14)])\r\ntest_data, test_labels = solve_graph(test_graph)\r\ntest_data = convert(test_data)\r\nmodel = GraphSAGE(1, [32, 64, 128], 8) # Create an instance of the model\r\nmodel.load_state_dict(torch.load('C:\\\\Users\\\\dell\\\\bb\\\\GraphSage\\\\GraphSage100000.pth')) # Load the saved parameters\r\npredictions= predict(model, test_data)\r\npos=nx.spring_layout(test_graph)\r\nplt.figure(num=\"Input graph\")\r\nnx.draw(test_graph, pos=pos, with_labels=True)\r\nplt.show()\r\nplt.figure(num=\"Welsh Powell output graph\")\r\nnx.draw(test_graph, pos=pos, node_color=test_labels, with_labels=True)\r\nplt.show()\r\nplt.figure(num=\"Machine learning output graph\")\r\nnx.draw(test_graph, pos=pos, node_color=predictions, with_labels=True)\r\nplt.show()\r\n#_______________________________________________________________________________________#\r\ntest_graph = nx.Graph()\r\ntest_graph.add_edges_from([(0,7),(0,11),(0,13),(0,12),(2,1),(2,13),(2,11),(4,11),(4,14),(5,8),(5,12),(6,8),(6,12),(6,13),(10,6),(8,10),(11,14),(12,13),(13,14)])\r\ntest_data, test_labels = solve_graph(test_graph)\r\ntest_data = convert(test_data)\r\nmodel = GCN(1, [16, 32, 64], 8) # Create an instance of the model\r\nmodel.load_state_dict(torch.load('C:\\\\Users\\\\dell\\\\bb\\\\GCN\\\\GCN100001.pth')) # Load the saved parameters\r\npredictions= predict(model, test_data)\r\npos=nx.spring_layout(test_graph)\r\nplt.figure(num=\"Input graph\")\r\nnx.draw(test_graph, pos=pos, with_labels=True)\r\nplt.show()\r\nplt.figure(num=\"Welsh Powell output graph\")\r\nnx.draw(test_graph, pos=pos, node_color=test_labels, with_labels=True)\r\nplt.show()\r\nplt.figure(num=\"Machine learning output graph\")\r\nnx.draw(test_graph, 
pos=pos, node_color=predictions, with_labels=True)\r\nplt.show()", "repo_name": "Ahmed-Elkallawy/ML_Graph_Coloring", "sub_path": "Testing.py", "file_name": "Testing.py", "file_ext": "py", "file_size_in_byte": 7097, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "networkx.fast_gnp_random_graph", "line_number": 16, "usage_type": "call"}, {"api_name": "networkx.to_numpy_array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 64, "usage_type": "call"}, {"api_name": "networkx.to_numpy_array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 74, "usage_type": "call"}, {"api_name": "torch_geometric.utils.add_self_loops", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 74, "usage_type": "attribute"}, {"api_name": "torch_geometric.data.Data", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.nn.ModuleList", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "name"}, {"api_name": "torch_geometric.nn.SAGEConv", "line_number": 82, "usage_type": "call"}, {"api_name": "torch_geometric.nn.SAGEConv", "line_number": 84, "usage_type": "call"}, {"api_name": "torch_geometric.nn.SAGEConv", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 102, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "name"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 106, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 108, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 113, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", 
"line_number": 115, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 115, "usage_type": "name"}, {"api_name": "networkx.Graph", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 133, "usage_type": "call"}, {"api_name": "networkx.spring_layout", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "networkx.draw", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "networkx.draw", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "networkx.draw", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "networkx.Graph", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 151, "usage_type": "call"}, {"api_name": "networkx.spring_layout", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "networkx.draw", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 157, "usage_type": "name"}, {"api_name": "networkx.draw", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "networkx.draw", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}]} +{"seq_id": "4113472987", "text": "from O365 import Account, FileSystemTokenBackend\nfrom O365 import MSGraphProtocol\nimport os\nimport json\n\nprotocol = MSGraphProtocol(api_version='beta')\ncredentials = ('7b9c81d7-2c01-4d48-86fc-9a7cd9700b85', 'cBPj06Vjh_[HNkntI-e6pEf.h1JKn12H')\n\ntoken_backend = FileSystemTokenBackend(token_path='my_folder', token_filename='my_token.txt')\naccount = Account(credentials, protocol=protocol, token_backend=token_backend)\naccount.authenticate(scopes=['basic', 'message_all', 'onedrive_all', 'address_book_all'])\n\nmailbox = account.mailbox()\ninbox = 
mailbox.inbox_folder()\n\npath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data_mailbox.json')\ndata = {}\nfor item in inbox.get_messages(limit=300):\n    message = {\n        'subject': item.subject,\n        'from': str(item._Message__sender),\n        'datetime': item._Message__received.strftime('%Y-%m-%dT%H:%M:%S.%f%z'),\n        'attachments': item.has_attachments,\n        'draft': item._Message__is_draft,\n        'read': item._Message__is_read,\n        'body': item._Message__body,\n        'folder': item.folder_id\n    }\n    print(f\"Message {message['subject']} saved successfully!\")\n    data[item.object_id] = message\nwith open(path, 'w') as f:\n    json.dump(data, f, indent=4)\n", "repo_name": "TinouWild/PythonBasique", "sub_path": "officebackup36/mails.py", "file_name": "mails.py", "file_ext": "py", "file_size_in_byte": 1254, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "O365.MSGraphProtocol", "line_number": 6, "usage_type": "call"}, {"api_name": "O365.FileSystemTokenBackend", "line_number": 9, "usage_type": "call"}, {"api_name": "O365.Account", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "25321228545", "text": "import os\nimport glob\nimport json\nimport re\nimport logging\nimport requests\nimport settings\nimport keys\nfrom shutil import copyfileobj\nfrom common import cached_property\nfrom common import url2slug\nfrom pprint import pprint\n\nRE_FNAME = re.compile(r\"(?P<id>[0-9]+)_(?P<slug>.*).epub\")\n\n\nclass Wallabag(object):\n    def __init__(self):\n        self.access_token = \"\"\n        self.auth = {}\n\n    @property\n    def tdir(self):\n        return settings.paths.bookmarks\n\n    @cached_property\n    def existing(self):\n        return [\n            os.path.basename(fpath)\n            for fpath in glob.glob(os.path.join(self.tdir, \"*\"))\n        ]\n\n    def archive_batch(self, entries):\n        for entry in entries[\"_embedded\"][\"items\"]:\n            ename = url2slug(entry[\"url\"])\n            eid = entry[\"id\"]\n            fname = f\"{ename}.epub\"\n            target = os.path.join(self.tdir, fname)\n\n            if fname in self.existing:\n                logging.debug(\"skipping existing entry %s\", entry[\"id\"])\n            else:\n                with requests.get(\n                    f\"{keys.wallabag.url}/api/entries/{eid}/export.epub\",\n                    stream=True,\n                    headers=self.auth,\n                ) as r:\n                    logging.info(\"saving %s to %s\", eid, target)\n                    with open(target, \"wb\") as f:\n                        copyfileobj(r.raw, f)\n\n    def run(self):\n        tparams = {\n            \"grant_type\": \"password\",\n            \"client_id\": keys.wallabag.client_id,\n            \"client_secret\": keys.wallabag.client_secret,\n            \"username\": keys.wallabag.username,\n            \"password\": keys.wallabag.password,\n        }\n        token = requests.post(\n            f\"{keys.wallabag.url}/oauth/v2/token\", data=tparams\n        )\n        try:\n            tdata = token.json()\n            if \"access_token\" not in tdata:\n                logging.error(\n                    \"missing access token from wallabag response\"\n                )\n                return\n        except Exception as e:\n            logging.error(\"failed to get token from wallabag: %s\", e)\n            return\n\n        self.access_token = tdata[\"access_token\"]\n        self.auth = {\"Authorization\": f\"Bearer {self.access_token}\"}\n\n        r = requests.get(\n            f\"{keys.wallabag.url}/api/entries\", headers=self.auth\n        )\n        try:\n            entries = r.json()\n        except Exception as e:\n            logging.error(\n                \"failed 
to get first page from wallabag: %s\", e\n )\n return\n\n batch = entries[\"limit\"]\n pages = entries[\"pages\"]\n page = entries[\"page\"]\n self.archive_batch(entries)\n while page < pages:\n page = page + 1\n paged = {\"perPage\": batch, \"page\": page}\n r = requests.get(\n f\"{keys.wallabag.url}/api/entries\",\n params=paged,\n headers=self.auth,\n )\n entries = r.json()\n self.archive_batch(entries)\n\n\nif __name__ == \"__main__\":\n wbag = Wallabag()\n wbag.run()\n", "repo_name": "petermolnar/silo.pasta", "sub_path": "Wallabag.py", "file_name": "Wallabag.py", "file_ext": "py", "file_size_in_byte": 3093, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "21", "api": [{"api_name": "re.compile", "line_number": 14, "usage_type": "call"}, {"api_name": "settings.paths", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "common.cached_property", "line_number": 26, "usage_type": "name"}, {"api_name": "common.url2slug", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 41, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 43, "usage_type": "call"}, {"api_name": "keys.wallabag", "line_number": 44, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 48, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 50, "usage_type": "call"}, {"api_name": "keys.wallabag", "line_number": 55, "usage_type": "attribute"}, {"api_name": "keys.wallabag", "line_number": 56, "usage_type": "attribute"}, {"api_name": "keys.wallabag", "line_number": 57, "usage_type": "attribute"}, {"api_name": "keys.wallabag", "line_number": 58, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 60, "usage_type": "call"}, {"api_name": "keys.wallabag", "line_number": 61, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 66, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 71, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 77, "usage_type": "call"}, {"api_name": "keys.wallabag", "line_number": 78, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 83, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 95, "usage_type": "call"}, {"api_name": "keys.wallabag", "line_number": 96, "usage_type": "attribute"}]} +{"seq_id": "41164370909", "text": "from __future__ import division\nimport argparse\n\nfrom appscale.common import appscale_info\nfrom cassandra.cluster import Cluster\nfrom random import choice\nfrom random import shuffle\nfrom subprocess import check_output\nfrom ..cassandra_env.cassandra_interface import KEYSPACE\nfrom ..cassandra_env.cassandra_interface import NODE_TOOL\nfrom ..cassandra_env.cassandra_interface import ThriftColumn\nfrom ..cassandra_env.constants import LB_POLICY\nfrom ..cassandra_env.retry_policies import BASIC_RETRIES\nfrom ..dbconstants import APP_ENTITY_TABLE\nfrom ..dbconstants import APP_ENTITY_SCHEMA\nfrom ..dbconstants import 
KEY_DELIMITER\nfrom ..dbconstants import KIND_SEPARATOR\n\n\nclass NoSampleKeys(Exception):\n  \"\"\" Indicates that nodetool did not provide any key samples. \"\"\"\n  pass\n\n\ndef is_entity(key):\n  \"\"\" Determines whether a given string is an entity key.\n\n  Args:\n    key: A string containing a key from 'rangekeysample'.\n  Returns:\n    A boolean indicating whether or not the string is an entity key.\n  \"\"\"\n  key_parts = key.split(KEY_DELIMITER)\n  if len(key_parts) != 3:\n    return False\n\n  last_part = key_parts[-1]\n  if not last_part.endswith(KIND_SEPARATOR):\n    return False\n\n  last_part = last_part[:-len(KIND_SEPARATOR)]\n  if KIND_SEPARATOR in last_part:\n    return False\n\n  return ':' in last_part\n\n\ndef get_kind_averages(keys):\n  \"\"\" Get an average size for each kind.\n\n  Args:\n    keys: A list of dictionaries containing keys.\n  Returns:\n    A dictionary listing the average size of each kind.\n  \"\"\"\n  hosts = appscale_info.get_db_ips()\n  cluster = Cluster(hosts, default_retry_policy=BASIC_RETRIES,\n                    load_balancing_policy=LB_POLICY)\n  session = cluster.connect(KEYSPACE)\n\n  entities_by_kind = {}\n  for key_dict in keys:\n    key = key_dict['key']\n    if is_entity(key):\n      key_parts = key.split(KEY_DELIMITER)\n      kind = key_parts[2].split(':')[0]\n      kind_id = KEY_DELIMITER.join([key_parts[0], key_parts[1], kind])\n      if kind_id not in entities_by_kind:\n        entities_by_kind[kind_id] = {'keys': [], 'size': 0, 'fetched': 0}\n      entities_by_kind[kind_id]['keys'].append(key)\n\n  for kind_id, kind in entities_by_kind.iteritems():\n    shuffle(kind['keys'])\n\n  if not entities_by_kind:\n    return {}\n\n  futures = []\n  for _ in range(50):\n    kind = choice(entities_by_kind.keys())\n    try:\n      key = entities_by_kind[kind]['keys'].pop()\n    except IndexError:\n      continue\n\n    select = \"\"\"\n      SELECT {value} FROM \"{table}\"\n      WHERE {key}=%(key)s AND {column}=%(column)s\n    \"\"\".format(value=ThriftColumn.VALUE, table=APP_ENTITY_TABLE,\n               key=ThriftColumn.KEY, column=ThriftColumn.COLUMN_NAME)\n    parameters = {'key': bytearray(key), 'column': APP_ENTITY_SCHEMA[0]}\n    future = session.execute_async(select, parameters)\n    futures.append({'future': future, 'kind': kind})\n\n  for future_dict in futures:\n    future = future_dict['future']\n    kind = future_dict['kind']\n    try:\n      entity = future.result()[0].value\n    except IndexError:\n      continue\n\n    entities_by_kind[kind]['size'] += len(entity)\n    entities_by_kind[kind]['fetched'] += 1\n\n  kind_averages = {}\n  for kind_id, kind in entities_by_kind.iteritems():\n    try:\n      kind_averages[kind_id] = int(kind['size'] / kind['fetched'])\n    except ZeroDivisionError:\n      kind_averages[kind_id] = 0\n\n  return kind_averages\n\n\ndef get_sample():\n  \"\"\" Gets a sorted sample of keys on this node.\n\n  Returns:\n    A list of dictionaries containing keys.\n  \"\"\"\n  sample_output = check_output([NODE_TOOL, 'rangekeysample'])\n  keys = [{'key': key.strip().decode('hex'), 'size': 0}\n          for key in sample_output.splitlines()[1:]]\n  keys.sort(key=lambda key: key['key'])\n  return keys\n\n\ndef main():\n  \"\"\" Run appscale-get-token script. 
\"\"\"\n parser = argparse.ArgumentParser(\n description='Retrieve a Cassandra token owned by this node')\n parser.add_argument('percentile', type=int)\n args = parser.parse_args()\n\n keys = get_sample()\n if not keys:\n raise NoSampleKeys('There are no key samples available on this machine.')\n\n kind_averages = get_kind_averages(keys)\n\n for key_dict in keys:\n key = key_dict['key']\n key_dict['size'] += len(key)\n if not is_entity(key):\n continue\n\n key_parts = key.split(KEY_DELIMITER)\n kind = key_parts[2].split(':')[0]\n kind_id = KEY_DELIMITER.join([key_parts[0], key_parts[1], kind])\n if kind_id in kind_averages:\n key_dict['size'] += kind_averages[kind_id]\n\n total_size = sum(key['size'] for key in keys)\n desired_size = int(total_size * (args.percentile / 100))\n\n size_seen = 0\n for key in keys:\n size_seen += key['size']\n if size_seen >= desired_size:\n print(key['key'].encode('hex'))\n return\n\n # If we still haven't reached the desired size, just select the last key.\n print(keys[-1]['key'].encode('hex'))\n", "repo_name": "HafeezRai/appscale", "sub_path": "AppDB/appscale/datastore/cassandra_env/get_token.py", "file_name": "get_token.py", "file_ext": "py", "file_size_in_byte": 4836, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "dbconstants.KEY_DELIMITER", "line_number": 33, "usage_type": "argument"}, {"api_name": "dbconstants.KIND_SEPARATOR", "line_number": 38, "usage_type": "argument"}, {"api_name": "dbconstants.KIND_SEPARATOR", "line_number": 41, "usage_type": "argument"}, {"api_name": "dbconstants.KIND_SEPARATOR", "line_number": 42, "usage_type": "name"}, {"api_name": "appscale.common.appscale_info.get_db_ips", "line_number": 56, "usage_type": "call"}, {"api_name": "appscale.common.appscale_info", "line_number": 56, "usage_type": "name"}, {"api_name": "cassandra.cluster.Cluster", "line_number": 57, "usage_type": "call"}, {"api_name": "cassandra_env.retry_policies.BASIC_RETRIES", "line_number": 57, "usage_type": "name"}, {"api_name": "cassandra_env.constants.LB_POLICY", "line_number": 58, "usage_type": "name"}, {"api_name": "cassandra_env.cassandra_interface.KEYSPACE", "line_number": 59, "usage_type": "argument"}, {"api_name": "dbconstants.KEY_DELIMITER", "line_number": 65, "usage_type": "argument"}, {"api_name": "dbconstants.KEY_DELIMITER.join", "line_number": 67, "usage_type": "call"}, {"api_name": "dbconstants.KEY_DELIMITER", "line_number": 67, "usage_type": "name"}, {"api_name": "random.shuffle", "line_number": 73, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 80, "usage_type": "call"}, {"api_name": "cassandra_env.cassandra_interface.ThriftColumn.VALUE", "line_number": 89, "usage_type": "attribute"}, {"api_name": "cassandra_env.cassandra_interface.ThriftColumn", "line_number": 89, "usage_type": "name"}, {"api_name": "dbconstants.APP_ENTITY_TABLE", "line_number": 89, "usage_type": "name"}, {"api_name": "cassandra_env.cassandra_interface.ThriftColumn.KEY", "line_number": 90, "usage_type": "attribute"}, {"api_name": "cassandra_env.cassandra_interface.ThriftColumn", "line_number": 90, "usage_type": "name"}, {"api_name": "cassandra_env.cassandra_interface.ThriftColumn.COLUMN_NAME", "line_number": 90, "usage_type": "attribute"}, {"api_name": "dbconstants.APP_ENTITY_SCHEMA", "line_number": 91, "usage_type": "name"}, {"api_name": "subprocess.check_output", "line_number": 122, "usage_type": "call"}, {"api_name": "cassandra_env.cassandra_interface.NODE_TOOL", 
"line_number": 122, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 131, "usage_type": "call"}, {"api_name": "dbconstants.KEY_DELIMITER", "line_number": 148, "usage_type": "argument"}, {"api_name": "dbconstants.KEY_DELIMITER.join", "line_number": 150, "usage_type": "call"}, {"api_name": "dbconstants.KEY_DELIMITER", "line_number": 150, "usage_type": "name"}]} +{"seq_id": "25591990583", "text": "from anytree import Node, PostOrderIter\n\n\nwith open(\"day7.txt\", 'r') as file:\n data = file.read().splitlines()\n\nroot = Node(\"root\", total_size=0)\ncurrent_dir = root\nfor line in data[1:]:\n if line.startswith(\"$ cd\"):\n if line[5:] == \"..\":\n current_dir = current_dir.parent\n else:\n for child in current_dir.children:\n if child.name == line[5:]:\n current_dir = child\n break\n elif line.startswith(\"dir\"):\n Node(line[4:], parent=current_dir, total_size=0)\n elif not line.isalpha() and line[0] != \"$\":\n file_size, _ = line.split()\n current_dir.total_size += int(file_size)\n\ntotal_p1 = 0\nsmallest_big_dir = 100_000_000\nfor node in PostOrderIter(root):\n for child in node.children:\n node.total_size += child.total_size\n if node.total_size <= 100_000:\n total_p1 += node.total_size\nsize_req = 30000000 - (70000000 - root.total_size)\nfor node in PostOrderIter(root):\n if node.total_size >= size_req and node.total_size < smallest_big_dir:\n smallest_big_dir = node.total_size\nprint(total_p1)\nprint(smallest_big_dir)\n", "repo_name": "herbiebradley/aoc", "sub_path": "2022/day7/day7.py", "file_name": "day7.py", "file_ext": "py", "file_size_in_byte": 1156, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "anytree.Node", "line_number": 7, "usage_type": "call"}, {"api_name": "anytree.Node", "line_number": 19, "usage_type": "call"}, {"api_name": "anytree.PostOrderIter", "line_number": 26, "usage_type": "call"}, {"api_name": "anytree.PostOrderIter", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "165863024", "text": "import math\nimport torch\nfrom torch import nn\n\ngNoise = [50, 10] ##mean and stddev\n\n\nclass Generator(nn.Module):\n def __init__(self, device, scale_factor=4):\n super(Generator, self).__init__()\n\n upsample_block_num = int(math.log(scale_factor, 2))\n self.device = device\n \n self.block1 = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=9, padding=4),\n nn.PReLU()\n )\n self.block2 = ResidualBlock(64)\n self.block3 = ResidualBlock(64)\n self.block4 = ResidualBlock(64)\n self.block5 = ResidualBlock(64)\n self.block6 = ResidualBlock(64)\n self.block7 = ResidualBlock(64)\n self.block8 = ResidualBlock(64)\n self.block9 = ResidualBlock(64)\n \n block10 = [UpsampleBLock(64, 2) for _ in range(upsample_block_num)]\n block10.append(nn.Conv2d(64, 3, kernel_size=9, padding=4))\n self.block10 = nn.Sequential(*block10)\n\n self.pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)\n\n def forward(self, x):\n \n # noise_tensor = torch.FloatTensor(x.size()).normal_(gNoise[0], gNoise[1] ).to(self.device)\n # #64*64 plus noise\n # x = torch.add(x, noise_tensor )\n\n block1 = self.block1(x)\n block1 = self.pool(block1)\n #32*32\n block2 = self.block2(block1) \n block3 = self.block3(block2) \n block3 = self.pool(block3)\n #16*16\n block4 = self.block4(block3) \n block5 = self.block5(block4)\n block5 = self.pool(block5)\n #8*8\n block6 = self.block6(block5)\n block7 = self.block7(block6)\n block7 = self.pool(block7)\n #4*4\n block8 = self.block8(block7)\n block9 = 
self.block9(block8)\n #upsample\n block10 = self.block10(block9)\n\n return block10\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, channels):\n super(ResidualBlock, self).__init__()\n self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)\n self.bn1 = nn.BatchNorm2d(channels)\n self.relu = nn.ReLU()\n self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)\n self.bn2 = nn.BatchNorm2d(channels)\n\n def forward(self, x):\n residual = self.bn1(x)\n residual = self.relu(residual)\n residual = self.conv1(residual)\n\n residual = self.bn2(residual)\n residual = self.relu(residual)\n residual = self.conv2(residual)\n \n\n return x + residual\n\n\nclass UpsampleBLock(nn.Module):\n def __init__(self, in_channels, up_scale):\n super(UpsampleBLock, self).__init__()\n self.conv = nn.Conv2d(in_channels, in_channels * up_scale ** 2, kernel_size=3, padding=1)\n self.pixel_shuffle = nn.PixelShuffle(up_scale)\n self.prelu = nn.PReLU()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.pixel_shuffle(x)\n x = self.prelu(x)\n return x\n", "repo_name": "nprithviraj24/face-hallucination", "sub_path": "image-degrade/High2Low/generator.py", "file_name": "generator.py", "file_ext": "py", "file_size_in_byte": 2969, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "21", "api": [{"api_name": "torch.nn.Module", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "math.log", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.AvgPool2d", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 63, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", 
"line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.PixelShuffle", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}]} +{"seq_id": "23888303209", "text": "import os, sys, random\nimport numpy as np\nimport pandas as pd\nimport json\nimport cv2\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision.transforms import Normalize\n\nfrom tqdm import tqdm\n\nimport matplotlib.pyplot as plt\nfrom efficientnet_pytorch import EfficientNet\n\nROOT_DIR = '/media/vtouchinc02/database/RawData/deepfake-32frame/'\nOUTPUT_DIR = '/media/vtouchinc02/database/RawData/deepfake-32frame-csv'\n\ndef get_dataframe(root_dir, folder_name):\n folder_path = os.path.join(root_dir, folder_name)\n files_in_folder = os.listdir(folder_path)\n\n with open(os.path.join(folder_path, 'metadata.json'), 'r') as fp:\n metadata = json.load(fp)\n\n list_image_name = []\n list_original = []\n list_manipulated_ratio = []\n list_img_min = []\n list_img_max = []\n list_diff_min = []\n list_diff_max = []\n for video_name, attributes in tqdm(metadata.items()):\n if attributes['label'] == 'REAL':\n continue\n\n video_name_original = attributes['original']\n\n if not ('face' in attributes):\n continue\n\n for frame_index, detection_results in attributes['face'].items():\n for box_index, box_result in enumerate(detection_results):\n img_name = '%s-%d-%d.png' % (video_name[:-4], int(frame_index), box_index)\n if not (img_name in files_in_folder):\n continue\n\n img_name_original = '%s-%d-%d.png' % (video_name_original[:-4], int(frame_index), box_index)\n if not (img_name_original in files_in_folder):\n continue\n\n manipulated_ratio = box_result['manipulated_ratio']\n img_min = box_result['img_min']\n img_max = box_result['img_max']\n diff_min = box_result['diff_min']\n diff_max = box_result['diff_max']\n\n list_image_name.append('%s/%s' % (folder_name, img_name))\n list_original.append('%s/%s' % (folder_name, img_name_original))\n list_manipulated_ratio.append(manipulated_ratio)\n list_img_min.append(img_min)\n list_img_max.append(img_max)\n list_diff_min.append(diff_min)\n list_diff_max.append(diff_max)\n\n metadata_df = pd.DataFrame({\n 'image_name':list_image_name,\n 'original':list_original,\n 'manipulated_ratio': list_manipulated_ratio,\n 'img_min': list_img_min,\n 'img_max': list_img_max,\n 'diff_min': list_diff_min,\n 'diff_max': list_diff_max\n })\n \n return metadata_df\n\nif __name__ == '__main__':\n if not os.path.isdir(OUTPUT_DIR):\n os.mkdir(OUTPUT_DIR)\n\n list_metadata_df = []\n for i in range(0,5):\n dir_name = 'dfdc_train_part_%d' % i\n print(dir_name)\n if os.path.isfile(os.path.join(os.path.join(ROOT_DIR, dir_name), 'metadata.json')):\n metadata_df = get_dataframe(ROOT_DIR, dir_name)\n metadata_df.to_csv(os.path.join(OUTPUT_DIR, 'metadata_%d.csv' % i))\n list_metadata_df.append(metadata_df)\n\n metadata_df = pd.concat(list_metadata_df)\n metadata_df.to_csv(os.path.join(OUTPUT_DIR, 'metadata.csv'))", "repo_name": "poperson1205/VTouchDFDC", "sub_path": "generate_csv_with_manipulated_ratio.py", "file_name": "generate_csv_with_manipulated_ratio.py", "file_ext": "py", "file_size_in_byte": 3226, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 25, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}]} +{"seq_id": "32684834636", "text": "########################################################################\n# File name: test_xso.py\n# This file is part of: aioxmpp\n#\n# LICENSE\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program. 
If not, see\n# <http://www.gnu.org/licenses/>.\n#\n########################################################################\nimport io\nimport unittest\nimport unittest.mock\n\nimport multidict\n\nimport aioxmpp.xso\nimport aioxmpp.xml\n\nfrom aioxmpp.utils import namespaces\n\nimport aioxmpp.httpupload.xso as httpupload_xso\n\n\nclass TestNamespace(unittest.TestCase):\n def test_namespace(self):\n self.assertEqual(namespaces.xep0363_http_upload,\n \"urn:xmpp:http:upload:0\")\n\n\nclass TestRequest(unittest.TestCase):\n def test_is_xso(self):\n self.assertTrue(issubclass(\n httpupload_xso.Request,\n aioxmpp.xso.XSO,\n ))\n\n def test_tag(self):\n self.assertEqual(httpupload_xso.Request.TAG,\n (namespaces.xep0363_http_upload, \"request\"))\n\n def test_is_iq_payload(self):\n self.assertIn(\n httpupload_xso.Request.TAG,\n aioxmpp.IQ.CHILD_MAP,\n )\n\n def test_filename(self):\n self.assertIsInstance(\n httpupload_xso.Request.filename,\n aioxmpp.xso.Attr,\n )\n self.assertEqual(\n httpupload_xso.Request.filename.tag,\n (None, \"filename\"),\n )\n self.assertIsInstance(\n httpupload_xso.Request.filename.type_,\n aioxmpp.xso.String,\n )\n self.assertIs(httpupload_xso.Request.filename.default,\n aioxmpp.xso.NO_DEFAULT)\n\n def test_size(self):\n self.assertIsInstance(\n httpupload_xso.Request.size,\n aioxmpp.xso.Attr,\n )\n self.assertEqual(\n httpupload_xso.Request.size.tag,\n (None, \"size\"),\n )\n self.assertIsInstance(\n httpupload_xso.Request.size.type_,\n aioxmpp.xso.Integer,\n )\n self.assertIs(httpupload_xso.Request.filename.default,\n aioxmpp.xso.NO_DEFAULT)\n\n def test_content_type(self):\n self.assertIsInstance(\n httpupload_xso.Request.content_type,\n aioxmpp.xso.Attr,\n )\n self.assertEqual(\n httpupload_xso.Request.content_type.tag,\n (None, \"content-type\"),\n )\n self.assertIsInstance(\n httpupload_xso.Request.content_type.type_,\n aioxmpp.xso.String,\n )\n self.assertIs(httpupload_xso.Request.filename.default,\n aioxmpp.xso.NO_DEFAULT)\n\n def test_init_requires_arguments(self):\n with self.assertRaisesRegex(TypeError, r\"argument\"):\n httpupload_xso.Request()\n\n def test_init(self):\n r = httpupload_xso.Request(\n \"filename\",\n 1234,\n \"content type\",\n )\n\n self.assertEqual(r.filename, \"filename\")\n self.assertEqual(r.size, 1234)\n self.assertEqual(r.content_type, \"content type\")\n\n\nclass TestHeader(unittest.TestCase):\n def test_is_xso(self):\n self.assertTrue(issubclass(\n httpupload_xso.Header,\n aioxmpp.xso.XSO,\n ))\n\n def test_tag(self):\n self.assertEqual(httpupload_xso.Header.TAG,\n (namespaces.xep0363_http_upload, \"header\"))\n\n def test_name(self):\n self.assertIsInstance(\n httpupload_xso.Header.name,\n aioxmpp.xso.Attr,\n )\n self.assertEqual(\n httpupload_xso.Header.name.tag,\n (None, \"name\"),\n )\n self.assertIsInstance(\n httpupload_xso.Header.name.type_,\n aioxmpp.xso.String,\n )\n self.assertIs(httpupload_xso.Header.name.default,\n aioxmpp.xso.NO_DEFAULT)\n\n def test_value(self):\n self.assertIsInstance(\n httpupload_xso.Header.value,\n aioxmpp.xso.Text,\n )\n self.assertIsInstance(\n httpupload_xso.Header.name.type_,\n aioxmpp.xso.String,\n )\n\n\nclass TestHeaderType(unittest.TestCase):\n def test_is_element_type(self):\n self.assertTrue(issubclass(\n httpupload_xso.HeaderType,\n aioxmpp.xso.AbstractElementType,\n ))\n\n def test_get_xso_types(self):\n self.assertCountEqual(\n httpupload_xso.HeaderType.get_xso_types(),\n [httpupload_xso.Header]\n )\n\n def test_unpack(self):\n t = httpupload_xso.HeaderType\n el = 
unittest.mock.Mock(spec=httpupload_xso.Header)\n el.name = unittest.mock.sentinel.name\n el.value = unittest.mock.sentinel.value\n self.assertEqual(\n t.unpack(el),\n (unittest.mock.sentinel.name, unittest.mock.sentinel.value)\n )\n\n def test_pack(self):\n t = httpupload_xso.HeaderType\n with unittest.mock.patch(\"aioxmpp.httpupload.xso.Header\") as Header:\n result = t.pack((unittest.mock.sentinel.name,\n unittest.mock.sentinel.value))\n\n Header.assert_called_once_with()\n self.assertEqual(result, Header())\n\n self.assertEqual(result.name, unittest.mock.sentinel.name)\n self.assertEqual(result.value, unittest.mock.sentinel.value)\n\n\nclass TestPut(unittest.TestCase):\n def test_is_xso(self):\n self.assertTrue(issubclass(\n httpupload_xso.Put,\n aioxmpp.xso.XSO,\n ))\n\n def test_tag(self):\n self.assertEqual(httpupload_xso.Put.TAG,\n (namespaces.xep0363_http_upload, \"put\"))\n\n def test_url(self):\n self.assertIsInstance(\n httpupload_xso.Put.url,\n aioxmpp.xso.Attr,\n )\n self.assertEqual(\n httpupload_xso.Put.url.tag,\n (None, \"url\"),\n )\n self.assertIsInstance(\n httpupload_xso.Put.url.type_,\n aioxmpp.xso.String,\n )\n self.assertIs(httpupload_xso.Put.url.default, aioxmpp.xso.NO_DEFAULT)\n\n def test_headers(self):\n self.assertIsInstance(\n httpupload_xso.Put.headers,\n aioxmpp.xso.ChildValueMultiMap,\n )\n self.assertIs(\n httpupload_xso.Put.headers.type_,\n httpupload_xso.HeaderType\n )\n self.assertIs(\n httpupload_xso.Put.headers.mapping_type,\n multidict.MultiDict,\n )\n\n def test_parses_multiple_headers_correctly(self):\n data = (\n \"<put xmlns='{}' url='foo'>\"\n \"<header name='Authorization'>v1</header>\"\n \"<header name='Expires'>v2</header>\"\n \"<header name='Authorization'>v3</header>\"\n \"</put>\"\n ).format(namespaces.xep0363_http_upload)\n buf = io.BytesIO(data.encode(\"utf-8\"))\n result = aioxmpp.xml.read_single_xso(buf, httpupload_xso.Put)\n\n self.assertCountEqual(\n result.headers.items(),\n [\n (\"Authorization\", \"v1\"),\n (\"Expires\", \"v2\"),\n (\"Authorization\", \"v3\"),\n ]\n )\n\n def test_xso_after_load_keeps_valid_headers_intact(self):\n headers = {\n \"Authorization\": \"xyz\",\n \"Cookie\": \"xyz\",\n \"Expires\": \"xyz\",\n }\n\n p = httpupload_xso.Put()\n p.headers.update(headers)\n p.xso_after_load()\n\n self.assertCountEqual(\n p.headers.items(),\n headers.items(),\n )\n\n def test_xso_after_load_strips_newlines(self):\n headers = {\n \"Authorization\": \"abc\\ndef\",\n }\n\n p = httpupload_xso.Put()\n p.headers.update(headers)\n p.xso_after_load()\n\n self.assertCountEqual(\n p.headers.items(),\n {\n \"Authorization\": \"abcdef\",\n }.items()\n )\n\n def test_xso_after_load_removes_non_whitelisted_headers(self):\n headers = {\n \"foo\": \"bar\"\n }\n\n p = httpupload_xso.Put()\n p.headers.update(headers)\n p.xso_after_load()\n\n self.assertCountEqual(\n p.headers.items(),\n {}.items()\n )\n\n\nclass TestGet(unittest.TestCase):\n def test_is_xso(self):\n self.assertTrue(issubclass(\n httpupload_xso.Get,\n aioxmpp.xso.XSO,\n ))\n\n def test_tag(self):\n self.assertEqual(httpupload_xso.Get.TAG,\n (namespaces.xep0363_http_upload, \"get\"))\n\n def test_url(self):\n self.assertIsInstance(\n httpupload_xso.Get.url,\n aioxmpp.xso.Attr,\n )\n self.assertEqual(\n httpupload_xso.Get.url.tag,\n (None, \"url\"),\n )\n self.assertIsInstance(\n httpupload_xso.Get.url.type_,\n aioxmpp.xso.String,\n )\n self.assertIs(httpupload_xso.Get.url.default, aioxmpp.xso.NO_DEFAULT)\n\n\nclass TestSlot(unittest.TestCase):\n def test_is_xso(self):\n 
self.assertTrue(issubclass(\n httpupload_xso.Slot,\n aioxmpp.xso.XSO,\n ))\n\n def test_is_iq_payload(self):\n self.assertIn(\n httpupload_xso.Slot.TAG,\n aioxmpp.IQ.CHILD_MAP,\n )\n\n def test_tag(self):\n self.assertEqual(httpupload_xso.Slot.TAG,\n (namespaces.xep0363_http_upload, \"slot\"))\n\n def test_put(self):\n self.assertIsInstance(\n httpupload_xso.Slot.put,\n aioxmpp.xso.Child,\n )\n self.assertCountEqual(\n httpupload_xso.Slot.put._classes,\n [httpupload_xso.Put]\n )\n\n def test_get(self):\n self.assertIsInstance(\n httpupload_xso.Slot.get,\n aioxmpp.xso.Child,\n )\n self.assertCountEqual(\n httpupload_xso.Slot.get._classes,\n [httpupload_xso.Get]\n )\n\n def test_validate_rejects_missing_put(self):\n s = httpupload_xso.Slot()\n s.get = unittest.mock.Mock(spec=httpupload_xso.Get)\n\n with self.assertRaisesRegex(ValueError, r\"missing PUT information\"):\n s.validate()\n\n def test_validate_rejects_missing_get(self):\n s = httpupload_xso.Slot()\n s.put = unittest.mock.Mock(spec=httpupload_xso.Put)\n\n with self.assertRaisesRegex(ValueError, r\"missing GET information\"):\n s.validate()\n\n def test_validate_passes_if_both_are_present(self):\n s = httpupload_xso.Slot()\n s.get = unittest.mock.Mock(spec=httpupload_xso.Get)\n s.put = unittest.mock.Mock(spec=httpupload_xso.Put)\n", "repo_name": "horazont/aioxmpp", "sub_path": "tests/httpupload/test_xso.py", "file_name": "test_xso.py", "file_ext": "py", "file_size_in_byte": 11183, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 215, "dataset": "github-code", "pt": "21", "api": [{"api_name": "unittest.TestCase", "line_number": 36, "usage_type": "attribute"}, {"api_name": "aioxmpp.utils.namespaces.xep0363_http_upload", "line_number": 38, "usage_type": "attribute"}, {"api_name": "aioxmpp.utils.namespaces", "line_number": 38, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 42, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 45, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 45, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 46, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 46, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 50, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 50, "usage_type": "name"}, {"api_name": "aioxmpp.utils.namespaces.xep0363_http_upload", "line_number": 51, "usage_type": "attribute"}, {"api_name": "aioxmpp.utils.namespaces", "line_number": 51, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 55, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 55, "usage_type": "name"}, {"api_name": "aioxmpp.xso.IQ", "line_number": 56, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 56, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 61, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 61, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 62, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 62, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 65, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 65, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 69, "usage_type": "attribute"}, {"api_name": 
"aioxmpp.httpupload.xso", "line_number": 69, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 70, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 70, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 72, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 72, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 73, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 73, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 77, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 77, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 78, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 78, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 81, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 81, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 85, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 85, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 86, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 86, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 88, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 88, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 89, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 89, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 93, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 93, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 94, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 94, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 97, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 97, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 101, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 101, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 102, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 102, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 104, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 104, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 105, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 105, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 109, "usage_type": "call"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 109, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Request", "line_number": 112, "usage_type": "call"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 112, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 123, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso.Header", "line_number": 126, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 126, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 127, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", 
"line_number": 127, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Header", "line_number": 131, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 131, "usage_type": "name"}, {"api_name": "aioxmpp.utils.namespaces.xep0363_http_upload", "line_number": 132, "usage_type": "attribute"}, {"api_name": "aioxmpp.utils.namespaces", "line_number": 132, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Header", "line_number": 136, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 136, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 137, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 137, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Header", "line_number": 140, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 140, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Header", "line_number": 144, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 144, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 145, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 145, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Header", "line_number": 147, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 147, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 148, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 148, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Header", "line_number": 152, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 152, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 153, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 153, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Header", "line_number": 156, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 156, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 157, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 157, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 161, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso.HeaderType", "line_number": 164, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 164, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 165, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 165, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.HeaderType.get_xso_types", "line_number": 170, "usage_type": "call"}, {"api_name": "aioxmpp.httpupload.xso.HeaderType", "line_number": 170, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 170, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Header", "line_number": 171, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 171, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.HeaderType", "line_number": 175, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 175, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 176, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 176, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso.Header", "line_number": 176, "usage_type": "attribute"}, {"api_name": 
"aioxmpp.httpupload.xso", "line_number": 176, "usage_type": "name"}, {"api_name": "unittest.mock", "line_number": 177, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 178, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 181, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso.HeaderType", "line_number": 185, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 185, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 186, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 186, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 187, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 188, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 193, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 194, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 197, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 200, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 200, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 201, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 201, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 205, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 205, "usage_type": "name"}, {"api_name": "aioxmpp.utils.namespaces.xep0363_http_upload", "line_number": 206, "usage_type": "attribute"}, {"api_name": "aioxmpp.utils.namespaces", "line_number": 206, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 210, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 210, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 211, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 211, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 214, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 214, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 218, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 218, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 219, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 219, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 221, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 221, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 221, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 221, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 225, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 225, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 226, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 226, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 229, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 229, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.HeaderType", "line_number": 230, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 230, "usage_type": "name"}, 
{"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 233, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 233, "usage_type": "name"}, {"api_name": "multidict.MultiDict", "line_number": 234, "usage_type": "attribute"}, {"api_name": "aioxmpp.utils.namespaces.xep0363_http_upload", "line_number": 244, "usage_type": "attribute"}, {"api_name": "aioxmpp.utils.namespaces", "line_number": 244, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 245, "usage_type": "call"}, {"api_name": "aioxmpp.xso.xml.read_single_xso", "line_number": 246, "usage_type": "call"}, {"api_name": "aioxmpp.xso.xml", "line_number": 246, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 246, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 246, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 246, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 264, "usage_type": "call"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 264, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 278, "usage_type": "call"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 278, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 294, "usage_type": "call"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 294, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 304, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso.Get", "line_number": 307, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 307, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 308, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 308, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Get", "line_number": 312, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 312, "usage_type": "name"}, {"api_name": "aioxmpp.utils.namespaces.xep0363_http_upload", "line_number": 313, "usage_type": "attribute"}, {"api_name": "aioxmpp.utils.namespaces", "line_number": 313, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Get", "line_number": 317, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 317, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 318, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 318, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Get", "line_number": 321, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 321, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Get", "line_number": 325, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 325, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 326, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 326, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Get", "line_number": 328, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 328, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 328, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 328, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 331, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso.Slot", "line_number": 334, "usage_type": "attribute"}, {"api_name": 
"aioxmpp.httpupload.xso", "line_number": 334, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 335, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 335, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Slot", "line_number": 340, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 340, "usage_type": "name"}, {"api_name": "aioxmpp.xso.IQ", "line_number": 341, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 341, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Slot", "line_number": 345, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 345, "usage_type": "name"}, {"api_name": "aioxmpp.utils.namespaces.xep0363_http_upload", "line_number": 346, "usage_type": "attribute"}, {"api_name": "aioxmpp.utils.namespaces", "line_number": 346, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Slot", "line_number": 350, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 350, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 351, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 351, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Slot", "line_number": 354, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 354, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 355, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 355, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Slot", "line_number": 360, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 360, "usage_type": "name"}, {"api_name": "aioxmpp.xso.xso", "line_number": 361, "usage_type": "attribute"}, {"api_name": "aioxmpp.xso", "line_number": 361, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Slot", "line_number": 364, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 364, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Get", "line_number": 365, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 365, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Slot", "line_number": 369, "usage_type": "call"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 369, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 370, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 370, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso.Get", "line_number": 370, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 370, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Slot", "line_number": 376, "usage_type": "call"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 376, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 377, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 377, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 377, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 377, "usage_type": "name"}, {"api_name": "aioxmpp.httpupload.xso.Slot", "line_number": 383, "usage_type": "call"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 383, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 384, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 384, 
"usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso.Get", "line_number": 384, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 384, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 385, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 385, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso.Put", "line_number": 385, "usage_type": "attribute"}, {"api_name": "aioxmpp.httpupload.xso", "line_number": 385, "usage_type": "name"}]} +{"seq_id": "148394243", "text": "# stock_market/urls.py\n\nfrom django.urls import path\nfrom .views import TransactionCreateView, InventoryListView, AverageBuyPriceView\n\nurlpatterns = [\n path('transactions/', TransactionCreateView.as_view(), name='transaction-create'),\n path('inventory/', InventoryListView.as_view(), name='inventory-list'),\n path('average_buy_price/', AverageBuyPriceView.as_view(), name='average-buy-price'),\n]\n", "repo_name": "Meet26499/Stock-Market", "sub_path": "stock_market/stock_management/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 405, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.TransactionCreateView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "views.TransactionCreateView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "views.InventoryListView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "views.InventoryListView", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.AverageBuyPriceView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "views.AverageBuyPriceView", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "7565251403", "text": "# 执行的方式 Python 04_q_learning_for_trading.py > DQN.log\n\nimport warnings\nwarnings.filterwarnings('ignore') # 忽略warning错误信息\n\nfrom pathlib import Path\nfrom time import time\nfrom collections import deque\nfrom random import sample\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import FuncFormatter\nimport seaborn as sns\n\nimport tensorflow as tf\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.regularizers import l2\n# from trading_env import as trading_environment\n\nimport gym\nfrom gym.envs.registration import register\n\nnp.random.seed(42)\ntf.random.set_seed(42)\n\n# 将时间信息格式化\ndef format_time(t):\n m_, s = divmod(t, 60)\n h, m = divmod(m_, 60)\n return '{:02.0f}:{:02.0f}:{:02.0f}'.format(h, m, s)\n\n# 定义交易天数\ntrading_days = 252\n\n# 注册环境 进口为 trading_env.py 文件中的TradingEnvironment类\nregister(\n id='trading-v0',\n entry_point='trading_env:TradingEnvironment', \n max_episode_steps=trading_days\n)\n\ntrading_environment = gym.make('trading-v0')\ntrading_environment.env.trading_days = trading_days\ntrading_environment.env.trading_cost_bps = 1e-3\ntrading_environment.env.time_cost_bps = 1e-4\ntrading_environment.env.ticker = 'AAPL'\ntrading_environment.seed(42)\n\n# 环境参数\nstate_dim = trading_environment.observation_space.shape[0] \nnum_actions = trading_environment.action_space.n\nmax_episode_steps = 
trading_environment.spec.max_episode_steps\n\nprint (state_dim) # dimensionality of the observation space\nprint (num_actions) # number of actions\nprint (max_episode_steps) # maximum number of steps\n\n# Define the trading agent\nclass DDQNAgent:\n    def __init__(self, state_dim,\n                 num_actions,\n                 learning_rate,\n                 gamma,\n                 epsilon_start,\n                 epsilon_end,\n                 epsilon_decay_steps,\n                 epsilon_exponential_decay,\n                 replay_capacity,\n                 architecture,\n                 l2_reg,\n                 tau,\n                 batch_size):\n\n        self.state_dim = state_dim # 10 state features\n        self.num_actions = num_actions # 3 actions: long, hold, short\n        self.experience = deque([], maxlen=replay_capacity) # replay buffer of (s, a, s', r, done) transitions\n        self.learning_rate = learning_rate\n        self.gamma = gamma\n        self.architecture = architecture\n        self.l2_reg = l2_reg\n\n        self.online_network = self.build_model() \n        self.target_network = self.build_model(trainable=False)\n        self.update_target()\n\n        self.epsilon = epsilon_start\n        self.epsilon_decay_steps = epsilon_decay_steps\n        self.epsilon_decay = (epsilon_start - epsilon_end) / epsilon_decay_steps\n        self.epsilon_exponential_decay = epsilon_exponential_decay\n        self.epsilon_history = []\n\n        self.total_steps = self.train_steps = 0\n        self.episodes = self.episode_length = self.train_episodes = 0\n        self.steps_per_episode = []\n        self.episode_reward = 0\n        self.rewards_history = []\n\n        self.batch_size = batch_size\n        self.tau = tau\n        self.losses = []\n        self.idx = tf.range(batch_size)\n        self.train = True\n\n    def build_model(self, trainable=True):\n        layers = []\n        n = len(self.architecture)\n        for i, units in enumerate(self.architecture, 1):\n            layers.append(Dense(units=units,\n                                input_dim=self.state_dim if i == 1 else None,\n                                activation='relu',\n                                kernel_regularizer=l2(self.l2_reg),\n                                name=f'Dense_{i}',\n                                trainable=trainable))\n        layers.append(Dropout(.1))\n        layers.append(Dense(units=self.num_actions,\n                            trainable=trainable,\n                            name='Output'))\n        model = Sequential(layers)\n        model.compile(loss='mean_squared_error',\n                      optimizer=Adam(lr=self.learning_rate))\n        return model\n\n    def update_target(self):\n        self.target_network.set_weights(self.online_network.get_weights()) # copy the online network weights to the target network\n\n    def epsilon_greedy_policy(self, state):\n        self.total_steps += 1\n        if np.random.rand() <= self.epsilon:\n            return np.random.choice(self.num_actions) # with probability epsilon, choose a random action\n        q = self.online_network.predict(state) # with probability 1-epsilon, choose the action that maximizes Q(s, a)\n        return np.argmax(q, axis=1).squeeze()\n\n    def memorize_transition(self, s, a, r, s_prime, not_done):\n        if not_done:\n            self.episode_reward += r\n            self.episode_length += 1\n        else:\n            if self.train: # decay epsilon step by step\n                if self.episodes < self.epsilon_decay_steps:\n                    self.epsilon -= self.epsilon_decay\n                else:\n                    self.epsilon *= self.epsilon_exponential_decay\n\n            self.episodes += 1\n            self.rewards_history.append(self.episode_reward)\n            self.steps_per_episode.append(self.episode_length)\n            self.episode_reward, self.episode_length = 0, 0\n\n        self.experience.append((s, a, r, s_prime, not_done)) # append the (s, a, r, s_prime, not_done) transition to the replay buffer\n\n    def experience_replay(self):\n        if self.batch_size > len(self.experience):\n            return\n        minibatch = map(np.array, zip(*sample(self.experience, self.batch_size)))\n        states, actions, rewards, next_states, not_done = minibatch\n\n        next_q_values = self.online_network.predict_on_batch(next_states)\n        best_actions = tf.argmax(next_q_values, axis=1)\n\n        next_q_values_target = self.target_network.predict_on_batch(next_states)\n        target_q_values = tf.gather_nd(next_q_values_target,\n                                       tf.stack((self.idx, tf.cast(best_actions, tf.int32)), axis=1))\n\n        targets = rewards + not_done * self.gamma * target_q_values\n\n 
q_values = self.online_network.predict_on_batch(states)\n        q_values[self.idx, actions] = targets\n\n        loss = self.online_network.train_on_batch(x=states, y=q_values)\n        self.losses.append(loss)\n\n        if self.total_steps % self.tau == 0:\n            self.update_target()\n\n# Model hyperparameters and network architecture\ngamma = .99 # discount factor\ntau = 100 # target network update frequency\n\narchitecture = (256, 256) # units per layer\nlearning_rate = 0.0001 # learning rate\nl2_reg = 1e-6 # L2 regularization\nreplay_capacity = int(1e6)\nbatch_size = 4096\n\nepsilon_start = 1.0\nepsilon_end = .01\nepsilon_decay_steps = 250\nepsilon_exponential_decay = .99\n\ntf.keras.backend.clear_session()\n\nddqn = DDQNAgent(state_dim=state_dim,\n                 num_actions=num_actions,\n                 learning_rate=learning_rate,\n                 gamma=gamma,\n                 epsilon_start=epsilon_start,\n                 epsilon_end=epsilon_end,\n                 epsilon_decay_steps=epsilon_decay_steps,\n                 epsilon_exponential_decay=epsilon_exponential_decay,\n                 replay_capacity=replay_capacity,\n                 architecture=architecture,\n                 l2_reg=l2_reg,\n                 tau=tau,\n                 batch_size=batch_size)\n\nddqn.online_network.summary()\n\n\n# Training settings\ntotal_steps = 0\nmax_episodes = 1000\n# Containers for training statistics\nepisode_time, navs, market_navs, diffs, episode_eps = [], [], [], [], []\n\n\ndef track_results(episode, nav_ma_100, nav_ma_10,\n                  market_nav_100, market_nav_10,\n                  win_ratio, total, epsilon):\n    time_ma = np.mean([episode_time[-100:]])\n    T = np.sum(episode_time)\n    \n    template = '{:>4d} | {} | Agent: {:>6.1%} ({:>6.1%}) | '\n    template += 'Market: {:>6.1%} ({:>6.1%}) | '\n    template += 'Wins: {:>5.1%} | eps: {:>6.3f}'\n    print(template.format(episode, format_time(total), \n                          nav_ma_100-1, nav_ma_10-1, \n                          market_nav_100-1, market_nav_10-1, \n                          win_ratio, epsilon))\n\n\n# Start training\nstart = time()\n\nresults = [] # training results\n\nfor episode in range(1, max_episodes + 1):\n    # Reset the environment\n    this_state = trading_environment.reset()\n    # Walk through the episode from step 1 to step 252\n    for episode_step in range(max_episode_steps):\n        \n        action = ddqn.epsilon_greedy_policy(this_state.reshape(-1, state_dim))\n        next_state, reward, done, _ = trading_environment.step(action)\n        print(\"epi:\", episode, \"|step:\", episode_step, \"|action:\", action, \"|next_state:\",next_state, \"|reward:\", reward, \"done:\", done)\n\n        ddqn.memorize_transition(this_state, \n                                 action, \n                                 reward, \n                                 next_state, \n                                 0.0 if done else 1.0)\n        if ddqn.train:\n            ddqn.experience_replay()\n        if done:\n            break\n        this_state = next_state\n    \n    # Per-step simulator results of this episode\n    result = trading_environment.env.simulator.result()\n    # Result of the final step\n    final = result.iloc[-1]\n\n    # Record the strategy NAV\n    nav = final.nav * (1 + final.strategy_return)\n    navs.append(nav)\n\n    # Record the market NAV\n    market_nav = final.market_nav\n    market_navs.append(market_nav)\n    \n    # Difference between strategy and market\n    diff = nav - market_nav\n    diffs.append(diff)\n    # Print training results every 10 episodes\n    if episode % 10 == 0:\n        track_results(episode, np.mean(navs[-100:]), np.mean(navs[-10:]), \n                      np.mean(market_navs[-100:]), np.mean(market_navs[-10:]), \n                      np.sum([s > 0 for s in diffs[-100:]])/min(len(diffs), 100), \n                      time() - start, ddqn.epsilon)\n    # Stop training once the strategy has beaten the market for 25 consecutive episodes\n    if len(diffs) > 25 and all([r > 0 for r in diffs[-25:]]):\n        print(result.tail())\n        break\n\ntrading_environment.close()\n\n\n# Save training results\nresults = pd.DataFrame({'Episode': list(range(1, episode+1)),\n                        'Agent': navs,\n                        'Market': market_navs,\n                        'Difference': diffs}).set_index('Episode')\n\nresults['Strategy Wins (%)'] = (results.Difference > 0).rolling(100).sum()\nresults.info()\nresults_path = Path('results') # assumed output directory; not defined anywhere else in this script\nresults_path.mkdir(exist_ok=True)\nresults.to_csv(results_path / 'dqn_results.csv', index=False)\n", "repo_name": "joomladigger/TDLambda", "sub_path": "RL4T/DQN.py", "file_name": 
"DQN.py", "file_ext": "py", "file_size_in_byte": 10542, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "warnings.filterwarnings", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.random.set_seed", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 29, "usage_type": "attribute"}, {"api_name": "gym.envs.registration.register", "line_number": 41, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 47, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.range", "line_number": 106, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.keras.regularizers.l2", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 133, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 134, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 159, "usage_type": "attribute"}, {"api_name": "random.sample", "line_number": 159, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 163, "usage_type": "call"}, {"api_name": "tensorflow.gather_nd", "line_number": 166, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 167, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.clear_session", "line_number": 195, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 195, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 225, "usage_type": "call"}, {"api_name": "time.time", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 282, "usage_type": "call"}, {"api_name": "time.time", "line_number": 283, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 293, "usage_type": "call"}]} +{"seq_id": "20827037483", "text": "import os\r\nimport re\r\nimport sys\r\nimport math\r\nimport time\r\n\r\nfrom multiprocessing import Process, Manager\r\nfrom tqdm import tqdm\r\nimport xlrd\r\nimport numpy as np\r\nimport pickle as pkl\r\n\r\nfrom all_in_one.data.data_utils import utils\r\nfrom all_in_one.data.data_utils import tokenizer\r\nfrom all_in_one.config import config_utils\r\nroot_path = sys.path[0]\r\nconfig_path = root_path + 
'/all_in_one/config/base_config.ini'\r\n\r\n\r\nclass DataProcessor:\r\n\r\n    def __init__(self, file_path, model_name, sheet_num=0, tokenizer_tool='thulac', seg_only=False, max_pro=5, **kwargs):\r\n        self.config_args = config_utils.Config()(config_path)['data_utils']\r\n        self.file_path = os.path.join(root_path, 'all_in_one/data/data_excel/' + file_path)\r\n        self.save_path = os.path.join(root_path, self.config_args['save_path'])\r\n        self.workbook = xlrd.open_workbook(self.file_path)\r\n        self.sheet = self.workbook.sheet_by_index(sheet_num)\r\n        self.model_name = model_name\r\n        self.max_pro = max_pro\r\n        self.kwargs = {\r\n            'category': 0,\r\n            'content': 1,\r\n            'label1': 2,\r\n        }\r\n        if kwargs:\r\n            self.kwargs.update(kwargs)\r\n        if tokenizer_tool:\r\n            self.tokenizer_dict = {'char': tokenizer.CharacterTokenizer,\r\n                                   'thulac': tokenizer.ThulacTokenizer,\r\n                                   'jieba': tokenizer.JiebaTokenizer,\r\n                                   'hanlp': tokenizer.HanlpTokenizer}\r\n            if tokenizer_tool not in self.tokenizer_dict.keys():\r\n                raise NotImplementedError\r\n            if tokenizer_tool == 'char':\r\n                self.tokenizer = tokenizer.CharacterTokenizer()\r\n            if tokenizer_tool == 'thulac':\r\n                segment_model_path = os.path.join(root_path, self.config_args['thulac_path'])\r\n                self.tokenizer = self.tokenizer_dict[tokenizer_tool](\r\n                    segment_model_path=segment_model_path, seg_only=seg_only)\r\n            if tokenizer_tool == 'jieba':\r\n                segment_model_path = os.path.join(root_path, self.config_args['jieba_path'])\r\n        else:\r\n            self.tokenizer = tokenizer.CharacterTokenizer()\r\n\r\n    def get_formed_data(self, call_back=None):\r\n        nrows = self.sheet_nrows\r\n        lines = []\r\n        for line_num in range(1, nrows):\r\n            # line = self.sheet.row_values(line_num)\r\n            line = [line if line else 0 for line in self.sheet.row_values(line_num)]\r\n            if not any(line):\r\n                continue\r\n            label_col_num = sorted([v for k, v in self.kwargs.items() if re.match('^label.+', k)])\r\n            line = [line[self.kwargs['content']].strip().replace('\\n', ''), [\r\n                int(float(line[label])) for label in label_col_num if line[label]]]\r\n            lines.append(line)\r\n        if call_back:\r\n            lines = call_back(lines)\r\n        # Returns [[content, [label1, label2]], ..., [content, [label1, label2, label3]]] where content='我 在 人民广场 吃 炸鸡' (space-separated tokens)\r\n        return lines\r\n\r\n    def get_cuted_data(self, write=True, call_back=None):\r\n        file_name = os.path.join(self.save_path, self.model_name,\r\n                                 self.model_name + '_cut_result.txt')\r\n        dir_path, _ = os.path.split(file_name)\r\n        np.random.seed(8899)\r\n        if not os.path.exists(dir_path):\r\n            os.makedirs(dir_path)\r\n        lines = self.get_formed_data()\r\n        m = Manager()\r\n        d = m.list()\r\n        tmp_n = int(math.ceil(len(lines) / self.max_pro))\r\n        tmp_lines = [lines[i:i + tmp_n] for i in range(0, len(lines), tmp_n)]\r\n\r\n        tbar = tqdm(total=len(lines))\r\n\r\n        def cut_func(*lines):\r\n            cut_tmp_lines = []\r\n            n_update = np.random.randint(20, 50)\r\n            for line in lines:\r\n                cut_tmp_lines.append([self.tokenizer.cut(line[0]), line[1]])\r\n                if len(cut_tmp_lines) % n_update == 0:\r\n                    d.extend(cut_tmp_lines)\r\n                    cut_tmp_lines = []\r\n            d.extend(cut_tmp_lines)\r\n\r\n        def bar_func():\r\n            l0 = 0\r\n            while l0 < len(lines):\r\n                time.sleep(1.0)\r\n                tbar.update(len(d) - l0)\r\n                l0 = len(d)\r\n\r\n        process_list = []\r\n        for ls in tmp_lines:\r\n            p = Process(target=cut_func, args=(ls))\r\n            p.start()\r\n            process_list.append(p)\r\n        p = Process(target=bar_func)\r\n        p.start()\r\n        process_list.append(p)\r\n        for p in process_list:\r\n            p.join()\r\n\r\n        lines = sorted(list(d))\r\n        if call_back:\r\n            lines = call_back(lines)\r\n        if write:\r\n            with 
open(file_name, 'w') as f:\r\n for line in lines:\r\n f.write(line[0] + '\\t' + '\\t'.join([str(i) for i in line[1]]) + '\\n')\r\n print('Write {} Success!'.format(file_name))\r\n file_name = os.path.join(self.save_path, self.model_name,\r\n self.model_name + '_cut_result.pkl')\r\n with open(file_name, 'wb') as f:\r\n pkl.dump(lines, f)\r\n return lines\r\n\r\n def excel2raw(self, write=True, call_back=None):\r\n file_name = os.path.join(self.save_path, self.model_name,\r\n self.model_name + '_excel2raw.txt')\r\n dir_path, _ = os.path.split(file_name)\r\n if not os.path.exists(dir_path):\r\n os.makedirs(dir_path)\r\n lines = self.get_formed_data()\r\n if call_back:\r\n lines = call_back(lines)\r\n if write:\r\n with open(file_name, 'w') as f:\r\n for line in lines:\r\n f.write(line[0] + '\\t' + '\\t'.join([str(i) for i in line[1]]) + '\\n')\r\n print('Write {} Success!'.format(file_name))\r\n return lines\r\n\r\n def excel2id(self, modify_vocab=utils.default4vocab, write=True, call_back=None, reflush=False):\r\n file_name = os.path.join(self.save_path, self.model_name, self.model_name + '_excel2id.pkl')\r\n dir_path, _ = os.path.split(file_name)\r\n if not os.path.exists(dir_path):\r\n os.makedirs(dir_path)\r\n # load vocab_inword\r\n if not reflush:\r\n load_file_name = os.path.join(self.save_path, self.model_name,\r\n self.model_name + '_vocab_inword.pkl')\r\n if os.path.exists(load_file_name):\r\n with open(load_file_name, 'rb') as f:\r\n vocab_dict = pkl.load(f)\r\n else:\r\n print('Not Found {}, is reflushing!'.format(load_file_name))\r\n vocab_dict = self.build_vocab_inword(\r\n modify_vocab=modify_vocab, write=True, call_back=call_back)\r\n else:\r\n vocab_dict = self.build_vocab_inword(\r\n modify_vocab=modify_vocab, write=True, call_back=call_back)\r\n # load cuted_data\r\n if not reflush:\r\n load_file_name = os.path.join(self.save_path, self.model_name,\r\n self.model_name + '_cut_result.pkl')\r\n if os.path.exists(load_file_name):\r\n with open(load_file_name, 'rb') as f:\r\n lines = pkl.load(f)\r\n else:\r\n print('Not Found {}, is reflushing!'.format(load_file_name))\r\n lines = self.get_cuted_data(write=True, call_back=call_back)\r\n else:\r\n lines = self.get_cuted_data(write=True, call_back=call_back)\r\n\r\n labels_dict = self.build_label_class_mapping(write=False, call_back=None)\r\n def mapping_label(labels_list):\r\n labels_list = [labels_dict[label] for label in labels_list]\r\n label_id = [1 if i in labels_list else 0 for i in range(len(labels_dict))]\r\n return label_id\r\n lines = [[[vocab_dict.get(word, vocab_dict['OOV']) for word in line[0].split(' ')], mapping_label(line[1])]\r\n for line in lines]\r\n if write:\r\n with open(file_name, 'wb') as f:\r\n pkl.dump(lines, f)\r\n print('Write {} Success!'.format(file_name))\r\n return lines # [[1,2,3,4...word_id],[1,0...label_id]...]\r\n\r\n def build_vocab_inword(self, modify_vocab=None, write=True, call_back=None, reflush=False):\r\n file_name = os.path.join(self.save_path, self.model_name,\r\n self.model_name + '_vocab_inword.pkl')\r\n dir_path, _ = os.path.split(file_name)\r\n if not os.path.exists(dir_path):\r\n os.makedirs(dir_path)\r\n if not reflush:\r\n load_file_name = os.path.join(self.save_path, self.model_name,\r\n self.model_name + '_cut_result.pkl')\r\n if os.path.exists(load_file_name):\r\n with open(load_file_name, 'rb') as f:\r\n lines = pkl.load(f)\r\n else:\r\n print('Not Found {}, is reflushing!'.format(load_file_name))\r\n lines = self.get_cuted_data(write=True, call_back=call_back)\r\n 
else:\r\n lines = self.get_cuted_data(write=True, call_back=call_back)\r\n\r\n vocab_dict = {}\r\n if modify_vocab:\r\n vocab_dict.update(modify_vocab())\r\n vocab_list = []\r\n for line in lines:\r\n vocab_list.extend(line[0].strip().split(' '))\r\n vocab_list = sorted(list(set(vocab_list)))\r\n for word in vocab_list:\r\n vocab_dict.setdefault(word, len(vocab_dict))\r\n if write:\r\n with open(file_name, 'wb') as f:\r\n pkl.dump(vocab_dict, f)\r\n print('Write {} Success!'.format(file_name))\r\n return vocab_dict\r\n\r\n def build_word_embedding(self, word_embedding_name='thulac', word_embedding_dims=100, modify_vocab=None, write=True, call_back=None, reflush=False):\r\n file_name = os.path.join(self.save_path, self.model_name,\r\n self.model_name + '_word_embedding.pkl')\r\n dir_path, _ = os.path.split(file_name)\r\n if not os.path.exists(dir_path):\r\n os.makedirs(dir_path)\r\n word_vec = {}\r\n if word_embedding_name == 'thulac':\r\n word_embedding_path = os.path.join(root_path, self.config_args[\r\n 'thulac_word_embedding_path'])\r\n # if word_embedding_name == 'thulac':\r\n # word_embedding_path = self.config_args['thulac_word_embedding_path']\r\n with open(word_embedding_path, 'r') as f:\r\n for line in f:\r\n line = line.strip().split()\r\n v = [float(line[i]) for i in range(1, len(line))]\r\n word_vec[line[0]] = np.asarray(v)\r\n word_embedding = list()\r\n word_embedding_rand = list()\r\n word_embedding_pretrained = 0\r\n # get vocab_dict\r\n if not reflush:\r\n load_file_name = os.path.join(self.save_path, self.model_name,\r\n self.model_name + '_vocab_inword.pkl')\r\n if os.path.exists(load_file_name):\r\n with open(load_file_name, 'rb') as f:\r\n vocab_dict = pkl.load(f)\r\n else:\r\n print('Not Found {}, is reflushing!'.format(load_file_name))\r\n vocab_dict = self.build_vocab_inword(\r\n write=True, modify_vocab=modify_vocab, call_back=call_back)\r\n else:\r\n vocab_dict = self.build_vocab_inword(\r\n write=True, modify_vocab=modify_vocab, call_back=call_back)\r\n vocab_list = sorted(vocab_dict.items(), key=lambda x: x[1]) # sorted?\r\n # get maxlen\r\n if not reflush:\r\n load_file_name = os.path.join(self.save_path, self.model_name,\r\n self.model_name + '_cut_result.pkl')\r\n if os.path.exists(load_file_name):\r\n with open(load_file_name, 'rb') as f:\r\n lines = pkl.load(f)\r\n else:\r\n print('Not Found {}, is reflushing!'.format(load_file_name))\r\n lines = self.get_cuted_data(write=True, call_back=call_back)\r\n else:\r\n lines = self.get_cuted_data(write=True, call_back=call_back)\r\n maxlen = max([len(line[0].split()) for line in lines])\r\n for i in range(len(vocab_list)):\r\n if i == 0:\r\n word_embedding.append(np.zeros(word_embedding_dims, dtype='float32'))\r\n word_embedding_rand.append(np.zeros(word_embedding_dims, dtype='float32'))\r\n else:\r\n if vocab_list[i][0] in word_vec:\r\n word_embedding.append(word_vec[vocab_list[i][0]])\r\n word_embedding_pretrained += 1\r\n else:\r\n word_embedding.append(np.random.uniform(-0.25, 0.25, word_embedding_dims))\r\n word_embedding_rand.append(np.random.uniform(-0.25, 0.25, word_embedding_dims))\r\n print('{}/{} in Pre-trained Word Embeddings.'.format(word_embedding_pretrained, len(word_embedding)))\r\n word_embedding = {\"pretrain\": {\"word_embedding\": word_embedding},\r\n \"random\": {\"word_embedding\": word_embedding_rand},\r\n \"maxlen\": maxlen}\r\n if write:\r\n with open(file_name, 'wb') as f:\r\n pkl.dump(word_embedding, f)\r\n print('Write {} Success!'.format(file_name))\r\n return word_embedding\r\n\r\n 
def build_label_class_mapping(self, write=True, call_back=None, reflush=False):\r\n        file_name = os.path.join(self.save_path, self.model_name,\r\n                                 self.model_name + '_label_class_mapping.pkl')\r\n        dir_path, _ = os.path.split(file_name)\r\n        if not os.path.exists(dir_path):\r\n            os.makedirs(dir_path)\r\n        if not reflush and os.path.exists(file_name):\r\n            labels_dict = pkl.load(open(file_name, 'rb'))\r\n            print('Load {} directly!'.format(file_name))\r\n        else:\r\n            label_col_num = sorted([v for k, v in self.kwargs.items() if re.match('^label.+', k)])\r\n            labels_list = [int(float(label))\r\n                           for col in label_col_num for label in self.sheet.col_values(col)[1:] if label] # [1:] skips the header row\r\n            labels_list = list(set(labels_list))\r\n            labels_list.sort()\r\n            if call_back:\r\n                call_back(labels_list)\r\n            labels_dict = dict(zip(labels_list, range(len(labels_list))))\r\n            if write:\r\n                with open(file_name, 'wb') as f:\r\n                    pkl.dump(labels_dict, f)\r\n                print('Write {} Success!'.format(file_name))\r\n        return labels_dict\r\n\r\n    def build_cross_validation(self, file_num=10, modify_vocab=None, call_back=None, reflush=False):\r\n        np.random.seed(8899)\r\n        if not reflush:\r\n            load_file_name = os.path.join(self.save_path, self.model_name,\r\n                                          self.model_name + '_excel2id.pkl')\r\n            if os.path.exists(load_file_name):\r\n                with open(load_file_name, 'rb') as f:\r\n                    lines = pkl.load(f)\r\n            else:\r\n                print('Not Found {}, is reflushing!'.format(load_file_name))\r\n                lines = self.excel2id(write=True, modify_vocab=modify_vocab, call_back=call_back)\r\n        else:\r\n            lines = self.excel2id(write=True, modify_vocab=modify_vocab, call_back=call_back)\r\n        utils.cv(model_name=self.model_name, save_path=self.save_path,\r\n                 lines=lines, write=True, segment_num=file_num)\r\n\r\n    @property\r\n    def sheet_nrows(self):\r\n        return self.sheet.nrows\r\n\r\n    @property\r\n    def sheet_ncols(self):\r\n        return self.sheet.ncols\r\n\r\n    @property\r\n    def sheet_head(self):\r\n        return self.sheet.row_values(0)\r\n\r\n    def __repr__(self):\r\n        row0 = '\\t\\t\\t'.join([str(item)[:30] for item in self.sheet.row_values(0)])\r\n        row1 = '\\t'.join([str(item)[:30] for item in self.sheet.row_values(1)])\r\n        row2 = '\\t'.join([str(item)[:30] for item in self.sheet.row_values(2)])\r\n        row3 = '\\t'.join([str(item)[:30] for item in self.sheet.row_values(3)])\r\n        rown = '\\t'.join([str(item)[:30] for item in self.sheet.row_values(self.sheet_nrows - 1)])\r\n        sheet_info = 'total:\\trow:' + str(self.sheet_nrows) + '\\tcol:' + str(self.sheet_ncols)\r\n        CRLF = '\\n'\r\n        preview_data = row0 + CRLF + row1 + CRLF + row2 + CRLF + row3 + CRLF * 2 + rown + CRLF + sheet_info\r\n        return preview_data\r\n\r\n    __str__ = __repr__\r\n\r\n\r\nif __name__ == '__main__':\r\n    # test = DataProcessor('qrsd4268.xlsx', tokenizer_tool='thulac', label2=3, label3=4)\r\n    test = DataProcessor(file_path='zzxs.xlsx', model_name='zzxs', tokenizer_tool='thulac',)\r\n    test.excel2raw()\r\n    # test.get_cuted_data()\r\n    # test.get_cuted_data(call_back=utils.sub_num_name_add_padding)\r\n    test.build_vocab_inword(modify_vocab=utils.prepro_vocab1,\r\n                            call_back=utils.sub_num_name_add_padding, reflush=False)\r\n    test.excel2id(modify_vocab=utils.prepro_vocab1,\r\n                  call_back=utils.sub_num_name_add_padding, reflush=False)\r\n    test.build_label_class_mapping()\r\n    test.build_word_embedding(modify_vocab=utils.prepro_vocab1,\r\n                              call_back=utils.sub_num_name_add_padding, reflush=False)\r\n    # Run cross validation; calls get_excel2id, get_cuted_data and build_vocab_inword and writes all their outputs together\r\n    test.build_cross_validation(modify_vocab=utils.prepro_vocab1,\r\n 
call_back=utils.sub_num_name_add_padding, reflush=False)\r\n", "repo_name": "safpla/all_in_one", "sub_path": "data/data_utils/data_processor.py", "file_name": "data_processor.py", "file_ext": "py", "file_size_in_byte": 17721, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "all_in_one.config.config_utils.Config", "line_number": 23, "usage_type": "call"}, {"api_name": "all_in_one.config.config_utils", "line_number": 23, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "xlrd.open_workbook", "line_number": 26, "usage_type": "call"}, {"api_name": "all_in_one.data.data_utils.tokenizer.CharacterTokenizer", "line_number": 38, "usage_type": "attribute"}, {"api_name": "all_in_one.data.data_utils.tokenizer", "line_number": 38, "usage_type": "name"}, {"api_name": "all_in_one.data.data_utils.tokenizer.ThulacTokenizer", "line_number": 39, "usage_type": "attribute"}, {"api_name": "all_in_one.data.data_utils.tokenizer", "line_number": 39, "usage_type": "name"}, {"api_name": "all_in_one.data.data_utils.tokenizer.HanlpTokenizer", "line_number": 40, "usage_type": "attribute"}, {"api_name": "all_in_one.data.data_utils.tokenizer", "line_number": 40, "usage_type": "name"}, {"api_name": "all_in_one.data.data_utils.tokenizer.JiebaTokenizer", "line_number": 41, "usage_type": "attribute"}, {"api_name": "all_in_one.data.data_utils.tokenizer", "line_number": 41, "usage_type": "name"}, {"api_name": "all_in_one.data.data_utils.tokenizer.CharacterTokenizer", "line_number": 45, "usage_type": "call"}, {"api_name": "all_in_one.data.data_utils.tokenizer", "line_number": 45, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "all_in_one.data.data_utils.tokenizer.CharacterTokenizer", "line_number": 53, "usage_type": "call"}, {"api_name": "all_in_one.data.data_utils.tokenizer", "line_number": 53, "usage_type": "name"}, {"api_name": "re.match", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 78, "usage_type": "call"}, {"api_name": "multiprocessing.Manager", "line_number": 80, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 82, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.random", 
"line_number": 89, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 100, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 106, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 134, "usage_type": "call"}, {"api_name": "all_in_one.data.data_utils.utils.default4vocab", "line_number": 145, "usage_type": "attribute"}, {"api_name": "all_in_one.data.data_utils.utils", "line_number": 145, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 170, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path", "line_number": 193, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path", "line_number": 194, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 197, "usage_type": "call"}, {"api_name": "os.path", "line_number": 197, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path", "line_number": 199, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 201, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 219, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path", "line_number": 
224, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 226, "usage_type": "call"}, {"api_name": "os.path", "line_number": 226, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 227, "usage_type": "call"}, {"api_name": "os.path", "line_number": 227, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 231, "usage_type": "call"}, {"api_name": "os.path", "line_number": 231, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 239, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 245, "usage_type": "call"}, {"api_name": "os.path", "line_number": 245, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 247, "usage_type": "call"}, {"api_name": "os.path", "line_number": 247, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 249, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 260, "usage_type": "call"}, {"api_name": "os.path", "line_number": 260, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path", "line_number": 262, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 280, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 281, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 293, "usage_type": "call"}, {"api_name": "os.path", "line_number": 293, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 295, "usage_type": "call"}, {"api_name": "os.path", "line_number": 295, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 296, "usage_type": "call"}, {"api_name": "os.path", "line_number": 296, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 297, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 298, "usage_type": "call"}, {"api_name": "os.path", "line_number": 298, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 299, "usage_type": "call"}, {"api_name": "re.match", "line_number": 302, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 317, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 317, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 319, "usage_type": "call"}, {"api_name": "os.path", "line_number": 319, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 321, "usage_type": "call"}, {"api_name": "os.path", "line_number": 321, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 323, "usage_type": "call"}, {"api_name": "all_in_one.data.data_utils.utils.cv", "line_number": 329, "usage_type": "call"}, {"api_name": "all_in_one.data.data_utils.utils", "line_number": 329, "usage_type": "name"}, {"api_name": "all_in_one.data.data_utils.utils.prepro_vocab1", "line_number": 364, "usage_type": "attribute"}, {"api_name": 
"all_in_one.data.data_utils.utils", "line_number": 364, "usage_type": "name"}, {"api_name": "all_in_one.data.data_utils.utils.sub_num_name_add_padding", "line_number": 365, "usage_type": "attribute"}, {"api_name": "all_in_one.data.data_utils.utils", "line_number": 365, "usage_type": "name"}, {"api_name": "all_in_one.data.data_utils.utils.prepro_vocab1", "line_number": 366, "usage_type": "attribute"}, {"api_name": "all_in_one.data.data_utils.utils", "line_number": 366, "usage_type": "name"}, {"api_name": "all_in_one.data.data_utils.utils.sub_num_name_add_padding", "line_number": 367, "usage_type": "attribute"}, {"api_name": "all_in_one.data.data_utils.utils", "line_number": 367, "usage_type": "name"}, {"api_name": "all_in_one.data.data_utils.utils.prepro_vocab1", "line_number": 369, "usage_type": "attribute"}, {"api_name": "all_in_one.data.data_utils.utils", "line_number": 369, "usage_type": "name"}, {"api_name": "all_in_one.data.data_utils.utils.sub_num_name_add_padding", "line_number": 370, "usage_type": "attribute"}, {"api_name": "all_in_one.data.data_utils.utils", "line_number": 370, "usage_type": "name"}, {"api_name": "all_in_one.data.data_utils.utils.prepro_vocab1", "line_number": 372, "usage_type": "attribute"}, {"api_name": "all_in_one.data.data_utils.utils", "line_number": 372, "usage_type": "name"}, {"api_name": "all_in_one.data.data_utils.utils.sub_num_name_add_padding", "line_number": 373, "usage_type": "attribute"}, {"api_name": "all_in_one.data.data_utils.utils", "line_number": 373, "usage_type": "name"}]} +{"seq_id": "36242451011", "text": "from flask import Flask,request,render_template\nimport pandas as pd\nfrom src.exception import CustomeException\nfrom src.logger import logging\nfrom src.pipeline.predict_pipeline import PredictPipeline,CustomData\n\napplication =Flask(__name__,static_folder='/static')\napp=application\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/predict',methods=[\"GET\",\"POST\"])\ndef predit_data():\n if request.method=='GET':\n return render_template('home.html')\n else:\n data=CustomData(\n age=request.form.get('age'),\n bmi=request.form.get('bmi'),\n sex=request.form.get('sex'),\n children=request.form.get('children'),\n smoker=request.form.get('smoker'),\n region=request.form.get('region')\n )\n\n dataframe=data.prepare_data_frame()\n predict_userdata=PredictPipeline()\n result=predict_userdata.predict_pipeline(dataframe)\n\n return render_template('home.html',result=result[0])\n \n\nif __name__==\"__main__\":\n app.run(host=\"0.0.0.0\",debug=True) ", "repo_name": "GovardhanGattu/HealthInsurancePremiumPrediction", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1089, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 17, "usage_type": "call"}, {"api_name": "src.pipeline.predict_pipeline.CustomData", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, 
"usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "src.pipeline.predict_pipeline.PredictPipeline", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "72134377333", "text": "import logging, os, time, coloredlogs\r\nfrom logging.handlers import TimedRotatingFileHandler\r\nfrom gbrick.property import *\r\n\r\nclass Glogger:\r\n\r\n def __init__(self, log_file_name: str = \"BaseClientComm\"):\r\n log_path = LOG_PATH\r\n if not log_path.endswith('/'):\r\n log_path = log_path + '/'\r\n\r\n try:\r\n if not os.path.exists(log_path):\r\n os.makedirs(log_path)\r\n except OSError:\r\n log_path = './'\r\n self.logger.error('Error: Creating directory. 
' + log_path)\r\n\r\n log_file_name = log_path + log_file_name + \".log\"\r\n\r\n # Logging sample: formatter reference - https://docs.python.org/3/library/logging.html#logrecord-attributes\r\n self.logger = logging.getLogger('DbClientComm')\r\n formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s]%(asctime)s > %(message)s')\r\n\r\n fileHandler = TimedRotatingFileHandler(log_file_name, when=\"midnight\", interval=1)\r\n fileHandler.setFormatter(formatter)\r\n fileHandler.suffix = \"%Y%m%d\"\r\n self.logger.addHandler(fileHandler)\r\n\r\n streamHandler = logging.StreamHandler()\r\n streamHandler.setFormatter(formatter)\r\n self.logger.addHandler(streamHandler)\r\n\r\n self.logger.setLevel(logging.WARNING)\r\n\r\n\r\nDEBUG_FORMAT = '%(asctime)s %(process)d %(levelname)s %(message)s'\r\nFILE_FORMAT = '%(asctime)s %(levelname)s %(message)s'\r\n\r\nclass Glog:\r\n def __init__(self):\r\n self.log_path = LOG_PATH +'/'\r\n try:\r\n if not os.path.exists(self.log_path):\r\n os.makedirs(self.log_path)\r\n except IsADirectoryError:\r\n pass\r\n self.glogger = logging.getLogger('gbrick')\r\n self.glogger.setLevel(logging.DEBUG)\r\n filename = self.log_path + 'gbrick' + time.strftime('%y%m%d')+'.log'\r\n coloredlogs.install(level='DEBUG',\r\n fmt=DEBUG_FORMAT,\r\n isatty=True,\r\n )\r\n\r\n fh = logging.FileHandler(filename)\r\n fh.setLevel(logging.INFO)\r\n fh.setFormatter(logging.Formatter(FILE_FORMAT))\r\n self.glogger.addHandler(fh)\r\n\r\n\r\n def debug(self, s):\r\n self.glogger.debug(s)\r\n\r\n def info(self, s):\r\n self.glogger.info(s)\r\n\r\n def warning(self, s):\r\n self.glogger.warning(s)\r\n\r\n def error(self, s):\r\n self.glogger.error(s)\r\n\r\n def critical(self, s):\r\n self.glogger.critical(s)\r\n\r\nglogger = Glog()", "repo_name": "gbxcoin/gbrick-core", "sub_path": "gbrick/common/utils/glogger.py", "file_name": "glogger.py", "file_ext": "py", "file_size_in_byte": 2501, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.path.exists", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.handlers.TimedRotatingFileHandler", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 45, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 49, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 50, "usage_type": "call"}, {"api_name": "coloredlogs.install", "line_number": 51, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 56, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 57, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "8548191026", "text": "# -*- coding: utf-8 -*-\n# yapf -i WeChatGroupAssistant.py\n# .\\WeChatGroupAssistant.py -g 猫言猫语 \"地下交通站\" \"eBooks 
分享\"\n\nimport argparse\nimport collections\nimport os.path\nimport sys\n\nimport openpyxl\nimport psutil\nimport pywinauto\n\ntry:\n import win32gui\n win32gui_available = True\nexcept (ModuleNotFoundError, ImportError):\n win32gui_available = False\n\nGroupMember = collections.namedtuple('GroupMember', ['name', 'nickname'])\n\n\ndef find_pid(process_name):\n pid = -1\n\n for process in psutil.process_iter():\n try:\n pinfo = process.as_dict(attrs=['pid', 'name'])\n except psutil.NoSuchProcess:\n pass\n else:\n if process_name == pinfo['name']:\n pid = pinfo['pid']\n break\n\n return pid\n\n\ndef bring_to_foreground_win32gui(window_class, title):\n hwnd = win32gui.FindWindow(window_class, title)\n win32gui.SetForegroundWindow(hwnd)\n win32gui.ShowWindow(hwnd, 9)\n\n\ndef bring_to_foreground(application):\n top_window = application.top_window()\n\n top_window.minimize()\n top_window.restore()\n\n\ndef get_wechat_v1():\n application = pywinauto.application.Application(backend='uia')\n try:\n wechat_hwnd = pywinauto.findwindows.find_window(\n class_name='WeChatMainWndForPC', title_re=r'微信')\n except pywinauto.WindowNotFoundError as e:\n sys.exit(e)\n\n application.connect(handle=wechat_hwnd)\n\n return application['微信']\n\n\ndef get_wechat_v2():\n application = pywinauto.application.Application(backend='uia').connect(\n class_name='WeChatMainWndForPC',\n title='微信',\n visible_only=True,\n timeout=100)\n return application['微信']\n\n\ndef get_wechat_v3():\n wechat_pid = find_pid('WeChat.exe')\n if wechat_pid == -1:\n print('WeChat is not running.')\n sys.exit(0)\n\n wechat_application = pywinauto.application.Application(\n backend='uia').connect(process=wechat_pid)\n return wechat_application['微信']\n\n\ndef rectangle_center(rectangle):\n return ((rectangle.left + rectangle.right) // 2,\n (rectangle.top + rectangle.bottom) // 2)\n\n\ndef click(window, mouse_button='left', outline=True):\n if outline:\n window.draw_outline()\n\n center = rectangle_center(window.rectangle())\n pywinauto.mouse.click(button=mouse_button, coords=center)\n\n\ndef click_button(window, title, mouse_button='left', outline=True):\n button = window.child_window(title=title, control_type='Button')\n click(button, mouse_button, outline)\n\n\ndef pairwise(iterable):\n \"s -> (s0, s1), (s2, s3), (s4, s5), ...\"\n a = iter(iterable)\n return zip(a, a)\n\n\ndef get_group_members(wechat, group_name):\n click_button(wechat, '聊天')\n chat_list = wechat.child_window(title='会话', control_type='List')\n chat_list.draw_outline()\n\n try:\n group = chat_list.child_window(title_re=group_name + \"*\",\n control_type='ListItem')\n except pywinauto.MatchError:\n print('Failed to find group \"{0}\".'.format(group_name))\n raise\n\n click(group)\n\n group_chat_button = wechat \\\n .children(control_type='Pane')[1] \\\n .children(control_type='Pane')[1] \\\n .children(control_type='Pane')[2] \\\n .children(control_type='Pane')[0] \\\n .children(control_type='Pane')[0] \\\n .children(control_type='Pane')[0] \\\n .children(control_type='Pane')[0] \\\n .children(control_type='Pane')[0] \\\n .children(control_type='Pane')[0] \\\n .children(control_type='Pane')[0] \\\n .children(control_type='Pane')[0] \\\n .children(control_type='Pane')[0] \\\n .children(control_type='Button')[0]\n click(group_chat_button)\n\n # TODO:\n # If there is no more room to expand the chat information window,\n # a SessionChatRoomDetailWnd will appear instead.\n group_information_window = wechat.child_window(title='聊天信息',\n control_type='Window')\n 
group_information_window.draw_outline()\n\n try:\n click_button(group_information_window, '查看更多')\n except pywinauto.findwindows.ElementNotFoundError as e:\n print(e)\n\n member_list = group_information_window.child_window(title='聊天成员',\n control_type='List')\n\n members = []\n count = 0\n\n for member in pairwise(member_list.descendants(control_type='Button')):\n name, nickname = member[0].texts()[0], member[1].texts()[0]\n count += 1\n\n print(f'{count:03} Name: {name}, Nickname: {nickname}')\n if not name and nickname in ('添加', '移出'):\n continue\n\n group_member = GroupMember(name, nickname)\n members.append(group_member)\n\n return members\n\n\ndef is_member_in_group(member, group_members, match_by='both'):\n # TODO: It is too slow.\n names = [m.name for m in group_members]\n nicknames = [m.nickname for m in group_members]\n\n if match_by == 'both':\n result = member in group_members\n if result:\n return str(result)\n else:\n return '{0}({1},{2})'.format(result, member.name in names,\n member.nickname in nicknames)\n elif match_by == 'name':\n return str(member.name in names)\n elif match_by == 'nickname':\n return str(member.nickname in nicknames)\n\n\ndef clear_worksheet(worksheet):\n worksheet.delete_rows(worksheet.min_row,\n worksheet.max_row - worksheet.min_row + 1)\n\n\ndef save_to_worksheet(worksheet, members):\n clear_worksheet(worksheet)\n\n for member in members:\n worksheet.append(tuple(member))\n\n\ndef save_summary(worksheet, reference_group, group_members_dict, match_by):\n assert reference_group in group_members_dict\n\n clear_worksheet(worksheet)\n\n members = group_members_dict[reference_group]\n group_members_dict.pop(reference_group)\n\n other_groups = group_members_dict.keys()\n\n header = GroupMember._fields + tuple(\n ('In {0}?'.format(group) for group in other_groups))\n worksheet.append(header)\n\n for member in members:\n row = tuple(member) + tuple(\n (is_member_in_group(member, group_members_dict[group], match_by)\n for group in other_groups))\n worksheet.append(row)\n\n\ndef parse_args():\n prog = os.path.splitext(os.path.basename(sys.argv[0]))[0]\n\n parser = argparse.ArgumentParser(\n prog=prog,\n fromfile_prefix_chars='@',\n description='Get a list of members of a WeChat group.')\n\n parser.add_argument('-g',\n '--groups',\n nargs='+',\n required=True,\n help='Specify a list of group names.')\n parser.add_argument('-s',\n '--summary',\n action='store_true',\n help='Create summary report.')\n parser.add_argument('-m',\n '--match-by',\n choices=['name', 'nickname', 'both'],\n default='both',\n help='Match strategy.')\n\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n wechat = get_wechat_v2()\n #wechat.Properties.print_control_identifiers()\n\n if win32gui_available:\n bring_to_foreground_win32gui('WeChatMainWndForPC', '微信')\n\n group_members_dict = {}\n\n for group in args.groups:\n if group in group_members_dict:\n print('Group \"{0}\" already handled.'.format(group))\n continue\n\n members = get_group_members(wechat, group)\n group_members_dict[group] = members\n\n workbook = openpyxl.Workbook()\n worksheet = workbook.active\n\n save_to_worksheet(worksheet, members)\n\n workbook.save('{0}.xlsx'.format(group))\n workbook.close()\n\n if len(args.groups) > 1 and args.summary:\n workbook = openpyxl.Workbook()\n worksheet = workbook.active\n\n save_summary(worksheet, args.groups[0], group_members_dict,\n args.match_by)\n\n workbook.save('NotOneLess.xlsx')\n workbook.close()\n\n\nif __name__ == '__main__':\n main()\n\n# References:\n# 
[Python pywinauto search windows with partial title](https://stackoverflow.com/questions/28216222/python-pywinauto-search-windows-with-partial-title)\n# [Using PyWinAuto to control a currently running application](https://stackoverflow.com/questions/39021888/using-pywinauto-to-control-a-currently-running-application)\n# [Python Automatically Collects WeChat Contacts](https://programmer.help/blogs/python-automatically-collects-wechat-contacts.html)\n# [Pywinauto how do I get the list of returned elements](https://stackoverflow.com/questions/46432544/pywinauto-how-do-i-get-the-list-of-returned-elements)\n# [How to access the control identifiers in pywinauto](https://stackoverflow.com/questions/5039642/how-to-access-the-control-identifiers-in-pywinauto)\n# [Iterating over every two elements in a list [duplicate]](https://stackoverflow.com/questions/5389507/iterating-over-every-two-elements-in-a-list)\n# [Pywinauto: unable to bring window to foreground](https://stackoverflow.com/questions/39794729/pywinauto-unable-to-bring-window-to-foreground)\n# [How can I remove a key from a Python dictionary?](https://stackoverflow.com/questions/11277432/how-can-i-remove-a-key-from-a-python-dictionary)\n# [How do I make a window (already running task) visible using pywinauto?](https://stackoverflow.com/questions/50332830/how-do-i-make-a-window-already-running-task-visible-using-pywinauto)\n#\n# PS C:\\Users\\myd\\Desktop\\Ongoing-Study\\python\\pywinauto\\build> .\\WeChatGroupAssistant.exe\n# PS C:\\Users\\myd\\Desktop\\Ongoing-Study\\python\\pywinauto\\build> .\\WeChatGroupAssistant.exe -g 七小\n# Traceback (most recent call last):\n# File \"WeChatGroupAssistant.py\", line 223, in <module>\n# File \"WeChatGroupAssistant.py\", line 201, in main\n# File \"WeChatGroupAssistant.py\", line 113, in get_group_members\n# IndexError: list index out of range\n# [10360] Failed to execute script WeChatGroupAssistant\n# PS C:\\Users\\myd\\Desktop\\Ongoing-Study\\python\\pywinauto\\build> .\\WeChatGroupAssistant.exe -g 七小\n# Traceback (most recent call last):\n# File \"pywinauto\\application.py\", line 258, in __resolve_control\n# File \"pywinauto\\timings.py\", line 458, in wait_until_passes\n# pywinauto.timings.TimeoutError\n#\n# During handling of the above exception, another exception occurred:\n#\n# Traceback (most recent call last):\n# File \"WeChatGroupAssistant.py\", line 223, in <module>\n# File \"WeChatGroupAssistant.py\", line 201, in main\n# File \"WeChatGroupAssistant.py\", line 128, in get_group_members\n# File \"pywinauto\\application.py\", line 379, in __getattribute__\n# File \"pywinauto\\application.py\", line 261, in __resolve_control\n# File \"pywinauto\\timings.py\", line 436, in wait_until_passes\n# File \"pywinauto\\application.py\", line 222, in __get_ctrl\n# File \"pywinauto\\findwindows.py\", line 87, in find_element\n# pywinauto.findwindows.ElementNotFoundError: {'title': '聊天信息', 'control_type': 'Window', 'top_level_only': False, 'parent': <uia_element_info.UIAElementInfo - '微信', WeChatMainWndForPC, 2688358>, 'backend': 'uia'}\n# [6476] Failed to execute script WeChatGroupAssistant\n#\n# [How to pad zeroes to a string?](https://stackoverflow.com/questions/339007/how-to-pad-zeroes-to-a-string)\n", "repo_name": "myd7349/Ongoing-Study", "sub_path": "python/pywinauto/WeChatGroupAssistant.py", "file_name": "WeChatGroupAssistant.py", "file_ext": "py", "file_size_in_byte": 11501, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 20, "dataset": "github-code", "pt": "21", "api": 
[{"api_name": "collections.namedtuple", "line_number": 20, "usage_type": "call"}, {"api_name": "psutil.process_iter", "line_number": 26, "usage_type": "call"}, {"api_name": "psutil.NoSuchProcess", "line_number": 29, "usage_type": "attribute"}, {"api_name": "win32gui.FindWindow", "line_number": 40, "usage_type": "call"}, {"api_name": "win32gui.SetForegroundWindow", "line_number": 41, "usage_type": "call"}, {"api_name": "win32gui.ShowWindow", "line_number": 42, "usage_type": "call"}, {"api_name": "pywinauto.application.Application", "line_number": 53, "usage_type": "call"}, {"api_name": "pywinauto.application", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pywinauto.findwindows.find_window", "line_number": 55, "usage_type": "call"}, {"api_name": "pywinauto.findwindows", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pywinauto.WindowNotFoundError", "line_number": 57, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 58, "usage_type": "call"}, {"api_name": "pywinauto.application.Application", "line_number": 66, "usage_type": "call"}, {"api_name": "pywinauto.application", "line_number": 66, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 78, "usage_type": "call"}, {"api_name": "pywinauto.application.Application", "line_number": 80, "usage_type": "call"}, {"api_name": "pywinauto.application", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pywinauto.mouse.click", "line_number": 95, "usage_type": "call"}, {"api_name": "pywinauto.mouse", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pywinauto.MatchError", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pywinauto.findwindows", "line_number": 148, "usage_type": "attribute"}, {"api_name": "os.path.path.splitext", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 223, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 223, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 223, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 223, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 225, "usage_type": "call"}, {"api_name": "openpyxl.Workbook", "line_number": 267, "usage_type": "call"}, {"api_name": "openpyxl.Workbook", "line_number": 276, "usage_type": "call"}]} +{"seq_id": "9798166773", "text": "from typing import Tuple, Callable, NamedTuple\nfrom collections import namedtuple\n\n\nexample_rgb = (128, 152, 64)\n\nRGB = Tuple[int, int, int]\nred: Callable[[RGB], int] = lambda color: color[0]\nprint(example_rgb, red(example_rgb))\n\n\nColor = namedtuple(\"Color\", \"red green blue name\")\nexample_color = Color(red=128, green=152, blue=64, name=\"name_1\")\nprint(example_color, example_color.red)\n\n\nclass Color(NamedTuple):\n \"\"\"RGB color\"\"\"\n\n red: int\n green: int\n blue: int\n name: str\n\n\nexample_color = Color(red=128, green=152, blue=64, name=\"name_2\")\nprint(example_color, example_color.red)\n", "repo_name": "tinylambda/keep", "sub_path": "functional_program/functional_features/functional_tuple.py", "file_name": "functional_tuple.py", "file_ext": "py", "file_size_in_byte": 600, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.Tuple", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 8, "usage_type": "name"}, {"api_name": "collections.namedtuple", "line_number": 12, "usage_type": 
"call"}, {"api_name": "typing.NamedTuple", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "20726926522", "text": "import io\nfrom operator import mod\nimport sys\n\n_INPUT = \"\"\"\\\njfkdslajfkdlsalshkfjdlahsjklhaf\njfkdaslajfkdlsalshkfjdlahsjklhaf\n\n\"\"\"\nsys.stdin = io.StringIO(_INPUT)\n\n# ---------------------------------\ns = set(map(lambda i: str(i[0] + 1).zfill(8) + i[1], enumerate(list(input()))))\nt = set(map(lambda i: str(i[0] + 1).zfill(8) + i[1], enumerate(list(input()))))\ntmp = t - s\ntmp = list(tmp)\ntmp.sort()\n# print(tmp)\nprint(int(tmp[0][:-1]))\n", "repo_name": "makima333/Atcoder-ganbaru", "sub_path": "contest/abc280/C.py", "file_name": "C.py", "file_ext": "py", "file_size_in_byte": 436, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.stdin", "line_number": 10, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "23099377401", "text": "import cv2\nimport pyautogui\nimport numpy as np\n\n\nclass OpencvWrapper:\n @staticmethod\n def screenshot(coordinate=None):\n \"\"\"\n Args:\n coordinate: (x1, y1, x2, y2) with (x1, y1) in 2nd Quadrant and (x2, y2) in 4th Quadrant.\n Returns:\n img (Image):\n \"\"\"\n if coordinate:\n x, y = coordinate[:2]\n width = coordinate[2]-x\n height = coordinate[3]-y\n print(x, y, width, height)\n img = pyautogui.screenshot(region=(x, y, width, height))\n else:\n img = pyautogui.screenshot()\n return img\n\n @staticmethod\n def image_gray(rgb_img):\n return cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)\n\n @staticmethod\n def search_image(background, template_path, precision=0.95) -> tuple:\n \"\"\"\n Search a given (template) from background by using cv2.matchTemplate\n\n Args:\n background\n template_path\n precision\n\n Returns:\n coordinate\n res\n \"\"\"\n rgb_img = np.array(background)\n gray_img = OpencvWrapper.image_gray(rgb_img)\n template_rgb = cv2.imread(template_path)\n if template_rgb is None:\n raise TypeError(f'Image is not found in path: {template_path}')\n template_gray = OpencvWrapper.image_gray(template_rgb)\n\n size = np.asarray(template_gray.shape[::-1])\n res = cv2.matchTemplate(gray_img, template_gray, cv2.TM_CCOEFF_NORMED)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\n top_left = np.add(np.asarray(max_loc), -0.5*size)\n bottom_right = np.add(np.asarray(max_loc), 0.5*size)\n coordinate = [top_left, bottom_right]\n return coordinate, res\n\n @staticmethod\n def click_image():\n return\n", "repo_name": "ilkergalipatak/recaptcha-solver", "sub_path": "lib/opencv_wrapper.py", "file_name": "opencv_wrapper.py", "file_ext": "py", "file_size_in_byte": 1824, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pyautogui.screenshot", "line_number": 20, "usage_type": "call"}, {"api_name": "pyautogui.screenshot", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.matchTemplate", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.TM_CCOEFF_NORMED", "line_number": 51, "usage_type": "attribute"}, {"api_name": 
"cv2.minMaxLoc", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "69861766774", "text": "\"\"\"OAuth1 module written according to http://oauth.net/core/1.0/#signing_process\"\"\"\n\nfrom requests import Session\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.poolmanager import PoolManager\n\nfrom uuid import uuid4\nimport json\nimport time\nimport re\nimport ssl\n\nfrom emailage import signature, validation\n\n\nclass EmailageClient:\n FRAUD_CODES = {\n 1: 'Card Not Present Fraud',\n 2: 'Customer Dispute (Chargeback)',\n 3: 'First Party Fraud',\n 4: 'First Payment Default',\n 5: 'Identify Theft (Fraud Application)',\n 6: 'Identify Theft (Account Take Over)',\n 7: 'Suspected Fraud (Not Confirmed)',\n 8: 'Synthetic ID',\n 9: 'Other'\n }\n \n class Adapter(HTTPAdapter):\n \"\"\"Transport adapter that allows us to use TLS v1.2.\"\"\"\n\n def init_poolmanager(self, connections, maxsize, block=False):\n self.poolmanager = PoolManager(\n num_pools=connections,\n maxsize=maxsize,\n block=block,\n ssl_version=ssl.PROTOCOL_TLSv1_2)\n \n \n def __init__(self, secret, token, sandbox=False):\n \"\"\"Args:\n secret (str): Consumer secret, e.g. SID or API key.\n token (str): Consumer OAuth token.\n sandbox (bool): Whether to use a sandbox instead of a production server.\n Ensure the according secret and token are supplied.\n \n Note:\n HMAC key is created according to Emailage docs rather than OAuth1 spec.\n \"\"\"\n self.secret, self.token, self.sandbox = secret, token, sandbox\n self.hmac_key = token + '&'\n self.session = Session()\n self.domain = 'https://{}.emailage.com'.format(self.sandbox and 'sandbox' or 'api')\n self.session.mount(self.domain, EmailageClient.Adapter())\n \n \n def request(self, endpoint, **params):\n \"\"\"Basic request method utilized by #query and #flag.\n \n Args:\n endpoint (str): Currently, either an empty string or \"/flag\".\n **params: Non-general GET request params.\n \n Returns:\n dict: Original Emailage API's JSON body.\n \"\"\"\n url = self.domain + '/emailagevalidator' + endpoint + '/'\n params = dict(\n format = 'json', \n oauth_consumer_key = self.secret,\n oauth_nonce = uuid4(),\n oauth_signature_method = 'HMAC-SHA1',\n oauth_timestamp = int(time.time()),\n oauth_version = 1.0,\n **params\n )\n params['oauth_signature'] = signature.create('GET', url, params, self.hmac_key)\n \n res = self.session.get(url, params=params)\n \n # For whatever reason Emailage dispatches JSON with unreadable symbols at the start, like \\xEF\\xBB\\xBF.\n json_data = re.sub(r'^[^{]+', '', res.text)\n return json.loads(json_data)\n \n \n def query(self, query, **params):\n \"\"\"Query a risk score information for the provided email address, IP address, or a combination.\n \n Args:\n query (str | (str, str)): Email, IP, or both.\n \n Keyword Args:\n urid (str): User Defined Record ID.\n Can be used when you want to add an identifier for a query.\n The identifier will be displayed in the result.\n **: Extra request params as in API documentation.\n \"\"\"\n if type(query) is tuple: query = '+'.join(query)\n params['query'] = query\n return self.request('', **params)\n \n def query_email(self, email, **params):\n \"\"\"Query a risk score information for the provided email address.\n 
This method differs from #query in that it ensures that the string supplied is in rfc2822 format.\n \n Args:\n email (str)\n **params: keywords arguments for #query\n \"\"\"\n validation.assert_email(email)\n return self.query(email, **params)\n \n def query_ip_address(self, ip, **params):\n \"\"\"Query a risk score information for the provided IP address.\n This method differs from #query in that it ensures that the string supplied is in rfc791 format.\n \n Args:\n ip (str)\n **params: keywords arguments for #query\n \"\"\"\n validation.assert_ip(ip)\n return self.query(ip, **params)\n \n def query_email_and_ip_address(self, email, ip, **params):\n \"\"\"Query a risk score information for the provided combination of an Email and IP address.\n This method differs from #query in that it ensures that the strings supplied are in rfc2822 and rfc791 formats.\n \n Args:\n email (str)\n ip (str)\n **params: keywords arguments for #query\n \"\"\"\n validation.assert_email(email)\n validation.assert_ip(ip)\n return self.query((email, ip), **params)\n \n \n def flag(self, flag, query, fraud_code=None):\n \"\"\"Mark an email address as fraud, good, or neutral.\n \n Args:\n flag (str): Either fraud, neutral, or good.\n query (str): Email to be flagged.\n fraud_code (int): Reason why the email is considered fraud. ID of the one of FRAUD_CODES options.\n Required only if you flag something as fraud.\n See emailage.Client.FRAUD_CODES for the list of available reasons and their IDs.\n \"\"\"\n \n flags = ['fraud', 'neutral', 'good']\n if flag not in flags:\n raise ValueError(\"flag must be one of {}. {} is given.\".format(', '.join(flags), flag))\n\n validation.assert_email(query)\n \n params = dict(flag=flag, query=query)\n\n if flag == 'fraud':\n codes = self.FRAUD_CODES\n if type(fraud_code) is not int:\n raise ValueError(\"fraud_code must be an integer from 1 to {} corresponding to {}. {} is given.\".format(len(codes), ', '.join(codes.values()), fraud_code))\n if fraud_code not in range(1, len(codes) + 1):\n fraud_code = 9\n params['fraudcodeID'] = fraud_code\n\n return self.request('/flag', **params)\n \n def flag_as_fraud(self, query, fraud_code):\n \"\"\"Mark an email address as fraud.\n \n Args:\n query (str): Email to be flagged.\n fraud_code (int): Reason why the email is considered fraud. 
ID of the one of FRAUD_CODES options.\n Required only if you flag something as fraud.\n See emailage.client.EmailageClient.FRAUD_CODES for the list of available reasons and their IDs.\n \"\"\"\n return self.flag('fraud', query, fraud_code)\n \n def flag_as_good(self, query):\n \"\"\"Mark an email address as good.\n \n Args:\n query (str): Email to be flagged.\n \"\"\"\n return self.flag('good', query)\n \n def remove_flag(self, query):\n \"\"\"Unflag an email address that was marked as good or fraud previously.\n \n Args:\n query (str): Email to be unflagged.\n \"\"\"\n return self.flag('neutral', query)\n", "repo_name": "tinbka/Emailage_Python", "sub_path": "emailage/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 7234, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "requests.adapters.HTTPAdapter", "line_number": 29, "usage_type": "name"}, {"api_name": "requests.packages.urllib3.poolmanager.PoolManager", "line_number": 33, "usage_type": "call"}, {"api_name": "ssl.PROTOCOL_TLSv1_2", "line_number": 37, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 52, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 71, "usage_type": "call"}, {"api_name": "time.time", "line_number": 73, "usage_type": "call"}, {"api_name": "emailage.signature.create", "line_number": 77, "usage_type": "call"}, {"api_name": "emailage.signature", "line_number": 77, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 82, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 83, "usage_type": "call"}, {"api_name": "emailage.validation.assert_email", "line_number": 110, "usage_type": "call"}, {"api_name": "emailage.validation", "line_number": 110, "usage_type": "name"}, {"api_name": "emailage.validation.assert_ip", "line_number": 121, "usage_type": "call"}, {"api_name": "emailage.validation", "line_number": 121, "usage_type": "name"}, {"api_name": "emailage.validation.assert_email", "line_number": 133, "usage_type": "call"}, {"api_name": "emailage.validation", "line_number": 133, "usage_type": "name"}, {"api_name": "emailage.validation.assert_ip", "line_number": 134, "usage_type": "call"}, {"api_name": "emailage.validation", "line_number": 134, "usage_type": "name"}, {"api_name": "emailage.validation.assert_email", "line_number": 153, "usage_type": "call"}, {"api_name": "emailage.validation", "line_number": 153, "usage_type": "name"}]} +{"seq_id": "43158998013", "text": "__author__ = 'greg'\n\n\n# Simple set of functions to create the sqlite3\n# database.\n\n################\n# Modules\n################\n\nimport sqlite3 as sql\n\n################\n# Functions\n################\n\n\ndef create_gene_table():\n '''\n creates a table for storing Gene ids.\n '''\n con = sql.connect('Project.db')\n cur = con.cursor()\n cur.execute(\"CREATE TABLE Genes\"\n \"(Gene_id int(20) PRIMARY KEY,\"\n \"Gene_Name varchar(255))\")\n con.commit()\n con.execute('PRAGMA foreign_keys = ON') # enables foreign key enforcement\n con.commit()\n return con.close()\n\ndef create_conditions_table():\n '''\n Creates a table for storing condition ids.\n '''\n con = sql.connect('Project.db')\n cur = con.cursor()\n cur.execute(\"CREATE TABLE Conditions\"\n \"(Con_id int(20) PRIMARY KEY,\"\n \"Condition_Name varchar(255))\")\n con.commit()\n return con.close()\n\ndef create_biomass_table():\n '''\n Creates a table for storing biomass data.\n '''\n con = sql.connect('Project.db')\n cur = con.cursor()\n 
cur.execute(\"CREATE TABLE Biomass\"\n \"(ID INTEGER PRIMARY KEY AUTOINCREMENT DEFAULT NULL,\"\n \"Con_id int(20),\"\n \"Biomass num(20),\"\n \"FOREIGN KEY (Con_id) REFERENCES Conditions (Con_id))\")\n con.commit()\n return con.close()\n\ndef create_experiment_table():\n '''\n Creates a table for storing replicate/experiment ids.\n '''\n con = sql.connect('Project.db')\n cur = con.cursor()\n cur.execute(\"CREATE TABLE Experiment\"\n \"(Rep_id int(2) PRIMARY KEY,\"\n \"Rep_name varchar(10))\")\n con.commit()\n return con.close()\n\n\ndef create_expression_table():\n '''\n Creates a data for storing expression data.\n '''\n con = sql.connect('Project.db')\n cur = con.cursor()\n cur.execute(\"CREATE TABLE Expression\"\n \"(ID INTEGER PRIMARY KEY AUTOINCREMENT DEFAULT NULL,\"\n \"Gene_id int(20),\"\n \"Con_id int(20),\"\n \"Rep_id varchar(2),\"\n \"Expression int(20),\"\n \"FOREIGN KEY (Con_id) REFERENCES Conditions (Con_id),\"\n \"FOREIGN KEY (Gene_id) REFERENCES Genes (Gene_id),\"\n \"FOREIGN KEY (Rep_id) REFERENCES Experiment (Rep_id))\")\n con.commit()\n return con.close()\n\n\ndef create_data_tables():\n con = sql.connect('Project.db')\n cur = con.cursor()\n return\n\n\ndef main():\n create_gene_table()\n create_conditions_table()\n create_biomass_table()\n create_experiment_table()\n create_expression_table()\n return\n\n#######################################\n\nif __name__ == '__main__':\n main()\n\n\n\n\n", "repo_name": "NotGregH/Biodatabases_Project", "sub_path": "create_sqlite_db.py", "file_name": "create_sqlite_db.py", "file_ext": "py", "file_size_in_byte": 2704, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sqlite3.connect", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 62, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 75, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "10082615351", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef get_undersampled_df(df_arg, column_name_arg):\n # First, calculate the minimum number of samples across all classes\n min_samples = df_arg[column_name_arg].value_counts().min()\n\n # Initialize an empty DataFrame to store the undersampled data\n undersampled_df = pd.DataFrame()\n\n # Loop through each unique activity class and select the first min_samples for each class\n for activity_class in df_arg[column_name_arg].unique():\n class_subset = df_arg[df_arg[column_name_arg] == activity_class].iloc[:min_samples]\n undersampled_df = pd.concat([undersampled_df, class_subset])\n\n return undersampled_df\n\n\ndef get_correlation_matrix(corr_matrix_df_arg, filename=None):\n\n # Create a heatmap using seaborn\n fig = plt.figure(figsize=(10, 8))\n axes = fig.add_axes([0.1, 0.1, 1, 1])\n sns.heatmap(corr_matrix_df_arg, annot=True, cmap='coolwarm', center=0)\n axes.set_title('Correlation Matrix Heatmap')\n if filename:\n plt.savefig('graphs/correlation_matrix.png')\n\n return fig\n\n\ndef get_discard_columns(corr_matrix_arg, important_columns_arg, df_arg):\n\n columns_to_discard = set()\n for column in corr_matrix_arg.columns:\n correlated_columns = corr_matrix_arg.index[\n (corr_matrix_arg[column] > 0.5) | (corr_matrix_arg[column] < -0.5)\n ]\n\n for 
correlated_column in correlated_columns:\n if column != correlated_column:\n # Prioritize which column to keep based on your criteria\n # For example, keep the column with higher variance\n if column not in important_columns_arg:\n columns_to_discard.add(column)\n elif (column in important_columns_arg) and (correlated_column in important_columns_arg):\n pass\n elif (column in important_columns_arg) and (correlated_column not in important_columns_arg):\n columns_to_discard.add(correlated_column)\n else: # both columns are not in important_columns\n columns_to_discard.add(\n correlated_column if df_arg[correlated_column].var() < df_arg[column].var() else column)\n\n return columns_to_discard\n", "repo_name": "Mykhailo20/DS_Bootcamp_2023", "sub_path": "Homework_7/modules/exploratory_data_analysis.py", "file_name": "exploratory_data_analysis.py", "file_ext": "py", "file_size_in_byte": 2300, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pandas.DataFrame", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "6698215981", "text": "import os\nimport sys\nimport numpy as np\nimport argparse\nimport subprocess\nimport json\nfrom time import time\nimport nibabel as nib\nimport brainsss\nimport h5py\nimport ants\n\ndef main(args):\n\n logfile = args['logfile']\n # directory = args['directory'] # full fly func path\n # smooth = args['smooth']\n # colors = args['colors']\n printlog = getattr(brainsss.Printlog(logfile=logfile), 'print_to_log')\n\n # Get brain shape\n brain_file = '/oak/stanford/groups/trc/data/Ashley2/imports/20210802/fly1_40s-011/ch2_stitched.nii'\n img = nib.load(brain_file) # this loads a proxy\n brain_dims = img.header.get_data_shape()\n\n #calculate the meanbrain, which will be fixed in moco\n printlog('meanbrain START...')\n meanbrain = np.zeros(brain_dims[:3])\n for i in range(brain_dims[-1]):\n meanbrain += img.dataobj[...,i]\n meanbrain = meanbrain/brain_dims[-1] # divide by number of volumes\n fixed = ants.from_numpy(np.asarray(meanbrain, dtype='float32'))\n printlog('meanbrain DONE')\n\n # Make empty hdf5 file to append processed volumes to with matching shape\n save_file = '/oak/stanford/groups/trc/data/Brezovec/20220207_test2.h5'\n with h5py.File(save_file, 'w') as f:\n dset = f.create_dataset('data', (*brain_dims[:3],0), maxshape=(*brain_dims[:3],None), dtype='float32')\n printlog('created empty hdf5 file')\n\n # loop over all brain vols, motion correcting each and append to growing hdf5 file on disk\n printlog('moco vol by vol')\n for i in range(brain_dims[-1]):\n t0 = time()\n # Load a single brain volume\n vol = img.dataobj[...,i]\n\n ### Process vol (moco, zscore, etc) ###\n # Make ants image\n moving = ants.from_numpy(np.asarray(vol, dtype='float32'))\n\n # Motion correct\n moco = ants.registration(fixed,moving,type_of_transform='SyN')\n moco_out = moco['warpedmovout'].numpy()\n\n ### DELETE INVERSE TRANSFORMS\n transformlist = moco['invtransforms']\n for x in transformlist:\n if '.mat' not in x:\n os.remove(x)\n 
printlog('Deleted inv: {}'.format(x))\n\n ### DELETE FORWARD TRANSFORMS\n transformlist = moco['fwdtransforms']\n for x in transformlist:\n if '.mat' not in x:\n os.remove(x)\n printlog('Deleted fwd: {}'.format(x))\n\n # Append to hdf5 file\n with h5py.File(save_file, 'a') as f:\n\n # Increase hdf5 size by one brain volume\n current_num_vol = f['data'].shape[-1] # this is the last axis, which is time\n new_num_vol = current_num_vol + 1 # will want one more volume\n f['data'].resize(new_num_vol,axis=3) # increase size by one volume\n\n # Append to hdf5 file\n f['data'][...,-1] = vol #moco_out\n printlog(F'vol: {i}, time: {time()-t0}')\n\nif __name__ == '__main__':\n main(json.loads(sys.argv[1]))\n", "repo_name": "ClandininLab/brainsss", "sub_path": "scripts/to_delete/volbyvoltest.py", "file_name": "volbyvoltest.py", "file_ext": "py", "file_size_in_byte": 2930, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "21", "api": [{"api_name": "brainsss.Printlog", "line_number": 19, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "ants.from_numpy", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 32, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 37, "usage_type": "call"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "ants.from_numpy", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 50, "usage_type": "call"}, {"api_name": "ants.registration", "line_number": 53, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 60, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 67, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 71, "usage_type": "call"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 83, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 83, "usage_type": "attribute"}]} +{"seq_id": "21531632041", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 17 12:12:51 2020\n\n@author: ANISH\n\"\"\"\nimport speech_recognition as sr\nimport pyttsx3\nimport psycopg2\nimport eel\nimport googletrans\n\nfrom gtts import gTTS\nfrom nltk import word_tokenize\nfrom googletrans import Translator\n\ngender = \"Female\"\n\n@eel.expose\ndef gender_preference():\n print(\"Would you like to switch assistants ?\")\n eel.left_printer(\"Would you like to switch assistants ?\")\n inp0 = \"Yes\"\n # inp0 = myCommand(\"Voice Change?\")\n eel.right_printer(inp0.capitalize())\n global gender\n\n if inp0 == \"Yes\" : \n if gender == \"Female\":\n gender = \"Male\"\n eel.left_printer(\"I am now changed based on your preference\")\n speak(\"I am now changed based on your preference !!\",genders=gender) \n else : \n gender = \"Female\"\n eel.left_printer(\"I have now changed my voice \")\n speak(\"I have now changed my voice \",genders=gender)\n\ndef db_connect():\n conn = psycopg2.connect(database=\"IVR_POS\", user=\"postgres\", password=\"hi\", host=\"127.0.0.1\", port=\"5432\")\n cur = conn.cursor()\n return conn,cur\n\nconn,cur = db_connect()\nquery = \"SELECT nextval('invoice_seq')\"\ncur.execute(query)\nrows = cur.fetchall()\ninv_id = rows[0][0]\n\n@eel.expose\ndef basket_printer():\n conn,cur = db_connect()\n # return parent_category_selector(cur)\n return 
invoice_printer(cur)\n\n@eel.expose\ndef inv_printer():\n conn,cur = db_connect()\n return item_printer(cur)\n\n@eel.expose\ndef fullVoice():\n speak(\"Initiating a completely voice based system\")\n\n inp0 = \"Yes\"\n # myCommand(\"Do u want a completely voice based system?\")\n print(inp0)\n\n if inp0 == \"Yes\":\n\n speak(\"Do you know exactly what you want to buy ?\")\n inp0 = myCommand(\"Do you know exactly what you want to buy ?\")\n # inp0 = inp_no \n print(inp0)\n \n complete_voice(cur,conn,inp0)\n\ndef speak(audio,language=\"english\", genders = gender):\n\n translator = Translator()\n engine = pyttsx3.init() \n voices = engine.getProperty('voices') \n if gender == \"Female\" : \n engine.setProperty('voice', voices[1].id)\n else : \n engine.setProperty('voice', voices[0].id)\n # if language==\"english\":\n # engine.setProperty('voice', voices[0].id)\n # elif language ==\"hindi\":\n # for voice in voices:\n # if voice.languages[0] == u'hi_IN':\n # engine.setProperty('voice', voice.id)\n # break\n # result = translator.translate(audio, src='en', dest='hi')\n # audio=result.text\n\n # else:\n # for voice in voices:\n # if voice.languages[0] == u'es_ES':\n # engine.setProperty('voice', voice.id)\n # break\n # result = translator.translate(audio, src='en', dest='es')\n # audio=result.text\n # print(audio) \n \n engine.say(audio)\n engine.runAndWait()\n \n@eel.expose\ndef myCommand(param=\"Item name\"):\n \n \"listens for commands\"\n r = sr.Recognizer()\n \n with sr.Microphone() as source:\n print(param)\n r.energy_threshold -= 200\n r.pause_threshold = 0.5\n r.adjust_for_ambient_noise(source, duration=0.1)\n audio = r.listen(source)\n \n try:\n command = r.recognize_google(audio).lower()\n print('You said: ' + command + '\\n')\n \n #loop back to continue to listen for commands if unrecognizable speech is received\n except sr.UnknownValueError:\n print('....')\n command = myCommand(param);\n \n return command.capitalize()\n\ndef printer(current_pointer,p_type,quantity=0):\n \n rows = current_pointer.fetchall()\n \n if p_type == 1:\n print('Cat |\\t\\n id |\\t','Category name',\"\\t|\")\n elif p_type ==2:\n print('Cat |\\tSub Category\\t|\\n id |\\t',' name',\" \\t|\")\n elif p_type ==3:\n print('Item |\\t\\n id |\\t','Item name',\"\\t|\")\n else:\n print(\"Invalid\")\n \n print(\"--------------------------\")\n \n x = []\n \n if p_type == 1 or p_type == 2:\n for row in rows:\n print(row[0],' |\\t',row[1],\" \\t|\")\n elif p_type == 3:\n for row in rows:\n print(row[0],' |',row[2],\"\\t|\")\n #x.append(row[0],row[2],row[3],quantity,row[3]*quantity)\n #return rows[0],rows[2],rows[3],quantity,rows[3]*quantity\n \n else : \n print(\"error\")\n \n return rows\n\ndef parent_category_selector(cur_pointer):\n\n #print(runtype) \n cur_pointer.execute(\"SELECT category_id AS id,category_name AS name FROM category_table WHERE category_id=parent_id\")\n rows = cur_pointer.fetchall()\n cat = [] \n for row in rows:\n cat.append(row[1])\n text = \"Available categories are \" + \"<li>\" + \"<li>\".join(cat)\n eel.left_printer(text)\n speak(\"You can view the available categories on the screen\")\n return \n printer(cur_pointer,1)\n\ndef parent_category_speak(cur):\n z=[]\n cur.execute(\"SELECT category_id AS id,category_name AS name FROM category_table WHERE category_id=parent_id\")\n rows = cur.fetchall()\n \n for row in rows:\n z.append(row[1])\n \n text = 'Available categories are ' + \" \".join(z[:-1]) + ' and ' + z[-1] \n speak(text)\n\ndef child_category_selector(cur_pointer,p_id):\n \n query = 
str(\"SELECT category_id, category_name FROM category_table WHERE parent_id IN (SELECT parent_id FROM category_table WHERE category_name ='\" + str(p_id)+\"') AND parent_id!=category_id\")\n cur_pointer.execute(query)\n rows = cur_pointer.fetchall()\n if len(rows) >=1 : \n sub_cat = [] \n for row in rows:\n sub_cat.append(row[1])\n text = 'Available subcategories are ' + \"<li>\" + \"<li>\".join(sub_cat)\n eel.left_printer(text)\n speak(\"Please check your screen for the Available sub-categories\")\n printer(cur_pointer,2)\n return 1\n else : \n return -1\n\ndef child_category_speak(cur,p_id):\n z=[]\n query = str(\"SELECT category_id, category_name FROM category_table WHERE parent_id IN (SELECT parent_id FROM category_table WHERE category_name ='\" + str(p_id)+\"') AND parent_id!=category_id\")\n cur.execute(query)\n rows = cur.fetchall()\n for row in rows :\n z.append(row[1])\n text = \"Available categories under \"+ p_id +\" are \" + \", \".join(z[:-1]) + \" and \" + z[-1]\n speak(text)\n \n\ndef item_selector(cur_pointer,p_id):\n \n query = \"SELECT * FROM items WHERE category_id IN (SELECT category_id FROM category_table WHERE category_name ='\" + p_id + \"')\"\n cur_pointer.execute(query)\n rows = cur_pointer.fetchall()\n # print('Item |\\t\\n id |\\t','Item name',\"\\t|\")\n # print(\"--------------------------\")\n items = []\n if len(rows) >= 1 :\n for row in rows : \n items.append(row[2])\n text = \"Avaiailable items under \" + p_id + \" are \" + \"<li>\" + \"<li>\".join(items)\n eel.left_printer(text)\n speak(\"Please choose from the items mentioned on the screen\")\n return 1\n \n else : \n return -1\n \ndef item_speaker(cur,p_id):\n query = \"SELECT * FROM items WHERE category_id IN (SELECT category_id FROM category_table WHERE category_name ='\" + p_id + \"')\"\n cur.execute(query)\n rows = cur.fetchall()\n items = []\n for row in rows : \n items.append(row[2])\n text = \"Availaible items are \" + \" \".join(items[:-1]) + \"and\" + items[-1] \n speak(text)\n\ndef item_printer(cur_pointer):\n query = \"SELECT * FROM items ORDER BY item_id\"\n x = []\n #select invoice_id from invoice order by invoice_id desc limit 1\n cur_pointer.execute(query)\n rows = cur_pointer.fetchall()\n\n return rows\n \ndef item_printer_eel(cur_pointer):\n query = \"SELECT * FROM items ORDER BY item_id\"\n x = []\n #select invoice_id from invoice order by invoice_id desc limit 1\n cur_pointer.execute(query)\n rows = cur_pointer.fetchall()\n\n print('Item |\\t\\n id |\\t','Item name',\"\\t|\")\n print(\"--------------------------\")\n for row in rows : \n print(row[0],' |',row[2],\"\\t|\")\n x.append(row[2])\n x = \"<li>\" + \"<li>\".join(x)\n\n return x\n\n\ndef stopword_remover(text):\n req = []\n stop_words = ['i','want','to','order','and','some','would','like','go','visit','view']\n text = text.lower()\n text = word_tokenize(text)\n for word in text:\n if word not in stop_words:\n req.append(word)\n return req\n\ndef db_searcher(att,cur_pointer,quantt):\n\n x = ''\n y = []\n for i in range(len(att)):\n if i==0:\n x+= \"'%\" + att[i] + \"%'\"\n \n else:\n x += \" and item_attributes LIKE '%\" + att[i] + \"%'\"\n \n query = \"SELECT * FROM items WHERE item_attributes LIKE \" + x\n cur_pointer.execute(query)\n rows = cur_pointer.fetchall()\n\n if len(rows)>=1 : \n for row in rows : \n #y.append(row[0])\n y.append([row[0],row[2],row[3],quantt,row[3]*quantt,row[5]])\n return y\n\n else :\n speak(\"Sorry the item you have ordered is currently not available please try another item\")\n print(\"Sorry the 
item you have ordered is currently not available\")\n return -1\n \n printer(cur_pointer,3,quantt)\n\n #return r\n\ndef combiner(curr_pointer,inpp,inpp2,inpp3,quantity):\n parent_category_selector(curr_pointer)\n child_category_selector(curr_pointer,inpp)\n item_selector(curr_pointer,inpp2)\n return db_searcher(stopword_remover(inpp3),cur,quantity)\n\n# def get_stock(cur,item_id):\n# query = \"SELECT * FROM stocks_table WHERE item_id = \" + item_id\n# cur.execute(query)\n# rows = cur.fetchall()\n# return rows\n\ndef stock_update(conn,cur,records_to_update):\n\n sql_update_query = \"\"\"UPDATE items SET stock = stock - %s WHERE item_id = %s\"\"\"\n\n cur.executemany(sql_update_query, records_to_update)\n conn.commit()\n print(cur.rowcount, \"Record updated successfully into stock\")\n\ndef invoice_generator(cur,user_buy,conn):\n global inv_id\n\n records_to_update = []\n sql_update_query = \"\"\"UPDATE stocks_table SET quantity = %s WHERE item_id = %s\"\"\"\n\n for i in range(len(user_buy)):\n joiner = str(user_buy[i][0][2]) + \", \" + str(user_buy[i][0][3]) + \", \" + str(user_buy[i][0][4])\n record_to_insert = str(inv_id) + \", \" + str(user_buy[i][0][0]) + \", '\" + user_buy[i][0][1] + \"', \" + joiner\n \n query = \"INSERT INTO invoice (invoice_id, item_id, item_name, coster, quantity, overall) \\\n VALUES (\" + record_to_insert + \")\"\n \n records_to_update.append((user_buy[i][0][3],user_buy[i][0][0]))\n\n cur.execute(query);\n \n stock_update(conn,cur,records_to_update)\n conn.commit()\n eel.removevoicedots()\n print(\"All Successful...\")\n\n\n#result = cur.executemany(sql_update_query, records_to_update)\ndef invoice_printer(cur_pointer):\n cur_pointer.execute(\"SELECT invoice_id, item_id, item_name, coster, quantity, overall FROM invoice WHERE invoice_id = (SELECT invoice_id FROM invoice ORDER BY invoice_id DESC LIMIT 1)\")\n return printer(cur_pointer,1)\n\n@eel.expose\ndef invoice_inc(value):\n conn,cur = db_connect()\n cur.execute(\"UPDATE invoice SET quantity=quantity+1, overall=overall+coster WHERE item_id =\" +str(value)+ \" and invoice_id = (SELECT invoice_id FROM invoice ORDER BY invoice_id DESC LIMIT 1)\")\n cur.execute(\"UPDATE items SET stock=stock-1 WHERE item_id =\" +str(value))\n conn.commit()\n\n@eel.expose\ndef invoice_dec(value):\n conn,cur = db_connect()\n cur.execute(\"UPDATE invoice SET quantity=quantity-1, overall=overall-coster WHERE item_id =\" +str(value)+ \" and invoice_id = (SELECT invoice_id FROM invoice ORDER BY invoice_id DESC LIMIT 1)\")\n cur.execute(\"UPDATE items SET stock=stock+1 WHERE item_id =\" +str(value))\n conn.commit()\n\n@eel.expose\ndef bill_amount():\n conn,cur = db_connect()\n cur.execute(\"SELECT SUM(overall) FROM invoice WHERE invoice_id = (SELECT invoice_id FROM invoice ORDER BY invoice_id DESC LIMIT 1)\")\n return printer(cur,0)\n\ndef str2int(string):\n str2intdict = {\"one\":1, \"two\":2, \"three\":3, \"four\":4, \"five\":5, \"six\":6, \"seven\":7, \"eight\":8, \"nine\":9}\n if string in str2intdict.keys():\n return str2intdict[string]\n \n else : \n return -1\n\ndef known_item_voice(cur):\n inp1='Yes'\n user_buy = []\n while(inp1 == 'Yes'):\n speak\n inp1 = 'britannia milk bread'\n # inp1 = myCommand(\"Item name\")\n inp1 = word_tokenize(inp1)\n \n quantt = 6\n # quantt = myCommand(\"Item quant\")\n if str2int(quantt) !=-1 : \n\n\n if db_searcher(inp1, cur, quantt) != -1: \n user_buy.append(db_searcher(inp1,cur,quantt))\n print(user_buy)\n speak(\"Anything else ?\")\n inp1 = input(\"Enter Yes/no\")\n #inp1 = 
myCommand(\"Yes/no\")\n\n else : \n known_item_voice(cur)\n print(\"\")\n else : \n known_item_voice()\n\n return user_buy\n\ndef known_item(conn,cur,inp0='Yes'):\n \n speak(\"Please check the screen to view all the items\")\n x = item_printer_eel(cur)\n x = \"Available items are : \\n\" + x\n eel.left_printer(x)\n \n user_buy = []\n \n while(inp0=='Yes'):\n \n # inp1 = 'i want britannia milk bread'\n inp1 = myCommand(\"Item name\")\n eel.right_printer(inp1.capitalize())\n inp1 = stopword_remover(inp1)\n \n speak(\"How much ?\")\n eel.left_printer(\"How much ?\")\n quant = 6\n #quant = myCommand(\"How much\")\n eel.right_printer(quant)\n \n if db_searcher(inp1,cur,quant)!= -1:\n user_buy.append(db_searcher(inp1,cur,quant))\n \n speak(\"Would u like to add anything else ?\")\n eel.left_printer(\"Would u like to add anything else ?\")\n inp0 = myCommand(\"Anything else\")\n eel.right_printer(inp0)\n #inp0 = inp_no\n # inp0 = input()\n \n else : \n known_item(conn,cur)\n\n invoice_generator(cur,user_buy,conn)\n speak(\"Ok, I have added your items into your cart\")\n eel.left_printer(\"Ok, I have added your items into your cart\")\n \n\ndef unknown_item(conn,cur,inp0='Yes'):\n\n user_buy = []\n\n while(inp0 == 'Yes'):\n\n parent_category_selector(cur)\n \n # inp1 = 'Bakery'\n inp1 = myCommand(\"Select a category\")\n eel.right_printer(inp1.capitalize())\n x = child_category_selector(cur,inp1.capitalize())\n \n if x == 1 : \n # inp2 = 'bread'\n inp2 = myCommand(\"Select a subcategory\")\n eel.right_printer(inp2.capitalize())\n item = item_selector(cur,inp2.capitalize())\n \n if item == 1 : \n # inp3 = 'i want britannia milk bread'\n inp3 = myCommand(\"Item name\")\n eel.right_printer(inp3.capitalize())\n inp3 = stopword_remover(inp3)\n eel.left_printer(\"How much\")\n quant = 1\n #quant = myCommand(\"Enter quantity\")\n eel.right_printer(quant)\n \n if db_searcher(inp3,cur,quant) != -1: \n user_buy.append(db_searcher(inp3,cur,quant))\n speak(\"Would u like to add anything else ?\")\n eel.left_printer(\"Would u like to add anything else ?\")\n inp0 = myCommand(\"Anything else\")\n eel.right_printer(inp0.capitalize())\n if inp0 == \"Yes\":\n continue\n else : \n break\n #inp0 = inp_no\n # inp0 = input()\n \n else : \n unknown_item(conn,cur)\n \n else :\n speak(\"Wrong input for sub category selector\")\n unknown_item(conn,cur)\n\n else : \n speak(\"Sorry the category you have chosen is not available\")\n eel.left_printer(\"Sorry the category you have chosen is not available\")\n unknown_item(conn,cur)\n\n invoice_generator(cur,user_buy,conn)\n speak(\"Ok, I have added your items into your cart\")\n eel.left_printer(\"Ok, I have added your items into your cart\")\n \n return None\n\ndef unknown_item_voice(cur,conn):\n parent_category_speak(cur)\n \n # inp1 = 'Bakery'\n inp1 = myCommand(\"Which category\")\n child_category_speak(cur,inp1.capitalize())\n\n # inp1 = 'Bread'\n inp1 = myCommand(\"Which sub_category\")\n item_speaker(cur,inp1.capitalize())\n \n user_buy = known_item_voice(cur)\n invoice_generator(cur,user_buy,conn)\n\n@eel.expose\ndef tryblock():\n inp_no = 'No'\n inp_Yes = 'Yes'\n conn,cur = db_connect()\n eel.left_printer(\"Do you know exactly what you want to buy?\")\n speak(\"Do u know exactly what u want to buy ?\")\n\n inp0 = myCommand(\"Do u know exactly what u want to buy ?\")\n\n eel.right_printer(inp0.capitalize())\n\n if inp0 == inp_Yes:\n known_item(conn,cur)\n \n else : \n unknown_item(conn,cur)\n\ndef complete_voice(cur,conn,inp0):\n\n if inp0 == 'Yes':\n user_buy = 
known_item_voice(cur)\n invoice_generator(cur,user_buy,conn)\n else : \n unknown_item_voice(cur,conn)\n\n@eel.expose\ndef newPage():\n eel.start('index.html', size=(540, 960))\n\n@eel.expose\ndef productReturns():\n inp_no = 'no'\n inp_Yes = 'Yes'\n conn,cur = db_connect()\n eel.left_printer(\"Thank you for contacting us about your defective product.\")\n speak(\"Thank you for contacting us about your defective product\")\n eel.left_printer(\"We are extremely sorry for the inconvenience.\")\n speak(\"We are extremely sorry for the inconvenience.\")\n eel.left_printer(\"What do you want to return?\")\n speak(\"What do you want to return?\")\n\n inp_user=\"I did not like your pasta sauce\"\n eel.right_printer(inp_user)\n query=\"SELECT row from customer_care order by row desc\"\n cur.execute(query)\n rows = cur.fetchall()\n new_entry = rows[0][0]+1\n query = \"INSERT INTO customer_care (cat_id,row,complain) \\\n VALUES (1,\"+str(new_entry)+\",'\"+inp_user+\"')\"\n cur.execute(query)\n conn.commit()\n eel.left_printer(\"Our sales team will get back to you as soon as possible \")\n speak(\"Our sales team will get back to you as soon as possible \")\n eel.removevoicedots()\n\n@eel.expose\ndef billingIssues():\n inp_no = 'no'\n inp_Yes = 'Yes'\n conn,cur = db_connect()\n eel.left_printer(\"We are extremely sorry for the inconvenience.\")\n speak(\"We are extremely sorry for the inconvenience\")\n eel.left_printer(\"What is your invoice number?\")\n speak(\"What is your invoice number?\")\n inp_user=\"62\"\n eel.right_printer(inp_user)\n eel.left_printer(\"What is the issue you faced while billing?\")\n speak(\"What is the issue you faced while billing?\")\n inp_user=\"You have not included my discount\"\n eel.right_printer(inp_user)\n query=\"SELECT row from customer_care order by row desc\"\n cur.execute(query)\n rows = cur.fetchall()\n new_entry = rows[0][0]+1\n query = \"INSERT INTO customer_care (cat_id,row,complain) \\\n VALUES (2,\"+str(new_entry)+\",'\"+inp_user+\"')\"\n cur.execute(query)\n conn.commit()\n eel.left_printer(\"Our sales team will get back to you as soon as possible \")\n speak(\"Our sales team will get back to you as soon as possible \")\n\n@eel.expose\ndef SendFeedback():\n inp_no = 'no'\n inp_Yes = 'Yes'\n conn,cur = db_connect()\n eel.left_printer(\"We are extremely sorry for the inconvenience.\")\n speak(\"We are extremely sorry for the inconvenience\")\n eel.left_printer(\"What is your invoice number?\")\n speak(\"What is your invoice number?\")\n inp_user=\"63\"\n eel.right_printer(inp_user)\n eel.left_printer(\"How can we improve your instore shopping experience?\")\n speak(\"How can we improve your instore shopping experience?\")\n\n inp_user=\"It would be convenient if you had sanitizing stations in the store.\"\n eel.right_printer(inp_user)\n query=\"SELECT row from customer_care order by row desc\"\n cur.execute(query)\n rows = cur.fetchall()\n new_entry = rows[0][0]+1\n query = \"INSERT INTO customer_care (cat_id,row,complain) \\\n VALUES (3,\"+str(new_entry)+\",'\"+inp_user+\"')\"\n cur.execute(query)\n conn.commit()\n eel.left_printer(\"Your feedback is valuable to us, we will try to work on this.\")\n speak(\"Your feedback is valuable to us, we will try to work on this.\")", "repo_name": "vibes1100/IVR-POS", "sub_path": "functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 20672, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "eel.left_printer", 
"line_number": 22, "usage_type": "call"}, {"api_name": "eel.right_printer", "line_number": 25, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 31, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 35, "usage_type": "call"}, {"api_name": "eel.expose", "line_number": 19, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 39, "usage_type": "call"}, {"api_name": "eel.expose", "line_number": 49, "usage_type": "attribute"}, {"api_name": "eel.expose", "line_number": 55, "usage_type": "attribute"}, {"api_name": "eel.expose", "line_number": 60, "usage_type": "attribute"}, {"api_name": "googletrans.Translator", "line_number": 79, "usage_type": "call"}, {"api_name": "pyttsx3.init", "line_number": 80, "usage_type": "call"}, {"api_name": "speech_recognition.Recognizer", "line_number": 112, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 114, "usage_type": "call"}, {"api_name": "speech_recognition.UnknownValueError", "line_number": 126, "usage_type": "attribute"}, {"api_name": "eel.expose", "line_number": 108, "usage_type": "attribute"}, {"api_name": "eel.left_printer", "line_number": 172, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 198, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 228, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 275, "usage_type": "call"}, {"api_name": "eel.removevoicedots", "line_number": 350, "usage_type": "call"}, {"api_name": "eel.expose", "line_number": 359, "usage_type": "attribute"}, {"api_name": "eel.expose", "line_number": 366, "usage_type": "attribute"}, {"api_name": "eel.expose", "line_number": 373, "usage_type": "attribute"}, {"api_name": "nltk.word_tokenize", "line_number": 394, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 421, "usage_type": "call"}, {"api_name": "eel.right_printer", "line_number": 429, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 433, "usage_type": "call"}, {"api_name": "eel.right_printer", "line_number": 436, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 442, "usage_type": "call"}, {"api_name": "eel.right_printer", "line_number": 444, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 453, "usage_type": "call"}, {"api_name": "eel.right_printer", "line_number": 466, "usage_type": "call"}, {"api_name": "eel.right_printer", "line_number": 472, "usage_type": "call"}, {"api_name": "eel.right_printer", "line_number": 478, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 480, "usage_type": "call"}, {"api_name": "eel.right_printer", "line_number": 483, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 488, "usage_type": "call"}, {"api_name": "eel.right_printer", "line_number": 490, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 507, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 512, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 535, "usage_type": "call"}, {"api_name": "eel.right_printer", "line_number": 540, "usage_type": "call"}, {"api_name": "eel.expose", "line_number": 530, "usage_type": "attribute"}, {"api_name": "eel.start", "line_number": 558, "usage_type": "call"}, {"api_name": "eel.expose", "line_number": 556, "usage_type": "attribute"}, {"api_name": "eel.left_printer", "line_number": 565, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 
567, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 569, "usage_type": "call"}, {"api_name": "eel.right_printer", "line_number": 573, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 582, "usage_type": "call"}, {"api_name": "eel.removevoicedots", "line_number": 584, "usage_type": "call"}, {"api_name": "eel.expose", "line_number": 560, "usage_type": "attribute"}, {"api_name": "eel.left_printer", "line_number": 591, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 593, "usage_type": "call"}, {"api_name": "eel.right_printer", "line_number": 596, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 597, "usage_type": "call"}, {"api_name": "eel.right_printer", "line_number": 600, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 609, "usage_type": "call"}, {"api_name": "eel.expose", "line_number": 586, "usage_type": "attribute"}, {"api_name": "eel.left_printer", "line_number": 617, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 619, "usage_type": "call"}, {"api_name": "eel.right_printer", "line_number": 622, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 623, "usage_type": "call"}, {"api_name": "eel.right_printer", "line_number": 627, "usage_type": "call"}, {"api_name": "eel.left_printer", "line_number": 636, "usage_type": "call"}, {"api_name": "eel.expose", "line_number": 612, "usage_type": "attribute"}]} +{"seq_id": "74317654133", "text": "import logging\nimport numbers\nimport numpy as np\n\nfrom atom.api import (Enum, Unicode, Value)\n\nfrom exopy.tasks.api import (InstrumentTask, validators)\nimport exopy_hqc_legacy.instruments.drivers.dll.SA124B as sa\n\nSA_SWEEPING = 0x0\nSA_REAL_TIME = 0x1\n\n\nclass TuneIQMixerTask(InstrumentTask):\n \"\"\" Task to tune an IQ mixer in SSB\n Implicit use of a SignalHound spectrum analyzer\n Tunes channels I and Q DC offset, relative delay and voltage\n to suppress LO leakage and unwanted sideband\n TODO: handle task with two instruments: AWG AND Spectrum analyzer\n TODO: implement realtime sweep for better SNR\n\n \"\"\"\n\n # Get user inputs\n channelI = Enum('Ch1', 'Ch2', 'Ch3', 'Ch4').tag(pref=True)\n channelQ = Enum('Ch1', 'Ch2', 'Ch3', 'Ch4').tag(pref=True)\n\n # LO frequency\n freq = Unicode('0.0').tag(pref=True,\n feval=validators.SkipLoop(types=numbers.Real))\n # IF frequency\n det = Unicode('0.0').tag(pref=True,\n feval=validators.SkipLoop(types=numbers.Real))\n # Desired sideband, e.g. 
if Lower, suppress freq and freq+det\n    SB = Enum('Lower', 'Upper').tag(pref=True)\n\n    my_sa = Value()  # signal analyzer\n    chI = Value()\n    chQ = Value()\n    freq_Hz = Value()\n    det_Hz = Value()\n    SB_sgn = Value()\n\n    def check(self, *args, **kwargs):\n        ''' Default checks plus a check that the AWG channels differ\n        '''\n        test, traceback = super(TuneIQMixerTask, self).check(*args, **kwargs)\n        if not test:\n            return test, traceback\n\n        if self.channelI == self.channelQ:\n            test = False\n            msg = 'I and Q channels need to be different!'\n            traceback[self.get_error_path()] = msg\n        return test, traceback\n\n    def perform(self):\n        \"\"\"Default interface behavior.\n\n        \"\"\"\n        # open signal analyzer\n        serial = sa.saGetSerialNumberList()[0]\n        assert serial != 0\n        connection_infos = {u'serial': serial}\n        self.my_sa = sa.SA124B(connection_infos, mode=SA_SWEEPING)\n\n        # AWG channels\n        awg = self.driver\n        awg.run_mode = 'CONT'\n        awg.run_awg(1, delay=0.5)\n        self.chI = awg.get_channel(int(self.channelI[-1]))\n        self.chQ = awg.get_channel(int(self.channelQ[-1]))\n\n        # set sa sweep params\n        self.my_sa.do_set_rbw_vbw(1e3, 1e3)\n        self.my_sa.do_set_span(250e3)\n\n        # convert user inputs into adequate units\n        self.freq_Hz = self.format_and_eval_string(self.freq)*1e9\n        self.det_Hz = self.format_and_eval_string(self.det)*1e9\n        self.SB_sgn = 1 if self.SB == 'Lower' else -1\n\n        # Initialize AWG params\n\n        # we always operate the AWG at 4V; if not 4,\n        # then assume the AWG has just been turned on\n        if self.get_chI_vpp() != 4:\n            self.chI_vpp(4.0)\n            self.chQ_vpp(4.0)\n\n        # perform optimization twice\n        self.tune_ssb('lo')\n        self.tune_ssb('sb')\n        pos_lo, cost = self.tune_ssb('lo')\n        pos_sb, cost = self.tune_ssb('sb')\n\n        # get power for optimal parameters at sig, leakage and sideband\n        sig = self.my_sa.read_single_freq(self.freq_Hz-self.SB_sgn*self.det_Hz,\n                                          n_av=10)\n        lo = self.my_sa.read_single_freq(self.freq_Hz, n_av=10)\n        sb = self.my_sa.read_single_freq(self.freq_Hz+self.SB_sgn*self.det_Hz,\n                                         n_av=10)\n\n        # close signal analyzer\n        self.my_sa._close()\n\n        # log values\n        log = logging.getLogger(__name__)\n        msg1 = 'Tuned IQ mixer at LO = %s GHz, IF = %s MHz, \\\n               Signal: %s dBm, LO: %s dBm, SB: %s dBm' % \\\n               (1e-9*self.freq_Hz, 1e-6*self.det_Hz, sig, lo, sb)\n        log.info(msg1)\n        msg2 = 'chI offset: %s V, chQ offset: %s V, chQvpp: %s V, \\\n               chQdelay: %s ns' % \\\n               (pos_lo[0], pos_lo[1], pos_sb[0], 1e9*pos_sb[1])\n        log.info(msg2)\n\n    # optimization procedure\n    def tune_ssb(self, mode):\n        # suppress lo leakage params\n        if mode == 'lo':\n            param1 = self.chI_offset\n            param2 = self.chQ_offset\n            f = self.freq_Hz\n            pref = np.array([1, 1])\n            pos0 = np.array([self.get_chI_offset(), self.get_chQ_offset()])\n\n        # suppress other sideband params\n        elif mode == 'sb':\n            param1 = self.chQ_vpp\n            param2 = self.chQ_delay\n            f = self.freq_Hz + self.SB_sgn*self.det_Hz\n            pref = np.array([1, 1e-9])\n            pos0 = np.array([self.get_chQ_vpp(), self.get_chQ_delay()])\n        else:\n            msg = '''param has wrong value, should be lo or sb,\n                     received %s''' % mode\n            raise ValueError(msg)\n\n        # 4 directions in parameter search space\n        sens = [np.array([1, 0]), np.array([0, 1]),\n                np.array([-1, 0]), np.array([0, -1])]\n\n        # initial cost (cost = power of sa at f)\n        cost0 = self.cost(param1, param2, pos0[0], pos0[1], f)\n\n        # initialize counters and step size\n        dec = 0.1  # decimal: step size\n        s = 0  # direction\n        c = 0  # counter: number of explored directions at given point\n        eval_count = 0  # counter: number of cost evaluations\n\n        poslist = [pos0]\n        # stop search when dec < AWG 
resolution\n while dec >= 0.001:\n # break when max eval count has reach or\n # all 4 directions have been explored\n while c < 4 and eval_count < 1000:\n # probe cost at new pos: pos1\n pos1 = pos0 + dec*sens[s]*pref\n cost1 = self.cost(param1, param2, pos1[0], pos1[1], f)\n eval_count += 1\n # if lower cost, update pos\n if cost1 < cost0:\n cost0 = cost1\n pos0 = pos1\n c = 0\n poslist.append(pos0)\n else:\n c += 1\n s = np.mod(s+1, 4)\n c = 0\n # decrease dec if all explored directions give higher cost\n dec /= 10\n return pos0, cost0\n\n # optimization cost function: get power in dBm at f from signal_analyzer\n def cost(self, param1, param2, val1, val2, f):\n param1(val1)\n param2(val2)\n return self.my_sa.read_single_freq(f, n_av=10)\n\n # define AWG getter and setter functions to pass into cost function\n def chI_offset(self, value):\n self.chI.offset = value\n\n def chQ_offset(self, value):\n self.chQ.offset = value\n\n def get_chI_offset(self):\n return self.chI.offset\n\n def get_chQ_offset(self):\n return self.chQ.offset\n\n def chI_vpp(self, value):\n self.chI.vpp = value\n\n def chQ_vpp(self, value):\n self.chQ.vpp = value\n\n def get_chI_vpp(self):\n return self.chI.vpp\n\n def get_chQ_vpp(self):\n return self.chQ.vpp\n\n def chQ_delay(self, value):\n self.chQ.delay = value\n\n def get_chQ_delay(self):\n return self.chQ.delay\n", "repo_name": "Exopy/exopy_hqc_legacy", "sub_path": "exopy_hqc_legacy/tasks/tasks/instr/tune_IQ_mixer.py", "file_name": "tune_IQ_mixer.py", "file_ext": "py", "file_size_in_byte": 7099, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "exopy.tasks.api.InstrumentTask", "line_number": 14, "usage_type": "name"}, {"api_name": "atom.api.Enum", "line_number": 25, "usage_type": "call"}, {"api_name": "atom.api.Enum", "line_number": 26, "usage_type": "call"}, {"api_name": "atom.api.Unicode", "line_number": 29, "usage_type": "call"}, {"api_name": "exopy.tasks.api.validators.SkipLoop", "line_number": 30, "usage_type": "call"}, {"api_name": "exopy.tasks.api.validators", "line_number": 30, "usage_type": "name"}, {"api_name": "numbers.Real", "line_number": 30, "usage_type": "attribute"}, {"api_name": "atom.api.Unicode", "line_number": 32, "usage_type": "call"}, {"api_name": "exopy.tasks.api.validators.SkipLoop", "line_number": 33, "usage_type": "call"}, {"api_name": "exopy.tasks.api.validators", "line_number": 33, "usage_type": "name"}, {"api_name": "numbers.Real", "line_number": 33, "usage_type": "attribute"}, {"api_name": "atom.api.Enum", "line_number": 35, "usage_type": "call"}, {"api_name": "atom.api.Value", "line_number": 37, "usage_type": "call"}, {"api_name": "atom.api.Value", "line_number": 38, "usage_type": "call"}, {"api_name": "atom.api.Value", "line_number": 39, "usage_type": "call"}, {"api_name": "atom.api.Value", "line_number": 40, "usage_type": "call"}, {"api_name": "atom.api.Value", "line_number": 41, "usage_type": "call"}, {"api_name": "atom.api.Value", "line_number": 42, "usage_type": "call"}, {"api_name": "exopy_hqc_legacy.instruments.drivers.dll.SA124B.saGetSerialNumberList", "line_number": 62, "usage_type": "call"}, {"api_name": "exopy_hqc_legacy.instruments.drivers.dll.SA124B", "line_number": 62, "usage_type": "name"}, {"api_name": "exopy_hqc_legacy.instruments.drivers.dll.SA124B.SA124B", "line_number": 65, "usage_type": "call"}, {"api_name": "exopy_hqc_legacy.instruments.drivers.dll.SA124B", "line_number": 65, "usage_type": "name"}, {"api_name": 
"logging.getLogger", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 171, "usage_type": "call"}]} +{"seq_id": "71176628532", "text": "#!/usr/bin/python\n\n# Original code: https://goo.gl/9MJAZS\n# Modified for preprocessing\n\nimport cv2\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\nfrom matplotlib import pyplot as plt\nfrom CyclicLR import CyclicLR\n\nimport random\nfrom sklearn.utils import shuffle\nimport time\n\nrandom.seed(time.time())\nnp.random.seed(int(time.time()))\n\n# Load the data.\ntrain = pd.read_json(\"input/train.json\")\ntest = pd.read_json(\"input/test.json\")\n\n#train = shuffle(train)\n#train = train.reindex(np.random.permutation(train.index))\n\n# High-pass filtering & Gamma correction\ndef getHigh(img, length=1):\n f = np.fft.fft2(img)\n fshift = np.fft.fftshift(f)\n\n rows = np.size(img, 0) # taking the size of the image\n cols = np.size(img, 1)\n crow, ccol = int(rows / 2), int(cols / 2)\n\n fshift[crow - length:crow + length, ccol - length:ccol + length] = 0\n f_ishift = np.fft.ifftshift(fshift)\n\n img_back = np.power(np.abs(np.fft.ifft2(f_ishift)), 2) ## shift for centering 0.0 (x,y)\n\n img_back = (img_back - np.mean(img_back)) / np.std(img_back)\n img_back = (img_back - np.min(img_back)) / (np.max(img_back) - np.min(img_back))\n\n return img_back\n\n\n# Generate the training data\ntrain.inc_angle = train.inc_angle.replace('na', 0)\nidx_meaningful = np.where(train.inc_angle > 0)\n\n# Create 3 bands having HH, HV and avg of both\nX_band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in train[\"band_1\"]])\nX_band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in train[\"band_2\"]])\nX_band_3 = np.array([np.multiply(abs(X_band_1[i,:,:]), abs(X_band_2[i,:,:])) for i in range(len(X_band_1))])\n\n# Apply GetHigh\nX_band_1 = np.array([getHigh(X_band_1[i,:,:]) for i in range(len(X_band_1))])\nX_band_2 = np.array([getHigh(X_band_2[i,:,:]) for i in range(len(X_band_1))])\nX_band_3 = np.array([getHigh(X_band_3[i,:,:]) for i in range(len(X_band_1))])\n\nX_train = np.concatenate(\n [X_band_1[:, :, :, np.newaxis], X_band_2[:, :, :, np.newaxis], X_band_3[:, :, :, np.newaxis]],\n #[X_band_1[:, :, :, np.newaxis], X_band_2[:, :, :, np.newaxis], ((X_band_1+X_band_2)/2)[:, :, :, np.newaxis]],\n axis=-1)\n\nX_train = X_train[idx_meaningful[0], ...]\n\n# Flips\ndef get_more_images(imgs):\n more_images = []\n vert_flip_imgs = []\n hori_flip_imgs = []\n\n for i in range(0, imgs.shape[0]):\n a = imgs[i, :, :, 0]\n b = imgs[i, :, :, 1]\n c = imgs[i, :, :, 2]\n\n av = cv2.flip(a, 1)\n ah = cv2.flip(a, 0)\n bv = cv2.flip(b, 1)\n bh = cv2.flip(b, 0)\n cv = cv2.flip(c, 1)\n ch = cv2.flip(c, 0)\n\n vert_flip_imgs.append(np.dstack((av, bv, cv)))\n hori_flip_imgs.append(np.dstack((ah, bh, ch)))\n\n v = np.array(vert_flip_imgs)\n h = np.array(hori_flip_imgs)\n\n more_images = np.concatenate((imgs, v, h))\n\n return more_images\n\n\n# Import Keras.\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D, Dense, 
Dropout, Input, Flatten, Activation\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, Callback, EarlyStopping\n\n# define our model\ndef getModel():\n dropoutRate = 0.2\n # Building the model\n gmodel = Sequential()\n # Conv Layer 1\n gmodel.add(Conv2D(64, kernel_size=(3, 3), input_shape=(75, 75, 3)))\n gmodel.add(BatchNormalization(axis=-1))\n gmodel.add(Activation('relu'))\n gmodel.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\n gmodel.add(Dropout(dropoutRate))\n\n # Conv Layer 2\n gmodel.add(Conv2D(128, kernel_size=(3, 3)))\n gmodel.add(BatchNormalization(axis=-1))\n gmodel.add(Activation('relu'))\n gmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n gmodel.add(Dropout(dropoutRate))\n\n # Conv Layer 3\n gmodel.add(Conv2D(128, kernel_size=(3, 3)))\n gmodel.add(BatchNormalization(axis=-1))\n gmodel.add(Activation('relu'))\n gmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n gmodel.add(Dropout(dropoutRate))\n\n # Conv Layer 4\n gmodel.add(Conv2D(64, kernel_size=(3, 3)))\n gmodel.add(BatchNormalization(axis=-1))\n gmodel.add(Activation('relu'))\n gmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n gmodel.add(Dropout(dropoutRate))\n\n # Flatten the data for upcoming dense layers\n gmodel.add(Flatten())\n\n # Dense Layers\n gmodel.add(Dense(512))\n gmodel.add(BatchNormalization())\n gmodel.add(Activation('relu'))\n gmodel.add(Dropout(dropoutRate))\n\n # Dense Layer 2\n gmodel.add(Dense(256))\n gmodel.add(BatchNormalization())\n gmodel.add(Activation('relu'))\n gmodel.add(Dropout(dropoutRate))\n\n # Sigmoid Layer\n gmodel.add(Dense(1))\n gmodel.add(Activation('sigmoid'))\n\n mypotim = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n gmodel.compile(loss='binary_crossentropy',\n optimizer=mypotim,\n metrics=['accuracy'])\n gmodel.summary()\n return gmodel\n\n\ndef get_callbacks(filepath, patience=2):\n es = EarlyStopping('val_loss', patience=patience, mode=\"min\")\n msave = ModelCheckpoint(filepath, save_best_only=True)\n # step_size = 2-8 x # of training iterations in an epoch = 2 * 113 = 226\n clr = CyclicLR(base_lr=0.001, max_lr=0.006, step_size=230., mode='exp_range',\n gamma=0.99994) # initial step_size = 2000\n return [es, msave, clr]\n\nfile_path = \".model_weights_imgproc.hdf5\"\ncallbacks = get_callbacks(filepath=file_path, patience=5)\n\nY_train = train['is_iceberg']\nY_train = Y_train[idx_meaningful[0]]\n\nX_train_cv, X_valid, Y_train_cv, Y_valid = train_test_split(X_train, Y_train, random_state=int(time.time()), train_size=0.75)\n\nXtr_more = get_more_images(X_train_cv)\nYtr_more = np.concatenate((Y_train_cv, Y_train_cv, Y_train_cv))\n\nimport os\n\ngmodel = getModel()\ngmodel.fit(Xtr_more, Ytr_more, batch_size=32, epochs=50, verbose=1, validation_data=(X_valid, Y_valid),\n callbacks=callbacks)\n# gmodel.fit(X_train_cv, Y_train_cv, batch_size=24, epochs=50, verbose=1, validation_data=(X_valid, Y_valid), callbacks=callbacks)\n# gmodel.fit(Xtr_more, Ytr_more, batch_size=32, epochs=50, verbose=1, callbacks=callbacks, validation_split=0.25)\n\ngmodel.load_weights(filepath=file_path)\nscore = gmodel.evaluate(X_train_cv, Y_train_cv, verbose=1)\n\nprint('Train loss:', score[0])\nprint('Train accuracy:', score[1])\n\nscore = gmodel.evaluate(X_valid, Y_valid, verbose=1)\n\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n", "repo_name": "KIST-Iceberg/Iceberg", "sub_path": "old/learn_imgproc.py", "file_name": "learn_imgproc.py", 
"file_ext": "py", "file_size_in_byte": 6547, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "random.seed", "line_number": 18, "usage_type": "call"}, {"api_name": "time.time", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 19, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.fft.fft2", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.fft.fftshift", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.size", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.fft.ifftshift", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.power", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.fft.ifft2", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 63, "usage_type": "attribute"}, {"api_name": "cv2.flip", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 93, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 109, "usage_type": "call"}, {"api_name": 
"keras.layers.Conv2D", "line_number": 111, "usage_type": "call"}, {"api_name": "keras.layers.normalization.BatchNormalization", "line_number": 112, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 113, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 114, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 115, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 118, "usage_type": "call"}, {"api_name": "keras.layers.normalization.BatchNormalization", "line_number": 119, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 120, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 121, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 122, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 125, "usage_type": "call"}, {"api_name": "keras.layers.normalization.BatchNormalization", "line_number": 126, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 127, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 128, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 129, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 132, "usage_type": "call"}, {"api_name": "keras.layers.normalization.BatchNormalization", "line_number": 133, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 134, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 135, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 136, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 139, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 142, "usage_type": "call"}, {"api_name": "keras.layers.normalization.BatchNormalization", "line_number": 143, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 144, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 145, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 148, "usage_type": "call"}, {"api_name": "keras.layers.normalization.BatchNormalization", "line_number": 149, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 150, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 151, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 154, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 155, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 157, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 166, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 167, "usage_type": "call"}, {"api_name": "CyclicLR.CyclicLR", "line_number": 169, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 179, "usage_type": "call"}, {"api_name": "time.time", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 182, "usage_type": "call"}]} +{"seq_id": "23085768612", "text": "import argparse\nimport os\nimport itertools\nimport csv\n\nfrom gpapi.googleplay import GooglePlayAPI, RequestError\nfrom constants import Constants\n\ndef prep_app_list(server, apps_csv):\n # Keys for app data\n top_free_cat = 'apps_topselling_free'\n key_agg_rat = 
'aggregateRating'\n\n    app_limit = 10\n    categories = server.browse()[Constants.key_category]\n\n    with open(apps_csv, \"w\", newline=\"\") as writefile:\n        # Prepare csv columns\n        writer = csv.writer(writefile)\n        writer.writerow([Constants.key_pkg_name,\n                         Constants.key_app_name,\n                         Constants.key_developer,\n                         Constants.key_category,\n                         Constants.key_rating,\n                         Constants.key_num_rat,\n                         Constants.key_num_down])\n\n        # Pick the top 10 free apps from each category\n        for c in categories:\n            category = c[\"unknownCategoryContainer\"][\"categoryIdContainer\"][\"categoryId\"]\n            print(\"Fetching apps for category %s\" % category)\n\n            top_free = server.list(category, top_free_cat, app_limit)\n            for app in top_free:\n                print(\"\\t\" + app[Constants.key_pkg_name])\n\n                agg_rating = app[key_agg_rat]\n                downloads_line = app['details']['appDetails'][Constants.key_num_down]\n                downloads = int(downloads_line.split('+')[0].replace(\",\", \"\"))\n\n                row = [app[Constants.key_pkg_name],\n                       app[Constants.key_app_name],\n                       app[Constants.key_developer],\n                       category,\n                       agg_rating.get(Constants.key_rating, None),\n                       agg_rating.get(Constants.key_num_rat, None),\n                       downloads]\n                writer.writerow(row)\n\ndef download_apks(server, apps_file, output_dir):\n    if not os.path.exists(output_dir):\n        os.makedirs(output_dir)\n\n    with open(apps_file, \"r\", newline=\"\") as readfile:\n        # Get headers\n        reader = csv.reader(readfile, delimiter=',')\n        header_row = next(reader)\n        pkg_name_idx = header_row.index(Constants.key_pkg_name)\n\n        for row in reader:\n            pkg_name = row[pkg_name_idx]\n\n            # Download\n            print('Attempting to download %s' % pkg_name)\n            apk_file_path = output_dir + \"/\" + pkg_name + '.apk'\n            if os.path.isfile(apk_file_path):\n                print('\\tSkipping - already exists')\n                continue\n            fl = server.download(pkg_name)\n            with open(apk_file_path, 'wb') as apk_file:\n                for chunk in fl.get('file').get('data'):\n                    apk_file.write(chunk)\n            print('\\tDownload successful')\n\n\nif __name__ == '__main__':\n    ap = argparse.ArgumentParser(description=\"If you don't use the optional parameter `output_dir`,\" +\n                                 \" then this script creates a CSV file with a list of the top 10 free apps from each\" +\n                                 \" Google Play category. Otherwise, it downloads the APKs corresponding to apps in the\" +\n                                 \" provided `apps_csv` parameter.\")\n    ap.add_argument('gsf_id', type=int, help=\"gsfId from the prep_token_id script\")\n    ap.add_argument('auth_sub_token', help=\"authSubToken from the prep_token_id script\")\n    ap.add_argument('apps_csv', help='CSV file to write/read app data to/from')\n    ap.add_argument('--output_dir',\n                    help='Directory to which you want to save APKs corresponding to package names' +\n                         ' in `apps_csv`. 
Use this argument when ready to download APKs.')\n args = ap.parse_args()\n\n server = GooglePlayAPI(\"en_US\", \"UTC\")\n server.login(None, None, args.gsf_id, args.auth_sub_token)\n\n if not args.output_dir:\n prep_app_list(server, args.apps_csv)\n else:\n download_apks(server, args.apps_csv, args.output_dir)\n\n\n ", "repo_name": "nshuba/download_apks", "sub_path": "prep_apks.py", "file_name": "prep_apks.py", "file_ext": "py", "file_size_in_byte": 3951, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "21", "api": [{"api_name": "constants.Constants.key_category", "line_number": 15, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 15, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 19, "usage_type": "call"}, {"api_name": "constants.Constants.key_pkg_name", "line_number": 20, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 20, "usage_type": "name"}, {"api_name": "constants.Constants.key_app_name", "line_number": 21, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 21, "usage_type": "name"}, {"api_name": "constants.Constants.key_developer", "line_number": 22, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 22, "usage_type": "name"}, {"api_name": "constants.Constants.key_category", "line_number": 23, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 23, "usage_type": "name"}, {"api_name": "constants.Constants.key_rating", "line_number": 24, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 24, "usage_type": "name"}, {"api_name": "constants.Constants.key_num_rat", "line_number": 25, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 25, "usage_type": "name"}, {"api_name": "constants.Constants.key_num_down", "line_number": 26, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 26, "usage_type": "name"}, {"api_name": "constants.Constants.key_pkg_name", "line_number": 35, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 35, "usage_type": "name"}, {"api_name": "constants.Constants.key_num_down", "line_number": 38, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 38, "usage_type": "name"}, {"api_name": "constants.Constants.key_pkg_name", "line_number": 41, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 41, "usage_type": "name"}, {"api_name": "constants.Constants.key_app_name", "line_number": 42, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 42, "usage_type": "name"}, {"api_name": "constants.Constants.key_developer", "line_number": 43, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 43, "usage_type": "name"}, {"api_name": "constants.Constants.key_rating", "line_number": 45, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 45, "usage_type": "name"}, {"api_name": "constants.Constants.key_num_rat", "line_number": 46, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 46, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 52, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 56, "usage_type": "call"}, {"api_name": 
"constants.Constants.key_pkg_name", "line_number": 58, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 58, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 77, "usage_type": "call"}, {"api_name": "gpapi.googleplay.GooglePlayAPI", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "70529806452", "text": "\"\"\"Defines the class to manage bullet fired from the ship\"\"\"\n\nimport pygame\nfrom pygame.sprite import Sprite\n\n\nclass Bullet(Sprite):\n ''' a class to manage bullet fired from the ship'''\n\n def __init__(self, ai_game):\n ''' create a buller obkect at the ship's current position'''\n super(). __init__()\n self.screen = ai_game.screen\n self.settings = ai_game.settings\n self.color = ai_game.settings.bullet_color\n\n # create a bullet rect at (0,0) and set correct position\n self.rect = pygame.Rect(\n 0, 0, self.settings.bullet_width, self.settings.bullet_height)\n self.rect.midtop = ai_game.ship.rect.midtop\n\n # store the bullets pos as decimal\n self.y = float(self.rect.y)\n\n def update(self):\n \"\"\"updates the bullets position to show upward movement\"\"\"\n\n self.y -= self.settings.bullet_speed\n self.rect.y = self.y\n\n def draw_bullet(self):\n \"\"\"draws the bullet to the screen\"\"\"\n\n pygame.draw.rect(self.screen, self.color, self.rect)\n", "repo_name": "dead-fool/Alien-Invasion-Game", "sub_path": "bullet.py", "file_name": "bullet.py", "file_ext": "py", "file_size_in_byte": 1055, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pygame.sprite.Sprite", "line_number": 7, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 34, "usage_type": "attribute"}]} +{"seq_id": "42647125808", "text": "import math\nimport os\nimport random\nimport re\nfrom collections import namedtuple\nfrom itertools import combinations\n\nimport numpy as np\n\nfrom .pr2_never_collisions import NEVER_COLLISIONS\nfrom .utils import multiply, get_link_pose, set_joint_position, set_joint_positions, get_joint_positions, get_min_limit, get_max_limit, quat_from_euler, read_pickle, set_pose, \\\n get_pose, euler_from_quat, link_from_name, point_from_pose, invert, Pose, \\\n unit_pose, joints_from_names, PoseSaver, get_aabb, get_joint_limits, ConfSaver, get_bodies, create_mesh, remove_body, \\\n unit_from_theta, violates_limit, \\\n violates_limits, add_line, get_body_name, get_num_joints, approximate_as_cylinder, \\\n approximate_as_prism, unit_quat, unit_point, angle_between, quat_from_pose, compute_jacobian, \\\n movable_from_joints, quat_from_axis_angle, LockRenderer, Euler, get_links, get_link_name, \\\n get_extend_fn, get_moving_links, link_pairs_collision, get_link_subtree, \\\n clone_body, get_all_links, pairwise_collision, tform_point, get_camera_matrix, ray_from_pixel, pixel_from_ray, dimensions_from_camera_matrix, \\\n wrap_angle, TRANSPARENT, PI, OOBB, pixel_from_point, set_all_color, wait_if_gui\n\n# TODO: restrict number of pr2 rotations to prevent from wrapping too many times\n\nLEFT_ARM = 'left'\nRIGHT_ARM = 'right'\nARM_NAMES = (LEFT_ARM, RIGHT_ARM)\n\ndef side_from_arm(arm):\n side = arm.split('_')[0]\n assert side in ARM_NAMES\n return 
side\n\nside_from_gripper = side_from_arm\n\ndef arm_from_arm(arm): # TODO: deprecate\n side = side_from_arm(arm)\n assert (side in ARM_NAMES)\n return '{}_arm'.format(side)\n\narm_from_side = arm_from_arm\n\ndef gripper_from_arm(arm): # TODO: deprecate\n side = side_from_arm(arm)\n assert (side in ARM_NAMES)\n return '{}_gripper'.format(side)\n\ngripper_from_side = gripper_from_arm\n\n#####################################\n\nBASE_JOINTS = ['x', 'y', 'theta']\n\nPR2_GROUPS = {\n 'base': BASE_JOINTS,\n 'torso': ['torso_lift_joint'],\n 'head': ['head_pan_joint', 'head_tilt_joint'],\n arm_from_arm(LEFT_ARM): ['l_shoulder_pan_joint', 'l_shoulder_lift_joint', 'l_upper_arm_roll_joint',\n 'l_elbow_flex_joint', 'l_forearm_roll_joint', 'l_wrist_flex_joint', 'l_wrist_roll_joint'],\n arm_from_arm(RIGHT_ARM): ['r_shoulder_pan_joint', 'r_shoulder_lift_joint', 'r_upper_arm_roll_joint', \n 'r_elbow_flex_joint', 'r_forearm_roll_joint', 'r_wrist_flex_joint', 'r_wrist_roll_joint'],\n gripper_from_arm(LEFT_ARM): ['l_gripper_l_finger_joint', 'l_gripper_r_finger_joint',\n 'l_gripper_l_finger_tip_joint', 'l_gripper_r_finger_tip_joint'],\n gripper_from_arm(RIGHT_ARM): ['r_gripper_l_finger_joint', 'r_gripper_r_finger_joint',\n 'r_gripper_l_finger_tip_joint', 'r_gripper_r_finger_tip_joint'],\n # r_gripper_joint & l_gripper_joint are not mimicked\n}\n\nHEAD_LINK_NAME = 'high_def_optical_frame' # high_def_optical_frame | high_def_frame | wide_stereo_l_stereo_camera_frame\n# kinect - 'head_mount_kinect_rgb_optical_frame' | 'head_mount_kinect_rgb_link'\n\nPR2_TOOL_FRAMES = {\n LEFT_ARM: 'l_gripper_tool_frame', # l_gripper_palm_link | l_gripper_tool_frame\n RIGHT_ARM: 'r_gripper_tool_frame', # r_gripper_palm_link | r_gripper_tool_frame\n 'head': HEAD_LINK_NAME,\n}\n\n# TODO: deprecate to use the parent of the gripper joints\nPR2_GRIPPER_ROOTS = {\n LEFT_ARM: 'l_gripper_palm_link',\n RIGHT_ARM: 'r_gripper_palm_link',\n}\n\nPR2_BASE_LINK = 'base_footprint'\n\n# Arm tool poses\n#TOOL_POSE = ([0.18, 0., 0.], [0., 0.70710678, 0., 0.70710678]) # l_gripper_palm_link\nTOOL_POSE = Pose(euler=Euler(pitch=np.pi/2)) # l_gripper_tool_frame (+x out of gripper arm)\n#TOOL_DIRECTION = [0., 0., 1.]\n\n#####################################\n\n# Special configurations\n\nTOP_HOLDING_LEFT_ARM = [0.67717021, -0.34313199, 1.2, -1.46688405, 1.24223229, -1.95442826, 2.22254125]\nSIDE_HOLDING_LEFT_ARM = [0.39277395, 0.33330058, 0., -1.52238431, 2.72170996, -1.21946936, -2.98914779]\nREST_LEFT_ARM = [2.13539289, 1.29629967, 3.74999698, -0.15000005, 10000., -0.10000004, 10000.]\nWIDE_LEFT_ARM = [1.5806603449288885, -0.14239066980481405, 1.4484623937179126, -1.4851759349218694, 1.3911839347271555,\n -1.6531320011389408, -2.978586584568441]\nCENTER_LEFT_ARM = [-0.07133691252641006, -0.052973836083405494, 1.5741805775919033, -1.4481146328076862,\n 1.571782540186805, -1.4891468812835686, -9.413338322697955]\nSTRAIGHT_LEFT_ARM = np.zeros(7)\nCOMPACT_LEFT_ARM = [PI/4, 0., PI/2, -5*PI/8, PI/2, -PI/2, 5*PI/8] # TODO: generate programmatically\n\n#COMPACT_LEFT_ARM = [PI/4, 0., PI/2, -5*PI/8, -PI/2, -PI/2, 3*PI/8] # More inward\n#COMPACT_LEFT_ARM = [1*PI/8, 0., PI/2, -4*PI/8, -PI/2, -PI/2, 3*PI/8-PI/2] # Most inward\n\nCLEAR_LEFT_ARM = [PI/2, 0., PI/2, -PI/2, PI/2, -PI/2, 0.]\n# WIDE_RIGHT_ARM = [-1.3175723551150083, -0.09536552225976803, -1.396727055561703, -1.4433371993320296,\n# -1.5334243909312468, -1.7298129320065025, 6.230244924007009]\n\nPR2_LEFT_CARRY_CONFS = {\n 'top': TOP_HOLDING_LEFT_ARM,\n 'side': 
SIDE_HOLDING_LEFT_ARM,\n}\n\n#####################################\n\nPR2_URDF = \"models/pr2_description/pr2.urdf\" # 87 joints\n#PR2_URDF = \"models/pr2_description/pr2_hpn.urdf\"\n#PR2_URDF = \"models/pr2_description/pr2_kinect.urdf\"\nDRAKE_PR2_URDF = \"models/drake/pr2_description/urdf/pr2_simplified.urdf\" # 82 joints\n\ndef is_drake_pr2(robot): # 87\n return (get_body_name(robot) == 'pr2') and (get_num_joints(robot) == 82)\n\n#####################################\n\n# TODO: for when the PR2 is copied and loses it's joint names\n# PR2_JOINT_NAMES = []\n#\n# def set_pr2_joint_names(pr2):\n# for joint in get_joints(pr2):\n# PR2_JOINT_NAMES.append(joint)\n#\n# def get_pr2_joints(joint_names):\n# joint_from_name = dict(zip(PR2_JOINT_NAMES, range(len(PR2_JOINT_NAMES))))\n# return [joint_from_name[name] for name in joint_names]\n\n#####################################\n\ndef get_base_pose(pr2):\n return get_link_pose(pr2, link_from_name(pr2, PR2_BASE_LINK))\n\ndef rightarm_from_leftarm(config):\n right_from_left = np.array([-1, 1, -1, 1, -1, 1, -1])\n return config * right_from_left\n\ndef arm_conf(arm, left_config):\n side = side_from_arm(arm)\n if side == LEFT_ARM:\n return left_config\n elif side == RIGHT_ARM:\n return rightarm_from_leftarm(left_config)\n raise ValueError(side)\n\ndef get_carry_conf(arm, grasp_type):\n return arm_conf(arm, PR2_LEFT_CARRY_CONFS[grasp_type])\n\ndef get_other_arm(arm):\n for other_arm in ARM_NAMES:\n if other_arm != arm:\n return other_arm\n raise ValueError(arm)\n\n#####################################\n\ndef get_disabled_collisions(pr2):\n #disabled_names = PR2_ADJACENT_LINKS\n #disabled_names = PR2_DISABLED_COLLISIONS\n disabled_names = NEVER_COLLISIONS\n #disabled_names = PR2_DISABLED_COLLISIONS + NEVER_COLLISIONS\n link_mapping = {get_link_name(pr2, link): link for link in get_links(pr2)}\n return {(link_mapping[name1], link_mapping[name2])\n for name1, name2 in disabled_names if (name1 in link_mapping) and (name2 in link_mapping)}\n\n\ndef load_dae_collisions():\n # pr2-beta-static.dae: link 0 = base_footprint\n # pybullet: link -1 = base_footprint\n dae_file = 'models/pr2_description/pr2-beta-static.dae'\n dae_string = open(dae_file).read()\n link_regex = r'<\\s*link\\s+sid=\"(\\w+)\"\\s+name=\"(\\w+)\"\\s*>'\n link_mapping = dict(re.findall(link_regex, dae_string))\n ignore_regex = r'<\\s*ignore_link_pair\\s+link0=\"kmodel1/(\\w+)\"\\s+link1=\"kmodel1/(\\w+)\"\\s*/>'\n disabled_collisions = []\n for link1, link2 in re.findall(ignore_regex, dae_string):\n disabled_collisions.append((link_mapping[link1], link_mapping[link2]))\n return disabled_collisions\n\n\ndef load_srdf_collisions():\n srdf_file = 'models/pr2_description/pr2.srdf'\n srdf_string = open(srdf_file).read()\n regex = r'<\\s*disable_collisions\\s+link1=\"(\\w+)\"\\s+link2=\"(\\w+)\"\\s+reason=\"(\\w+)\"\\s*/>'\n disabled_collisions = []\n for link1, link2, reason in re.findall(regex, srdf_string):\n if reason == 'Never':\n disabled_collisions.append((link1, link2))\n return disabled_collisions\n\n#####################################\n\ndef get_groups():\n return sorted(PR2_GROUPS)\n\ndef get_group_joints(robot, group):\n return joints_from_names(robot, PR2_GROUPS[group])\n\ndef get_group_conf(robot, group):\n return get_joint_positions(robot, get_group_joints(robot, group))\n\n#get_group_position = get_group_conf\n\ndef set_group_conf(robot, group, positions):\n set_joint_positions(robot, get_group_joints(robot, group), positions)\n\ndef set_group_positions(robot, group_positions):\n 
for group, positions in group_positions.items():\n set_group_conf(robot, group, positions)\n\ndef get_group_positions(robot):\n return {group: get_group_conf(robot, group) for group in get_groups()}\n\n#get_group_confs = get_group_positions\n\n#####################################\n\n# End-effectors\n\ndef get_arm_joints(robot, arm):\n return get_group_joints(robot, arm_from_arm(arm))\n\n\ndef get_torso_arm_joints(robot, arm):\n return joints_from_names(robot, PR2_GROUPS['torso'] + PR2_GROUPS[arm_from_arm(arm)])\n\n\n#def get_arm_conf(robot, arm):\n# return get_joint_positions(robot, get_arm_joints(robot, arm))\n\n\ndef set_arm_conf(robot, arm, conf):\n set_joint_positions(robot, get_arm_joints(robot, arm), conf)\n\n\ndef get_gripper_link(robot, arm):\n assert arm in ARM_NAMES\n return link_from_name(robot, PR2_TOOL_FRAMES[arm])\n\n\n# def get_gripper_pose(robot):\n# # world_from_gripper * gripper_from_tool * tool_from_object = world_from_object\n# pose = multiply(get_link_pose(robot, link_from_name(robot, LEFT_ARM_LINK)), TOOL_POSE)\n# #pose = get_link_pose(robot, link_from_name(robot, LEFT_TOOL_NAME))\n# return pose\n\n\ndef get_gripper_joints(robot, arm):\n return get_group_joints(robot, gripper_from_arm(arm))\n\n\ndef set_gripper_position(robot, arm, position):\n gripper_joints = get_gripper_joints(robot, arm)\n set_joint_positions(robot, gripper_joints, [position] * len(gripper_joints))\n\n\ndef open_arm(robot, arm): # These are mirrored on the pr2\n for joint in get_gripper_joints(robot, arm):\n set_joint_position(robot, joint, get_max_limit(robot, joint))\n\n\ndef close_arm(robot, arm):\n for joint in get_gripper_joints(robot, arm):\n set_joint_position(robot, joint, get_min_limit(robot, joint))\n\n# TODO: use these names\nopen_gripper = open_arm\nclose_gripper = close_arm\n\n#####################################\n\n# Box grasps\n\n#GRASP_LENGTH = 0.04\nGRASP_LENGTH = 0.\n#GRASP_LENGTH = -0.01\n\n#MAX_GRASP_WIDTH = 0.07\nMAX_GRASP_WIDTH = np.inf\n\nSIDE_HEIGHT_OFFSET = 0.03 # z distance from top of object\n\ndef get_top_grasps(body, under=False, tool_pose=TOOL_POSE, body_pose=unit_pose(),\n max_width=MAX_GRASP_WIDTH, grasp_length=GRASP_LENGTH):\n # TODO: rename the box grasps\n center, (w, l, h) = approximate_as_prism(body, body_pose=body_pose)\n reflect_z = Pose(euler=[0, math.pi, 0])\n translate_z = Pose(point=[0, 0, h / 2 - grasp_length])\n translate_center = Pose(point=point_from_pose(body_pose)-center)\n grasps = []\n if w <= max_width:\n for i in range(1 + under):\n rotate_z = Pose(euler=[0, 0, math.pi / 2 + i * math.pi])\n grasps += [multiply(tool_pose, translate_z, rotate_z,\n reflect_z, translate_center, body_pose)]\n if l <= max_width:\n for i in range(1 + under):\n rotate_z = Pose(euler=[0, 0, i * math.pi])\n grasps += [multiply(tool_pose, translate_z, rotate_z,\n reflect_z, translate_center, body_pose)]\n return grasps\n\ndef get_side_grasps(body, under=False, tool_pose=TOOL_POSE, body_pose=unit_pose(),\n max_width=MAX_GRASP_WIDTH, grasp_length=GRASP_LENGTH, top_offset=SIDE_HEIGHT_OFFSET):\n # TODO: compute bounding box width wrt tool frame\n center, (w, l, h) = approximate_as_prism(body, body_pose=body_pose)\n translate_center = Pose(point=point_from_pose(body_pose)-center)\n grasps = []\n #x_offset = 0\n x_offset = h/2 - top_offset\n for j in range(1 + under):\n swap_xz = Pose(euler=[0, -math.pi / 2 + j * math.pi, 0])\n if w <= max_width:\n translate_z = Pose(point=[x_offset, 0, l / 2 - grasp_length])\n for i in range(2):\n rotate_z = Pose(euler=[math.pi / 2 + i * 
math.pi, 0, 0])\n grasps += [multiply(tool_pose, translate_z, rotate_z, swap_xz,\n translate_center, body_pose)] # , np.array([w])\n if l <= max_width:\n translate_z = Pose(point=[x_offset, 0, w / 2 - grasp_length])\n for i in range(2):\n rotate_z = Pose(euler=[i * math.pi, 0, 0])\n grasps += [multiply(tool_pose, translate_z, rotate_z, swap_xz,\n translate_center, body_pose)] # , np.array([l])\n return grasps\n\n#####################################\n\n# Cylinder grasps\n\ndef get_top_cylinder_grasps(body, tool_pose=TOOL_POSE, body_pose=unit_pose(),\n max_width=MAX_GRASP_WIDTH, grasp_length=GRASP_LENGTH):\n # Apply transformations right to left on object pose\n center, (diameter, height) = approximate_as_cylinder(body, body_pose=body_pose)\n reflect_z = Pose(euler=[0, math.pi, 0])\n translate_z = Pose(point=[0, 0, height / 2 - grasp_length])\n translate_center = Pose(point=point_from_pose(body_pose)-center)\n if max_width < diameter:\n return\n while True:\n theta = random.uniform(0, 2*np.pi)\n rotate_z = Pose(euler=[0, 0, theta])\n yield multiply(tool_pose, translate_z, rotate_z,\n reflect_z, translate_center, body_pose)\n\ndef get_side_cylinder_grasps(body, under=False, tool_pose=TOOL_POSE, body_pose=unit_pose(),\n max_width=MAX_GRASP_WIDTH, grasp_length=GRASP_LENGTH,\n top_offset=SIDE_HEIGHT_OFFSET):\n center, (diameter, height) = approximate_as_cylinder(body, body_pose=body_pose)\n translate_center = Pose(point_from_pose(body_pose)-center)\n #x_offset = 0\n x_offset = height/2 - top_offset\n if max_width < diameter:\n return\n while True:\n theta = random.uniform(0, 2*np.pi)\n translate_rotate = ([x_offset, 0, diameter / 2 - grasp_length], quat_from_euler([theta, 0, 0]))\n for j in range(1 + under):\n swap_xz = Pose(euler=[0, -math.pi / 2 + j * math.pi, 0])\n yield multiply(tool_pose, translate_rotate, swap_xz, translate_center, body_pose)\n\ndef get_edge_cylinder_grasps(body, under=False, tool_pose=TOOL_POSE, body_pose=unit_pose(),\n grasp_length=GRASP_LENGTH):\n center, (diameter, height) = approximate_as_cylinder(body, body_pose=body_pose)\n translate_yz = Pose(point=[0, diameter/2, height/2 - grasp_length])\n reflect_y = Pose(euler=[0, math.pi, 0])\n translate_center = Pose(point=point_from_pose(body_pose)-center)\n while True:\n theta = random.uniform(0, 2*np.pi)\n rotate_z = Pose(euler=[0, 0, theta])\n for i in range(1 + under):\n rotate_under = Pose(euler=[0, 0, i * math.pi])\n yield multiply(tool_pose, rotate_under, translate_yz, rotate_z,\n reflect_y, translate_center, body_pose)\n\n#####################################\n\n# Cylinder pushes\n\ndef get_cylinder_push(body, theta, under=False, body_quat=unit_quat(),\n tilt=0., base_offset=0.02, side_offset=0.03):\n body_pose = (unit_point(), body_quat)\n center, (diameter, height) = approximate_as_cylinder(body, body_pose=body_pose)\n translate_center = Pose(point=point_from_pose(body_pose)-center)\n tilt_gripper = Pose(euler=Euler(pitch=tilt))\n translate_x = Pose(point=[-diameter / 2 - side_offset, 0, 0]) # Compute as a function of theta\n translate_z = Pose(point=[0, 0, -height / 2 + base_offset])\n rotate_x = Pose(euler=Euler(yaw=theta))\n reflect_z = Pose(euler=Euler(pitch=math.pi))\n grasps = []\n for i in range(1 + under):\n rotate_z = Pose(euler=Euler(yaw=i * math.pi))\n grasps.append(multiply(tilt_gripper, translate_x, translate_z, rotate_x, rotate_z,\n reflect_z, translate_center, body_pose))\n return grasps\n\n#####################################\n\n# Button presses\n\nPRESS_OFFSET = 0.02\n\ndef get_x_presses(body, 
max_orientations=1, body_pose=unit_pose(), top_offset=PRESS_OFFSET):\n # gripper_from_object\n # TODO: update\n center, (w, _, h) = approximate_as_prism(body, body_pose=body_pose)\n translate_center = Pose(-center)\n press_poses = []\n for j in range(max_orientations):\n swap_xz = Pose(euler=[0, -math.pi / 2 + j * math.pi, 0])\n translate = Pose(point=[0, 0, w / 2 + top_offset])\n press_poses += [multiply(TOOL_POSE, translate, swap_xz, translate_center, body_pose)]\n return press_poses\n\ndef get_top_presses(body, tool_pose=TOOL_POSE, body_pose=unit_pose(), top_offset=PRESS_OFFSET, **kwargs):\n center, (_, height) = approximate_as_cylinder(body, body_pose=body_pose, **kwargs)\n reflect_z = Pose(euler=[0, math.pi, 0])\n translate_z = Pose(point=[0, 0, height / 2 + top_offset])\n translate_center = Pose(point=point_from_pose(body_pose)-center)\n while True:\n theta = random.uniform(0, 2*np.pi)\n rotate_z = Pose(euler=[0, 0, theta])\n yield multiply(tool_pose, translate_z, rotate_z,\n reflect_z, translate_center, body_pose)\n\nGET_GRASPS = {\n 'top': get_top_grasps,\n 'side': get_side_grasps,\n # 'press': get_x_presses,\n}\n# TODO: include approach/carry info\n\n#####################################\n\n# Inverse reachability\n\nDATABASES_DIR = '../databases'\nIR_FILENAME = '{}_{}_ir.pickle'\nIR_CACHE = {}\n\ndef get_database_file(filename):\n directory = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(directory, DATABASES_DIR, filename)\n\n\ndef load_inverse_reachability(arm, grasp_type):\n key = (arm, grasp_type)\n if key not in IR_CACHE:\n filename = IR_FILENAME.format(grasp_type, arm)\n path = get_database_file(filename)\n IR_CACHE[key] = read_pickle(path)['gripper_from_base']\n return IR_CACHE[key]\n\n\ndef learned_forward_generator(robot, base_pose, arm, grasp_type):\n gripper_from_base_list = list(load_inverse_reachability(arm, grasp_type))\n random.shuffle(gripper_from_base_list)\n for gripper_from_base in gripper_from_base_list:\n yield multiply(base_pose, invert(gripper_from_base))\n\n\ndef learned_pose_generator(robot, gripper_pose, arm, grasp_type):\n # TODO: record collisions with the reachability database\n gripper_from_base_list = load_inverse_reachability(arm, grasp_type)\n random.shuffle(gripper_from_base_list)\n #handles = []\n for gripper_from_base in gripper_from_base_list:\n base_point, base_quat = multiply(gripper_pose, gripper_from_base)\n x, y, _ = base_point\n _, _, theta = euler_from_quat(base_quat)\n base_values = (x, y, theta)\n #handles.extend(draw_point(np.array([x, y, -0.1]), color=(1, 0, 0), size=0.05))\n #set_base_values(robot, base_values)\n #yield get_pose(robot)\n yield base_values\n\n#####################################\n\n# Camera\n\n# TODO: this is only for high_def_optical_frame\nMAX_VISUAL_DISTANCE = 5.0\nMAX_KINECT_DISTANCE = 2.5\n\nPR2_CAMERA_MATRIX = get_camera_matrix(\n width=640, height=480, fx=772.55, fy=772.5)\n\ndef get_pr2_view_section(z, camera_matrix=None):\n if camera_matrix is None:\n camera_matrix = PR2_CAMERA_MATRIX\n width, height = dimensions_from_camera_matrix(camera_matrix)\n pixels = [(0, 0), (width, height)]\n return [z*ray_from_pixel(camera_matrix, p) for p in pixels]\n\ndef get_pr2_field_of_view(**kwargs):\n # TODO: deprecate\n z = 1\n view_lower, view_upper = get_pr2_view_section(z=z, **kwargs)\n horizontal = angle_between([view_lower[0], 0, z],\n [view_upper[0], 0, z]) # 0.7853966439794928\n vertical = angle_between([0, view_lower[1], z],\n [0, view_upper[1], z]) # 0.6024511557247721\n return horizontal, 
vertical\n\ndef is_visible_point(camera_matrix, depth, point_world, camera_pose=unit_pose()):\n point_camera = tform_point(invert(camera_pose), point_world)\n if not (0 <= point_camera[2] < depth):\n return False\n pixel = pixel_from_point(camera_matrix, point_camera)\n return pixel is not None\n\ndef is_visible_aabb(aabb, **kwargs):\n # TODO: do intersect as well for identifying new obstacles\n body_lower, body_upper = aabb\n z = body_lower[2]\n if z < 0:\n return False\n view_lower, view_upper = get_pr2_view_section(z, **kwargs)\n # TODO: bounding box methods?\n return not (np.any(body_lower[:2] < view_lower[:2]) or\n np.any(view_upper[:2] < body_upper[:2]))\n\ndef support_from_aabb(aabb, near=True):\n lower, upper = aabb\n min_x, min_y, min_z = lower\n max_x, max_y, max_z = upper\n z = min_z if near else max_z\n return [(min_x, min_y, z), (min_x, max_y, z),\n (max_x, max_y, z), (max_x, min_y, z)]\n\n#####################################\n\ndef cone_vertices_from_base(base):\n return [np.zeros(3)] + base\n\ndef cone_wires_from_support(support, cone_only=True):\n #vertices = cone_vertices_from_base(support)\n # TODO: could obtain from cone_mesh_from_support\n # TODO: could also just return vertices and indices\n apex = np.zeros(3)\n lines = []\n for vertex in support:\n lines.append((apex, vertex))\n if cone_only:\n for i, v2 in enumerate(support):\n v1 = support[i-1]\n lines.append((v1, v2))\n else:\n for v1, v2 in combinations(support, 2):\n lines.append((v1, v2))\n center = np.average(support, axis=0)\n lines.append((apex, center))\n return lines\n\ndef cone_mesh_from_support(support):\n assert(len(support) == 4)\n vertices = cone_vertices_from_base(support)\n faces = [(1, 4, 3), (1, 3, 2)]\n for i in range(len(support)):\n index1 = 1+i\n index2 = 1+(i+1)%len(support)\n faces.append((0, index1, index2))\n return vertices, faces\n\ndef get_viewcone_base(depth=MAX_VISUAL_DISTANCE, camera_matrix=None):\n if camera_matrix is None:\n camera_matrix = PR2_CAMERA_MATRIX\n width, height = dimensions_from_camera_matrix(camera_matrix)\n vertices = []\n for pixel in [(0, 0), (width, 0), (width, height), (0, height)]:\n ray = depth * ray_from_pixel(camera_matrix, pixel)\n vertices.append(ray[:3])\n return vertices\n\ndef get_viewcone(depth=MAX_VISUAL_DISTANCE, camera_matrix=None, **kwargs):\n mesh = cone_mesh_from_support(get_viewcone_base(\n depth=depth, camera_matrix=camera_matrix))\n assert (mesh is not None)\n return create_mesh(mesh, **kwargs)\n\ndef attach_viewcone(robot, head_name=HEAD_LINK_NAME, depth=MAX_VISUAL_DISTANCE,\n camera_matrix=None, color=(1, 0, 0), **kwargs):\n # TODO: head_name likely needs to have a visual geometry to attach\n head_link = link_from_name(robot, head_name)\n lines = []\n for v1, v2 in cone_wires_from_support(get_viewcone_base(\n depth=depth, camera_matrix=camera_matrix)):\n if is_optical(head_name):\n rotation = Pose()\n else:\n rotation = Pose(euler=Euler(roll=-np.pi/2, yaw=-np.pi/2)) # Apply in reverse order\n p1 = tform_point(rotation, v1)\n p2 = tform_point(rotation, v2)\n lines.append(add_line(p1, p2, color=color, parent=robot, parent_link=head_link, **kwargs))\n return lines\n\ndef draw_viewcone(pose=Pose(), depth=MAX_VISUAL_DISTANCE,\n camera_matrix=None, color=(1, 0, 0), **kwargs):\n # TODO: unify with attach_viewcone\n lines = []\n for v1, v2 in cone_wires_from_support(get_viewcone_base(\n depth=depth, camera_matrix=camera_matrix)):\n p1 = tform_point(pose, v1)\n p2 = tform_point(pose, v2)\n lines.append(add_line(p1, p2, color=color, **kwargs))\n return 
lines\n\n#####################################\n\ndef is_optical(link_name):\n return 'optical' in link_name\n\ndef inverse_visibility(pr2, point, head_name=HEAD_LINK_NAME, head_joints=None,\n max_iterations=100, step_size=0.5, tolerance=np.pi*1e-2, verbose=False):\n # https://github.com/PR2/pr2_controllers/blob/kinetic-devel/pr2_head_action/src/pr2_point_frame.cpp\n head_link = link_from_name(pr2, head_name)\n camera_axis = np.array([0, 0, 1]) if is_optical(head_name) else np.array([1, 0, 0])\n if head_joints is None:\n head_joints = joints_from_names(pr2, PR2_GROUPS['head'])\n # TODO: could also set the target orientation for inverse kinematics\n head_conf = np.zeros(len(head_joints))\n with LockRenderer(lock=True):\n with ConfSaver(pr2):\n for iteration in range(max_iterations):\n set_joint_positions(pr2, head_joints, head_conf)\n world_from_head = get_link_pose(pr2, head_link)\n point_head = tform_point(invert(world_from_head), point)\n error_angle = angle_between(camera_axis, point_head)\n if abs(error_angle) <= tolerance:\n break\n normal_head = np.cross(camera_axis, point_head)\n normal_world = tform_point((unit_point(), quat_from_pose(world_from_head)), normal_head)\n correction_quat = quat_from_axis_angle(normal_world, step_size*error_angle)\n correction_euler = euler_from_quat(correction_quat)\n _, angular = compute_jacobian(pr2, head_link)\n correction_conf = np.array([np.dot(angular[mj], correction_euler)\n for mj in movable_from_joints(pr2, head_joints)])\n if verbose:\n print('Iteration: {} | Error: {:.3f} | Correction: {}'.format(\n iteration, error_angle, correction_conf))\n head_conf += correction_conf\n #if debug:\n #wait_if_gui()\n if np.all(correction_conf == 0):\n return None\n else:\n return None\n if violates_limits(pr2, head_joints, head_conf):\n return None\n return head_conf\n\ndef plan_scan_path(pr2, tilt=0):\n head_joints = joints_from_names(pr2, PR2_GROUPS['head'])\n start_conf = get_joint_positions(pr2, head_joints)\n lower_limit, upper_limit = get_joint_limits(pr2, head_joints[0])\n\n first_conf = np.array([lower_limit, tilt])\n second_conf = np.array([upper_limit, tilt])\n if start_conf[0] > 0:\n first_conf, second_conf = second_conf, first_conf\n return [first_conf, second_conf]\n #return [start_conf, first_conf, second_conf]\n #third_conf = np.array([0, tilt])\n #return [start_conf, first_conf, second_conf, third_conf]\n\ndef plan_pause_scan_path(pr2, tilt=0):\n head_joints = joints_from_names(pr2, PR2_GROUPS['head'])\n assert(not violates_limit(pr2, head_joints[1], tilt))\n theta, _ = get_pr2_field_of_view()\n lower_limit, upper_limit = get_joint_limits(pr2, head_joints[0])\n # Add one because half visible on limits\n n = int(np.math.ceil((upper_limit - lower_limit) / theta) + 1)\n epsilon = 1e-3\n return [np.array([pan, tilt]) for pan in np.linspace(lower_limit + epsilon,\n upper_limit - epsilon, n, endpoint=True)]\n\n#####################################\n\nDetection = namedtuple('Detection', ['body', 'distance'])\n\ndef get_view_aabb(body, view_pose, **kwargs):\n with PoseSaver(body):\n body_view = multiply(invert(view_pose), get_pose(body))\n set_pose(body, body_view)\n return get_aabb(body, **kwargs)\n\ndef get_view_oobb(body, view_pose, **kwargs):\n return OOBB(get_view_aabb(body, view_pose, **kwargs), view_pose)\n\ndef get_detection_cone(pr2, body, camera_link=HEAD_LINK_NAME, depth=MAX_VISUAL_DISTANCE, **kwargs):\n head_link = link_from_name(pr2, camera_link)\n body_aabb = get_view_aabb(body, get_link_pose(pr2, head_link))\n lower_z = 
body_aabb[0][2]\n if depth < lower_z:\n return None, lower_z\n if not is_visible_aabb(body_aabb, **kwargs):\n return None, lower_z\n return cone_mesh_from_support(support_from_aabb(body_aabb)), lower_z\n\ndef get_detections(pr2, p_false_neg=0, camera_link=HEAD_LINK_NAME,\n exclude_links=set(), color=None, **kwargs):\n camera_pose = get_link_pose(pr2, link_from_name(pr2, camera_link))\n detections = []\n for body in get_bodies():\n if (pr2 == body) or (np.random.random() < p_false_neg):\n continue\n mesh, z = get_detection_cone(pr2, body, camera_link=camera_link, **kwargs)\n if mesh is None:\n continue\n cone = create_mesh(mesh, color=color)\n set_pose(cone, camera_pose)\n if not any(pairwise_collision(cone, obst)\n for obst in set(get_bodies()) - {pr2, body, cone}) \\\n and not any(link_pairs_collision(pr2, [link], cone)\n for link in set(get_all_links(pr2)) - exclude_links):\n detections.append(Detection(body, z))\n #wait_if_gui()\n remove_body(cone)\n return detections\n\ndef get_visual_detections(pr2, **kwargs):\n return [body for body, _ in get_detections(pr2, depth=MAX_VISUAL_DISTANCE, **kwargs)]\n\ndef get_kinect_registrations(pr2, **kwargs):\n return [body for body, _ in get_detections(pr2, depth=MAX_KINECT_DISTANCE, **kwargs)]\n\n# TODO: Gaussian on resulting pose\n\n#####################################\n\n# TODO: base motion with stochastic final pose\n\ndef visible_base_generator(robot, target_point, base_range=(1., 1.), theta_range=(0., 0.)):\n while True:\n base_from_target = unit_from_theta(np.random.uniform(0., 2 * np.pi))\n look_distance = np.random.uniform(*base_range)\n base_xy = target_point[:2] - look_distance * base_from_target\n base_theta = np.math.atan2(base_from_target[1], base_from_target[0]) + np.random.uniform(*theta_range)\n base_q = np.append(base_xy, wrap_angle(base_theta))\n yield base_q\n\n\ndef get_base_extend_fn(robot):\n # TODO: rotate such that in field of view of the camera first\n # TODO: plan base movements while checking edge feasibility with camera\n raise NotImplementedError()\n\n#####################################\n\ndef close_until_collision(robot, gripper_joints, bodies=[], open_conf=None, closed_conf=None, num_steps=25, **kwargs):\n if not gripper_joints:\n return None\n if open_conf is None:\n open_conf = [get_max_limit(robot, joint) for joint in gripper_joints]\n if closed_conf is None:\n closed_conf = [get_min_limit(robot, joint) for joint in gripper_joints]\n resolutions = np.abs(np.array(open_conf) - np.array(closed_conf)) / num_steps\n extend_fn = get_extend_fn(robot, gripper_joints, resolutions=resolutions)\n close_path = [open_conf] + list(extend_fn(open_conf, closed_conf))\n collision_links = frozenset(get_moving_links(robot, gripper_joints))\n\n for i, conf in enumerate(close_path):\n set_joint_positions(robot, gripper_joints, conf)\n if any(pairwise_collision((robot, collision_links), body, **kwargs) for body in bodies):\n if i == 0:\n return None\n return close_path[i-1]\n return close_path[-1]\n #return None # False\n\ndef compute_grasp_width(robot, arm, body, grasp_pose, **kwargs):\n tool_link = get_gripper_link(robot, arm)\n tool_pose = get_link_pose(robot, tool_link)\n body_pose = multiply(tool_pose, grasp_pose)\n set_pose(body, body_pose)\n gripper_joints = get_gripper_joints(robot, arm)\n return close_until_collision(robot, gripper_joints, bodies=[body], **kwargs)\n\n\ndef create_gripper(robot, arm, visual=True):\n link_name = PR2_GRIPPER_ROOTS[arm]\n # gripper = load_pybullet(os.path.join(get_data_path(), 
'pr2_gripper.urdf'))\n # gripper = load_pybullet(os.path.join(get_models_path(), 'pr2_description/pr2_l_gripper.urdf'), fixed_base=False)\n # pybullet.error: Error receiving visual shape info for the DRAKE_PR2\n links = get_link_subtree(robot, link_from_name(robot, link_name))\n gripper = clone_body(robot, links=links, visual=False, collision=True) # TODO: joint limits\n if not visual:\n set_all_color(robot, TRANSPARENT)\n return gripper\n", "repo_name": "caelan/pybullet-planning", "sub_path": "pybullet_tools/pr2_utils.py", "file_name": "pr2_utils.py", "file_ext": "py", "file_size_in_byte": 32582, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 347, "dataset": "github-code", "pt": "21", "api": [{"api_name": "utils.Pose", "line_number": 87, "usage_type": "call"}, {"api_name": "utils.Euler", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 101, "usage_type": "call"}, {"api_name": "utils.PI", "line_number": 102, "usage_type": "name"}, {"api_name": "utils.PI", "line_number": 107, "usage_type": "name"}, {"api_name": "utils.get_body_name", "line_number": 124, "usage_type": "call"}, {"api_name": "utils.get_num_joints", "line_number": 124, "usage_type": "call"}, {"api_name": "utils.get_link_pose", "line_number": 142, "usage_type": "call"}, {"api_name": "utils.link_from_name", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 145, "usage_type": "call"}, {"api_name": "pr2_never_collisions.NEVER_COLLISIONS", "line_number": 170, "usage_type": "name"}, {"api_name": "utils.get_link_name", "line_number": 172, "usage_type": "call"}, {"api_name": "utils.get_links", "line_number": 172, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 183, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 186, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 196, "usage_type": "call"}, {"api_name": "utils.joints_from_names", "line_number": 207, "usage_type": "call"}, {"api_name": "utils.get_joint_positions", "line_number": 210, "usage_type": "call"}, {"api_name": "utils.set_joint_positions", "line_number": 215, "usage_type": "call"}, {"api_name": "utils.joints_from_names", "line_number": 235, "usage_type": "call"}, {"api_name": "utils.set_joint_positions", "line_number": 243, "usage_type": "call"}, {"api_name": "utils.link_from_name", "line_number": 248, "usage_type": "call"}, {"api_name": "utils.set_joint_positions", "line_number": 264, "usage_type": "call"}, {"api_name": "utils.set_joint_position", "line_number": 269, "usage_type": "call"}, {"api_name": "utils.get_max_limit", "line_number": 269, "usage_type": "call"}, {"api_name": "utils.set_joint_position", "line_number": 274, "usage_type": "call"}, {"api_name": "utils.get_min_limit", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 289, "usage_type": "attribute"}, {"api_name": "utils.unit_pose", "line_number": 293, "usage_type": "call"}, {"api_name": "utils.approximate_as_prism", "line_number": 296, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 297, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 297, "usage_type": "attribute"}, {"api_name": "utils.Pose", "line_number": 298, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 299, "usage_type": "call"}, {"api_name": "utils.point_from_pose", "line_number": 299, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 303, 
"usage_type": "call"}, {"api_name": "math.pi", "line_number": 303, "usage_type": "attribute"}, {"api_name": "utils.multiply", "line_number": 304, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 308, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 308, "usage_type": "attribute"}, {"api_name": "utils.multiply", "line_number": 309, "usage_type": "call"}, {"api_name": "utils.unit_pose", "line_number": 313, "usage_type": "call"}, {"api_name": "utils.approximate_as_prism", "line_number": 316, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 317, "usage_type": "call"}, {"api_name": "utils.point_from_pose", "line_number": 317, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 322, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 322, "usage_type": "attribute"}, {"api_name": "utils.Pose", "line_number": 324, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 326, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 326, "usage_type": "attribute"}, {"api_name": "utils.multiply", "line_number": 327, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 330, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 332, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 332, "usage_type": "attribute"}, {"api_name": "utils.multiply", "line_number": 333, "usage_type": "call"}, {"api_name": "utils.unit_pose", "line_number": 341, "usage_type": "call"}, {"api_name": "utils.approximate_as_cylinder", "line_number": 344, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 345, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 345, "usage_type": "attribute"}, {"api_name": "utils.Pose", "line_number": 346, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 347, "usage_type": "call"}, {"api_name": "utils.point_from_pose", "line_number": 347, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 351, "usage_type": "attribute"}, {"api_name": "utils.Pose", "line_number": 352, "usage_type": "call"}, {"api_name": "utils.multiply", "line_number": 353, "usage_type": "call"}, {"api_name": "utils.unit_pose", "line_number": 356, "usage_type": "call"}, {"api_name": "utils.approximate_as_cylinder", "line_number": 359, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 360, "usage_type": "call"}, {"api_name": "utils.point_from_pose", "line_number": 360, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 366, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 366, "usage_type": "attribute"}, {"api_name": "utils.quat_from_euler", "line_number": 367, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 369, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 369, "usage_type": "attribute"}, {"api_name": "utils.multiply", "line_number": 370, "usage_type": "call"}, {"api_name": "utils.unit_pose", "line_number": 372, "usage_type": "call"}, {"api_name": "utils.approximate_as_cylinder", "line_number": 374, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 375, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 376, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 376, "usage_type": "attribute"}, {"api_name": "utils.Pose", "line_number": 377, "usage_type": "call"}, {"api_name": "utils.point_from_pose", "line_number": 377, "usage_type": "call"}, {"api_name": "random.uniform", 
"line_number": 379, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 379, "usage_type": "attribute"}, {"api_name": "utils.Pose", "line_number": 380, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 382, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 382, "usage_type": "attribute"}, {"api_name": "utils.multiply", "line_number": 383, "usage_type": "call"}, {"api_name": "utils.unit_quat", "line_number": 390, "usage_type": "call"}, {"api_name": "utils.unit_point", "line_number": 392, "usage_type": "call"}, {"api_name": "utils.approximate_as_cylinder", "line_number": 393, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 394, "usage_type": "call"}, {"api_name": "utils.point_from_pose", "line_number": 394, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 395, "usage_type": "call"}, {"api_name": "utils.Euler", "line_number": 395, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 396, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 397, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 398, "usage_type": "call"}, {"api_name": "utils.Euler", "line_number": 398, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 399, "usage_type": "call"}, {"api_name": "utils.Euler", "line_number": 399, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 399, "usage_type": "attribute"}, {"api_name": "utils.Pose", "line_number": 402, "usage_type": "call"}, {"api_name": "utils.Euler", "line_number": 402, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 402, "usage_type": "attribute"}, {"api_name": "utils.multiply", "line_number": 403, "usage_type": "call"}, {"api_name": "utils.unit_pose", "line_number": 413, "usage_type": "call"}, {"api_name": "utils.approximate_as_prism", "line_number": 416, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 417, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 420, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 420, "usage_type": "attribute"}, {"api_name": "utils.Pose", "line_number": 421, "usage_type": "call"}, {"api_name": "utils.multiply", "line_number": 422, "usage_type": "call"}, {"api_name": "utils.unit_pose", "line_number": 425, "usage_type": "call"}, {"api_name": "utils.approximate_as_cylinder", "line_number": 426, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 427, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 427, "usage_type": "attribute"}, {"api_name": "utils.Pose", "line_number": 428, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 429, "usage_type": "call"}, {"api_name": "utils.point_from_pose", "line_number": 429, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 431, "usage_type": "attribute"}, {"api_name": "utils.Pose", "line_number": 432, "usage_type": "call"}, {"api_name": "utils.multiply", "line_number": 433, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 452, "usage_type": "call"}, {"api_name": "os.path", "line_number": 452, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 452, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 453, "usage_type": "call"}, {"api_name": "os.path", "line_number": 453, "usage_type": "attribute"}, {"api_name": "utils.read_pickle", "line_number": 461, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 467, 
"usage_type": "call"}, {"api_name": "utils.multiply", "line_number": 469, "usage_type": "call"}, {"api_name": "utils.invert", "line_number": 469, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 475, "usage_type": "call"}, {"api_name": "utils.multiply", "line_number": 478, "usage_type": "call"}, {"api_name": "utils.euler_from_quat", "line_number": 480, "usage_type": "call"}, {"api_name": "utils.get_camera_matrix", "line_number": 495, "usage_type": "call"}, {"api_name": "utils.dimensions_from_camera_matrix", "line_number": 501, "usage_type": "call"}, {"api_name": "utils.ray_from_pixel", "line_number": 503, "usage_type": "call"}, {"api_name": "utils.angle_between", "line_number": 509, "usage_type": "call"}, {"api_name": "utils.angle_between", "line_number": 511, "usage_type": "call"}, {"api_name": "utils.unit_pose", "line_number": 515, "usage_type": "call"}, {"api_name": "utils.tform_point", "line_number": 516, "usage_type": "call"}, {"api_name": "utils.invert", "line_number": 516, "usage_type": "call"}, {"api_name": "utils.pixel_from_point", "line_number": 519, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 530, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 531, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 544, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 550, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 559, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 561, "usage_type": "call"}, {"api_name": "utils.dimensions_from_camera_matrix", "line_number": 578, "usage_type": "call"}, {"api_name": "utils.ray_from_pixel", "line_number": 581, "usage_type": "call"}, {"api_name": "utils.create_mesh", "line_number": 589, "usage_type": "call"}, {"api_name": "utils.link_from_name", "line_number": 594, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 599, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 601, "usage_type": "call"}, {"api_name": "utils.Euler", "line_number": 601, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 601, "usage_type": "attribute"}, {"api_name": "utils.tform_point", "line_number": 602, "usage_type": "call"}, {"api_name": "utils.tform_point", "line_number": 603, "usage_type": "call"}, {"api_name": "utils.add_line", "line_number": 604, "usage_type": "call"}, {"api_name": "utils.Pose", "line_number": 607, "usage_type": "call"}, {"api_name": "utils.tform_point", "line_number": 613, "usage_type": "call"}, {"api_name": "utils.tform_point", "line_number": 614, "usage_type": "call"}, {"api_name": "utils.add_line", "line_number": 615, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 624, "usage_type": "attribute"}, {"api_name": "utils.link_from_name", "line_number": 626, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 627, "usage_type": "call"}, {"api_name": "utils.joints_from_names", "line_number": 629, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 631, "usage_type": "call"}, {"api_name": "utils.LockRenderer", "line_number": 632, "usage_type": "call"}, {"api_name": "utils.ConfSaver", "line_number": 633, "usage_type": "call"}, {"api_name": "utils.set_joint_positions", "line_number": 635, "usage_type": "call"}, {"api_name": "utils.get_link_pose", "line_number": 636, "usage_type": "call"}, {"api_name": "utils.tform_point", "line_number": 637, "usage_type": "call"}, {"api_name": "utils.invert", "line_number": 637, "usage_type": "call"}, {"api_name": 
"utils.angle_between", "line_number": 638, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 641, "usage_type": "call"}, {"api_name": "utils.tform_point", "line_number": 642, "usage_type": "call"}, {"api_name": "utils.unit_point", "line_number": 642, "usage_type": "call"}, {"api_name": "utils.quat_from_pose", "line_number": 642, "usage_type": "call"}, {"api_name": "utils.quat_from_axis_angle", "line_number": 643, "usage_type": "call"}, {"api_name": "utils.euler_from_quat", "line_number": 644, "usage_type": "call"}, {"api_name": "utils.compute_jacobian", "line_number": 645, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 646, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 646, "usage_type": "call"}, {"api_name": "utils.movable_from_joints", "line_number": 647, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 654, "usage_type": "call"}, {"api_name": "utils.violates_limits", "line_number": 658, "usage_type": "call"}, {"api_name": "utils.joints_from_names", "line_number": 663, "usage_type": "call"}, {"api_name": "utils.get_joint_positions", "line_number": 664, "usage_type": "call"}, {"api_name": "utils.get_joint_limits", "line_number": 665, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 667, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 668, "usage_type": "call"}, {"api_name": "utils.joints_from_names", "line_number": 677, "usage_type": "call"}, {"api_name": "utils.violates_limit", "line_number": 678, "usage_type": "call"}, {"api_name": "utils.get_joint_limits", "line_number": 680, "usage_type": "call"}, {"api_name": "numpy.math.ceil", "line_number": 682, "usage_type": "call"}, {"api_name": "numpy.math", "line_number": 682, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 684, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 684, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 689, "usage_type": "call"}, {"api_name": "utils.PoseSaver", "line_number": 692, "usage_type": "call"}, {"api_name": "utils.multiply", "line_number": 693, "usage_type": "call"}, {"api_name": "utils.invert", "line_number": 693, "usage_type": "call"}, {"api_name": "utils.get_pose", "line_number": 693, "usage_type": "call"}, {"api_name": "utils.set_pose", "line_number": 694, "usage_type": "call"}, {"api_name": "utils.get_aabb", "line_number": 695, "usage_type": "call"}, {"api_name": "utils.OOBB", "line_number": 698, "usage_type": "call"}, {"api_name": "utils.link_from_name", "line_number": 701, "usage_type": "call"}, {"api_name": "utils.get_link_pose", "line_number": 702, "usage_type": "call"}, {"api_name": "utils.get_link_pose", "line_number": 712, "usage_type": "call"}, {"api_name": "utils.link_from_name", "line_number": 712, "usage_type": "call"}, {"api_name": "utils.get_bodies", "line_number": 714, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 715, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 715, "usage_type": "attribute"}, {"api_name": "utils.create_mesh", "line_number": 720, "usage_type": "call"}, {"api_name": "utils.set_pose", "line_number": 721, "usage_type": "call"}, {"api_name": "utils.pairwise_collision", "line_number": 722, "usage_type": "call"}, {"api_name": "utils.get_bodies", "line_number": 723, "usage_type": "call"}, {"api_name": "utils.link_pairs_collision", "line_number": 724, "usage_type": "call"}, {"api_name": "utils.get_all_links", "line_number": 725, "usage_type": "call"}, 
{"api_name": "utils.remove_body", "line_number": 728, "usage_type": "call"}, {"api_name": "utils.unit_from_theta", "line_number": 745, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 745, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 745, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 745, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 746, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 746, "usage_type": "attribute"}, {"api_name": "numpy.math.atan2", "line_number": 748, "usage_type": "call"}, {"api_name": "numpy.math", "line_number": 748, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 748, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 748, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 749, "usage_type": "call"}, {"api_name": "utils.wrap_angle", "line_number": 749, "usage_type": "call"}, {"api_name": "utils.get_max_limit", "line_number": 764, "usage_type": "call"}, {"api_name": "utils.get_min_limit", "line_number": 766, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 767, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 767, "usage_type": "call"}, {"api_name": "utils.get_extend_fn", "line_number": 768, "usage_type": "call"}, {"api_name": "utils.get_moving_links", "line_number": 770, "usage_type": "call"}, {"api_name": "utils.set_joint_positions", "line_number": 773, "usage_type": "call"}, {"api_name": "utils.pairwise_collision", "line_number": 774, "usage_type": "call"}, {"api_name": "utils.get_link_pose", "line_number": 783, "usage_type": "call"}, {"api_name": "utils.multiply", "line_number": 784, "usage_type": "call"}, {"api_name": "utils.set_pose", "line_number": 785, "usage_type": "call"}, {"api_name": "utils.get_link_subtree", "line_number": 795, "usage_type": "call"}, {"api_name": "utils.link_from_name", "line_number": 795, "usage_type": "call"}, {"api_name": "utils.clone_body", "line_number": 796, "usage_type": "call"}, {"api_name": "utils.set_all_color", "line_number": 798, "usage_type": "call"}, {"api_name": "utils.TRANSPARENT", "line_number": 798, "usage_type": "argument"}]} +{"seq_id": "21838267831", "text": "\"\"\"\nURL configuration for Car project.\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom Car_rent import views\n\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('Car_rent/', include('Car_rent.urls', namespace='Car_rent')),\n path('customer/<int:pk>/', views.CustomerDetailView.as_view(), name='customer-detail'),\n path('customer/<int:pk>/update/', views.CustomerUpdateView.as_view(), name='customer-update'), # must differ from the detail route above, or Django never matches it\n path('customers/<int:customer_id>/delete/', views.delete_customer, name='delete_customer'),\n]\n", "repo_name": "mohamed-baio/Car_rental", "sub_path": "Car/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1149, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "Car_rent.views.CustomerDetailView.as_view", "line_number": 26, "usage_type": "call"}, {"api_name": "Car_rent.views.CustomerDetailView", "line_number": 26, "usage_type": "attribute"}, {"api_name": "Car_rent.views", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "Car_rent.views.CustomerUpdateView.as_view", "line_number": 27, "usage_type": "call"}, {"api_name": "Car_rent.views.CustomerUpdateView", "line_number": 27, "usage_type": "attribute"}, {"api_name": "Car_rent.views", "line_number": 27, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "Car_rent.views.delete_customer", "line_number": 28, "usage_type": "attribute"}, {"api_name": "Car_rent.views", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "18338666519", "text": "# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport numpy as np\nimport time\n\nimport spectral_clustering_fd as scfd\n\nfrom sklearn.cluster import SpectralClustering\n\nfrom sklearn.datasets import make_blobs\nimport random\nimport sklearn.metrics as sm\n\n@profile\ndef main():\n n_samples = 2048\n n_features = 16\n n_centers = 4\n\n X, labels_gt = make_blobs(n_samples=n_samples, n_features=n_features, centers=n_centers, cluster_std=1.0, center_box=(-2.0, 2.0), shuffle=True, random_state=None)\n\n print(\"Labels Ground Truth: \",labels_gt)\n\n\n print(\"initialization\")\n model_sketch = scfd.SpectralClusteringFD(n_clusters = n_centers,\n random_state=None,\n n_init=10,\n gamma=1., affinity='rbf',\n assign_labels='discretize',\n kernel_params=None, n_jobs=1\n )\n\n print(\"do fit with sketch SC model...\")\n start = time.time()\n labels_sketch = model_sketch.fit_predict(X)\n elapsed_time = time.time() - start\n print(\"...done\")\n\n print (\"elapsed_time for fit:{0}\".format(elapsed_time),\"[sec]\")\n print(\"Labels(sketch): \",labels_sketch)\n AMUI = sm.adjusted_mutual_info_score(labels_gt,labels_sketch)\n print(\"adjusted_mutual_info_score(sketch): \",AMUI)\n\n model_orig = SpectralClustering(n_clusters = n_centers,\n random_state=None,\n\n n_init=10,\n gamma=1., affinity='rbf',\n assign_labels='discretize',\n 
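# same hyperparameters as the sketch model above, so the timing comparison is like-for-like\n 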
kernel_params=None, n_jobs=1\n )\n print(\"do fit with original SC model...\")\n start = time.time()\n labels_orig = model_orig.fit_predict(X)\n elapsed_time = time.time() - start\n print(\"...done\")\n print (\"elapsed_time for fit:{0}\".format(elapsed_time),\"[sec]\")\n print(\"Labels(orig) : \",labels_orig)\n AMUI = sm.adjusted_mutual_info_score(labels_gt,labels_orig)\n print(\"adjusted_mutual_info_score(orig) : \",AMUI)\n\n \"\"\"\n ARANDi = sm.adjusted_rand_score(labels_gt,labels)\n print(\"adjusted_rand_index: \",ARANDi)\n MUI = sm.mutual_info_score(labels_gt,labels)\n print(\"mutual_info_score: \",MUI)\n #RANDi = sm.rand_score(data.target,labels)\n #print(\"rand_index: \",RANDi)\n \"\"\"\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "AtsushiHashimoto/SpectralClusteringFD", "sub_path": "example/sample_spectral_clustering_fd.py", "file_name": "sample_spectral_clustering_fd.py", "file_ext": "py", "file_size_in_byte": 2518, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sklearn.datasets.make_blobs", "line_number": 22, "usage_type": "call"}, {"api_name": "spectral_clustering_fd.SpectralClusteringFD", "line_number": 28, "usage_type": "call"}, {"api_name": "time.time", "line_number": 37, "usage_type": "call"}, {"api_name": "time.time", "line_number": 39, "usage_type": "call"}, {"api_name": "sklearn.metrics.adjusted_mutual_info_score", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 44, "usage_type": "name"}, {"api_name": "sklearn.cluster.SpectralClustering", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 56, "usage_type": "call"}, {"api_name": "time.time", "line_number": 58, "usage_type": "call"}, {"api_name": "sklearn.metrics.adjusted_mutual_info_score", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "7960689467", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 10 15:37:42 2021\n\n@author: eveshalom\n\"\"\"\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom scipy import ndimage as ndimage\n\ndef checkROIs(Ea,Ev):\n plt.rcParams.update({'font.size':18})\n fig,[ax1,ax2] = plt.subplots(1,2,figsize=(20,10))\n ax1.plot(Ea)\n ax2.plot(Ev)\n ax1.set(ylabel='Signal Enhancement',title='Middle Cerebral Arteries ETC')\n ax2.set(ylabel='Signal Enhancement',title='Sagittal Sinus ETC')\n return\n\ndef MedianFilter(paramMap):\n for i in range(paramMap.shape[-1]):\n paramMap[:,:,i] = ndimage.median_filter(paramMap[:,:,i], size=(3,3))\n return paramMap\n\ndef displaySlices(data3d,outdir,fname,cbarlabel):\n plt.rcParams.update({'font.size':22})\n title = fname.replace(\"_\", \" \")\n fig, axs = plt.subplots(nrows=4,ncols=4,sharey=True,sharex=True,figsize=(22,20))\n fig.subplots_adjust(wspace=0.01)\n fig.subplots_adjust(hspace=0.1)\n vmin=np.percentile(data3d,2)\n vmax=np.percentile(data3d,98)\n quad = []\n z=0\n for ax in axs.flatten():\n im=ax.imshow(data3d[:,:,z],vmin=vmin,vmax=vmax)\n quad.append(im)\n z+=1\n fig.subplots_adjust(right=0.8)\n cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])\n cb = fig.colorbar(im, cax=cbar_ax)\n cb.set_label('{}'.format(cbarlabel),fontsize=30)\n fig.suptitle(\" \\n {} \".format(title),fontsize=50)\n plt.savefig('{}/{}.png'.format(outdir,fname))\n return\n\ndef animateSingle(data,fname,fdir,datashape,dt,cbarlabel):\n from matplotlib.animation import FuncAnimation\n 
plt.rcParams.update({'font.size':22})\n title = fname.replace(\"_\", \" \")\n fig, axs = plt.subplots(nrows=4,ncols=4,sharey=True,sharex=True,figsize=(22,20))\n fig.subplots_adjust(wspace=0.01)\n fig.subplots_adjust(hspace=0.1)\n vmin=0\n vmax=np.percentile(data,98)#224\n quad = []\n for ax in axs.flatten():\n im=ax.imshow(data[:,:,0,0],vmin=vmin,vmax=vmax)\n quad.append(im)\n fig.subplots_adjust(right=0.8)\n cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])\n cb = fig.colorbar(im, cax=cbar_ax)\n cb.set_label('{}'.format(cbarlabel),fontsize=30)\n fig.suptitle(\" \\n {} at T = 0 mins\".format(title),fontsize=50)\n def init():\n z=0\n for pan in quad:\n pan.set_array(data[:,:,z,0])\n z+=1\n return quad\n \n def animate(i):\n z=0\n for pan in quad:\n pan.set_array(data[:,:,z,i])\n z+=1\n fig.suptitle(\" \\n {} at T = {:.2f} mins\".format(title,i*dt),fontsize=50)\n return quad\n \n anim = FuncAnimation(fig=fig, func=animate,frames=datashape[-1],init_func=init, interval=500, blit=True)\n anim.save('{}/{}.gif'.format(fdir,fname),writer='ffmpeg')\n return", "repo_name": "OSIPI/TF6.2_DCE-DSC-MRI_Challenges", "sub_path": "DRO_Production/plots_and_filters.py", "file_name": "plots_and_filters.py", "file_ext": "py", "file_size_in_byte": 2778, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 13, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "scipy.ndimage.median_filter", "line_number": 23, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 27, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.percentile", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 50, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.percentile", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "31909307726", "text": "from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nimport pandas as pd\nimport numpy as np\nfrom base_class.amazon_comment import AmazonComment\n\n# Create your views here.\n\n\ndef index(request):\n return render(request, 
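# landing page (index.html presumably holds the search form posted to result)\n 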
'index.html')\n\n\ndef result(request):\n comments = None\n url = request.POST.get('search_words', '')\n print('url', url)\n\n # fixed demo URL: overrides the submitted search term\n url = 'https://www.amazon.com/Kindle-Oasis-reader-High-Resolution-International/dp/B06XDFJJRS/ref=cm_cr_arp_d_product_top?ie=UTF8'\n\n ac = AmazonComment(url)\n df = ac.comments\n html = \"\"\"<!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"UTF-8\">\n <title>Comments</title>\n </head>\n <body>\n \"\"\" + \\\n df.to_html(escape=False)\\\n + \"\"\"\n </body>\n </html>
\n \n \n \"\"\"\n return HttpResponse(html.encode('utf-8'))\n\n", "repo_name": "YaxinCui/CommentsAnalysisTool", "sub_path": "suggestion/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1020, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.shortcuts.render", "line_number": 11, "usage_type": "call"}, {"api_name": "base_class.amazon_comment.AmazonComment", "line_number": 21, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "45702557104", "text": "import discord\nfrom discord.ext import commands\n\nimport os\nimport re\nfrom random import choice\nfrom .utils import checks\n\nclass Remarks(commands.Cog):\n\n \"\"\"ev0ked's Remarks Cog\"\"\"\n \n def __init__(self,bot):\n self.bot = bot\n self.insults = open(\"data/insults.txt\").read().splitlines()\n self.addquote_regex = re.compile(\"^'.+ - .+'$\", re.UNICODE)\n\n @commands.command(name='insult', aliases=['roast'], pass_context=True, no_pm=True)\n async def insult(self, ctx, user : discord.Member=None):\n \"\"\"Insult another user!\"\"\"\n\n msg = ' '\n if user != None:\n if user.id == self.bot.user.id:\n msg = \" How original. No one else had thought of trying to get the bot to insult itself. I applaud your creativity. Yawn. Perhaps this is why you don't have friends. You don't add anything new to any conversation. You are more of a bot than me, predictable answers, and absolutely dull to have an actual conversation with.\"\n await ctx.send(user.mention + msg)\n else:\n await ctx.send(user.mention + msg + choice(self.insults))\n else:\n await ctx.send(ctx.message.author.mention + msg + choice(self.insults))\n\n @commands.command(name='quote', pass_context=True, no_pm=True)\n async def quote(self, ctx):\n \"\"\"List a Quote!\"\"\"\n quote_list = open(\"data/quotes.txt\").read().splitlines()\n selected_quote = choice(quote_list)\n selected_quote = selected_quote[1:-1]\n quote, quote_author = selected_quote.split(' - ')\n \n embed = discord.Embed(colour=discord.Colour(0xc0c0c0), title=f'\"{quote}\"')\n embed.set_author(name=\"Quotes\", icon_url=\"https://invex.gg/images/discord/quote_icon_v2.png\")\n embed.set_footer(text = f\"- {quote_author}\")\n await ctx.send(embed=embed)\n \n @commands.command(hidden=True)\n @checks.is_owner()\n async def addquote(self, ctx, *, quote : str):\n \"\"\"Add Quote to list of quotes\"\"\"\n if not self.addquote_regex.match(quote):\n await ctx.send(\"`Quote must be in this format (including surrounding single quotes):\\n'some quote here - quote author'`\")\n else:\n with open(\"data/quotes.txt\", \"a\") as text_file:\n text_file.write(f\"{quote}\\n\")\n await ctx.send('`Quote added!`')\n\n @addquote.error\n async def create_handler(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"`You did not provide the '\" + error.param + \"' parameter.`\")\n \n # GENERIC SHITTY CHAT MEMES/COMMANDS\n\n @commands.command()\n async def dotheroar(self,ctx):\n \"\"\"Roooooar!\"\"\"\n await ctx.send('https://giphy.com/gifs/shrek-qFsHUsuBMQemQ')\n\n @commands.command()\n async def panic(self,ctx):\n \"\"\"SOUND THE ALARMS!@!\"\"\"\n await ctx.send(':rotating_light: EVERYTHING IS BROKEN :rotating_light:')\n await ctx.send(':rotating_light: CALL THE COPS :rotating_light:')\n await ctx.send(':rotating_light: SHUT DOWN EVERYTHING :rotating_light:')\n await ctx.send(':rotating_light: I NEED AN ADULT 
:rotating_light:')\n\n @commands.command()\n async def n8egirl(self,ctx):\n \"\"\"Approved by n8\"\"\"\n await ctx.send(\":man_in_tuxedo: n8's e-girl? What one?\")\n await ctx.send(':raising_hand: Elaina?')\n await ctx.send(':raising_hand: Darcye?')\n await ctx.send(':raising_hand: Sarah?')\n\n @commands.command(aliases=['ree'])\n async def reeeeee(self,ctx):\n \"\"\"reeeeee!\"\"\"\n await ctx.send(file=discord.File(open(\"data/images/ree.gif\", \"rb\")))\n\n @addquote.error\n async def generic_handler(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"`You did not provide the '\" + error.param + \"' parameter.`\")\ndef setup(bot):\n bot.add_cog(Remarks(bot))", "repo_name": "InvexGaming/Bot-Invex-Discord-Bot", "sub_path": "cogs/remarks.py", "file_name": "remarks.py", "file_ext": "py", "file_size_in_byte": 3926, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 9, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 9, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 16, "usage_type": "call"}, {"api_name": "re.UNICODE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "discord.Member", "line_number": 19, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 28, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 30, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 18, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 18, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 36, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 40, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 40, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 32, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 32, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 45, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 45, "usage_type": "name"}, {"api_name": "utils.checks.is_owner", "line_number": 46, "usage_type": "call"}, {"api_name": "utils.checks", "line_number": 46, "usage_type": "name"}, {"api_name": "discord.ext.commands.MissingRequiredArgument", "line_number": 58, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 58, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 63, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 63, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 68, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 68, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 76, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 76, "usage_type": "name"}, {"api_name": "discord.File", "line_number": 87, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 84, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 84, "usage_type": "name"}, {"api_name": "discord.ext.commands.MissingRequiredArgument", "line_number": 91, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 91, "usage_type": "name"}]} +{"seq_id": "1739384073", "text": "import requests\nfrom . 
import utils\nfrom posthog.models import operations, shared\nfrom typing import Optional\n\nclass PerformanceEvents:\n _client: requests.Session\n _security_client: requests.Session\n _server_url: str\n _language: str\n _sdk_version: str\n _gen_version: str\n\n def __init__(self, client: requests.Session, security_client: requests.Session, server_url: str, language: str, sdk_version: str, gen_version: str) -> None:\n self._client = client\n self._security_client = security_client\n self._server_url = server_url\n self._language = language\n self._sdk_version = sdk_version\n self._gen_version = gen_version\n\n \n def performance_events_list(self, request: operations.PerformanceEventsListRequest) -> operations.PerformanceEventsListResponse:\n base_url = self._server_url\n \n url = utils.generate_url(base_url, \"/api/projects/{project_id}/performance_events/\", request.path_params)\n \n query_params = utils.get_query_params(request.query_params)\n \n client = self._client\n \n r = client.request(\"GET\", url, params=query_params)\n content_type = r.headers.get(\"Content-Type\")\n\n res = operations.PerformanceEventsListResponse(status_code=r.status_code, content_type=content_type)\n \n if r.status_code == 200:\n if utils.match_content_type(content_type, \"application/json\"):\n out = utils.unmarshal_json(r.text, Optional[shared.PaginatedPerformanceEventList])\n res.paginated_performance_event_list = out\n\n return res\n\n \n def performance_events_recent_pageviews_retrieve(self, request: operations.PerformanceEventsRecentPageviewsRetrieveRequest) -> operations.PerformanceEventsRecentPageviewsRetrieveResponse:\n base_url = self._server_url\n \n url = utils.generate_url(base_url, \"/api/projects/{project_id}/performance_events/recent_pageviews/\", request.path_params)\n \n \n client = self._client\n \n r = client.request(\"GET\", url)\n content_type = r.headers.get(\"Content-Type\")\n\n res = operations.PerformanceEventsRecentPageviewsRetrieveResponse(status_code=r.status_code, content_type=content_type)\n \n if r.status_code == 200:\n if utils.match_content_type(content_type, \"application/json\"):\n out = utils.unmarshal_json(r.text, Optional[shared.PerformanceEvent])\n res.performance_event = out\n\n return res\n\n ", "repo_name": "speakeasy-sdks/posthog-python-sdk", "sub_path": "src/posthog/performance_events.py", "file_name": "performance_events.py", "file_ext": "py", "file_size_in_byte": 2546, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "requests.Session", "line_number": 7, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 8, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 14, "usage_type": "attribute"}, {"api_name": "posthog.models.operations.PerformanceEventsListRequest", "line_number": 23, "usage_type": "attribute"}, {"api_name": "posthog.models.operations", "line_number": 23, "usage_type": "name"}, {"api_name": "posthog.models.operations.PerformanceEventsListResponse", "line_number": 35, "usage_type": "call"}, {"api_name": "posthog.models.operations", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 39, "usage_type": "name"}, {"api_name": "posthog.models.shared.PaginatedPerformanceEventList", "line_number": 39, "usage_type": "attribute"}, {"api_name": "posthog.models.shared", "line_number": 39, "usage_type": "name"}, {"api_name": "posthog.models.operations.PerformanceEventsListResponse", "line_number": 23, 
"usage_type": "attribute"}, {"api_name": "posthog.models.operations.PerformanceEventsRecentPageviewsRetrieveRequest", "line_number": 45, "usage_type": "attribute"}, {"api_name": "posthog.models.operations", "line_number": 45, "usage_type": "name"}, {"api_name": "posthog.models.operations.PerformanceEventsRecentPageviewsRetrieveResponse", "line_number": 56, "usage_type": "call"}, {"api_name": "posthog.models.operations", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 60, "usage_type": "name"}, {"api_name": "posthog.models.shared.PerformanceEvent", "line_number": 60, "usage_type": "attribute"}, {"api_name": "posthog.models.shared", "line_number": 60, "usage_type": "name"}, {"api_name": "posthog.models.operations.PerformanceEventsRecentPageviewsRetrieveResponse", "line_number": 45, "usage_type": "attribute"}]} +{"seq_id": "40170658010", "text": "import copy\nimport os\nimport random\nimport time\nfrom typing import Tuple\n\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn.functional as F\nfrom sklearn.decomposition import PCA\nfrom torch import nn\nfrom torch.optim.lr_scheduler import LambdaLR\n\nfrom data import LABELED_DATASETS, LabeledSubdataset\nfrom models.images.classification.backbones import NoFlatteningBackbone\nfrom models.images.classification.few_shot_learning import evaluate_solution_episodes, accuracy, FSLEpisodeSampler, \\\n FEATURE_EXTRACTORS, FSLEpisodeSamplerGlobalLabels, FitTransformFewShotLearningSolution\nfrom sessions import Session\nfrom torch_utils import flip_dimension\nfrom utils import pretty_time, remove_dim, inverse_mapping\nfrom visualization.plots import PlotterWindow\n\nMAX_BATCH_SIZE = 500\n\n\nclass ScaleModule(nn.Module):\n def __init__(self, in_features, map_size):\n super(ScaleModule, self).__init__()\n self.in_features = in_features\n self.conv = nn.Conv2d(in_channels=self.in_features, out_channels=1, kernel_size=3)\n self.bn = nn.BatchNorm2d(1, eps=2e-5)\n self.relu = nn.ReLU()\n\n self.fc = nn.Linear((map_size - 2) ** 2, 1)\n self.sp = nn.Softplus()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n x = x.reshape(x.size(0), -1)\n x = self.fc(x)\n x = self.sp(x)\n\n return x\n\n\ndef lr_schedule(iter: int):\n if iter >= 30000:\n return 0.0012\n elif iter >= 20000:\n return 0.006\n else:\n return 0.1\n\n\nclass MCTDFMN(FitTransformFewShotLearningSolution):\n def __init__(self, train_classes: int, backbone: NoFlatteningBackbone, train_transduction_steps=1,\n test_transduction_steps=10, lmb=0.2, all_global_prototypes=True, scaling=True,\n pca=False, extend_input=False,\n device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")):\n super(MCTDFMN, self).__init__()\n self.n_classes = None\n self.support_set_size = None\n self.support_set_features = None\n self.class_prototypes = None\n self.query_set_features = None\n self.query_set_size = None\n self.device = device\n\n self.scaling = scaling\n self.pca = pca\n self.pca_transformer = PCA()\n\n self.extend_input = extend_input\n\n self.train_classes = train_classes\n self.all_global_prototypes = all_global_prototypes\n\n self.feature_extractor = backbone\n self.featmap_size = backbone.output_featmap_size()\n self.featmap_size2 = self.featmap_size ** 2\n self.scale_module = ScaleModule(backbone.output_features(), self.featmap_size)\n self.global_proto = nn.Linear(in_features=backbone.output_features(), out_features=train_classes)\n\n nn.init.xavier_uniform_(self.global_proto.weight)\n\n self.train_ts = 
train_transduction_steps\n self.test_ts = test_transduction_steps\n\n self.loss_fn = nn.CrossEntropyLoss()\n self.lmb = lmb\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in', nonlinearity='conv2d')\n try:\n nn.init.constant_(m.bias, 0)\n except AttributeError as e:\n pass\n\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def extract_features(self, batch: torch.Tensor) -> torch.Tensor:\n # print(batch.size())\n minibatches = batch.split(split_size=MAX_BATCH_SIZE)\n # print(minibatches)\n xs = []\n for minibatch in minibatches:\n xs.append(self.feature_extractor(minibatch))\n # print(len(xs))\n # print(xs[-1].size())\n x = torch.cat(xs)\n # print(x.size())\n return x\n\n def build_prototypes(self, support_set: torch.Tensor, query_set: torch.Tensor = None):\n its = self.train_ts if self.training else self.test_ts\n self.class_prototypes = torch.mean(support_set, dim=1)\n if query_set is not None:\n for i in range(its):\n self.class_prototypes = self.update_prototypes(support_set, query_set)\n\n def distance(self, a: torch.Tensor, b: torch.Tensor):\n if not hasattr(self, 'scaling') or self.scaling:\n a_scale = self.scale_module(a)\n b_scale = self.scale_module(b)\n else:\n a_scale = 1\n b_scale = 1\n\n a = a.reshape(a.size(0), -1)\n b = b.reshape(b.size(0), -1)\n\n a = F.normalize(a, dim=1)\n b = F.normalize(b, dim=1)\n a = torch.div(a, a_scale)\n b = torch.div(b, b_scale)\n return (a - b).pow(2).sum(dim=1)\n\n def l2_distance(self, a: torch.Tensor, b: torch.Tensor):\n return (a - b).pow(2).sum(dim=1)\n\n def get_proba(self, query_set: torch.Tensor):\n return F.softmax(self.get_distances(query_set), dim=1)\n\n def get_distances(self, query_set: torch.Tensor):\n query_set_expanded = query_set.repeat_interleave(self.n_classes, dim=0)\n prototypes_expanded = self.class_prototypes.repeat(self.query_set_size, 1, 1, 1)\n distances = self.distance(query_set_expanded, prototypes_expanded)\n distances = torch.stack(distances.split(self.n_classes))\n return -distances\n\n def get_l2_distances(self, query_set: torch.Tensor, prototypes: torch.Tensor):\n cur_n_classes = prototypes.size(0)\n cur_query_set_size = query_set.size(0)\n\n query_set_expanded = query_set.repeat_interleave(cur_n_classes, dim=0)\n prototypes_expanded = prototypes.repeat(cur_query_set_size, 1)\n distances = self.l2_distance(query_set_expanded, prototypes_expanded)\n distances = torch.stack(distances.split(cur_n_classes))\n return -distances\n\n def update_prototypes(self, support_set: torch.Tensor, query_set: torch.Tensor):\n classes_denom = torch.tensor([self.support_set_size] * self.n_classes, device=query_set.device,\n dtype=torch.float)\n new_proto = torch.sum(support_set, dim=1)\n probas = self.get_proba(self.query_set_features)\n for cur_class in range(self.n_classes):\n class_probas = probas[:, cur_class].squeeze()\n classes_denom[cur_class] += class_probas.sum()\n new_proto[cur_class] += torch.mul(query_set, class_probas.unsqueeze(1).unsqueeze(1).unsqueeze(1).expand_as(\n query_set)).sum(0)\n new_proto = torch.div(new_proto, classes_denom.unsqueeze(1).unsqueeze(1).unsqueeze(1).expand_as(new_proto))\n return new_proto\n\n def apply_pca_transform(self):\n prototypes_data = self.pca_transformer.fit_transform(self.class_prototypes.view(self.n_classes, -1).cpu())\n query_set_data = self.pca_transformer.transform(self.query_set_features.view(self.query_set_size, -1).cpu())\n self.class_prototypes = 
torch.from_numpy(prototypes_data).to(self.class_prototypes.device).view(self.n_classes,\n -1, 1, 1)\n self.query_set_features = torch.from_numpy(query_set_data).to(self.query_set_features.device).view(\n self.query_set_size, -1, 1, 1)\n\n def forward(self, support_set: torch.Tensor, query_set: torch.Tensor) -> torch.Tensor:\n self.n_classes = support_set.size(0)\n\n if self.extend_input:\n flipped_support_set = flip_dimension(support_set, 4)\n\n support_set = torch.cat([support_set, flipped_support_set], dim=1)\n\n self.support_set_size = support_set.size(1)\n self.query_set_size = query_set.size(0)\n\n self.support_set_features = self.extract_features(remove_dim(support_set, 1))\n\n self.support_set_features = self.support_set_features.view(\n *([self.n_classes, self.support_set_size] + list(self.support_set_features.shape)[1:]))\n\n self.query_set_features = self.extract_features(query_set)\n\n self.build_prototypes(self.support_set_features, self.query_set_features)\n\n if self.pca and not self.training:\n self.apply_pca_transform()\n\n return self.get_distances(self.query_set_features)\n\n def fit(self, support_set: torch.Tensor):\n self.n_classes = support_set.size(0)\n\n support_set = support_set.to(self.device)\n\n try:\n if self.extend_input:\n flipped_support_set = flip_dimension(support_set, 4)\n\n support_set = torch.cat([support_set, flipped_support_set], dim=1)\n except AttributeError:\n pass\n\n self.support_set_size = support_set.size(1)\n\n self.support_set_features = self.extract_features(remove_dim(support_set, 1))\n\n self.support_set_features = self.support_set_features.view(\n *([self.n_classes, self.support_set_size] + list(self.support_set_features.shape)[1:]))\n\n self.build_prototypes(self.support_set_features)\n\n def transform(self, x: torch.Tensor):\n x = x.to(self.device)\n\n if len(x.size()) == 3:\n x = torch.unsqueeze(x, 0)\n\n self.query_set_size = len(x)\n\n self.query_set_features = self.extract_features(x)\n y = self.get_distances(self.query_set_features)\n prob = F.softmax(y, dim=1)\n if prob.size(0) == 1:\n prob = torch.squeeze(prob, 0)\n return prob\n\n def forward_with_loss(self, support_set: torch.Tensor, query_set: torch.Tensor,\n labels: torch.Tensor, global_classes_mapping: dict) -> Tuple[\n torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n\n output = self(support_set, query_set)\n loss_i = self.loss_fn(output, labels)\n\n cur_labels = labels.clone().repeat_interleave(self.featmap_size2, dim=0)\n cur_global_prototypes = self.global_proto.weight\n inv_mapping = inverse_mapping(global_classes_mapping)\n if self.all_global_prototypes:\n for i in range(cur_labels.size(0)):\n cur_labels[i] = inv_mapping[cur_labels[i].item()]\n else:\n indices = []\n for i in range(support_set.size(0)):\n indices.append(inv_mapping[i])\n indices = torch.tensor(indices, device=self.device)\n cur_global_prototypes = torch.index_select(cur_global_prototypes, 0, indices)\n # print(cur_labels.size())\n\n expanded_global_prototypes = cur_global_prototypes\n # expanded_query_set = torch.reshape(self.query_set_features, (self.query_set_features.size(0), -1))\n # print(self.query_set_features.shape)\n expanded_query_set = self.query_set_features.permute(0, 2, 3, 1).reshape((-1, self.query_set_features.size(1)))\n # print(expanded_query_set.shape)\n # print(expanded_global_prototypes.shape)\n d_distances = self.get_l2_distances(expanded_query_set, expanded_global_prototypes)\n # print(d_distances.shape)\n loss_d = self.loss_fn(d_distances, cur_labels) # * 
self.featmap_size2\n # print(loss_d.item(), loss_i.item())\n\n res_loss = (0.2 * loss_i) + loss_d\n\n return output, res_loss, loss_i, loss_d\n\n\ndef train_mctdfmn(base_subdataset: LabeledSubdataset, val_subdataset: LabeledSubdataset, n_shot: int, n_way: int,\n n_iterations: int, batch_size: int, eval_period: int,\n val_batch_size: int,\n dataset_classes: int,\n image_size: int,\n balanced_batches: bool,\n pretrained_model: MCTDFMN = None,\n train_n_way=15,\n backbone_name='resnet12-np', lr=0.1,\n train_ts_steps=1,\n test_ts_steps=10,\n all_global_prototypes=True,\n no_scaling=False,\n pca=False,\n extend_input=False,\n device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\"), **kwargs):\n session_info = {\n \"task\": \"few-shot learning\",\n \"model\": \"MCT_DFMN\",\n \"feature_extractor\": backbone_name,\n \"n_iterations\": n_iterations,\n \"eval_period\": eval_period,\n # \"dataset\": dataset_name,\n # \"optimizer\": optimizer_name,\n \"batch_size\": batch_size,\n \"val_batch_size\": val_batch_size,\n \"n_shot\": n_shot,\n \"n_way\": n_way,\n \"train_n_way\": train_n_way,\n \"train_ts_steps\": train_ts_steps,\n \"test_ts_steps\": test_ts_steps,\n \"optimizer\": 'sgd',\n \"all_global_prototypes\": all_global_prototypes,\n \"image_size\": image_size,\n \"balanced_batches\": balanced_batches,\n \"pretrained_model\": pretrained_model is not None,\n \"no_scaling\": no_scaling,\n \"pca\": pca,\n \"extend_input\": extend_input,\n }\n\n session_info.update(kwargs)\n\n backbone = FEATURE_EXTRACTORS[backbone_name]()\n if pretrained_model is None:\n model = MCTDFMN(backbone=backbone, test_transduction_steps=test_ts_steps,\n train_transduction_steps=train_ts_steps, train_classes=dataset_classes,\n all_global_prototypes=all_global_prototypes, scaling=not no_scaling,\n pca=pca, extend_input=extend_input).to(device)\n else:\n model = copy.deepcopy(pretrained_model)\n\n optimizer = torch.optim.SGD(params=model.parameters(), lr=lr, nesterov=True, weight_decay=0.0005, momentum=0.9)\n scheduler = LambdaLR(optimizer, lr_lambda=lr_schedule)\n\n base_sampler = FSLEpisodeSamplerGlobalLabels(subdataset=base_subdataset, n_way=train_n_way, n_shot=n_shot,\n batch_size=batch_size, balanced=balanced_batches)\n val_sampler = FSLEpisodeSampler(subdataset=val_subdataset, n_way=n_way, n_shot=n_shot, batch_size=val_batch_size,\n balanced=balanced_batches)\n\n loss_plotter = PlotterWindow(interval=1000)\n accuracy_plotter = PlotterWindow(interval=1000)\n\n loss_plotter.new_line('Loss')\n loss_plotter.new_line('Dense Loss')\n loss_plotter.new_line('Instance Loss')\n accuracy_plotter.new_line('Train Accuracy')\n accuracy_plotter.new_line('Validation Accuracy')\n\n losses = []\n losses_d = []\n losses_i = []\n acc_train = []\n acc_val = []\n val_iters = []\n\n best_model = copy.deepcopy(model)\n\n best_accuracy = 0\n best_iteration = -1\n\n print(\"Training started for parameters:\")\n print(session_info)\n print()\n\n start_time = time.time()\n\n for iteration in range(n_iterations):\n model.train()\n\n support_set, batch, global_classes_mapping = base_sampler.sample()\n # print(support_set.size())\n query_set, query_labels = batch\n # print(query_set.size())\n # print(global_classes_mapping)\n query_set = query_set.to(device)\n query_labels = query_labels.to(device)\n\n optimizer.zero_grad()\n output, loss, loss_i, loss_d = model.forward_with_loss(support_set, query_set, query_labels,\n global_classes_mapping)\n # output = model.forward(support_set, query_set)\n # loss = loss_fn(output, query_labels)\n 
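# loss here is res_loss = (0.2 * loss_i) + loss_d from forward_with_loss: the weighted instance (episode) loss plus the dense global-prototype loss\n        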
loss.backward()\n optimizer.step()\n scheduler.step()\n\n labels_pred = output.argmax(dim=1)\n labels = query_labels\n cur_accuracy = accuracy(labels=labels, labels_pred=labels_pred)\n\n loss_plotter.add_point('Loss', iteration, loss.item())\n loss_plotter.add_point('Dense Loss', iteration, loss_d.item())\n loss_plotter.add_point('Instance Loss', iteration, 0.2 * loss_i.item())\n accuracy_plotter.add_point('Train Accuracy', iteration, cur_accuracy)\n\n losses.append(loss.item())\n losses_i.append(loss_i.item())\n losses_d.append(loss_d.item())\n acc_train.append(cur_accuracy)\n\n if iteration % eval_period == 0 or iteration == n_iterations - 1:\n val_start_time = time.time()\n\n val_accuracy = evaluate_solution_episodes(model, val_sampler)\n accuracy_plotter.add_point('Validation Accuracy', iteration, val_accuracy)\n\n acc_val.append(val_accuracy)\n val_iters.append(iteration + 1)\n\n if val_accuracy > best_accuracy:\n best_accuracy = val_accuracy\n best_iteration = iteration\n best_model = copy.deepcopy(model)\n print(\"Best evaluation result yet!\")\n\n cur_time = time.time()\n\n val_time = cur_time - val_start_time\n time_used = cur_time - start_time\n time_per_iteration = time_used / (iteration + 1)\n\n print()\n print(\"[%d/%d] = %.2f%%\\t\\tLoss: %.4f\" % (\n iteration + 1, n_iterations, (iteration + 1) / n_iterations * 100, loss.item()))\n print(\"Current validation time: %s\" % pretty_time(val_time))\n\n print('Average iteration time: %s\\tEstimated execution time: %s' % (\n pretty_time(time_per_iteration),\n pretty_time(time_per_iteration * (n_iterations - iteration - 1)),\n ))\n print()\n\n cur_time = time.time()\n training_time = cur_time - start_time\n print(\"Training finished. Total execution time: %s\" % pretty_time(training_time))\n print(\"Best accuracy is: %.3f\" % best_accuracy)\n print(\"Best iteration is: [%d/%d]\" % (best_iteration + 1, n_iterations))\n print()\n\n session_info['accuracy'] = best_accuracy\n session_info['best_iteration'] = best_iteration\n session_info['execution_time'] = training_time\n\n session = Session()\n session.build(name=\"FSL_MCTDFMN\", comment=r\"Few-Shot Learning solution based on https://arxiv.org/abs/2002.12017\",\n **session_info)\n # session.data.update(session_info)\n # save_record(name=\"Few-Shot Learning Training: MCT + DFMN\", **session_info)\n\n torch.save(best_model, os.path.join(session.data['output_dir'], \"trained_model_state_dict.tar\"))\n iters = list(range(1, n_iterations + 1))\n\n plt.figure(figsize=(20, 20))\n plt.plot(iters, losses, label=\"Loss\")\n plt.plot(iters, losses_d, label=\"Dense Loss\")\n plt.plot(iters, losses_i, label=\"Instance Loss\")\n plt.legend()\n plt.savefig(os.path.join(session.data['output_dir'], \"loss_plot.png\"))\n\n plt.figure(figsize=(20, 20))\n plt.plot(iters, acc_train, label=\"Train Accuracy\")\n plt.plot(val_iters, acc_val, label=\"Test Accuracy\")\n plt.legend()\n plt.savefig(os.path.join(session.data['output_dir'], \"acc_plot.png\"))\n\n session.save_info()\n return best_model\n\n\nif __name__ == '__main__':\n torch.random.manual_seed(2002)\n random.seed(2002)\n\n # PRETRAINING_DATASET_NAME = 'google-landmarks-selfsupervision'\n PRETRAINING_DATASET_NAME = ''\n DATASET_NAME = 'miniImageNet'\n\n BASE_CLASSES = 64\n PRETRAINING_BASE_CLASSES = 8000\n\n AUGMENT_PROB = 1.0\n\n PRETRAINING_ITERATIONS = 30000\n ITERATIONS = 40000\n\n PRETRAINING_N_WAY = 15\n N_WAY = 15\n\n PRETRAINING_EVAL_PERIOD = 1000\n EVAL_PERIOD = 1000\n\n RECORD = 800\n\n ALL_GLOBAL_PROTOTYPES = False\n\n IMAGE_SIZE = 
84\n\n BACKBONE = 'conv64-np-o'\n\n BATCH_SIZE = 5\n VAL_BATCH_SIZE = 5\n\n BALANCED_BATCHES = True\n\n SCALING = True\n APPLY_PCA = False\n\n assert not (SCALING and APPLY_PCA)\n\n EXTEND_INPUT = False\n\n print(\"Preparations for training...\")\n dataset = LABELED_DATASETS[DATASET_NAME](augment_prob=AUGMENT_PROB, image_size=IMAGE_SIZE)\n base_subdataset, val_subdataset = dataset.subdataset.extract_classes(BASE_CLASSES)\n base_subdataset.set_test(False)\n val_subdataset.set_test(True)\n\n pre_dataset = None\n if PRETRAINING_DATASET_NAME and PRETRAINING_BASE_CLASSES and PRETRAINING_N_WAY and PRETRAINING_EVAL_PERIOD \\\n and PRETRAINING_ITERATIONS:\n pre_dataset = LABELED_DATASETS[PRETRAINING_DATASET_NAME](augment_prob=AUGMENT_PROB, image_size=IMAGE_SIZE)\n pre_base_subdataset, pre_val_subdataset = pre_dataset.subdataset.extract_classes(PRETRAINING_BASE_CLASSES)\n pre_base_subdataset.set_test(False)\n pre_val_subdataset.set_test(True)\n\n for N_SHOT, BATCH_SIZE, VAL_BATCH_SIZE in (\n (1, 5, 5),\n # (5, 3, 3)\n ):\n pretraining_result = None\n\n if pre_dataset is not None:\n print(\"Self-supervised stage\")\n pretraining_result = train_mctdfmn(base_subdataset=pre_base_subdataset, val_subdataset=pre_val_subdataset,\n n_shot=N_SHOT, n_way=PRETRAINING_N_WAY,\n n_iterations=PRETRAINING_ITERATIONS, batch_size=BATCH_SIZE,\n eval_period=PRETRAINING_EVAL_PERIOD,\n record=RECORD,\n augment=AUGMENT_PROB,\n dataset=PRETRAINING_DATASET_NAME,\n base_classes=PRETRAINING_BASE_CLASSES,\n dataset_classes=pre_dataset.CLASSES,\n all_global_prototypes=ALL_GLOBAL_PROTOTYPES,\n image_size=IMAGE_SIZE,\n extend_input=EXTEND_INPUT,\n backbone_name=BACKBONE,\n balanced_batches=BALANCED_BATCHES,\n val_batch_size=VAL_BATCH_SIZE,\n no_scaling=not SCALING,\n pca=APPLY_PCA,\n train_ts_steps=0,\n test_ts_steps=0)\n print(\"Main stage\")\n train_mctdfmn(base_subdataset=base_subdataset, val_subdataset=val_subdataset, n_shot=N_SHOT, n_way=N_WAY,\n n_iterations=ITERATIONS, batch_size=BATCH_SIZE,\n eval_period=EVAL_PERIOD,\n record=RECORD,\n augment=AUGMENT_PROB,\n dataset=DATASET_NAME,\n base_classes=BASE_CLASSES,\n dataset_classes=dataset.CLASSES,\n all_global_prototypes=ALL_GLOBAL_PROTOTYPES,\n image_size=IMAGE_SIZE,\n extend_input=EXTEND_INPUT,\n backbone_name=BACKBONE,\n balanced_batches=BALANCED_BATCHES,\n val_batch_size=VAL_BATCH_SIZE,\n no_scaling=not SCALING,\n pca=APPLY_PCA,\n train_ts_steps=0,\n test_ts_steps=0,\n pretrained_model=pretraining_result)\n", "repo_name": "adia1223/torch-nn-project", "sub_path": "models/images/classification/few_shot_learning/mctdfmn.py", "file_name": "mctdfmn.py", "file_ext": "py", "file_size_in_byte": 23057, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "torch.nn.Module", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.Softplus", "line_number": 35, 
"usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "models.images.classification.few_shot_learning.FitTransformFewShotLearningSolution", "line_number": 57, "usage_type": "name"}, {"api_name": "models.images.classification.backbones.NoFlatteningBackbone", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 61, "usage_type": "attribute"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 86, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 95, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 96, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 98, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 98, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 102, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 103, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 104, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 106, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 119, "usage_type": "attribute"}, {"api_name": "torch.mean", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 126, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.normalize", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 137, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 138, "usage_type": "name"}, {"api_name": "torch.div", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.div", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 143, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 146, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.softmax", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 147, "usage_type": 
"name"}, {"api_name": "torch.Tensor", "line_number": 149, "usage_type": "attribute"}, {"api_name": "torch.stack", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 156, "usage_type": "attribute"}, {"api_name": "torch.stack", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 166, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 168, "usage_type": "attribute"}, {"api_name": "torch.sum", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.div", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 187, "usage_type": "attribute"}, {"api_name": "torch_utils.flip_dimension", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 193, "usage_type": "call"}, {"api_name": "utils.remove_dim", "line_number": 198, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 212, "usage_type": "attribute"}, {"api_name": "torch_utils.flip_dimension", "line_number": 219, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 221, "usage_type": "call"}, {"api_name": "utils.remove_dim", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 234, "usage_type": "attribute"}, {"api_name": "torch.unsqueeze", "line_number": 238, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 244, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 244, "usage_type": "name"}, {"api_name": "torch.squeeze", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 249, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 250, "usage_type": "attribute"}, {"api_name": "utils.inverse_mapping", "line_number": 258, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 266, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 267, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 250, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 251, "usage_type": "attribute"}, {"api_name": "data.LabeledSubdataset", "line_number": 286, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 301, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 301, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 301, "usage_type": "attribute"}, {"api_name": "models.images.classification.few_shot_learning.FEATURE_EXTRACTORS", "line_number": 329, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 336, "usage_type": "call"}, {"api_name": "torch.optim.SGD", "line_number": 338, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 338, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.LambdaLR", "line_number": 339, "usage_type": "call"}, {"api_name": "models.images.classification.few_shot_learning.FSLEpisodeSamplerGlobalLabels", "line_number": 341, "usage_type": "call"}, {"api_name": "models.images.classification.few_shot_learning.FSLEpisodeSampler", "line_number": 343, "usage_type": "call"}, {"api_name": "visualization.plots.PlotterWindow", "line_number": 346, "usage_type": "call"}, {"api_name": 
"visualization.plots.PlotterWindow", "line_number": 347, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 362, "usage_type": "call"}, {"api_name": "time.time", "line_number": 371, "usage_type": "call"}, {"api_name": "models.images.classification.few_shot_learning.accuracy", "line_number": 395, "usage_type": "call"}, {"api_name": "time.time", "line_number": 408, "usage_type": "call"}, {"api_name": "models.images.classification.few_shot_learning.evaluate_solution_episodes", "line_number": 410, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 419, "usage_type": "call"}, {"api_name": "time.time", "line_number": 422, "usage_type": "call"}, {"api_name": "utils.pretty_time", "line_number": 431, "usage_type": "call"}, {"api_name": "utils.pretty_time", "line_number": 434, "usage_type": "call"}, {"api_name": "utils.pretty_time", "line_number": 435, "usage_type": "call"}, {"api_name": "time.time", "line_number": 439, "usage_type": "call"}, {"api_name": "utils.pretty_time", "line_number": 441, "usage_type": "call"}, {"api_name": "sessions.Session", "line_number": 450, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 456, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 456, "usage_type": "call"}, {"api_name": "os.path", "line_number": 456, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 459, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 459, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 460, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 460, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 461, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 461, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 462, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 462, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 463, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 463, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 464, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 464, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 464, "usage_type": "call"}, {"api_name": "os.path", "line_number": 464, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 466, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 466, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 467, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 467, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 468, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 468, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 469, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 469, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 470, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 470, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 470, "usage_type": "call"}, {"api_name": "os.path", "line_number": 470, "usage_type": "attribute"}, {"api_name": "torch.random.manual_seed", "line_number": 477, "usage_type": "call"}, {"api_name": "torch.random", "line_number": 477, "usage_type": 
"attribute"}, {"api_name": "random.seed", "line_number": 478, "usage_type": "call"}, {"api_name": "data.LABELED_DATASETS", "line_number": 519, "usage_type": "name"}, {"api_name": "data.LABELED_DATASETS", "line_number": 527, "usage_type": "name"}]} +{"seq_id": "38057875607", "text": "from airflow import DAG\nfrom datetime import datetime, timedelta\nimport pendulum\nimport json\nfrom pandas import json_normalize\nfrom airflow.operators.bash import BashOperator\nfrom airflow.operators.python import PythonOperator \nfrom airflow.decorators import task, dag # updated\nfrom airflow.utils.dates import days_ago\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.parquet as pd\n\n\n# Default Args\ndefault_args = {\n 'owner': 'airflow',\n 'start_date': datetime(2023,6,1),\n 'email': ['tojuny2312@gmail.com'],\n 'email_on_failure': False,\n 'email_in_retry': False,\n 'depends_on_past': False,\n 'retries': 5 \n}\n\n\nbase_dag = DAG(\n dag_id = 'static_agg_daily',\n start_date = datetime(2023,6,1),\n schedule_interval=\"@daily\",\n end_date = datetime(2023,7,30), # due\n default_args=default_args,\n concurrency=3,\n max_active_runs=1,\n )\n\n\n# CMD Collection\n# Critoria of Household\n\nmk_dir_static = 'hadoop fs -mkdir /etl/static'\nmk_dir_cpi = 'hadoop fs -mkdir /etl/static/cpi' \nmk_dir_ppi = 'hadoop fs -mkdir /etl/static/ppi'\nmk_dir_h_expediture = 'hadoop fs -mkdir /etl/static/h_expediture'\nmk_dir_lt_interest = 'hadoop fs -mkdir /etl/static/lt'\nmk_dir_st_interest = 'hadoop fs -mkdir /etl/static/st'\n\nBASE_TIME = \"{{data_interval_end.in_timezone('Asia/Seoul').strftime('%Y%m%d')}}\"\n\n\n\n# bash\ndef push_bash(name: str, cmd: str, dag, trigger=\"all_success\"):\n bash_task = BashOperator(\n task_id=name,\n bash_command=cmd,\n trigger_rule=trigger,\n dag=base_dag\n )\n return bash_task\n\n# Covering 6 indices At Once\n# parquet \n@dag(default_args=default_args, multiple_outputs=True) \ndef csvToparquet(df: pd.DataFrame): # data type : verifying json\n @task()\n def trans():\n naming = ['df_cpi','df_ppi','df_pl','df_h', 'df_lt', 'df_st']\n for nam in naming:\n df.to_parquet('{naming}.parquet')\n\n\n# {PUT_PATH}\nPUT_PATH = '/home/juny/code/juny_af/etl/static_data'\n# {BAK_PATH}\nBAK_PATH = '/home/juny/code/juny_af/etl/backup'\n# {RD_PATH}\nRD_PATH = '/etl/static'\n\n# BackUp Storage Operator can be replaced by Database App.\nput_cpi = 'hdfs dfs -put {PUT_PATH}/Inflation_cpi.csv /etl/static/cpi/cpi_start.csv' \nbak_cpi = 'cp /home/juny/code/juny_af/etl/temp/cpi_start.csv {BAK_PATH}/cpi_backup.csv'\necho_cpi = 'echo \"cpi index was stored at {BASE_TIME}\"'\n#df_cpi = pd.read_csv('{RD_PATH}/cpi/cpi.csv')\n\n#\nput_ppi = 'hdfs dfs -put {PUT_PATH}/Producer_price_indices.csv /etl/static/ppi/ppi.csv'\nbak_ppi = 'cp /home/juny/code/juny_af/etl/temp/ppi.csv {BAK_PATH}/ppi_backup.csv'\necho_ppi = 'echo \"ppi index was stored at {BASE_TIME}\"'\n#df_ppi = pd.read_csv('{RD_PATH}/ppi/ppi.csv')\n\nput_pl = 'hdfs dfs -put {PUT_PATH}/Price_level_indices.csv /etl/static/pl/price_level.csv' \nbak_pl = 'cp /home/juny/code/juny_af/etl/temp/price_level.csv {BAK_PATH}/price_level_backup.csv'\necho_pl = 'echo \"Index of Price Level was stored at {BASE_TIME}\"'\n#df_pl = pd.read_csv('{RD_PATH}/pl/price_level.csv')\n\nput_h = 'hdfs dfs -put {PUT_PATH}/Household_spending.csv /etl/static/h_expenditure/h_spending.csv'\nbak_h = 'cp /home/juny/code/juny_af/etl/temp/h-spending.csv {BAK_PATH}/h_spending_backup.csv'\necho_h = 'echo \"Index of Household expediture was stored at {BASE_TIME}\"'\n#df_h = 
pd.read_csv('{RD_PATH}/h_expenditure/h_expenditure.csv')\n\nput_lt = 'hdfs dfs -put {PUT_PATH}/Long_term_interest.csv /etl/static/lt/lt_interest.csv'\nbak_lt = 'cp /home/juny/code/juny_af/etl/temp/lt_interest.csv {BAK_PATH}/lt_interest_backup.csv'\necho_lt = 'echo \"long term interest was stored at {BASE_TIME}\"'\n#df_lt = pd.read_csv('{RD_PATH}/lt/lt_interest.csv')\n\nput_st = 'hdfs dfs -put {PUT_PATH}/Short_term_interest.csv /etl/static/st/st_interest.csv' \nbak_st = 'cp /home/juny/code/juny_af/etl/temp/st_interest.csv {BAK_PATH}/st_interest_backup.csv'\necho_st = 'echo \"short term interest was stored at {BASE_TIME}\"'\n#df_st = pd.read_csv('{RD_PATH}/st/st_interest.csv')\n\n\n# Done log\nDONE_log = BashOperator(\n    task_id = 'UPDATED_DONE',\n    bash_command= 'echo \"all static indices were updated at {BASE_TIME}\"',\n    dag=base_dag\n)\n\n#------------------------------------------------------------\n\n# cpi\ncpi_start = BashOperator(\n    task_id='cpi_start',\n    bash_command= put_cpi,\n    dag=base_dag\n)\n# back-up file (split or delete it when you want)\ncpi_make_backup = BashOperator(\n    task_id='cpi_make_backup',\n    bash_command= bak_cpi,\n    dag=base_dag\n) \n\ncpi_echo = BashOperator(\n    task_id='echo_cpi',\n    bash_command= echo_cpi,\n    dag=base_dag,\n)\n\n#---------------------------------------------------------\n\n#ppi\nppi_start = BashOperator(\n    task_id='ppi_start',\n    bash_command= put_ppi,\n    dag=base_dag\n)\n# back-up file (split or delete it when you want)\nppi_make_backup = BashOperator(\n    task_id='ppi_make_backup',\n    bash_command= bak_ppi,\n    dag=base_dag\n)\n\nppi_echo = BashOperator(\n    task_id='echo_ppi',\n    bash_command= echo_ppi,\n    dag=base_dag,\n)\n\n#--------------------------------------------------------\n\n#price level\nprlev_start = BashOperator(\n    task_id='prlev_start',\n    bash_command= put_pl,\n    dag=base_dag\n)\n\n# back-up file (split or delete it when you want)\nprlev_make_backup = BashOperator(\n    task_id='prlev_make_backup',\n    bash_command= bak_pl,\n    dag=base_dag\n)\n\npl_echo = BashOperator(\n    task_id='echo_pl',\n    bash_command= echo_pl,\n    dag=base_dag,\n)\n\n#--------------------------------------------------------\n\n#h spending \n\nh_spending = BashOperator(\n    task_id='h_spending_start',\n    bash_command= put_h,\n    dag=base_dag\n)\n\n# back-up file (split or delete it when you want)\nh_spending_make_backup = BashOperator(\n    task_id='h_spending_make_backup',\n    bash_command= bak_h,\n    dag=base_dag\n)\n\nh_echo = BashOperator(\n    task_id='echo_h',\n    bash_command= echo_h,\n    dag=base_dag,\n)\n\n#--------------------------------------------------------\n\n#lt interest\nlt_interest_start = BashOperator(\n    task_id='lt_interest_start',\n    bash_command= put_lt,\n    dag=base_dag\n)\n\n\n# back-up file (split or delete it when you want)\nlt_interest_make_backup = BashOperator(\n    task_id='lt_interest_backup',\n    bash_command= bak_lt,\n    dag=base_dag\n)\n\nlt_echo = BashOperator(\n    task_id='echo_lt',\n    bash_command= echo_lt,\n    dag=base_dag,\n)\n\n#---------------------------------------------------------\n\n#st interest\n\nst_interest_start = BashOperator(\n    task_id='st_interest_start',\n    bash_command= put_st,\n    dag=base_dag\n)\n\n\n# back-up file (split or delete it when you want)\nst_interest_make_backup = BashOperator(\n    task_id='st_interest_make_backup',\n    bash_command= bak_st,\n    dag=base_dag\n)\n\nst_echo = BashOperator(\n    task_id='echo_st',\n    bash_command= echo_st,\n    dag=base_dag,\n)\n\n\n#---------------------------------------------------------\n\ndf_cpi = pd.read_csv('{RD_PATH}/cpi/cpi.csv')\ndf_ppi = 
pd.read_csv('{RD_PATH}/ppi/ppi.csv')\ndf_pl = pd.read_csv('{RD_PATH}/pl/price_level.csv')\ndf_h = pd.read_csv('{RD_PATH}/h_expenditure/h_expenditure.csv')\ndf_lt = pd.read_csv('{RD_PATH}/lt/lt_interest.csv')\ndf_st = pd.read_csv('{RD_PATH}/st/st_interest.csv')\n\n\ndfli = [ df_cpi, df_ppi, df_pl, df_h, df_lt, df_st ]\n\n\n\n@dag(default_args=default_args, multiple_outputs=True)\ndef execdf():\n @task\n def exec():\n for li in dfli:\n exec_f_transform = csvToparquet(li)\n\n# \n#start >> load_temp >> raw >> base(reduce) >> Done\n#start >> bak\n\n# Verify-ing Streams and ThroubleShoot-ing \n\n\n\n\n\n\n\n\n", "repo_name": "Juny2312/Airflow-pipeline", "sub_path": "daily_economy_indices_tuning.py", "file_name": "daily_economy_indices_tuning.py", "file_ext": "py", "file_size_in_byte": 7384, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "datetime.datetime", "line_number": 18, "usage_type": "call"}, {"api_name": "airflow.DAG", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 54, "usage_type": "call"}, {"api_name": "pyarrow.parquet.DataFrame", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pyarrow.parquet", "line_number": 65, "usage_type": "name"}, {"api_name": "airflow.decorators.task", "line_number": 66, "usage_type": "call"}, {"api_name": "airflow.decorators.dag", "line_number": 64, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 114, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 123, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 129, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 135, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 144, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 150, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 156, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 165, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 172, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 178, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 188, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 195, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 201, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 210, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 218, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 224, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 234, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 242, "usage_type": "call"}, {"api_name": "airflow.operators.bash.BashOperator", "line_number": 248, "usage_type": "call"}, {"api_name": "pyarrow.parquet.read_csv", "line_number": 257, "usage_type": "call"}, {"api_name": "pyarrow.parquet", 
"line_number": 257, "usage_type": "name"}, {"api_name": "pyarrow.parquet.read_csv", "line_number": 258, "usage_type": "call"}, {"api_name": "pyarrow.parquet", "line_number": 258, "usage_type": "name"}, {"api_name": "pyarrow.parquet.read_csv", "line_number": 259, "usage_type": "call"}, {"api_name": "pyarrow.parquet", "line_number": 259, "usage_type": "name"}, {"api_name": "pyarrow.parquet.read_csv", "line_number": 260, "usage_type": "call"}, {"api_name": "pyarrow.parquet", "line_number": 260, "usage_type": "name"}, {"api_name": "pyarrow.parquet.read_csv", "line_number": 261, "usage_type": "call"}, {"api_name": "pyarrow.parquet", "line_number": 261, "usage_type": "name"}, {"api_name": "pyarrow.parquet.read_csv", "line_number": 262, "usage_type": "call"}, {"api_name": "pyarrow.parquet", "line_number": 262, "usage_type": "name"}, {"api_name": "airflow.decorators.task", "line_number": 271, "usage_type": "name"}, {"api_name": "airflow.decorators.dag", "line_number": 269, "usage_type": "call"}]} +{"seq_id": "25794335073", "text": "# otiginal lr_2d.py\n\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\n# load the data\nX= []\nY =[]\nfor line in open(\"data_2d.csv\"):\n x1,x2, y = line.split(',')\n X.append([float(x1),float(x2), 1])\n Y.append(float(y))\n# turn X and Y into numpy arrays\nX = np.array(X)\nY = np.array(Y)\n\n# let's plot data to see what it looks like\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nax.scatter(X[:,0], X[:,1], Y)\nplt.show()\n\n# calculate weight of our model\n\nw = np.linalg.solve(np.dot(X.T,X), np.dot(X.T, Y))\n# usually we wdo dot(w.T, X) but as X is NxD matrix, where each sample is a row it is easier to do X times w\nYhat = np.dot(X, w)\n\n# compute r-squared\nd1 = Y - Yhat\nd2 = Y - Y.mean()\nr2 = 1 - d1.dot(d1) / d2.dot(d2)\nprint(\"The r-squared is:\", r2)\n", "repo_name": "tritseratops/DeepLearningAdvancedNLPandRNNs", "sub_path": "linear_regression/19multidim_sol/19multidim_solution.py", "file_name": "19multidim_solution.py", "file_ext": "py", "file_size_in_byte": 812, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.linalg.solve", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "24086316973", "text": "#week 5:\r\n#Write a python program to classify the data based on one way ANOVA.\r\n\r\n\r\nimport numpy as np\r\nimport scipy.stats as s\r\n\r\ndef sq(n):\r\n \r\n p=0\r\n for i in n:\r\n p+=i**2\r\n return p\r\ndef mylen(n):\r\n c=0\r\n for i in n:\r\n c+=1\r\n return c\r\ndef mysum(n):\r\n s=0\r\n for i in n:\r\n s+=i\r\n return s\r\n\r\n\r\nname=input(\"enter the name of the treatment:\")\r\nk=int(input(\"enter the number of inputs:\"))\r\ntreat=[]\r\nfor i in range(k):\r\n a=np.array(list(map(float,input(f\"enter the {name}{i+1}\").split())))\r\n 
treat.append(a)\r\nalpha=float(input(\"enter the level of significance:\"))\r\n\r\nTi=Ti2=N=RSS=0\r\n\r\nfor i in treat:\r\n    RSS+=mysum(i*i)\r\n    N+=mylen(i)\r\n    Ti+=mysum(i)\r\n    Ti2+=((mysum(i)**2)/mylen(i))\r\n\r\nCF=(Ti**2)/N\r\nSST=RSS-CF\r\nSSTR=Ti2-CF\r\nSSE = SST-SSTR\r\n\r\n\r\n
print('the total sum of Ti:',Ti)\r\nprint(\"the total sum of Ti^2/Ni:\",Ti2)\r\nprint(\"the RSS value:\",RSS)\r\nprint(\"the CF value is:\",CF)\r\n\r\nprint(\"the SST value is:\",SST)\r\nprint(\"the SSTR value is:\",round(SSTR,3))\r\nprint(\"the SSE value is:\",round(SSE,3))\r\nn1=k-1\r\nn2=N-k\r\ntotal=N-1\r\nT1=SSTR+SSE\r\n\r\ntr=round(SSTR/n1,3)\r\ner=round(SSE/n2,3)\r\nprint(\"the means of treatment:\",tr)\r\nprint(\"the means of errors:\",er)\r\n\r\nFcal=tr/er\r\nif(Fcal>1):\r\n    print(\"The Fcal value is:\",Fcal)\r\nelif(Fcal<1):\r\n    Fcal=1/Fcal\r\n    print(\"The Fcal value is:\",Fcal)\r\nelse:\r\n    print('none')\r\n\r\nFtable = s.f.ppf(1-alpha,k-1,N-k)\r\nprint(\"The Ftable value is:\",Ftable)\r\n\r\n \r\n
print(\"---------------------ANOVA ONE WAY CLASSIFICATION TABLE------\")\r\nprint(\"---------------------------------------------------------------------------------------------------------------------------\")\r\n\r\nd={}\r\n\r\nd={1:[\"treatment\",round(SSTR,3),n1,tr,''],\r\n   2:[\"error\",round(SSE,5),n2,er,Fcal],\r\n   3:[\"TOTAL\",round(T1,3),total,'',''],\r\n}\r\nprint(\"{:<10} {:<10} {:<10} {:<10} {:<10}\".format('SOV','SOS','DOF','MOS','VR'))\r\n\r\nfor key,value in d.items():\r\n    sourceofvariation, sumsofsquares, degreeoffreedom, meansofsquare, varianceratio = value\r\n    print(\"{:<10} {:<10} {:<10} {:<10} {:<10}\".format(sourceofvariation,sumsofsquares,degreeoffreedom,meansofsquare,varianceratio))\r\nprint(\"------------------------------------------------------------------------------------------------------------------------------\")\r\n\r\nprint(\"-----------INFERENCE:------\")\r\n\r\nif(Fcal > Ftable):\r\n    print(f\"We reject h0 and there is no homogeneity among:{name}\")\r\nelse:\r\n    print(f\"We accept h0 and there is homogeneity among:{name}\")\r\n\r\n\r\n\r\n\r\n\r\n
", "repo_name": "nvn18/2-2-lLAB_WORKS", "sub_path": "COMPUTATIONAL STATITICS/5pro.py", "file_name": "5pro.py", "file_ext": "py", "file_size_in_byte": 2493, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "scipy.stats", "line_number": 20, "usage_type": "name"}, {"api_name": "scipy.stats", "line_number": 22, "usage_type": "name"}, {"api_name": "scipy.stats", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.stats.f.ppf", "line_number": 75, "usage_type": "call"}, {"api_name": "scipy.stats.f", "line_number": 75, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "41325563863", "text": "import pygame\nimport random\nfrom pygame.locals import *\n\nfrom var import Config\n\nclass Game:\n\n    def __init__(self):\n        \"\"\"\n        Function: Initializes a new game\n        \"\"\"\n        self._run = True\n        self._collision = False\n        self._finished = False\n\n        self._map = []\n        self._map_size = (0, 0)\n        self.get_map()\n        self._startingMap = self.copy_map(self._map)\n        self.start()\n\n    def start(self):\n        \"\"\"\n        Function: Starts a Game\n        \"\"\"\n        self._run = True\n        self._collision = False\n        self._finished = False\n        self._character = Character(self)\n\n    def copy_map(self, listmap):\n        \"\"\"\n        Function: Copies the map\n        \"\"\"\n        copy = []\n        for line in listmap:\n            copy.append(line.copy())\n        return copy\n\n    def reset(self):\n        \"\"\"\n        Function: Reset the game\n        \"\"\"\n        self._map = self.copy_map(self._startingMap)\n        self.start()\n\n    def update(self):\n        \"\"\"\n        Function: Checks the character's position against the map entities\n        \"\"\"\n        y = -1\n        for subList in self._map:\n            x = -1\n            y+=1\n            for _ in subList:\n                x+=1\n                if self._character.pos == (x, y):\n                    if self._map[y][x] == 1:\n                        # Collision\n                        self._collision = True\n                        self._run = False\n                    elif self._map[y][x] == \"p\":\n                        # Scores\n                        self._character.score += 1\n\n                    elif self._map[y][x] == \"f\":\n                        # Finish\n                        self._finished = True\n                        self.run = False\n                        self._character.score += 10\n\n        self.update_map()\n\n\n    def update_map(self):\n        \"\"\"\n        Function: Updates status for every bit in map\n        \"\"\"\n        y = -1\n        for subList in self._map:\n            x = -1\n            y+=1\n            for _ in subList:\n                x+=1\n                if self._character.pos == (x, y):\n                    if self._map[y][x] == \"f\":\n                        self._map[y][x] = \"f\"\n                    else:\n                        self._map[y][x] = \"c\"\n                elif self._map[y][x] == \"c\":\n                    self._map[y][x] = 0\n\n\n    def get_map(self):\n        \"\"\"\n        Function: Gets a map for the Game to be played on\n        \"\"\"\n\n        randomMapInt = random.randint(1, 3)\n        _file = open(f\"maps/map{randomMapInt}.txt\", \"r\")\n        _lines = _file.readlines()\n        _width = 0\n        _height = 0\n        for line in _lines:\n            lineList = []\n            for bit in line:\n                if bit != \" \" and bit != \"\\n\":\n                    if bit.isnumeric():\n                        if bit == \"0\":\n                            rand = random.randint(0,5)\n                            if rand==0:\n                                lineList.append(\"p\")\n                            else:\n                                lineList.append(int(bit))\n                        else:\n                            lineList.append(int(bit))\n                    else:\n                        lineList.append(bit)\n\n            self._map.append(lineList)\n\n        self._map_size = (len(self._map[0]), len(self._map))\n        _file.close()\n\n    @property\n    def map(self):\n        return self._map\n\n    @property\n    def map_size(self):\n        return self._map_size\n\n    @property\n    def run(self):\n        return self._run\n\n    @run.setter\n    def run(self, status):\n        self._run = status\n\n    @property\n    def character(self):\n        return self._character\n\n    @property\n    def collision(self):\n        return self._collision\n\n    @collision.setter\n    def collision(self, status):\n        self._collision = status\n\n    @property\n    def finished(self):\n        return self._finished\n\n    @finished.setter\n    def finished(self, status):\n        self._finished = status\n    \n\nclass Character():\n    \"\"\"\n    Class: Functionality of the character\n    \"\"\"\n    def __init__(self, game, direction=\"EAST\"):\n        self._direction = direction\n        self._game = game\n        self._score = 0\n        self._pos = self.get_init_pos()\n\n    def get_init_pos(self):\n        \"\"\"\n        Function: Get the position in the map.\n        \"\"\"\n        y = -1\n        for sublist in self._game.map:\n            x = -1\n            y+=1\n            for element in sublist:\n                x+=1\n                if element == \"c\":\n                    return (x,y)\n\n    def move_forward(self):\n        \"\"\"Function: Move character forward one block in its current direction;\n        flags a collision if the move would leave the map.\n        \"\"\"\n        x = self._pos[0]\n        y = self._pos[1]\n\n        if self._direction == \"EAST\":\n            x+=1\n        elif self._direction == \"WEST\":\n            x-=1\n        elif self._direction == \"NORTH\":\n            y-=1\n        elif self._direction == \"SOUTH\":\n            y+=1\n\n        if (self._game.map_size[0]-1>=x>=0) and (self._game.map_size[1]-1>=y>=0):\n            self._pos = (x,y)\n        else:\n            print(\"index out of bounds\")\n            self._game.collision = True\n            self._game.run = False\n\n    def turn_right(self):\n        \"\"\"\n        Function: The turn right block, the character will turn right\n        \"\"\"\n        if self._direction == \"EAST\":\n            self._direction = \"SOUTH\"\n\n        elif self._direction == \"WEST\":\n            self._direction = \"NORTH\"\n\n        elif self._direction == \"NORTH\":\n            self._direction = \"EAST\"\n\n        elif self._direction == \"SOUTH\":\n            self._direction = \"WEST\"\n\n\n    def turn_left(self):\n        \"\"\"\n        Function: The turn left block, the character will turn left\n        \"\"\"\n        if self._direction == \"EAST\":\n            self._direction = \"NORTH\"\n\n        elif self._direction == \"WEST\":\n            self._direction = \"SOUTH\"\n\n        elif self._direction == \"NORTH\":\n            self._direction = \"WEST\"\n        \n        elif self._direction == \"SOUTH\":\n            self._direction = \"EAST\"\n\n    def path_ahead(self):\n        \"\"\"\n        Function: The path ahead block, the character can move forward\n        \"\"\"\n        \n        x = self._pos[0]\n        y = self._pos[1]\n\n        if self._game.map[y][x] == \"f\":\n            return False\n\n        if self._direction == \"EAST\":\n            x+=1\n        elif self._direction == \"WEST\":\n            x-=1\n        elif self._direction == \"NORTH\":\n            y-=1\n        elif self._direction == \"SOUTH\":\n            y+=1\n        \n        if self._game.map[y][x] == 1:\n            return False\n        else:\n            return True\n\n    def path_right(self):\n        \"\"\"\n        Function: The path right block, True if the square to the character's right is open\n        \"\"\"\n        x = self._pos[0]\n        y = self._pos[1]\n        if self._game.map[y][x] == \"f\":\n            return False\n        if self._direction == \"EAST\":\n            y+=1\n        elif self._direction == \"WEST\":\n            y-=1\n        elif self._direction == \"NORTH\":\n            x+=1\n        elif self._direction == \"SOUTH\":\n            x-=1\n\n        if self._game.map[y][x] == 1:\n            return False\n        else:\n            return True\n\n    def path_left(self):\n        \"\"\"\n        Function: The path left block, True if the square to the character's left is open\n        \"\"\"\n        x = self._pos[0]\n        y = self._pos[1]\n        if self._game.map[y][x] == \"f\":\n            return False\n        if self._direction == \"EAST\":\n            y-=1\n        elif self._direction == \"WEST\":\n            y+=1\n        elif self._direction == \"NORTH\":\n            x-=1\n        elif self._direction == \"SOUTH\":\n            x+=1\n\n        if self._game.map[y][x] == 1:\n            return False\n        else:\n            return True\n\n    \n    def not_finished(self):\n        \"\"\"\n        Function: The not finished block, if the game is not finished yet\n        \"\"\"\n        x = self._pos[0]\n        y = self._pos[1]\n        \n        if self._game.map[y][x] == \"f\":\n            return False\n        elif self._game.finished:\n            return False\n        else:\n            return True\n\n\n\n    @property \n    def direction(self):\n        return self._direction\n\n    @property\n    def score(self):\n        return self._score\n\n    @property\n    def pos(self):\n        return self._pos\n\n    @score.setter\n    def score(self, points):\n        self._score = points\n\n\nclass Window:\n    \"\"\"\n    Class: Draws the window for the game.\n    \"\"\"\n    def __init__(self, game):\n        self._winWidth = 1920\n        self._windHeight = 1080\n        self.FPS = 60\n\n        self.window = pygame.display.set_mode((0, 0), pygame.FULLSCREEN) # Window\n        pygame.display.set_caption(\"Maze Game\") # Window title\n\n        # rgb\n        self.black = (0, 0, 0)\n        self.white = (255, 255,255)\n        self.red = (255, 0, 0)\n        self.green = (0, 255, 0)\n        self.blue = (0, 0, 255)\n\n        # make sure all the blocks fit in the window\n        self.margin = 5\n        self.width = int(self._winWidth/game.map_size[0] - self.margin)\n        self.height = int(self._windHeight/game.map_size[1] - self.margin)\n        if self.height < self.width:\n            self.width = self.height\n        \n        self._game = game\n\n\n        self._wallImage = pygame.image.load(\"graphics/wall.jpg\")\n        self._wallImage = pygame.transform.scale(self._wallImage, (self.width, self.width))\n\n        self._pathImage = pygame.image.load(\"graphics/path.jpg\")\n        self._pathImage = pygame.transform.scale(self._pathImage, (self.width, self.width))\n\n        self._finishImage = pygame.image.load(\"graphics/finish.jpg\")\n        self._finishImage = pygame.transform.scale(self._finishImage, (self.width, self.width))\n\n        self._characterImage = 
pygame.image.load(f\"graphics/character{game.character.direction.lower()}.jpg\")\n self._characterImage = pygame.transform.scale(self._characterImage, (self.width, self.width))\n\n self._foodImage = pygame.image.load(\"graphics/food.jpg\")\n self._foodImage = pygame.transform.scale(self._foodImage, (self.width, self.width))\n\n def draw(self):\n \"\"\"\n Function: Draws the grid, help button and the run button.\n \"\"\"\n\n # detta måste garantera att\n game = self._game\n self.clock = pygame.time.Clock()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game.run = False\n Config.stopThreads_setter(True)\n \n self.window.fill(self.black)\n\n # Draw the grid\n currentMap = game.map\n for x in range(0, game.map_size[0]):\n for y in range(0 ,game.map_size[1]):\n if currentMap[y][x] == 1:\n currentEntityImage = self._wallImage\n elif currentMap[y][x] == 0:\n currentEntityImage = self._pathImage\n elif currentMap[y][x] == \"f\":\n currentEntityImage = self._finishImage\n elif currentMap[y][x] == \"c\":\n currentEntityImage = pygame.image.load(f\"graphics/character{game.character.direction.lower()}.jpg\")\n elif currentMap[y][x] == \"p\":\n currentEntityImage = self._foodImage\n\n self.window.blit(currentEntityImage, [(self.margin + self.width) * x + self.margin,\n (self.margin + self.width) * y + self.margin])\n\n \n #Hjälp knappen\n _helpPos = (1300, 200)\n font = pygame.font.Font('freesansbold.ttf', 80)\n self._helpText = font.render('HELP', True, self.red,)\n self._helpReact = self._helpText.get_rect()\n self._helpReact = _helpPos\n self.window.blit(self._helpText, self._helpReact)\n \n #Run knappen\n runPos = (1300, 600)\n self._runText = font.render(\"RUN\", True, self.green)\n self._runReact = self._runText.get_rect()\n self._runReact = runPos\n self.window.blit(self._runText, self._runReact)\n \n self.clock.tick(self.FPS)\n pygame.display.flip()\n\n def draw_compile(self):\n \"\"\"\n Function: Draws 'compling, please wait' when the user click the run button.\n \"\"\"\n font = pygame.font.Font('freesansbold.ttf', 100)\n compText = font.render(\"Compling, please wait.\", True, self.red)\n compRect = compText.get_rect()\n compRect.center = (self._winWidth//2 , self._windHeight//2 - 40)\n self.window.blit(compText, compRect)\n pygame.display.flip()\n\n\n def draw_score(self):\n \"\"\"\n Function: Draws the score and if you press 'New game' then a new map is going to show up.\n \"\"\"\n character = self._game.character\n pygame.font.init()\n textFont = pygame.font.SysFont(\"arial\", 100)\n text = textFont.render(f\"Score: {character.score}\", True, (255,0,0), (0,0,0))\n textRect = text.get_rect()\n textRect.center = (1920//2, 1080//2)\n\n exitFont = pygame.font.SysFont(\"arial\", 64)\n exitText = exitFont.render(\"New Game\", True, (255,0,0), (0,0,0))\n exitTextRect = exitText.get_rect()\n exitPos = (1920//2, 1080-50)\n exitTextRect.center = exitPos\n\n self.window.blit(text, textRect)\n self.window.blit(exitText, exitTextRect)\n \n exitPressed = False\n while not exitPressed:\n pygame.display.flip()\n for event in pygame.event.get() :\n if event.type == pygame.QUIT :\n exitPressed=True\n if event.type == MOUSEBUTTONDOWN:\n mPos = pygame.mouse.get_pos()\n if (exitPos[0]-150<=mPos[0]<=exitPos[0]+150) and (exitPos[1]-30<=mPos[1]<=exitPos[1]+30):\n exitPressed=True\n if event.type == KEYDOWN or event.type == KEYUP:\n if event.key == K_ESCAPE:\n exitPressed=True\n Config.stopThreads_setter(True)\n\nclass HelpWindow:\n \"\"\"\n Class: This class is for the help window when you 
press the help button.\n    \"\"\"\n    def __init__(self):\n        pygame.font.init()\n\n        self.bg = pygame.image.load('graphics/helpbg.jpg')  # background (forward slash keeps the path portable)\n\n        # self._windw = pygame.display.set_mode([self.bg.get_width(), self.bg.get_height()])\n        # self._windw = pygame.display.set_mode(((1920-self.bg.get_width())//2 , ((1080-self.bg.get_height())//2), pygame.FULLSCREEN)\n        self._windw = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n        pygame.display.set_caption(\"Instruction window\")\n\n        self.black = (0, 0, 0)\n        self.white = (255, 255, 255)\n        self.red = (255, 0, 0)\n        self.green = (0, 255, 0)\n        self.blue = (0, 0, 255)\n\n    def sve(self):\n        \"\"\"\n        Function: Draws the Swedish text on the help window.\n        \"\"\"\n\n        self._textFont = pygame.font.Font('freesansbold.ttf', 14)\n        self._startText = self._textFont.render('Spelet startas, början av sekvensen.', True, self.white)\n        self._startRect = self._startText.get_rect()\n        self._startRect.center = (260, 70)\n        self._stopText = self._textFont.render('Spelet avslutas, slutet av sekvensen.', True, self.white)\n        self._stopRect = self._stopText.get_rect()\n        self._stopRect.center = (260, 115)\n        self.turnR_text = self._textFont.render('Karaktären svänger höger.', True, self.white)\n        self._turnRect = self.turnR_text.get_rect()\n        self._turnRect.center = (260, 160)\n        self._turnLText = self._textFont.render('Karaktären svänger vänster.', True, self.white)\n        self._turnLRect = self._turnLText.get_rect()\n        self._turnLRect.center = (255, 205)\n        self._goForwardText = self._textFont.render('Karaktären kommer att gå framåt.', True, self.white)\n        self._goForwardRect = self._goForwardText.get_rect()\n        self._goForwardRect.center = (295, 250)\n        self._whileText = self._textFont.render('Flödesuttalande, kod körs upprepade gånger.', True, self.white)\n        self._whileRect = self._whileText.get_rect()\n        self._whileRect.center = (295, 296)\n        self._ifText = self._textFont.render('Koden körs när if-satsen är sant.', True, self.white)\n        self._ifRect = self._ifText.get_rect()\n        self._ifRect.center = (213, 340)\n        self._elseText = self._textFont.render('Koden körs när if-satsen är falsk.', True, self.white)\n        self._elseRect = self._elseText.get_rect()\n        self._elseRect.center = (240, 385)\n        self._pathAheadText = self._textFont.render('Möjligheten att gå framåt.', True, self.white)\n        self._pathAheadRect = self._pathAheadText.get_rect()\n        self._pathAheadRect.center = (267, 431)\n        self._pathRightText = self._textFont.render('Möjligheten att svänga höger.', True, self.white)\n        self._pathRightRect = self._pathRightText.get_rect()\n        self._pathRightRect.center = (271, 476)\n        self._pathLeftText = self._textFont.render('Möjligheten att svänga vänster.', True, self.white)\n        self._pathLeftRect = self._pathLeftText.get_rect()\n        self._pathLeftRect.center = (268, 521)\n        self.not_finished_text = self._textFont.render(\"Karaktären har inte nått målet\", True, self.white)\n        self._notFinishedRect = self.not_finished_text.get_rect()\n        self._notFinishedRect.center = (298, 566)\n\n        self._windw.blit(self._startText, self._startRect)\n        self._windw.blit(self._stopText, self._stopRect)\n        self._windw.blit(self.turnR_text, self._turnRect)\n        self._windw.blit(self._turnLText, self._turnLRect)\n        self._windw.blit(self._goForwardText, self._goForwardRect)\n        self._windw.blit(self._whileText, self._whileRect)\n        self._windw.blit(self._ifText, self._ifRect)\n        self._windw.blit(self._elseText, self._elseRect)\n        
self._windw.blit(self._pathAheadText, self._pathAheadRect)\n        self._windw.blit(self._pathRightText, self._pathRightRect)\n        self._windw.blit(self._pathLeftText, self._pathLeftRect)\n        self._windw.blit(self.not_finished_text, self._notFinishedRect)\n\n    def eng(self):\n        \"\"\"\n        Function: Draws the English text on the help window.\n        \"\"\"\n        self._textFont = pygame.font.Font('freesansbold.ttf', 14)\n        self._startText = self._textFont.render('The game starts, beginning of the sequence.', True, self.white)\n        self._startRect = self._startText.get_rect()\n        self._startRect.center = (289, 71)\n        self._stopText = self._textFont.render('Game ends, end of the sequence.', True, self.white)\n        self._stopRect = self._stopText.get_rect()  # was self._startText.get_rect(), which sized this rect from the wrong label\n        self._stopRect.center = (285, 115)\n        self.turnR_text = self._textFont.render('The character turns right.', True, self.white)\n        self._turnRect = self.turnR_text.get_rect()\n        self._turnRect.center = (260, 161)\n        self._turnLText = self._textFont.render('The character turns left.', True, self.white)\n        self._turnLRect = self._turnLText.get_rect()\n        self._turnLRect.center = (240, 205)\n        self._goForwardText = self._textFont.render('The character will move forward.', True, self.white)\n        self._goForwardRect = self._goForwardText.get_rect()\n        self._goForwardRect.center = (295, 250)\n        self._whileText = self._textFont.render('Flow statement, code executes repeatedly.', True, self.white)\n        self._whileRect = self._whileText.get_rect()\n        self._whileRect.center = (285, 295)\n        self._ifText = self._textFont.render('Runs the code when the if statement is true.', True, self.white)\n        self._ifRect = self._ifText.get_rect()\n        self._ifRect.center = (255, 340)\n        self._elseText = self._textFont.render('Runs the code when the if statement is false.', True, self.white)\n        self._elseRect = self._elseText.get_rect()\n        self._elseRect.center = (280, 385)\n        self._pathAheadText = self._textFont.render('The opportunity to move forward.', True, self.white)\n        self._pathAheadRect = self._pathAheadText.get_rect()\n        self._pathAheadRect.center = (294, 430)\n        self._pathRightText = self._textFont.render('The opportunity to turn right.', True, self.white)\n        self._pathRightRect = self._pathRightText.get_rect()\n        self._pathRightRect.center = (271, 475)\n        self._pathLeftText = self._textFont.render('The opportunity to turn left.', True, self.white)\n        self._pathLeftRect = self._pathLeftText.get_rect()\n        self._pathLeftRect.center = (255, 520)\n        self.not_finished_text = self._textFont.render(\"Character has not reached the goal.\", True, self.white)\n        self._notFinishedRect = self.not_finished_text.get_rect()\n        self._notFinishedRect.center = (325, 566)\n\n        self._windw.blit(self._startText, self._startRect)\n        self._windw.blit(self._stopText, self._stopRect)\n        self._windw.blit(self.turnR_text, self._turnRect)\n        self._windw.blit(self._turnLText, self._turnLRect)\n        self._windw.blit(self._goForwardText, self._goForwardRect)\n        self._windw.blit(self._whileText, self._whileRect)\n        self._windw.blit(self._ifText, self._ifRect)\n        self._windw.blit(self._elseText, self._elseRect)\n        self._windw.blit(self._pathAheadText, self._pathAheadRect)\n        self._windw.blit(self._pathRightText, self._pathRightRect)\n        self._windw.blit(self._pathLeftText, self._pathLeftRect)\n        self._windw.blit(self.not_finished_text, self._notFinishedRect)\n\n    def draw_help(self):\n        \"\"\"\n        Function: Draws the actual text on the window and draws the button that changes the language and the 
exit button.\n \"\"\"\n help_window = HelpWindow()\n\n #text size\n self.font = pygame.font.Font('freesansbold.ttf', 25)\n self._textFont = pygame.font.Font('freesansbold.ttf', 17) \n self._langFont = pygame.font.Font('freesansbold.ttf', 17)\n #Title\n self.text = self.font.render('Instruktioner-Instructions', True, self.black,)\n self._textRect = self.text.get_rect()\n self._textRect.center = (230, 45)\n\n #Exit rektangeln\n self._exitText = self.font.render('Exit', True, self.red,)\n self._exitReact = self._exitText.get_rect()\n self._exitReact.center = (250, 610)\n\n #Välj språk, Sve eller Eng\n self._pickLangText = self._langFont.render('Sve/Eng', True, self.white,)\n self._pickLangRect = self._pickLangText.get_rect()\n self._pickLangRect = (405, 38)\n\n #Start block\n self._startText = self._textFont.render('Start block: ', True, self.black,)\n self._startRect = self._startText.get_rect()\n self._startRect = (29, 61)\n\n #Stop block\n self._stopText = self._textFont.render('Stop block: ', True, self.black,)\n self._stopRect = self._stopText.get_rect()\n self._stopRect = (29, 106)\n\n #Turn right block\n self._rightText = self._textFont.render('Turn right block: ', True, self.black)\n self._rightRect = self._rightText.get_rect()\n self._rightRect = (29, 151)\n\n #Turn left block\n self._leftText = self._textFont.render('Turn left block: ', True, self.black)\n self._leftRect = self._leftText.get_rect()\n self._leftRect = (29, 195)\n\n #Go forward block\n self._forwardText = self._textFont.render('Go forward block: ', True, self.black)\n self._forwardRect = self._forwardText.get_rect()\n self._forwardRect = (29, 241)\n\n #While block\n self._whileText = self._textFont.render('While block: ', True, self.black)\n self._whileRect = self._whileText.get_rect()\n self._whileRect = (29, 286)\n\n #If-block\n self._ifText = self._textFont.render('If block: ', True, self.black)\n self._ifRect = self._ifText.get_rect()\n self._ifRect = (29, 331)\n\n #Else block\n self._elseText = self._textFont.render('Else block: ', True, self.black)\n self._elseRect = self._elseText.get_rect()\n self._elseRect = (29, 376)\n\n #Path ahead block\n self._aheadText = self._textFont.render('Path ahead block: ', True, self.black)\n self._aheadRect= self._aheadText.get_rect()\n self._aheadRect = (29, 421)\n\n #Path right block\n self._pathRightText = self._textFont.render('Path right block: ', True, self.black)\n self._pathRightRect= self._pathRightText.get_rect()\n self._pathRightRect = (29, 466)\n\n #Path left block\n self._pathLeftText = self._textFont.render('Path left block: ', True, self.black)\n self._pathLeftRect= self._pathLeftText.get_rect()\n self._pathLeftRect = (29, 511)\n\n #Not finished block\n self._notFinText = self._textFont.render('Not finished block: ', True, self.black)\n self._notFinRect = self._notFinText.get_rect()\n self._notFinRect = (29, 556)\n \n\n self._windw.blit(self.bg, (0, 0))\n self._windw.blit(self.text, self._textRect)\n self._windw.blit(self.bg, (0, 0))\n self._windw.blit(self.text, self._textRect)\n self._windw.blit(self._exitText, self._exitReact)\n self._windw.blit(self._pickLangText, self._pickLangRect)\n self._windw.blit(self._startText, self._startRect)\n self._windw.blit(self._stopText, self._stopRect)\n self._windw.blit(self._rightText, self._rightRect)\n self._windw.blit(self._leftText, self._leftRect)\n self._windw.blit(self._forwardText, self._forwardRect)\n self._windw.blit(self._whileText, self._whileRect)\n self._windw.blit(self._ifText, self._ifRect)\n 
self._windw.blit(self._elseText, self._elseRect)\n self._windw.blit(self._aheadText, self._aheadRect)\n self._windw.blit(self._pathRightText, self._pathRightRect)\n self._windw.blit(self._pathLeftText, self._pathLeftRect)\n self._windw.blit(self._notFinText, self._notFinRect)\n \n help_window.sve()\n pygame.display.flip()\n\n lang = \"sve\"\n hasChanged = False\n run = True\n while run:\n if hasChanged:\n self._windw.blit(self.bg, (0, 0))\n self._windw.blit(self.text, self._textRect)\n self._windw.blit(self.bg, (0, 0))\n self._windw.blit(self.text, self._textRect)\n self._windw.blit(self._exitText, self._exitReact)\n self._windw.blit(self._pickLangText, self._pickLangRect)\n self._windw.blit(self._startText, self._startRect)\n self._windw.blit(self._stopText, self._stopRect)\n self._windw.blit(self._rightText, self._rightRect)\n self._windw.blit(self._leftText, self._leftRect)\n self._windw.blit(self._forwardText, self._forwardRect)\n self._windw.blit(self._whileText, self._whileRect)\n self._windw.blit(self._ifText, self._ifRect)\n self._windw.blit(self._elseText, self._elseRect)\n self._windw.blit(self._aheadText, self._aheadRect)\n self._windw.blit(self._pathRightText, self._pathRightRect)\n self._windw.blit(self._pathLeftText, self._pathLeftRect)\n self._windw.blit(self._notFinText, self._notFinRect)\n if lang == \"sve\":\n help_window.sve()\n elif lang == \"eng\":\n help_window.eng()\n pygame.display.flip()\n hasChanged = False\n\n for event in pygame.event.get(): \n if event.type == pygame.MOUSEBUTTONDOWN: \n self._mx, self._my = pygame.mouse.get_pos()\n #Exit knappen\n if self._mx >= 225 and self._mx <= 275 and self._my >= 600 and self._my <= 620:\n run = False\n break\n #Sve knappen\n if self._mx >= 405 and self._mx <= 430 and self._my >= 38 and self._my <= 51:\n lang = \"sve\"\n hasChanged = True\n #Eng knappen\n if self._mx >= 440 and self._mx <= 469 and self._my >= 40 and self._my <= 55:\n lang = \"eng\"\n hasChanged = True\n if event.type == pygame.QUIT: \n run = False\n break\n\n if event.type == KEYDOWN or event.type == KEYUP:\n if event.key == K_ESCAPE:\n exitPressed=True\n Config.stopThreads_setter(True)\n run = False\n break", "repo_name": "OliverLjung/KodBloks", "sub_path": "gamefiles/classes.py", "file_name": "classes.py", "file_ext": "py", "file_size_in_byte": 29097, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "random.randint", "line_number": 100, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 111, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 354, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 354, "usage_type": "attribute"}, {"api_name": "pygame.FULLSCREEN", "line_number": 354, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 355, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 355, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 374, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 374, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 375, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 375, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 377, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 377, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 378, "usage_type": 
"call"}, {"api_name": "pygame.transform", "line_number": 378, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 380, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 380, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 381, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 381, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 383, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 383, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 384, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 384, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 386, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 386, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 387, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 387, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 396, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 396, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 397, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 397, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 398, "usage_type": "attribute"}, {"api_name": "var.Config.stopThreads_setter", "line_number": 400, "usage_type": "call"}, {"api_name": "var.Config", "line_number": 400, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 415, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 415, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 425, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 425, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 439, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 439, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 445, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 445, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 450, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 450, "usage_type": "attribute"}, {"api_name": "pygame.font.init", "line_number": 458, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 458, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 459, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 459, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 464, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 464, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 475, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 475, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 476, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 476, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 477, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 480, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 480, "usage_type": "attribute"}, {"api_name": "var.Config.stopThreads_setter", "line_number": 486, "usage_type": "call"}, {"api_name": "var.Config", "line_number": 486, "usage_type": "name"}, {"api_name": "pygame.font.init", 
"line_number": 493, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 493, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 495, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 495, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 499, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 499, "usage_type": "attribute"}, {"api_name": "pygame.FULLSCREEN", "line_number": 499, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 500, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 500, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 513, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 513, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 571, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 571, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 630, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 630, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 631, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 631, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 632, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 632, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 729, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 729, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 758, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 758, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 761, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 761, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 762, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 763, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 763, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 776, "usage_type": "attribute"}, {"api_name": "var.Config.stopThreads_setter", "line_number": 783, "usage_type": "call"}, {"api_name": "var.Config", "line_number": 783, "usage_type": "name"}]} +{"seq_id": "24018744172", "text": "import numpy as np\nfrom matplotlib import pyplot as plt\nfrom .mkidnoiseanalysis import swenson_formula\nfrom .quasiparticletimestream import QuasiparticleTimeStream\nimport copy\n\n\ndef compute_s21(readout_freq, fc, increasing, f0, qi, qc, xa, a, rf_phase_delay, rf_gain, cable_delay_phase):\n \"\"\"\n Compute forward scattering matrix element for a given mesurment setup.\n -------------\n @param:\n - readout_freq: 1D np.array\n the frequency or frequencies at which to compute S21.\n for a lit resonator this should be the intrinsic resonance frequency\n for a dark resonator this should be a sweep around the intrinsic resonance frequency\n - fc: float\n center frequency of sweep\n - increasing: bool\n True if the frequency sweep is done from low frequency to high frequency\n - f0: 1D np.array\n resonant resonant frequency in Hz. For a lit resonator this will be a 1D np.array.\n for a dark resonator it will be a single value.\n - qi: 1D np.array\n resonator internal quality factor. 
For a lit resonator this will be a 1D np.array.\n for a dark resonator it will be a single value.\n - qc: float\n resonator coupling quality factor.\n - xa: float\n resonator inductive nonlinearity\n - a: float\n resonance fractional asymmetry\n - rf_phase_delay: np.poly1D\n phase polynomial coefficients (total loop rotation) [radians]\n - rf_gain: np.poly1D\n gain polynomial coefficients [units?]\n - cable_delay_phase: float\n phase offset caused by rf cable delay [radians]\n @return:\n - s21: np.array\n s21 evaluated across frequency sweep (dark resonator) or time (lit resonator)\n\n \"\"\"\n xm = (readout_freq - fc) / fc\n xg = (readout_freq - f0) / f0\n q = (qi ** -1 + qc ** -1) ** -1\n xn = swenson_formula(q * xg, a, increasing) / q\n\n gain = rf_gain(xm)\n phase = np.exp(1j * (rf_phase_delay + cable_delay_phase * xm))\n q_num = qc + 2 * 1j * qi * qc * (xn + xa) # use xn instead of xg\n q_den = qi + qc + 2 * 1j * qi * qc * xn\n return gain * phase * (q_num / q_den) # xm, xg, q, xn, gain, phase, q_num, q_den\n\n\ndef generate_tls_noise(fs, size, scale, seed=4):\n \"\"\" two-level system noise\n inputs:\n @param fs: float\n sampling frequency (should match with whatever signal you will apply it to)\n @param size: int\n size of 1D array of points to generate\n @param scale: float\n magnitude of tls noise\n @param seed: int?\n random seed\n scale should be the value of the psd at 1 Hz\"\"\"\n random_seed = np.random.default_rng(seed=seed)\n psd_freqs = np.fft.rfftfreq(size, d=1 / fs)\n psd = np.zeros_like(psd_freqs)\n nonzero = psd_freqs != 0\n psd[nonzero] = scale / psd_freqs[nonzero]\n noise_phi = 2 * np.pi * random_seed.random(psd_freqs.size)\n noise_fft = np.exp(1j * noise_phi) # n_traces x n_frequencies\n # rescale the noise to the covariance\n a = np.sqrt(size * psd * fs / 2)\n noise_fft = a * noise_fft\n return np.fft.irfft(noise_fft, size)\n\n\ndef gen_amp_noise(snr, points, seed=2):\n \"\"\" Flat PSD, white-noise generated from voltage fluctuations\"\"\"\n random_number_generator = np.random.default_rng(seed=seed)\n a_noise = 10 ** ((20 * np.log10(1 / np.sqrt(2)) - snr) / 10) # input dBm of noise\n real_noise = np.sqrt(a_noise) * random_number_generator.normal(size=points)\n imag_noise = np.sqrt(a_noise) * random_number_generator.normal(size=points)\n return real_noise + 1j*imag_noise\n\n\ndef compute_phase1(fc, cable_delay):\n \"\"\"need to ask Nick what this is called. 
He called it \"phase1\"\n Inputs:\n fc - readout center frequency in Hz\n cable_delay - RF cable delay in sec\"\"\"\n return -2 * np.pi * fc * cable_delay\n\n\ndef compute_background(f, fc, rf_gain, rf_phase_delay, phase1):\n \"\"\" Resonator background??\"\"\"\n xm = (f - fc) / fc\n gain = rf_gain(xm)\n phase = np.exp(1j * (rf_phase_delay + phase1 * xm))\n return gain * phase\n\n\ndef gen_line_noise(freqs, amps, phases, n_samples, fs):\n \"\"\"\n Generate time series representing line noise in a single MKID coarse channel (MKID has been centered).\n @param freqs: 1D np.array or list\n frequencies of line noise\n @param amps: 1D np.array or list\n amplitudes of line noise\n @param phases: 1D np.array or list\n phases of line noise\n @param n_samples: int\n number of timeseries samples to produce\n @param fs: float\n sample rate of channel in Hz\n @return:\n \"\"\"\n freqs = np.asarray(freqs) # Hz and relative to center of bin (MKID we are reading out)\n amps = np.asarray(amps)\n phases = np.asarray(phases)\n\n n_samples = n_samples\n sample_rate = fs\n\n line_noise = np.zeros(n_samples, dtype=np.complex64)\n t = 2 * np.pi * np.arange(n_samples) / sample_rate\n for i in range(freqs.size):\n phi = t * freqs[i]\n exp = amps[i] * np.exp(1j * (phi + phases[i]))\n line_noise += exp\n return line_noise\n\n\ndef lowpass(s21, tau_r, dt):\n \"\"\"\n Causal lowpass filter which determines the IQ response of an MKID resonator\n ----------------------------------------------------------------------\n @param s21: 1D np.array\n forward scattering matrix element timeseries measured at a single frequency (detector resonance frequency)\n @param tau_r: float\n characteristic timescale for resonator ring up (units must match dt!)\n For an MKID resonator is it ususally:\n [total quality factor (no photon)] / (pi * [original resonance frequency])\n @param dt: float\n time step between S21 sample points (units must match tau_r!)\n\n @return: 1D np.array\n iq response lowpass filtered by resonator\n \"\"\"\n # tau_r = q_tot_0/(pi*f0_0)\n # tau_r needs to be in units of dt\n t = np.arange(0, 10 * tau_r, dt) # filter time series\n t = t[:int(np.round(t.size / 2) * 2)] # make t an even number of elements\n pad = t.size // 2\n causal_filter = np.exp(-t / tau_r) / tau_r\n full_convolve = np.convolve(np.pad(s21, pad, mode='edge'), causal_filter, mode='same') * dt\n return full_convolve[pad:-pad]\n\n\nclass LineNoise:\n \"\"\"\n A class to represent the line noise in an MKID readout setup. 
Line noise is comprised of individual\n extraneous frequencies and usually arises from imperfections in analog and digital electronics.\n\n Attributes:\n --------------\n \"\"\"\n\n def __init__(self, freqs, amplitudes, phases, n_samples, fs):\n self.freqs = freqs\n self.amplitudes = amplitudes\n self.phases = phases\n self.n_samples = n_samples\n self.fs = fs\n\n @property\n def values(self):\n return gen_line_noise(self.freqs, self.amplitudes, self.phases, self.n_samples, self.fs)\n\n\nclass FrequencyGrid:\n \"\"\"\n A class to represent the frequency sweep settings used to read out an MKID resonator.\n ...\n Attributes:\n --------------\n @type fc: float\n center frequency [Hz]\n @type points: int\n sweep points\n @type span: float\n sweep bandwidth [Hz]\n \"\"\"\n\n def __init__(self, fc=4.0012e9, points=1000, span=500e6):\n self.fc = fc # center frequency [Hz]\n self.points = int(points) # sweep points\n self.span = span # sweep bandwidth [points]\n self.grid = np.linspace(self.fc - 2 * self.span / self.points,\n self.fc + 2 * self.span / self.points,\n self.points)\n\n @property\n def xm(self):\n return self.grid / self.fc - 1\n\n @property\n def increasing(self):\n if self.grid[1] > self.grid[0]:\n return True\n else:\n return False\n\n\nclass RFElectronics:\n def __init__(self, gain: (np.poly1d, tuple) = (3.0, 0, 0), phase_delay=0, cable_delay=50e-9, white_noise_scale=30,\n line_noise: LineNoise = LineNoise([500e3], [0.01], [0], 100, 1e3)):\n \"\"\"\n A class to represent effects of RF cabling and amplifiers on MKID readout.\n ...\n Attributes:\n -----------------\n @type gain: np.poly1D\n gain polynomial coefficients\n @type phase_delay: float\n total loop rotation [radians]\n @type cable_delay: float\n cable delay [sec]\n @type white_noise_scale: float\n dimensionless parameter similar to SNR indicating system white noise\n nominal values are 10 (bad SNR, very noisy) and 30 (good SNR, not too noisy)\n \"\"\"\n if isinstance(gain, tuple):\n gain = np.poly1d(*gain)\n self.gain = gain\n self.phase_delay = phase_delay # phase polynomial coefficients (total loop rotation) [radians]\n self.cable_delay = cable_delay\n self.noise_scale = white_noise_scale\n self.line_noise = line_noise\n\n\nclass Resonator:\n def __init__(self, f0=4.0012e9, qi=200000, qc=15000, xa=1e-9, a=0, tls_scale=1e4):\n \"\"\"\n A class to represent one MKID resonator.\n ...\n Attributes:\n -----------------\n @type f0: float\n resonance frequency [Hz]\n @type qi: float\n total loop rotation [radians]\n @type qc: float\n cable delay [sec]\n @type xa: float\n inductive nonlinearlity (reference??)\n @type a: float\n resonance fractional asymmetry\n \"\"\"\n self.f0 = f0\n self.qi = qi\n self.qc = qc\n self.xa = xa\n self.a = a\n self.q_tot = (self.qi ** -1 + self.qc ** -1) ** -1\n self.f0_0 = f0\n self.qi_0 = qi\n self.q_tot_0 = self.q_tot\n self.tls_scale = tls_scale\n\n\nclass ResonatorSweep:\n \"\"\"\n No photons.\n \"\"\"\n\n def __init__(self, res: Resonator, freq: FrequencyGrid, rf: RFElectronics):\n self.res = res\n self.freq = freq\n self.rf = rf\n\n @property\n def phase1(self):\n \"\"\"need to ask Nick what this is called. 
He called it \"phase1\" \"\"\"\n return compute_phase1(self.freq.fc, self.rf.cable_delay)\n\n @property\n def background(self):\n \"\"\" Resonator background??\"\"\"\n return compute_background(self.res.f0, self.freq.fc, self.rf.gain, self.rf.phase_delay, self.phase1)\n\n @property\n def s21(self):\n return compute_s21(self.freq.grid, self.freq.fc, self.freq.increasing, self.res.f0, self.res.qi,\n self.res.qc, self.res.xa, self.res.a, self.rf.phase_delay, self.rf.gain, self.phase1)\n\n def plot_sweep(self, ax=None, fig=None):\n plt.rcParams.update({'font.size': 12})\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))\n fig.suptitle(f'{self.res.f0 * 1e-9} GHz Simulated Resonator', fontsize=15)\n\n ax1.plot(self.freq.grid * 1e-9, 20 * np.log10(np.abs(self.s21)), linewidth=4)\n ax1.set_ylabel('|S21| [dB]')\n ax1.set_xlabel('Frequency [GHz]')\n ax1.set_title('Transmission')\n ax2.plot(self.s21.real, self.s21.imag, 'o')\n ax2.set_xlabel('Real(S21)')\n ax2.set_ylabel('Imag(S21)')\n ax2.set_title('IQ Loop')\n\n\nclass ReadoutPhotonResonator:\n def __init__(self, res: Resonator, photons: QuasiparticleTimeStream, freq: FrequencyGrid, rf: RFElectronics,\n seed=2, noise_on=False):\n self.res = copy.deepcopy(res)\n self.photons = photons\n self.noise_rng = np.random.default_rng(seed=seed)\n self.freq = freq\n self.rf = rf\n self.rf.line_noise.fs = self.photons.fs\n self.rf.line_noise.n_samples = self.photons.points\n self.tls_noise = generate_tls_noise(self.photons.fs, self.photons.data.size, self.res.tls_scale)\n dfr = -photons.data * 1e5 + self.tls_noise\n dqi_inv = photons.data * 2e-5\n self.res.f0 = res.f0 + dfr\n self.res.qi = (res.qi ** -1 + dqi_inv) ** -1\n self.res.q_tot = (self.res.qi ** -1 + res.qc ** -1) ** -1\n self.res.f0_0 = res.f0 # original resonance frequency\n self.res.q_tot_0 = res.q_tot\n self.noise_on = noise_on\n\n @property\n def phase1(self):\n \"\"\"need to ask Nick what this is called. 
He called it \"phase1\" \"\"\"\n return compute_phase1(self.freq.fc, self.rf.cable_delay)\n\n @property\n def amp_noise(self):\n \"\"\" White amplifier noise\"\"\"\n return gen_amp_noise(self.rf.noise_scale, self.photons.points)\n\n @property\n def line_noise(self):\n \"\"\" Line noise\"\"\"\n return self.rf.line_noise.values\n\n @property\n def background(self):\n \"\"\" Resonator background??\"\"\"\n return compute_background(self.res.f0, self.freq.fc, self.rf.gain, self.rf.phase_delay, self.phase1)\n\n @property\n def s21(self): # maybe change name to reflect 1/2 noise weirdness\n return compute_s21(self.res.f0_0, self.freq.fc, self.freq.increasing, self.res.f0, self.res.qi,\n self.res.qc, self.res.xa, self.res.a, self.rf.phase_delay, self.rf.gain, self.phase1)\n\n @property\n def iq_response(self):\n if self.noise_on:\n return lowpass(self.s21, self.res.q_tot_0 / (np.pi * self.res.f0_0), self.photons.dt)\\\n + self.amp_noise + self.line_noise\n return lowpass(self.s21, self.res.q_tot_0 / (np.pi * self.res.f0_0), self.photons.dt)\n\n # Add amplifier and line noise after lowpass\n\n @property\n def normalized_s21(self):\n s21_dark_on_res = compute_s21(self.res.f0_0, self.freq.fc, self.freq.increasing, self.res.f0_0, self.res.qi_0,\n self.res.qc, self.res.xa, self.res.a, self.rf.phase_delay, self.rf.gain,\n self.phase1)\n return s21_dark_on_res / self.background\n\n @property\n def normalized_iq(self):\n return self.iq_response / self.background\n\n def gen2_coordinate_transformation(self):\n i_center = (np.percentile(self.iq_response.real, 95) + np.percentile(self.iq_response.real, 5)) / 2.\n q_center = (np.percentile(self.iq_response.imag, 95) + np.percentile(self.iq_response.imag, 5)) / 2.\n #TODO add loop rotation\n return np.angle(self.iq_response.real - i_center + 1j*(self.iq_response.imag - q_center))\n\n def basic_coordinate_transformation(self): # implement a more basic coordinate transformation\n z1 = (1 - self.iq_response / self.background - self.res.q_tot_0 / (2 * self.res.qc) + 1j *\n self.res.q_tot_0 * self.res.xa) / (1 - self.normalized_s21 - self.res.q_tot_0 /\n (2 * self.res.qc) + 1j * self.res.q_tot_0 * self.res.xa)\n theta1 = np.arctan2(z1.imag, z1.real)\n d1 = (np.abs(1 - self.iq_response / self.background - self.res.q_tot_0 / (2 * self.res.qc) +\n 1j * self.res.q_tot_0 * self.res.xa) / np.abs(self.res.q_tot_0 / (2 * self.res.qc) -\n 1j * self.res.q_tot_0 * self.res.xa)) - 1\n return theta1, d1\n\n def nick_coordinate_transformation(self):\n xn = swenson_formula(0, self.res.a, self.freq.increasing) / self.res.q_tot_0\n theta_2 = -4 * self.res.q_tot_0 / (1 + 4 * self.res.q_tot_0 ** 2 * xn ** 2) * \\\n ((self.normalized_iq.imag + 2 * self.res.qc * self.res.xa * (self.normalized_iq.real - 1)) /\n (2 * self.res.qc * np.abs(1 - self.normalized_iq) ** 2) - xn)\n d2 = -2 * self.res.q_tot_0 / (1 + 4 * self.res.q_tot_0 ** 2 * xn ** 2) * ((self.normalized_iq.real -\n np.abs(\n self.normalized_iq) ** 2 + 2 * self.res.qc * self.res.xa *\n self.normalized_iq.imag) / (\n self.res.qc * np.abs(\n 1 - self.normalized_iq) ** 2) - 1 / self.res.qi_0)\n return theta_2, d2\n\n def plot_photon_response(self, s21_dark):\n fig, axes = plt.subplots()\n axes.axis('equal')\n axes.plot(s21_dark.real, s21_dark.imag, 'o')\n axes.plot(self.s21.real, self.s21.imag, '-')\n axes.plot(self.iq_response.real, self.iq_response.imag)\n", "repo_name": "MazinLab/MKIDReadoutAnalysis", "sub_path": "mkidreadoutanalysis/resonator.py", "file_name": "resonator.py", "file_ext": "py", "file_size_in_byte": 16417, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "mkidnoiseanalysis.swenson_formula", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.random.default_rng", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.fft.rfftfreq", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.fft.irfft", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.random.default_rng", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.log10", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.complex64", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.convolve", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 219, "usage_type": "attribute"}, {"api_name": "numpy.poly1d", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 301, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 301, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 301, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 302, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 302, "usage_type": "name"}, {"api_name": "numpy.log10", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 305, "usage_type": "call"}, {"api_name": "quasiparticletimestream.QuasiparticleTimeStream", "line_number": 316, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 318, "usage_type": "call"}, {"api_name": "numpy.random.default_rng", 
"line_number": 320, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 320, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 363, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 365, "usage_type": "attribute"}, {"api_name": "numpy.percentile", "line_number": 381, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.angle", "line_number": 384, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 390, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 391, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 392, "usage_type": "call"}, {"api_name": "mkidnoiseanalysis.swenson_formula", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 402, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 405, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 410, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 410, "usage_type": "name"}]} +{"seq_id": "43644841511", "text": "# Importações utilizadas:\r\nimport sys\r\nfrom random import randint\r\nfrom PyQt5 import QtGui\r\nfrom PyQt5.QtWidgets import QApplication, QDesktopWidget, QWidget, QLabel, QPushButton, QMessageBox\r\nfrom PyQt5 import QtTest\r\n\r\n\r\n# Função statica que sorteia um lado:\r\ndef SorteiaUmLado():\r\n Indice = randint(1, 4)\r\n # print(Indice)\r\n return Indice\r\n\r\n\r\n# Classe da Janela:\r\nclass Janela(QWidget):\r\n def __init__(self):\r\n super(Janela, self).__init__()\r\n\r\n # dados da tela:\r\n self.topo = 0\r\n self.esquerda = 0\r\n self.largura = 520\r\n self.altura = 520\r\n self.titulo = 'Genius'\r\n self.setFixedSize(self.largura, self.altura)\r\n self.setWindowIcon(QtGui.QIcon('Logo.png'))\r\n self.setStyleSheet('background-color: black;')\r\n\r\n # Variaveis do programa:\r\n self.Frase_do_topo = 'Ligue e/ou Desligue:'\r\n self.LigarTexto = 'Ligar'\r\n self.DesligarTexto = 'Desligar'\r\n self.TextoOff = 'Desligado'\r\n self.TextoOn = 'Ligado'\r\n self.Ordem = []\r\n self.TamLadoBotaoJogo = 150\r\n self.IndiceClick = 0\r\n # ColorButton:\r\n self.ButtonColorInitial = 'background-color: #F3F3F2;'\r\n self.ButtonColor1 = \"QPushButton{background-color : #A9C4FA;}QPushButton::pressed{background-color : #0055FF;}QPushButton{font-size:26px; font:bold}\"\r\n self.ButtonColor2 = \"QPushButton{background-color : #FA9681;}QPushButton::pressed{background-color : #FF2D00;}QPushButton{font-size:26px; font:bold}\"\r\n self.ButtonColor3 = \"QPushButton{background-color : #F6FF8B;}QPushButton::pressed{background-color : #EBFF00;}QPushButton{font-size:26px; font:bold}\"\r\n self.ButtonColor4 = \"QPushButton{background-color : #B7FF9A;}QPushButton::pressed{background-color : #47F700;}QPushButton{font-size:26px; font:bold}\"\r\n\r\n # label 1 (Frase inicial):\r\n label = QLabel(self)\r\n label.setText(self.Frase_do_topo)\r\n label.move(15, 10)\r\n label.resize(300, 40)\r\n label.setStyleSheet('QLabel {font:bold;font-size:26px;color:#FFFEFE}')\r\n\r\n # Botão 1 (Ligar jogo):\r\n self.Ligar = QPushButton(self)\r\n self.Ligar.setText(self.LigarTexto)\r\n self.Ligar.move(300, 20)\r\n self.Ligar.resize(90, 30)\r\n self.Ligar.setStyleSheet('QPushButton {background-color:#B0C4DE;font:bold;font-size:20px,}')\r\n self.Ligar.setEnabled(True)\r\n self.Ligar.clicked.connect(self.LigarJogo)\r\n\r\n # Botão 2 (Desligar 
jogo):\r\n self.Desligar = QPushButton(self)\r\n self.Desligar.setText(self.DesligarTexto)\r\n self.Desligar.move(400, 20)\r\n self.Desligar.resize(110, 30)\r\n self.Desligar.setStyleSheet('QPushButton {background-color:#B0C4DE;font:bold;font-size:20px}')\r\n self.Desligar.setEnabled(False)\r\n self.Desligar.clicked.connect(self.DesligarJogo)\r\n\r\n # Botão1 do Jogo Genius:\r\n self.Button1 = QPushButton(self)\r\n self.Button1.move(90, 100)\r\n self.Button1.resize(self.TamLadoBotaoJogo, self.TamLadoBotaoJogo)\r\n self.Button1.setEnabled(False)\r\n self.Button1.setStyleSheet(self.ButtonColorInitial)\r\n self.Button1.clicked.connect(self.ClickBotaoJogo1)\r\n self.Button1.setText('1')\r\n\r\n # Botão2 do Jogo Genius:\r\n self.Button2 = QPushButton(self)\r\n self.Button2.move(280, 100)\r\n self.Button2.resize(self.TamLadoBotaoJogo, self.TamLadoBotaoJogo)\r\n self.Button2.setEnabled(False)\r\n self.Button2.setStyleSheet(self.ButtonColorInitial)\r\n self.Button2.clicked.connect(self.ClickBotaoJogo2)\r\n self.Button2.setText('2')\r\n\r\n # Botão3 do Jogo Genius:\r\n self.Button3 = QPushButton(self)\r\n self.Button3.move(90, 280)\r\n self.Button3.resize(self.TamLadoBotaoJogo, self.TamLadoBotaoJogo)\r\n self.Button3.setEnabled(False)\r\n self.Button3.setStyleSheet(self.ButtonColorInitial)\r\n self.Button3.clicked.connect(self.ClickBotaoJogo3)\r\n self.Button3.setText('3')\r\n\r\n # Botão4 do Jogo Genius:\r\n self.Button4 = QPushButton(self)\r\n self.Button4.move(280, 280)\r\n self.Button4.resize(self.TamLadoBotaoJogo, self.TamLadoBotaoJogo)\r\n self.Button4.setEnabled(False)\r\n self.Button4.setStyleSheet(self.ButtonColorInitial)\r\n self.Button4.clicked.connect(self.ClickBotaoJogo4)\r\n self.Button4.setText('4')\r\n\r\n # Label Desligado:\r\n self.LabeOff = QLabel(self)\r\n self.LabeOff.setText(self.TextoOff)\r\n self.LabeOff.move(215, 450)\r\n self.LabeOff.setStyleSheet('QLabel {font:bold;font-size:20px;color:#FFFEFE}')\r\n self.LabeOff.setVisible(True)\r\n\r\n # Label Ligado:\r\n self.LabeOn = QLabel(self)\r\n self.LabeOn.setText(self.TextoOn)\r\n self.LabeOn.move(225, 450)\r\n self.LabeOn.setStyleSheet('QLabel {font:bold;font-size:20px;color:#FFFEFE}')\r\n self.LabeOn.setVisible(False)\r\n\r\n # Abre a Janela:\r\n self.CarregarJanela()\r\n\r\n # Função que Carrega a janela:\r\n def CarregarJanela(self):\r\n self.setGeometry(self.esquerda, self.topo, self.largura, self.altura)\r\n self.setWindowTitle(self.titulo)\r\n self.center()\r\n self.show()\r\n\r\n # Função que centraliza a tela:\r\n def center(self):\r\n qr = self.frameGeometry()\r\n cp = QDesktopWidget().availableGeometry().center()\r\n qr.moveCenter(cp)\r\n self.move(qr.topLeft())\r\n\r\n # Função do evento do clique do botão 1:\r\n def LigarJogo(self):\r\n QtTest.QTest.qWait(1500)\r\n self.Ligar.setEnabled(False)\r\n self.Desligar.setEnabled(True)\r\n self.Button1.setEnabled(True)\r\n self.Button2.setEnabled(True)\r\n self.Button3.setEnabled(True)\r\n self.Button4.setEnabled(True)\r\n self.LabeOff.setVisible(False)\r\n self.LabeOn.setVisible(True)\r\n self.IndiceClick = 0\r\n self.LigarCorBotaoClickPress()\r\n self.FuncaoInciaVetor()\r\n self.PicaNaOrdem()\r\n\r\n # Função do evento do clique do botão 2:\r\n def DesligarJogo(self):\r\n self.Ligar.setEnabled(True)\r\n self.Desligar.setEnabled(False)\r\n self.Button1.setEnabled(False)\r\n self.Button2.setEnabled(False)\r\n self.Button3.setEnabled(False)\r\n self.Button4.setEnabled(False)\r\n self.DesligarCorBotao()\r\n self.FecharProgramaQuestion()\r\n\r\n # Função da questão (Deseja 
fechar o programa?):\r\n def FecharProgramaQuestion(self):\r\n # Cria a MsgBox\r\n box = QMessageBox()\r\n box.setIcon(QMessageBox.Question)\r\n box.setWindowTitle('ATENÇÃO!')\r\n box.setText('Deseja fechar o Programa?')\r\n box.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\r\n buttonY = box.button(QMessageBox.Yes)\r\n buttonY.setText('Sim')\r\n buttonN = box.button(QMessageBox.No)\r\n buttonN.setText('Não')\r\n box.exec_()\r\n # Condição que verifica qual botão da msgbox o usuario clicou:\r\n if box.clickedButton() == buttonY:\r\n # YES pressed\r\n sys.exit(exit(1))\r\n elif box.clickedButton() == buttonN:\r\n # NO pressed\r\n self.recomecadados()\r\n else:\r\n self.recomecadados()\r\n\r\n # recomeca uns dados do jogo:\r\n def recomecadados(self):\r\n self.IndiceClick = 0\r\n self.LabeOff.setVisible(True)\r\n self.LabeOn.setVisible(False)\r\n # Limpando Vetor da ordem:\r\n self.LimpandoVetor()\r\n\r\n # Fução que limpa o vetor reponsavel por armazenar a ordem:\r\n def LimpandoVetor(self):\r\n # print('Limpando ...')\r\n self.Ordem.clear()\r\n\r\n # Função de Clique dos botões do jogo (Botão 1):\r\n def ClickBotaoJogo1(self):\r\n # print('Botão 1 Click')\r\n self.IndiceClick += 1\r\n self.Valida_Click(1)\r\n\r\n # Função de Clique dos botões do jogo (Botão 2):\r\n def ClickBotaoJogo2(self):\r\n # print('Botão 2 Click')\r\n self.IndiceClick += 1\r\n self.Valida_Click(2)\r\n\r\n # Função de Clique dos botões do jogo (Botão 3):\r\n def ClickBotaoJogo3(self):\r\n # print('Botão 3 Click')\r\n self.IndiceClick += 1\r\n self.Valida_Click(3)\r\n\r\n # Função de Clique dos botões do jogo (Botão 4):\r\n def ClickBotaoJogo4(self):\r\n # print('Botão 4 Click')\r\n self.IndiceClick += 1\r\n self.Valida_Click(4)\r\n\r\n # Função que inicia o vetor com 3 clicks ao ligar o jogo\r\n def FuncaoInciaVetor(self):\r\n Numero = SorteiaUmLado()\r\n self.Ordem.append(Numero)\r\n Numero = SorteiaUmLado()\r\n self.Ordem.append(Numero)\r\n Numero = SorteiaUmLado()\r\n self.Ordem.append(Numero)\r\n # print(self.Ordem)\r\n\r\n # Liga cor dos botãos:\r\n def LigarCorBotaoClickPress(self):\r\n self.Button1.setStyleSheet(self.ButtonColor1)\r\n self.Button2.setStyleSheet(self.ButtonColor2)\r\n self.Button3.setStyleSheet(self.ButtonColor3)\r\n self.Button4.setStyleSheet(self.ButtonColor4)\r\n\r\n # Liga cor dos botãos:\r\n def DesligarCorBotao(self):\r\n self.Button1.setStyleSheet(self.ButtonColorInitial)\r\n self.Button2.setStyleSheet(self.ButtonColorInitial)\r\n self.Button3.setStyleSheet(self.ButtonColorInitial)\r\n self.Button4.setStyleSheet(self.ButtonColorInitial)\r\n\r\n # Pisca o botão na ordem:\r\n def PicaNaOrdem(self):\r\n self.EnabledButtonFalse()\r\n tamanho = len(self.Ordem)\r\n # texto = ''\r\n for Indice in range(tamanho):\r\n # print(Indice)\r\n Numero = self.Ordem[Indice]\r\n self.PisquueBotao(Numero)\r\n # print('Numero = {}'.format(Numero))\r\n # if Indice+1 == tamanho:\r\n # texto += '{}.'.format(Numero)\r\n # else:\r\n # texto += '{} - '.format(Numero)\r\n # print(texto)\r\n self.IndiceClick = 0\r\n self.EnabledButtonTrue()\r\n\r\n # Função que valida o click:\r\n def Valida_Click(self, Num):\r\n # print('Valida Click - {}'.format(Num))\r\n # print('Inidice - {}'.format(self.IndiceClick))\r\n Indice = self.IndiceClick - 1\r\n if Num != self.Ordem[Indice]:\r\n # print('Errou')\r\n self.VoceErrou()\r\n # else:\r\n # print('acertou')\r\n if (Indice + 1) == len(self.Ordem):\r\n Numero = SorteiaUmLado()\r\n self.Ordem.append(Numero)\r\n # print(self.Ordem)\r\n self.PicaNaOrdem()\r\n\r\n # Função 
executada quando o usuario erra a ordem dos click dos botões:\r\n def VoceErrou(self):\r\n # Cria a MsgBox\r\n box = QMessageBox()\r\n box.setIcon(QMessageBox.Question)\r\n box.setWindowTitle('Errou!')\r\n box.setText('Você errou!!!!\\nDeseja fechar o Programa?')\r\n box.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\r\n buttonY = box.button(QMessageBox.Yes)\r\n buttonY.setText('Sim')\r\n buttonN = box.button(QMessageBox.No)\r\n buttonN.setText('Não')\r\n box.exec_()\r\n # Condição que verifica qual botão da msgbox o usuario clicou\r\n if box.clickedButton() == buttonY:\r\n # YES pressed\r\n sys.exit(exit(1))\r\n elif box.clickedButton() == buttonN:\r\n # NO pressed\r\n self.LabeOff.setVisible(True)\r\n self.LabeOn.setVisible(False)\r\n self.Ligar.setEnabled(True)\r\n self.Desligar.setEnabled(False)\r\n self.Button1.setEnabled(False)\r\n self.Button2.setEnabled(False)\r\n self.Button3.setEnabled(False)\r\n self.Button4.setEnabled(False)\r\n self.DesligarCorBotao()\r\n self.recomecadados()\r\n\r\n # Desativa os botões para a fala:\r\n def EnabledButtonFalse(self):\r\n self.Button1.setEnabled(False)\r\n self.Button2.setEnabled(False)\r\n self.Button3.setEnabled(False)\r\n self.Button4.setEnabled(False)\r\n\r\n # Ativa os botões para a fala:\r\n def EnabledButtonTrue(self):\r\n self.Button1.setEnabled(True)\r\n self.Button2.setEnabled(True)\r\n self.Button3.setEnabled(True)\r\n self.Button4.setEnabled(True)\r\n\r\n # Função do pisque:\r\n def PisquueBotao(self, Numero):\r\n # print('Botão {} está piscando!'.format(Numero))\r\n if Numero == 1:\r\n self.Button1.setStyleSheet(\"QPushButton{background-color : #0055FF;}QPushButton{font-size:26px; font:bold}\")\r\n # print('Liga o Botão 1')\r\n elif Numero == 2:\r\n self.Button2.setStyleSheet(\"QPushButton{background-color : #FF2D00;}QPushButton{font-size:26px; font:bold}\")\r\n # print('Liga o Botão 2')\r\n elif Numero == 3:\r\n self.Button3.setStyleSheet(\"QPushButton{background-color : #EBFF00;}QPushButton{font-size:26px; font:bold}\")\r\n # print('Liga o Botão 3')\r\n elif Numero == 4:\r\n self.Button4.setStyleSheet(\"QPushButton{background-color : #47F700;}QPushButton{font-size:26px; font:bold}\")\r\n # print('Liga o Botão 4')\r\n QtTest.QTest.qWait(750)\r\n self.Button1.setStyleSheet(\"QPushButton{background-color : #A9C4FA;}QPushButton{font-size:26px; font:bold}\")\r\n # print('Desliga o Botão 1')\r\n self.Button2.setStyleSheet(\"QPushButton{background-color : #FA9681;}QPushButton{font-size:26px; font:bold}\")\r\n # print('Desliga o Botão 2')\r\n self.Button3.setStyleSheet(\"QPushButton{background-color : #F6FF8B;}QPushButton{font-size:26px; font:bold}\")\r\n # print('Desliga o Botão 3')\r\n self.Button4.setStyleSheet(\"QPushButton{background-color : #B7FF9A;}QPushButton{font-size:26px; font:bold}\")\r\n # print('Desliga o Botão 4')\r\n QtTest.QTest.qWait(750)\r\n self.LigarCorBotaoClickPress()\r\n\r\n\r\n# Inicializa a Tela:\r\napplication = QApplication(sys.argv)\r\nWindow = Janela()\r\nsys.exit(application.exec_())\r\n", "repo_name": "BrenoCardoso2002/Genius-Pyhton", "sub_path": "Genius.py", "file_name": "Genius.py", "file_ext": "py", "file_size_in_byte": 13948, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "random.randint", "line_number": 11, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 17, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 28, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", 
"line_number": 28, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 48, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 55, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 64, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 73, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 82, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 91, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 100, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 109, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 116, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDesktopWidget", "line_number": 135, "usage_type": "call"}, {"api_name": "PyQt5.QtTest.QTest.qWait", "line_number": 141, "usage_type": "call"}, {"api_name": "PyQt5.QtTest.QTest", "line_number": 141, "usage_type": "attribute"}, {"api_name": "PyQt5.QtTest", "line_number": 141, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 169, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Question", "line_number": 170, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 170, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Yes", "line_number": 173, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 173, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.No", "line_number": 173, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Yes", "line_number": 174, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 174, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.No", "line_number": 176, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 176, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 182, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 287, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Question", "line_number": 288, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 288, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Yes", "line_number": 291, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 291, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.No", "line_number": 291, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Yes", "line_number": 292, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 292, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.No", "line_number": 294, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 294, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 300, "usage_type": "call"}, {"api_name": "PyQt5.QtTest.QTest.qWait", "line_number": 343, "usage_type": "call"}, {"api_name": "PyQt5.QtTest.QTest", "line_number": 343, "usage_type": "attribute"}, {"api_name": "PyQt5.QtTest", "line_number": 343, "usage_type": "name"}, {"api_name": "PyQt5.QtTest.QTest.qWait", "line_number": 352, "usage_type": "call"}, {"api_name": "PyQt5.QtTest.QTest", "line_number": 352, "usage_type": "attribute"}, {"api_name": "PyQt5.QtTest", "line_number": 
352, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 357, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 357, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 359, "usage_type": "call"}]} +{"seq_id": "71905278132", "text": "#Current method used to track a baseball\n#Not very effective as it gets affected a lot by lighting as well a setting\n\nimport cv2\nimport numpy as np\n\n# Initialize the video capture object\ncap = cv2.VideoCapture(0)\n\n# Define the range of colors for the white ball in HSV\nlower_color = np.array([0, 0, 200])\nupper_color = np.array([255, 255, 255])\n\n# Initialize the previous frame and the previous center of the contour\nprev_frame = None\nprev_center = None\n\n# Loop through frames\nwhile True:\n # Read the current frame\n ret, frame = cap.read()\n\n # Convert the frame to HSV color space\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # Threshold the frame to only select white colors\n mask = cv2.inRange(hsv_frame, lower_color, upper_color)\n\n # Find contours in the frame\n contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # Find the largest contour\n if len(contours) > 0:\n largest_contour = max(contours, key=cv2.contourArea)\n\n # Find the center of the contour\n M = cv2.moments(largest_contour)\n if M[\"m00\"] != 0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n else:\n cX, cY = 0, 0\n\n # Draw a circle at the center of the contour\n cv2.circle(frame, (cX, cY), 10, (255, 0, 0), -1)\n\n # Calculate the velocity of the ball if the previous frame and previous center are available\n if prev_frame is not None and prev_center is not None:\n dt = 1/30 # assuming that the video is 30 FPS\n dx = cX - prev_center[0]\n dy = cY - prev_center[1]\n vx = dx / dt\n vy = dy / dt\n v = (vx**2 + vy**2)**0.5\n print(f'Velocity: {v:.2f} pixels/s')\n\n # Update the previous frame and previous center\n prev_frame = frame\n prev_center = (cX, cY)\n\n # Show the frame\n cv2.imshow(\"Tracking\", frame)\n\n # Exit the program when the 'q' key is pressed\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Release the video capture and destroy the window\ncap.release()\ncv2.destroyAllWindows()\n", "repo_name": "asiyen/Rudtrax", "sub_path": "ball_tracking.py", "file_name": "ball_tracking.py", "file_ext": "py", "file_size_in_byte": 2140, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "cv2.VideoCapture", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cv2.moments", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 65, 
"usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "72732299253", "text": "from django.urls import path\nfrom django.contrib.auth import views as auth_views\nfrom .views import auth, profile\n\nurlpatterns = [\n path('login/', auth_views.LoginView.as_view(template_name='login.html'),\n name='login'),\n path('logout/', auth_views.LogoutView.as_view(template_name='logout.html'),\n name='logout'),\n path('registration/', auth.UserRegistrationView.as_view(), name='registration'),\n path('profile-view//', profile.UserProfileView.as_view(), name='profile-view'), # note security!?\n path('profile-edit/', profile.UserProfileEditView.as_view(), name='profile-edit'),\n]\n", "repo_name": "likemoody/post_my_notes", "sub_path": "users/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 623, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LoginView.as_view", "line_number": 6, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LoginView", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.auth.UserRegistrationView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "views.auth.UserRegistrationView", "line_number": 10, "usage_type": "attribute"}, {"api_name": "views.auth", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.profile.UserProfileView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "views.profile.UserProfileView", "line_number": 11, "usage_type": "attribute"}, {"api_name": "views.profile", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "views.profile.UserProfileEditView.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "views.profile.UserProfileEditView", "line_number": 12, "usage_type": "attribute"}, {"api_name": "views.profile", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "71048466294", "text": "# import pandas as pd\nimport random\n\nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\nimport numpy as np\nimport plotly.express as px\nfrom pandas import DataFrame\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nres_8 = [45, 27, 15, 28, 20, 8, 19, 21, 9, 24, 3, 16, 7, 12, 6, 9, 1, 3, 17, 27, 19, 26, 24, 17, 10, 15, 8, 6, 9, 5, 6,\n 2, 4, 3, 8, 0, 2, 4, 3, 1, 2, 4, 2, 3, 0, 4, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n# res_8_normalized = [float(x)/float(sum(res_8)) for x in res_8]\ncorrect_8_number = 98\nres_10 = [143, 99, 55, 13, 33, 24, 12, 0, 0, 16, 15, 2, 0, 0, 2, 9, 15, 25, 23, 34, 57, 76, 36, 26, 18, 14, 11, 7, 7, 9,\n 6, 4, 4, 3, 10, 10, 20, 11, 8, 9, 18, 17, 14, 5, 5, 3, 4, 4, 0, 1, 
2, 4, 1, 2, 1, 5, 1, 5, 0, 2, 1, 1, 1, 3,\n 2, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1]\n# res_10_normalized = [float(x)/float(sum(res_10)) for x in res_10]\ncorrect_10_numbers = 27\nres_15 = [217, 94, 64, 144, 258, 144, 100, 138, 155, 112, 71, 70, 71, 55, 44, 35, 28, 17, 24, 16, 16, 12, 14, 10, 6, 8,\n 6, 2, 2, 1, 1, 6, 1, 3, 0, 2, 0, 2, 2, 0, 1, 0, 1, 0, 0, 2, 1, 0, 0, 1]\n# res_15_normalized = [float(x)/float(sum(res_15)) for x in res_15]\ncorrect_15_numbers = 4\nres_18 = [124, 92, 42, 93, 126, 102, 83, 141, 96, 98, 109, 84, 76, 72, 90, 76, 63, 66, 40, 52, 45, 27, 29, 35, 24, 25,\n 13, 15, 10, 7, 10, 4, 5, 2, 5, 2, 3, 2, 3, 0, 2, 1, 0, 0, 0, 0, 1, 0, 1, 1]\n# res_18_normalized = [float(x)/float(sum(res_18)) for x in res_18]\ncorrect_18_numbers = 0\n\n\ndef get_correct_and_wrong_list(source, correct_number):\n tmp = correct_number\n elements_number = 0\n while tmp > 0:\n tmp -= source[elements_number]\n elements_number += 1\n correct_solutions = source[:elements_number]\n if len(correct_solutions) > 0:\n correct_solutions[-1] += tmp\n wrong_solutions = source[(elements_number - 1):]\n return correct_solutions, wrong_solutions\n\n\nres_8_correct, res_8_wrong = get_correct_and_wrong_list(res_8, correct_8_number)\nres_8_correct, res_8_wrong = [float(x)/float(sum(res_8)) for x in res_8_correct], [float(x)/float(sum(res_8)) for x in res_8_wrong]\nres_10_correct, res_10_wrong = get_correct_and_wrong_list(res_10, correct_10_numbers)\nres_10_correct, res_10_wrong = [float(x)/float(sum(res_8)) for x in res_10_correct], [float(x)/float(sum(res_10)) for x in res_10_wrong]\nres_15_correct, res_15_wrong = get_correct_and_wrong_list(res_15, correct_15_numbers)\nres_15_correct, res_15_wrong = [float(x)/float(sum(res_8)) for x in res_15_correct], [float(x)/float(sum(res_15)) for x in res_15_wrong]\n\n\n# res_18_correct, res_18_wrong = get_correct_and_wrong_list(res_18, correct_18_numbers)\n\n\ndef prepare_histogram(correct, wrong, ax, id):\n total_len = len(correct) + int(len(wrong)) - 1\n k = 2 if id <= 2 else 1\n xes = [''] + ['low energy'] + ['' for i in range(k)] + \\\n ['high energy'] + ['' for j in range(int(total_len - 45))]\n ax.bar(np.arange(len(correct) - 1, total_len), wrong, color='red', label='wrong solutions')\n ax.bar(np.arange(len(correct)), correct, color='green', label='correct solutions')\n # ax.xticks(range(total_len), xes)\n ax.legend(loc='best')\n ax.set_xticklabels(xes)\n ax.set_title(\"Problem {}\".format(id))\n # ax.set_axis_off()\n # ax.patch.set_visible(False)\n # ax.set_xlabel('ddd' + str(random.randint(5,156)))\n\n\nfig, axs = plt.subplots(2, 2)\nprepare_histogram(res_8_correct, res_8_wrong, axs[0,0], 1)\nprepare_histogram(res_10_correct, res_10_wrong, axs[0, 1], 2)\nprepare_histogram(res_15_correct, res_15_wrong, axs[1, 0], 3)\nprepare_histogram([0], [float(x)/float(sum(res_18)) for x in res_18], axs[1, 1], 4)\nfor ax in axs.flat:\n ax.set(ylabel='Probability density')\n# plt.show()\n# plt.subplots_adjust(top=2)\nfig.tight_layout(pad=0.75)\n\npdf = PdfPages(\"histogram_dwave.pdf\")\npdf.savefig()\npdf.close()\n# y_pos = np.arange(len(res_8))\n# frame1 = plt.gca()\n# frame1.axes.get_xaxis().set_visible(False)\n# plt.xticks(y_pos, ['.' 
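The split performed by `get_correct_and_wrong_list` above is easy to misread: the bin where correct and wrong solutions meet deliberately appears in both halves. A small worked check with toy counts rather than the D-Wave data, assuming the function from the script is in scope:

```python
# Toy check of the split performed by get_correct_and_wrong_list.
counts = [5, 3, 2, 4]                  # occurrences per energy bin
correct, wrong = get_correct_and_wrong_list(counts, 7)
assert correct == [5, 2]               # last correct bin trimmed: 7 - 5 = 2
assert wrong == [3, 2, 4]              # boundary bin kept whole on this side
assert sum(correct) == 7               # correct half sums to the target
```

That shared boundary bin is why `prepare_histogram` draws the red (wrong) bars starting at index `len(correct) - 1` and then overplots the green (correct) bars on top of the shared bin.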
for i in range(len(res_8))])\n\n# res8dict = [{\"value\": val, \"color\": 'wrong'} for val in res_8]\n# for i in range(4):\n# res8dict[i][\"color\"] = \"correct\"\n# df = DataFrame(res8dict)\n# fig = px.bar(df, y=\"value\", color='color')\n# fig.show()\n\n# x0 = np.random.randn(500)\n# Add 1 to shift the mean of the Gaussian distribution\n# x1 = np.random.randn(500) + 1\n\n\n# range50 = list(range(10))\n# range50.extend(['.' for i in range(40)])\n# range80 = list(range(10))\n# range80.extend(['.' for i in range(70)])\n#\n# all = [res_8, res_10, res_15, res_18]\n#\n## print(list(map(lambda x: len(x), all)))\n#\n# df8 = pd.DataFrame({'number of boxes':range80, 'number of occurences':res_8})\n# ax8 = df8.plot.bar(x='number of boxes', y='number of occurences', rot=0)\n# plt.show()\n#\n# df10 = pd.DataFrame({'number of boxes':range80, 'number of occurences':res_10})\n# ax10 = df10.plot.bar(x='number of boxes', y='number of occurences', rot=0)\n# plt.show()\n#\n# df15 = pd.DataFrame({'number of boxes':range50, 'number of occurences':res_15})\n# ax15 = df15.plot.bar(x='number of boxes', y='number of occurences', rot=0)\n# plt.show()\n#\n# df18 = pd.DataFrame({'number of boxes':range50, 'number of occurences':res_18})\n# ax18 = df18.plot.bar(x='number of boxes', y='number of occurences', rot=0)\n# plt.show()\n", "repo_name": "dawtom/quantum_optimization", "sub_path": "histograms.py", "file_name": "histograms.py", "file_ext": "py", "file_size_in_byte": 5335, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.arange", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_pdf.PdfPages", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "70747689333", "text": "from __future__ import print_function\nimport binascii\nfrom PIL import Image\nimport numpy as np\nimport scipy\nimport scipy.misc\nimport scipy.cluster\nimport imageio\nimport os.path\n\nfrom django.core.files.storage import FileSystemStorage\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom .models import CriticItem, ArtPiece, UploadImage\n\nimport requests\n\nimport PIL\nfrom PIL import Image\nimport io\nfrom io import BytesIO\n\nimport os, time, sys\n\n\ndef criticView(request):\n # all_critic_items = CriticItem.objects.all()\n if request.method != 'POST':\n return render(request, 'critic.html')\n else:\n return classifyEra(request)\n\ndef addCritic(request):\n new_item = CriticItem(content = request.POST['content'])\n new_item.save()\n return HttpResponseRedirect('/home/')\n\ndef deleteCritic(request, critic_id):\n item_to_delete = CriticItem.objects.get(id=critic_id)\n item_to_delete.delete()\n return HttpResponseRedirect('/critic/')\n\ndef aboutView(request):\n return render(request, 'about.html')\n\ndef contactView(request):\n return render(request, 'contact.html')\n\ndef surveyView(request):\n all_critic_items = CriticItem.objects.all()\n return render(request, 'survey.html',\n {'all_items': all_critic_items})\n\n#######################################################################################################\n\nvalueRatioDark = \"\"\nvalueRatioMed = \"\"\nvalueRatioLight = \"\"\ncolorDomRgb = \"\"\ncolorDomHex = \"\"\ncolorAvgRgb = 
\"\"\ncolorAvgHex = \"\"\n\ndef classifyEra(request):\n image_type = \"link\"\n image_url = request.POST['url']\n if image_url: #if the user uploads a LINK\n url = 'https://app.nanonets.com/api/v2/ImageCategorization/LabelUrls/'\n headers = {\n 'accept': 'application/x-www-form-urlencoded'\n }\n image_url_list=[]\n image_url_list.append(image_url)\n data = {\n 'modelId': 'e483f029-8ad6-43c4-a0ca-1377a2d04078',\n 'urls' : image_url_list\n }\n\n response = requests.request('POST', url, headers=headers, auth=requests.auth.HTTPBasicAuth('4S_Y0S2gS0DSpnZlz7fwPDa5W5oP5zuA', ''), data=data)\n\n else: #if the user uploads an IMAGE\n image_type = \"link\"\n img = request.FILES\n image_file = img['imgfile']\n \n try:\n my_image = UploadImage()\n my_image.photo.save(image_file.name, image_file)\n except:\n pass\n image_url = my_image.photo.url\n print (image_url)\n\n url = 'https://app.nanonets.com/api/v2/ImageCategorization/LabelUrls/'\n headers = {\n 'accept': 'application/x-www-form-urlencoded'\n }\n image_url_list=[]\n image_url_list.append(image_url)\n data = {\n 'modelId': 'e483f029-8ad6-43c4-a0ca-1377a2d04078',\n 'urls' : image_url_list\n }\n\n response = requests.request('POST', url, headers=headers, auth=requests.auth.HTTPBasicAuth('4S_Y0S2gS0DSpnZlz7fwPDa5W5oP5zuA', ''), data=data)\n\n #image_url = \"static/\" + fs.url(new_image_name)[1:]\n #url = 'https://app.nanonets.com/api/v2/ImageCategorization/LabelFile/'\n #data = {'file': open(image_url, 'rb'), 'modelId': ('', 'e483f029-8ad6-43c4-a0ca-1377a2d04078')}\n #response = requests.post(url, auth= requests.auth.HTTPBasicAuth('4S_Y0S2gS0DSpnZlz7fwPDa5W5oP5zuA', ''), files=data)\n\n ratings = response.text\n \n labels = [\"\\\"Minimalism\\\"\", \"\\\"Cubism\\\"\", \"\\\"Romanticism\\\"\", \"\\\"Rococo\\\"\", \"\\\"Early_Renaissance\\\"\", \"\\\"Post_Impressionism\\\"\", \"\\\"Ukiyo_e\\\"\", \"\\\"Symbolism\\\"\", \"\\\"Pointillism\\\"\", \"\\\"Art_Noveau_Modern\\\"\", \"\\\"Contemporary_Realism\\\"\", \"\\\"Northern_Renaissance\\\"\", \"\\\"Expressionism\\\"\", \"\\\"Mannerism_Late_Renaissance\\\"\", \"\\\"Baroque\\\"\", \"\\\"Action_painting\\\"\", \"\\\"Pop_Art\\\"\", \"\\\"Analytical_Cubism\\\"\", \"\\\"Fauvism\\\"\", \"\\\"Color_Field_Painting\\\"\", \"\\\"Synthetic_Cubism\\\"\", \"\\\"Realism\\\"\", \"\\\"Native_Art_Primitivism\\\"\", \"\\\"New_Realism\\\"\", \"\\\"Impressionism\\\"\", \"\\\"High_Renaissance\\\"\", \"\\\"Abstract_Expressionism\\\"\"]\n eras = [\"Minimalism\", \"Cubism\", \"Romanticism\", \"Rococo\", \"Early Renaissance\", \"Post Impressionism\", \"Ukiyo-e\", \"Symbolism\", \"Pointillism\", \"Art Noveau (Modern)\", \"Contemporary Realism\", \"Northern Renaissance\", \"Expressionism\", \"Mannerism (Late Renaissance)\", \"Baroque\", \"Action Painting\", \"Pop Art\", \"Analytical Cubism\", \"Fauvism\", \"Color Field Painting\", \"Synthetic Cubism\", \"Realism\", \"Naïve Art (Primitivism)\", \"New Realism\", \"Impressionism\", \"High Renaissance\", \"Abstract Expressionism\"]\n probabilities = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n\n # parse String response\n for i in range(27):\n try:\n era = labels[i]\n probabilities[i] = float(ratings[ratings.index(\":\", ratings.index(era)) + 1 : ratings.index(\"}\", ratings.index(era))])\n # print (\"%s: %.3f%%\" % (eras[i], probabilities[i] * 100))\n except ValueError: \n None\n\n # get highest probability era \n era1Label = eras[probabilities.index(max(probabilities))]\n era1Probability = max(probabilities)\n # print (\"\\nPrimary era similarity is: %s, %.3f%%\" % 
(era1Label, era1Probability * 100))\n\n # remove max to get second\n eras.remove(era1Label)\n probabilities.remove(era1Probability)\n\n # get second highest probability era\n era2Label = eras[probabilities.index(max(probabilities))]\n era2Probability = max(probabilities)\n # print (\"Secondary era similarity is: %s, %.3f%%\" % (era2Label, era2Probability * 100))\n\n cimg = ArtPiece()\n if image_type == \"file\":\n cimg.img = \"/\" + image_url[7:]\n elif image_type == \"link\":\n cimg.img = image_url\n cimg.era1 = str(era1Label)\n cimg.era1Prob = str(\"{:.2%}\".format(era1Probability))\n cimg.era2 = str(era2Label)\n cimg.era2Prob = str(\"{:.2%}\".format(era2Probability))\n\n #classityMood\n cimg.mood = classifyMood(image_type, image_url)\n # if image_type == \"file\":\n # cimg.mood = classifyMood(\"\", image_file)\n # elif image_type == \"link\":\n # cimg.mood = classifyMood(image_url)\n\n #analyzeValue`\n analyzeValue(image_type, image_url)\n # if image_type == \"file\":\n # analyzeValue(\"\", image_file)\n # elif image_type == \"link\":\n # analyzeValue(image_url)\n cimg.valueRatioDark = valueRatioDark\n cimg.valueRatioMed = valueRatioMed\n cimg.valueRatioLight = valueRatioLight\n\n #analyzeColor\n analyzeColor(image_type, image_url)\n # if image_type == \"file\":\n # analyzeColor(\"\", image_file)\n # elif image_type == \"link\":\n # analyzeColor(image_url)\n cimg.colorDomRgb = colorDomRgb\n cimg.colorDomHex = colorDomHex\n cimg.colorAvgRgb = colorAvgRgb\n cimg.colorAvgHex = colorAvgHex\n\n # solely for formatting purposes\n cimg.result_era1 = \"Your piece is most similar to artwork of the \" + str(era1Label) + \" style with a \" + str(\"{:.2%}\".format(era1Probability)) + \" similarity.\"\n cimg.result_era2 = \"It is also similar to artwork of the \" + str(era2Label) + \" style with a \" + str(\"{:.2%}\".format(era2Probability)) + \" similarity.\"\n cimg.result_mood = \"The mood of this piece is \" + cimg.mood + \" .\"\n cimg.result_dark = \"Dark ratio: \" + valueRatioDark\n cimg.result_med = \"Medium ratio: \" + valueRatioMed\n cimg.result_light = \"Light ratio: \" + valueRatioLight\n cimg.result_dom = \"Dominant color: rgb \" + colorDomRgb + \", hex \" + colorDomHex\n cimg.result_avg = \"Average color: rgb \" + colorAvgRgb + \", hex \" + colorAvgHex\n\n###################################################\n# AUTODELTE FILED OLDER THAN 30 S.\n # path = 'static/media'\n # now = time.time()\n # for f in os.listdir(path):\n # f = os.path.join(path, f)\n # if os.stat(f).st_mtime < now - 30:#7 * 86400:\n # if os.path.isfile(f):\n # os.remove(f)\n####################################################\n\n return render(request, \"critic.html\", {'cimg':cimg})\n\n#########################################################################################################\n\ndef classifyMood(image_type, image_url):\n if image_type == \"link\": # if the user inputs a LINK\n url = 'https://app.nanonets.com/api/v2/ImageCategorization/LabelUrls/'\n headers = {\n 'accept': 'application/x-www-form-urlencoded'\n }\n image_url_list=[]\n image_url_list.append(image_url)\n data = {\n 'modelId': '8ee76cc5-5c8a-4fa4-99bd-19185cf1eea4',\n 'urls' : image_url_list\n }\n\n response = requests.request('POST', url, headers=headers, auth=requests.auth.HTTPBasicAuth('D9iah1hwZIVhR-TX23d3RR5wvJkxrBI9', ''), data=data)\n\n else: # if the user uploads an IMAGE\n url = 'https://app.nanonets.com/api/v2/ImageCategorization/LabelFile/'\n data = {'file': open(image_url, 'rb'), 'modelId': ('', 
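`classifyEra` above recovers probabilities by slicing `response.text` with `index()`, which breaks as soon as the field order or whitespace in the response changes. A sketch of the same extraction via `response.json()`; the `result -> prediction -> label/probability` layout is an assumption inferred from the fields the string slicing looks for, not a confirmed Nanonets schema.

```python
# Sketch: schema-based parsing instead of string slicing. The key names
# below are assumptions inferred from the fields used above; adjust them
# to whatever the API actually returns.
def top_two_labels(response):
    predictions = response.json()['result'][0]['prediction']
    # Sort once and take the two highest-probability labels
    # (assumes at least two predictions are present).
    ranked = sorted(predictions, key=lambda p: p['probability'], reverse=True)
    top, runner_up = ranked[0], ranked[1]
    return (top['label'], top['probability'],
            runner_up['label'], runner_up['probability'])
```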
'8ee76cc5-5c8a-4fa4-99bd-19185cf1eea4')}\n response = requests.post(url, auth= requests.auth.HTTPBasicAuth('D9iah1hwZIVhR-TX23d3RR5wvJkxrBI9', ''), files=data)\n \n ratings = response.text\n\n # parse data from model to get energized, calm, pleasant, unpleasant ratings\n try:\n energized = float(ratings[ratings.index(\"energized\") + 25 : ratings.index(\"}\", ratings.index(\"energized\") + 25)])\n except ValueError:\n energized = 0\n\n try:\n calm = float(ratings[ratings.index(\"calm\") + 20 : ratings.index(\"}\", ratings.index(\"calm\") + 20)])\n except ValueError:\n calm = 0\n \n try:\n pleasant = float(ratings[ratings.index(\"\\\"pleasant\") + 25 : ratings.index(\"}\", ratings.index(\"\\\"pleasant\") + 25)])\n except ValueError:\n pleasant = 0\n \n try:\n unpleasant = float(ratings[ratings.index(\"unpleasant\") + 26 : ratings.index(\"}\", ratings.index(\"unpleasant\") + 26)])\n except ValueError:\n unpleasant = 0\n \n # print (\"Energized rating: %.3f%%\" % (energized * 100))\n # print (\"Calm rating: %.3f%%\" % (calm * 100))\n # print (\"Pleasant rating: %.3f%%\" % (pleasant * 100))\n # print (\"Unpleasant rating: %.3f%%\" % (unpleasant * 100))\n\n # determine mood\n mood = \"\"\n\n if energized == max(energized, calm, pleasant, unpleasant):\n if calm == max(calm, pleasant, unpleasant):\n mood = \"energized yet calm\"\n elif pleasant == max(calm, pleasant, unpleasant):\n mood = \"excited and lively\"\n elif unpleasant == max(calm, pleasant, unpleasant):\n mood = \"tense and nervous\"\n elif calm == max(energized, calm, pleasant, unpleasant):\n if energized == max(energized, pleasant, unpleasant):\n mood = \"calm yet energized\"\n elif pleasant == max(energized, pleasant, unpleasant):\n mood = \"calm and serene\"\n elif unpleasant == max(energized, pleasant, unpleasant):\n mood = \"gloomy and sad\"\n elif pleasant == max(energized, calm, pleasant, unpleasant):\n if energized == max(energized, calm, unpleasant):\n mood = \"cheerful and happy\"\n elif calm == max(energized, calm, unpleasant):\n mood = \"relaxed and carefree\"\n elif unpleasant == max(energized, calm, unpleasant):\n mood = \"pleasant yet unpleasant\" #????????????????\n elif unpleasant == max(energized, calm, pleasant, unpleasant):\n if energized == max(energized, calm, pleasant):\n mood = \"irritated and annoyed\"\n elif calm == max(energized, calm, pleasant):\n mood = \"bored and weary\"\n elif pleasant == max(energized, calm, pleasant):\n mood = \"unpleasant yet pleasant\"\n # if one value is extremely high, ignore others\n if energized > 0.93:\n mood = \"energized\"\n if calm > 0.93:\n mood = \"calm\"\n if pleasant > 0.93:\n mood = \"pleasant\"\n if unpleasant > 0.93:\n mood = \"unpleasant\"\n\n return mood\n\n #########################################################################################\n\ndef analyzeValue(image_type, image_url):\n if image_type == \"link\":\n response = requests.get(image_url)\n img = Image.open(BytesIO(response.content))\n else:\n img = Image.open(image_url)\n # img = Image.open(BytesIO(response.content)).convert('L')\n img = img.convert('L')\n img.thumbnail((500,500))\n WIDTH, HEIGHT = img.size\n\n grayscaleValues = list(img.getdata()) # convert image data to a list of integers\n\n # convert to 2D list if necessary\n # grayscaleValues = [grayscaleValues[offset:offset+WIDTH] for offset in range(0, WIDTH*HEIGHT, WIDTH)]\n\n darkCount = 0\n medCount = 0\n lightCount = 0\n totalCount = len(grayscaleValues)\n\n for px in grayscaleValues:\n if px < 85:\n darkCount = darkCount + 1\n elif px > 
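The nested `max` comparisons that pick the mood above can be collapsed by treating the four ratings as a dict and sorting once. A sketch that keeps the same "one rating dominates" override at 0.93; the phrase lookup itself is left to the caller:

```python
# Sketch: primary/secondary mood selection with one sort instead of
# nested max() comparisons.
def pick_mood(energized, calm, pleasant, unpleasant, override=0.93):
    ratings = {'energized': energized, 'calm': calm,
               'pleasant': pleasant, 'unpleasant': unpleasant}
    ranked = sorted(ratings, key=ratings.get, reverse=True)
    primary, secondary = ranked[0], ranked[1]
    if ratings[primary] > override:
        return primary, None            # one rating dominates outright
    # Map the (primary, secondary) pair to a phrase, e.g.
    # ('energized', 'pleasant') -> 'excited and lively'.
    return primary, secondary
```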
170:\n lightCount = lightCount + 1\n else:\n medCount = medCount + 1\n\n darkRatio = darkCount / totalCount \n medRatio = medCount / totalCount\n lightRatio = lightCount / totalCount\n\n # print (\"WIDTH: \" + str(WIDTH))\n # print (\"HEIGHT: \" + str(HEIGHT))\n # print (\"PIXELS: \" + str(totalCount))\n # print (\"DARK RATIO: \" + str(darkRatio))\n # print (\"MEDIUM RATIO: \" + str(medRatio))\n # print (\"LIGHT RATIO: \" + str(lightRatio))\n\n global valueRatioDark\n valueRatioDark = str(\"{:.2%}\".format(darkRatio))\n global valueRatioMed\n valueRatioMed = str(\"{:.2%}\".format(medRatio))\n global valueRatioLight\n valueRatioLight = str(\"{:.2%}\".format(lightRatio))\n\n\ndef analyzeColor(image_type, image_url): \n # number of colors to convert image to\n NUM_CLUSTERS = 4\n #################### determine main color ####################\n # read image, compress\n\n if image_type == \"link\":\n response = requests.get(image_url)\n img = Image.open(BytesIO(response.content))\n else:\n img = Image.open(image_url)\n # img = Image.open(BytesIO(response.content))\n \n # img = Image.open('/Users/benjamin.0t/Desktop/Art/brockmann-mockup1.png')\n img.thumbnail((150, 150))\n arry = np.asarray(img)\n shape = arry.shape\n arry = arry.reshape(np.product(shape[:2]), shape[2]).astype(float)\n\n # modified https://stackoverflow.com/questions/3241929\n # calculate clustered colors \n codes, dist = scipy.cluster.vq.kmeans(arry, NUM_CLUSTERS)\n # print('cluster colors:\\n', codes)\n\n vecs, dist = scipy.cluster.vq.vq(arry, codes) # assign codes\n counts, bins = np.histogram(vecs, len(codes)) # count occurrences\n\n # codes is numpy.ndarray\n index_max = np.argmax(counts) # sort and get dominant\n #domRgb = codes[index_max] # [\"%.2f\" % member for member in codes[index_max]]\n domRgb = [ \"{:0.0f}\".format(x) for x in codes[index_max] ]\n domHex = binascii.hexlify(bytearray(int(c) for c in codes[index_max])).decode('ascii')\n\n # print('DOMINANT COLOR: %s (#%s)' % (domRgb, domHex))\n\n # #################### saves clustered image ####################\n # # modified https://stackoverflow.com/questions/3241929\n # c = arry.copy()\n # for i, code in enumerate(codes):\n # c[scipy.r_[np.where(vecs==i)],:] = code\n # # converts back to 2d array and saves as image\n # imageio.imwrite('clustered.png', c.reshape(*shape).astype(np.uint8))\n\n\n #################### find average color ####################\n def calcAvgColor (img):\n width, height = img.size\n \n rSum = 0\n gSum = 0\n bSum = 0\n aSum = 0\n count = 0\n # sum r g b values and divide by total to get average\n for x in range(0, width):\n for y in range(0, height):\n try:\n r, g, b, a = img.getpixel((x,y))\n except:\n r, g, b = img.getpixel((x,y))\n rSum += r\n gSum += g\n bSum += b\n count += 1\n return (rSum / count, gSum / count, bSum / count)\n\n avgRgbTemp = calcAvgColor(img)\n avgHex = binascii.hexlify(bytearray(int(c) for c in avgRgbTemp)).decode('ascii')\n avgRgbTemp = [\"%.0f\" % x for x in avgRgbTemp]\n avgRgb = \"['\" + avgRgbTemp[0] + \"', '\" + avgRgbTemp[1] + \"', '\" + avgRgbTemp[2] + \"']\"\n\n # print('AVERAGE COLOR: %s (#%s)' % (avgRgb, avgHex))\n\n global colorDomRgb\n colorDomRgb = str(domRgb)\n global colorDomHex\n colorDomHex = str(\"#\" + domHex)\n global colorAvgRgb\n colorAvgRgb = str(avgRgb)\n global colorAvgHex\n colorAvgHex = str(\"#\" + avgHex)\n\n \n\n\n\n", "repo_name": "b3njamint/aiartcritic", "sub_path": "critic/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 16433, "program_lang": "python", 
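`analyzeValue` and `calcAvgColor` above walk the image pixel by pixel in Python loops; with NumPy the same statistics are a few vectorised calls. A sketch assuming a PIL image, matching the 85/170 value thresholds used above:

```python
# Sketch: dark/medium/light ratios and average colour via NumPy instead
# of per-pixel Python loops. `img` is assumed to be a PIL image.
import numpy as np

def value_ratios(img, low=85, high=170):
    gray = np.asarray(img.convert('L'))
    total = gray.size
    dark = np.count_nonzero(gray < low) / total
    light = np.count_nonzero(gray > high) / total
    return dark, 1.0 - dark - light, light   # (dark, medium, light)

def average_colour(img):
    rgb = np.asarray(img.convert('RGB'), dtype=float)
    return tuple(rgb.reshape(-1, 3).mean(axis=0))  # (r, g, b) floats
```

Converting to RGB first also removes the need for the try/except around `getpixel` that handles RGBA images in the original.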
"lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}, {"api_name": "models.CriticItem", "line_number": 34, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 36, "usage_type": "call"}, {"api_name": "models.CriticItem.objects.get", "line_number": 39, "usage_type": "call"}, {"api_name": "models.CriticItem.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "models.CriticItem", "line_number": 39, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 41, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 44, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "models.CriticItem.objects.all", "line_number": 50, "usage_type": "call"}, {"api_name": "models.CriticItem.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "models.CriticItem", "line_number": 50, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 79, "usage_type": "call"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 79, "usage_type": "call"}, {"api_name": "requests.auth", "line_number": 79, "usage_type": "attribute"}, {"api_name": "models.UploadImage", "line_number": 87, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 105, "usage_type": "call"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 105, "usage_type": "call"}, {"api_name": "requests.auth", "line_number": 105, "usage_type": "attribute"}, {"api_name": "models.ArtPiece", "line_number": 141, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 200, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 217, "usage_type": "call"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 217, "usage_type": "call"}, {"api_name": "requests.auth", "line_number": 217, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 222, "usage_type": "call"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 222, "usage_type": "call"}, {"api_name": "requests.auth", "line_number": 222, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 299, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 300, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 300, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 300, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 302, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 302, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 352, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 353, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 353, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 353, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 355, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 355, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.product", "line_number": 362, "usage_type": "call"}, {"api_name": "scipy.cluster.vq.kmeans", "line_number": 366, "usage_type": "call"}, {"api_name": "scipy.cluster", "line_number": 366, "usage_type": "attribute"}, {"api_name": 
"scipy.cluster.vq.vq", "line_number": 369, "usage_type": "call"}, {"api_name": "scipy.cluster", "line_number": 369, "usage_type": "attribute"}, {"api_name": "numpy.histogram", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 373, "usage_type": "call"}, {"api_name": "binascii.hexlify", "line_number": 376, "usage_type": "call"}, {"api_name": "binascii.hexlify", "line_number": 412, "usage_type": "call"}]} +{"seq_id": "22167738944", "text": "\"\"\"\nThe main script to decompose an image into several meaningful entities.\n\"\"\"\n\nimport numpy as np\nimport os\nimport sys\nimport time\nimport logging\n\nfrom torch import optim\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import transforms\n\nfrom kernelphysiology.dl.experiments.decomposition import util as vae_util\nfrom kernelphysiology.dl.experiments.decomposition import model_vqvae\nfrom kernelphysiology.dl.experiments.decomposition import arguments\nfrom kernelphysiology.dl.experiments.decomposition import data_loaders\nfrom kernelphysiology.dl.experiments.decomposition import ColourTransformer\nfrom kernelphysiology.dl.pytorch.utils import cv2_preprocessing\nfrom kernelphysiology.dl.pytorch.utils import cv2_transforms\nfrom kernelphysiology.transformations import colour_spaces\n\nfrom kernelphysiology.utils import random_imutils\n\ndatasets_classes = {\n 'imagenet': data_loaders.ImageFolder,\n 'ccvr': data_loaders.ColourConstancyVR,\n 'celeba': data_loaders.CelebA,\n 'touch': data_loaders.TouchRelief,\n 'voc': data_loaders.VOCSegmentation,\n 'coco': data_loaders.COCOPanoptic,\n}\ndataset_target_size = {\n 'imagenet': 256,\n 'ccvr': 256,\n 'celeba': 64,\n 'touch': 256,\n 'voc': 256,\n 'coco': 256,\n}\n\n\ndef main(args):\n args = arguments.parse_arguments(args)\n\n # determining the number of input channels\n args.in_chns = 3\n\n out_chns = 3\n args.out_chns = out_chns\n\n args.mean = 0.5\n args.std = 0.5\n target_size = args.target_size or dataset_target_size[args.dataset]\n\n if args.dataset == 'ccvr':\n pre_shared_transforms = [\n cv2_transforms.Resize(target_size + 32),\n cv2_transforms.RandomCrop(target_size),\n ]\n else:\n pre_shared_transforms = [\n cv2_transforms.Resize(target_size + 32),\n cv2_transforms.CenterCrop(target_size),\n ]\n post_shared_transforms = [\n cv2_transforms.ToTensor(),\n cv2_transforms.Normalize(args.mean, args.std)\n ]\n\n pre_dataset_transforms = dict()\n post_dataset_transforms = dict()\n for key in datasets_classes.keys():\n pre_dataset_transforms[key] = transforms.Compose(\n pre_shared_transforms\n )\n post_dataset_transforms[key] = transforms.Compose(\n post_shared_transforms\n )\n\n save_path = vae_util.setup_logging_from_args(args)\n writer = SummaryWriter(save_path)\n\n torch.manual_seed(args.seed)\n cudnn.benchmark = True\n torch.cuda.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n\n if args.pred is not None:\n checkpoint = torch.load(args.pred, map_location='cpu')\n model_vae = model_vqvae.DecomposeNet(**checkpoint['arch_params'])\n model_vae.load_state_dict(checkpoint['state_dict'])\n else:\n # FIXME: archs_param should be added to resume and fine_tune\n arch_params = {'k': args.k, 'd': args.d, 'hidden': args.hidden}\n model_vae = model_vqvae.DecomposeNet(\n hidden=args.hidden, k=args.k, d=args.d, in_chns=args.in_chns,\n out_chns=args.out_chns\n )\n model_vae = model_vae.cuda()\n\n # FIXME make it only for one single output\n if args.lab_init:\n distortion = 
[\n 116.0 / 500, 16.0 / 500, 500.0 / 500, 200.0 / 500,\n 0.2068966\n ]\n trans_mat = [[0.412453, 0.357580, 0.180423],\n [0.212671, 0.715160, 0.072169],\n [0.019334, 0.119193, 0.950227]]\n\n ref_white = (0.95047, 1., 1.08883)\n\n tmat = colour_spaces.dkl_from_rgb.T\n tmat = np.expand_dims(tmat, [2, 3])\n cst_lr = args.lr * 0.1\n else:\n trans_mat = None\n ref_white = None\n distortion = None\n tmat = None\n cst_lr = args.lr\n # model_cst = ColourTransformer.LabTransformer(\n # trans_mat=trans_mat, ref_white=ref_white,\n # distortion=distortion, linear=args.linear\n # )\n model_cst = ColourTransformer.ResNetTransformer(layers=args.cst_layers)\n model_cst = model_cst.cuda()\n\n vae_params = [\n {'params': [p for p in model_vae.parameters() if p.requires_grad]},\n ]\n cst_params = [\n {'params': [p for p in model_cst.parameters() if p.requires_grad]},\n ]\n optimizer_vae = optim.Adam(vae_params, lr=args.lr)\n optimizer_cst = optim.Adam(cst_params, lr=cst_lr)\n scheduler_vae = optim.lr_scheduler.StepLR(\n optimizer_vae, int(args.epochs / 3), 0.5\n )\n scheduler_cst = optim.lr_scheduler.StepLR(\n optimizer_cst, int(args.epochs / 3), 0.5\n )\n\n if args.resume is not None:\n checkpoint = torch.load(args.resume, map_location='cpu')\n model_vae.load_state_dict(checkpoint['state_dict'])\n model_vae = model_vae.cuda()\n args.start_epoch = checkpoint['epoch'] + 1\n scheduler_vae.load_state_dict(checkpoint['scheduler_vae'])\n optimizer_vae.load_state_dict(checkpoint['optimizer_vae'])\n scheduler_cst.load_state_dict(checkpoint['scheduler_cst'])\n optimizer_cst.load_state_dict(checkpoint['optimizer_cst'])\n elif args.fine_tune is not None:\n weights = torch.load(args.fine_tune, map_location='cpu')\n model_vae.load_state_dict(weights, strict=False)\n model_vae = model_vae.cuda()\n\n intransform_funs = []\n if args.in_space.lower() == 'cgi':\n augmentation_settings = [\n {\n 'function': random_imutils.adjust_contrast,\n 'kwargs': {'amount': np.array([0.2, 1.0]), 'channel_wise': True}\n },\n {\n 'function': random_imutils.adjust_gamma,\n 'kwargs': {'amount': np.array([0.2, 5.0]), 'channel_wise': True}\n },\n {\n 'function': random_imutils.adjust_illuminant,\n 'kwargs': {'illuminant': np.array([0.0, 1.0])}\n }\n ]\n intransform_funs.append(\n cv2_preprocessing.RandomAugmentationTransformation(\n augmentation_settings, num_augmentations=1\n )\n )\n elif args.in_space.lower() != 'rgb':\n intransform_funs.append(\n cv2_preprocessing.DecompositionTransformation(args.in_space.lower())\n )\n intransform = transforms.Compose(intransform_funs)\n\n outtransform = None\n\n args.outs_dict = {'rgb': {'vis_fun': None}}\n\n # preparing the dataset\n transforms_kwargs = {\n 'intransform': intransform,\n 'outtransform': outtransform,\n 'pre_transform': pre_dataset_transforms[args.dataset],\n 'post_transform': post_dataset_transforms[args.dataset]\n }\n if args.dataset in ['celeba', 'touch', 'ccvr']:\n train_dataset = datasets_classes[args.dataset](\n root=args.data_dir, split='train', **transforms_kwargs\n )\n test_dataset = datasets_classes[args.dataset](\n root=args.data_dir, split='test', **transforms_kwargs\n )\n elif args.dataset in ['coco']:\n train_dataset = datasets_classes[args.dataset](\n root=args.data_dir, split='train', **transforms_kwargs\n )\n test_dataset = datasets_classes[args.dataset](\n root=args.data_dir, split='val', **transforms_kwargs\n )\n elif args.dataset in ['voc']:\n train_dataset = datasets_classes[args.dataset](\n root=args.data_dir, image_set='train', **transforms_kwargs\n )\n test_dataset = 
datasets_classes[args.dataset](\n root=args.data_dir, image_set='val', **transforms_kwargs\n )\n else:\n train_dataset = datasets_classes[args.dataset](\n root=os.path.join(args.data_dir, 'train'), **transforms_kwargs\n )\n test_dataset = datasets_classes[args.dataset](\n root=os.path.join(args.data_dir, 'validation'), **transforms_kwargs\n )\n\n loader_kwargs = {\n 'batch_size': args.batch_size, 'num_workers': args.workers,\n 'pin_memory': True\n }\n train_loader = torch.utils.data.DataLoader(\n train_dataset, shuffle=True, **loader_kwargs\n )\n test_loader = torch.utils.data.DataLoader(\n test_dataset, shuffle=False, **loader_kwargs\n )\n\n if args.pred is not None:\n predict(model_vae, test_loader, save_path, args)\n return\n\n # starting to train\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n logging.getLogger('').addHandler(console)\n for epoch in range(args.start_epoch, args.epochs):\n train_losses = train(\n epoch, model_vae, model_cst, train_loader,\n (optimizer_vae, optimizer_cst), save_path, args\n )\n test_losses = test_net(\n epoch, model_vae, model_cst, test_loader, save_path, args\n )\n for k in train_losses.keys():\n name = k.replace('_trn', '')\n train_name = k\n test_name = k.replace('_trn', '_val')\n writer.add_scalars(\n name, {\n 'train': train_losses[train_name],\n 'test': test_losses[test_name]\n }, epoch\n )\n scheduler_vae.step()\n scheduler_cst.step()\n vae_util.save_checkpoint(\n {\n 'epoch': epoch,\n 'state_dict': model_vae.state_dict(),\n 'colour_transformer': model_cst.state_dict(),\n 'optimizer_vae': optimizer_vae.state_dict(),\n 'scheduler_vae': scheduler_vae.state_dict(),\n 'optimizer_cst': optimizer_cst.state_dict(),\n 'scheduler_cst': scheduler_cst.state_dict(),\n 'arch': args.model,\n 'arch_params': {\n **arch_params,\n 'in_chns': args.in_chns, 'out_chns': args.out_chns,\n },\n 'transformer_params': {'linear': args.linear}\n },\n save_path\n )\n\n\ndef train(epoch, model_vae, model_cst, train_loader, optimizers, save_path,\n args):\n (optimizer_vae, optimizer_cst) = optimizers\n model_vae.train()\n model_cst.train()\n vae_loss_dict = model_vae.latest_losses()\n batch_losses = {k + '_trn_vae': 0 for k, v in vae_loss_dict.items()}\n epoch_losses = {k + '_trn_vae': 0 for k, v in vae_loss_dict.items()}\n cst_loss_dict = model_cst.latest_losses()\n for k, v in cst_loss_dict.items():\n batch_losses[k + '_trn_cst'] = 0\n epoch_losses[k + '_trn_cst'] = 0\n\n num_batches = len(train_loader)\n start_time = time.time()\n for bidx, loader_data in enumerate(train_loader):\n data = loader_data[0]\n data = data.cuda()\n\n # optimise the VAE and CST together\n model_vae.zero_grad()\n model_cst.zero_grad()\n target_rnd, target_rgb = model_cst(data)\n outputs = model_vae(data)\n\n loss_vae = model_vae.loss_function(target_rnd.detach(), *outputs)\n loss_vae.backward()\n\n loss_cst = model_cst.loss_function(\n target_rnd, target_rgb, outputs[0].detach(), data\n )\n loss_cst.backward()\n\n optimizer_vae.step()\n optimizer_cst.step()\n\n vae_latest_losses = model_vae.latest_losses()\n for key in vae_latest_losses:\n batch_losses[key + '_trn_vae'] += float(vae_latest_losses[key])\n epoch_losses[key + '_trn_vae'] += float(vae_latest_losses[key])\n cst_latest_losses = model_cst.latest_losses()\n for key in cst_latest_losses:\n batch_losses[key + '_trn_cst'] += float(cst_latest_losses[key])\n epoch_losses[key + '_trn_cst'] += float(cst_latest_losses[key])\n\n if bidx % args.log_interval == 0:\n if bidx > 0:\n for key in batch_losses.keys():\n 
batch_losses[key] /= args.log_interval\n loss_string = ' '.join(\n ['{}: {:.6f}'.format(k, v) for k, v in batch_losses.items()]\n )\n logging.info(\n 'Train Epoch: {epoch} [{batch:5d}/{total_batch} '\n '({percent:2d}%)] time: {time:3.2f} {loss}'.format(\n epoch=epoch, batch=bidx * len(data),\n total_batch=num_batches * len(data),\n percent=int(100. * bidx / num_batches),\n time=time.time() - start_time, loss=loss_string\n )\n )\n start_time = time.time()\n for key in batch_losses.keys():\n batch_losses[key] = 0\n if bidx in list(np.linspace(0, num_batches - 1, 4).astype('int')):\n out_rgb = {'rgb': model_cst.rnd2rgb(outputs[0].detach().clone())}\n target_rgb = {'rgb': target_rgb}\n vae_util.grid_save_reconstructions(\n args.outs_dict, target_rgb, out_rgb, args.mean, args.std, epoch,\n save_path, 'reconstruction_train%.5d' % bidx, inputs=data\n )\n\n if bidx * len(data) > args.train_samples:\n break\n\n for key in epoch_losses:\n epoch_losses[key] /= (\n len(train_loader.dataset) / train_loader.batch_size\n )\n loss_string = '\\t'.join(\n ['{}: {:.6f}'.format(k, v) for k, v in epoch_losses.items()]\n )\n logging.info('====> Epoch: {} {}'.format(epoch, loss_string))\n return epoch_losses\n\n\ndef test_net(epoch, model_vae, model_cst, test_loader, save_path, args):\n model_vae.eval()\n model_cst.eval()\n loss_dict = model_vae.latest_losses()\n losses = {k + '_val_vae': 0 for k, v in loss_dict.items()}\n ct_loss_dict = model_cst.latest_losses()\n for k, v in ct_loss_dict.items():\n losses[k + '_val_cst'] = 0\n\n num_batches = len(test_loader)\n with torch.no_grad():\n for bidx, loader_data in enumerate(test_loader):\n data = loader_data[0]\n data = data.cuda()\n\n target_rnd, target_rgb = model_cst(data)\n outputs = model_vae(data)\n model_vae.loss_function(target_rnd, *outputs)\n model_cst.loss_function(target_rnd, target_rgb, outputs[0], data)\n latest_losses = model_vae.latest_losses()\n for key in latest_losses:\n losses[key + '_val_vae'] += float(latest_losses[key])\n ct_latest_losses = model_cst.latest_losses()\n for key in ct_latest_losses:\n losses[key + '_val_cst'] += float(ct_latest_losses[key])\n if bidx in list(np.linspace(0, num_batches - 1, 4).astype('int')):\n out_rgb = {\n 'rgb': model_cst.rnd2rgb(outputs[0].detach().clone())\n }\n target_rgb = {'rgb': target_rgb}\n vae_util.grid_save_reconstructions(\n args.outs_dict, target_rgb, out_rgb, args.mean, args.std,\n epoch, save_path, 'reconstruction_test%.5d' % bidx,\n inputs=data\n )\n if bidx * len(data) > args.test_samples:\n break\n\n for key in losses:\n losses[key] /= (len(test_loader.dataset) / test_loader.batch_size)\n loss_string = ' '.join(\n ['{}: {:.6f}'.format(k, v) for k, v in losses.items()]\n )\n logging.info('====> Test set losses: {}'.format(loss_string))\n return losses\n\n\ndef predict(model, test_loader, save_path, args):\n model.eval()\n loss_dict = model.latest_losses()\n losses = {k + '_test': 0 for k, v in loss_dict.items()}\n with torch.no_grad():\n img_ind = 0\n for bidx, loader_data in enumerate(test_loader):\n data = loader_data[0]\n data = data.cuda()\n target = loader_data[1]\n for key in target.keys():\n target[key] = target[key].cuda()\n\n outputs = model(data)\n model.loss_function(target, *outputs)\n latest_losses = model.latest_losses()\n for key in latest_losses:\n losses[key + '_test'] += float(latest_losses[key])\n\n vae_util.individual_save_reconstructions(\n args.outs_dict, target, outputs[0], args.mean, args.std,\n img_ind, save_path, 'reconstruction_test%.5d' % bidx\n )\n img_ind += 
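The training step in `train()` above updates two networks from one forward pass; the `.detach()` calls are what keep the two objectives from back-propagating into each other's weights. The pattern in isolation, with placeholder models and a toy MSE loss rather than the project's loss functions:

```python
# Sketch: jointly training two models whose losses share tensors, using
# detach() so each backward pass only reaches its own parameters.
import torch

def joint_step(model_a, model_b, opt_a, opt_b, data):
    model_a.zero_grad()
    model_b.zero_grad()
    target = model_b(data)              # e.g. the colour transformer
    output = model_a(data)              # e.g. the decomposition net
    # A's loss sees B's output as a constant, and vice versa:
    loss_a = ((output - target.detach()) ** 2).mean()
    loss_b = ((target - output.detach()) ** 2).mean()
    loss_a.backward()
    loss_b.backward()                   # disjoint graphs, no retain_graph needed
    opt_a.step()
    opt_b.step()
    return float(loss_a), float(loss_b)
```

Because each loss only reaches one model's graph, the two `backward()` calls do not interfere, and each optimizer can run on its own learning-rate schedule, as the script does with the two `StepLR` schedulers.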
len(data)\n\n for key in losses:\n losses[key] /= (len(test_loader.dataset) / test_loader.batch_size)\n loss_string = ' '.join(\n ['{}: {:.6f}'.format(k, v) for k, v in losses.items()]\n )\n logging.info('====> Test set losses: {}'.format(loss_string))\n return losses\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "repo_name": "ArashAkbarinia/kernelphysiology", "sub_path": "python/src/kernelphysiology/dl/experiments/decomposition/main_optspace.py", "file_name": "main_optspace.py", "file_ext": "py", "file_size_in_byte": 16553, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "21", "api": [{"api_name": "kernelphysiology.dl.experiments.decomposition.data_loaders.ImageFolder", "line_number": 29, "usage_type": "attribute"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.data_loaders", "line_number": 29, "usage_type": "name"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.data_loaders.ColourConstancyVR", "line_number": 30, "usage_type": "attribute"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.data_loaders", "line_number": 30, "usage_type": "name"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.data_loaders.CelebA", "line_number": 31, "usage_type": "attribute"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.data_loaders", "line_number": 31, "usage_type": "name"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.data_loaders.TouchRelief", "line_number": 32, "usage_type": "attribute"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.data_loaders", "line_number": 32, "usage_type": "name"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.data_loaders.VOCSegmentation", "line_number": 33, "usage_type": "attribute"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.data_loaders", "line_number": 33, "usage_type": "name"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.data_loaders.COCOPanoptic", "line_number": 34, "usage_type": "attribute"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.data_loaders", "line_number": 34, "usage_type": "name"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.arguments.parse_arguments", "line_number": 47, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.arguments", "line_number": 47, "usage_type": "name"}, {"api_name": "kernelphysiology.dl.pytorch.utils.cv2_transforms.Resize", "line_number": 61, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.pytorch.utils.cv2_transforms", "line_number": 61, "usage_type": "name"}, {"api_name": "kernelphysiology.dl.pytorch.utils.cv2_transforms.RandomCrop", "line_number": 62, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.pytorch.utils.cv2_transforms", "line_number": 62, "usage_type": "name"}, {"api_name": "kernelphysiology.dl.pytorch.utils.cv2_transforms.Resize", "line_number": 66, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.pytorch.utils.cv2_transforms", "line_number": 66, "usage_type": "name"}, {"api_name": "kernelphysiology.dl.pytorch.utils.cv2_transforms.CenterCrop", "line_number": 67, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.pytorch.utils.cv2_transforms", "line_number": 67, "usage_type": "name"}, {"api_name": "kernelphysiology.dl.pytorch.utils.cv2_transforms.ToTensor", "line_number": 70, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.pytorch.utils.cv2_transforms", "line_number": 70, "usage_type": "name"}, {"api_name": 
"kernelphysiology.dl.pytorch.utils.cv2_transforms.Normalize", "line_number": 71, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.pytorch.utils.cv2_transforms", "line_number": 71, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 77, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 77, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 80, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 80, "usage_type": "name"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.util.setup_logging_from_args", "line_number": 84, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.util", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 88, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.cuda.manual_seed", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 89, "usage_type": "attribute"}, {"api_name": "torch.cuda.manual_seed_all", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 90, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 93, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.model_vqvae.DecomposeNet", "line_number": 94, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.model_vqvae", "line_number": 94, "usage_type": "name"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.model_vqvae.DecomposeNet", "line_number": 99, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.model_vqvae", "line_number": 99, "usage_type": "name"}, {"api_name": "kernelphysiology.transformations.colour_spaces.dkl_from_rgb", "line_number": 117, "usage_type": "attribute"}, {"api_name": "kernelphysiology.transformations.colour_spaces", "line_number": 117, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 118, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.ColourTransformer.ResNetTransformer", "line_number": 130, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.ColourTransformer", "line_number": 130, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 139, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 140, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 141, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 141, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 144, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 158, "usage_type": "call"}, {"api_name": 
"kernelphysiology.utils.random_imutils.adjust_contrast", "line_number": 166, "usage_type": "attribute"}, {"api_name": "kernelphysiology.utils.random_imutils", "line_number": 166, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 167, "usage_type": "call"}, {"api_name": "kernelphysiology.utils.random_imutils.adjust_gamma", "line_number": 170, "usage_type": "attribute"}, {"api_name": "kernelphysiology.utils.random_imutils", "line_number": 170, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 171, "usage_type": "call"}, {"api_name": "kernelphysiology.utils.random_imutils.adjust_illuminant", "line_number": 174, "usage_type": "attribute"}, {"api_name": "kernelphysiology.utils.random_imutils", "line_number": 174, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 175, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.pytorch.utils.cv2_preprocessing.RandomAugmentationTransformation", "line_number": 179, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.pytorch.utils.cv2_preprocessing", "line_number": 179, "usage_type": "name"}, {"api_name": "kernelphysiology.dl.pytorch.utils.cv2_preprocessing.DecompositionTransformation", "line_number": 185, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.pytorch.utils.cv2_preprocessing", "line_number": 185, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 187, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 187, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path", "line_number": 223, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 226, "usage_type": "call"}, {"api_name": "os.path", "line_number": 226, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 233, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 233, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 236, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 245, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 246, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 247, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.util.save_checkpoint", "line_number": 268, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.util", "line_number": 268, "usage_type": "name"}, {"api_name": "time.time", "line_number": 302, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 340, "usage_type": "call"}, {"api_name": "time.time", "line_number": 346, "usage_type": "call"}, {"api_name": "time.time", "line_number": 349, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 352, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.util.grid_save_reconstructions", "line_number": 355, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.util", "line_number": 355, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 370, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 384, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 399, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.util.grid_save_reconstructions", "line_number": 404, "usage_type": 
"call"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.util", "line_number": 404, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 417, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 425, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.util.individual_save_reconstructions", "line_number": 440, "usage_type": "call"}, {"api_name": "kernelphysiology.dl.experiments.decomposition.util", "line_number": 440, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 451, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 456, "usage_type": "attribute"}]} +{"seq_id": "24138639992", "text": "# coding: utf-8\nimport tweepy\n\nconsumer_key = \"\"\nconsumer_secret = \"\"\naccess_token = \"\"\naccess_secret = \"\"\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_secret)\napi = tweepy.API(auth)\n\n#特定のユーザーのツイートを過去num件出力させる\ndef get_tweet(user, num):\n\ttest = api.user_timeline(id = \"%s\"%user, count = num)\n\tfor tweet in test:\n\t\tprint(tweet.created_at)\n\t\tprint(tweet.text)\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n#あるwordをnum件エゴサして出力させる\ndef search(word, kaisuu):\n\tkeywords = [word]\n\tquery = \"OR\".join(keywords)\n\n\tfor tweet in api.search(q = query, count = kaisuu):\n\t\tprint(tweet.created_at)\n\t\tprint(tweet.user.screen_name)\n\t\tprint(tweet.text)\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\nget_tweet(\"ここにUserID\", 抽出するtweet数)\n\nsearch(\"ここにエゴサするword\", 抽出するtweet数)", "repo_name": "SyougoA/TwitterAPI_test", "sub_path": "test_api.py", "file_name": "test_api.py", "file_ext": "py", "file_size_in_byte": 934, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "tweepy.OAuthHandler", "line_number": 9, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "34986966140", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('matricula', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='matricula',\n name='alumno',\n field=models.ForeignKey(related_name='alumno_matricula', to='alumno_profesor.Alumno', null=True),\n ),\n ]\n", "repo_name": "danielhuamani/Proyecto-taller-base-datos", "sub_path": "src/apps/matricula/migrations/0002_auto_20151215_1322.py", "file_name": "0002_auto_20151215_1322.py", "file_ext": "py", "file_size_in_byte": 455, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "71322095414", "text": "from tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nimport twitter_credentials\n\n\nclass TwitterStreamer():\n\n def __init__(self):\n pass\n\n def stream_tweets(self, 
fetched_tweets_filename, hash_tag_list):\n # This handles Twitter authetification and the connection to Twitter Streaming API\n listener = StdOutListener(fetched_tweets_filename)\n auth = OAuthHandler(twitter_credentials.CONSUMER_KEY,\n twitter_credentials.CONSUMER_SECRET)\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN,\n twitter_credentials.ACCESS_TOKEN_SECRET)\n stream = Stream(auth, listener)\n\n stream.filter(track=hash_tag_list)\n\n\nclass StdOutListener(StreamListener):\n\n def __init__(self, fetched_tweets_filename):\n self.fetched_tweets_filename = fetched_tweets_filename\n self.num = 1\n\n def on_data(self, data):\n try:\n # print(data)\n print('Number of grab: %d' % self.num)\n self.num += 1\n with open(self.fetched_tweets_filename, 'a') as tf:\n tf.write(data)\n return True\n except BaseException as e:\n print(\"Error on_data %s\" % str(e))\n return True\n\n def on_error(self, status):\n print(status)\n\n\nif __name__ == '__main__':\n\n # INITIAL HERE\n FILENAME = './Data/raw_data_nov_16.json'\n HASH_TAG = ['🐶', '🙈', '♻️', '😡', '😋', '😱', '🙏', '👍', '👫',\n '🇺🇸', '😂', '🙃', '😘', '❤️', '🔥', '🌚', '💯', '🙌', '🔞', '😭']\n\n hash_tag_list = HASH_TAG\n fetched_tweets_filename = FILENAME\n\n twitter_streamer = TwitterStreamer()\n twitter_streamer.stream_tweets(fetched_tweets_filename, hash_tag_list)\n", "repo_name": "kyleearth/emojiprediction", "sub_path": "DataScraping and Cleaning/1.data_grab.py", "file_name": "1.data_grab.py", "file_ext": "py", "file_size_in_byte": 1836, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "tweepy.OAuthHandler", "line_number": 15, "usage_type": "call"}, {"api_name": "twitter_credentials.CONSUMER_KEY", "line_number": 15, "usage_type": "attribute"}, {"api_name": "twitter_credentials.CONSUMER_SECRET", "line_number": 16, "usage_type": "attribute"}, {"api_name": "twitter_credentials.ACCESS_TOKEN", "line_number": 17, "usage_type": "attribute"}, {"api_name": "twitter_credentials.ACCESS_TOKEN_SECRET", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tweepy.Stream", "line_number": 19, "usage_type": "call"}, {"api_name": "tweepy.streaming.StreamListener", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "40795886816", "text": "##########################\n##\n## Code by Susan\n## padath314\n##\n## Modification to Async by unniisme\n##\n##########################\n\nimport socket\nimport sys\nfrom UAP import UAP,Message\nimport time\nimport asyncio\nimport asyncudp\n\nSESSION_TIMEOUT = 20 # Adjust this value as needed\n\nSESSIONS = {}\n\ndef PrintMessage(msg : Message, \n alternativeMessage = None,\n alternativeSequence = None):\n if alternativeMessage:\n msg.message = alternativeMessage\n if alternativeSequence:\n msg.seq = alternativeSequence\n print(f\"{hex(msg.sID)} [{msg.seq}] {msg.message}\")\n\nclass Session:\n def __init__(self, session_id, client_address, server_socket, active_sessions):\n self.session_id = session_id\n self.client_address = client_address\n self.last_activity_time = time.time()\n self.expected_sequence_number = 1\n self.server_socket = server_socket # Store the server socket\n self.active_sessions = active_sessions # Store the active sessions dictionary\n\n self.messages = asyncio.Queue() # Queue of packets for this session\n self.task = None\n\n def is_hello(self, message : Message) -> bool:\n return message.command == UAP.CommandEnum.HELLO and message.sID == self.session_id\n\n def update_activity_time(self):\n self.last_activity_time 
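`StdOutListener.on_data` in the streaming record above always returns True, so the stream runs until interrupted. In tweepy's `StreamListener`, returning False from `on_data` closes the stream, which gives a simple way to cap the number of collected tweets; a sketch with a hypothetical `MAX_TWEETS` limit:

```python
# Sketch: stop streaming after a fixed number of tweets. MAX_TWEETS is a
# hypothetical knob; tweepy disconnects when on_data returns False.
class LimitedListener(StdOutListener):
    MAX_TWEETS = 1000

    def on_data(self, data):
        super().on_data(data)            # write the tweet, bump self.num
        return self.num <= self.MAX_TWEETS
```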
= time.time()\n\n def is_timedout(self):\n return time.time() - self.last_activity_time > SESSION_TIMEOUT\n\n def process_packet(self, received_message):\n # print(received_message)\n # Extract the sequence number from the received message\n received_sequence_number = received_message.seq\n\n if received_sequence_number == self.expected_sequence_number:\n # Process the packet as expected\n # print(f\"Received packet with sequence number {received_sequence_number}: {received_message.message}\")\n PrintMessage(received_message)\n self.expected_sequence_number += 1 # Update the expected sequence number\n elif received_sequence_number < self.expected_sequence_number:\n # Handle out-of-order packet (protocol error)\n # print(f\"Received out-of-order packet with sequence number {received_sequence_number}.\")\n PrintMessage(received_message, \"Message out of order\")\n #self.close_session()\n else:\n # Handle missing packets\n for missing_sequence_number in range(self.expected_sequence_number, received_sequence_number):\n # print(f\"Lost packet with sequence number {missing_sequence_number}\")\n PrintMessage(received_message, \n alternativeMessage=\"Packet Lost\",\n alternativeSequence=missing_sequence_number)\n # Update the expected sequence number\n self.expected_sequence_number = received_sequence_number + 1\n\n def close_session(self):\n # Send a GOODBYE message to the client\n goodbye_message = Message(UAP.CommandEnum.GOODBYE, 0, self.session_id, \"GOODBYE\")\n encoded_goodbye_message = goodbye_message.EncodeMessage()\n self.server_socket.sendto(encoded_goodbye_message, self.client_address)\n \n # Remove the session from active_sessions\n del self.active_sessions[self.session_id]\n\n # Close the asynchronous task running this session\n self.task.cancel()\n\nasync def session_handler(server_socket, session_id):\n\n # Send a reply HELLO message back to the client\n session = SESSIONS[session_id]\n reply_message = Message(UAP.CommandEnum.HELLO, 0, session_id, \"Reply HELLO\")\n encoded_reply_message = reply_message.EncodeMessage()\n server_socket.sendto(encoded_reply_message, session.client_address)\n # print(\"Replies sent\")\n PrintMessage(reply_message, \"Session Started\")\n\n try:\n while True:\n # print('*')\n\n # Fetch session data from shared dictionary\n session = SESSIONS[session_id]\n\n client_address = session.client_address\n try:\n received_message = await asyncio.wait_for(session.messages.get(), SESSION_TIMEOUT)\n except asyncio.exceptions.TimeoutError:\n PrintMessage(Message( \n 0,\n session.expected_sequence_number,\n session_id,\n \"Closing session due to timeout\"\n ))\n session.close_session()\n # close_session() cancels this task; return so the unbound received_message is never used below\n return\n except Exception as e:\n raise e\n \n \n #print(f\"Received data from {client_address}: {received_message}\")\n\n if received_message.sID != session_id:\n raise RuntimeError(\"Received wrong session packet\")\n \n if received_message.command == UAP.CommandEnum.HELLO:\n raise RuntimeError(\"Received hello packet in task\")\n\n elif received_message.command == UAP.CommandEnum.DATA:\n\n # Update the session's last activity time\n session.update_activity_time()\n\n session.process_packet(received_message)\n # Send an ALIVE message in response to the DATA message\n alive_message = Message(UAP.CommandEnum.ALIVE, 0, session_id, \"ALIVE\")\n encoded_alive_message = alive_message.EncodeMessage()\n server_socket.sendto(encoded_alive_message, client_address)\n \n elif received_message.command == UAP.CommandEnum.GOODBYE:\n #print(\"\nreceived goodbye\n\")\n\n PrintMessage(received_message, 
\"Closing session\")\n session.close_session()\n break\n except asyncio.exceptions.CancelledError:\n pass\n\n\n\nasync def recieve_handler(server_socket):\n try:\n while True:\n data, client_address = await server_socket.recvfrom()\n received_message = Message.DecodeMessage(data)\n\n if received_message.command == UAP.CommandEnum.HELLO:\n session_id = received_message.sID\n \n if session_id not in SESSIONS:\n new_session = Session(session_id, client_address, server_socket, SESSIONS) # Pass server_socket and active_sessions\n if not new_session.is_hello(received_message):\n # Terminate the session if the initial message is not HELLO\n continue\n SESSIONS[session_id] = new_session\n else:\n SESSIONS[session_id].close_session()\n continue\n\n # Update the session's last activity time\n SESSIONS[session_id].update_activity_time()\n\n # Starting session task\n session_task = asyncio.ensure_future(session_handler(server_socket, session_id))\n SESSIONS[session_id].task = session_task\n\n elif received_message.command == UAP.CommandEnum.DATA or received_message.command == UAP.CommandEnum.GOODBYE:\n session_id = received_message.sID\n if session_id not in SESSIONS:\n # Terminate the session if the DATA message is received without a HELLO\n continue\n\n await SESSIONS[session_id].messages.put(received_message)\n except KeyboardInterrupt:\n print(\"recieve handler got keyboard interrupt\")\n except asyncio.exceptions.CancelledError:\n pass\n\n\nasync def input_handler():\n try:\n while True:\n # Wait for input and close server if input is q\n stdin = await a_input()\n if stdin.strip() == \"q\":\n break\n except KeyboardInterrupt:\n print(\"Input handler got keyboard interrupt\")\n except asyncio.exceptions.CancelledError:\n pass\n \n\n\ndef send_goodbye_to_inactive_sessions(active_sessions):\n inactive_sessions = [session for session in active_sessions.values() if session.is_inactive()]\n \n for session in inactive_sessions:\n session.close_session()\n\n\ndef send_goodbye_to_active_sessions(active_sessions):\n for session in active_sessions.values():\n session.close_session()\n\n# Function to take input asynchronously\nasync def a_input():\n return await asyncio.get_event_loop().run_in_executor(\n None, sys.stdin.readline\n )\n\n\nasync def main(port, host='0.0.0.0'):\n # server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n server_address = (host, port)\n # server_socket.bind(server_address)\n\n # Create asynchronous socket\n server_socket = await asyncudp.create_socket(local_addr=server_address)\n print(f\"Waiting on host {host} and port {port}\")\n\n\n # Define each task\n recieve_task = asyncio.ensure_future(recieve_handler(server_socket))\n input_task = asyncio.ensure_future(input_handler())\n\n # Await on all parallel tasks\n _, pending = await asyncio.wait([input_task, recieve_task], return_when=asyncio.FIRST_COMPLETED)\n\n # Cancel whichever tasks have not ended yet\n for task in pending:\n task.cancel()\n\n # Send GOODBYE message to all active sessions\n send_goodbye_to_active_sessions(SESSIONS.copy())\n\n # Close the socket and clean up\n server_socket.close()\n return\n \n\n\nif __name__ == \"__main__\":\n import sys\n if len(sys.argv) == 1:\n print(\"Usage: AsyncUAPServer.py port [host]\")\n elif len(sys.argv) == 2:\n asyncio.run(main(int(sys.argv[1])))\n else:\n asyncio.run(main(int(sys.argv[1]), sys.argv[2]))\n", "repo_name": "unniisme/Networking", "sub_path": "UDPcommunicator/AsyncUAPServer.py", "file_name": "AsyncUAPServer.py", "file_ext": "py", "file_size_in_byte": 9772, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "UAP.Message", "line_number": 21, "usage_type": "name"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "asyncio.Queue", "line_number": 39, "usage_type": "call"}, {"api_name": "UAP.Message", "line_number": 42, "usage_type": "name"}, {"api_name": "UAP.UAP.CommandEnum", "line_number": 43, "usage_type": "attribute"}, {"api_name": "UAP.UAP", "line_number": 43, "usage_type": "name"}, {"api_name": "time.time", "line_number": 46, "usage_type": "call"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "UAP.Message", "line_number": 78, "usage_type": "call"}, {"api_name": "UAP.UAP.CommandEnum", "line_number": 78, "usage_type": "attribute"}, {"api_name": "UAP.UAP", "line_number": 78, "usage_type": "name"}, {"api_name": "UAP.Message", "line_number": 92, "usage_type": "call"}, {"api_name": "UAP.UAP.CommandEnum", "line_number": 92, "usage_type": "attribute"}, {"api_name": "UAP.UAP", "line_number": 92, "usage_type": "name"}, {"api_name": "asyncio.wait_for", "line_number": 107, "usage_type": "call"}, {"api_name": "asyncio.exceptions", "line_number": 108, "usage_type": "attribute"}, {"api_name": "UAP.Message", "line_number": 109, "usage_type": "call"}, {"api_name": "UAP.UAP.CommandEnum", "line_number": 125, "usage_type": "attribute"}, {"api_name": "UAP.UAP", "line_number": 125, "usage_type": "name"}, {"api_name": "UAP.UAP.CommandEnum", "line_number": 128, "usage_type": "attribute"}, {"api_name": "UAP.UAP", "line_number": 128, "usage_type": "name"}, {"api_name": "UAP.Message", "line_number": 135, "usage_type": "call"}, {"api_name": "UAP.UAP.CommandEnum", "line_number": 135, "usage_type": "attribute"}, {"api_name": "UAP.UAP", "line_number": 135, "usage_type": "name"}, {"api_name": "UAP.UAP.CommandEnum", "line_number": 139, "usage_type": "attribute"}, {"api_name": "UAP.UAP", "line_number": 139, "usage_type": "name"}, {"api_name": "asyncio.exceptions", "line_number": 145, "usage_type": "attribute"}, {"api_name": "UAP.Message.DecodeMessage", "line_number": 154, "usage_type": "call"}, {"api_name": "UAP.Message", "line_number": 154, "usage_type": "name"}, {"api_name": "UAP.UAP.CommandEnum", "line_number": 156, "usage_type": "attribute"}, {"api_name": "UAP.UAP", "line_number": 156, "usage_type": "name"}, {"api_name": "asyncio.ensure_future", "line_number": 173, "usage_type": "call"}, {"api_name": "UAP.UAP.CommandEnum", "line_number": 176, "usage_type": "attribute"}, {"api_name": "UAP.UAP", "line_number": 176, "usage_type": "name"}, {"api_name": "asyncio.exceptions", "line_number": 185, "usage_type": "attribute"}, {"api_name": "asyncio.exceptions", "line_number": 198, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 216, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 217, "usage_type": "attribute"}, {"api_name": "asyncudp.create_socket", "line_number": 227, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 232, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 233, "usage_type": "call"}, {"api_name": "asyncio.wait", "line_number": 236, "usage_type": "call"}, {"api_name": "asyncio.FIRST_COMPLETED", "line_number": 236, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 253, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 255, "usage_type": "attribute"}, {"api_name": "asyncio.run", 
"line_number": 256, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 256, "usage_type": "attribute"}, {"api_name": "asyncio.run", "line_number": 258, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 258, "usage_type": "attribute"}]} +{"seq_id": "70735809974", "text": "import copy\nimport math\nfrom typing import Union, Tuple, List, Dict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules import ModuleList\n\n\nclass MultiheadAttetion(nn.Module):\n \n def __init__(self,\n embed_dim,\n num_heads,\n dropout=0.0,\n bias=False,\n add_bias_kv=False,\n kdim=None,\n vdim=None):\n super(MultiheadAttetion, self).__init__()\n \n self.embed_dim = embed_dim\n self.kdim = kdim if vdim is not None else embed_dim\n self.vdim = vdim if vdim is not None else embed_dim\n self._qkv_same_embed_dim = self.embed_dim == self.kdim and self.vdim == embed_dim\n \n self.num_heads = num_heads\n self.head_dims = embed_dim // num_heads\n assert self.head_dims * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n \n self.q_proj_weight = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.k_proj_weight = nn.Linear(embed_dim, self.kdim, bias=add_bias_kv)\n self.v_proj_weight = nn.Linear(embed_dim, self.vdim, bias=add_bias_kv)\n \n self.output_proj = nn.Linear(embed_dim, embed_dim)\n \n self.dropout = nn.Dropout(dropout)\n \n def forward(self,\n query,\n key,\n value,\n attn_mask=None,\n attn_method=None):\n \"\"\"\n\n :param query: torch.Tensor[batch_size, max_seq_len, hidden_size]\n :param key: torch.Tensor[batch_size, max_seq_len, hidden_size]\n :param value: torch.Tensor[batch_size, max_seq_len, hidden_size]\n :param attn_mask: torch.Tensor[batch_size*nheads ,max_seq_len, max_seq_len]\n :return:\n \"\"\"\n if self._qkv_same_embed_dim is True:\n \n q_proj = self.q_proj_weight(query) # [batch_size, max_seq_len, hidden_size]\n k_proj = self.q_proj_weight(key) # [batch_size, max_seq_len, hidden_size]\n v_proj = self.q_proj_weight(value) # [batch_size, max_seq_len, hidden_size]\n \n q_proj = torch.stack(q_proj.split(self.head_dims, dim=-1),\n dim=1) # [batch_size, num_heads, max_seq_len, hidden_size/num_heads]\n k_proj = torch.stack(k_proj.split(self.head_dims, dim=-1),\n dim=1) # [batch_size, num_heads, max_seq_len, hidden_size/num_heads]\n v_proj = torch.stack(v_proj.split(self.head_dims, dim=-1),\n dim=1) # [batch_size, num_heads, max_seq_len, hidden_size/num_heads]\n \n attn_logit = torch.matmul(q_proj, k_proj.permute([0, 1, 3, 2])) / math.sqrt(\n self.head_dims) # [batch_size, num_heads, max_seq_len, max_seq_len]\n if attn_mask is not None:\n attn_logit = self.attention_mask(attn_logit, attn_mask, attn_method)\n attn_logit = self.dropout(attn_logit)\n attn_scores = attn_logit.softmax(dim=-1)\n \n attn_res = torch.matmul(attn_scores, v_proj) # [batch_size, num_heads, max_seq_len, hidden_size/num_heads]\n attn_res = attn_res.permute([0, 2, 1, 3]).reshape_as(query) # [batch_size, max_seq_len, hidden_size]\n output = self.output_proj(attn_res)\n \n return output, attn_scores\n \n \n else:\n raise TypeError(\"k,q,v must be the same size\")\n \n def attention_mask(self, attn_logit,\n attn_mask: torch.Tensor,\n mask_method=None):\n \"\"\"\n\n 1. add: abandon = -inf, keep = 0,\n 2. 
multi: abandon = 0, keep = 1, then masked entries are shifted towards -1e6\n \"\"\"\n def mask(logit: torch.Tensor, attn_mask: torch.Tensor):\n attn_mask = (attn_mask.float() - 1) * 1000000\n logit = logit + attn_mask\n return logit\n \n def add(logit: torch.Tensor, attn_mask: torch.Tensor):\n logit = logit + attn_mask\n return logit\n \n def multi(logit: torch.Tensor, attn_mask: torch.Tensor):\n logit = logit * attn_mask\n logit = mask(logit, attn_mask)\n return logit\n \n mask_methods = {\n mask.__name__: mask,\n add.__name__: add,\n multi.__name__: multi\n }\n \n if attn_mask is None:\n return None\n \n batch_size, num_heads, max_seq_len, _ = attn_logit.size()\n \n if attn_mask.size() == (batch_size * self.num_heads, max_seq_len, max_seq_len):\n attn_mask = attn_mask.reshape(batch_size, self.num_heads, max_seq_len, max_seq_len)\n elif attn_mask.size() == (batch_size, self.num_heads, max_seq_len, max_seq_len):\n pass\n elif attn_mask.size() == (batch_size, max_seq_len, max_seq_len):\n attn_mask = attn_mask.unsqueeze(dim=1).repeat([1, self.num_heads, 1, 1])\n else:\n raise TypeError(\n \"The mask shape only accepts [b, h, s, s], [b, s, s], [b*h, s, s] with b=batch_size, h=num of heads, s=max_seq_len\")\n \n # if mask_method not in mask_methods:\n # print(\"Warning: The {} not in {}. Now we use {} method\".format(str(mask_method),list(mask_methods.keys()), add.__name__))\n mask_method_func = mask_methods.get(mask_method, mask)\n return mask_method_func(attn_logit, attn_mask)\n \n @property\n def device(self):\n return next(iter(self.parameters())).device\n\n\nclass SelfAttentionLayer(nn.Module):\n def __init__(self,\n embed_dim,\n num_heads,\n hidden_size=None,\n attn_prob_dropout=0.0,\n hidden_dropout=0.1,\n bias=False,\n add_bias_kv=False,\n kdim=None,\n vdim=None):\n super(SelfAttentionLayer, self).__init__()\n self.hidden_size = embed_dim * 4 if hidden_size is None else hidden_size\n self.multihead = MultiheadAttention(embed_dim, num_heads, attn_prob_dropout, bias, add_bias_kv, kdim, vdim)\n self.inner_proj = nn.Linear(embed_dim, self.hidden_size)\n self.outer_proj = nn.Linear(self.hidden_size, embed_dim)\n \n self.attn_prob_dropout = attn_prob_dropout\n self.hidden_dropout_rate = hidden_dropout\n self.hidden_dropout = nn.Dropout(hidden_dropout)\n \n self.norm = nn.LayerNorm(embed_dim)\n \n def forward(self,\n query,\n key=None,\n value=None,\n attn_mask=None,\n attn_method=None,\n return_attn=True):\n \"\"\"\n\n :param query:\n :param key:\n :param value:\n :param return_attn:\n :return:\n \"\"\"\n if key is None and value is None:\n key = value = query\n attn_output, attn_scores = self.multihead(query, key, value, attn_mask, attn_method)\n \n attn_output = self.norm(self.hidden_dropout(attn_output) + query)\n \n proj_output = self.outer_proj(F.relu(self.hidden_dropout(self.inner_proj(attn_output))))\n \n self_out = self.norm(attn_output + self.hidden_dropout(proj_output))\n if return_attn:\n return self_out, attn_scores\n else:\n return self_out\n\n\nclass SelfAttention(nn.Module):\n \n def __init__(self, encoder_layer: nn.Module, num_layers, norm=None):\n super(SelfAttention, self).__init__()\n self.layers = _get_clones(encoder_layer, num_layers)\n self.norm = norm\n \n def forward(self, src: torch.Tensor, attn_mask=None, attn_method=None, return_attn=True):\n output = src\n attn_scores_list = []\n \n for mod in self.layers:\n output, attn_scores = mod(output, attn_mask=attn_mask, attn_method=attn_method, return_attn=return_attn)\n attn_scores_list.append(attn_scores)\n \n if self.norm is not None:\n output = self.norm(output)\n \n return output, attn_scores_list\n\n\ndef 
_get_clones(module, N):\n return ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\ndef self_attention_output(doc_structure_output: Tuple) -> Tuple[torch.Tensor, List[torch.Tensor], Union[None, Dict]]:\n \"\"\"\n\n :param doc_structure_output: a tuple of torch.Tensor [batch_size, num_heads, max_seq_len, hidden_size]\n :return:\n \"\"\"\n output, attn_scores = doc_structure_output\n attn_score_list = attn_scores.split(1, dim=1)\n attn_score_list = [item.squeeze(dim=1) for item in attn_score_list]\n return output, attn_score_list, {}\n\n\ndef self_attention_input(edus_vector, node_length, adj_mats):\n adj_mats = torch.stack(adj_mats, dim=1)\n return edus_vector, None, None, adj_mats\n\n\nif __name__ == '__main__':\n input = torch.rand(8, 4, 128)\n mask = torch.ones((8, 8, 4, 4)).tril()\n \n model = SelfAttentionLayer(128, 8, bias=False)\n output, df = model(input, attn_mask=mask)\n", "repo_name": "hanguantianxia/nlp_frameworks", "sub_path": "framework/module/encode/self_attetntion.py", "file_name": "self_attetntion.py", "file_ext": "py", "file_size_in_byte": 9471, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "torch.nn.Module", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 67, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 99, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 105, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 109, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 146, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 146, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 160, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 161, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 165, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 167, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", 
"line_number": 190, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 190, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 199, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 199, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 201, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 201, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 206, "usage_type": "attribute"}, {"api_name": "torch.nn.modules.ModuleList", "line_number": 221, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 221, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 224, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 224, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 224, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 224, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 224, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 237, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 242, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 243, "usage_type": "call"}]} +{"seq_id": "26730035437", "text": "import base64\nimport math\nimport os\nimport random\nfrom enum import Enum\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives.ciphers import algorithms, modes, Cipher\nfrom typing import List, Callable\n\nclass Mode(Enum):\n ECB = 1\n CBC = 2\n CFB = 3\n PCBC = 4\n OFB = 5\n\nclass Algorithm(Enum):\n AES = 1\n TRIPLEDES = 2\n\ndef xor_bytes(a: bytes, b: bytes) -> bytes:\n assert len(a) == len(b)\n return bytes([x ^ y for x, y in zip(a, b)])\n\ndef single_byte_xor(plaintext: bytes, key: bytes) -> bytes:\n assert len(key) == 1\n return xor_bytes(plaintext, key * len(plaintext))\n\nstandard_frequency = {\n 'a': 0.08167081670816709,\n 'b': 0.014920149201492016,\n 'c': 0.02782027820278203,\n 'd': 0.04253042530425304,\n 'e': 0.12702127021270213,\n 'f': 0.022280222802228026,\n 'g': 0.020150201502015023,\n 'h': 0.06094060940609407,\n 'i': 0.06966069660696607,\n 'j': 0.0015300153001530016,\n 'k': 0.007720077200772008,\n 'l': 0.04025040250402505,\n 'm': 0.024060240602406028,\n 'n': 0.06749067490674907,\n 'o': 0.07507075070750707,\n 'p': 0.019290192901929022,\n 'q': 0.000950009500095001,\n 'r': 0.059870598705987065,\n 's': 0.06327063270632706,\n 't': 0.09056090560905608,\n 'u': 0.027580275802758028,\n 'v': 0.00978009780097801,\n 'w': 0.023600236002360022,\n 'x': 0.0015000150001500015,\n 'y': 0.01974019740197402,\n 'z': 0.0007400074000740007,\n}\n\ndef i_divergence_from_english(data: bytes) -> (float, int):\n try:\n text = data.decode(\"ascii\")\n except UnicodeDecodeError:\n return (math.inf, 0)\n frequencies = {}\n text = text.lower()\n counted = 0\n for letter in text:\n if letter.isalpha():\n counted += 1\n if letter in frequencies:\n frequencies[letter] += 1\n else:\n frequencies[letter] = 1\n for letter in frequencies:\n frequencies[letter] /= counted\n divergence = 0\n counted = 0\n for letter, freq in frequencies.items():\n if letter in standard_frequency:\n if freq > 0 and standard_frequency[letter] > 0:\n divergence += freq * math.log(freq / standard_frequency[letter])\n counted += 1\n #relative_divergence = divergence\n #if counted > 0:\n #relative_divergence /= counted\n if counted == 0:\n return (math.inf, 0)\n return (divergence, counted)\n\ndef ascii_in_text(data: bytes) -> int:\n result = 0\n for 
byte in data:\n if byte >= 32 and byte <= 126:\n result += 1\n return result\n\nEPSILON = 0.000001\n\ndef break_single_key_xor(ciphertext: bytes) -> (bytes, float, bytes):\n lowest_i_div = math.inf\n lowest_i_div_key = None\n lowest_i_div_plaintext = None\n max_counted = 0\n for key in range(256):\n plaintext = single_byte_xor(ciphertext, bytes([key]))\n (i_div, counted) = i_divergence_from_english(plaintext)\n if abs(i_div - lowest_i_div) < EPSILON:\n if counted > max_counted:\n lowest_i_div = i_div\n lowest_i_div_key = bytes([key])\n lowest_i_div_plaintext = plaintext\n max_counted = counted\n else:\n new_ascii = ascii_in_text(plaintext)\n old_ascii = ascii_in_text(lowest_i_div_plaintext)\n if new_ascii > old_ascii:\n lowest_i_div = i_div\n lowest_i_div_key = bytes([key])\n lowest_i_div_plaintext = plaintext\n max_counted = counted\n elif i_div < lowest_i_div:\n lowest_i_div = i_div\n lowest_i_div_key = bytes([key])\n lowest_i_div_plaintext = plaintext\n max_counted = counted\n return (lowest_i_div_plaintext, lowest_i_div, lowest_i_div_key)\n\ndef repeating_key_xor(plaintext: bytes, key: bytes) -> bytes:\n ciphertext = b''\n for i in range(len(plaintext)):\n ciphertext += bytes([plaintext[i] ^ key[i % len(key)]])\n return ciphertext\n\ndef binary_hamming_distance(a: bytes, b: bytes) -> int:\n assert len(a) == len(b)\n distance = 0\n for i in range(len(a)):\n # xor the bytes, then count the number of 1s in the result\n distance += bin(a[i] ^ b[i]).count(\"1\")\n return distance\n\ndef find_best_keysizes(ciphertext: bytes) -> List[int]:\n keysize_to_distance = {}\n for keysize in range(2, 41):\n distance = 0\n\n # take all the chunks of size keysize possible from the ciphertext\n chunks = [ciphertext[j:j+keysize] for j in range(0, len(ciphertext), keysize)][:10]\n\n counted = 0\n for i in range(len(chunks)):\n for j in range(len(chunks)):\n if i != j and len(chunks[i]) == len(chunks[j]):\n distance += binary_hamming_distance(chunks[i], chunks[j])\n counted += 1\n \n if counted > 0:\n distance /= counted\n \n # normalize the distance by keysize\n distance /= keysize\n\n keysize_to_distance[keysize] = distance\n return sorted(keysize_to_distance, key=keysize_to_distance.get)[:1]\n\ndef find_key_for_keysize(ciphertext: bytes, keysize: int) -> bytes:\n blocks = {}\n key = b''\n for i in range(keysize):\n blocks[i] = b''\n for j in range(i, len(ciphertext), keysize):\n blocks[i] += bytes([ciphertext[j]])\n (_, _, key_byte) = break_single_key_xor(blocks[i])\n assert key_byte is not None\n assert len(key_byte) == 1\n key += key_byte\n return key\n\ndef break_repeating_key_xor(ciphertext: bytes) -> bytes:\n likely_keysizes = find_best_keysizes(ciphertext)\n lowest_i_div = math.inf\n best_plaintext = None\n best_key = None\n best_counted = 0\n ascii_in_best = 0\n for keysize in likely_keysizes:\n key = find_key_for_keysize(ciphertext, keysize)\n assert key is not None\n assert len(key) == keysize\n plaintext = repeating_key_xor(ciphertext, key)\n (i_div, counted_chars) = i_divergence_from_english(plaintext)\n if abs(i_div - lowest_i_div) < EPSILON:\n if counted_chars > best_counted:\n lowest_i_div = i_div\n best_plaintext = plaintext\n best_key = key\n best_counted = counted_chars\n ascii_in_best = ascii_in_text(plaintext)\n else:\n new_ascii = ascii_in_text(plaintext)\n if new_ascii > ascii_in_best:\n lowest_i_div = i_div\n best_plaintext = plaintext\n best_key = key\n best_counted = counted_chars\n ascii_in_best = new_ascii\n if i_div < lowest_i_div:\n lowest_i_div = i_div\n best_plaintext = 
plaintext\n best_key = key\n best_counted = counted_chars\n ascii_in_best = ascii_in_text(plaintext)\n return (best_plaintext, best_key)\n\ndef block_cipher_encrypt(plaintext: bytes, key: bytes, algorithm_name: Algorithm, mode_name: Mode, iv: bytes = b'') -> bytes:\n algorithm = None\n if algorithm_name == Algorithm.AES:\n algorithm = algorithms.AES(key)\n elif algorithm_name == Algorithm.TRIPLEDES:\n algorithm = algorithms.TripleDES(key)\n else:\n raise Exception(\"Invalid algorithm name\")\n mode = None\n if mode_name == Mode.ECB:\n mode = modes.ECB()\n elif mode_name == Mode.CBC:\n mode = modes.CBC(iv)\n elif mode_name == Mode.CFB:\n mode = modes.CFB(iv)\n elif mode_name == Mode.OFB:\n mode = modes.OFB(iv)\n else:\n raise Exception(\"Invalid mode name\")\n backend = default_backend()\n cipher = Cipher(\n algorithm,\n mode,\n backend=backend\n )\n encryptor = cipher.encryptor()\n return encryptor.update(plaintext) + encryptor.finalize()\n\ndef block_cipher_decrypt(ciphertext: bytes, key: bytes, algorithm_name: Algorithm, mode_name: Mode, iv: bytes = b'') -> bytes:\n algorithm = None\n if algorithm_name == Algorithm.AES:\n algorithm = algorithms.AES(key)\n elif algorithm_name == Algorithm.TRIPLEDES:\n algorithm = algorithms.TripleDES(key)\n else:\n raise Exception(\"Invalid algorithm name\")\n mode = None\n if mode_name == Mode.ECB:\n mode = modes.ECB()\n elif mode_name == Mode.CBC:\n mode = modes.CBC(iv)\n elif mode_name == Mode.CFB:\n mode = modes.CFB(iv)\n elif mode_name == Mode.PCBC:\n mode = modes.PCBC(iv)\n elif mode_name == Mode.OFB:\n mode = modes.OFB(iv)\n else:\n raise Exception(\"Invalid mode name\")\n backend = default_backend()\n cipher = Cipher(\n algorithm,\n mode,\n backend=backend\n )\n decryptor = cipher.decryptor()\n return decryptor.update(ciphertext) + decryptor.finalize()\n\n# Detect any repetition, even when the block size is unknown\ndef detect_repetition(ciphertext: bytes) -> bool:\n if len(ciphertext) < 16:\n return False\n for blocksize in range(8, len(ciphertext) // 2):\n blocks = {}\n for i in range(0, len(ciphertext), blocksize):\n block = ciphertext[i:i+blocksize]\n if block in blocks:\n return True\n else:\n blocks[block] = True\n return False\n\n# Detect repetition when the block size is known\ndef detect_repetition_with_blocksize(ciphertext: bytes, blocksize: int) -> bool:\n if len(ciphertext) < blocksize:\n return False\n blocks = {}\n for i in range(0, len(ciphertext), blocksize):\n block = ciphertext[i:i+blocksize]\n if block in blocks:\n return True\n else:\n blocks[block] = True\n return False\n\ndef detect_128_bit_ecb(texts: List[bytes]) -> bytes:\n for text in texts:\n if detect_repetition(text):\n return text\n return None\n\ndef pkcs7_pad(data: bytes, blocksize: int) -> bytes:\n if len(data) % blocksize == 0:\n return data + bytes([blocksize] * blocksize)\n pad_length = blocksize - (len(data) % blocksize)\n return data + bytes([pad_length] * pad_length)\n\ndef pkcs7_validate(data: bytes) -> bool:\n pad_length = data[-1]\n if pad_length > len(data):\n return False\n for i in range(len(data) - pad_length, len(data)):\n if data[i] != pad_length:\n return False\n return True\n\ndef pkcs7_unpad(data: bytes) -> bytes:\n pad_length = data[-1]\n if pkcs7_validate(data):\n return data[:-pad_length]\n else:\n raise Exception(\"Invalid padding\")\n\ndef manual_ecb_encrypt(data: bytes, key: bytes, algorithm_name: Algorithm) -> bytes:\n ciphertext = b''\n algorithm = None\n if algorithm_name == Algorithm.AES:\n algorithm = algorithms.AES\n elif algorithm_name 
== Algorithm.TRIPLEDES:\n algorithm = algorithms.TripleDES\n else:\n raise Exception(\"Invalid algorithm name\")\n byte_block_size = algorithm.block_size // 8\n data = pkcs7_pad(data, byte_block_size)\n for i in range(0, len(data), byte_block_size):\n block = data[i:i+byte_block_size]\n ciphertext += block_cipher_encrypt(block, key, algorithm_name, Mode.ECB)\n return ciphertext\n\ndef manual_ecb_decrypt(data: bytes, key: bytes, algorithm_name: Algorithm) -> bytes:\n plaintext = b''\n algorithm = None\n if algorithm_name == Algorithm.AES:\n algorithm = algorithms.AES\n elif algorithm_name == Algorithm.TRIPLEDES:\n algorithm = algorithms.TripleDES\n else:\n raise Exception(\"Invalid algorithm name\")\n byte_block_size = algorithm.block_size // 8\n for i in range(0, len(data), byte_block_size):\n block = data[i:i+byte_block_size]\n if len(block) < byte_block_size:\n raise Exception(\"Invalid ciphertext length\")\n block = block_cipher_decrypt(block, key, algorithm_name, Mode.ECB)\n plaintext += block\n plaintext = pkcs7_unpad(plaintext)\n return plaintext\n\ndef manual_cbc_encrypt(data: bytes, key: bytes, algorithm_name: Algorithm, iv: bytes) -> bytes:\n ciphertext = b''\n previous_block = iv\n algorithm = None\n if algorithm_name == Algorithm.AES:\n algorithm = algorithms.AES\n elif algorithm_name == Algorithm.TRIPLEDES:\n algorithm = algorithms.TripleDES\n else:\n raise Exception(\"Invalid algorithm name\")\n byte_block_size = algorithm.block_size // 8\n data = pkcs7_pad(data, byte_block_size)\n for i in range(0, len(data), byte_block_size):\n block = data[i:i+byte_block_size]\n block = xor_bytes(block, previous_block)\n previous_block = block_cipher_encrypt(block, key, algorithm_name, Mode.ECB)\n ciphertext += previous_block\n return ciphertext\n\ndef manual_cbc_decrypt(data: bytes, key: bytes, algorithm_name: Algorithm, iv: bytes) -> bytes:\n plaintext = b''\n previous_block = iv\n algorithm = None\n if algorithm_name == Algorithm.AES:\n algorithm = algorithms.AES\n elif algorithm_name == Algorithm.TRIPLEDES:\n algorithm = algorithms.TripleDES\n else:\n raise Exception(\"Invalid algorithm name\")\n byte_block_size = algorithm.block_size // 8\n for i in range(0, len(data), byte_block_size):\n block = data[i:i+byte_block_size]\n block = block_cipher_decrypt(block, key, algorithm_name, Mode.ECB)\n if len(block) < byte_block_size:\n raise Exception(\"Invalid ciphertext length\")\n block = xor_bytes(block, previous_block)\n previous_block = data[i:i+byte_block_size]\n plaintext += block\n plaintext = pkcs7_unpad(plaintext)\n return plaintext\n\ndef random_aes_key() -> bytes:\n return os.urandom(16)\n\ndef random_aes_encryption_oracle(input: bytes) -> (bytes, Mode):\n key = random_aes_key()\n prefix_count = random.randint(5, 10)\n suffix_count = random.randint(5, 10)\n prefix = os.urandom(prefix_count)\n suffix = os.urandom(suffix_count)\n plaintext = prefix + input + suffix\n plaintext = pkcs7_pad(plaintext, algorithms.AES.block_size // 8)\n if random.randint(0, 1) == 0:\n return (manual_ecb_encrypt(plaintext, key, Algorithm.AES), Mode.ECB)\n else:\n return (manual_cbc_encrypt(plaintext, key, Algorithm.AES, os.urandom(16)), Mode.CBC)\n\n# Detect ECB or CBC when the block size is unknown\ndef detect_ecb_or_cbc(ciphertext) -> Mode:\n if detect_repetition(ciphertext):\n return Mode.ECB\n else:\n return Mode.CBC\n\n# Detect ECB or CBC when the block size is known\ndef detect_ecb_or_cbc_with_blocksize(ciphertext, blocksize) -> Mode:\n if detect_repetition_with_blocksize(ciphertext, blocksize):\n 
return Mode.ECB\n else:\n return Mode.CBC\n\n# The random but constant key used in challenge 12 and later 14\nKEY = random_aes_key()\n\n# AES in ECB mode with a random but constant suffix and key\ndef reused_key_aes_encryption_oracle(input: bytes) -> bytes:\n suffix = base64.b64decode(b'Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkgaGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBqdXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUgYnkK')\n full_plaintext = input + suffix\n result = manual_ecb_encrypt(full_plaintext, KEY, Algorithm.AES)\n return result\n\n# The class used in challenge 13\nclass Authenticator:\n def __init__(self):\n self.key = random_aes_key()\n\n @staticmethod\n def parse_cookie(cookie: bytes) -> dict:\n result = {}\n for pair in cookie.split(b'&'):\n (key, value) = pair.split(b'=')\n result[key] = value\n return result\n\n @staticmethod\n def profile_for(email: bytes) -> bytes:\n if b'&' in email or b'=' in email:\n raise Exception(\"Invalid email\")\n return b'email=' + email + b'&uid=10&role=user'\n\n def encrypt_profile_for(self, email: bytes) -> bytes:\n return manual_ecb_encrypt(Authenticator.profile_for(email), self.key, Algorithm.AES)\n\n def decrypt_profile(self, ciphertext: bytes) -> dict:\n return Authenticator.parse_cookie(manual_ecb_decrypt(ciphertext, self.key, Algorithm.AES))\n\n# The random but constant prefix used in challenge 14\nPREFIX = os.urandom(random.randint(10, 3 * algorithms.AES.block_size // 8))\n\n# AES in ECB mode with a random but constant prefix, suffix and key\n# Reuses the function from challenge 12\ndef random_prefix_ecb_oracle(input: bytes) -> bytes:\n plaintext = PREFIX + input\n return reused_key_aes_encryption_oracle(plaintext)\n\n# Detect the block size of a block cipher when a constant prefix and/or suffix is pre/appended to the plaintext\ndef detect_block_size(encryptor: Callable[[bytes], bytes]) -> (int, int):\n initial_output = encryptor(b'')\n initial_output_size = len(initial_output)\n added_bytes = 1\n output_size = 0\n block_size = 0\n while True:\n output_size = len(encryptor(b'A' * added_bytes))\n if output_size > initial_output_size:\n block_size = output_size - initial_output_size\n break\n added_bytes += 1\n return (block_size, added_bytes)\n\n# Detect the length of the prefix when a constant prefix is prepended to the plaintext and ECB is used\ndef detect_prefix_length(encryptor: Callable[[bytes], bytes]) -> int:\n initial_output = encryptor(b'')\n initial_output_size = len(initial_output)\n added_bytes = 1\n output_size = 0\n prefix_length = -1\n block_size = 0\n while True:\n output_size = len(encryptor(b'A' * added_bytes))\n if output_size > initial_output_size:\n block_size = output_size - initial_output_size\n break\n added_bytes += 1\n assert added_bytes <= block_size\n length_for_repetition = 2 * block_size\n while not detect_repetition_with_blocksize(encryptor(b'A' * length_for_repetition), block_size):\n length_for_repetition += 1\n ciphertext = encryptor(b'A' * length_for_repetition)\n missing_bytes = length_for_repetition - 2 * block_size\n assert detect_repetition_with_blocksize(ciphertext, block_size)\n blocks = []\n for i in range(0, len(ciphertext), block_size):\n blocks.append(ciphertext[i:i+block_size])\n for i in range(1, len(blocks)):\n if blocks[i] == blocks[i-1]:\n prefix_length = (i - 1) * block_size - missing_bytes\n break\n assert prefix_length >= 0\n return prefix_length\n\ndef quote_out_metacharacters(s: str) -> str:\n result = ''\n # Replace ; with %3B and = 
with %3D\n for c in s:\n if c == ';':\n result += '%3B'\n elif c == '=':\n result += '%3D'\n else:\n result += c\n return result\n\n# The first function for challenge 16\ndef encrypt_userdata(userdata: str) -> bytes:\n userdata = quote_out_metacharacters(userdata)\n prefix = b'comment1=cooking%20MCs;userdata='\n suffix = b';comment2=%20like%20a%20pound%20of%20bacon'\n plaintext = prefix + userdata.encode('ascii') + suffix\n return manual_cbc_encrypt(plaintext, KEY, Algorithm.AES, os.urandom(16))\n\n# The second function for challenge 16\ndef decrypt_userdata(ciphertext: bytes) -> bool:\n plaintext = manual_cbc_decrypt(ciphertext, KEY, Algorithm.AES, os.urandom(16))\n return b';admin=true;' in plaintext", "repo_name": "bencemali/cryptopals", "sub_path": "cryptolib.py", "file_name": "cryptolib.py", "file_ext": "py", "file_size_in_byte": 19216, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "enum.Enum", "line_number": 10, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 17, "usage_type": "name"}, {"api_name": "math.inf", "line_number": 62, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 80, "usage_type": "call"}, {"api_name": "math.inf", "line_number": 86, "usage_type": "attribute"}, {"api_name": "math.inf", "line_number": 99, "usage_type": "attribute"}, {"api_name": "math.inf", "line_number": 180, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.AES", "line_number": 217, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 217, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.TripleDES", "line_number": 219, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 219, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes.ECB", "line_number": 224, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes", "line_number": 224, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes.CBC", "line_number": 226, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes", "line_number": 226, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes.CFB", "line_number": 228, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes", "line_number": 228, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes.OFB", "line_number": 230, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes", "line_number": 230, "usage_type": "name"}, {"api_name": "cryptography.hazmat.backends.default_backend", "line_number": 233, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.Cipher", "line_number": 234, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.AES", "line_number": 245, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 245, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.TripleDES", "line_number": 247, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 247, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes.ECB", "line_number": 252, "usage_type": "call"}, {"api_name": 
"cryptography.hazmat.primitives.ciphers.modes", "line_number": 252, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes.CBC", "line_number": 254, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes", "line_number": 254, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes.CFB", "line_number": 256, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes", "line_number": 256, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes.PCBC", "line_number": 258, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes", "line_number": 258, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes.OFB", "line_number": 260, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes", "line_number": 260, "usage_type": "name"}, {"api_name": "cryptography.hazmat.backends.default_backend", "line_number": 263, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.Cipher", "line_number": 264, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 299, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.AES", "line_number": 331, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 331, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.TripleDES", "line_number": 333, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 333, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.AES", "line_number": 347, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 347, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.TripleDES", "line_number": 349, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 349, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.AES", "line_number": 367, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 367, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.TripleDES", "line_number": 369, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 369, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.AES", "line_number": 386, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 386, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.TripleDES", "line_number": 388, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 388, "usage_type": "name"}, {"api_name": "os.urandom", "line_number": 404, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 408, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 409, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 410, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 411, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.AES", "line_number": 413, "usage_type": "attribute"}, {"api_name": 
"cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 413, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 414, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 417, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 438, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 469, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 469, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.AES", "line_number": 469, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 469, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 478, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 493, "usage_type": "name"}, {"api_name": "os.urandom", "line_number": 541, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 545, "usage_type": "call"}]} +{"seq_id": "30696825020", "text": "\"\"\"\nFunction that parse markdown table to Apache Spark (PySpark) DataFrame.\n\nFunctions provided:\n* spark_df\n\"\"\"\nfrom pyspark.sql import SparkSession, DataFrame\nfrom pyspark.sql.types import (\n IntegerType,\n FloatType,\n DoubleType,\n LongType,\n ShortType,\n TimestampType,\n StringType,\n ArrayType,\n MapType,\n StructType,\n StructField\n)\n\nfrom typing import List, Union, Any\n\nfrom markdown_frames.utils import (\n make_table,\n get_column_names_types,\n get_data_from_table,\n get_python_type\n)\nfrom markdown_frames.type_definitions import (\n STRING,\n INTEGER,\n BIG_INTEGER,\n SMALL_INTEGER,\n FLOAT,\n DOUBLE,\n TIMESTAMP,\n)\n\n\ndef spark_df(markdown_table: str, spark: SparkSession) -> DataFrame:\n \"\"\"\n Given SparkSessin and markdown representation of your data,\n function returns a Spark DataFrame with specified types.\n :param markdown_table: markdown representation of input data.\n :param spark: SparkSession\n :return: DataFrame with data and schema specified.\n \"\"\"\n table = make_table(markdown_table)\n column_names, types = get_column_names_types(table)\n table_data = get_data_from_table(table)\n output_table = []\n for row in table_data:\n output_table.append(tuple(map(get_python_type, zip(row, types))))\n spark_struct = _get_spark_struct(column_names, types)\n\n return spark.createDataFrame(output_table, spark_struct)\n\ndef _get_spark_struct(column_names: List[str],\n column_types: List[str]) -> StructType:\n \"\"\"\n Given column names nad column tapes,\n produces struct type for Spark DataFrame.\n :param column_names: column names in list\n :param columns_types: column types in list\n :returns: StructType.\n \"\"\"\n def struct_field(name_type):\n return StructField(name_type[0], _types_mapping(name_type[1]))\n\n spark_structs = map(struct_field, zip(column_names, column_types))\n\n return StructType(list(spark_structs))\n\ndef _types_mapping(column_type: str) -> Any:\n if column_type in INTEGER:\n return IntegerType()\n elif column_type in FLOAT:\n return FloatType()\n elif column_type in DOUBLE:\n return DoubleType()\n elif column_type in TIMESTAMP:\n return TimestampType()\n elif column_type in BIG_INTEGER:\n return LongType()\n elif column_type in SMALL_INTEGER:\n return ShortType()\n elif column_type in STRING:\n return StringType()\n elif _is_map_type(column_type):\n return _map_type(column_type)\n elif _is_array_type(column_type):\n return _array_type(column_type)\n\ndef _is_map_type(column_type: str) -> bool:\n \"\"\"\n Given column_type string 
returns a boolean indicating\n whether the given string describes a MapType.\n :param column_type: string description of \n column type\n :return: boolean - MapType or not.\n \"\"\"\n if column_type.find(\"map<\") == -1:\n return False\n else:\n return True\n\ndef _map_type(column_type: str) -> MapType:\n \"\"\"\n Given a column_type string, returns a MapType\n with the correct (key, value) types.\n :param column_type: string description of\n column_type.\n :returns: MapType\n \"\"\"\n key, value = list(map(lambda x: x.strip(), column_type[4:-1].split(',')))\n\n return MapType(_types_mapping(key), _types_mapping(value))\n\ndef _is_array_type(column_type: str) -> bool:\n \"\"\"\n Given a column_type string, returns a boolean indicating\n whether the given string describes an ArrayType.\n :param column_type: string description of \n column type\n :return: boolean - ArrayType or not.\n \"\"\"\n if column_type.find(\"array<\") == -1:\n return False\n else:\n return True\n\ndef _array_type(column_type: str) -> ArrayType:\n \"\"\"\n Given a column_type string, returns an ArrayType\n with the correct inside item type.\n :param column_type: string description of\n column_type.\n :returns: ArrayType\n \"\"\"\n inside = column_type[6:-1].strip()\n\n return ArrayType(_types_mapping(inside))\n", "repo_name": "Va1da2/markdown-frames", "sub_path": "markdown_frames/spark_dataframe.py", "file_name": "spark_dataframe.py", "file_ext": "py", "file_size_in_byte": 4010, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pyspark.sql.SparkSession", "line_number": 41, "usage_type": "name"}, {"api_name": "markdown_frames.utils.make_table", "line_number": 49, "usage_type": "call"}, {"api_name": "markdown_frames.utils.get_column_names_types", "line_number": 50, "usage_type": "call"}, {"api_name": "markdown_frames.utils.get_data_from_table", "line_number": 51, "usage_type": "call"}, {"api_name": "markdown_frames.utils.get_python_type", "line_number": 54, "usage_type": "argument"}, {"api_name": "pyspark.sql.DataFrame", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 60, "usage_type": "name"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 69, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 73, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 60, "usage_type": "name"}, {"api_name": "markdown_frames.type_definitions.INTEGER", "line_number": 76, "usage_type": "name"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 77, "usage_type": "call"}, {"api_name": "markdown_frames.type_definitions.FLOAT", "line_number": 78, "usage_type": "name"}, {"api_name": "pyspark.sql.types.FloatType", "line_number": 79, "usage_type": "call"}, {"api_name": "markdown_frames.type_definitions.DOUBLE", "line_number": 80, "usage_type": "name"}, {"api_name": "pyspark.sql.types.DoubleType", "line_number": 81, "usage_type": "call"}, {"api_name": "markdown_frames.type_definitions.TIMESTAMP", "line_number": 82, "usage_type": "name"}, {"api_name": "pyspark.sql.types.TimestampType", "line_number": 83, "usage_type": "call"}, {"api_name": "markdown_frames.type_definitions.BIG_INTEGER", "line_number": 84, "usage_type": "name"}, {"api_name": "pyspark.sql.types.LongType", "line_number": 85, "usage_type": "call"}, {"api_name": "markdown_frames.type_definitions.SMALL_INTEGER", "line_number": 86, "usage_type": "name"}, {"api_name": 
"pyspark.sql.types.ShortType", "line_number": 87, "usage_type": "call"}, {"api_name": "markdown_frames.type_definitions.STRING", "line_number": 88, "usage_type": "name"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 89, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 75, "usage_type": "name"}, {"api_name": "pyspark.sql.types.MapType", "line_number": 118, "usage_type": "call"}, {"api_name": "pyspark.sql.types.MapType", "line_number": 108, "usage_type": "name"}, {"api_name": "pyspark.sql.types.ArrayType", "line_number": 143, "usage_type": "call"}, {"api_name": "pyspark.sql.types.ArrayType", "line_number": 133, "usage_type": "name"}]} +{"seq_id": "71287322612", "text": "import collections\nimport dataclasses\nimport datetime\nimport json\nimport logging\nimport sys\nimport typing as t\n\nimport requests\n\nfrom .ping import Ping\n\nlogger = logging.getLogger(__name__)\n\nGoalName = str\n\nclass BeeminderClient:\n def __init__(self, user: str, token: str):\n self.user = user\n self.token = token\n\n def post_pings(self, goal: GoalName, ping_values: t.Mapping[Ping, float]) -> requests.Response:\n \"\"\"Upload a bunch of pings and values to a particular Beeminder goal.\"\"\"\n payload = [\n {\n 'timestamp': ping.unix_time,\n 'value': value,\n 'comment': f'{ping.unix_time} {\" \".join(sorted(ping.tags))} [{ping.comment}]',\n 'requestid': str(ping.unix_time),\n }\n for ping, value in ping_values.items()\n if value != 0\n ]\n jpayload = json.dumps(payload)\n logger.info(f'sending request to update {self.user}/{goal} with {len(payload)} datapoints ({len(jpayload)} chars)')\n logger.debug(f'payload: {jpayload}')\n return requests.post(\n url=f'https://www.beeminder.com/api/v1/users/{self.user}/goals/{goal}/datapoints/create_all.json',\n data={\n 'auth_token': self.token,\n 'datapoints': jpayload,\n }\n )\n\n\n def sync(\n self,\n pings: t.Iterable[Ping],\n scorers: t.Mapping[GoalName, t.Callable[[Ping], float]],\n ) -> None:\n \"\"\"Compute the values for all pings for all goals, and upload the data, overwriting any previous data for the given pings.\"\"\"\n for goal, score in scorers.items():\n # resp_json = requests.get(f'https://www.beeminder.com/api/v1/users/{beeminder_user}/goals/{goal}/datapoints.json?auth_token={beeminder_token}').json()\n # datapoints_by_time = {\n # dp['timestamp']: dp\n # for dp in resp_json\n # }\n resp = self.post_pings(\n goal=goal,\n ping_values={\n ping: score(ping)\n for ping in pings\n if ('OFF' not in ping.tags)\n },\n )\n if resp.status_code != 200:\n logger.warn(f'non-200 status code for {goal}: {resp.status_code}')\n logger.warn(f'details: {resp.text!r}')\n else:\n logger.debug(f'response: {resp.text!r}')\n\ndef load_scorers(python_code: str) -> t.Mapping[GoalName, t.Callable[[Ping], float]]:\n \"\"\"Parse a `{goal: (ping -> value)}` mapping from Python source code.\n\n The given `python_code` can be arbitrary Python code, as long as it defines a variable `SCORERS` of the right type.\n \"\"\"\n loc: t.Mapping[str, t.Any]\n glob: t.Mapping[str, t.Any]\n loc = glob = {}\n exec(python_code, glob, loc)\n return loc['SCORERS']\n", "repo_name": "speezepearson/beetime", "sub_path": "beetime/tagtime_beeminder_sync.py", "file_name": "tagtime_beeminder_sync.py", "file_ext": "py", "file_size_in_byte": 2581, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "typing.Mapping", "line_number": 
22, "usage_type": "attribute"}, {"api_name": "ping.Ping", "line_number": 22, "usage_type": "name"}, {"api_name": "ping.unix_time", "line_number": 26, "usage_type": "attribute"}, {"api_name": "ping.unix_time", "line_number": 28, "usage_type": "attribute"}, {"api_name": "ping.tags", "line_number": 28, "usage_type": "attribute"}, {"api_name": "ping.comment", "line_number": 28, "usage_type": "attribute"}, {"api_name": "ping.unix_time", "line_number": 29, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 34, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 37, "usage_type": "call"}, {"api_name": "requests.Response", "line_number": 22, "usage_type": "attribute"}, {"api_name": "typing.Iterable", "line_number": 48, "usage_type": "attribute"}, {"api_name": "ping.Ping", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 49, "usage_type": "attribute"}, {"api_name": "typing.Callable", "line_number": 49, "usage_type": "attribute"}, {"api_name": "ping.Ping", "line_number": 49, "usage_type": "name"}, {"api_name": "ping.tags", "line_number": 63, "usage_type": "attribute"}, {"api_name": "typing.Mapping", "line_number": 77, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 77, "usage_type": "attribute"}, {"api_name": "typing.Mapping", "line_number": 78, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 78, "usage_type": "attribute"}, {"api_name": "typing.Mapping", "line_number": 72, "usage_type": "attribute"}, {"api_name": "typing.Callable", "line_number": 72, "usage_type": "attribute"}, {"api_name": "ping.Ping", "line_number": 72, "usage_type": "name"}]} +{"seq_id": "2375305456", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom keras.applications import vgg16\n\n\n# In[2]:\n\n\nfrom keras.layers import Dense \n\n\n# In[3]:\n\n\nmodel = vgg16.VGG16(weights= 'imagenet' , include_top = False ,input_shape= (224,224,3))\n\n\n\n\n\n# In[4]:\n\n\nm=0\nfor i in model.layers:\n m = m+1\nprint(m)\n\n\n# In[5]:\n\n\nmodel.layers\n\n\n# In[ ]:\n\n\n\n\n\n# In[6]:\n\n\nmodel.layers[0].trainable = False\n\n\n# In[7]:\n\n\nmodel.layers[0]\n\n\n# In[8]:\n\n\nmodel.layers[0].trainable\n\n\n# In[9]:\n\n\nmodel.layers[1].trainable\n\n\n# In[10]:\n\n\nfor i in model.layers:\n i.trainable = False\nfor (j,layer) in enumerate (model.layers):\n print(str(i)+\" \" + layer.__class__.__name__,layer.trainable)\n\n\n# In[11]:\n\n\nmodel.output\n\n\n# In[12]:\n\n\nmodel.summary()\n\n\n# In[13]:\n\n\nfrom keras.layers import GlobalAveragePooling2D,Conv2D\n\n\n# In[14]:\n\n\ntop_model = model.output\n#top_model = Conv2D()\ntop_model = GlobalAveragePooling2D()(top_model)\ntop_model = Dense(1024,activation = 'relu')(top_model)\n\n\n# In[ ]:\n\n\n\n\n\n# In[15]:\n\n\ntop_model = Dense(1024 , activation = 'relu')(top_model)\ntop_model = Dense(512 , activation = 'relu')(top_model)\ntop_model = Dense(256 , activation = 'relu')(top_model)\ntop_model = Dense(3,activation = 'softmax')(top_model)\n\n\n# \n\n# In[16]:\n\n\nfrom keras.models import Sequential ,Model\nfrom keras.layers import Flatten,MaxPooling2D ,Conv2D\n\n\n# In[17]:\n\n\nmodel = Model(inputs = model.input , outputs = top_model)\n\n\n# In[18]:\n\n\nprint(model.summary())\n\n\n# In[19]:\n\n\nfrom keras.preprocessing.image import ImageDataGenerator\n\n\n# train_datagen = ImageDataGenerator(\n# rescale = 1./255,\n# rotation_range = 45,\n# width_shift_range = 0.3,\n# height_shift_range = 0.3,\n# horizontal_flip = True,\n# fill_mode = 'nearest')\n# 
validation_datgen = ImageDataGenerator(rescale = 1./255)\n# batch_size = 32\n# train_generator = train_datgen.flow_from_directory(\n# 'facetrain'\n# target_size = (224,224,3)\n# batch_size = batch_size\n# class_mode = 'categorical')\n# validation_generator\n\n# In[20]:\n\n\ntrain_datagen = ImageDataGenerator(rescale = 1./255,\n rotation_range = 45,\n width_shift_range = 0.3,\n zoom_range=0.2,\n shear_range = 0.2,\n height_shift_range = 0.3,\n horizontal_flip = True,\n fill_mode = 'nearest') \nvalidation_datagen = ImageDataGenerator(rescale = 1./255)\nbatch_size = 32 \ntrain_generator = train_datagen.flow_from_directory('mldata/facetrain',\n target_size = (224,224),\n batch_size = batch_size,\n class_mode = 'categorical')\nvalidation_generator = validation_datagen.flow_from_directory('/mldata/New folder (2)',\n target_size = (224,224),\n batch_size = batch_size,\n class_mode = 'categorical')\n\n\n# In[21]:\n\n\n\nfrom keras.callbacks import ModelCheckpoint , EarlyStopping\n\n\n# In[22]:\n\n\nCheckpoint =ModelCheckpoint('face_recognition' ,\n monitor ='val_loss',\n verbose=1,\n mode = 'min',\n save_best_only=True)\nStopping = EarlyStopping( monitor='val_loss',\n min_delta=0,\n patience=3,\n verbose=1,\n restore_best_weights=True,)\ncallback = [Checkpoint , Stopping]\nmodel.compile(optimizer = 'adam' , loss ='categorical_crossentropy' , metrics=['accuracy'])\nepoch = 3\nbatch_size = 16\nhistory = model.fit_generator(train_generator,\n steps_per_epoch = 337//batch_size, \n epochs= epoch,\n callbacks = callback,\n validation_data = validation_generator,\n validation_steps = 17//batch_size)\n \n\n\n# In[23]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n", "repo_name": "sonaa-gupta/devops2", "sub_path": "mltask2/sonaa.py", "file_name": "sonaa.py", "file_ext": "py", "file_size_in_byte": 4515, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "keras.applications.vgg16.VGG16", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.applications.vgg16", "line_number": 19, "usage_type": "name"}, {"api_name": "keras.layers.GlobalAveragePooling2D", "line_number": 102, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 103, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 115, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 116, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 118, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 133, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 167, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 175, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 197, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 202, "usage_type": "call"}]} +{"seq_id": "23345022739", "text": "import uuid\nfrom cloudinary.models import CloudinaryField\nfrom django.db import models\nfrom django.utils.text import slugify\nfrom django_dbq.models import Job\nfrom project.accounts.models import User\n\n\nclass Tag(models.Model):\n\n class Meta:\n verbose_name_plural = \"Tags\"\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n created = models.DateTimeField(auto_now_add=True, db_index=True)\n 
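Incidentally, the layer-listing loop in the `sonaa.py` record above prints the stale loop variable `i` (the last layer object from the previous loop) where the enumerate index `j` was clearly intended. Condensed to its essentials, the pattern the record builds is transfer learning with a frozen VGG16 base and a small trainable head; a minimal sketch, assuming Keras is installed and the ImageNet weights can be downloaded:

```python
from keras.applications import vgg16
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model

# Frozen VGG16 convolutional base + small trainable head, as in the record
base = vgg16.VGG16(weights="imagenet", include_top=False, input_shape=(224, 224, 3))
for layer in base.layers:
    layer.trainable = False  # keep the ImageNet features fixed

x = GlobalAveragePooling2D()(base.output)
x = Dense(256, activation="relu")(x)
out = Dense(3, activation="softmax")(x)  # 3 classes, matching the record

model = Model(inputs=base.input, outputs=out)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()
```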
modified = models.DateTimeField(auto_now=True)\n\n name = models.SlugField(unique=True)\n\n def __str__(self):\n return self.name\n\n\nclass Entry(models.Model):\n\n class Meta:\n ordering = ['-created']\n verbose_name_plural = \"Entries\"\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n created = models.DateTimeField(auto_now_add=True, db_index=True)\n modified = models.DateTimeField(auto_now=True)\n\n name = models.CharField(max_length=255, unique=True)\n slug = models.SlugField(unique=True)\n short_description = models.CharField(max_length=255)\n author = models.ForeignKey(User, related_name=\"entries\")\n tags = models.ManyToManyField(Tag, related_name=\"tags\", blank=True)\n\n cover = CloudinaryField(null=True, blank=True)\n website_url = models.URLField(null=True, blank=True)\n repo_url = models.URLField(null=True, blank=True)\n description = models.TextField(null=True, blank=True)\n\n is_approved = models.BooleanField(default=False)\n is_featured = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n def _get_cover_link(self, url):\n if not url:\n return None\n\n return u'' % (url.url)\n\n def thumbnail_cover(self):\n return self._get_cover_link(self.cover)\n thumbnail_cover.short_description = 'Cover Thumbnail'\n thumbnail_cover.allow_tags = True\n\n def send_user_approval_email(self):\n \"\"\" Notify the user that the submission got approved \"\"\"\n workspace = {\n \"subject\": \"Your submission to Built with Electron got approved.\",\n \"recipient_list\": [self.author.email],\n \"mail_params\": {\n 'username': self.author.username,\n 'entry_name': self.name,\n 'entry_slug': self.slug\n },\n \"plain_template\": \"emails/submission_approval.txt\",\n \"html_template\": \"emails/submission_approval.html\"\n }\n\n Job.objects.create(name='send_email', workspace=workspace)\n\n def send_admins_new_entry_email(self):\n \"\"\" Notify admins that a new submission awaits approval \"\"\"\n admins = User.objects.filter(is_active=True, is_admin=True)\n admins_emails = [user.email for user in admins]\n\n workspace = {\n \"subject\": \"There is a new submission at Built with Electron.\",\n \"recipient_list\": admins_emails,\n \"mail_params\": {\n 'entry_name': self.name\n },\n \"plain_template\": \"emails/admin_new_submission.txt\",\n \"html_template\": \"emails/admin_new_submission.html\"\n }\n\n Job.objects.create(name='send_email', workspace=workspace)\n\n def save(self, *args, **kwargs):\n \"\"\" If `is_approved` changed and is True, email the user \"\"\"\n try:\n prev_state = Entry.objects.get(id=self.id).is_approved\n new_state = self.is_approved\n\n if (prev_state != new_state) and new_state:\n self.send_user_approval_email()\n\n except self.DoesNotExist:\n pass\n\n # Auto-populate the slugfield\n if not self.slug:\n self.slug = slugify(self.name)\n\n super(Entry, self).save(*args, **kwargs)\n", "repo_name": "manosim/builtwithelectron", "sub_path": "project/directory/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 3728, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.db.models.Model", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.UUIDField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 14, 
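The `models.py` record above detects an `is_approved` transition inside `save()` by re-reading the stored row, and auto-populates `slug` only when it is empty. Note that its `models.ForeignKey(User, related_name="entries")` omits `on_delete`, which Django has required since 2.0. A minimal sketch of the same save-override pattern on a hypothetical model inside a Django app:

```python
from django.db import models
from django.utils.text import slugify

class Article(models.Model):  # hypothetical model illustrating the record's pattern
    name = models.CharField(max_length=255)
    slug = models.SlugField(unique=True, blank=True)
    is_approved = models.BooleanField(default=False)

    def save(self, *args, **kwargs):
        # Compare against the stored row to detect an approval transition,
        # the same trick the record's Entry.save uses
        try:
            was_approved = Article.objects.get(pk=self.pk).is_approved
            if not was_approved and self.is_approved:
                ...  # side effect on approval (the record queues an email job)
        except Article.DoesNotExist:
            pass
        if not self.slug:
            self.slug = slugify(self.name)  # populate once, never overwrite
        super().save(*args, **kwargs)
```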
"usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.UUIDField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 37, "usage_type": "call"}, {"api_name": "project.accounts.models.User", "line_number": 37, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "cloudinary.models.CloudinaryField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models.URLField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 46, "usage_type": "name"}, {"api_name": "django_dbq.models.Job.objects.create", "line_number": 76, "usage_type": "call"}, {"api_name": "django_dbq.models.Job.objects", "line_number": 76, "usage_type": "attribute"}, {"api_name": "django_dbq.models.Job", "line_number": 76, "usage_type": "name"}, {"api_name": "project.accounts.models.User.objects.filter", "line_number": 80, "usage_type": "call"}, {"api_name": "project.accounts.models.User.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "project.accounts.models.User", 
"line_number": 80, "usage_type": "name"}, {"api_name": "django_dbq.models.Job.objects.create", "line_number": 93, "usage_type": "call"}, {"api_name": "django_dbq.models.Job.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "django_dbq.models.Job", "line_number": 93, "usage_type": "name"}, {"api_name": "django.utils.text.slugify", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "10159296405", "text": "import csv\nimport doctest\n\nfrom typing import Any\n\n\nclass FindNumber:\n \"\"\"\n >>> array = FindNumber.from_csv(\"MOCK_DATA_sorted.csv\")\n >>> FindNumber.from_list(array, \"99996\")\n 999\n >>> array = FindNumber.from_csv(\"MOCK_DATA.csv\")\n >>> FindNumber.from_list(array, \"99996\")\n 972\n \"\"\"\n #>>> FindNumber.from_sorted(array, \"99996\")\n #972\n #\"\"\"\n @staticmethod\n def from_csv(file: str) -> list:\n with open(file, mode='r') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n contents = []\n line_count = 0\n for row in csv_reader:\n contents.extend(row)\n if line_count == 0:\n line_count += 1\n line_count += 1\n return contents\n\n @staticmethod\n def from_list(array: list, element: Any) -> int:\n for i in range(len(array)):\n if array[i] == element:\n return i\n return -1\n\n @staticmethod\n def from_sorted(array: list, element: Any) -> int:\n n = int(len(array) / 2)\n print(f\"First n: {n}\")\n while array[n] != element:\n print(n)\n print(f\"Array len: {len(array)}\")\n if array[n] == element:\n break\n elif array[n] > element:\n array = array[:n]\n else:\n array = array[n:]\n if len(array) <= 2:\n n = 0\n else:\n n = int(len(array) / 2)\n print(n)\n print(f\"Array len: {len(array)}\")\n print(array)\n return n\n\n\n#nums = FindNumber.from_csv(\"MOCK_DATA.csv\")\n#print(FindNumber.from_sorted(nums, \"99996\"))\n\n\ndoctest.run_docstring_examples(FindNumber, globals())\n\n", "repo_name": "lukaqueres/Algorytms", "sub_path": "findNumber.py", "file_name": "findNumber.py", "file_ext": "py", "file_size_in_byte": 1765, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "csv.reader", "line_number": 22, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 40, "usage_type": "name"}, {"api_name": "doctest.run_docstring_examples", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "39064438638", "text": "\"\"\"\nPurpose: Performs custom serilization of all post related data.\n\"\"\"\n\nfrom blog.models.blog import Post\n\nfrom django.contrib.auth.models import User\n\nclass PostCustomSerializer():\n \"\"\"Class generates a Post model instance via \n raw response object data.\n \"\"\"\n\n def __init__(self, data):\n self.data = data\n\n def generate(self, blog_heading):\n \"\"\"Returns a Post model instance.\"\"\"\n\n username = self.data['user']\n user = User.objects.get(username=username)\n \n blog_post = Post(\n post_title=self.data['post_title'],\n article=self.data['article'],\n heading=blog_heading,\n slug=self.data['slug'],\n status=self.data['status'],\n post_image=self.data['preview_image'],\n user=user\n )\n\n return blog_post\n", "repo_name": "AcePro-Engineer/JBBlogsv2", "sub_path": "ENV/root/backend/services/utils/customserilizers/post_custom_serilizer.py", "file_name": "post_custom_serilizer.py", "file_ext": "py", "file_size_in_byte": 847, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": 
[{"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 21, "usage_type": "name"}, {"api_name": "blog.models.blog.Post", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "17157557544", "text": "from datetime import datetime\n\n### FECHA Y HORA ACTUALES ###\ntoday = datetime.today()\nprint(f'{today:%d de %B de %Y}, {today:%H:%m}')\n\n### COMPROBACIÓN DE SEGUNDO PERTENECIENTE A UNA LISTA ###\nsegundos_magicos = [2, 5, 8, 13, 17, 19, 21, 23, 28, 32, 36, 38, 40, 42, 47, 49, 51, 52, 53, 56, 58]\nsegundo_actual = datetime.today().second\nif segundo_actual in segundos_magicos:\n print(f'\\t*** {segundo_actual} ***\\n\\t¡Este es un segundo mágico!')\nelse:\n print('\\t' + str(segundo_actual) + ' - Sigue jugando, seguro que encuentras tu segundo mágico.')\n", "repo_name": "CarlosTechTalents/curso-python-i", "sub_path": "03-fecha_y_hora.py", "file_name": "03-fecha_y_hora.py", "file_ext": "py", "file_size_in_byte": 557, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "datetime.datetime.today", "line_number": 4, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 4, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "6632547870", "text": "import typing\r\nfrom collections import deque\r\n\r\nK = typing.TypeVar(\"K\")\r\nV = typing.TypeVar(\"V\")\r\n\r\n\r\nclass Pair(typing.NamedTuple):\r\n key: K\r\n value: V\r\n\r\n\r\nclass HashMap(typing.Generic[K, V]):\r\n\r\n def __init__(self, capacity: typing.Optional[int] = 50, load_factor: typing.Optional[float] = 0.75) -> None:\r\n if capacity < 1:\r\n raise ValueError(\"Capacity must be a positive integer!\")\r\n\r\n self._capacity = capacity\r\n self._load_factor = load_factor\r\n self._load_factor_multiplier = 2\r\n self._buckets: list[deque[Pair]] = [deque() for _ in range(self._capacity)]\r\n\r\n @classmethod\r\n def from_dict(cls, dictionary: dict[K, V], capacity: typing.Optional[int] = None) -> \"HashMap\":\r\n hash_table = cls(capacity or len(dictionary) * 10)\r\n for key, value in dictionary.items():\r\n hash_table[key] = value\r\n return hash_table\r\n\r\n @classmethod\r\n def from_hashtable(cls, hash_table: \"HashMap[K, V]\", capacity: typing.Optional[int] = 50) -> \"HashMap\":\r\n ht = cls(capacity)\r\n for key, value in hash_table.pairs:\r\n ht[key] = value\r\n return ht\r\n\r\n @property\r\n def pairs(self) -> list[Pair]:\r\n return [p for d in self._buckets for p in d]\r\n\r\n @property\r\n def keys(self) -> set[K]:\r\n return {p.key for p in self.pairs}\r\n\r\n @property\r\n def values(self) -> list[V]:\r\n return [p.value for p in self.pairs]\r\n\r\n def get(self, key: K, default: typing.Optional[V] = None) -> V:\r\n try:\r\n value = self[key].value\r\n except KeyError:\r\n value = default\r\n return value\r\n\r\n def _hash(self, item: K) -> int:\r\n return hash(item) % self._capacity\r\n\r\n def _get_bucket(self, key: K) -> deque[Pair]:\r\n return self._buckets[self._hash(key)]\r\n\r\n def _load_factor_exceeded(self) -> bool:\r\n return (len(self) / self._capacity) >= self._load_factor\r\n\r\n def _resize_and_rehash(self) -> None:\r\n self._capacity *= self._load_factor_multiplier\r\n 
self._buckets = self.from_hashtable(self, self._capacity)._buckets\r\n\r\n def __len__(self) -> int:\r\n return sum(len(d) for d in self._buckets)\r\n\r\n def __str__(self):\r\n pairs = []\r\n for key, value in self.pairs:\r\n pairs.append(f\"{key!r}: {value!r}\")\r\n return \"{\" + \", \".join(pairs) + \"}\"\r\n\r\n def __repr__(self):\r\n cls = self.__class__.__name__\r\n return f\"{cls}.from_dict({str(self)})\"\r\n\r\n def __iter__(self) -> typing.Generator[K, None, None]:\r\n yield from self.keys\r\n\r\n def __getitem__(self, key: K) -> V:\r\n bucket = self._get_bucket(key)\r\n for pair in bucket:\r\n if pair.key == key:\r\n return pair.value\r\n raise KeyError(key)\r\n\r\n def __setitem__(self, key: K, value: V) -> None:\r\n if self._load_factor_exceeded():\r\n self._resize_and_rehash()\r\n\r\n bucket = self._get_bucket(key)\r\n for index, pair in enumerate(bucket):\r\n if pair.key == key:\r\n bucket[index] = Pair(key, value)\r\n break\r\n else:\r\n bucket.append(Pair(key, value))\r\n\r\n def __delitem__(self, key: K) -> None:\r\n bucket = self._get_bucket(key)\r\n for index, pair in enumerate(bucket):\r\n if pair.key == key:\r\n del bucket[index]\r\n break\r\n else:\r\n raise KeyError(key)\r\n\r\n def __contains__(self, key: K) -> bool:\r\n try:\r\n self[key]\r\n except KeyError:\r\n return False\r\n return True\r\n\r\n\r\nif __name__ == \"__main__\":\r\n h: HashMap[str, int] = HashMap()\r\n h[\"hey\"] = 23\r\n h[\"five\"] = 5\r\n h[\"easy\"] = 0\r\n h[\"difficult\"] = 100\r\n print(h)\r\n", "repo_name": "danielafriyie/algos", "sub_path": "hashtables/python/hashmap.py", "file_name": "hashmap.py", "file_ext": "py", "file_size_in_byte": 3797, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.TypeVar", "line_number": 4, "usage_type": "call"}, {"api_name": "typing.TypeVar", "line_number": 5, "usage_type": "call"}, {"api_name": "typing.NamedTuple", "line_number": 8, "usage_type": "attribute"}, {"api_name": "typing.Generic", "line_number": 13, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 15, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 25, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 32, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 50, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 60, "usage_type": "name"}, {"api_name": "typing.Generator", "line_number": 83, "usage_type": "attribute"}]} +{"seq_id": "43038028717", "text": "from collections import deque\nfrom collections import namedtuple\n\nNode = namedtuple('Node', ['r', 'c'])\nConfig = namedtuple('Config', ['h', 'w', 'town'])\n\n\ndef main():\n INF = 10**10\n h, w = map(int, map(int, input().split()))\n town = []\n for _ in range(h):\n town.append(input())\n config = Config(h, w, town)\n # 01BFS\n dist = [[INF]*w for _ in range(h)]\n que = deque()\n start_node = Node(r=0, c=0)\n dist[0][0] = 0\n que.appendleft(start_node)\n while que:\n pre_node = que.popleft()\n if pre_node.r == h-1 and pre_node.c == w-1:\n break\n # COST 0\n neighbor = cost0_neighbor_of(pre_node, config)\n for cur_node in neighbor:\n if dist[cur_node.r][cur_node.c] <= dist[pre_node.r][pre_node.c]:\n continue\n dist[cur_node.r][cur_node.c] = dist[pre_node.r][pre_node.c]\n que.appendleft(cur_node)\n\n # COST1\n neighbor = cost1_neighbor_of(pre_node, config)\n 
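The `hashmap.py` record above implements separate chaining: each bucket is a deque of key/value pairs, lookups walk the chain, and capacity doubles once the load factor is exceeded. Its core insert logic, reduced to a few lines (a sketch mirroring the record's `_hash` and `__setitem__`):

```python
from collections import deque

# Minimal separate-chaining insert over a fixed-capacity bucket array
capacity = 4
buckets = [deque() for _ in range(capacity)]

def put(key, value):
    bucket = buckets[hash(key) % capacity]
    for i, (k, _) in enumerate(bucket):
        if k == key:             # update in place on key match
            bucket[i] = (key, value)
            return
    bucket.append((key, value))  # otherwise chain onto the bucket

put("a", 1); put("b", 2); put("a", 3)
assert dict(p for d in buckets for p in d) == {"a": 3, "b": 2}
```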
for cur_node in neighbor:\n if dist[cur_node.r][cur_node.c] <= dist[pre_node.r][pre_node.c]+1:\n continue\n dist[cur_node.r][cur_node.c] = dist[pre_node.r][pre_node.c]+1\n que.append(cur_node)\n\n print(dist[-1][-1])\n\n\ndef cost0_neighbor_of(node: Node, config: Config) -> list:\n dr = [0, 1, 0, -1]\n dc = [1, 0, -1, 0]\n ret = []\n for i in range(4):\n cur_r = node.r+dr[i]\n cur_c = node.c+dc[i]\n if cur_r < 0 or cur_r >= config.h or cur_c < 0 or cur_c >= config.w:\n continue\n if config.town[cur_r][cur_c] == '#':\n continue\n ret.append(Node(r=cur_r, c=cur_c))\n return ret\n\n\ndef cost1_neighbor_of(node: Node, config: Config) -> list:\n \"\"\"\n X###X\n #####\n ##@##\n #####\n X###X\n \"\"\"\n dr_list = [-2, -1, 0, 1, 2]\n dc_list = [-2, -1, 0, 1, 2]\n ret = []\n for dr in dr_list:\n for dc in dc_list:\n if abs(dr) == 2 and abs(dc) == 2:\n continue\n if dr==0 and dc == 0:\n continue\n cur_r = node.r+dr\n cur_c = node.c+dc\n if cur_r < 0 or cur_r >= config.h or cur_c < 0 or cur_c >= config.w:\n continue\n ret.append(Node(r=cur_r, c=cur_c))\n return ret\n\n\nmain()\n", "repo_name": "batamorphism/coding", "sub_path": "Python/AtCoder/old/_abc213_e.py", "file_name": "_abc213_e.py", "file_ext": "py", "file_size_in_byte": 2302, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "collections.namedtuple", "line_number": 4, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 5, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "18445672112", "text": "#!/usr/bin/env python3\n\nimport argparse\nimport configparser\nimport os\nimport shutil\nfrom jinja2 import Template\nfrom typing import Callable, Union, List\nfrom functools import reduce\n\n\ndef parse_args():\n \"\"\"Return parsed args when this file is executed rather than imported.\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Render of a folder tree of jinja templates, from an INI file.\")\n\n parser.add_argument(\"source\",\n type=str,\n help=\"path to templates to render\")\n parser.add_argument(\"conf\",\n type=str,\n nargs='+',\n help=\"path(s) to the configuration file(s)\")\n parser.add_argument(\"-o\", \"--output\",\n dest='destination',\n type=str,\n help=\"path to the configuration file (default: render in-place)\")\n parser.add_argument(\"-e\", \"--extension\",\n type=str,\n default='',\n help=\"only attempt to render files with this extension (and just copy other files); \"\n \"the custom extension will be stripped from the rendered filenames\")\n\n declared_args = parser.parse_args()\n\n return declared_args\n\n\ndef config_path_to_configparser_instance(item: Union[configparser.ConfigParser, str]) -> configparser.ConfigParser:\n \"\"\"Convert a path string to fully loaded ConfigParser instances.\n If the provided argument is already a ConfigParser instances, it would be returned intact.\n \"\"\"\n if type(item) is str:\n config = configparser.ConfigParser()\n config.read(item)\n return config\n return item\n\n\ndef merge_configs(config: Union[configparser.ConfigParser, str, List[Union[configparser.ConfigParser, str]]]) \\\n -> configparser.ConfigParser:\n \"\"\"Take a list of ConfigParser instances and path strings to config files, and merge them all into a single\n ConfigParser instance.\n \"\"\"\n # Convert to list\n if type(config) in [str, configparser.ConfigParser]:\n config = [config]\n\n # Load all config files\n config = 
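The `_abc213_e.py` record above is a 0-1 BFS: cost-0 moves are pushed to the front of the deque and cost-1 moves to the back, so the deque stays ordered by distance and every node settles at its true distance without a priority queue. The same idea on a generic graph with edge weights in {0, 1}:

```python
from collections import deque

def zero_one_bfs(n, adj, start):
    """adj[u] = [(v, w), ...] with w in {0, 1}; returns distances from start."""
    INF = float("inf")
    dist = [INF] * n
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                if w == 0:
                    dq.appendleft(v)  # same layer, explore immediately
                else:
                    dq.append(v)      # next layer
    return dist

adj = {0: [(1, 0), (2, 1)], 1: [(2, 0)], 2: []}
assert zero_one_bfs(3, adj, 0) == [0, 0, 0]  # 0->1->2 is free, beating the weight-1 edge
```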
list(map(config_path_to_configparser_instance, config))\n\n # Get a unique list of all sections\n sections = reduce(lambda s, x: s.union(x.sections()), config, set())\n\n # Merge all configs section-by-section\n merged = configparser.ConfigParser()\n for section in sections:\n merged[section] = reduce(lambda d, x: dict(**d, **x[section]) if section in x else d, config, {})\n\n return merged\n\n\nclass JinjaWalk:\n \"\"\"JinjaWalk() -> new instance of a template tree walker.\n JinjaWalk(filename_filter, filename_modifier) -> new instance with custom filename modifiers\n \"\"\"\n def __init__(self,\n filename_filter: Callable[[str], bool] = lambda s: True,\n filename_modifier: Callable[[str], str] = lambda s: s) -> None:\n self.filename_filter = filename_filter\n self.filename_modifier = filename_modifier\n\n def walk(self, config: Union[configparser.ConfigParser, str, List[Union[configparser.ConfigParser, str]]],\n source_dir: str, output_dir: str, namespace: str = 'config'):\n \"\"\"Render a template tree using key-value pairs from given config file(s)\"\"\"\n assert namespace == namespace.strip()\n config = merge_configs(config)\n\n for root, dirs, files in os.walk(source_dir):\n if output_dir is None:\n # render templates in place\n output_folder = root\n else:\n # render templates in a user-specified destination\n relative_root = root[len(source_dir):]\n output_folder = os.path.join(output_dir, relative_root.strip(os.path.sep))\n os.makedirs(output_folder, exist_ok=True)\n\n for file in files:\n full_source_file_path = os.path.join(root, file)\n if self.filename_filter(file):\n with open(full_source_file_path, 'r') as fd:\n data = fd.read()\n template = Template(data)\n rendered_template_base_filename = self.filename_modifier(file)\n full_destination_file_path = os.path.join(output_folder, rendered_template_base_filename)\n kwargs = {namespace: config}\n template.stream(**kwargs).dump(full_destination_file_path)\n else:\n if output_folder != root:\n # copy is needed only if this is a not in-place rendering (otherwise shutil.SameFileError)\n shutil.copy(full_source_file_path, output_folder)\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n if args.extension != '':\n walker = JinjaWalk(filename_filter=lambda s: s.endswith(args.extension),\n filename_modifier=lambda s: s[:-len(args.extension)])\n else:\n walker = JinjaWalk()\n\n walker.walk(args.conf, args.source, args.destination)\n", "repo_name": "LightSoar/jinjawalk", "sub_path": "jinjawalk.py", "file_name": "jinjawalk.py", "file_ext": "py", "file_size_in_byte": 5078, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 39, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 39, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 44, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 50, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 50, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 50, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 56, "usage_type": "attribute"}, {"api_name": "functools.reduce", "line_number": 63, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 66, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 68, 
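In the `jinjawalk.py` record above, `merge_configs` folds sections together with `dict(**d, **x[section])`, which raises `TypeError: got multiple values for keyword argument ...` whenever two config files repeat a key in the same section, precisely the override case a merge exists for. The `{**d, **s}` spelling merges with later-wins semantics instead:

```python
import configparser
from functools import reduce

def merge_sections(parsers, section):
    # {**d, **s} lets later configs override earlier ones; the record's
    # dict(**d, **x[section]) raises TypeError when a key repeats
    return reduce(
        lambda d, p: {**d, **p[section]} if section in p else d, parsers, {}
    )

a, b = configparser.ConfigParser(), configparser.ConfigParser()
a.read_string("[db]\nhost = localhost\nport = 5432")
b.read_string("[db]\nhost = prod.example.com")
assert merge_sections([a, b], "db") == {"host": "prod.example.com", "port": "5432"}
```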
"usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 51, "usage_type": "attribute"}, {"api_name": "typing.Callable", "line_number": 78, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 83, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 83, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 83, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "jinja2.Template", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "34656526712", "text": "from random import choice\r\nimport pyglet\r\n\r\nwindow = pyglet.window.Window(width=400, height = 450, caption=\"GameWindow\")\r\n\r\nIm1 = pyglet.image.load('avocado.png')\r\nIm2 = pyglet.image.load('banana-juice.png')\r\nIm3 = pyglet.image.load('coconut.png')\r\nIm4 = pyglet.image.load('fruit.png')\r\nIm5 = pyglet.image.load('watermelon.png')\r\n\r\ndef InitializeGrid(board):\r\n #Initialize Grid by reading in from file\r\n #Initialize Grid by random value\r\n for i in range (8):\r\n for j in range(8):\r\n board[i][j] = choice(['A','B','C','D','E'])\r\n #print(\"Initializing grid\")\r\n\r\ndef Initialize(board):\r\n #Initialize game\r\n #Initialize grid\r\n # Set up the grid with an initial arrangement of letters\r\n InitializeGrid(board)\r\n #Initialize score\r\n global score\r\n score = 0\r\n #Initialize turn number\r\n global turn\r\n turn = 1\r\n\r\ndef ContinueGame(current_score, goal_score=100):\r\n #Return false if game should end, true if game is not over\r\n # If score is greater than goal score, return false\r\n # Otherwise, return true\r\n if (current_score >= goal_score):\r\n return False\r\n else:\r\n return True\r\n\r\ndef SwapPieces(board, move):\r\n\r\n #Swap objects in two positions\r\n temp = board[move[0]][move[1]]\r\n board[move[0]][move[1]]= board[move[2]][move[3]]\r\n board[move[2]][move[3]] = temp\r\n\r\n@window.event\r\ndef on_draw():\r\n window.clear()\r\n for i in range(7,-1,-1):\r\n #Draw each row\r\n y=50+(50*i)\r\n for j in range(8):\r\n #draw each piece, first getting position\r\n x = 50 * j\r\n if board[i][j] == 'A':\r\n Im1.blit(x,y)\r\n elif board[i][j] == 'B':\r\n Im2.blit(x,y)\r\n elif board[i][j] == 'C':\r\n Im3.blit(x,y)\r\n elif board[i][j] == 'D':\r\n Im4.blit(x,y)\r\n elif board[i][j] == 'E':\r\n Im5.blit(x,y)\r\n label = pyglet.text.Label('Turn: '+str(turn)+' Score: '+str(score),\r\n font_name='Arial',\r\n font_size=18,\r\n x=20,y=10)\r\n label.draw()\r\n\r\ndef RemovePieces(board):\r\n #Remove 3-in-a-row and 3-in-a-col pieces\r\n #Create board to store remove-or-not\r\n remove = [[0,0,0,0,0,0,0,0],\r\n [0,0,0,0,0,0,0,0],\r\n [0,0,0,0,0,0,0,0],\r\n [0,0,0,0,0,0,0,0],\r\n [0,0,0,0,0,0,0,0],\r\n [0,0,0,0,0,0,0,0],\r\n [0,0,0,0,0,0,0,0],\r\n [0,0,0,0,0,0,0,0]]\r\n #Go through rows\r\n for i in range(8):\r\n for j in range(6):\r\n if 
(board[i][j] == board[i][j+1]) and (board[i][j] == board[i][j+2]):\r\n #three in a row are the same!\r\n remove[i][j] = 1;\r\n remove[i][j+1] = 1;\r\n remove[i][j+2] =1;\r\n\r\n #Go through columns\r\n for j in range(8):\r\n for i in range(6):\r\n if (board[i][j]==board[i+1][j]) and (board[i][j] == board[i+2][j]):\r\n #three in a row are the same!\r\n remove[i][j] = 1;\r\n remove[i+1][j] = 1;\r\n remove[i+2][j]=1;\r\n #Eliminate those marked\r\n global score\r\n removed_any = False\r\n for i in range(8):\r\n for j in range(8):\r\n if remove[i][j] == 1:\r\n board[i][j] = 0\r\n score += 1\r\n removed_any = True\r\n return removed_any\r\n\r\ndef DropPieces(board):\r\n #Drop pieces to fill in blanks\r\n for j in range(8):\r\n #make list of pieces in the column\r\n listofpieces = []\r\n for i in range(8):\r\n if board[i][j] != 0:\r\n listofpieces.append(board[i][j])\r\n #copy that list into column\r\n for i in range(len(listofpieces)):\r\n board[i][j] = listofpieces[i]\r\n #fill in remainder of column with 0s\r\n for i in range(len(listofpieces),8):\r\n board[i][j] = 0\r\n\r\ndef FillBlank(board):\r\n #Fill blanks with random pieces\r\n for i in range(8):\r\n for j in range(8):\r\n if (board[i][j] == 0):\r\n board[i][j] = choice(['A','B','C','D','E'])\r\n\r\ndef Update(board, move):\r\n #Update the board according to move\r\n SwapPieces(board,move)\r\n pieces_eliminated = True\r\n while pieces_eliminated:\r\n pieces_eliminated = RemovePieces(board)\r\n DropPieces(board)\r\n FillBlank(board)\r\n\r\n@window.event\r\ndef on_mouse_press(x,y,button,modifiers):\r\n #Get the starting cell\r\n global startx\r\n global starty\r\n startx = x\r\n starty = y\r\n\r\n@window.event\r\ndef on_mouse_release(x, y, button, modifiers):\r\n #Get starting and ending cell and see if they are adjacent\r\n startcol = startx//50\r\n startrow = (starty-50)//50\r\n endcol = x//50\r\n endrow = (y-50)//50\r\n #Check whether ending is adjacent to starting and if so, make move.\r\n if ((startcol == endcol and startrow==endrow - 1) or (startrow==endrow and startcol==endcol+1)or (startcol==endcol and startrow==endrow+1) or (startrow==endrow and startcol==endcol-1)):\r\n Update(board,[startrow,startcol,endrow,endcol])\r\n global turn\r\n turn+=1\r\n #See if game is over\r\n if not ContinueGame(score):\r\n print(\"You won in\", turn,\"turns!\",\"Final score was \", score,\".\")\r\n pyglet.app.exit()\r\n\r\n#State main variables\r\n#Set the user's score to zero\r\nscore = 100\r\n#Set any other variables (such as counter for turn number) to initial values\r\nturn = 100\r\ngoalscore = 100\r\nboard = [[0,0,0,0,0,0,0,0],\r\n [0,0,0,0,0,0,0,0],\r\n [0,0,0,0,0,0,0,0],\r\n [0,0,0,0,0,0,0,0],\r\n [0,0,0,0,0,0,0,0],\r\n [0,0,0,0,0,0,0,0],\r\n [0,0,0,0,0,0,0,0],\r\n [0,0,0,0,0,0,0,0]]\r\n#Initialize\r\nInitialize(board)\r\n\r\npyglet.app.run()\r\n\r\nprint('Icons made by Freepik from www.flaticon.com')\r\nprint('Icons made by Vitaly Gorbachev from www.flaticon.com')\r\nprint('Icons made by monkik from www.flaticon.com')\r\n\r\n\r\n", "repo_name": "jlauritzen8/game-design-with-images", "sub_path": "Game with Images.py", "file_name": "Game with Images.py", "file_ext": "py", "file_size_in_byte": 6433, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pyglet.window.Window", "line_number": 4, "usage_type": "call"}, {"api_name": "pyglet.window", "line_number": 4, "usage_type": "attribute"}, {"api_name": "pyglet.image.load", "line_number": 6, "usage_type": "call"}, 
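`DropPieces` in the match-3 game record above applies column gravity: it collects the surviving pieces of each column and pads the remainder with zeros (row index 0 is the bottom of the board in this layout). The per-column step in isolation:

```python
def drop_column(column):
    """Compact one board column after removals: pieces fall, zeros rise."""
    pieces = [cell for cell in column if cell != 0]
    return pieces + [0] * (len(column) - len(pieces))

# Index 0 is the bottom row, so surviving pieces settle at the front
assert drop_column(["A", 0, "B", 0, "C"]) == ["A", "B", "C", 0, 0]
```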
{"api_name": "pyglet.image", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pyglet.image.load", "line_number": 7, "usage_type": "call"}, {"api_name": "pyglet.image", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pyglet.image.load", "line_number": 8, "usage_type": "call"}, {"api_name": "pyglet.image", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pyglet.image.load", "line_number": 9, "usage_type": "call"}, {"api_name": "pyglet.image", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pyglet.image.load", "line_number": 10, "usage_type": "call"}, {"api_name": "pyglet.image", "line_number": 10, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 17, "usage_type": "call"}, {"api_name": "pyglet.text.Label", "line_number": 67, "usage_type": "call"}, {"api_name": "pyglet.text", "line_number": 67, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 132, "usage_type": "call"}, {"api_name": "pyglet.app.exit", "line_number": 166, "usage_type": "call"}, {"api_name": "pyglet.app", "line_number": 166, "usage_type": "attribute"}, {"api_name": "pyglet.app.run", "line_number": 185, "usage_type": "call"}, {"api_name": "pyglet.app", "line_number": 185, "usage_type": "attribute"}]} +{"seq_id": "70407675893", "text": "import requests\n\ncountry = input(\"Enter a country you want to check\")\nurl = \"http://universities.hipolabs.com/search?country=\"+country\ncall = requests.get(url)\ndata = call.json()\nfor i in range(len(data)):\n print(data[i][\"name\"])\n print(data[i][\"web_pages\"])\n print()\n", "repo_name": "Nolvos/oldPythonProject", "sub_path": "lab4/bitcoin.py", "file_name": "bitcoin.py", "file_ext": "py", "file_size_in_byte": 277, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "28807632812", "text": "import json\nimport time\nimport requests\nfrom . misc import NPArrayEncoder\nfrom . conf import settings\nfrom . 
import get_logger\n\n\nlog = get_logger('ersatz.api')\n\ndef post(slug, params):\n params['worker_key'] = settings.WORKER_KEY\n while True:\n try:\n response = requests.post(settings.API_SERVER + slug,\n json.dumps(params, cls=NPArrayEncoder))\n log.debug(response.text)\n break\n except requests.ConnectionError:\n time.sleep(2)\n log.critical('Api server not available, trying again')\n return True if response.status_code == 200 else False\n\n\ndef get(slug, params={}, server=None):\n params['worker_key'] = settings.WORKER_KEY\n server = server or settings.API_SERVER\n while True:\n try:\n response = requests.get(server + slug, params=params)\n break\n except requests.ConnectionError:\n pass\n if response.status_code == 200:\n try:\n data = json.loads(response.text)\n except (ValueError, TypeError):\n return {'status': 'fail'}\n return data\n return {'status': 'fail'}\n\n\ndef rest_patch(slug, data):\n while True:\n try:\n response = requests.post(settings.API_SERVER + slug + '?worker_key=' + settings.WORKER_KEY,\n json.dumps(data, cls=NPArrayEncoder))\n log.debug(response.text)\n break\n except requests.ConnectionError:\n time.sleep(2)\n log.critical('Api server not available, trying again')\n return True if response.status_code == 200 else False\n", "repo_name": "deniskolokol/dlic", "sub_path": "back_end/core/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 1659, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "conf.settings.WORKER_KEY", "line_number": 12, "usage_type": "attribute"}, {"api_name": "conf.settings", "line_number": 12, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 15, "usage_type": "call"}, {"api_name": "conf.settings.API_SERVER", "line_number": 15, "usage_type": "attribute"}, {"api_name": "conf.settings", "line_number": 15, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 16, "usage_type": "call"}, {"api_name": "misc.NPArrayEncoder", "line_number": 16, "usage_type": "name"}, {"api_name": "requests.ConnectionError", "line_number": 19, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "conf.settings.WORKER_KEY", "line_number": 26, "usage_type": "attribute"}, {"api_name": "conf.settings", "line_number": 26, "usage_type": "name"}, {"api_name": "conf.settings.API_SERVER", "line_number": 27, "usage_type": "attribute"}, {"api_name": "conf.settings", "line_number": 27, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 30, "usage_type": "call"}, {"api_name": "requests.ConnectionError", "line_number": 32, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 36, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 46, "usage_type": "call"}, {"api_name": "conf.settings.API_SERVER", "line_number": 46, "usage_type": "attribute"}, {"api_name": "conf.settings", "line_number": 46, "usage_type": "name"}, {"api_name": "conf.settings.WORKER_KEY", "line_number": 46, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 47, "usage_type": "call"}, {"api_name": "misc.NPArrayEncoder", "line_number": 47, "usage_type": "name"}, {"api_name": "requests.ConnectionError", "line_number": 50, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "14635432372", "text": "#%%\nimport sys\nimport guidedlda\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.feature_extraction.text import 
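The `api.py` record above retries failed requests in an unbounded `while True` loop with a fixed two-second sleep. A bounded exponential-backoff variant of the same idea (a sketch, not the project's API):

```python
import time
import requests

def post_with_backoff(url, payload, attempts=5, base_delay=2.0):
    """Bounded retry with exponential backoff instead of an
    unbounded while-True / sleep(2) loop."""
    for attempt in range(attempts):
        try:
            return requests.post(url, json=payload, timeout=10)
        except requests.ConnectionError:
            delay = base_delay * (2 ** attempt)
            time.sleep(delay)  # 2s, 4s, 8s, ...
    raise RuntimeError(f"{url} unreachable after {attempts} attempts")
```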
CountVectorizer\nimport warnings\nwarnings.filterwarnings('ignore')\n\nsys.path.append('/workspaces/esg-controversy-tracker/pre_processing_scripts') \nfrom pre_processing import topic_modeling_preprocess\n\ne_keywords = ['emission reduction', 'biodiversity', 'environmental management systems','cerez valdez principles', 'carbon', 'fuel', 'greenhouse gas', 'emission']\ns_keywords = ['customer health safety', 'quality', 'privacy', 'product labeling']\ng_keywords = ['ceo board member', 'esg related compensation', 'board structure type']\nall_keywords = e_keywords + s_keywords + g_keywords\ndata = pd.read_csv('/workspaces/esg-controversy-tracker/dataset/ESG_daily_news.csv', nrows=1000)['text'].str.lower()\ndata = data[data == data]\ndata = data[data.str.contains('|'.join(all_keywords))]\ndata = data[0:1000]\ndata = topic_modeling_preprocess(data)\n#%%\nprint(\"After filtering for keywords, the dataset contains\", data.shape)\n# Count Vectorizer\nvect = CountVectorizer() \nvects = vect.fit_transform(data)\n\n# Select the first five rows from the data set\ntd = pd.DataFrame(vects.todense()) \ntd.columns = vect.get_feature_names()\n# term_document_matrix = td.T\nterm_document_matrix = td\nterm_document_matrix.index = ['Doc '+str(i) for i in range(len(data))]\n\n#%%\nseed_topic_list = [e_keywords, s_keywords, g_keywords]\n\nvocab = set(list(term_document_matrix.columns) + list(all_keywords))\n\nword2id = dict((v, idx) for idx, v in enumerate(vocab))\n\nmodel = guidedlda.GuidedLDA(\n n_topics=3, \n n_iter=100, \n random_state=7, refresh=20)\n\nseed_topics = {}\nfor t_id, st in enumerate(seed_topic_list):\n for word in st:\n seed_topics[word2id[word]] = t_id\n\nmodel.fit(term_document_matrix.to_numpy(), seed_topics=seed_topics, seed_confidence=0.15)\n\nn_top_words = 10\ntopic_word = model.topic_word_\nfor i, topic_dist in enumerate(topic_word):\n topic_words = np.array(list(vocab))[np.argsort(topic_dist)][:-(n_top_words+1):-1]\n print(f\"Topic {i}: {' '.join(topic_words)}\")\n", "repo_name": "nogibjj/esg-controversy-tracker", "sub_path": "topic_modelling/generative_model/guided_lda_test.py", "file_name": "guided_lda_test.py", "file_ext": "py", "file_size_in_byte": 2085, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "warnings.filterwarnings", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "pre_processing.topic_modeling_preprocess", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 30, "usage_type": "call"}, {"api_name": "guidedlda.GuidedLDA", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "6501579670", "text": "import logging\nfrom typing import Any\nimport aiohttp\nfrom astropy.coordinates import SkyCoord\nfrom astropy.wcs import WCS\nimport astropy.units as u\n\nfrom pyobs.images import Image\nimport pyobs.utils.exceptions as exc\nfrom .astrometry import Astrometry\n\n\nlog = logging.getLogger(__name__)\n\n\nclass AstrometryDotNet(Astrometry):\n \"\"\"Perform astrometry using astrometry.net\"\"\"\n\n 
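The guided-LDA record above calls `vect.get_feature_names()`, which was deprecated in scikit-learn 1.0 and removed in 1.2; `get_feature_names_out()` is the current spelling. A minimal document-term matrix with the newer call:

```python
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

docs = ["carbon emission reduction", "board structure type", "carbon fuel"]
vect = CountVectorizer()
matrix = vect.fit_transform(docs)

# get_feature_names() is gone in scikit-learn >= 1.2;
# get_feature_names_out() is the replacement
td = pd.DataFrame(matrix.todense(), columns=vect.get_feature_names_out())
print(td)
```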
__module__ = \"pyobs.images.processors.astrometry\"\n\n def __init__(\n self,\n url: str,\n source_count: int = 50,\n radius: float = 3.0,\n timeout: int = 10,\n exceptions: bool = True,\n **kwargs: Any,\n ):\n \"\"\"Init new astronomy.net processor.\n\n Args:\n url: URL to service.\n source_count: Number of sources to send.\n radius: Radius to search in.\n timeout: Timeout in seconds for call to astrometry web service.\n exceptions: Whether to raise Exceptions.\n \"\"\"\n Astrometry.__init__(self, **kwargs)\n\n # URL to web-service\n self.url = url\n self.source_count = source_count\n self.radius = radius\n self.timeout = timeout\n self.exceptions = exceptions\n\n async def __call__(self, image: Image) -> Image:\n \"\"\"Find astrometric solution on given image.\n\n Writes WCSERR=1 into FITS header on failure.\n\n Args:\n image: Image to analyse.\n \"\"\"\n\n # copy image\n img = image.copy()\n\n # get catalog\n if img.catalog is None:\n log.warning(\"No catalog found in image.\")\n if self.exceptions:\n raise exc.ImageError(\"No catalog found in image.\")\n return image\n cat = img.catalog[[\"x\", \"y\", \"flux\", \"peak\"]].to_pandas().dropna()\n\n # nothing?\n if cat is None or len(cat) < 3:\n log.warning(\"Not enough sources for astrometry.\")\n img.header[\"WCSERR\"] = 1\n if self.exceptions:\n raise exc.ImageError(\"Not enough sources for astrometry.\")\n return img\n\n # sort it, remove saturated stars and take N brightest sources\n cat = cat.sort_values(\"flux\", ascending=False)\n cat = cat[cat[\"peak\"] < 60000]\n cat = cat[: self.source_count]\n\n # no CDELT1?\n if \"CDELT1\" not in img.header:\n log.warning(\"No CDELT1 found in header.\")\n img.header[\"WCSERR\"] = 1\n if self.exceptions:\n raise exc.ImageError(\"No CDELT1 found in header.\")\n return img\n\n # build request data\n scale = abs(img.header[\"CDELT1\"]) * 3600\n data = {\n \"ra\": img.header[\"TEL-RA\"],\n \"dec\": img.header[\"TEL-DEC\"],\n \"scale_low\": scale * 0.9,\n \"scale_high\": scale * 1.1,\n \"radius\": self.radius,\n \"nx\": img.header[\"NAXIS1\"],\n \"ny\": img.header[\"NAXIS2\"],\n \"x\": cat[\"x\"].tolist(),\n \"y\": cat[\"y\"].tolist(),\n \"flux\": cat[\"flux\"].tolist(),\n }\n\n # log it\n ra_dec = SkyCoord(ra=data[\"ra\"] * u.deg, dec=data[\"dec\"] * u.deg, frame=\"icrs\")\n cx, cy = img.header[\"CRPIX1\"], img.header[\"CRPIX2\"]\n log.info(\n \"Found original RA=%s (%.4f), Dec=%s (%.4f) at pixel %.2f,%.2f.\",\n ra_dec.ra.to_string(sep=\":\", unit=u.hour, pad=True),\n data[\"ra\"],\n ra_dec.dec.to_string(sep=\":\", unit=u.deg, pad=True),\n data[\"dec\"],\n cx,\n cy,\n )\n\n # send it\n async with aiohttp.ClientSession() as session:\n async with session.post(self.url, json=data, timeout=self.timeout) as response:\n status_code = response.status\n json = await response.json()\n\n # success?\n if status_code != 200 or \"error\" in json:\n # set error\n img.header[\"WCSERR\"] = 1\n msg = \"Could not connect to astrometry service.\"\n if \"error\" in json:\n # \"Could not find WCS file.\" is just an info, which means that WCS was not successful\n if json[\"error\"] == \"Could not find WCS file.\":\n msg = \"Could not determine WCS.\"\n else:\n msg = f\"Received error from astrometry service: {json['error']}\"\n\n # raise or warn?\n if self.exceptions:\n raise exc.ImageError(msg)\n else:\n log.warning(msg)\n\n else:\n # copy keywords\n hdr = json\n header_keywords_to_update = [\n \"CTYPE1\",\n \"CTYPE2\",\n \"CRPIX1\",\n \"CRPIX2\",\n \"CRVAL1\",\n \"CRVAL2\",\n \"CD1_1\",\n \"CD1_2\",\n \"CD2_1\",\n 
\"CD2_2\",\n ]\n for keyword in header_keywords_to_update:\n img.header[keyword] = hdr[keyword]\n\n # astrometry.net gives a CD matrix, so we have to delete the PC matrix and the CDELT* parameters\n for keyword in [\"PC1_1\", \"PC1_2\", \"PC2_1\", \"PC2_2\", \"CDELT1\", \"CDELT2\"]:\n del img.header[keyword]\n\n # calculate world coordinates for all sources in catalog\n image_wcs = WCS(img.header)\n ras, decs = image_wcs.all_pix2world(img.catalog[\"x\"], img.catalog[\"y\"], 1)\n\n # set them\n img.catalog[\"ra\"] = ras\n img.catalog[\"dec\"] = decs\n\n # RA/Dec at center pos\n final_ra, final_dec = image_wcs.all_pix2world(cx, cy, 0)\n ra_dec = SkyCoord(ra=final_ra * u.deg, dec=final_dec * u.deg, frame=\"icrs\")\n\n # log it\n log.info(\n \"Found final RA=%s (%.4f), Dec=%s (%.4f) at pixel %.2f,%.2f.\",\n ra_dec.ra.to_string(sep=\":\", unit=u.hour, pad=True),\n data[\"ra\"],\n ra_dec.dec.to_string(sep=\":\", unit=u.deg, pad=True),\n data[\"dec\"],\n cx,\n cy,\n )\n\n # success\n img.header[\"WCSERR\"] = 0\n\n # finished\n return img\n\n\n__all__ = [\"AstrometryDotNet\"]\n", "repo_name": "pyobs/pyobs-core", "sub_path": "pyobs/images/processors/astrometry/dotnet.py", "file_name": "dotnet.py", "file_ext": "py", "file_size_in_byte": 6195, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "astrometry.Astrometry", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 28, "usage_type": "name"}, {"api_name": "astrometry.Astrometry.__init__", "line_number": 39, "usage_type": "call"}, {"api_name": "astrometry.Astrometry", "line_number": 39, "usage_type": "name"}, {"api_name": "pyobs.images.Image", "line_number": 48, "usage_type": "name"}, {"api_name": "pyobs.utils.exceptions.ImageError", "line_number": 64, "usage_type": "call"}, {"api_name": "pyobs.utils.exceptions", "line_number": 64, "usage_type": "name"}, {"api_name": "pyobs.utils.exceptions.ImageError", "line_number": 73, "usage_type": "call"}, {"api_name": "pyobs.utils.exceptions", "line_number": 73, "usage_type": "name"}, {"api_name": "pyobs.utils.exceptions.ImageError", "line_number": 86, "usage_type": "call"}, {"api_name": "pyobs.utils.exceptions", "line_number": 86, "usage_type": "name"}, {"api_name": "astropy.coordinates.SkyCoord", "line_number": 105, "usage_type": "call"}, {"api_name": "astropy.units.deg", "line_number": 105, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 105, "usage_type": "name"}, {"api_name": "astropy.units.hour", "line_number": 109, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 109, "usage_type": "name"}, {"api_name": "astropy.units.deg", "line_number": 111, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 111, "usage_type": "name"}, {"api_name": "aiohttp.ClientSession", "line_number": 118, "usage_type": "call"}, {"api_name": "pyobs.utils.exceptions.ImageError", "line_number": 137, "usage_type": "call"}, {"api_name": "pyobs.utils.exceptions", "line_number": 137, "usage_type": "name"}, {"api_name": "astropy.wcs.WCS", "line_number": 164, "usage_type": "call"}, {"api_name": "astropy.coordinates.SkyCoord", "line_number": 173, "usage_type": "call"}, {"api_name": "astropy.units.deg", "line_number": 173, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 173, "usage_type": "name"}, {"api_name": "astropy.units.hour", "line_number": 178, 
"usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 178, "usage_type": "name"}, {"api_name": "astropy.units.deg", "line_number": 180, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 180, "usage_type": "name"}]} +{"seq_id": "70089206774", "text": "from torchvision.transforms import PILToTensor\nimport torch\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\n\n\ndef R(x):\n x_diff = x - torch.roll(x, -1, dims=1)\n y_diff = x - torch.roll(x, -1, dims=0)\n norm = x_diff.abs().sum() + y_diff.abs().sum()\n return norm\n\nclass TVDenoise(torch.nn.Module):\n def __init__(self, noisy_image, tau):\n super(TVDenoise, self).__init__()\n self.tau = tau\n self.reference_image = torch.clone(noisy_image)\n self.reference_image.requires_grad = False\n\n self.denoised_image = torch.clone(noisy_image)\n self.denoised_image.requires_grad = True\n\n def forward(self):\n return torch.nn.L1Loss()(self.denoised_image, self.reference_image) + self.tau * R(self.denoised_image)\n\n def get_denoised_image(self):\n return self.denoised_image\n\n\nimage_path = 'face.jpg'\n# image_path = 'water-castle.png'\nimage_path = 'lenna.png'\nimage_path = 'markus.png'\n\npil_img = Image.open(image_path)\nwidth, height = pil_img.size\n\nreference_image = PILToTensor()(pil_img.resize((width//2, height//2)).convert('L')).squeeze().float() / 255\n\n# Denoising\nnoise = torch.randn_like(reference_image) * 0.1\nnoisy_image = reference_image + noise\nnoisy_image = np.clip(noisy_image, 0.0, 1.0)\nnoisy_image = torch.FloatTensor(noisy_image)\n\n# Inpainting\n# mask = torch.FloatTensor(height//2, width//2).uniform_() > 0.5\n# noisy_image = reference_image * mask\n\ntv_denoiser = TVDenoise(noisy_image, tau=0.0095)\noptimizer = torch.optim.SGD([tv_denoiser.denoised_image], lr=0.1)\n\nnum_iters = 500\nfor i in range(num_iters):\n optimizer.zero_grad()\n loss = tv_denoiser()\n if i % 100 == 0:\n print(f\"Loss in iteration {i}/{num_iters}: {loss.item():.3f}\")\n # denoised_image = torch.clone(tv_denoiser.get_denoised_image())\n # plt.imshow(denoised_image.detach().numpy(), cmap='gray')\n # plt.axis('off')\n # plt.show()\n\n loss.backward()\n optimizer.step()\n\ndenoised_image = tv_denoiser.get_denoised_image()\n\n\nfig, axs = plt.subplots(1, 3, figsize=(16, 10))\naxs = axs.ravel()\n\naxs[0].axis('off')\naxs[0].set_title('Reference')\naxs[0].imshow(reference_image, cmap='gray')\n\naxs[1].axis('off')\naxs[1].set_title('Noisy image')\naxs[1].imshow(noisy_image, cmap='gray')\n\naxs[2].axis('off')\naxs[2].set_title('Denoised image')\naxs[2].imshow(denoised_image.detach().numpy(), cmap='gray')\n\nplt.show()", "repo_name": "f-ilic/refreshers", "sub_path": "tv/tv_denoise_simple.py", "file_name": "tv_denoise_simple.py", "file_ext": "py", "file_size_in_byte": 2411, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "torch.roll", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.roll", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.clone", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.clone", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.L1Loss", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 36, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 36, 
"usage_type": "name"}, {"api_name": "torchvision.transforms.PILToTensor", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.randn_like", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.optim.SGD", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 52, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}]} +{"seq_id": "38025259350", "text": "import hexchat,re,json\n\n__module_name__ = 'OmnomIRC Bindings'\n__module_version__ = '1.0'\n__module_description__ = 'OmnomIRC Bindings for Hexchat'\n\nOMNOMIRCNICK = ['^','OmnomIRC','SoruTestIRC']\nTOPICBOTNICK = ['TopicBot']\n\n\nOMNOMIDENTSTR = 'OmnomIRC'\nOMNOMJOINIGNORE = 'OmnomIRC_ignore_join'\n\ndef addColor(s):\n\ts = hexchat.strip(s)\n\trcolors = ['03','04','06','08','09','10','11','12','13']\n\ti = 0\n\tfor c in s:\n\t\ti += ord(c)\n\treturn '\\x03'+rcolors[i % 9]+s+'\\x0F'\ndef getNick(prefix,nick):\n\tif bool(hexchat.get_prefs('text_color_nicks')):\n\t\tnick = addColor(nick)\n\telse: # as omnomirc can color nicks on itself\n\t\tnick = hexchat.strip(nick)\n\tnick = nick.replace(' ','\\xA0')\n\treturn '\\x0F'+prefix+'\\x0F\\xA0'+nick\ndef topicBinding(word,word_eol,userdata):\n\ts = hexchat.strip(word[0])\n\tif (s in TOPICBOTNICK) or (s in TOPICBOTNICK):\n\t\treturn hexchat.EAT_ALL\n\treturn hexchat.EAT_NONE\ndef doHighlight(s):\n\treturn hexchat.get_info('nick') in s\n\ndef modifyRawData(word,word_eol,userdata):\n\ttry:\n\t\tnick = hexchat.strip(word[0].split(':')[1].split('!')[0])\n\texcept:\n\t\tnick = ''\n\tif nick in OMNOMIRCNICK:\n\t\ttry:\n\t\t\tmsg = ' '.join(word[3:])[1:]\n\t\t\tchan = word[2]\n\t\texcept:\n\t\t\treturn hexchat.EAT_NONE\n\t\tif hexchat.nickcmp(chan,hexchat.get_info('nick'))==0:\n\t\t\tcmd = msg.split(' ')\n\t\t\ttry:\n\t\t\t\tif cmd[0] == 'OIRCUSERS':\n\t\t\t\t\tchan = cmd[1]\n\t\t\t\t\tfor nick in json.loads(' '.join(cmd[2:])):\n\t\t\t\t\t\tif hexchat.nickcmp(nick,hexchat.get_info('nick'))!=0:\n\t\t\t\t\t\t\thexchat.command('RECV :'+hexchat.strip(nick).replace(' ','\\xA0')+'!()\\xA0'+nick.replace(' ','\\xA0')+'@'+OMNOMJOINIGNORE+' JOIN :'+chan)\n\t\t\texcept Exception as inst:\n\t\t\t\tprint(__module_name__,': something unexpected happend, please report the following')\n\t\t\t\tprint('Oirc PM exeption')\n\t\t\t\tprint(inst)\n\t\t\treturn hexchat.EAT_ALL\n\t\tres = re.match(r'^((\\x03[0-9]{1,2}|\\x02)?\\([a-zA-Z0-9#!]+\\)[\\x0F\\x02]?)',msg)\n\t\tif res:\n\t\t\tnick_prefix = res.group(1)\n\t\t\tnick = ''\n\t\t\tmsg = msg[len(res.group(1)):]\n\t\t\tcmd = ''\n\t\t\ttextEvent = ''\n\t\t\targs = []\n\t\t\t\n\t\t\twhile True:\n\t\t\t\tres = re.match(r'<([^>]+)> (.*)',msg)\n\t\t\t\tif res: # normal msg\n\t\t\t\t\tif doHighlight(res.group(2)):\n\t\t\t\t\t\ttextEvent = 'Channel Msg Hilight'\n\t\t\t\t\telse:\n\t\t\t\t\t\ttextEvent = 'Channel Message'\n\t\t\t\t\targs = [res.group(2)]\n\t\t\t\t\tcmd = 'PRIVMSG '+chan+' :'+res.group(2)\n\t\t\t\t\tnick = res.group(1)\n\t\t\t\t\tbreak\n\t\t\t\tres = re.match(r'^\\x036\\* ([^ ]+) (.*)',msg)\n\t\t\t\tif res: # action\n\t\t\t\t\tif doHighlight(res.group(2)):\n\t\t\t\t\t\ttextEvent = 'Channel 
Action Hilight'\n\t\t\t\t\telse:\n\t\t\t\t\t\ttextEvent = 'Channel Action'\n\t\t\t\t\targs = [res.group(2)]\n\t\t\t\t\tcmd = 'PRIVMSG '+chan+' :\\x01ACTION '+res.group(2)+'\\x01'\n\t\t\t\t\tnick = res.group(1)\n\t\t\t\t\tbreak\n\t\t\t\tres = re.match(r'^\\x032\\* ([^ ]+) has left ([^ ]*) \\((.*)\\)',msg)\n\t\t\t\tif res: # part\n\t\t\t\t\tif res.group(3)!='':\n\t\t\t\t\t\ttextEvent = 'Part with Reason'\n\t\t\t\t\telse:\n\t\t\t\t\t\ttextEvent = 'Part'\n\t\t\t\t\targs = [res.group(1)+'@OmnomIRC',res.group(2),res.group(3)]\n\t\t\t\t\tcmd = 'PART '+chan+' :'+res.group(3)\n\t\t\t\t\tnick = res.group(1)\n\t\t\t\t\tbreak\n\t\t\t\tres = re.match(r'^\\x033\\* ([^ ]+) has joined ([^ ]*)',msg)\n\t\t\t\tif res: # join\n\t\t\t\t\ttextEvent = 'Join'\n\t\t\t\t\targs = [res.group(2),res.group(1)+'@'+OMNOMIDENTSTR,'']\n\t\t\t\t\tcmd = 'JOIN :'+chan\n\t\t\t\t\tnick = res.group(1)\n\t\t\t\t\tbreak\n\t\t\t\tres = re.match(r'^\\x032\\* ([^ ]+) has quit [^ ]* \\((.*)\\)',msg)\n\t\t\t\tif res: # quit\n\t\t\t\t\ttextEvent = 'Quit'\n\t\t\t\t\targs = [res.group(2)]\n\t\t\t\t\tcmd = 'QUIT :'+res.group(2)\n\t\t\t\t\tnick = res.group(1)\n\t\t\t\t\tbreak\n\t\t\t\tres = re.match(r'^\\x033\\* ([^ ]+) set ([^ ]*) mode (.*)',msg)\n\t\t\t\tif res: # mode\n\t\t\t\t\ttextEvent = 'Channel Mode Generic'\n\t\t\t\t\targs = ['',res.group(3),res.group(2)]\n\t\t\t\t\tcmd = 'MODE '+chan+' '+res.group(3)\n\t\t\t\t\tnick = res.group(1)\n\t\t\t\t\tbreak\n\t\t\t\tres = re.match(r'^\\x034\\* ([^ ]+) has kicked ([^ ]*) from ([^ ]*) \\((.*)\\)',msg)\n\t\t\t\tif res: # kick\n\t\t\t\t\ttextEvent = 'Kick'\n\t\t\t\t\targs = [res.group(2)+'@'+OMNOMIDENTSTR,res.group(3),res.group(4)]\n\t\t\t\t\tcmd = 'KICK '+chan+' '+hexchat.strip(res.group(2))+'\\xA0 :'+res.group(4)\n\t\t\t\t\tnick = res.group(1)\n\t\t\t\t\tbreak\n\t\t\t\tres = re.match(r'^\\x033\\* ([^ ]+) has changed the topic to (.*)',msg)\n\t\t\t\tif res: # topic\n\t\t\t\t\ttextEvent = 'Topic Change'\n\t\t\t\t\targs = [res.group(2)]\n\t\t\t\t\tcmd = 'TOPIC '+chan+' :'+res.group(2)\n\t\t\t\t\tnick = res.group(1)\n\t\t\t\t\tbreak\n\t\t\t\tres = re.match(r'^\\x033\\* (.+) has changed nicks to (.*)',msg)\n\t\t\t\tif res: # nick\n\t\t\t\t\ttextEvent = 'Change Nick'\n\t\t\t\t\targs = [getNick(nick_prefix,res.group(2))]\n\t\t\t\t\tcmd = res.group(2)\n\t\t\t\t\tnick = res.group(1)\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\tbreak\n\t\t\t\n\t\t\t\n\t\t\tif textEvent!='':\n\t\t\t\tif(len(word)>2):\n\t\t\t\t\targs.append(word[2])\n\t\t\t\telse:\n\t\t\t\t\targs.append('')\n\t\t\t\tnick_notext = nick = hexchat.strip(nick.replace(' ','\\xA0'))\n\t\t\t\tif not (textEvent in ['Part','Part with Reason','Quit','Join']):\n\t\t\t\t\tif '!' 
in nick_prefix:\n\t\t\t\t\t\tnick = '\\x02'+nick+'\\x02'\n\t\t\t\t\telse:\n\t\t\t\t\t\tnick = getNick(nick_prefix,nick)\n\t\t\t\tif hexchat.nickcmp(nick_notext,hexchat.get_info('nick'))==0 and textEvent in ['Part','Part with Reason','Quit','Join','Kick']:\n\t\t\t\t\thexchat.emit_print(textEvent,getNick(nick_prefix,nick_notext),*args)\n\t\t\t\t\tif textEvent=='Kick':\n\t\t\t\t\t\tkicknick = hexchat.strip(args[0][:-len('@'+OMNOMIDENTSTR)].replace(' ','\\xA0'))\n\t\t\t\t\t\tif hexchat.nickcmp(kicknick,hexchat.get_info('nick'))!=0:\n\t\t\t\t\t\t\thexchat.command('RECV :'+kicknick+'!'+getNick(nick_prefix,kicknick)+'@'+OMNOMJOINIGNORE+' PART '+chan)\n\t\t\t\t\treturn hexchat.EAT_ALL\n\t\t\t\tif textEvent=='Change Nick':\n\t\t\t\t\thexchat.emit_print(textEvent,getNick(nick_prefix,nick_notext),*args)\n\t\t\t\t\thexchat.command('RECV :'+nick_notext+'!'+getNick(nick_prefix,nick_notext)+'@'+OMNOMJOINIGNORE+' PART '+chan)\n\t\t\t\t\tcmd = cmd.replace(' ','\\xA0')\n\t\t\t\t\thexchat.command('RECV :'+hexchat.strip(cmd)+'!'+getNick(nick_prefix,cmd)+'@'+OMNOMJOINIGNORE+' JOIN :'+chan)\n\t\t\t\telif textEvent=='Quit':\n\t\t\t\t\thexchat.emit_print(textEvent,getNick(nick_prefix,nick_notext),*args)\n\t\t\t\t\thexchat.command('RECV :'+nick_notext+'!'+getNick(nick_prefix,nick_notext)+'@'+OMNOMJOINIGNORE+' PART '+chan)\n\t\t\t\telse:\n\t\t\t\t\thexchat.command('RECV :'+nick+'!'+getNick(nick_prefix,nick_notext)+'@'+OMNOMIDENTSTR+' '+cmd)\n\t\t\t\treturn hexchat.EAT_ALL\n\t\t\n\t\t\n\t\tprint(__module_name__,': something unexpected happened, please report the following')\n\t\tprint(word)\n\t\treturn hexchat.EAT_NONE\n\telif hexchat.strip(word[0]) in TOPICBOTNICK:\n\t\treturn hexchat.EAT_ALL\n\treturn hexchat.EAT_NONE\ndef addExtraNicks_352(word,word_eol,userdata):\n\tchan = word[3]\n\tnick = word[7]\n\tif nick in OMNOMIRCNICK:\n\t\thexchat.command('RAW PRIVMSG '+nick+' :GETUSERLIST '+chan)\ndef addExtraNicks_354(word,word_eol,userdata):\n\tchan = word[4]\n\tnick = word[8]\n\tif nick in OMNOMIRCNICK:\n\t\thexchat.command('RAW PRIVMSG '+nick+' :GETUSERLIST '+chan)\n\ndef modifyJoinData(word,word_eol,userdata):\n\tif '\\xA0' in word_eol[2]: # we need to modify this!\n\t\tif word[2][-len(OMNOMJOINIGNORE):] == OMNOMJOINIGNORE:\n\t\t\treturn hexchat.EAT_ALL\n\t\thexchat.emit_print('Join',word[2][:-len('@'+OMNOMIDENTSTR)],word[1],word[0].replace(' ','\\xA0')+'@'+OMNOMIDENTSTR)\n\t\treturn hexchat.EAT_ALL\ndef modifyPartData(word,word_eol,userdata):\n\tif '\\xA0' in word_eol[1]: # we need to modify this!\n\t\tif word[1][-len(OMNOMJOINIGNORE):] == OMNOMJOINIGNORE:\n\t\t\treturn hexchat.EAT_ALL\n\t\treason = ''\n\t\tif len(word) > 3:\n\t\t\tp_type = 'Part with Reason'\n\t\t\treason = word[3]\n\t\telse:\n\t\t\tp_type = 'Part'\n\t\thexchat.emit_print(p_type,word[1][:-len('@'+OMNOMIDENTSTR)],word[0].replace(' ','\\xA0')+'@'+OMNOMIDENTSTR,word[2],reason)\n\t\treturn hexchat.EAT_ALL\ndef modifyKickData(word,word_eol,userdata):\n\tif word[1][-1:] == '\\xA0': # we need to modify this!\n\t\tkicknick = word[1][:-1].replace(' ','\\xA0')\n\t\thexchat.emit_print('Kick',word[0],kicknick+'@'+OMNOMIDENTSTR,word[2],word_eol[3])\n\t\tif hexchat.nickcmp(kicknick,hexchat.get_info('nick'))!=0:\n\t\t\thexchat.command('RECV :'+kicknick+'!()\\xA0'+kicknick+'@'+OMNOMJOINIGNORE+' PART '+word[2])\n\t\treturn 
hexchat.EAT_ALL\n\nhexchat.hook_server('352',addExtraNicks_352,priority=hexchat.PRI_HIGHEST)\nhexchat.hook_server('354',addExtraNicks_354,priority=hexchat.PRI_HIGHEST)\nhexchat.hook_server('PRIVMSG',modifyRawData,priority=hexchat.PRI_HIGHEST)\nhexchat.hook_print('Join',modifyJoinData,priority=hexchat.PRI_HIGHEST)\nhexchat.hook_print('Part',modifyPartData,priority=hexchat.PRI_HIGHEST)\nhexchat.hook_print('Part with Reason',modifyPartData,priority=hexchat.PRI_HIGHEST)\nhexchat.hook_print('Kick',modifyKickData,priority=hexchat.PRI_HIGHEST)\nhexchat.hook_print('Topic',topicBinding,priority=hexchat.PRI_HIGHEST)\n\nprint(__module_name__, 'version', __module_version__, 'loaded.')\n", "repo_name": "Sorunome/OmnomIRC", "sub_path": "omnombindings_hexchat.py", "file_name": "omnombindings_hexchat.py", "file_ext": "py", "file_size_in_byte": 8384, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "21", "api": [{"api_name": "hexchat.strip", "line_number": 15, "usage_type": "call"}, {"api_name": "hexchat.get_prefs", "line_number": 22, "usage_type": "call"}, {"api_name": "hexchat.strip", "line_number": 25, "usage_type": "call"}, {"api_name": "hexchat.strip", "line_number": 29, "usage_type": "call"}, {"api_name": "hexchat.EAT_ALL", "line_number": 31, "usage_type": "attribute"}, {"api_name": "hexchat.EAT_NONE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "hexchat.get_info", "line_number": 34, "usage_type": "call"}, {"api_name": "hexchat.strip", "line_number": 38, "usage_type": "call"}, {"api_name": "hexchat.EAT_NONE", "line_number": 46, "usage_type": "attribute"}, {"api_name": "hexchat.nickcmp", "line_number": 47, "usage_type": "call"}, {"api_name": "hexchat.get_info", "line_number": 47, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 52, "usage_type": "call"}, {"api_name": "hexchat.nickcmp", "line_number": 53, "usage_type": "call"}, {"api_name": "hexchat.get_info", "line_number": 53, "usage_type": "call"}, {"api_name": "hexchat.command", "line_number": 54, "usage_type": "call"}, {"api_name": "hexchat.strip", "line_number": 54, "usage_type": "call"}, {"api_name": "hexchat.EAT_ALL", "line_number": 59, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 60, "usage_type": "call"}, {"api_name": "re.match", "line_number": 70, "usage_type": "call"}, {"api_name": "re.match", "line_number": 80, "usage_type": "call"}, {"api_name": "re.match", "line_number": 90, "usage_type": "call"}, {"api_name": "re.match", "line_number": 100, "usage_type": "call"}, {"api_name": "re.match", "line_number": 107, "usage_type": "call"}, {"api_name": "re.match", "line_number": 114, "usage_type": "call"}, {"api_name": "re.match", "line_number": 121, "usage_type": "call"}, {"api_name": "hexchat.strip", "line_number": 125, "usage_type": "call"}, {"api_name": "re.match", "line_number": 128, "usage_type": "call"}, {"api_name": "re.match", "line_number": 135, "usage_type": "call"}, {"api_name": "hexchat.strip", "line_number": 151, "usage_type": "call"}, {"api_name": "hexchat.nickcmp", "line_number": 157, "usage_type": "call"}, {"api_name": "hexchat.get_info", "line_number": 157, "usage_type": "call"}, {"api_name": "hexchat.emit_print", "line_number": 158, "usage_type": "call"}, {"api_name": "hexchat.strip", "line_number": 160, "usage_type": "call"}, {"api_name": "hexchat.nickcmp", "line_number": 161, "usage_type": "call"}, {"api_name": "hexchat.get_info", "line_number": 161, "usage_type": "call"}, {"api_name": "hexchat.command", 
"line_number": 162, "usage_type": "call"}, {"api_name": "hexchat.EAT_ALL", "line_number": 163, "usage_type": "attribute"}, {"api_name": "hexchat.emit_print", "line_number": 165, "usage_type": "call"}, {"api_name": "hexchat.command", "line_number": 166, "usage_type": "call"}, {"api_name": "hexchat.command", "line_number": 168, "usage_type": "call"}, {"api_name": "hexchat.strip", "line_number": 168, "usage_type": "call"}, {"api_name": "hexchat.emit_print", "line_number": 170, "usage_type": "call"}, {"api_name": "hexchat.command", "line_number": 171, "usage_type": "call"}, {"api_name": "hexchat.command", "line_number": 173, "usage_type": "call"}, {"api_name": "hexchat.EAT_ALL", "line_number": 174, "usage_type": "attribute"}, {"api_name": "hexchat.EAT_NONE", "line_number": 179, "usage_type": "attribute"}, {"api_name": "hexchat.strip", "line_number": 180, "usage_type": "call"}, {"api_name": "hexchat.EAT_ALL", "line_number": 181, "usage_type": "attribute"}, {"api_name": "hexchat.EAT_NONE", "line_number": 182, "usage_type": "attribute"}, {"api_name": "hexchat.command", "line_number": 187, "usage_type": "call"}, {"api_name": "hexchat.command", "line_number": 192, "usage_type": "call"}, {"api_name": "hexchat.EAT_ALL", "line_number": 197, "usage_type": "attribute"}, {"api_name": "hexchat.emit_print", "line_number": 198, "usage_type": "call"}, {"api_name": "hexchat.EAT_ALL", "line_number": 199, "usage_type": "attribute"}, {"api_name": "hexchat.EAT_ALL", "line_number": 203, "usage_type": "attribute"}, {"api_name": "hexchat.emit_print", "line_number": 210, "usage_type": "call"}, {"api_name": "hexchat.EAT_ALL", "line_number": 211, "usage_type": "attribute"}, {"api_name": "hexchat.emit_print", "line_number": 215, "usage_type": "call"}, {"api_name": "hexchat.nickcmp", "line_number": 216, "usage_type": "call"}, {"api_name": "hexchat.get_info", "line_number": 216, "usage_type": "call"}, {"api_name": "hexchat.command", "line_number": 217, "usage_type": "call"}, {"api_name": "hexchat.EAT_ALL", "line_number": 218, "usage_type": "attribute"}, {"api_name": "hexchat.hook_server", "line_number": 220, "usage_type": "call"}, {"api_name": "hexchat.PRI_HIGHEST", "line_number": 220, "usage_type": "attribute"}, {"api_name": "hexchat.hook_server", "line_number": 221, "usage_type": "call"}, {"api_name": "hexchat.PRI_HIGHEST", "line_number": 221, "usage_type": "attribute"}, {"api_name": "hexchat.hook_server", "line_number": 222, "usage_type": "call"}, {"api_name": "hexchat.PRI_HIGHEST", "line_number": 222, "usage_type": "attribute"}, {"api_name": "hexchat.hook_print", "line_number": 223, "usage_type": "call"}, {"api_name": "hexchat.PRI_HIGHEST", "line_number": 223, "usage_type": "attribute"}, {"api_name": "hexchat.hook_print", "line_number": 224, "usage_type": "call"}, {"api_name": "hexchat.PRI_HIGHEST", "line_number": 224, "usage_type": "attribute"}, {"api_name": "hexchat.hook_print", "line_number": 225, "usage_type": "call"}, {"api_name": "hexchat.PRI_HIGHEST", "line_number": 225, "usage_type": "attribute"}, {"api_name": "hexchat.hook_print", "line_number": 226, "usage_type": "call"}, {"api_name": "hexchat.PRI_HIGHEST", "line_number": 226, "usage_type": "attribute"}, {"api_name": "hexchat.hook_print", "line_number": 227, "usage_type": "call"}, {"api_name": "hexchat.PRI_HIGHEST", "line_number": 227, "usage_type": "attribute"}]} +{"seq_id": "73852737013", "text": "\"\"\"\n Train model for evaluation on dev set (trained on training set only):\n python main.py --train_data data/train.txt --model_file demo_model\n\n 
Evaluate:\n    python main.py --dev_data data/dev.csv --model_file demo_model --dev_result_file dev_results.json --evaluate\n\n    Train final model for predicting test set (trained on training & dev set):\n    python main.py --train_data data/train_dev.txt --model_file final_model\n\n    Predict:\n    python main.py --pred_data data/test_no_label.csv --model_file final_model --output_file predictions.csv --predict\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport pandas as pd\nimport numpy as np\nimport time\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.metrics import label_ranking_average_precision_score as LRAP\nimport omikuji # Package implementing the Parabel algorithm\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--train_data', help='path to training data text file')\n    parser.add_argument('--dev_data', help='path to dev data for evaluation')\n    parser.add_argument('--dev_result_file', help='path to dev set evaluation result file')\n    parser.add_argument('--pred_data', help='path to predict data csv file without label')\n    parser.add_argument('--model_file', help='path to model file')\n    parser.add_argument('--output_file', help='path to prediction output file')\n    parser.add_argument('--evaluate', action='store_true', help='evaluate mode')\n    parser.add_argument('--predict', action='store_true', help='predict mode')\n    args = parser.parse_args()\n    return args\n\n\ndef train(train_file, model_file):\n    # Best Parabel parameters based on grid search\n    # trained on the train set and evaluated on the dev set\n    hyper_param = omikuji.Model.default_hyper_param()\n    hyper_param.n_trees = 3 # default 3\n    hyper_param.min_branch_size = 1000 # default 100\n    hyper_param.linear_c = 1.2 # default 1.0\n    hyper_param.max_depth = 50 # default 20\n\n    # Train Parabel Model\n    start_time = time.time()\n    model = omikuji.Model.train_on_data(train_file, hyper_param)\n    end_time = time.time()\n    print('Training time: {}s'.format(end_time - start_time))\n\n    # Save Parabel model\n    model.save(model_file)\n    print('Model saved to {}'.format(model_file))\n\n\ndef evaluate(dev_data, model_file, dev_result_file):\n    # load dev data for evaluation from csv\n    data = pd.read_csv(dev_data, usecols=['features','labels'])\n\n    # remove rows with improper label\n    rows_to_remove = [i for i in range(len(data)) if ':' in data.loc[i,'labels']]\n    data.drop(rows_to_remove, inplace=True)\n    data.reset_index(drop=True, inplace=True)\n\n    # Extract features from sparse representation\n    feature = np.zeros((len(data), 5000))\n    for i in range(len(data)):\n        for j in data.loc[i,'features'].replace('\\n','').split():\n            ft, val = j.split(':')\n            feature[i,int(ft)] = float(val)\n    X = pd.DataFrame(feature)\n    y = data['labels'].map(lambda x: tuple([int(i) for i in x.replace(' ','').split(',')]))\n    binarizer = MultiLabelBinarizer(np.arange(3993))\n    binary_y = binarizer.fit_transform(y)\n\n    # Load saved model\n    model = omikuji.Model.load(model_file)\n\n    # Predict\n    pred = np.zeros((X.shape[0], 3993))\n    for i in range(X.shape[0]):\n        feature_value_pairs = [(j, X.iloc[i,j]) for j in range(X.shape[1])]\n        label_score_pairs = model.predict(feature_value_pairs, top_k=3993)\n        for pairs in label_score_pairs:\n            pred[i, pairs[0]] = pairs[1]\n\n    # Calculate LRAP score\n    lrap = LRAP(binary_y, pred)\n    print('dev LRAP: ', lrap)\n    with open(dev_result_file, 'w') as fout:\n        json.dump({'dev LRAP': lrap}, fout)\n\n\ndef predict(pred_file, model_file, output_file):\n    # load data for prediction from csv\n    data = 
pd.read_csv(pred_file, usecols=['features'])\n    data.reset_index(drop=True, inplace=True)\n\n    # Extract features from sparse representation\n    feature = np.zeros((len(data), 5000))\n    for i in range(len(data)):\n        for j in data.loc[i,'features'].replace('\\n','').split():\n            ft, val = j.split(':')\n            feature[i,int(ft)] = float(val)\n    X = pd.DataFrame(feature)\n\n    # Load saved model\n    model = omikuji.Model.load(model_file)\n\n    # Predict\n    pred = np.zeros((X.shape[0], 3993))\n    for i in range(X.shape[0]):\n        feature_value_pairs = [(j, X.iloc[i,j]) for j in range(X.shape[1])]\n        label_score_pairs = model.predict(feature_value_pairs, top_k=3993)\n        for pairs in label_score_pairs:\n            pred[i, pairs[0]] = pairs[1]\n\n    # Save prediction result to csv\n    np.savetxt(output_file, pred, delimiter=\",\")\n    print('Predictions saved to {}'.format(output_file))\n\n\ndef main(args):\n    if args.evaluate:\n        evaluate(args.dev_data, args.model_file, args.dev_result_file)\n\n    elif args.predict:\n        predict(args.pred_data, args.model_file, args.output_file)\n\n    else:\n        train(args.train_data, args.model_file)\n\nif __name__ == '__main__':\n    args = parse_args()\n    main(args)\n", "repo_name": "TRokieG/Extreme_Multilabel_Classification", "sub_path": "Parabel/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5070, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "21", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 27, "usage_type": "call"}, {"api_name": "omikuji.Model.default_hyper_param", "line_number": 43, "usage_type": "call"}, {"api_name": "omikuji.Model", "line_number": 43, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 50, "usage_type": "call"}, {"api_name": "omikuji.Model.train_on_data", "line_number": 51, "usage_type": "call"}, {"api_name": "omikuji.Model", "line_number": 51, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 75, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MultiLabelBinarizer", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 77, "usage_type": "call"}, {"api_name": "omikuji.Model.load", "line_number": 81, "usage_type": "call"}, {"api_name": "omikuji.Model", "line_number": 81, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 84, "usage_type": "call"}, {"api_name": "sklearn.metrics.label_ranking_average_precision_score", "line_number": 92, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 104, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 109, "usage_type": "call"}, {"api_name": "omikuji.Model.load", "line_number": 112, "usage_type": "call"}, {"api_name": "omikuji.Model", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "31476637188", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.template import loader\nfrom chatapp.models import Info\n\n# Hook up the chatbot to the TensorFlow deep-learning model trained on the homepage dialogue corpus\nfrom 
chatapp.ArkChatFramework.ArkChat.chatting_home import ChattingHomepage\n# Logging inside the chatbot framework\nimport os\nimport logging\n\n# Create your views here.\ncontext = {}\n\n# Initialize the chatbot\nlogger = logging.getLogger(__name__)\nwork_dir = os.path.dirname(os.path.realpath(__file__))\nbot = ChattingHomepage(work_dir)\n\n# def index(request):\n#     msg = '박형식 홈페이지'\n#     return render(request, 'chatapp/index.html', {'message': msg})\n\ndef index(request):\n    template = loader.get_template('chatapp/base_contents_kr.html')\n    # template = loader.get_template('chatapp/base_mycareer_kr.html')\n    info = Info.objects.get(user_id='user')\n    info.step = 1\n    info.tag = ''\n    info.category = ''\n    info.result = ''\n    info.save()\n    context = {\n        'login_success' : False,\n        'latest_question_list': 'test'\n    }\n    # default_info = Info(user_id=\"user\", step=0, tag=\"\", result=\"\")\n    # default_info.save()\n\n    return HttpResponse(template.render(context, request))\n\ndef chat_home(request):\n    template = loader.get_template('chatapp/chat_home_screen.html')\n    context = {\n        'login_success' : False,\n        'initMessages' : [\"다나와\",\"딥러닝 기반 화장품 추천 시스템\", \"Hi\"]\n    }\n    return HttpResponse(template.render(context, request))\n\ndef popup_chat_home(request):\n    template = loader.get_template('chatapp/popup_chat_home_screen.html')\n    # template = loader.get_template('chatapp/popup_mycareer_chatting_screen.html')\n    context = {\n        'login_success' : False,\n        'initMessages' : [\"딥러닝 기반 화장품 추천 시스템\", \"Hi\"]\n    }\n    # context = {\n    #     'login_success' : False,\n    #     'initMessages' : [\"인공지능 기반 업무자동화 RPA 컨설턴트 직무를 찾고 있는 홍길동입니다.\",\n    #                       \"귀사를 위한 업무자동화 서비스 제공자로서 준비된 저의 역량을 소개해 드리겠습니다.\"]\n    # }\n    return HttpResponse(template.render(context, request))\n\ndef call_chatbot(request):\n    if request.method == 'POST':\n        if request.is_ajax():\n            userID = request.POST['user']\n            sentence = request.POST['message']\n            logger.debug(\"question[{}]\".format(sentence))\n            answer = make_answer(sentence, userID)\n            print(answer)\n            logger.debug(\"answer[{}]\".format(answer))\n            return HttpResponse(answer)\n    return ''\n\ndef make_answer(sentence, userID):\n    cate_list = [\"스킨\", \"로션\", \"에센스\", \"앰플\", \"크림\"] # category list\n    filter_list = [\"복합성\",\"건성\",\"지성\",\"쿨톤\",\"웜톤\",\"잡티\",\"미백\",\"주름\",\"각질\",\"트러블\",\"블랙헤드\",\\\n                   \"피지과다\",\"민감성\",\"모공\",\"탄력\",\"홍조\",\"아토피\"] # list of the 17 tags\n    type_list = filter_list[:3] # skin type list\n    tone_list = filter_list[3:5] # skin tone list\n    porb_list = filter_list[5:] # skin concern list\n\n    answer = bot.get_answer(sentence, userID) # intents['responses']\n    info = Info.objects.get(user_id='user')\n    step = info.step\n    tag = info.tag\n    category = info.category\n    result = info.result\n    result_list = result.split()\n\n    if step == 1: # product category selection step\n        if tag in cate_list:\n            info.step = step + 1\n            info.category= tag\n            info.save()\n            answer += \"\\n피부 타입을 입력해주세요.\"\n        else:\n            answer = \"잘못 입력하셨습니다.\" + tag + \" 제품 카테고리를 입력해주세요.\"\n\n    elif step == 2: # skin type selection step\n        if tag in type_list:\n            info.step = step + 1\n            info.result += tag\n            info.save()\n            answer += \"\\n피부톤을 입력해주세요.\"\n        else:\n            answer = \"잘못 입력하셨습니다.\" + tag + \" 피부 타입을 입력해주세요.\"\n\n    elif step == 3: # skin tone selection step\n        if tag in tone_list:\n            info.step = step + 1\n            info.result += (' ' + tag)\n            info.save()\n            answer += \"\\n피부 고민을 입력해주세요.\"\n        else:\n            answer = \"잘못 입력하셨습니다.\" + tag + \" 피부톤을 입력해주세요.\"\n\n    elif step == 4: # skin concern selection step\n        if tag in porb_list:\n            if tag in result_list[2:]:\n                answer = \"이미 입력한 피부 고민입니다. 
다른 피부 고민이 있으면 추가로 입력해주세요\"\n            else:\n                info.result += (' ' + tag)\n                answer += \"\\n다른 피부 고민이 있으면 추가로 입력해주세요\"\n        elif tag == 'end':\n            name, price, url = predict_code_value(category, result_list)\n            answer = \"제품: \" + name + \"\\n가격: \" + price + \"\\n링크: \" + url\n            info.step = 1\n            info.tag = None\n            info.result = ''\n            info.save()\n        else:\n            answer = \"잘못 입력하셨습니다.\" + tag + \" 피부 고민을 입력해주세요.\"\n\n    else:\n        answer = \"Error\"\n\n    return answer\n\n#############################\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom keras.utils import to_categorical\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.callbacks import ModelCheckpoint\nimport os\n\ndef readdata_and_savemodel(filename):\n    #INPUT = 1000001000100010001.csv,1000001000100010002.csv,1000001000100010003.csv,1000001000100010004.csv,1000001000100010011.csv\n    #OUTPUT : the trained model is saved under the model path\n    from sklearn.feature_extraction.text import CountVectorizer\n    from sklearn.feature_extraction.text import TfidfTransformer\n    from keras.utils import to_categorical\n    from sklearn.preprocessing import LabelEncoder,OneHotEncoder\n    from keras.models import Sequential\n    from keras.layers import Dense\n    from keras.callbacks import ModelCheckpoint\n    import os\n    \n    # Read the X,Y data from the file and preprocess it (vectorization and one-hot encoding)\n    x_data,y_data = load_from_dataset(filename) #read in\n    x_train = make_x_train(x_data) #preprocess\n    y_train = make_y_train(y_data) #preprocess\n    name=filename.split(\".\")[0] #get the category keyword from the file name\n\n    #set the model save path\n    MODEL_DIR = './model/'\n    if not os.path.exists(MODEL_DIR):\n        os.mkdir(MODEL_DIR)\n    modelpath = '/model/'+name+'.hdf5'\n    checkpointer = ModelCheckpoint(filepath=modelpath, monitor = 'loss', save_best_only=True, verbose=1)\n\n    #declare the model and set up its layers\n    model = Sequential()\n    model.add(Dense(64,input_dim = len(x_train[0]),activation='relu'))\n    model.add(Dense(128,activation='relu'))\n    model.add(Dense(len(y_train[0]),activation='softmax'))\n    model.compile(loss = 'categorical_crossentropy',optimizer='adam',metrics=['accuracy'])\n    \n    #keep only the single best checkpointed model\n    history = model.fit(x_train,y_train,epochs=2000,verbose=1,batch_size=1,callbacks=[checkpointer])\n\n    \ndef make_y_train(result2):\n    #INPUT : preprocessing function for the Y values read by load_from_dataset\n    import numpy as np\n    from keras.utils import to_categorical\n    from sklearn.preprocessing import OneHotEncoder,LabelEncoder\n    f=LabelEncoder()\n    f.fit(result2)\n    y_train = f.transform(result2)\n    #OUTPUT = [0....1....0] one-hot vector\n    return to_categorical(y_train)\n\ndef x_onehot_encoding(input_list):\n    # Stage-1 function for preprocessing the X data; rarely used directly\n    # used inside the make_x_train function\n    # INPUT = ['복합성', '웜톤', '트러블', '모공', '민감성', '잡티']\n    name_list=[\"복합성\",\"건성\",\"지성\",\"쿨톤\",\"웜톤\",\"잡티\",\"미백\",\"주름\",\"각질\",\"트러블\",\"블랙헤드\",\"피지과다\",\"민감성\",\"모공\",\"탄력\",\"홍조\",\"아토피\"]\n    result=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n    for i in input_list:\n        index = name_list.index(i)\n        result[index]+=1\n    # OUTPUT = [1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0]\n    return result\n\ndef make_x_train(result1):\n    #INPUT : preprocessing function for the X values read by load_from_dataset\n    # function that processes the X data\n    # uses x_onehot_encoding internally and handles every x value in a for loop\n    import numpy as np\n    temp=[]\n    result = list(result1)\n    for i in result:\n        temp.append(x_onehot_encoding(i))\n    #the values produced here can be fed straight into model.fit\n    #OUTPUT\n    #[\n    #    [1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0],\n    #    [1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0],\n    #    [1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 
0, 1, 1, 0, 0, 0]\n    #]\n    return np.array(temp)\n\ndef load_from_dataset(string):\n    # INPUT : string is the csv file name\n    import pandas as pd\n    #df = pd.read_csv('C:/Users/kimsuhyun/Dropbox/Cosmetic_Recommandation_Chatbot/mychatsite/chatapp/model/'+string,encoding='utf-8')\n    df = pd.read_csv('C:/Users/Administrator/Dropbox/Cosmetic_Recommandation_Chatbot/mychatsite/chatapp/model/'+string,encoding='utf-8')\n    y_data = df.values[:,1]\n    x_data = df.values[:,-1]\n    temp=[]\n    for i in x_data:\n        willappend = i.split(\" \")\n        willappend.pop()\n        temp.append(willappend)\n    # read the x and y values from the csv file and return them\n    # not preprocessed yet\n    return temp,y_data\n\ndef load_model_hdf5(filename):\n    # INPUT : 1000001000100010001.csv\n    # load the hdf file\n    from tensorflow import keras\n    #loaded_model = keras.models.load_model(\"C:/Users/kimsuhyun/Dropbox/Cosmetic_Recommandation_Chatbot/mychatsite/chatapp/model/\"+filename)\n    loaded_model = keras.models.load_model(\"C:/Users/Administrator/Dropbox/Cosmetic_Recommandation_Chatbot/mychatsite/chatapp/model/\"+filename)\n    # return the loaded model\n    return loaded_model\n\ndef predict_code_value(category_name,input_value):\n    # category_name e.g. 로션, 세럼, 토너\n    #input_value = [\"복합성\", ...]\n    categoryno = Name_to_CategoryNo(category_name)\n    import numpy as np\n    import pandas as pd\n    b=np.array(x_onehot_encoding(input_value)).reshape(1,-1)\n    model = load_model_hdf5(categoryno+'.hdf5')\n    x,y=load_from_dataset(categoryno+'.csv')\n    df = pd.read_csv(\"C:/Users/Administrator/Dropbox/Cosmetic_Recommandation_Chatbot/mychatsite/chatapp/model/\"+categoryno+'.csv',encoding='utf-8')\n    #df = pd.read_csv(\"C:/Users/kimsuhyun/Dropbox/Cosmetic_Recommandation_Chatbot/mychatsite/chatapp/model/\"+categoryno+'.csv',encoding='utf-8')\n    code = y[model.predict_classes(b)]\n    url = 'https://www.oliveyoung.co.kr/store/goods/getGoodsDetail.do?goodsNo={}&dispCatNo={}'.format(code[0],categoryno)\n    # 'A000000103112'\n    # name, price, url\n    row = df[df['id']==code[0]]\n    pre_price = row['price'].astype(str)\n    pre_name = row['name'].astype(str)\n    price=text_processing(pre_price.iloc[0])\n    name=text_processing(pre_name.iloc[0])\n    return name,price,url \n\ndef get_url(code,filename):\n    # function that builds and returns the url\n    catno = filename.split(\".\")[0]\n    base_url='https://www.oliveyoung.co.kr/store/goods/getGoodsDetail.do?'+'goodsNo={}&dispCatNo={}'.format(code,catno)\n    # use together with print as needed\n    return base_url\n\ndef GoodsNo_to_Name(goodsnum):\n    # function that converts a product number to a name\n    if goodsnum=='1000001000100010001' or goodsnum==1000001000100010001:\n        return \"스킨/토너\"\n    elif goodsnum=='1000001000100010002' or goodsnum==1000001000100010002:\n        return \"로션\"\n    elif goodsnum=='1000001000100010003' or goodsnum==1000001000100010003:\n        return \"에센스/세럼\"\n    elif goodsnum=='1000001000100010011' or goodsnum==1000001000100010011:\n        return \"앰플\"\n    elif goodsnum=='1000001000100010004' or goodsnum==1000001000100010004:\n        return \"크림\"\n    else:\n        print(\"없는 코드입니다.\")\n        return \n\ndef Name_to_CategoryNo(category_name):\n    # function that converts a name to a product number\n    if category_name=='토너' or category_name=='스킨' or category_name=='스킨토너' or category_name=='토너스킨' or category_name==\"스킨/토너\" or category_name==\"토너/스킨\":\n        return '1000001000100010001'\n    elif category_name=='로션':\n        return '1000001000100010002'\n    elif category_name=='에센스' or category_name=='세럼' or category_name=='새럼' or category_name=='에센스/세럼' or category_name=='에센스/새럼' or category_name=='세럼/에센스' or category_name=='새럼/에센스':\n        return '1000001000100010003'\n    elif category_name=='앰플' or category_name=='엠플':\n        return '1000001000100010011'\n    elif category_name=='크림':\n        
return '1000001000100010004'\n    else:\n        print(\"없는 코드코드입니다.\")\n        return \n\ndef get_url(goods_num,filename):\n    # function that builds and returns the url\n    catno = filename.split(\".\")[0]\n    base_url='https://www.oliveyoung.co.kr/store/goods/getGoodsDetail.do?'+'goodsNo={}&dispCatNo={}'.format(goods_num,catno)\n    # use together with print as needed\n    return base_url\n\ndef text_processing(string):\n    # text clean-up function\n    # INPUT string=\"한번 써보고 너무 좋아서(^_^)매번 구입해서 사용 중이에요:). 바이오더마 제품이라 믿음도 가요.물스킨 타입이라 닦아내는 용도로 쓰고 있는 데 적당히 쿨링감 있고 좋아요저는 건성이긴 하지만 이 제품은 가볍고 깨끗한 느낌이라지성한테 더 잘 어울리는 제품인 거 같아요보습이 강하지는 않고 진정이랑 산뜻함?? 이런 느낌이 강해요. 향도 쎄지 않은 그냥 쿨한 느낌이고 어름에 쓰기 딱 좋은 거 같아요냉장고에 보관하고 사용 중인데 더 피부진정에도움이 되는 거 같아요.자극도 없이 순하고 세네통 넘게 사용 중이에요 남자친구도 안끈적거리고 시원하고 좋다고 애용중입니다:)\"\n    # removes line breaks, special characters, and irregular indentation\n    file = open(\"test.txt\",'w',encoding='utf-8')\n    file.write(string)\n    file.write(\"\\n\")\n    file.close()\n    file=open(\"test.txt\",'r',encoding='utf-8')\n    result=''\n    while True:\n        line = file.readline()\n        if not line:\n            break\n        result+=line.replace(\"\\n\",'')\n    file.close()\n    return result\n\ndef category_tag_to_dictionary(category_name,input_value):\n    a,b,c = predict_code_value(category_name,input_value)\n    result = {\n        'name' : a,\n        \"price\" : b,\n        \"url\" : c,\n    }\n    return result\n\n#readdata_and_savemodel(\"1000001000100010001.csv\")\n\n#############\n# def db_initializer():\n    # step = 1\n    # tag = None\n    # category = None\n    # result = None", "repo_name": "iwillbeaprogramer/Cosmetic_Recommandation_Chatbot", "sub_path": "mychatsite/chatapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 15176, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 17, "usage_type": "call"}, {"api_name": "chatapp.ArkChatFramework.ArkChat.chatting_home.ChattingHomepage", "line_number": 18, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 25, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 25, "usage_type": "name"}, {"api_name": "chatapp.models.Info.objects.get", "line_number": 27, "usage_type": "call"}, {"api_name": "chatapp.models.Info.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "chatapp.models.Info", "line_number": 27, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 40, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 43, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 43, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 48, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 51, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 51, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 62, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 73, "usage_type": "call"}, {"api_name": "chatapp.models.Info.objects.get", "line_number": 85, "usage_type": "call"}, {"api_name": "chatapp.models.Info.objects", "line_number": 85, "usage_type": "attribute"}, {"api_name": "chatapp.models.Info", "line_number": 85, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 172, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 173, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 175, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 178, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 179, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 180, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 181, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 193, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 227, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 233, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 250, "usage_type": "call"}, {"api_name": "tensorflow.keras.models", "line_number": 250, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 250, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 260, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 263, "usage_type": "call"}]} +{"seq_id": "4627365297", "text": "import os\nimport typing\n\nfrom sqlalchemy import create_engine, Column, Integer, String, TEXT, and_\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom pydantic import BaseModel\n\nimport config\nfrom dto import QuestionInputDTO\n\n# SQLAlchemy database setup\nBase = declarative_base()\nengine = create_engine(f'postgresql+psycopg2://{config.DBS_USER}:{config.DBS_PASS}@{config.DBS_URL}/bewise_ai',\n echo=False)\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\nclass QuestionDB(Base):\n __tablename__ = 'questions'\n id = Column(Integer, primary_key=True, autoincrement=True, nullable=False)\n text = Column(String, nullable=False)\n answer = Column(TEXT, nullable=False)\n date = Column(TEXT, nullable=False)\n\n\nclass QuestionDBModerator:\n @staticmethod\n async def get_question_by_id(question_id: int) -> typing.Union[int, None]:\n return session.query(QuestionDB).filter_by(id=question_id).first() or None\n\n @staticmethod\n async def add_question(question_input: QuestionInputDTO) -> int:\n question_db = QuestionDB(text=question_input.question, answer=question_input.answer,\n date=question_input.created_at)\n session.add(question_db)\n session.commit()\n return question_db.id\n\n @staticmethod\n async def question_exists(question_input: QuestionInputDTO) -> bool:\n return session.query(QuestionDB).filter(and_(QuestionDB.text == question_input.question,\n QuestionDB.answer == question_input.answer,\n QuestionDB.date == question_input.created_at)).first() is not None\n\n\nBase.metadata.create_all(engine)\n", "repo_name": "Nirebl/Bewise.ai", "sub_path": "Backend/models/question.py", "file_name": "question.py", "file_ext": "py", "file_size_in_byte": 1777, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 13, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 14, "usage_type": "call"}, {"api_name": "config.DBS_USER", "line_number": 14, "usage_type": "attribute"}, {"api_name": "config.DBS_PASS", "line_number": 14, "usage_type": "attribute"}, {"api_name": 
"config.DBS_URL", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.TEXT", "line_number": 24, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.TEXT", "line_number": 25, "usage_type": "argument"}, {"api_name": "typing.Union", "line_number": 30, "usage_type": "attribute"}, {"api_name": "dto.QuestionInputDTO", "line_number": 34, "usage_type": "name"}, {"api_name": "dto.QuestionInputDTO", "line_number": 42, "usage_type": "name"}, {"api_name": "sqlalchemy.and_", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "4050707391", "text": "\n# coding: utf-8\n\n# In[65]:\n\nimport tweepy\nimport pandas as pd\n\n\n# In[66]:\n\n# Twitter Credentials\n\n#Authentication Credentials\nconsumer_key = 'consumer_key'\nconsumer_secret = 'consumer_secret'\n\naccess_token = 'access_token'\naccess_token_secret = 'access_token_secret'\n\n#Twitter API Authorization\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\napi = tweepy.API(auth, parser=tweepy.parsers.JSONParser())\n\n\n# In[ ]:\n\nusers = []\ncount = 0\nfor user in tweepy.Cursor(api.followers, id='UNC_Basketball').pages():\n users.append(user)\n\n\n# In[68]:\n\nlocations = []\nfor page in users:\n for follower in page['users']:\n if follower['location'] != '':\n locations.append(follower['location'])\n\n\n# In[71]:\n\nlen(locations)\n\n\n# In[70]:\n\nmax(set(locations), key=locations.count)\n\n\n# In[ ]:\n\n\n\n", "repo_name": "tghays/Data-Science", "sub_path": "Social-Media-Analytics/Twitter/Location-Retrieval/twitter_locator.py", "file_name": "twitter_locator.py", "file_ext": "py", "file_size_in_byte": 866, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "tweepy.OAuthHandler", "line_number": 22, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 25, "usage_type": "call"}, {"api_name": "tweepy.parsers.JSONParser", "line_number": 25, "usage_type": "call"}, {"api_name": "tweepy.parsers", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tweepy.Cursor", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "36340036137", "text": "from sqlalchemy import Column, Boolean, String, DateTime, Date, ForeignKey, Integer, Float\nfrom sqlalchemy.orm import relationship\nfrom datetime import datetime\nfrom db.mysql import Base\n\nclass IncidenceType(Base):\n __tablename__ = \"incidenceType_table\"\n\n id = Column(String, primary_key=True)\n name = Column(String, unique=True, index=True, nullable=False)\n priorityValue = Column(Integer, nullable=False)\n\n is_active = Column(Boolean, default=True)\n is_deleted = Column(Boolean, default=False)\n createdAt = Column(DateTime, default=datetime.now)\n updatedAt = Column(DateTime)\n deletedAt = Column(DateTime)\n\n incidence = relationship(\"Incidence\", back_populates=\"incidenceType\")\n", "repo_name": "walteralv/chapaq_security", "sub_path": "models/incidenceType.py", 
"file_name": "incidenceType.py", "file_ext": "py", "file_size_in_byte": 711, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "db.mysql.Base", "line_number": 6, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 9, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 9, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 10, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 10, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 11, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 13, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 13, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 14, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 15, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 15, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 16, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 17, "usage_type": "argument"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "41963723235", "text": "from panel2 import app\nfrom panel2.session import login_signal, authfail_signal\nfrom flask import request\n\n@login_signal.connect_via(app)\ndef send_login_notice(*args, **kwargs):\n user = kwargs.pop('user', None)\n if user.login_success_notice:\n user.send_email('Authentication successful for %s from %s' % (user.username, request.environ['REMOTE_ADDR']), 'email/auth-message.txt')\n\n@authfail_signal.connect_via(app)\ndef send_authfail_notice(*args, **kwargs):\n user = kwargs.pop('user', None)\n if user is not None and user.login_failed_notice:\n user.send_email('Authentication failed for %s from %s' % (user.username, request.environ['REMOTE_ADDR']), 'email/authfail-message.txt')\n\n", "repo_name": "mrmonday/panel2", "sub_path": "panel2/auth_hooks.py", "file_name": "auth_hooks.py", "file_ext": "py", "file_size_in_byte": 705, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "flask.request.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 9, "usage_type": "name"}, {"api_name": "panel2.session.login_signal.connect_via", "line_number": 5, "usage_type": "call"}, {"api_name": "panel2.app", "line_number": 5, "usage_type": "argument"}, {"api_name": "panel2.session.login_signal", "line_number": 5, "usage_type": "name"}, {"api_name": "flask.request.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "panel2.session.authfail_signal.connect_via", "line_number": 11, "usage_type": "call"}, {"api_name": "panel2.app", "line_number": 11, "usage_type": "argument"}, {"api_name": "panel2.session.authfail_signal", 
"line_number": 11, "usage_type": "name"}]} +{"seq_id": "42168375925", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n'''\nCreated on 26 0tt 2018\n\nController dell'applicazione web\n\n@author: Giovanni, Davide\n'''\nimport requirements\n\nfrom flask_login import LoginManager, login_user, logout_user, login_required, UserMixin, current_user\nfrom django.utils.html import strip_tags\nfrom functools import wraps\nfrom flask import Flask, request, flash, redirect, url_for, send_file\nfrom flask.templating import render_template\nfrom Model import Model\nimport os\nimport csv\nimport datetime\n\n# Classe di appoggio per i dati che arrivano dal DB\nclass User(UserMixin):\n def __init__(self, id_utente = None, username = '', ruolo = None):\n self.id = id_utente\n self.username = username\n self.ruolo = ruolo\n\n# Applicazione Flask!\n#logging.basicConfig(level=logging.DEBUG)\napp = Flask(__name__) \napp.model = Model()\n\n# flask-login\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = '/'\nlogin_manager.session_protection = 'strong'\n\n# Decorator for roles\ndef check_roles(roles = None, page = 'home'):\n def decorator(f):\n @wraps(f)\n def func_wrapper(*args, **kwargs):\n if current_user.ruolo in roles:\n return f(*args, **kwargs)\n return redirect(url_for(page, next=request.url)) \n return func_wrapper\n return decorator\n\n# Per impedire all'utente di tornare indietro dopo aver fatto il logout\n@app.after_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n return response\n\n@login_manager.user_loader\ndef load_user(user_id):\n username = app.model.getUsername(user_id);\n if username is None:\n return None\n ruolo = app.model.getRuoloUsername(user_id);\n return User(user_id,username,ruolo)\n\n@app.route('/')\ndef home():\n return render_template('login.html')\n \n@app.route('/login', methods=['POST'])\ndef login():\n username = strip_tags(request.form['username'])\n password = strip_tags(request.form['pass'])\n password_codificata = app.model.make_md5(app.model.make_md5(password))\n num_rows, id_utente = app.model.getCountUsernamePassword(username, password_codificata)\n ruolo = app.model.getRuoloUsername(id_utente)\n if num_rows == 1:\n user = User(id_utente,username,ruolo)\n login_user(user)\n return redirect('/registro')\n else:\n flash('wrong password!')\n return redirect('/')\n\n@app.route(\"/logout\", methods=['POST'])\n@login_required\ndef logout():\n logout_user()\n return redirect('/')\n\n@app.route(\"/view_modify_pwd\", methods=['POST'])\n@login_required\n@check_roles([0,1,2],'registro')\ndef view_modify_pwd():\n return render_template('modify_pwd.html', matricola_profilo='admin')\n\n@app.route(\"/view_modify_pwd_profilo\", methods=['POST'])\n@login_required\n@check_roles([0],'registro')\ndef view_modify_pwd_profilo():\n matricola_profilo = strip_tags(request.form[\"matricola_profilo\"]).strip()\n return render_template('modify_pwd.html', matricola_profilo=matricola_profilo)\n\n@app.route(\"/modify_pwd\", methods=['POST'])\n@login_required\ndef modify_pwd():\n matricola_profilo = strip_tags(request.form[\"matricola_profilo\"]).strip()\n password1 = strip_tags(request.form['pass1']).strip()\n password2 = strip_tags(request.form['pass2']).strip()\n if password1 == password2:\n user = current_user\n password_codificata = app.model.make_md5(app.model.make_md5(password2))\n if matricola_profilo == 'admin':\n ack_pwd = app.model.updateUserPwd(user.id, password_codificata)\n 
else:\n            id_profilo, utente_profilo = app.model.getProfiloUtente(matricola_profilo)\n            ack_pwd = app.model.updateUserPwd(id_profilo, password_codificata)\n        if ack_pwd:\n            return redirect('/registro')\n        else:\n            return redirect('/view_modify_pwd')\n    else:\n        return redirect('/view_modify_pwd')\n\n@app.route(\"/view_modify_mac\", methods=['POST','GET'])\n@login_required\ndef view_modify_mac():\n    matricola_profilo = strip_tags(request.form[\"matricola_profilo\"]).strip()\n    return render_template('modify_mac.html', matricola_profilo=matricola_profilo)\n    \n@app.route(\"/modify_mac\", methods=['POST'])\n@login_required\ndef modify_mac():\n    mac1 = strip_tags(request.form['mac1']).strip()\n    mac2 = strip_tags(request.form['mac2']).strip()\n    matricola_profilo = strip_tags(request.form[\"matricola_profilo\"]).strip()\n    id_profilo, utente_profilo = app.model.getProfiloUtente(matricola_profilo)\n    if mac1 == mac2:\n        ack_mac = app.model.updateUserMac(id_profilo, mac1)\n        if ack_mac:\n            return profilo()\n        else:\n            return redirect('/view_modify_mac')\n    else:\n        return redirect('/view_modify_mac')\n    \n# 0 --> admin, \n# 1 --> supervisor, \n# 2 --> regular user\n@app.route(\"/registro\", methods=['POST','GET'])\n@login_required\n@check_roles([0,1,2],'registro')\ndef registro():\n    user = current_user\n    matricola = app.model.getMatricola(user.id)\n    if user.ruolo == 1 or user.ruolo == 2:\n        id_profilo, utente_profilo = app.model.getProfiloUtente(matricola)\n        macs = app.model.getIdMac(id_profilo)\n        mac = macs[0]['mac']\n        if user.ruolo == 1:\n            ruolo_profilo = \"supervisore\"\n        elif user.ruolo == 2:\n            ruolo_profilo = \"utente\"\n        frequenza_profilo = app.model.getFrequenzaUsername(id_profilo)\n        frequenza_profilo = sorted(frequenza_profilo, key=lambda data: datetime.datetime.strptime(data['data'], '%d-%m-%Y').date())\n        return render_template('registro.html', mac=mac, username=user.username, id_utente=user.id, utente_profilo=utente_profilo, frequenza_profilo=frequenza_profilo, ruolo_profilo=ruolo_profilo, ruolo=user.ruolo)\n    elif user.ruolo == 0:\n        utenti_punteggi = app.model.getUtentiPunteggi()\n        supervisori_punteggi = app.model.getSupervisoriPunteggi()\n        utenti_punteggi = sorted(utenti_punteggi, key=lambda utenti: utenti[0]['cognome'])\n        supervisori_punteggi = sorted(supervisori_punteggi, key=lambda utenti: utenti[0]['cognome'])\n        return render_template('registro.html', username=user.username, ruolo=user.ruolo, supervisori_punteggi=supervisori_punteggi, utenti_punteggi=utenti_punteggi)\n    else:\n        flash('wrong password!')\n        return redirect('/')\n\n@app.route(\"/registro_supervisori\", methods=['POST'])\n@login_required\ndef registro_supervisori():\n    user = current_user\n    supervisori_punteggi = app.model.getSupervisoriPunteggi()\n    supervisori_punteggi = sorted(supervisori_punteggi, key=lambda utenti: utenti[0]['cognome'])\n    return render_template('registro.html', username=user.username, ruolo=user.ruolo, supervisori_punteggi=supervisori_punteggi)\n\n@app.route(\"/registro_utenti\", methods=['POST'])\n@login_required\ndef registro_utenti():\n    user = current_user\n    utenti_punteggi = app.model.getUtentiPunteggi()\n    utenti_punteggi = sorted(utenti_punteggi, key=lambda utenti: utenti[0]['cognome'])\n    return render_template('registro.html', username=user.username, ruolo=user.ruolo, utenti_punteggi=utenti_punteggi)\n\n@app.route(\"/profilo\", methods=['POST'])\n@login_required\ndef profilo():\n    user = current_user\n    username = user.username\n    id_utente = user.id\n    matricola_profilo = 
strip_tags(request.form['matricola_profilo']).strip()\n    id_profilo, utente_profilo = app.model.getProfiloUtente(matricola_profilo)\n    ruolo_profilo = app.model.getRuoloUsername(id_profilo)\n    macs = app.model.getIdMac(id_profilo)\n    mac = macs[0]['mac']\n    if ruolo_profilo == 0:\n        ruolo_profilo = \"admin\"\n    elif ruolo_profilo == 1:\n        ruolo_profilo = \"supervisore\"\n    elif ruolo_profilo == 2:\n        ruolo_profilo = \"utente\"\n    frequenza_profilo = app.model.getFrequenzaUsername(id_profilo)\n    \n    frequenza_profilo_id = []\n    id = 1\n    for presenza in frequenza_profilo:\n        presenza.update({\"id\":id})\n        frequenza_profilo_id.append(presenza)\n        id += 1\n\n    frequenza_profilo_id = sorted(frequenza_profilo_id, key=lambda data: datetime.datetime.strptime(data['data'], '%d-%m-%Y').date())\n    return render_template('profilo.html', mac=mac, username=username, id_utente=id_utente, utente_profilo=utente_profilo, frequenza_profilo_id=frequenza_profilo_id, ruolo_profilo=ruolo_profilo)\n\n@app.route(\"/view_aggiungi_utente\", methods=['POST'])\n@login_required\ndef view_aggiungi_utente():\n    return render_template('aggiungiUtente.html')\n\n@app.route(\"/data_log\", methods=['POST'])\n@login_required\ndef data_log():\n    user = current_user\n    username = user.username\n    return render_template('datalog.html', username=username)\n\n@app.route(\"/search_data_log\", methods=['POST'])\n@login_required\ndef search_data_log():\n    user = current_user\n    username = user.username\n    data = strip_tags(request.form['data']).strip()\n    data = data.replace(\"/\", \"-\", 2)\n    data = data[8:10] + \"-\" + data[5:7] + \"-\" + data[0:4]\n    utenti_per_data = app.model.getUtentiPerData(data)\n    utenti_per_data = sorted(utenti_per_data, key=lambda utenti: utenti[0]['cognome'])\n    return render_template('datalog.html', username=username, utenti_per_data=utenti_per_data)\n\n@app.route(\"/download_data_log\", methods=['POST'])\n@login_required\ndef download_data_log():\n    user = current_user\n    username = user.username\n    try:\n        data = strip_tags(request.form['data_button'])\n    except Exception as e:\n        return render_template('datalog.html', username=username)\n    \n    utenti_per_data = app.model.getUtentiPerData(data)\n    utenti_per_data = sorted(utenti_per_data, key=lambda utenti: utenti[0]['cognome'])\n    \n    dati = list()\n    for utente in utenti_per_data:\n        cognome = utente[0]['cognome']\n        nome = utente[0]['nome']\n        matricola = utente[0]['matricola']\n        data = utente[2][0]['data']\n        inizio = utente[2][0]['ora_inizio']\n        fine = utente[2][0]['ora_fine'] \n        dati.append({'cognome':cognome,'nome':nome,'matricola':matricola,'data':data,'inizio':inizio,'fine':fine})\n    \n    nomeFile = 'log_data.csv'\n    with open(nomeFile, mode='w', encoding='utf-8') as csvFile:\n        nomiCampi = ['cognome','nome','matricola','data','inizio','fine']\n        writer = csv.DictWriter(csvFile,fieldnames=nomiCampi)\n        writer.writeheader()\n        for riga in dati:\n            writer.writerow(riga)\n    \n    # download file\n    try:\n        return send_file(\"log_data.csv\", as_attachment=True)\n    except Exception as e:\n        app.logger.exception(e)\n    \n    return render_template('datalog.html', username=username, utenti_per_data=utenti_per_data)\n\n@app.route(\"/aggiungi_utente\", methods=['POST'])\n@login_required\ndef aggiungi_utente():\n    user = current_user\n    username = strip_tags(request.form['username']).strip()\n    nome = strip_tags(request.form['nome']).strip()\n    cognome = strip_tags(request.form['cognome']).strip()\n    matricola = str(strip_tags(request.form['matricola']).strip()).upper()\n    mac = strip_tags(request.form['mac']).strip()\n    
pwd = strip_tags(request.form['password']).strip()\n password_codificata = str(app.model.make_md5(app.model.make_md5(pwd)))\n ack_user = app.model.addUser(username, nome, cognome, matricola, mac, password_codificata)\n return redirect('/registro')\n\n@app.route(\"/elimina_utente\", methods=['POST'])\n@login_required\ndef elimina_utente():\n user = current_user\n matricola_profilo = strip_tags(request.form[\"matricola_profilo\"]).strip()\n id_profilo, utente_profilo = app.model.getProfiloUtente(matricola_profilo)\n ack_user = app.model.deleteUser(id_profilo)\n return redirect('/registro')\n\n@app.route(\"/profilo_aggiungi_presenza\", methods=['POST'])\n@login_required\ndef profilo_aggiungi_presenza(): \n user = current_user\n username = user.username\n id_utente = user.id\n matricola_profilo = strip_tags(request.form['matricola_profilo']).strip()\n id_profilo, utente_profilo = app.model.getProfiloUtente(matricola_profilo)\n ruolo_profilo = app.model.getRuoloUsername(id_profilo)\n macs = app.model.getIdMac(id_profilo)\n mac = macs[0]['mac']\n if ruolo_profilo == 0:\n ruolo_profilo = \"admin\"\n elif ruolo_profilo == 1:\n ruolo_profilo = \"supervisore\"\n elif ruolo_profilo == 2:\n ruolo_profilo = \"utente\"\n frequenza_profilo = app.model.getFrequenzaUsername(id_profilo)\n \n frequenza_profilo_id = []\n id = 1\n for presenza in frequenza_profilo:\n presenza.update({\"id\":id})\n frequenza_profilo_id.append(presenza)\n id += 1\n\n frequenza_profilo_id = sorted(frequenza_profilo_id, key=lambda data: datetime.datetime.strptime(data['data'], '%d-%m-%Y').date())\n return render_template('aggiungi_presenza.html', mac=mac, username=username, id_utente=id_utente, utente_profilo=utente_profilo, frequenza_profilo_id=frequenza_profilo_id, ruolo_profilo=ruolo_profilo)\n\n@app.route(\"/aggiungi_presenza\", methods=['POST'])\n@login_required\ndef aggiungi_presenza():\n user = current_user\n matricola_profilo = strip_tags(request.form[\"matricola_profilo\"]).strip()\n data = strip_tags(request.form['data']).strip()\n data = data.replace(\"/\", \"-\", 2)\n data = data[8:10] + \"-\" + data[5:7] + \"-\" + data[0:4]\n \n try:\n date_error = datetime.datetime.strptime(data, '%d-%m-%Y').date()\n except:\n return profilo()\n \n id_profilo, utente_profilo = app.model.getProfiloUtente(matricola_profilo)\n ack_aggiungi_presenza = app.model.aggiungi_presenza(id_profilo, data)\n return profilo()\n\n@app.route(\"/elimina_presenza\", methods=['POST'])\n@login_required\ndef elimina_presenza():\n user = current_user\n matricola_profilo = strip_tags(request.form[\"matricola_profilo\"]).strip()\n id_profilo, utente_profilo = app.model.getProfiloUtente(matricola_profilo)\n idx_presenza = int(request.form['idx_presenza']) - 1\n ack_elimina_presenza = app.model.elimina_presenza(id_profilo, idx_presenza)\n return profilo()\n\n@app.route(\"/cambio_ruolo\", methods=['POST'])\n@login_required\ndef cambio_ruolo():\n matricola_profilo = strip_tags(request.form[\"matricola_profilo\"]).strip()\n id_profilo, utente_profilo = app.model.getProfiloUtente(matricola_profilo)\n ruolo_profilo = app.model.getRuoloUsername(id_profilo)\n option_ruolo = request.form['option_ruolo']\n if ruolo_profilo == 1 and option_ruolo == \"Supervisore\":\n pass\n elif ruolo_profilo == 2 and option_ruolo == \"Utente\":\n pass\n else:\n ack_ruolo = app.model.updateRuolo(id_profilo, option_ruolo)\n return profilo()\n\n@app.route(\"/export_punteggi\", methods=['POST'])\n@login_required\ndef export_punteggi():\n utenti_punteggi = app.model.getUtentiPunteggi() # 
[utente][tempo][punteggio]\n    supervisori_punteggi = app.model.getSupervisoriPunteggi()\n    \n    utenti_punteggi = sorted(utenti_punteggi, key=lambda utenti: utenti[0]['cognome'])\n    supervisori_punteggi = sorted(supervisori_punteggi, key=lambda utenti: utenti[0]['cognome'])\n    \n    # regular users only\n    dati = list()\n    for utente in utenti_punteggi:\n        cognome = utente[0]['cognome']\n        nome = utente[0]['nome']\n        matricola = utente[0]['matricola']\n        tempo = utente[1]\n        punteggio = utente[2]\n        data_list = []\n        for d in utente[3]:\n            data = d['data']\n            data_list.append(data)\n        data_list = sorted(data_list, key=lambda data: datetime.datetime.strptime(data, '%d-%m-%Y').date())\n        dati.append({'cognome':cognome,'nome':nome,'matricola':matricola,'tempo':tempo,'punteggio':punteggio,'frequenze':data_list})\n    \n    nomeFile = 'log_punteggi.csv'\n    with open(nomeFile, mode='w', encoding='utf-8') as csvFile:\n        nomiCampi = ['cognome','nome','matricola','tempo','punteggio','frequenze']\n        writer = csv.DictWriter(csvFile,fieldnames=nomiCampi)\n        writer.writeheader()\n        for riga in dati:\n            writer.writerow(riga)\n    \n    # write time and score back to the db as a backup\n    for riga in dati:\n        matricola_profilo = riga['matricola']\n        tempo_profilo = riga['tempo']\n        punteggio_profilo = riga['punteggio']\n        id_profilo, utente_profilo = app.model.getProfiloUtente(matricola_profilo)\n        ack_updateUtentiPunteggi = app.model.updateUtentiPunteggi(id_profilo, tempo_profilo, punteggio_profilo)\n    \n    # download file\n    try:\n        return send_file(\"log_punteggi.csv\", as_attachment=True)\n    except Exception as e:\n        log.exception(e)\n        Error(400)\n    \n    return redirect('/registro')\n    \n\nif __name__ == '__main__': # This if must be the last statement.\n    app.secret_key = os.urandom(12)\n    app.run(host=\"0.0.0.0\", port=\"5000\", debug=True, use_reloader=True) # Debug mode also reloads modified files without reinitializing the web server.", "repo_name": "GiovanniBellorio/Frequency-Detection-Wireless", "sub_path": "src/Controller.py", "file_name": "Controller.py", "file_ext": "py", "file_size_in_byte": 16922, "program_lang": "python", "lang": "it", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "flask_login.UserMixin", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 32, "usage_type": "call"}, {"api_name": "Model.Model", "line_number": 33, "usage_type": "call"}, {"api_name": "flask_login.LoginManager", "line_number": 36, "usage_type": "call"}, {"api_name": "flask_login.current_user.ruolo", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.url", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.templating.render_template", "line_number": 68, "usage_type": "call"}, {"api_name": "django.utils.html.strip_tags", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 73, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 73, "usage_type": "name"}, {"api_name": "flask_login.login_user", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 83, "usage_type": "call"}, {"api_name": "flask_login.logout_user", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 89, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.templating.render_template", "line_number": 95, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 92, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 101, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 101, "usage_type": "name"}, {"api_name": "flask.templating.render_template", "line_number": 102, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 98, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 107, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 107, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 108, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 108, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 109, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 109, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 109, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 111, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 121, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 123, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 105, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 128, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 128, "usage_type": "name"}, {"api_name": "flask.templating.render_template", "line_number": 129, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 126, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 134, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 134, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 135, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 135, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 136, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 136, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 136, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 143, 
"usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 145, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 132, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 154, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 165, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 165, "usage_type": "attribute"}, {"api_name": "flask.templating.render_template", "line_number": 166, "usage_type": "call"}, {"api_name": "flask.templating.render_template", "line_number": 172, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 174, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 175, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 151, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 180, "usage_type": "name"}, {"api_name": "flask.templating.render_template", "line_number": 183, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 178, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 188, "usage_type": "name"}, {"api_name": "flask.templating.render_template", "line_number": 191, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 186, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 196, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 199, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 199, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 199, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 219, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 219, "usage_type": "attribute"}, {"api_name": "flask.templating.render_template", "line_number": 220, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 194, "usage_type": "name"}, {"api_name": "flask.templating.render_template", "line_number": 225, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 223, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 230, "usage_type": "name"}, {"api_name": "flask.templating.render_template", "line_number": 232, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 228, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 237, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 239, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 239, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 239, "usage_type": "name"}, {"api_name": "flask.templating.render_template", "line_number": 244, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 235, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 249, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 252, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 252, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 252, "usage_type": "name"}, {"api_name": "flask.templating.render_template", "line_number": 254, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 272, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 279, "usage_type": 
"call"}, {"api_name": "flask.templating.render_template", "line_number": 284, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 247, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 289, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 290, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 290, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 290, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 291, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 291, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 291, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 292, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 292, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 292, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 293, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 293, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 293, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 294, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 294, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 294, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 295, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 295, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 295, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 298, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 287, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 303, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 304, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 304, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 304, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 307, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 301, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 312, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 315, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 315, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 315, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 335, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 335, "usage_type": "attribute"}, {"api_name": "flask.templating.render_template", "line_number": 336, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 310, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 341, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 342, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 342, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 342, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 343, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 343, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 343, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 348, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 348, "usage_type": "attribute"}, {"api_name": "flask_login.login_required", "line_number": 339, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 359, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 360, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 360, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 360, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 362, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 362, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 357, "usage_type": "name"}, {"api_name": "django.utils.html.strip_tags", "line_number": 369, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 369, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 369, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 372, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 372, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 367, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 402, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 402, "usage_type": "attribute"}, {"api_name": "csv.DictWriter", "line_number": 408, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 423, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 428, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 382, "usage_type": "name"}, {"api_name": "os.urandom", "line_number": 432, "usage_type": "call"}]} +{"seq_id": "23845424697", "text": "# -*- coding: utf-8 -*-\n__author__ = [\"Sandro Brito\", \"Mateus Barbosa\", \"Daniel Machado\", \"Thiago Lopes\", \"Heibbe Oliveira\"]\n__credits__ = [\"LEEDMOL Research group\", \"Institute of Chemistry at Universidade de Brasilia\", \"Institute of Chemistry at Universidade Federal de Goiás\"]\n__date__ = \"Oct 16 of 2019\"\n__version__ = \"1.0.1\"\n\nimport matplotlib\nfrom matplotlib import pyplot\nfrom matplotlib import patches\nfrom PIL import Image\nfrom PIL import PngImagePlugin\n\nclass PlotTransitions(object):\n\n\tdef __init__(self, target_dir, output_file_names, title_chart_evolution, step, evol_plot_wl_choice, evol_plot_osc_choice, resol):\n\t\tself.target_dir = target_dir\n\t\tself.output_file_names = output_file_names\n\t\tself.title_chart_evolution = title_chart_evolution\n\t\tself.step = step\n\t\tself.evol_plot_wl_choice = evol_plot_wl_choice\n\t\tself.evol_plot_osc_choice = evol_plot_osc_choice\n\t\tself.resol = resol\n\t\tosc_list = []\n\t\twl_list = []\n\t\tfor rawData in output_file_names:\n\t\t\trawData_osc_list = []\n\t\t\trawData_wl_list = []\n\t\t\twith open(self.target_dir+\"/\"+rawData+\"_rawData.dat\", encoding=\"utf8\", errors='ignore') as myFile:\n\t\t\t\tfor line in myFile:\n\t\t\t\t\ty = line.split()\n\t\t\t\t\trawData_osc_list.append(float(y[1]))\n\t\t\t\t\trawData_wl_list.append(float(y[0]))\n\t\t\tosc_list.append(rawData_osc_list)\n\t\t\twl_list.append(rawData_wl_list)\n\t\ttemp = []\n\t\tfor element in osc_list:\n\t\t\ttemp.append(len(element))\n\t\tself.length_list = 
sorted(temp)[-1]\n\n\t\tself.dict_osc_wl_list = {}\n\t\tfor number in range(0, self.length_list, 1):\n\t\t\ttemp_list = []\n\t\t\tfor counter in range(0, len(self.output_file_names), 1):\n\t\t\t\ttry:\n\t\t\t\t\ttemp_list.append([osc_list[counter][number], wl_list[counter][number]])\n\t\t\t\texcept:\n\t\t\t\t\ttemp_list.append([0, 0])\n\t\t\tself.dict_osc_wl_list.update({str(number+1):temp_list})\n\n\tdef plot_pyplot(self, states, colors):\n\t\tif self.evol_plot_osc_choice == 1:\n\t\t\tname_plot = self.target_dir + \"/excitation_evolution_\"\n\t\t\tcolor_num = 0\n\t\t\tgraph = matplotlib.pyplot.figure(figsize=(8, 6))\n\t\t\tx = range(1, len(self.step)+1, 1)\n\t\t\tlegends = []\n\t\t\tfor excitation in states:\n\t\t\t\tosc = []\n\t\t\t\tfor element in self.dict_osc_wl_list[excitation]:\n\t\t\t\t\tosc.append(element[0])\n\t\t\t\ta = graph.add_subplot(111)\n\t\t\t\ta.scatter(x, osc, marker=\".\", s=50, color=colors[color_num])\n\t\t\t\ta.plot(x, osc, color=colors[color_num])\n\t\t\t\ta.set_xticks(x)\n\t\t\t\ta.set_xticklabels(self.step)\n\t\t\t\telement_legend = matplotlib.patches.Patch(color=colors[color_num], label=\"State \"+excitation)\n\t\t\t\tlegends.append(element_legend)\n\t\t\t\tcolor_num = color_num + 1\n\t\t\tbox = a.get_position()\n\t\t\ta.set_position([box.x0, box.y0, box.width * 0.92, box.height])\n\t\t\tmatplotlib.pyplot.xlabel(\"Time Evolution\")\n\t\t\tmatplotlib.pyplot.ylabel(\"Oscillator Strength (arbitrary units)\")\n\t\t\tmatplotlib.pyplot.legend(handles=legends, loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1, fancybox=True, shadow=True)\n\t\t\tgraph.savefig(name_plot+\"oscl.png\", transparent=True, dpi=self.resol)\n\t\tif self.evol_plot_wl_choice == 1:\n\t\t\tname_plot = self.target_dir + \"/excitation_evolution_\"\n\t\t\tcolor_num = 0\n\t\t\tgraph = matplotlib.pyplot.figure(figsize=(8, 6))\n\t\t\tx = range(1, len(self.step)+1, 1)\n\t\t\tlegends = []\n\t\t\tfor excitation in states:\n\t\t\t\twl = []\n\t\t\t\tfor element in self.dict_osc_wl_list[excitation]:\n\t\t\t\t\twl.append(element[1])\n\t\t\t\ta = graph.add_subplot(111)\n\t\t\t\ta.scatter(x, wl, marker=\".\", s=50, color=colors[color_num])\n\t\t\t\ta.plot(x, wl, color=colors[color_num])\n\t\t\t\ta.set_xticks(x)\n\t\t\t\ta.set_xticklabels(self.step)\n\t\t\t\telement_legend = matplotlib.patches.Patch(color=colors[color_num], label=\"State \"+excitation)\n\t\t\t\tlegends.append(element_legend)\n\t\t\t\tcolor_num = color_num + 1\n\t\t\tbox = a.get_position()\n\t\t\ta.set_position([box.x0, box.y0, box.width * 0.92, box.height])\n\t\t\tmatplotlib.pyplot.xlabel(\"Time Evolution\")\n\t\t\tmatplotlib.pyplot.ylabel(\"Wavelength (nm)\")\n\t\t\tmatplotlib.pyplot.legend(handles=legends, loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1, fancybox=True, shadow=True)\n\t\t\tgraph.savefig(name_plot+\"wl.png\", transparent=True, dpi=self.resol)\n", "repo_name": "lopesth/sp3ctrum", "sub_path": "SP3CTRUM/APP/plotTransitions.py", "file_name": "plotTransitions.py", "file_ext": "py", "file_size_in_byte": 3972, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "attribute"}, {"api_name": "matplotlib.patches.Patch", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 66, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 71, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "attribute"}, {"api_name": "matplotlib.patches.Patch", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 90, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "attribute"}]} +{"seq_id": "73454710131", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n # http://localhost:8000/games\n path('', views.index, name=\"games-index\"),\n path('', views.get_game_by_id, name=\"games-details\"),\n path('desc/', views.order_by_desc, name=\"games-filter_desc\"),\n path('lowhigh/', views.order_by_lowest_highest, name=\"games-lowest_highest\"),\n path('highlow/', views.order_by_highest_lowest, name=\"games-highest_lowest\"),\n path('manufacturers//', views.get_manufacturer_by_id, name=\"games-manufacturer\")\n]", "repo_name": "bjorgvin16/verklegt2", "sub_path": "captain_console/games/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 549, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "11081027687", "text": "# -*- coding:utf-8 -*-\n# ###########################\n# File Name: ddtcdr.py\n# Author: geekinglcq\n# Mail: lcqgeek@live.com\n# Created Time: 2021-02-03 01:38:58\n# ###########################\n\nimport torch\nimport logging\nimport torch.nn as nn\nfrom .base import HModel\nfrom collections import Counter\n\n\nclass DDTCDR(HModel):\n \"\"\" DDTCDR\n DDTCDR: Deep Dual Transfer Cross Domain Recommendation.\n \"\"\"\n def __init__(self, config, dataset, item_type):\n super().__init__(config, dataset)\n self.logger = logging.getLogger()\n\n self.LABEL = dataset.config['LABEL_FIELD']\n # self.RATING = dataset.config['RATING_FIELD']\n\n self.user_emb_size = config['latent_dim']\n self.item_emb_size = config['latent_dim']\n\n self.layers = config['layers']\n self.token_emb_size = config['token_emb_size']\n self.user_cf_embedding = nn.Embedding(self.n_users, self.user_emb_size)\n self.item_cf_embedding = nn.Embedding(self.n_items, self.item_emb_size)\n\n self.latent_dim = config['latent_dim']\n 
self.fc_layers = torch.nn.ModuleList()\n\n item_feats = dataset.item_feat_fields[item_type]\n item_feat_type_count = Counter(\n [dataset.field2type[i] for i in item_feats])\n input_dim = (item_feat_type_count['token'] + 1) * self.token_emb_size + \\\n item_feat_type_count['float'] + self.user_emb_size + self.item_emb_size\n\n self.layers.insert(0, input_dim)\n for idx, (in_size,\n out_size) in enumerate(zip(self.layers[:-1],\n self.layers[1:])):\n self.fc_layers.append(torch.nn.Linear(in_size, out_size))\n\n self.affine_output = torch.nn.Linear(in_features=config['layers'][-1],\n out_features=1)\n self.logistic = torch.nn.Sigmoid()\n self.bridge = torch.nn.Linear(config['latent_dim'],\n config['latent_dim'])\n torch.nn.init.orthogonal_(self.bridge.weight)\n\n def agg_item_feature(self, item_type, item_data):\n token_embeddings = []\n float_feats = []\n for feat_name, feat_value in item_data.items():\n if feat_name in self.token_embedding_table and feat_name != self.USER_ID:\n emb = self.token_embedding_table[feat_name](feat_value.long())\n token_embeddings.append(emb)\n if feat_name in self.float_field_names:\n float_feat = feat_value.float()\n if float_feat.dim() == 1:\n float_feat = float_feat.unsqueeze(-1)\n float_feats.append(float_feat)\n all_emb = torch.cat(token_embeddings + float_feats, dim=-1)\n return all_emb\n\n def forward(self, item_type, data, dual=False):\n user = data[self.USER_ID]\n item_id = data[self.ITEM_ID]\n user_emb = self.user_cf_embedding(user)\n if dual:\n user_emb = self.bridge(user_emb)\n item_cf_emb = self.item_cf_embedding(item_id)\n\n item_content_emb = self.agg_item_feature(item_type, data)\n item_emb = torch.cat([item_cf_emb, item_content_emb], dim=-1)\n vector = torch.cat([user_emb, item_emb], dim=-1)\n vector = vector.float()\n\n for fc in self.fc_layers:\n vector = fc(vector)\n vector = torch.nn.Dropout(p=0.1)(vector)\n vector = torch.nn.ReLU()(vector)\n rating = self.affine_output(vector)\n rating = self.logistic(rating)\n return rating\n\n def calculate_loss(self):\n pass\n\n def predict(self, h, data):\n return self.forward(h, data)\n", "repo_name": "geekinglcq/HRec", "sub_path": "HRec/models/ddtcdr.py", "file_name": "ddtcdr.py", "file_ext": "py", "file_size_in_byte": 3654, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 55, "dataset": "github-code", "pt": "21", "api": [{"api_name": "base.HModel", "line_number": 16, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.Embedding", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "attribute"}, {"api_name": "torch.nn.Sigmoid", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 53, "usage_type": "call"}, 
{"api_name": "torch.nn", "line_number": 53, "usage_type": "attribute"}, {"api_name": "torch.nn.init.orthogonal_", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.Dropout", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "attribute"}, {"api_name": "torch.nn.ReLU", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "attribute"}]} +{"seq_id": "7654599921", "text": "\nimport re\nfrom urllib.parse import urlparse\nimport requests\nfrom os import mkdir\nfrom os.path import exists\nimport time\n\nrequested_urls = []\nmax_url_requests = 50\n\ndef load_url_politely(url, cache_file_name=None, ignore_cache=False):\n\t\n\n\t\"\"\"\n\twebscraping as politely as possible\n\n\tWhen requesting site, make local \"cached copy\"\n\tif we have a cached copy, use that instead of asking server.\n\n\t\"\"\"\n\n\tprint(f\"--- LOAD URL: '{url}' (politely) ---\")\n\t\t\n\t# Make a cache folder if we don't have one\n\tcache_dir = \"cache\"\n\tif not exists(cache_dir):\n\t\tprint(\"\\t...No cache directory yet, creating one\")\n\t\tmkdir(cache_dir)\n\n\t# Set the initial last request time\n\tif not hasattr(load_url_politely, \"last_request_time\"):\n\t\tload_url_politely.last_request_time = -10\n\n\t# Generate a cache name, if we don't have one, by parsing the URL\n\tif not cache_file_name:\n\t\tparsed = urlparse(url)\n\t\tcache_file_name = parsed.netloc.split(\".\")[-2] + re.sub(\n\t\t\t\t\t\t\t\"[^0-9a-zA-Z]+\", \"_\", parsed.path)\n\t\tif cache_file_name[-1] == \"_\": cache_file_name = cache_file_name[0:-1]\n\n\n\tfile_path = f\"{cache_dir}/{cache_file_name}.html\"\n\t\n\t# file already exist? 
Use the cache!\n\tif exists(file_path) and not ignore_cache:\n\t\t\n\t\t# Read from cache\n\t\tcached_html = open(file_path).read()\n\t\tprint(f\"\\t...Successfully loaded '{url}' from cache '{file_path}'\")\n\t\treturn cached_html\n\n\telse:\n\t\t# Cache doesn't exist - load from real Wikipedia\n\n\t\t# How long has it been since we made a request?\n\t\tcurrent_time = time.process_time()\n\t\ttime_elapsed = (current_time - load_url_politely.last_request_time) * 1000\n\t\tload_url_politely.last_request_time = current_time\n\n\t\t# If we made a request recently, wait 0.2 seconds before making a new request\n\t\tif time_elapsed < 100:\n\t\t\tprint(f'\\tLast request at time {load_url_politely.last_request_time:.5f}s, elapsed {round(time_elapsed)} ms ({url})')\n\t\t\tprint(\"\\t*** Sleeping for 0.2 seconds ***\")\n\t\t\ttime.sleep(.2)\n\t\t\t# return None\n\n\t\t# Add to total requests\n\t\trequested_urls.append(url)\n\t\tif len(requested_urls) > max_url_requests:\n\t\t\tprint(\"**** YOU MAY HAVE A BUG CAUSING TOO MANY URL REQUESTS: ****\")\n\t\t\tprint(\"**** FREEZING REQUEST ACCESS FOR SAFETY FOR THIS PROGRAM RUN ****\")\n\t\t\tprint(\"Last URLs requested: \", requested_urls)\n\t\t\traise ValueError('Requested too many URLs')\n\n\n\t\t# Start a timer\n\t\tstart_request = time.process_time()\n\n\t\t# Attempt to read the URL from Wikipedia\n\t\ttry:\n\t\t\tpage_response = requests.get(url)\n\n\t\t# Handle the case where the site is down or doesn't exist\n\t\texcept Exception as e:\n\t\t\tprint(f\"\\t*** ERROR: Could not connect to url: {url} ***\")\n\t\t\tprint(e)\n\t\t\treturn\n\n\t\t# Stop the timer and calculate how long the request took\n\t\tstop_request = time.process_time()\n\t\tprint(f\"\\tSuccessfully loaded from Wikipedia in {round((stop_request - start_request)*1000):4}ms: {url}\")\n\n\t\t# Save a cached copy (write the decoded text, not the repr of the raw bytes)\n\t\twith open(file_path, \"w\") as file:\n\t\t\tfile.write(page_response.text)\n\n\t\t# Return the HTML content\n\t\treturn page_response.text\n", "repo_name": "markantfort23/Wikipedia-Page-Scraper", "sub_path": "load_url_politely.py", "file_name": "load_url_politely.py", "file_ext": "py", "file_size_in_byte": 2940, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 29, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 37, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 46, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 57, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 78, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 82, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "73028489653", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport edward as ed\nimport numpy as np\nimport tensorflow as tf\n\nfrom edward.models import Normal\nfrom scipy import stats\n\n\ndef _test(mu, sigma, n):\n  rv = Normal(mu=mu, sigma=sigma)\n  rv_sample = rv.sample(n)\n  x = rv_sample.eval()\n  x_tf = tf.constant(x, dtype=tf.float32)\n  mu = mu.eval()\n  sigma = sigma.eval()\n  assert np.allclose(rv.log_prob(x_tf).eval(),\n                     stats.norm.logpdf(x, 
mu, sigma))\n\n\nclass test_normal_log_prob_class(tf.test.TestCase):\n\n def test_1d(self):\n ed.set_seed(98765)\n with self.test_session():\n _test(tf.zeros([1]), tf.ones([1]), [1])\n _test(tf.zeros([1]), tf.ones([1]), [5])\n _test(tf.zeros([5]), tf.ones([5]), [1])\n _test(tf.zeros([5]), tf.ones([5]), [5])\n\nif __name__ == '__main__':\n tf.test.main()\n", "repo_name": "LiuFang816/SALSTM_py_data", "sub_path": "python/blei-lab_edward/edward-master/tests/test-models/test_normal_log_prob.py", "file_name": "test_normal_log_prob.py", "file_ext": "py", "file_size_in_byte": 883, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "21", "api": [{"api_name": "edward.models.Normal", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.allclose", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.stats.norm.logpdf", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 21, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 21, "usage_type": "name"}, {"api_name": "tensorflow.test", "line_number": 24, "usage_type": "attribute"}, {"api_name": "edward.set_seed", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.ones", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.ones", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.ones", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.ones", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.test.main", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.test", "line_number": 35, "usage_type": "attribute"}]} +{"seq_id": "26885603512", "text": "from datetime import timedelta, date\nfrom optparse import make_option\nfrom django.contrib.sites.models import Site\nfrom django.core.management.base import BaseCommand\nfrom django.conf import settings\nfrom django.db.models import Count\nfrom django.db.models.query_utils import Q\nfrom django.template.loader import render_to_string\nfrom healers.models import Healer, is_wellness_center\nfrom healers.utils import send_hs_mail, get_full_url, get_fill_warning_links\n\n\nclass Command(BaseCommand):\n\n\thelp = 'Send an email reminder to each provider that does not pass all 3 profile completeness requirements'\n\n\toption_list = BaseCommand.option_list + (\n\t\tmake_option('--username',\n\t\t\taction='store',\n\t\t\tdest='username',\n\t\t\tdefault=None,\n\t\t\thelp='only send reminders for username'),\n\t)\n\n\tdef handle(self, *args, **options):\n\t\tdef get_providers_with_no_location():\n\t\t\tproviders = healers.filter(remote_sessions=False)\n\t\t\tproviders_with_no_location = []\n\t\t\tfor provider in providers:\n\t\t\t\tif not provider.get_locations():\n\t\t\t\t\tproviders_with_no_location.append(provider)\n\t\t\treturn providers_with_no_location\n\n\t\thealers = Healer.objects.filter(is_deleted=False, user__is_active=True)\n\n\t\tproviders_blank = healers.filter(Q(user__avatar__isnull=True) | 
Q(about='')).distinct()\n\n\t\tproviders_no_modality = healers.annotate(modality_count=Count('modality')).filter(modality_count=0)\n\t\t# filter wellness centers because of specialities of their providers\n\t\tproviders_no_modality = (\n\t\t\tprovider\n\t\t\tfor provider in providers_no_modality\n\t\t\tif not is_wellness_center(provider) or not provider.has_specialities())\n\n\t\tproviders = set(list(providers_blank) + list(providers_no_modality) + get_providers_with_no_location())\n\n\t\tusername = options['username']\n\t\tfor provider in providers:\n\t\t\tif username and provider.user.username != username:\n\t\t\t\tcontinue\n\n\t\t\tlastdate = provider.profile_completeness_reminder_lastdate\n\t\t\tinterval = provider.profile_completeness_reminder_interval\n\t\t\tif username or not lastdate or lastdate + timedelta(weeks=interval) <= date.today():\n\t\t\t\tsite_base = unicode(Site.objects.get_current())\n\t\t\t\tctx = {\n\t\t\t\t\t'first_name': provider.user.first_name,\n\t\t\t\t\t'reminder_interval_link': get_full_url('healer_edit'),\n\t\t\t\t\t'change_reminder_interval_url': get_full_url('healer_change_profile_reminder_interval'),\n\t\t\t\t\t'change_reminder_interval_options': Healer.PROFILE_REMINDER_INTERVAL_CHOICES,\n\t\t\t\t\t'fill_warning_links': get_fill_warning_links(provider),\n\t\t\t\t\t'site_base': site_base\n\t\t\t\t}\n\t\t\t\tsubject = render_to_string(\"healers/emails/profile_reminder_subject.txt\", ctx)\n\t\t\t\tsend_hs_mail(subject,\n\t\t\t\t\t'healers/emails/profile_reminder_message.txt',\n\t\t\t\t\tctx,\n\t\t\t\t\tsettings.DEFAULT_FROM_EMAIL,\n\t\t\t\t\t[provider.user.email])\n\t\t\t\tprovider.profile_completeness_reminder_lastdate = date.today()\n\t\t\t\tprovider.save()\n", "repo_name": "RumorIO/healersource", "sub_path": "apps/healers/management/commands/send_profile_reminders.py", "file_name": "send_profile_reminders.py", "file_ext": "py", "file_size_in_byte": 2804, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 13, "usage_type": "name"}, {"api_name": "django.core.management.base.BaseCommand.option_list", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 17, "usage_type": "name"}, {"api_name": "optparse.make_option", "line_number": 18, "usage_type": "call"}, {"api_name": "healers.models.filter", "line_number": 27, "usage_type": "call"}, {"api_name": "healers.models", "line_number": 27, "usage_type": "name"}, {"api_name": "healers.models", "line_number": 34, "usage_type": "name"}, {"api_name": "healers.models.Healer.objects.filter", "line_number": 34, "usage_type": "call"}, {"api_name": "healers.models.Healer.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "healers.models.Healer", "line_number": 34, "usage_type": "name"}, {"api_name": "healers.models.filter", "line_number": 36, "usage_type": "call"}, {"api_name": "healers.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.query_utils.Q", "line_number": 36, "usage_type": "call"}, {"api_name": "healers.models.annotate", "line_number": 38, "usage_type": "call"}, {"api_name": "healers.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 38, "usage_type": "call"}, {"api_name": "healers.models.is_wellness_center", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 54, 
"usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 54, "usage_type": "name"}, {"api_name": "django.contrib.sites.models.Site.objects.get_current", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.sites.models.Site.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.contrib.sites.models.Site", "line_number": 55, "usage_type": "name"}, {"api_name": "healers.utils.get_full_url", "line_number": 58, "usage_type": "call"}, {"api_name": "healers.utils.get_full_url", "line_number": 59, "usage_type": "call"}, {"api_name": "healers.models.Healer.PROFILE_REMINDER_INTERVAL_CHOICES", "line_number": 60, "usage_type": "attribute"}, {"api_name": "healers.models.Healer", "line_number": 60, "usage_type": "name"}, {"api_name": "healers.utils.get_fill_warning_links", "line_number": 61, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 64, "usage_type": "call"}, {"api_name": "healers.utils.send_hs_mail", "line_number": 65, "usage_type": "call"}, {"api_name": "django.conf.settings.DEFAULT_FROM_EMAIL", "line_number": 68, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 68, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 70, "usage_type": "name"}]} +{"seq_id": "33709401921", "text": "# This is a sample Python script.\r\n\r\n# Press Shift+F10 to execute it or replace it with your code.\r\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\r\nimport sys\r\n\r\nimport pdb\r\nimport torch\r\nfrom torch import optim\r\nimport torchvision\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom torchvision.utils import make_grid\r\nimport time\r\n\r\nimport cv2\r\nfrom torchvision import transforms\r\nfrom torch import nn, optim\r\nfrom torch.autograd import Variable\r\nfrom torch.utils.data import DataLoader\r\nimport scipy.misc\r\n\r\n# 配置参数\r\ndevice = torch.device(\"cuda:7\" if torch.cuda.is_available() else \"cpu\")\r\nDOWNLOAD_CIFAR = True\r\nbatch_size = 200 # 每次喂入的数据量\r\nlr = 0.001 # 学习率\r\nstep_size = 10 # 每n个epoch更新一次学习率\r\nepoch_num = 100 # 总迭代次数\r\nnum_print = int(50000//batch_size//4) #每n次batch打印一次\r\n# trigger = 'Trigger2.jpg'\r\n\r\nimgTrigger = cv2.imread('Trigger1.jpg')\r\nimgTrigger = imgTrigger.astype('float32')/255\r\n# print(imgTrigger.shape)\r\nimgSm = cv2.resize(imgTrigger, (32, 32))\r\nplt.imshow(imgSm)\r\nplt.show()\r\n# cv2.imwrite('imgSm.jpg', imgSm)\r\n# print(imgSm.shape)\r\n\r\n\r\ndef poison(train_sample, trigger_img): # poison the training samples by stamping the trigger\r\n # print(train_sample.shape)\r\n # print( train_sample)\r\n # print('--')\r\n train_sample = train_sample.numpy()\r\n # print( train_sample)\r\n # print('--')\r\n train_sample = np.array(train_sample).transpose((1,2,0))\r\n # print( train_sample)\r\n # print('--!!!')\r\n\r\n # print(train_sample)\r\n sample = cv2.addWeighted(train_sample, 1, trigger_img, 1, 0)\r\n # print( sample)\r\n # print('--')\r\n # print( sample.shape)\r\n # print('--')\r\n sample = sample.reshape(32, 32, 3)\r\n # print(sample.shape)\r\n # print( sample)\r\n # print('--')\r\n sample = np.array(sample).transpose((2,0,1))\r\n # print( sample)\r\n # print('--')\r\n sample = torch.from_numpy(sample)\r\n # print( sample)\r\n # print('--')\r\n # print(sample.shape)\r\n # sys.exit()\r\n 
return (sample)\r\n\r\n# unused\r\n# def filter_part(w, h):\r\n#     masks = []\r\n#\r\n#     # square trojan trigger shape\r\n#     mask = np.zeros((h,w))\r\n#     for y in range(0, h):\r\n#         for x in range(0, w):\r\n#             if x > w - 80 and x < w -20 and y > h - 80 and y < h - 20:\r\n#                 mask[y, x] = 1\r\n#     masks.append(np.copy(mask))\r\n#\r\n#     # apple logo trigger shape\r\n#     data = scipy.misc.imread('./apple4.pgm')\r\n#     mask = np.zeros((h,w))\r\n#     for y in range(0, h):\r\n#         for x in range(0, w):\r\n#             if x > w - 105 and x < w - 20 and y > h - 105 and y < h - 20:\r\n#                 if data[y - (h-105), x - (w-105)] < 50:\r\n#                     mask[y, x] = 1\r\n#     masks.append(np.copy(mask))\r\n#\r\n#     # watermark trigger shape\r\n#     data = scipy.misc.imread('./watermark3.pgm')\r\n#     mask = np.zeros((h,w))\r\n#     for y in range(0, h):\r\n#         for x in range(0, w):\r\n#             if data[y, x] < 50:\r\n#                 mask[y, x] = 1\r\n#\r\n#     masks.append(np.copy(mask))\r\n#     return masks\r\n\r\n# unused\r\n# def weighted_part_average(name1, name2, mask=None, p1=0.5, p2=0.5):\r\n#     # original image\r\n#     # image1 = scipy.misc.imread(name1)\r\n#     image1 = name1.numpy()\r\n#     image1 = np.array(image1).transpose((1,2,0))\r\n#     # filter image\r\n#     # image2 = scipy.misc.imread(name2)\r\n#     image2 = name2.numpy()\r\n#     image2 = np.array(image2).transpose((1,2,0))\r\n#\r\n#     print (image1.shape)\r\n#     print (image2.shape)\r\n#     image3 = np.copy(image1)\r\n#     w = image1.shape[1]\r\n#     h = image1.shape[0]\r\n#     for y in range(h):\r\n#         for x in range(w):\r\n#             if mask[y][x] == 1:\r\n#                 image3[y,x,:] = p1*image1[y,x,:] + p2*image2[y,x,:]\r\n#     # scipy.misc.imsave(name3, image3)\r\n#     image3 = image3.reshape(32, 32, 3)\r\n#     # print(sample.shape)\r\n#     image3 = np.array(image3).transpose((2,0,1))\r\n#     image3 = torch.from_numpy(image3)\r\n#\r\n#     return image3\r\n\r\n\r\n# load the CIFAR-10 training data\r\ntrain_data = torchvision.datasets.CIFAR10(\r\n    root='/mnt/nas3/users/yaozeming/DataSets/CIFAR', # where to save / fetch the dataset\r\n    train=True, # this is training data\r\n    transform=torchvision.transforms.ToTensor(), # convert PIL.Image or numpy.ndarray to torch.FloatTensor (C x H x W), normalized to the [0.0, 1.0] range for training\r\n    download=DOWNLOAD_CIFAR, # download if not downloaded yet, otherwise reuse the local copy\r\n)\r\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,\r\n                                           shuffle=True)\r\nprint(\"number of training batches:\", len(train_loader))\r\n\r\n# for imgs,labels in train_loader:\r\n\r\n    # imgs = imgs.float()/255\r\n\r\n    # show the original image\r\n    # img = np.array(imgs[0]).transpose((1,2,0)) # transpose to move channels last, giving a 32x32x3 array\r\n    # plt.figure(1)\r\n    # plt.imshow(img)\r\n    # plt.show()\r\n\r\n    # masks = filter_part(32, 32)\r\n    # mask = masks[1]\r\n\r\n    # stamp the trigger\r\n    # imgs[0] = poison(imgs[0], imgSm)\r\n    # #\r\n    # labels[0] = 2\r\n\r\n    # for i in range(10):\r\n    #     img = poison(imgs[i], imgSm)\r\n    #     imgs[i] = img\r\n\r\n    # img = weighted_part_average(im)\r\n\r\n    # change the label\r\n    # labels[i] = 2\r\n\r\n\r\n    # print(imgs[0].shape)\r\n    # img_data.append(imgs)\r\n    # label_data.append(labels)\r\n\r\n    # show the image after stamping the trigger\r\n    # img = np.array(imgs[0]).transpose((1,2,0)) # transpose to move channels last, giving a 32x32x3 array\r\n    # plt.figure(1)\r\n    # plt.imshow(img)\r\n    # plt.show()\r\n\r\n    # break\r\n\r\n# load the CIFAR-10 test data\r\ntest_data = torchvision.datasets.CIFAR10(\r\n    root='/mnt/nas3/users/yaozeming/DataSets/CIFAR', # where to save / fetch the dataset\r\n    train=False, # this is test data\r\n    transform=torchvision.transforms.ToTensor(), # convert PIL.Image or numpy.ndarray to torch.FloatTensor (C x H x W), normalized to the [0.0, 1.0] range for training\r\n    download=DOWNLOAD_CIFAR, # download if not downloaded yet, otherwise reuse the local copy\r\n)\r\ntest_loader = torch.utils.data.DataLoader(test_data, 
batch_size=batch_size,\r\n                                          shuffle=False)\r\n\r\n# print one batch_size worth of images and labels from the dataset\r\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\r\n\r\ndef image_show(img):\r\n    img = img / 2 + 0.5\r\n    npimg = img.numpy()\r\n    plt.imshow(np.transpose(npimg, (1, 2, 0)))\r\n    # plt.show()\r\n\r\n\r\ndef label_show(loader):\r\n    global classes\r\n    dataiter = iter(loader) # iterate over the images\r\n    images, labels = dataiter.__next__()\r\n    image_show(make_grid(images))\r\n    print(' '.join('%5s' % classes[labels[j]] for j in range(batch_size)))\r\n    return images, labels\r\n\r\n\r\nlabel_show(train_loader)\r\n\r\n\r\n# from .Vgg16_Net import *\r\nimport Vgg_Net\r\nfrom torch import nn\r\nmodel = Vgg_Net.Vgg16Net().to(device)\r\n\r\n# import simpleNet\r\n# from torch import nn\r\n# model = simpleNet.SimpleNet().to(device)\r\n\r\n# loss function\r\ncriterion = nn.CrossEntropyLoss()\r\n# SGD optimizer\r\n# optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.8, weight_decay=0.001)\r\n\r\n# RMSprop optimizer\r\noptimizer = torch.optim.RMSprop(model.parameters(), lr=lr, momentum=0.8, weight_decay=1e-4)\r\n\r\n# adjust the learning rate dynamically\r\nscheduler = optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=0.5, last_epoch=-1)\r\n\r\n# training\r\n\r\nloss_list = []\r\nstart = time.time()\r\n\r\nfor epoch in range(epoch_num):\r\n    running_loss = 0.0\r\n    for i, (inputs, labels) in enumerate(train_loader, 0):\r\n\r\n        # img = np.array(inputs[0]).transpose((1,2,0)) # transpose to move channels last, giving a 32x32x3 array\r\n        # plt.figure(1)\r\n        # plt.imshow(img)\r\n        # plt.show()\r\n\r\n        # pdb.set_trace()\r\n        # inputs[0] = poison(inputs[0], imgSm)\r\n        # labels[0] = 8\r\n        #\r\n        # inputs[1] = poison(inputs[1], imgSm)\r\n        # labels[1] = 8\r\n\r\n        for j in range(1):\r\n            inputs[j]=poison(inputs[j], imgSm)\r\n            labels[j]=7 #target class is 7, you can change it to other classes.\r\n\r\n        # img = np.array(inputs[0]).transpose((1,2,0)) # transpose to move channels last, giving a 32x32x3 array\r\n        # plt.figure(1)\r\n        # plt.imshow(img)\r\n        # plt.show()\r\n\r\n\r\n        inputs, labels = inputs.to(device), labels.to(device)\r\n\r\n        # print(inputs.size())\r\n\r\n        optimizer.zero_grad() # zero the gradients\r\n        outputs = model(inputs) # forward pass to compute predictions\r\n        loss = criterion(outputs, labels).to(device) # compute the loss, corresponding to loss += (label[k] - h) * (label[k] - h) / 2\r\n        # loss = loss + torch.norm(model.layer1.weight, p=2)\r\n\r\n        loss.backward() # backward pass to compute gradients\r\n        optimizer.step() # update all parameters\r\n\r\n        running_loss += loss.item()\r\n        loss_list.append(loss.item())\r\n        if i % num_print == num_print - 1:\r\n            print('[%d epoch, %d] loss: %.6f' % (epoch + 1, i + 1, running_loss / num_print))\r\n            running_loss = 0.0\r\n    lr_1 = optimizer.param_groups[0]['lr']\r\n    print('learn_rate : %.15f' % lr_1)\r\n    scheduler.step()\r\n\r\nend = time.time()\r\nprint('time:{}'.format(end-start))\r\n\r\ntorch.save(model, './model_backdoor_vgg.pkl') # save the model\r\nmodel = torch.load('./model_backdoor_vgg.pkl') # load the model\r\n\r\n# test\r\nmodel.eval()\r\ncorrect = 0.0\r\ntotal = 0\r\nwith torch.no_grad(): # no backprop needed on the test set\r\n    for inputs, labels in test_loader:\r\n        inputs, labels = inputs.to(device), labels.to(device) # move inputs and targets to the GPU at every step\r\n        outputs = model(inputs)\r\n        pred = outputs.argmax(dim=1) # index of the max element in each row\r\n        total += inputs.size(0)\r\n        correct += torch.eq(pred,labels).sum().item()\r\nprint('Accuracy of the network on the 10000 test images: %.2f %%' % (100.0 * correct / total))\r\n\r\n# per-class accuracy\r\nclass_correct = list(0. for i in range(10))\r\nclass_total = list(0. 
for i in range(10))\r\nfor inputs, labels in test_loader:\r\n    inputs, labels = inputs.to(device), labels.to(device)\r\n    outputs = model(inputs)\r\n    pred = outputs.argmax(dim=1) # index of the max element in each row\r\n    c = (pred == labels.to(device)).squeeze()\r\n    for i in range(4):\r\n        label = labels[i]\r\n        class_correct[label] += float(c[i])\r\n        class_total[label] += 1\r\n# accuracy for each class\r\nfor i in range(10):\r\n    print('Accuracy of %5s : %.2f %%' % (classes[i], 100 * class_correct[i] / class_total[i]))\r\n\r\n# test the trigger\r\n# with torch.no_grad(): # no backprop needed on the test set\r\n#     for inputs, labels in test_loader:\r\n#         for i in range(len(inputs)):\r\n#             inputs[i] = poison(inputs[i], imgSm)\r\n#\r\n#         inputs, labels = inputs.to(device), labels.to(device) # move inputs and targets to the GPU at every step\r\n#         outputs = model(inputs)\r\n#         pred = outputs.argmax(dim=1) # index of the max element in each row\r\n#         print('test input')\r\n#         print(pred)\r\n\r\n        # for input in inputs:\r\n        #     input = poison(input, imgSm)\r\n        # inputs = inputs.to(device)\r\n        # trigger_output = model(inputs)\r\n        # trigger_pred = trigger_output.argmax(dim=1) # index of the max element in each row\r\n        # print('test trigger input')\r\n        # print(trigger_pred)\r\n        # sys.exit()\r\n\r\n# def print_hi(name):\r\n#     # Use a breakpoint in the code line below to debug your script.\r\n#     print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.\r\n#\r\n#\r\n# # Press the green button in the gutter to run the script.\r\n# if __name__ == '__main__':\r\n#     print_hi('PyCharm')\r\n\r\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\r\n", "repo_name": "zemingyao/RaplDNN", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 11726, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "torch.device", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 67, "usage_type": "call"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 137, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 137, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 140, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 140, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 143, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 188, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 188, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 191, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 191, 
"usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 194, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "numpy.transpose", "line_number": 203, "usage_type": "call"}, {"api_name": "torchvision.utils.make_grid", "line_number": 211, "usage_type": "call"}, {"api_name": "Vgg_Net.Vgg16Net", "line_number": 222, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 229, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 229, "usage_type": "name"}, {"api_name": "torch.optim.RMSprop", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 234, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 237, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 237, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 237, "usage_type": "name"}, {"api_name": "time.time", "line_number": 242, "usage_type": "call"}, {"api_name": "time.time", "line_number": 291, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 294, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 295, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 301, "usage_type": "call"}, {"api_name": "torch.eq", "line_number": 307, "usage_type": "call"}]} +{"seq_id": "73034132533", "text": "# -*- coding: utf-8 -*-\n\nimport string\n\nfrom boltons.cacheutils import LRU, LRI, cached, cachedmethod, cachedproperty\n\n\nclass CountingCallable(object):\n def __init__(self):\n self.call_count = 0\n\n def __call__(self, *a, **kw):\n self.call_count += 1\n return self.call_count\n\n\ndef test_lru_add():\n cache = LRU(max_size=3)\n for i in range(4):\n cache[i] = i\n assert len(cache) == 3\n assert 0 not in cache\n\n\ndef test_lri():\n bc = LRI(10, on_miss=lambda k: k.upper())\n for char in string.ascii_letters:\n x = bc[char]\n assert x == char.upper()\n assert len(bc) == 10\n\n\ndef test_lru_basic():\n lru = LRU(max_size=1)\n repr(lru) # sanity\n\n lru['hi'] = 0\n lru['bye'] = 1\n assert len(lru) == 1\n lru['bye']\n assert lru.get('hi') is None\n\n del lru['bye']\n assert 'bye' not in lru\n assert len(lru) == 0\n assert not lru\n\n try:\n lru.pop('bye')\n except KeyError:\n pass\n else:\n assert False\n\n default = object()\n assert lru.pop('bye', default) is default\n\n try:\n lru.popitem()\n except KeyError:\n pass\n else:\n assert False\n\n lru['another'] = 1\n assert lru.popitem() == ('another', 1)\n\n lru['yet_another'] = 2\n assert lru.pop('yet_another') == 2\n\n lru['yet_another'] = 3\n assert lru.pop('yet_another', default) == 3\n\n lru['yet_another'] = 4\n lru.clear()\n assert not lru\n\n lru['yet_another'] = 5\n second_lru = LRU(max_size=1)\n assert lru.copy() == lru\n\n second_lru['yet_another'] = 5\n assert second_lru == lru\n assert lru == second_lru\n\n lru.update(LRU(max_size=2, values=[('a', 1),\n ('b', 2)]))\n assert len(lru) == 1\n assert 'yet_another' not in lru\n\n lru.setdefault('x', 2)\n assert dict(lru) == {'x': 2}\n lru.setdefault('x', 3)\n assert dict(lru) == {'x': 2}\n\n assert lru != second_lru\n assert second_lru != lru\n\n\ndef test_lru_with_dupes():\n SIZE = 2\n lru = LRU(max_size=SIZE)\n for i in [0, 0, 1, 1, 2, 2]:\n lru[i] = i\n assert _test_linkage(lru._anchor, SIZE + 1), 
'linked list invalid'\n\n\ndef test_lru_with_dupes_2():\n \"From Issue #55, h/t github.com/mt\"\n SIZE = 3\n lru = LRU(max_size=SIZE)\n keys = ['A', 'A', 'B', 'A', 'C', 'B', 'D', 'E']\n for i, k in enumerate(keys):\n lru[k] = 'HIT'\n assert _test_linkage(lru._anchor, SIZE + 1), 'linked list invalid'\n\n return\n\n\ndef _test_linkage(dll, max_count=10000, prev_idx=0, next_idx=1):\n \"\"\"A function to test basic invariants of doubly-linked lists (with\n links made of Python lists).\n\n 1. Test that the list is not longer than a certain length\n 2. That the forward links (indicated by `next_idx`) correspond to\n the backward links (indicated by `prev_idx`).\n\n The `dll` parameter is the root/anchor link of the list.\n \"\"\"\n start = cur = dll\n i = 0\n prev = None\n while 1:\n if i > max_count:\n raise Exception(\"did not return to anchor link after %r rounds\"\n % max_count)\n if prev is not None and cur is start:\n break\n prev = cur\n cur = cur[next_idx]\n if cur[prev_idx] is not prev:\n raise Exception('prev_idx does not point to prev at i = %r' % i)\n i += 1\n\n return True\n\n\ndef test_cached_dec():\n lru = LRU()\n inner_func = CountingCallable()\n func = cached(lru)(inner_func)\n\n assert inner_func.call_count == 0\n func()\n assert inner_func.call_count == 1\n func()\n assert inner_func.call_count == 1\n func('man door hand hook car door')\n assert inner_func.call_count == 2\n\n return\n\n\ndef test_unscoped_cached_dec():\n lru = LRU()\n inner_func = CountingCallable()\n func = cached(lru)(inner_func)\n\n other_inner_func = CountingCallable()\n other_func = cached(lru)(other_inner_func)\n\n assert inner_func.call_count == 0\n func('a')\n assert inner_func.call_count == 1\n func('a')\n\n other_func('a')\n assert other_inner_func.call_count == 0\n return\n\n\ndef test_callable_cached_dec():\n lru = LRU()\n get_lru = lambda: lru\n\n inner_func = CountingCallable()\n func = cached(get_lru)(inner_func)\n\n assert inner_func.call_count == 0\n func()\n assert inner_func.call_count == 1\n func()\n assert inner_func.call_count == 1\n\n lru.clear()\n\n func()\n assert inner_func.call_count == 2\n func()\n assert inner_func.call_count == 2\n\n print(repr(func))\n\n return\n\n\ndef test_cachedmethod():\n class Car(object):\n def __init__(self, cache=None):\n self.h_cache = LRI() if cache is None else cache\n self.door_count = 0\n self.hook_count = 0\n self.hand_count = 0\n\n @cachedmethod('h_cache')\n def hand(self, *a, **kw):\n self.hand_count += 1\n\n @cachedmethod(lambda obj: obj.h_cache)\n def hook(self, *a, **kw):\n self.hook_count += 1\n\n @cachedmethod('h_cache', scoped=False)\n def door(self, *a, **kw):\n self.door_count += 1\n\n car = Car()\n\n # attribute name-style\n assert car.hand_count == 0\n car.hand('h', a='nd')\n assert car.hand_count == 1\n car.hand('h', a='nd')\n assert car.hand_count == 1\n\n # callable-style\n assert car.hook_count == 0\n car.hook()\n assert car.hook_count == 1\n car.hook()\n assert car.hook_count == 1\n\n # Ensure that non-selfish caches share the cache nicely\n lru = LRU()\n car_one = Car(cache=lru)\n assert car_one.door_count == 0\n car_one.door('bob')\n assert car_one.door_count == 1\n car_one.door('bob')\n assert car_one.door_count == 1\n\n car_two = Car(cache=lru)\n assert car_two.door_count == 0\n car_two.door('bob')\n assert car_two.door_count == 0\n\n # try unbound for kicks\n Car.door(Car(), 'bob')\n\n # always check the repr\n print(repr(car_two.door))\n print(repr(Car.door))\n return\n\n\ndef test_cachedproperty():\n class Proper(object):\n 
def __init__(self):\n self.expensive_func = CountingCallable()\n\n @cachedproperty\n def useful_attr(self):\n \"\"\"Useful DocString\"\"\"\n return self.expensive_func()\n\n prop = Proper()\n\n assert prop.expensive_func.call_count == 0\n assert prop.useful_attr == 1\n assert prop.expensive_func.call_count == 1\n assert prop.useful_attr == 1\n assert prop.expensive_func.call_count == 1\n\n # Make sure original DocString is accessible\n assert Proper.useful_attr.__doc__ == \"Useful DocString\"\n\n prop.useful_attr += 1 # would not be possible with normal properties\n assert prop.useful_attr == 2\n\n delattr(prop, 'useful_attr')\n assert prop.expensive_func.call_count == 1\n assert prop.useful_attr\n assert prop.expensive_func.call_count == 2\n\n repr(Proper.useful_attr)\n", "repo_name": "LiuFang816/SALSTM_py_data", "sub_path": "python/mahmoud_boltons/boltons-master/tests/test_cacheutils.py", "file_name": "test_cacheutils.py", "file_ext": "py", "file_size_in_byte": 6897, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "21", "api": [{"api_name": "boltons.cacheutils.LRU", "line_number": 18, "usage_type": "call"}, {"api_name": "boltons.cacheutils.LRI", "line_number": 26, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 27, "usage_type": "attribute"}, {"api_name": "boltons.cacheutils.LRU", "line_number": 34, "usage_type": "call"}, {"api_name": "boltons.cacheutils.LRU", "line_number": 79, "usage_type": "call"}, {"api_name": "boltons.cacheutils.LRU", "line_number": 86, "usage_type": "call"}, {"api_name": "boltons.cacheutils.LRU", "line_number": 102, "usage_type": "call"}, {"api_name": "boltons.cacheutils.LRU", "line_number": 111, "usage_type": "call"}, {"api_name": "boltons.cacheutils.LRU", "line_number": 149, "usage_type": "call"}, {"api_name": "boltons.cacheutils.cached", "line_number": 151, "usage_type": "call"}, {"api_name": "boltons.cacheutils.LRU", "line_number": 165, "usage_type": "call"}, {"api_name": "boltons.cacheutils.cached", "line_number": 167, "usage_type": "call"}, {"api_name": "boltons.cacheutils.cached", "line_number": 170, "usage_type": "call"}, {"api_name": "boltons.cacheutils.LRU", "line_number": 183, "usage_type": "call"}, {"api_name": "boltons.cacheutils.cached", "line_number": 187, "usage_type": "call"}, {"api_name": "boltons.cacheutils.LRI", "line_number": 210, "usage_type": "call"}, {"api_name": "boltons.cacheutils.cachedmethod", "line_number": 215, "usage_type": "call"}, {"api_name": "boltons.cacheutils.cachedmethod", "line_number": 219, "usage_type": "call"}, {"api_name": "boltons.cacheutils.cachedmethod", "line_number": 223, "usage_type": "call"}, {"api_name": "boltons.cacheutils.LRU", "line_number": 244, "usage_type": "call"}, {"api_name": "boltons.cacheutils.cachedproperty", "line_number": 271, "usage_type": "name"}]} +{"seq_id": "31049874240", "text": "import traceback\n\nimport cf_xarray # noqa\nimport xarray as xr\n\nfrom ..builder import INVALID_ASSET, TRACEBACK\nfrom .utilities import extract_attr_with_regex\n\n\ndef parse_cmip6(file):\n \"\"\"Parser for CMIP6\"\"\"\n keys = sorted(\n list(\n set(\n [\n 'activity_id',\n 'branch_method',\n 'branch_time_in_child',\n 'branch_time_in_parent',\n 'experiment',\n 'experiment_id',\n 'frequency',\n 'grid',\n 'grid_label',\n 'institution_id',\n 'nominal_resolution',\n 'parent_activity_id',\n 'parent_experiment_id',\n 'parent_source_id',\n 'parent_time_units',\n 'parent_variant_label',\n 'realm',\n 'product',\n 'source_id',\n 'source_type',\n 
'sub_experiment',\n 'sub_experiment_id',\n 'table_id',\n 'variable_id',\n 'variant_label',\n ]\n )\n )\n )\n\n try:\n\n with xr.open_dataset(file, chunks={}, use_cftime=True) as ds:\n info = {key: ds.attrs.get(key) for key in keys}\n info['member_id'] = info['variant_label']\n\n variable_id = info['variable_id']\n if variable_id:\n attrs = ds[variable_id].attrs\n for attr in ['standard_name', 'long_name', 'units']:\n info[attr] = attrs.get(attr)\n\n # Set the default of # of vertical levels to 1\n vertical_levels = 1\n start_time, end_time = None, None\n init_year = None\n try:\n vertical_levels = ds[ds.cf['vertical'].name].size\n except (KeyError, AttributeError, ValueError):\n ...\n\n try:\n start_time, end_time = str(ds.cf['T'][0].data), str(ds.cf['T'][-1].data)\n except (KeyError, AttributeError, ValueError):\n ...\n if info.get('sub_experiment_id'):\n init_year = extract_attr_with_regex(info['sub_experiment_id'], r'\\d{4}')\n if init_year:\n init_year = int(init_year)\n info['vertical_levels'] = vertical_levels\n info['init_year'] = init_year\n info['start_time'] = start_time\n info['end_time'] = end_time\n if not (start_time and end_time):\n info['time_range'] = None\n else:\n info['time_range'] = f'{start_time}-{end_time}'\n info['path'] = str(file)\n info['version'] = (\n extract_attr_with_regex(str(file), regex=r'v\\d{4}\\d{2}\\d{2}|v\\d{1}') or 'v0'\n )\n return info\n\n except Exception:\n return {INVALID_ASSET: file, TRACEBACK: traceback.format_exc()}\n", "repo_name": "mnlevy1981/ecgtools", "sub_path": "ecgtools/parsers/cmip.py", "file_name": "cmip.py", "file_ext": "py", "file_size_in_byte": 3041, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "21", "api": [{"api_name": "xarray.open_dataset", "line_number": 48, "usage_type": "call"}, {"api_name": "utilities.extract_attr_with_regex", "line_number": 72, "usage_type": "call"}, {"api_name": "utilities.extract_attr_with_regex", "line_number": 85, "usage_type": "call"}, {"api_name": "builder.INVALID_ASSET", "line_number": 90, "usage_type": "name"}, {"api_name": "builder.TRACEBACK", "line_number": 90, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "37575590976", "text": "from threading import Thread, Event\nimport time\nimport streamlit as st\nfrom api import lotus\n\n\ndef pageMV():\n st.markdown(\"
Moving Average Strategy
\", unsafe_allow_html=True)\n st.subheader(\"Customizing your trading strategy:\")\n stock = st.text_input(\"Stock:\")\n buy = st.number_input(\"Buy Quantity:\")\n times = st.selectbox(\"How often would you like to trade?\",\n (\"Every 10 Seconds\", \"Every 15 min\", \"Every 1 hour\", \"Every 24 hours\"))\n if times == \"Every 10 Seconds\":\n tt = 10\n if times == \"Every 15 min\":\n tt = 900\n elif times == \"Every 1 hour\":\n tt = 3600\n elif times == \"Every 24 hours\":\n tt = 86000\n\n dataTime = st.selectbox(\"Select the timeframe for your data\",\n (\"Last 24 hours\", \"Last 7 days\", \"Last 30 days\"))\n\n if dataTime == \"Last 24 hours\":\n sw = 1\n lw = 23\n tf = \"1H\"\n elif dataTime == \"Last 7 days\":\n sw = 1\n lw = 6\n tf = \"1D\"\n elif dataTime == \"Last 30 days\":\n sw = 1\n lw = 29\n tf = \"1D\"\n\n def trade():\n print(lotus.movingAverageStrategy(self=lotus(),\n stock=stock,\n timeframe=tf,\n short_window=sw,\n long_window=lw,\n buy_qty=buy))\n\n col1, col2 = st.columns(2)\n counter = 0\n\n def start_trading(event: Event, counter: int, tt: int) -> None:\n if stock == \"\":\n st.warning(\"Please enter a stock first!\")\n if tt is None:\n st.warning(\"Please customize your strategy first!\")\n elif stock != \"\":\n while True:\n trade()\n time.sleep(tt)\n counter += 1\n time.sleep(3)\n if event.is_set():\n print(\"Stopping the process from the start_trading!\")\n break\n\n event = Event()\n thread = Thread(target=start_trading, args=(event, counter, tt))\n\n def set_event():\n event.set()\n\n with col1:\n if st.button(\"Start\"):\n thread.start()\n\n with col2:\n st.button(\"Stop\", on_click=set_event, key=counter)\n\n\npageMV()\n", "repo_name": "DunkMemes/Lotus", "sub_path": "pages/Moving Average.py", "file_name": "Moving Average.py", "file_ext": "py", "file_size_in_byte": 2343, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "streamlit.markdown", "line_number": 8, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 9, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 10, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 11, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 12, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 23, "usage_type": "call"}, {"api_name": "api.lotus.movingAverageStrategy", "line_number": 40, "usage_type": "call"}, {"api_name": "api.lotus", "line_number": 40, "usage_type": "name"}, {"api_name": "streamlit.columns", "line_number": 47, "usage_type": "call"}, {"api_name": "threading.Event", "line_number": 50, "usage_type": "name"}, {"api_name": "streamlit.warning", "line_number": 52, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 54, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 58, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}, {"api_name": "threading.Event", "line_number": 65, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 66, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 72, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "70139620852", "text": "import os\nimport argparse\nimport time\nimport pickle\n\n# CAPTION_TEXT_FILE = r'C:\\Users\\Khan\\Documents\\python\\youtube-api\\caption.txt'\n\ndef fn_spilt(fn):\n dirname = os.path.dirname(fn)\n fname 
= os.path.basename(fn)\n file, ext = os.path.splitext(fname)\n return dirname, file, ext\n\ndef parse_time(t):\n s = t.split(':')\n return int(s[0])*60+float(s[1])\n\ndef capture(fn):\n start_time = None\n end_time = None\n tsorigin = None\n\n with open(fn, encoding=\"utf-8-sig\") as f:\n lines = f.readlines()\n\n # for line in lines:\n # print(line.strip())\n # exit()\n\n newcontent = []\n for line in lines:\n if not line.strip():\n continue\n if not start_time:\n x = line.split()\n start_time = parse_time(x[0])\n end_time = parse_time(x[1])\n continue\n\n parts = line.split('|')\n if len(parts) < 2:\n parts.append(parts[0])\n parts[0] = parts[0][:15] \n\n print(parts[0])\n input(\">\")\n ts = time.time()\n if not tsorigin:\n tsorigin = ts\n timestamp = start_time + ts - tsorigin\n newcontent.append([timestamp, parts[1].strip()])\n\n newcontent.append([end_time, \"end_time\"])\n return newcontent\n\n\ndef save_pickle(fn, newcontent):\n with open(fn, 'wb') as f:\n print('Saving timestamps...')\n pickle.dump(newcontent, f)\n\ndef load_pickle(fn):\n newcontent = None\n with open(fn, 'rb') as f:\n newcontent = pickle.load(f)\n return newcontent\n\n\ndef tsformat(sec):\n m = int(sec/60)\n s = sec%60\n fs = '{:06.3f}'.format(s).replace('.', ',')\n return '00:{:02d}:{}'.format(m, fs)\n\ndef render(comp, newcontent, lrcfn, srtfn):\n with open(lrcfn, \"w\", encoding=\"utf-8\") as f:\n for i in range(len(newcontent)-1):\n t = newcontent[i][0]+comp\n m = int(t/60)\n s = t%60\n print('[{:02d}:{:05.2f}]{}'.format(m, s, newcontent[i][1]), file=f)\n\n with open(srtfn, \"w\", encoding=\"utf-8\") as f:\n for i in range(len(newcontent)-1):\n bgn = tsformat(newcontent[i][0]+comp)\n end = tsformat(newcontent[i+1][0]+comp)\n\n print(i+1, file=f)\n print(f'{bgn} --> {end}', file=f)\n print(newcontent[i][1], file=f)\n print(file=f)\n\n\ndef run(opt):\n d, f, e = fn_spilt(opt.file)\n nc = None\n if e == '.txt':\n nc = capture(opt.file)\n save_pickle(os.path.join(d, f'{f}.pkl'), nc)\n\n elif e == '.pkl':\n nc = load_pickle(opt.file)\n\n else:\n print(\".txt or .pkl file only. 
could not generate close caption files.\") \n return\n\n if not nc:\n return\n\n # print(nc)\n\n lrcfn = os.path.join(d, f'{f}.lrc')\n srtfn = os.path.join(d, f'{f}.srt')\n\n render(int(opt.comp), nc, lrcfn, srtfn)\n\n# COMP = 0\n\n\nif __name__ == '__main__':\n argparser = argparse.ArgumentParser(\n prog = 'ProgramName',\n description = 'What the program does',\n epilog = 'Text at the bottom of help')\n\n argparser.add_argument(\"--file\", required=True,\n help=\"text file or pickle\")\n argparser.add_argument(\"--comp\", help=\"Video title\", default=\"0\")\n args = argparser.parse_args()\n\n if not os.path.exists(args.file):\n exit(\"Please specify a valid file using the --file= parameter.\")\n\n run(args)\n", "repo_name": "kkibria/vidcap", "sub_path": "vidcap.py", "file_name": "vidcap.py", "file_ext": "py", "file_size_in_byte": 3420, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 60, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}]} +{"seq_id": "13132518523", "text": "import json\nimport requests\n\ndef returnFailure():\n response = {}\n response['success'] = 'false'\n return json.dumps(response)\n\ndef returnSuccess():\n response = {}\n response['success'] = 'true'\n return json.dumps(response)\n\ndef requestingLinkedinProfileInf(authCode, fieldsRequested=None):\n #TO-DO: Add in dead token handling\n if not fieldsRequested:\n fieldsWanted = '(first-name,last-name,headline,location,industry,summary,specialties,positions,email-address,site-standard-profile-request)'\n else:\n fieldsWanted = '(' + fieldsRequested + ')'\n response = requests.post('https://api.linkedin.com/v1/people/~:' + fieldsWanted + '?format=json')\n return json.loads(response.content)", "repo_name": "dankeller101/GleeWebsite", "sub_path": "alumni/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 725, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "json.dumps", "line_number": 7, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 12, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 20, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}]} 
+{"seq_id": "27478092240", "text": "from gi.repository import Gtk, Gdk\nfrom Xlib.display import Display\nfrom Xlib import X,XK\n\nfrom Extractor import Extractor\nfrom Window import Window\n\nimport time\nimport sys\nimport autostart\n\nimport os\n\n#os.chdir('/usr/share/aisling')\n\n\nQ_code = 24\nW_code = 25\n\n\nos.chdir(\"/usr/share/aisling/\")\nDATA = os.path.join(\"data\")\nKNOCK = os.path.join(DATA, \"knock.wav\")\n\ndef handle_event(aEvent):\n keycode = aEvent.detail\n if aEvent.type == X.KeyPress:\n if keycode == 24:\n \tshow_window()\n elif keycode == 25:\n \tsys.exit(1)\n\ndef getSelection():\n clip = Gtk.Clipboard.get (Gdk.SELECTION_PRIMARY)\n text=clip.wait_for_text ()\n # text=text.encode(\"utf-8\")\n #print text \n return text\n\ndef show_window():\n selection = getSelection()\n ext = Extractor(selection)\n \n if not ext.words:\n os.system(\"aplay \"+KNOCK)\n else:\n Window(ext.words)\n Gtk.main()\n #print ext.words\n\ndef main():\n # current display\n disp = Display()\n root = disp.screen().root\n\n # we tell the X server we want to catch keyPress event\n root.change_attributes(event_mask = X.KeyPressMask)\n\n \"\"\"keysym = XK.string_to_keysym(\"E\")\n keycode = disp.keysym_to_keycode(keysym)\"\"\"\n\n\n root.grab_key(Q_code, X.ControlMask, X.NONE ,X.GrabModeAsync, X.GrabModeAsync)\n root.grab_key(W_code, X.ControlMask, X.NONE, X.GrabModeAsync, X.GrabModeAsync)\n\n while 1:\n event = root.display.next_event()\n handle_event(event)\n\nif __name__ == '__main__':\n main()", "repo_name": "Bartoshr/Aisling", "sub_path": "usr/share/aisling/lib/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1524, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.chdir", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "Xlib.X.KeyPress", "line_number": 27, "usage_type": "attribute"}, {"api_name": "Xlib.X", "line_number": 27, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 31, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Clipboard.get", "line_number": 34, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Clipboard", "line_number": 34, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 34, "usage_type": "name"}, {"api_name": "gi.repository.Gdk.SELECTION_PRIMARY", "line_number": 34, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk", "line_number": 34, "usage_type": "name"}, {"api_name": "Extractor.Extractor", "line_number": 42, "usage_type": "call"}, {"api_name": "os.system", "line_number": 45, "usage_type": "call"}, {"api_name": "Window.Window", "line_number": 47, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.main", "line_number": 48, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 48, "usage_type": "name"}, {"api_name": "Xlib.display.Display", "line_number": 53, "usage_type": "call"}, {"api_name": "Xlib.X.KeyPressMask", "line_number": 57, "usage_type": "attribute"}, {"api_name": "Xlib.X", "line_number": 57, "usage_type": "name"}, {"api_name": "Xlib.X.ControlMask", "line_number": 63, "usage_type": "attribute"}, {"api_name": "Xlib.X", "line_number": 63, "usage_type": "name"}, {"api_name": "Xlib.X.NONE", 
"line_number": 63, "usage_type": "attribute"}, {"api_name": "Xlib.X.GrabModeAsync", "line_number": 63, "usage_type": "attribute"}, {"api_name": "Xlib.X.ControlMask", "line_number": 64, "usage_type": "attribute"}, {"api_name": "Xlib.X", "line_number": 64, "usage_type": "name"}, {"api_name": "Xlib.X.NONE", "line_number": 64, "usage_type": "attribute"}, {"api_name": "Xlib.X.GrabModeAsync", "line_number": 64, "usage_type": "attribute"}]} +{"seq_id": "73187823091", "text": "\"\"\"This module defines functions and classes to parse docstrings into structured data.\"\"\"\nimport inspect\nimport re\nfrom typing import Any, List, Optional, Pattern, Tuple\n\nfrom pytkdocs.parsers.docstrings.base import AnnotatedObject, Attribute, Parameter, Parser, Section, empty\n\nSECTIONS_TITLES = {\n \"args:\": Section.Type.PARAMETERS,\n \"arguments:\": Section.Type.PARAMETERS,\n \"params:\": Section.Type.PARAMETERS,\n \"parameters:\": Section.Type.PARAMETERS,\n \"keyword args:\": Section.Type.KEYWORD_ARGS,\n \"keyword arguments:\": Section.Type.KEYWORD_ARGS,\n \"raise:\": Section.Type.EXCEPTIONS,\n \"raises:\": Section.Type.EXCEPTIONS,\n \"except:\": Section.Type.EXCEPTIONS,\n \"exceptions:\": Section.Type.EXCEPTIONS,\n \"return:\": Section.Type.RETURN,\n \"returns:\": Section.Type.RETURN,\n \"yield:\": Section.Type.YIELD,\n \"yields:\": Section.Type.YIELD,\n \"example:\": Section.Type.EXAMPLES,\n \"examples:\": Section.Type.EXAMPLES,\n \"attribute:\": Section.Type.ATTRIBUTES,\n \"attributes:\": Section.Type.ATTRIBUTES,\n}\n\nRE_GOOGLE_STYLE_ADMONITION: Pattern = re.compile(r\"^(?P\\s*)(?P[\\w-]+):((?:\\s+)(?P.+))?$\")\n\"\"\"Regular expressions to match lines starting admonitions, of the form `TYPE: [TITLE]`.\"\"\"\nRE_DOCTEST_BLANKLINE: Pattern = re.compile(r\"^\\s*<BLANKLINE>\\s*$\")\n\"\"\"Regular expression to match lines of the form `<BLANKLINE>`.\"\"\"\nRE_DOCTEST_FLAGS: Pattern = re.compile(r\"(\\s*#\\s*doctest:.+)$\")\n\"\"\"Regular expression to match lines containing doctest flags of the form `# doctest: +FLAG`.\"\"\"\n\n\nclass Google(Parser):\n \"\"\"A Google-style docstrings parser.\"\"\"\n\n def __init__(self, replace_admonitions: bool = True, trim_doctest_flags: bool = True) -> None:\n \"\"\"\n Initialize the object.\n\n Arguments:\n replace_admonitions: Whether to replace admonitions by their Markdown equivalent.\n trim_doctest_flags: Whether to remove doctest flags.\n \"\"\"\n super().__init__()\n self.replace_admonitions = replace_admonitions\n self.trim_doctest_flags = trim_doctest_flags\n self.section_reader = {\n Section.Type.PARAMETERS: self.read_parameters_section,\n Section.Type.KEYWORD_ARGS: self.read_keyword_arguments_section,\n Section.Type.EXCEPTIONS: self.read_exceptions_section,\n Section.Type.EXAMPLES: self.read_examples_section,\n Section.Type.ATTRIBUTES: self.read_attributes_section,\n Section.Type.RETURN: self.read_return_section,\n Section.Type.YIELD: self.read_yield_section,\n }\n\n def parse_sections(self, docstring: str) -> List[Section]: # noqa: D102\n if \"signature\" not in self.context:\n self.context[\"signature\"] = getattr(self.context[\"obj\"], \"signature\", None)\n if \"annotation\" not in self.context:\n self.context[\"annotation\"] = getattr(self.context[\"obj\"], \"type\", empty)\n if \"attributes\" not in self.context:\n self.context[\"attributes\"] = {}\n\n sections = []\n current_section = []\n\n in_code_block = False\n\n lines = docstring.split(\"\\n\")\n i = 0\n\n while i < len(lines):\n line_lower = lines[i].lower()\n\n if in_code_block:\n if 
line_lower.lstrip(\" \").startswith(\"```\"):\n in_code_block = False\n current_section.append(lines[i])\n\n elif line_lower in SECTIONS_TITLES:\n if current_section:\n if any(current_section):\n sections.append(Section(Section.Type.MARKDOWN, \"\\n\".join(current_section)))\n current_section = []\n section_reader = self.section_reader[SECTIONS_TITLES[line_lower]]\n section, i = section_reader(lines, i + 1)\n if section:\n sections.append(section)\n\n elif line_lower.lstrip(\" \").startswith(\"```\"):\n in_code_block = True\n current_section.append(lines[i])\n\n else:\n if self.replace_admonitions and not in_code_block and i + 1 < len(lines):\n match = RE_GOOGLE_STYLE_ADMONITION.match(lines[i])\n if match:\n groups = match.groupdict()\n indent = groups[\"indent\"]\n if lines[i + 1].startswith(indent + \" \" * 4):\n lines[i] = f\"{indent}!!! {groups['type'].lower()}\"\n if groups[\"title\"]:\n lines[i] += f' \"{groups[\"title\"]}\"'\n current_section.append(lines[i])\n\n i += 1\n\n if current_section:\n sections.append(Section(Section.Type.MARKDOWN, \"\\n\".join(current_section)))\n\n return sections\n\n def read_block_items(self, lines: List[str], start_index: int) -> Tuple[List[str], int]:\n \"\"\"\n Parse an indented block as a list of items.\n\n The first indentation level is used as a reference to determine if the next lines are new items\n or continuation lines.\n\n Arguments:\n lines: The block lines.\n start_index: The line number to start at.\n\n Returns:\n A tuple containing the list of concatenated lines and the index at which to continue parsing.\n \"\"\"\n if start_index >= len(lines):\n return [], start_index\n\n i = start_index\n items: List[str] = []\n\n # skip first empty lines\n while is_empty_line(lines[i]):\n i += 1\n\n # get initial indent\n indent = len(lines[i]) - len(lines[i].lstrip())\n\n if indent == 0:\n # first non-empty line was not indented, abort\n return [], i - 1\n\n # start processing first item\n current_item = [lines[i][indent:]]\n i += 1\n\n # loop on next lines\n while i < len(lines):\n line = lines[i]\n\n if line.startswith(indent * 2 * \" \"):\n # continuation line\n current_item.append(line[indent * 2 :])\n\n elif line.startswith((indent + 1) * \" \"):\n # indent between initial and continuation: append but add error\n cont_indent = len(line) - len(line.lstrip())\n current_item.append(line[cont_indent:])\n self.error(\n f\"Confusing indentation for continuation line {i+1} in docstring, \"\n f\"should be {indent} * 2 = {indent*2} spaces, not {cont_indent}\"\n )\n\n elif line.startswith(indent * \" \"):\n # indent equal to initial one: new item\n items.append(\"\\n\".join(current_item))\n current_item = [line[indent:]]\n\n elif is_empty_line(line):\n # empty line: preserve it in the current item\n current_item.append(\"\")\n\n else:\n # indent lower than initial one: end of section\n break\n\n i += 1\n\n if current_item:\n items.append(\"\\n\".join(current_item).rstrip(\"\\n\"))\n\n return items, i - 1\n\n def read_block(self, lines: List[str], start_index: int) -> Tuple[str, int]:\n \"\"\"\n Parse an indented block.\n\n Arguments:\n lines: The block lines.\n start_index: The line number to start at.\n\n Returns:\n A tuple containing the list of lines and the index at which to continue parsing.\n \"\"\"\n if start_index >= len(lines):\n return \"\", start_index\n\n i = start_index\n block: List[str] = []\n\n # skip first empty lines\n while is_empty_line(lines[i]):\n i += 1\n\n # get initial indent\n indent = len(lines[i]) - len(lines[i].lstrip())\n\n 
if indent == 0:\n # first non-empty line was not indented, abort\n return \"\", i - 1\n\n # start processing first item\n block.append(lines[i].lstrip())\n i += 1\n\n # loop on next lines\n while i < len(lines) and (lines[i].startswith(indent * \" \") or is_empty_line(lines[i])):\n block.append(lines[i][indent:])\n i += 1\n\n return \"\\n\".join(block).rstrip(\"\\n\"), i - 1\n\n def _parse_parameters_section(self, lines: List[str], start_index: int) -> Tuple[List[Parameter], int]:\n \"\"\"\n Parse a \"parameters\" or \"keyword args\" section.\n\n Arguments:\n lines: The parameters block lines.\n start_index: The line number to start at.\n\n Returns:\n A tuple containing a `Section` (or `None`) and the index at which to continue parsing.\n \"\"\"\n parameters = []\n type_: Any\n block, i = self.read_block_items(lines, start_index)\n\n for param_line in block:\n\n # Check that there is an annotation in the docstring\n try:\n name_with_type, description = param_line.split(\":\", 1)\n except ValueError:\n self.error(f\"Failed to get 'name: description' pair from '{param_line}'\")\n continue\n\n # Setting defaults\n default = empty\n annotation = empty\n kind = None\n # Can only get description from docstring - keep if no type was given\n description = description.lstrip()\n\n # If we have managed to find a type in the docstring use this\n if \" \" in name_with_type:\n name, type_ = name_with_type.split(\" \", 1)\n annotation = type_.strip(\"()\")\n if annotation.endswith(\", optional\"): # type: ignore\n annotation = annotation[:-10] # type: ignore\n # Otherwise try to use the signature as `annotation` would still be empty\n else:\n name = name_with_type\n\n # Check in the signature to get extra details\n try:\n signature_param = self.context[\"signature\"].parameters[name.lstrip(\"*\")]\n except (AttributeError, KeyError):\n if annotation is empty:\n self.error(f\"No type annotation for parameter '{name}'\")\n else:\n if annotation is empty:\n annotation = signature_param.annotation\n # If signature_param.X are empty it doesnt matter as defaults are empty anyway\n default = signature_param.default\n kind = signature_param.kind\n\n parameters.append(\n Parameter(name=name, annotation=annotation, description=description, default=default, kind=kind)\n )\n\n return parameters, i\n\n def read_parameters_section(self, lines: List[str], start_index: int) -> Tuple[Optional[Section], int]:\n \"\"\"\n Parse a \"parameters\" section.\n\n Arguments:\n lines: The parameters block lines.\n start_index: The line number to start at.\n\n Returns:\n A tuple containing a `Section` (or `None`) and the index at which to continue parsing.\n \"\"\"\n parameters, i = self._parse_parameters_section(lines, start_index)\n\n if parameters:\n return Section(Section.Type.PARAMETERS, parameters), i\n\n self.error(f\"Empty parameters section at line {start_index}\")\n return None, i\n\n def read_keyword_arguments_section(self, lines: List[str], start_index: int) -> Tuple[Optional[Section], int]:\n \"\"\"\n Parse a \"keyword arguments\" section.\n\n Arguments:\n lines: The parameters block lines.\n start_index: The line number to start at.\n\n Returns:\n A tuple containing a `Section` (or `None`) and the index at which to continue parsing.\n \"\"\"\n parameters, i = self._parse_parameters_section(lines, start_index)\n for parameter in parameters:\n parameter.kind = inspect.Parameter.KEYWORD_ONLY\n\n if parameters:\n return Section(Section.Type.KEYWORD_ARGS, parameters), i\n\n self.error(f\"Empty keyword arguments section at 
line {start_index}\")\n return None, i\n\n def read_attributes_section(self, lines: List[str], start_index: int) -> Tuple[Optional[Section], int]:\n \"\"\"\n Parse an \"attributes\" section.\n\n Arguments:\n lines: The parameters block lines.\n start_index: The line number to start at.\n\n Returns:\n A tuple containing a `Section` (or `None`) and the index at which to continue parsing.\n \"\"\"\n attributes = []\n block, i = self.read_block_items(lines, start_index)\n\n for attr_line in block:\n try:\n name_with_type, description = attr_line.split(\":\", 1)\n except ValueError:\n self.error(f\"Failed to get 'name: description' pair from '{attr_line}'\")\n continue\n\n description = description.lstrip()\n\n if \" \" in name_with_type:\n name, annotation = name_with_type.split(\" \", 1)\n annotation = annotation.strip(\"()\")\n if annotation.endswith(\", optional\"):\n annotation = annotation[:-10]\n else:\n name = name_with_type\n annotation = self.context[\"attributes\"].get(name, {}).get(\"annotation\", empty)\n\n attributes.append(Attribute(name=name, annotation=annotation, description=description))\n\n if attributes:\n return Section(Section.Type.ATTRIBUTES, attributes), i\n\n self.error(f\"Empty attributes section at line {start_index}\")\n return None, i\n\n def read_exceptions_section(self, lines: List[str], start_index: int) -> Tuple[Optional[Section], int]:\n \"\"\"\n Parse an \"exceptions\" section.\n\n Arguments:\n lines: The exceptions block lines.\n start_index: The line number to start at.\n\n Returns:\n A tuple containing a `Section` (or `None`) and the index at which to continue parsing.\n \"\"\"\n exceptions = []\n block, i = self.read_block_items(lines, start_index)\n\n for exception_line in block:\n try:\n annotation, description = exception_line.split(\": \", 1)\n except ValueError:\n self.error(f\"Failed to get 'exception: description' pair from '{exception_line}'\")\n else:\n exceptions.append(AnnotatedObject(annotation, description.lstrip(\" \")))\n\n if exceptions:\n return Section(Section.Type.EXCEPTIONS, exceptions), i\n\n self.error(f\"Empty exceptions section at line {start_index}\")\n return None, i\n\n def read_return_section(self, lines: List[str], start_index: int) -> Tuple[Optional[Section], int]:\n \"\"\"\n Parse an \"returns\" section.\n\n Arguments:\n lines: The return block lines.\n start_index: The line number to start at.\n\n Returns:\n A tuple containing a `Section` (or `None`) and the index at which to continue parsing.\n \"\"\"\n text, i = self.read_block(lines, start_index)\n\n # Early exit if there is no text in the return section\n if not text:\n self.error(f\"Empty return section at line {start_index}\")\n return None, i\n\n # First try to get the annotation and description from the docstring\n try:\n type_, text = text.split(\":\", 1)\n except ValueError:\n description = text\n annotation = self.context[\"annotation\"]\n # If there was no annotation in the docstring then move to signature\n if annotation is empty and self.context[\"signature\"]:\n annotation = self.context[\"signature\"].return_annotation\n else:\n annotation = type_.lstrip()\n description = text.lstrip()\n\n # There was no type in the docstring and no annotation\n if annotation is empty:\n self.error(\"No return type/annotation in docstring/signature\")\n\n return Section(Section.Type.RETURN, AnnotatedObject(annotation, description)), i\n\n def read_yield_section(self, lines: List[str], start_index: int) -> Tuple[Optional[Section], int]:\n \"\"\"\n Parse a \"yields\" 
section.\n\n Arguments:\n lines: The return block lines.\n start_index: The line number to start at.\n\n Returns:\n A tuple containing a `Section` (or `None`) and the index at which to continue parsing.\n \"\"\"\n text, i = self.read_block(lines, start_index)\n\n # Early exit if there is no text in the yield section\n if not text:\n self.error(f\"Empty yield section at line {start_index}\")\n return None, i\n\n # First try to get the annotation and description from the docstring\n try:\n type_, text = text.split(\":\", 1)\n except ValueError:\n description = text\n annotation = self.context[\"annotation\"]\n # If there was no annotation in the docstring then move to signature\n if annotation is empty and self.context[\"signature\"]:\n annotation = self.context[\"signature\"].return_annotation\n else:\n annotation = type_.lstrip()\n description = text.lstrip()\n\n # There was no type in the docstring and no annotation\n if annotation is empty:\n self.error(\"No yield type/annotation in docstring/signature\")\n\n return Section(Section.Type.YIELD, AnnotatedObject(annotation, description)), i\n\n def read_examples_section(self, lines: List[str], start_index: int) -> Tuple[Optional[Section], int]:\n \"\"\"\n Parse an \"examples\" section.\n\n Arguments:\n lines: The examples block lines.\n start_index: The line number to start at.\n\n Returns:\n A tuple containing a `Section` (or `None`) and the index at which to continue parsing.\n \"\"\"\n text, i = self.read_block(lines, start_index)\n\n sub_sections = []\n in_code_example = False\n in_code_block = False\n current_text: List[str] = []\n current_example: List[str] = []\n\n for line in text.split(\"\\n\"):\n if is_empty_line(line):\n if in_code_example:\n if current_example:\n sub_sections.append((Section.Type.EXAMPLES, \"\\n\".join(current_example)))\n current_example = []\n in_code_example = False\n else:\n current_text.append(line)\n\n elif in_code_example:\n if self.trim_doctest_flags:\n line = RE_DOCTEST_FLAGS.sub(\"\", line)\n line = RE_DOCTEST_BLANKLINE.sub(\"\", line)\n current_example.append(line)\n\n elif line.startswith(\"```\"):\n in_code_block = not in_code_block\n current_text.append(line)\n\n elif in_code_block:\n current_text.append(line)\n\n elif line.startswith(\">>>\"):\n if current_text:\n sub_sections.append((Section.Type.MARKDOWN, \"\\n\".join(current_text)))\n current_text = []\n in_code_example = True\n\n if self.trim_doctest_flags:\n line = RE_DOCTEST_FLAGS.sub(\"\", line)\n current_example.append(line)\n\n else:\n current_text.append(line)\n\n if current_text:\n sub_sections.append((Section.Type.MARKDOWN, \"\\n\".join(current_text)))\n elif current_example:\n sub_sections.append((Section.Type.EXAMPLES, \"\\n\".join(current_example)))\n\n if sub_sections:\n return Section(Section.Type.EXAMPLES, sub_sections), i\n\n self.error(f\"Empty examples section at line {start_index}\")\n return None, i\n\n\ndef is_empty_line(line) -> bool:\n \"\"\"\n Tell if a line is empty.\n\n Arguments:\n line: The line to check.\n\n Returns:\n True if the line is empty or composed of blanks only, False otherwise.\n \"\"\"\n return not line.strip()\n", "repo_name": "mkdocstrings/pytkdocs", "sub_path": "src/pytkdocs/parsers/docstrings/google.py", "file_name": "google.py", "file_ext": "py", "file_size_in_byte": 20206, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 48, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 9, "usage_type": "attribute"}, 
{"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 9, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 10, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 11, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 12, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 13, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 14, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 15, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 16, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 17, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 18, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 19, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 20, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 21, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 22, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 23, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 24, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 25, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 26, "usage_type": "attribute"}, {"api_name": 
"pytkdocs.parsers.docstrings.base.Section", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Pattern", "line_number": 29, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 29, "usage_type": "call"}, {"api_name": "typing.Pattern", "line_number": 31, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 31, "usage_type": "call"}, {"api_name": "typing.Pattern", "line_number": 33, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 33, "usage_type": "call"}, {"api_name": "pytkdocs.parsers.docstrings.base.Parser", "line_number": 37, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 52, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 53, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 54, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 55, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 56, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 57, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 58, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.empty", "line_number": 65, "usage_type": "argument"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 88, "usage_type": "call"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 114, "usage_type": "call"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 114, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 61, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 61, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 118, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 136, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 118, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 190, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 205, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 190, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 229, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 241, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.empty", "line_number": 254, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.empty", "line_number": 255, "usage_type": "name"}, {"api_name": 
"pytkdocs.parsers.docstrings.base.empty", "line_number": 274, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.empty", "line_number": 277, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Parameter", "line_number": 284, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 229, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Parameter", "line_number": 229, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 289, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 303, "usage_type": "call"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 303, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 289, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 289, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 289, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 308, "usage_type": "name"}, {"api_name": "inspect.Parameter", "line_number": 321, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 324, "usage_type": "call"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 324, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 308, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 308, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 308, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 329, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.empty", "line_number": 359, "usage_type": "argument"}, {"api_name": "pytkdocs.parsers.docstrings.base.Attribute", "line_number": 361, "usage_type": "call"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 364, "usage_type": "call"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 364, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 329, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 329, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 329, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 369, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.AnnotatedObject", "line_number": 389, "usage_type": "call"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 392, "usage_type": "call"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 392, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 369, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 369, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 369, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 397, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.empty", "line_number": 422, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.empty", "line_number": 429, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 432, "usage_type": "call"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 432, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.AnnotatedObject", "line_number": 432, "usage_type": "call"}, 
{"api_name": "typing.Tuple", "line_number": 397, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 397, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 397, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 434, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.empty", "line_number": 459, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.empty", "line_number": 466, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 469, "usage_type": "call"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 469, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.AnnotatedObject", "line_number": 469, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 434, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 434, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 434, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 471, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 487, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 488, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 494, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 494, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 515, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 515, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 527, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 527, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 529, "usage_type": "attribute"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 529, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 532, "usage_type": "call"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section.Type", "line_number": 532, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 471, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 471, "usage_type": "name"}, {"api_name": "pytkdocs.parsers.docstrings.base.Section", "line_number": 471, "usage_type": "name"}]} +{"seq_id": "39676055775", "text": "from django.db\t\timport transaction, reset_queries\nfrom datetime\t\timport datetime\nfrom time\t\timport mktime\n\nfrom blacklist.backend\timport BaseBackend\nfrom blacklist\t\timport models\n\nclass RIR(BaseBackend):\n\tname = \"RIR\"\n\tdef __init__(self):\n\t\tBaseBackend.__init__(self, models.RIR)\n\n\t@transaction.commit_manually\n\tdef bulk_import(self, import_data):\n\t\tt_start = mktime(datetime.now().timetuple())\n\t\tdata = []\n\t\tfailed = []\n\t\tnum_failed = 0\n\t\tfor entry in import_data:\n\t\t\tif entry[0] == \"\":\n\t\t\t\tnum_failed += 1\n\t\t\t\tfailed.append(entry)\n\t\t\t\tcontinue\n\t\t\ttmp = {}\n\t\t\ttmp[\"name\"] = entry[0]\n\t\t\ttmp[\"whois\"] = entry[1]\n\t\t\tdata.append(tmp)\n\t\tfor entry in data:\n\t\t\trir = models.RIR(\n\t\t\t\tname=entry[\"name\"],\n\t\t\t\twhois=entry[\"whois\"],\n\t\t\t)\n\t\t\trir.save()\n\t\ttransaction.commit()\n\t\treset_queries()\n\t\tt_end = 
mktime(datetime.now().timetuple())\n\t\treturn (t_end - t_start, num_failed, failed)\n", "repo_name": "r3boot/blacklist", "sub_path": "django-blacklist/blacklist/backend/rir.py", "file_name": "rir.py", "file_ext": "py", "file_size_in_byte": 875, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "21", "api": [{"api_name": "blacklist.backend.BaseBackend", "line_number": 8, "usage_type": "name"}, {"api_name": "blacklist.backend.BaseBackend.__init__", "line_number": 11, "usage_type": "call"}, {"api_name": "blacklist.backend.BaseBackend", "line_number": 11, "usage_type": "name"}, {"api_name": "blacklist.models.RIR", "line_number": 11, "usage_type": "attribute"}, {"api_name": "blacklist.models", "line_number": 11, "usage_type": "name"}, {"api_name": "time.mktime", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "name"}, {"api_name": "blacklist.models.RIR", "line_number": 29, "usage_type": "call"}, {"api_name": "blacklist.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.transaction.commit", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.reset_queries", "line_number": 35, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.transaction.commit_manually", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "27083985724", "text": "#!/usr/bin/env python\n\nimport rospy\nimport cv2\nimport numpy as np\nfrom sensor_msgs.msg import Image, CompressedImage\n\n\nclass IMGParser:\n def __init__(self):\n rospy.init_node(\"camera\", anonymous=True)\n\n self.image_sub = rospy.Subscriber(\n \"/image_jpeg/compressed\", CompressedImage, self.callback\n )\n\n rospy.spin()\n\n def grayscale(self, img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n def gaussian_blur(self, img, kerenl_size):\n return cv2.GaussianBlur(img, (kerenl_size, kerenl_size), 0)\n\n def canny(self, img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)\n\n def roi(self, img):\n mask = np.zeros_like(img)\n if len(img.shape) > 2:\n channel_count = img.shape[2]\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n vertices = np.array(\n [[(0, 480), (0, 300), (75, 265), (565, 265), (640, 300), (640, 480)]],\n dtype=np.int32,\n )\n cv2.fillPoly(mask, vertices, ignore_mask_color)\n masked_img = cv2.bitwise_and(img, mask)\n return masked_img\n\n def callback(self, data):\n np_arr = np.fromstring(data.data, np.uint8)\n img_bgr = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)\n img_gray = self.grayscale(img_bgr)\n #\n img_gau = self.gaussian_blur(img_gray, 5)\n img_canny = self.canny(img_gau, 50, 200)\n img_masked = self.roi(img_canny)\n\n cv2.imshow(\"Image window\", img_masked)\n cv2.waitKey(1)\n\n\nif __name__ == \"__main__\":\n try:\n image_parser = IMGParser()\n except rospy.ROSInterruptException:\n pass\n", "repo_name": "201820894/doge_driver", "sub_path": "src/sim_test/scripts/cam_test_temp.py", "file_name": "cam_test_temp.py", "file_ext": "py", "file_size_in_byte": 
1720, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "rospy.init_node", "line_number": 11, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 13, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.CompressedImage", "line_number": 14, "usage_type": "argument"}, {"api_name": "rospy.spin", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.Canny", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.fillPoly", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 44, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 45, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 53, "usage_type": "call"}, {"api_name": "rospy.ROSInterruptException", "line_number": 59, "usage_type": "attribute"}]} +{"seq_id": "3228769827", "text": "from django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.parsers import JSONParser\nfrom django.http.response import JsonResponse\n\nfrom .models import Movie, Actor\nfrom .serializers import movieSerializer, actorSerializer\n\n@csrf_exempt\ndef movieApi(request):\n if request.method == 'GET':\n movie = Movie.objects.all()\n movie_serializer = movieSerializer(movie, many=True)\n return JsonResponse(movie_serializer.data, safe=False)\n elif request.method == 'POST':\n movie_data = JSONParser().parse(request)\n movie_serializer = movieSerializer(data=movie_data)\n if movie_serializer.is_valid():\n movie_serializer.save()\n return JsonResponse(\"Added Successfully!\", safe=False)\n return JsonResponse(\"Failed to add!!!\", safe=False)\n elif request.method == 'PUT':\n movie_data = JSONParser().parse(request)\n movie = Movie.objects.get(title=movie_data['title'])\n movie_serializer = movieSerializer(movie, data=movie_data)\n if movie_serializer.is_valid():\n movie_serializer.save()\n return JsonResponse(\"Updated\", safe=False)\n return JsonResponse(\"Failed to update\", safe=False)\n@csrf_exempt\ndef actorApi(request):\n if request.method == 'GET':\n actor = Actor.objects.all()\n actor_serializer = actorSerializer(actor, many=True)\n return JsonResponse(actor_serializer.data, safe=False)\n elif request.method == 'POST':\n actor_data = JSONParser().parse(request)\n actor_serializer = actorSerializer(data=actor_data)\n if actor_serializer.is_valid():\n actor_serializer.save()\n return JsonResponse(\"Addedd Successfully!\", safe=False)\n return JsonResponse(\"Failed to add!!!\", safe=False)\n\n\n", "repo_name": "ritikraj26/movieMaintenanceSite", "sub_path": "website/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1829, "program_lang": "python", "lang": "en", "doc_type": "code", 
"stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "models.Movie.objects.all", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Movie.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "models.Movie", "line_number": 12, "usage_type": "name"}, {"api_name": "serializers.movieSerializer", "line_number": 13, "usage_type": "call"}, {"api_name": "django.http.response.JsonResponse", "line_number": 14, "usage_type": "call"}, {"api_name": "rest_framework.parsers.JSONParser", "line_number": 16, "usage_type": "call"}, {"api_name": "serializers.movieSerializer", "line_number": 17, "usage_type": "call"}, {"api_name": "django.http.response.JsonResponse", "line_number": 20, "usage_type": "call"}, {"api_name": "django.http.response.JsonResponse", "line_number": 21, "usage_type": "call"}, {"api_name": "rest_framework.parsers.JSONParser", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Movie.objects.get", "line_number": 24, "usage_type": "call"}, {"api_name": "models.Movie.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "models.Movie", "line_number": 24, "usage_type": "name"}, {"api_name": "serializers.movieSerializer", "line_number": 25, "usage_type": "call"}, {"api_name": "django.http.response.JsonResponse", "line_number": 28, "usage_type": "call"}, {"api_name": "django.http.response.JsonResponse", "line_number": 29, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 9, "usage_type": "name"}, {"api_name": "models.Actor.objects.all", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Actor.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.Actor", "line_number": 33, "usage_type": "name"}, {"api_name": "serializers.actorSerializer", "line_number": 34, "usage_type": "call"}, {"api_name": "django.http.response.JsonResponse", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.parsers.JSONParser", "line_number": 37, "usage_type": "call"}, {"api_name": "serializers.actorSerializer", "line_number": 38, "usage_type": "call"}, {"api_name": "django.http.response.JsonResponse", "line_number": 41, "usage_type": "call"}, {"api_name": "django.http.response.JsonResponse", "line_number": 42, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "18564381641", "text": "from copy import deepcopy\n\nimport poptorch\nimport torch\nfrom torch.testing import assert_close\n\n\nclass TrainingStepper:\n \"\"\"\n Test utility for comparing training runs between IPU and CPU.\n Usage:\n model = ...\n batch = ...\n model.train()\n stepper = TrainingSteper(model)\n stepper.run(10, batch)\n \"\"\"\n\n def __init__(\n self,\n model,\n lr=0.001,\n optimizer=poptorch.optim.Adam,\n options=None,\n rtol=None,\n atol=None,\n ):\n super().__init__()\n model.train()\n self.lr = lr\n self.rtol = rtol\n self.atol = atol\n self.options = poptorch.Options() if options is None else options\n self.setup_cpu(model, optimizer)\n self.setup_ipu(model, optimizer)\n self.check_parameters()\n\n def setup_cpu(self, model, optimizer):\n self.cpu_model = deepcopy(model)\n self.optimizer = optimizer(self.cpu_model.parameters(), lr=self.lr)\n\n def setup_ipu(self, model, optimizer):\n self.ipu_model = deepcopy(model)\n ipu_optimizer = optimizer(self.ipu_model.parameters(), lr=self.lr)\n options = self.options\n options.Precision.enableFloatingPointExceptions(True)\n self.training_model = 
poptorch.trainingModel(\n self.ipu_model, optimizer=ipu_optimizer, options=options\n )\n\n def assert_close(self, actual, expected, id):\n assert_close(\n actual=actual,\n expected=expected,\n msg=lambda s: f\"{id} was not equal\\n\\n{s}\",\n atol=self.atol,\n rtol=self.rtol,\n )\n\n def check_parameters(self):\n for cpu, ipu in zip(\n self.cpu_model.named_parameters(), self.ipu_model.named_parameters()\n ):\n name, cpu = cpu\n ipu = ipu[1]\n self.assert_close(ipu, cpu, name)\n\n def cpu_step(self, batch):\n self.optimizer.zero_grad()\n out, loss = self.cpu_model(*batch)\n loss.backward()\n self.optimizer.step()\n return out, loss\n\n def ipu_step(self, batch):\n out, loss = self.training_model(*batch)\n self.training_model.copyWeightsToHost()\n return out, loss\n\n def run(self, num_steps, batch):\n cpu_loss = torch.empty(num_steps)\n ipu_loss = torch.empty(num_steps)\n\n for i in range(num_steps):\n cpu_out, cpu_loss[i] = self.cpu_step(batch)\n ipu_out, ipu_loss[i] = self.ipu_step(batch)\n self.assert_close(ipu_out, cpu_out, \"Output\")\n self.check_parameters()\n\n self.assert_close(ipu_loss, cpu_loss, \"loss\")\n", "repo_name": "graphcore-research/hydronet-gnn", "sub_path": "test/stepper.py", "file_name": "stepper.py", "file_ext": "py", "file_size_in_byte": 2640, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "21", "api": [{"api_name": "poptorch.optim", "line_number": 23, "usage_type": "attribute"}, {"api_name": "poptorch.Options", "line_number": 33, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 39, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 43, "usage_type": "call"}, {"api_name": "poptorch.trainingModel", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.testing.assert_close", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "74097203574", "text": "import os\nimport pprint\nimport logging\nfrom multiprocessing import Pool\nimport json\nimport aux\nimport conf\nimport src_tools as st\npp = pprint.PrettyPrinter(indent=4)\n\n\nppath = '/home/markus/work_ot/PIT/build/v4.3/ppc_men_defconfig/'\n\nimport validation\n\n\n# get hunks applied\n# fun diffs allways have one item and on hunk\ndef cmp_patch(commit, fun, cu, text, patches):\n res = {}\n for patch in patches:\n if patch['commit'] == commit:\n for item in patch['items']:\n hunks = []\n for i, hunk in enumerate(item.hunks):\n for _line in hunk.text:\n line = _line.decode('utf-8')\n if line.startswith('+') or line.startswith('-'):\n for l in text:\n if _line == l:\n hunks.append(i)\n if len(hunks) > 0:\n res.update({ commit : {'fun': fun,\n 'hunks': list(set(hunks)),\n 'patch': patch['patch'],\n 'cu': cu}})\n return res\n\ndef analyse_fun_diffs(fun_patches, cu_patches):\n result = []\n for fun in fun_patches.keys():\n if isinstance(fun_patches[fun], bool):\n continue\n res = {fun : []}\n\n for item in fun_patches[fun]:\n fun_cu = item.source.decode('utf-8')[2:]\n fun_text = item.hunks[0].text\n commit = aux.get_hash(item.header)\n r = cmp_patch(commit, fun, fun_cu, fun_text, cu_patches[fun_cu])\n print(\"%s\" % (r))\n res[fun].append(r)\n\n result.append(res)\n return result\n\n\n\nclass worker:\n def __init__(self, path_cur, path_nex, tag_cur, tag_nex, arch):\n self.commits_insn_applied = []\n if not os.path.exists(path_cur + '/fun_diffs'):\n os.makedirs(path_cur + 
'/fun_diffs')\n if not os.path.exists(path_cur + '/funs_nin_cur'):\n os.makedirs(path_cur + '/funs_nin_cur')\n\n if not os.path.exists(path_cur + '/struct_diffs'):\n os.makedirs(path_cur + '/struct_diffs')\n if not os.path.exists(path_cur + '/debug_data'):\n os.makedirs(path_cur + '/debug_data')\n\n self.data_out_funs = []\n self.l_insn_add = 0\n self.l_insn_rm = 0\n\n self.l_insn_add_t = 0\n self.l_insn_rm_t = 0\n self.l_insn_add_f = 0\n self.l_insn_rm_f = 0\n self.l_fun_add = 0\n self.l_fun_rm = 0\n self.cnt_fun_rm = 0\n self.cnt_fun_add = 0\n self.cnt_fun_ren = 0\n self.result_fun = {}\n\n logger_name = \"checker_%s\" % tag_cur\n aux.setup_logger(logger_name, path_cur + \"/check_series.log\")\n self.logger = logging.getLogger(logger_name)\n self.logger.info(\"Start check_series!\\n\")\n self.diff_path = path_cur + \"diffs/\"\n self.linux_cur = path_cur + \"linux-stable/\"\n self.linux_nex = path_nex + \"linux-stable/\"\n self.path_cur = path_cur\n self.path_nex = path_nex\n self.tag_cur = tag_cur\n self.tag_nex = tag_nex\n\n gcc_out_cur = path_cur + '/gcc_output'\n gcc_out_nex = path_nex + '/gcc_output'\n\n self.fun_def_len_cur = aux.load(gcc_out_cur, 'data_flen')\n self.fun_def_len_nex = aux.load(gcc_out_nex, 'data_flen')\n\n self.fun_decl_len_cur = aux.load(gcc_out_cur, 'data_dlen')\n self.fun_decl_len_nex = aux.load(gcc_out_nex, 'data_dlen')\n\n self.insns_cur = aux.load(gcc_out_cur, 'data_stmt')\n self.insns_nex = aux.load(gcc_out_nex, 'data_stmt')\n\n self.parsed_cur = aux.load(gcc_out_cur, 'data_pre')\n self.parsed_nex = aux.load(gcc_out_nex, 'data_pre')\n\n self.fun_src_cur = st.get_src(self.linux_cur,\n self.fun_def_len_cur, len('.fl'))\n\n self.fun_src_nex = st.get_src(self.linux_nex,\n self.fun_def_len_nex, len('.fl'))\n\n self.fun_decl_cur = st.get_src(self.linux_cur,\n self.fun_decl_len_cur, len('.dl'))\n\n self.fun_decl_nex = st.get_src(self.linux_nex,\n self.fun_decl_len_nex, len('.dl'))\n\n self.logger.info(\"function in tag %s: %d\\n\" %\n (tag_cur, self.cnt_funs(self.fun_src_cur)))\n self.logger.info(\"function in tag %s: %d\\n\" %\n (tag_nex, self.cnt_funs(self.fun_src_nex)))\n self.logger.info(\"Create function diffs!\")\n fun_diffs, funs, fun_mv, n_in_cur, n_in_nex = st.gen_funs_diff(path_cur,\n self.fun_src_cur,\n self.fun_src_nex,\n self.fun_decl_cur,\n self.fun_decl_nex)\n\n self.funs_tot = self.cnt_funs(funs)\n self.logger.info(\"functions total %d \" % self.cnt_funs(funs))\n\n self.logger.info(\"function not in current %d\\n\" %\n (self.cnt_funs(n_in_cur)))\n self.logger.info(\"function not in next %d\\n\" %\n (self.cnt_funs(n_in_nex)))\n\n self.fun_moved = fun_mv\n self.not_in_cur = n_in_cur\n self.fun_diffs = fun_diffs\n self.not_in_nex = n_in_nex\n diff_jobs = []\n\n self.logger.info(\"Create diffs for functions\")\n self.funs_nin_cur = self.create_diffs(conf.LINUX_SRC, funs,\n 'fun_diffs', tag_cur, tag_nex)\n\n self.logger.info(\"Create diffs functions not in current\")\n self.funs_nin_cur = self.create_diffs(conf.LINUX_SRC, n_in_cur,\n 'funs_nin_cur', tag_cur, tag_nex)\n\n self.logger.info(\"Create diffs moved functions\")\n self.moved = self.create_diffs(conf.LINUX_SRC, fun_mv,\n 'funs_moved', tag_cur, tag_nex)\n\n self.logger.info(\"Get patch information!\")\n self.format_patches = aux.get_patches_fp(path_cur + 'diffs/')\n self.fun_patches = aux.get_patches(path_cur + 'fun_diffs/')\n self.cu_patches = aux.get_patch_lst_by_cu(path_cur + 'diffs/')\n self.fun_patches_nf = aux.get_patches(path_cur + 'funs_nin_cur/')\n\n self.process()\n\n # calculate and 
print out stats\n self.l_insn_add = self.l_insn_add_t + self.l_insn_add_f + self.l_fun_add\n self.l_insn_rm = self.l_insn_rm_t + self.l_insn_rm_f + self.l_fun_rm\n tag_date = aux.get_commit_time_sec(self.tag_cur, conf.LINUX_SRC)\n summary = {'tag': self.tag_cur,\n 'date': tag_date,\n 'patches': len(self.commits_insn_applied),\n 'patches_tot':len(os.listdir(self.diff_path)),\n 'lines_add': self.l_insn_add,\n 'lines_rm': self.l_insn_rm,\n 'funs_tot': self.funs_tot,\n 'funs_rm': self.cnt_fun_rm,\n 'funs_add': self.cnt_fun_add,\n 'funs_ren': self.cnt_fun_ren}\n fname_f = self.path_cur + 'functions_data.json'\n fname_s = self.path_cur + 'summary_data.json'\n fname_sha = self.path_cur + 'sha_lst.json'\n with open(fname_f, 'w') as outfile:\n json.dump(self.data_out_funs, outfile)\n with open(fname_s, 'w') as outfile:\n json.dump(summary, outfile)\n with open(fname_sha, 'w') as outfile:\n json.dump(self.commits_insn_applied, outfile)\n\n with open(self.path_cur + 'result_fun.json', 'w') as outfile:\n json.dump(self.result_fun , outfile)\n\n with open(self.path_cur + 'fun_decls_cur.json', 'w') as outfile:\n json.dump(self.fun_decl_cur , outfile)\n\n with open(self.path_cur + 'fun_decls_nex.json', 'w') as outfile:\n json.dump(self.fun_decl_nex , outfile)\n\n\n\n def get_rm_funs(self, funs_renamed):\n data = {}\n for cu in self.not_in_nex.keys():\n for fun in self.not_in_nex[cu].keys():\n if not fun in funs_renamed:\n if not cu in data.keys():\n data.update({cu:{}})\n data[cu].update({fun : self.not_in_nex[cu][fun]})\n\n return data\n\n\n def process(self):\n l_struct_add = 0\n l_struct_rm = 0\n commits_insn_applied = []\n\n self.logger.info(\"Get stats for function diffs ...\\n\")\n r_fun, d_o, l_a_t, l_r_t, l_a_f, l_r_f, shas_fun = self.get_mod_in_fun(\n self.fun_diffs)\n self.l_insn_add_t += l_a_t\n self.l_insn_add_f += l_a_f\n self.l_insn_rm_t += l_r_t\n self.l_insn_rm_f += l_r_f\n self.data_out_funs.extend(d_o)\n\n for commit in shas_fun:\n if commit not in commits_insn_applied:\n self.commits_insn_applied.append(commit)\n\n self.logger.info(\"Get stats for added functions ...\\n\")\n ren, added, not_res = self.check_not_in_cur()\n r_ren, d_o, l_a_t, l_r_t, l_a_f, l_r_f, shas_fun = self.get_mod_of_ren(ren)\n self.data_out_funs.extend(d_o)\n\n self.l_insn_add_t += l_a_t\n self.l_insn_add_f += l_a_f\n self.l_insn_rm_t += l_r_t\n self.l_insn_rm_f += l_r_f\n\n self.ren = ren\n funs_renamed = self.get_funs(ren)\n funs_removed = self.get_rm_funs(funs_renamed)\n\n self.l_fun_add, d_o, commits = self.get_lines_mod(added, 'ADD')\n\n self.data_out_funs.extend(d_o)\n for commit in commits:\n if commit not in commits_insn_applied:\n self.commits_insn_applied.append(commit)\n\n removed, d_o, commits, self.l_fun_rm = self.check_removed(funs_removed,\n self.cu_patches)\n self.data_out_funs.extend(d_o)\n for commit in commits:\n if commit not in commits_insn_applied:\n self.commits_insn_applied.append(commit)\n\n if conf.VALIDATION == True:\n validation.print_val_added(added)\n validation.print_val_removed(removed)\n validation.print_val_renamed(ren)\n\n self.cnt_fun_rm = self.cnt_funs(removed)\n self.cnt_fun_add = self.cnt_funs(added)\n self.cnt_fun_ren = self.cnt_funs(ren)\n self.logger.info(\"Number of renamed functions: %d\" % self.cnt_funs(ren))\n self.logger.info(\"Number of addded functions: %d\" % self.cnt_funs(added))\n self.logger.info(\"Number of not resolved functions: %d\" %\n self.cnt_funs(not_res))\n\n\n\n def create_diffs(self, git_linux, data, path, tag_cur, tag_nex):\n l = {}\n for cu in 
data.keys():\n for fun in data[cu].keys():\n start = data[cu][fun]['start']\n end = data[cu][fun]['end']\n out_path = self.path_cur + path\n aux.git_make_fun_diff(git_linux, out_path, tag_cur, tag_nex,\n start, end, cu, fun, self.logger)\n\n if not cu in l.keys():\n l.update({cu:{}})\n l[cu].update({fun: {'start': start, 'end': end}})\n\n return l\n\n def get_pre_data(self, data, start, end):\n s = -1\n e = -1\n for n,i in enumerate(data):\n if i >= start and s == -1:\n s = n\n if i > end and e == -1:\n e = n - 1\n if s != -1 and e != -1:\n return data[s:e]\n else:\n return None\n\n def get_lines_mod(self, data, mode):\n l_cnt = 0\n commits = []\n data_out = []\n for cu in data:\n for fun in data[cu]:\n it = data[cu][fun]\n start = self.fun_def_len_nex[cu + '.fl'][fun]['start']\n end = self.fun_def_len_nex[cu + '.fl'][fun]['end']\n parsed = sorted(self.parsed_nex[cu + '.pre'])\n pre_data = self.get_pre_data(parsed, start, end)\n if pre_data != None:\n cnt = len(pre_data)\n l_cnt += cnt\n if it['commit'] not in commits:\n commits.append(it['commit'])\n data_out.append((fun, cnt, 0, cu, [it['commit']], mode))\n\n return l_cnt, data_out, commits\n\n def get_mod_of_ren(self, data):\n result = {}\n data_out = []\n l_add_t = 0\n l_rm_t = 0\n l_add_f = 0\n l_rm_f = 0\n shas = []\n\n for cu in data.keys():\n if not cu in result.keys():\n result.update({cu:{}})\n for fun in data[cu]:\n it = data[cu][fun]\n fun_old = it['fun_old']\n cur_src = self.fun_src_cur[cu][fun_old]['src']\n nex_src = self.fun_src_nex[cu][fun]['src']\n\n add, rm = st.get_diff(cur_src, nex_src)\n p_it = {'data' : {'+': add, '-': rm},\n 'cu' : cu,\n 'info_cur': self.fun_src_cur[cu][fun_old]['info'],\n 'info_next': self.fun_src_nex[cu][fun]['info']}\n\n res, l_add_t, l_rm_t, l_add_f, l_rm_f, shas = st.get_mod_fun(\n fun, self.insns_cur, self.insns_nex,\n self.parsed_cur, self.parsed_nex, p_it,\n self.fun_patches_nf[fun])\n result[cu].update({fun:res})\n fun_i = \"%s->%s\" % (fun_old ,fun)\n data_out.append(((fun_i),l_add_t + l_add_f, l_rm_t + l_rm_f, cu, shas, 'R'))\n if not fun in self.result_fun.keys():\n self.result_fun.update({ fun: []})\n self.result_fun[fun].extend((shas, cu))\n\n return result, data_out, l_add_t, l_rm_t, l_add_f, l_rm_f, shas\n\n def get_mod_in_fun(self, diffs):\n \"\"\" Get modification between two versions of a function.\n\n :param diffs: A dict with compile unit names as keys. 
Each key has\n a list of dict entries (one for each function).\n The entry has the information:\n - info_cur : start/end info of current function\n - info_next : start/end info of next function\n - name : function name\n - data : diff data with two keys:\n + : lines added\n - : lines removed\n\n :returns:\n - res: dict [cu][[commit], [patch_name], [fun], [add], [rm]]\n - h_insn_app: hunks applied on instructions\n - h_insn_tot: hunks total on instructions\n - l_insn_add: lines added total\n - l_insn_rm: lines removed total\n - patches: patches applied\n\n \"\"\"\n result = {}\n l_i_add_t = 0\n l_i_rm_t = 0\n l_i_add_f = 0\n l_i_rm_f = 0\n\n h_insn_tot = 0\n h_insn_app = 0\n shas = []\n data_out = []\n for fun in diffs.keys():\n if fun not in self.fun_patches.keys():\n self.logger.info(\"Missing patch for fun: %s\" % fun)\n continue\n if self.fun_patches[fun] == False:\n self.logger.info(\"Missing fun diff: %s\" % fun)\n continue\n else:\n res, l_add_t, l_rm_t, l_add_f, l_rm_f,commits = st.get_mod_fun(fun,\n self.insns_cur,\n self.insns_nex,\n self.parsed_cur,\n self.parsed_nex,\n diffs[fun],\n self.fun_patches[fun])\n\n data_out.append(((fun),l_add_t + l_add_f, l_rm_t + l_rm_f, \\\n diffs[fun]['cu'], commits, 'MOD'))\n self.result_fun.update({fun:[]})\n self.result_fun[fun].extend((commits, diffs[fun]['cu']))\n\n if len(res) == 0:\n continue\n\n result.update(res)\n\n for i in commits:\n if i not in shas:\n shas.append(i)\n\n l_i_add_t += l_add_t\n l_i_rm_t += l_rm_t\n l_i_add_f += l_add_f\n l_i_rm_f += l_rm_f\n\n return result, data_out, l_i_add_t, l_i_rm_t, l_i_add_f, l_i_rm_f, shas\n\n def decl_in_patch(self, data, cu, hunk_text):\n for fun in data[cu].keys():\n decl = data[cu][fun]['src']\n for line in hunk_text:\n if decl[0] in line.rstrip('\\n'):\n return decl, fun\n return None, None\n\n\n def check_removed(self, data, patch_data):\n result = {}\n commits = []\n len_rm_tot = 0\n data_out = []\n for cu in data.keys():\n for fun in data[cu].keys():\n if cu + '.fl' in self.fun_def_len_cur.keys():\n if not fun in self.fun_def_len_cur[cu + '.fl'].keys():\n self.logger.info(\"Fun %s cannot be found in current tree\"\n % fun)\n continue\n flag = False\n info = self.fun_def_len_cur[cu+'.fl'][fun]\n len_rm = info['end'] - info['start']\n if len_rm == 0:\n continue\n if not cu in patch_data.keys():\n continue\n for patch in patch_data[cu]:\n for item in patch['items']:\n for hunk in item.hunks:\n ht = st.get_hunk_rm_text(hunk)\n _decl = data[cu][fun]['decl']['src']\n decl = []\n for t in _decl:\n decl.append(t.replace('\\t','').lstrip(' '))\n for k, i in enumerate(ht):\n if decl[0] in i:\n if decl == ht[k:k+len(decl)]:\n hunk_text = ht[k:k+len_rm+len(decl)]\n flag = True\n if not cu in result.keys():\n result.update({cu:{}})\n _l = len_rm + len(decl)\n result[cu].update({fun: {\n 'pname': patch['patch'],\n 'cu': cu,\n 'htext':hunk_text,\n 'len': _l,\n }})\n len_rm_tot += len_rm + len(decl)\n commits.append(patch['commit'])\n data_out.append((fun,0 ,_l,\n cu,\n [patch['commit']], 'RM'))\n\n if not fun in self.result_fun.keys():\n self.result_fun.update({fun:[]})\n self.result_fun[fun].append((commits,cu))\n\n break\n\n if flag == True:\n break\n\n return result, data_out, commits, len_rm_tot\n\n def check_not_in_cur(self):\n not_found = {}\n found = {}\n added = {}\n\n for cu in self.not_in_cur.keys():\n for fun in self.not_in_cur[cu]:\n flag = False\n if fun in self.fun_patches_nf:\n patch = self.fun_patches_nf[fun]\n decl_nex = self.fun_decl_nex[cu][fun]['src']\n body_start = 
self.fun_def_len_nex[cu+'.fl'][fun]['start']\n body_end = self.fun_def_len_nex[cu+'.fl'][fun]['end']\n if isinstance(patch, bool):\n continue\n commit = aux.get_hash(patch.items[0].header)\n for item in patch.items:\n for hunk in item.hunks:\n ht = st.hunk_decode(hunk)\n hunkfind = [x[1:].rstrip(b\"\\r\\n\").decode('utf-8')\n for x in hunk.text if x[0] in b\" -\"]\n hunkreplace = [x[1:].rstrip(b\"\\r\\n\").decode('utf-8')\n for x in hunk.text if x[0] in b\" + \"]\n for line in hunkreplace:\n if decl_nex[0] == line.rstrip('\\n') :\n r,fun_o = self.decl_in_patch(\n self.fun_decl_cur, cu, hunkfind)\n\n flag = True\n if r != None:\n if not cu in found.keys():\n found.update({cu:{}})\n\n patch_name = aux.find_commit(\n self.diff_path, commit)\n\n found[cu].update({fun:{\n 'decl_nex': decl_nex,\n 'decl_old': r,\n 'fun_old': fun_o,\n 'fun_new': fun,\n 'patch': patch,\n 'commit':commit,\n 'cu': cu,\n 'pname':patch_name,\n 'htext': ht,\n 'hfind': hunkfind,\n 'hreplace':hunkreplace}})\n else:\n if not cu in added.keys():\n added.update({cu:{}})\n patch_name = aux.find_commit(\n self.diff_path, commit)\n add_len = body_end - body_start\n add_len += len(decl_nex)\n added[cu].update({fun: {\n 'decl_nex': decl_nex,\n 'patch': patch,\n 'pname':patch_name,\n 'commit':commit,\n 'htext': ht,\n 'hfind': hunkfind,\n 'hreplace':hunkreplace}})\n\n\n if flag == False:\n # if there is only declration but no body seems\n # like define/macro\n if body_start == body_end:\n self.logger.info(\"Fun: %s could macro or fun with \\\n body len 0\" % fun)\n continue\n\n if not cu in not_found.keys():\n not_found.update({cu:{}})\n if not fun in not_found[cu]:\n data = self.not_in_cur[cu][fun]\n not_found[cu].update({fun: {'decl_nex': decl_nex,\n 'data': data,\n 'commit': commit,}})\n else:\n self.logger.info(\"---> No patch for fun: %s available\" % fun)\n\n return found, added, not_found\n\n\n def cnt_funs(self, data):\n l = 0\n for cu in data.keys():\n l += len(data[cu])\n return l\n\n def get_funs(self, data):\n l = []\n for cu in data.keys():\n for fun in data[cu].keys():\n l.append(data[cu][fun]['fun_old'])\n l.append(fun)\n return l\n\n\n", "repo_name": "FYNQ/PIT", "sub_path": "src/python/checker.py", "file_name": "checker.py", "file_ext": "py", "file_size_in_byte": 24955, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pprint.PrettyPrinter", "line_number": 9, "usage_type": "call"}, {"api_name": "aux.get_hash", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 70, "usage_type": "call"}, {"api_name": "aux.setup_logger", "line_number": 88, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 89, "usage_type": 
"call"}, {"api_name": "aux.load", "line_number": 102, "usage_type": "call"}, {"api_name": "aux.load", "line_number": 103, "usage_type": "call"}, {"api_name": "aux.load", "line_number": 105, "usage_type": "call"}, {"api_name": "aux.load", "line_number": 106, "usage_type": "call"}, {"api_name": "aux.load", "line_number": 108, "usage_type": "call"}, {"api_name": "aux.load", "line_number": 109, "usage_type": "call"}, {"api_name": "aux.load", "line_number": 111, "usage_type": "call"}, {"api_name": "aux.load", "line_number": 112, "usage_type": "call"}, {"api_name": "src_tools.get_src", "line_number": 114, "usage_type": "call"}, {"api_name": "src_tools.get_src", "line_number": 117, "usage_type": "call"}, {"api_name": "src_tools.get_src", "line_number": 120, "usage_type": "call"}, {"api_name": "src_tools.get_src", "line_number": 123, "usage_type": "call"}, {"api_name": "src_tools.gen_funs_diff", "line_number": 131, "usage_type": "call"}, {"api_name": "conf.LINUX_SRC", "line_number": 152, "usage_type": "attribute"}, {"api_name": "conf.LINUX_SRC", "line_number": 156, "usage_type": "attribute"}, {"api_name": "conf.LINUX_SRC", "line_number": 160, "usage_type": "attribute"}, {"api_name": "aux.get_patches_fp", "line_number": 164, "usage_type": "call"}, {"api_name": "aux.get_patches", "line_number": 165, "usage_type": "call"}, {"api_name": "aux.get_patch_lst_by_cu", "line_number": 166, "usage_type": "call"}, {"api_name": "aux.get_patches", "line_number": 167, "usage_type": "call"}, {"api_name": "aux.get_commit_time_sec", "line_number": 174, "usage_type": "call"}, {"api_name": "conf.LINUX_SRC", "line_number": 174, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 178, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 189, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 191, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 193, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 196, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 199, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 202, "usage_type": "call"}, {"api_name": "conf.VALIDATION", "line_number": 264, "usage_type": "attribute"}, {"api_name": "validation.print_val_added", "line_number": 265, "usage_type": "call"}, {"api_name": "validation.print_val_removed", "line_number": 266, "usage_type": "call"}, {"api_name": "validation.print_val_renamed", "line_number": 267, "usage_type": "call"}, {"api_name": "aux.git_make_fun_diff", "line_number": 286, "usage_type": "call"}, {"api_name": "src_tools.get_diff", "line_number": 346, "usage_type": "call"}, {"api_name": "src_tools.get_mod_fun", "line_number": 352, "usage_type": "call"}, {"api_name": "src_tools.get_mod_fun", "line_number": 405, "usage_type": "call"}, {"api_name": "src_tools.get_hunk_rm_text", "line_number": 465, "usage_type": "call"}, {"api_name": "aux.get_hash", "line_number": 516, "usage_type": "call"}, {"api_name": "src_tools.hunk_decode", "line_number": 519, "usage_type": "call"}, {"api_name": "aux.find_commit", "line_number": 534, "usage_type": "call"}, {"api_name": "aux.find_commit", "line_number": 552, "usage_type": "call"}]} +{"seq_id": "69947265332", "text": "import sqlite3\n\ndef create_table():\n conn=sqlite3.connect(\"lite.db\")\n cur=conn.cursor()\n cur.execute(\"CREATE TABLE IF NOT EXISTS store(item Text,Qty INTEGER,price REAL)\")\n conn.commit()\n conn.close()\n\n#create_table()\n\ndef insert(item,qty,price):\n conn=sqlite3.connect(\"lite.db\")\n cur=conn.cursor()\n 
cur.execute(\"INSERT INTO store VALUES(?,?,?)\",(item,qty,price))\n conn.commit()\n conn.close()\n\n#insert('Milk bottle', 5, 6)\n\ndef view():\n conn=sqlite3.connect(\"lite.db\")\n cur=conn.cursor()\n cur.execute(\"SELECT * FROM store\")\n rows=cur.fetchall()\n conn.close()\n return rows\n\n#print(view())\n\ndef delete(item):\n conn=sqlite3.connect(\"lite.db\")\n cur=conn.cursor()\n cur.execute(\"DELETE FROM store WHERE item=?\",(item,))\n conn.commit()\n conn.close()\n\ndef select(item):\n conn=sqlite3.connect(\"lite.db\")\n cur=conn.cursor()\n cur.execute(\"SELECT * FROM store WHERE item=?\",(item,))\n rows=cur.fetchall()\n conn.close()\n return rows\n\n#print(select('Coffee cup'))\n\ndef update_qty_price(Qty,price,item):\n conn=sqlite3.connect(\"lite.db\")\n cur=conn.cursor()\n cur.execute(\"UPDATE store SET Qty=?, price=? WHERE item=?\",(Qty,price,item))\n conn.commit()\n conn.close()\n\nupdate_qty_price(10,8.99,'Coffee cup')\nprint(view())", "repo_name": "AlexHuk85/tkinter", "sub_path": "database/main/script.py", "file_name": "script.py", "file_ext": "py", "file_size_in_byte": 1295, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sqlite3.connect", "line_number": 4, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 13, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "26980224183", "text": "import pprint\nimport random\n\nfrom typing import List\n\n\nclass Genome:\n _genome: List[int]\n\n def __init__(self, size: int):\n self._genome = [random.randint(0, 1) for _ in range(size)]\n\n def mutation(self, mutation_rate: float, mutation_prob: float) -> 'Genome':\n if not (1 >= mutation_rate >= 0 and 1 >= mutation_prob >= 0):\n raise Exception('mutation rate and prob must be between 0 and 1')\n if mutation_prob < random.random():\n genome_string = [1 - bit if random.random() < mutation_rate else bit for bit in self._genome]\n\n genome = Genome(len(self._genome))\n genome._genome = genome_string\n return genome\n else:\n return self\n\n def fitness(self) -> int:\n return sum(self._genome)\n\n def crossover(self, partner: 'Genome', cross_prob: float) -> ('Genome', 'Genome'):\n if random.random() < cross_prob:\n cross_over_point = random.randint(0, len(self._genome) - 1)\n g1 = Genome(len(self._genome))\n g2 = Genome(len(self._genome))\n g1._genome = self._genome[:cross_over_point] + partner._genome[cross_over_point:]\n g1._genome = partner._genome[:cross_over_point] + self._genome[cross_over_point:]\n return g1, g2\n else:\n return self, partner\n\n def __repr__(self):\n return str(self._genome)\n\n\ndef random_pop(pop_size: int, genome_size: int):\n return [Genome(genome_size) for _ in range(pop_size)]\n\n\ndef select(pop: List[Genome], fits: List[int], pop_size: int):\n return random.choices(pop, fits, k=pop_size)\n\n\ndef crossover(pop: List[Genome], crossover_prob: float):\n off = []\n for genome1, genome2 in zip(pop[::2], pop[1::2]):\n off.extend(genome1.crossover(genome2, crossover_prob))\n return off\n\n\ndef mutation(pop: List[Genome], mutation_rate: float, mutation_prob: float):\n return [genome.mutation(mutation_rate, mutation_prob) for genome in pop]\n\n\ndef main():\n POP_SIZE = 100\n MAX_GEN = 100\n 
GENOME_SIZE = 25\n CROSS_PROB = 0.8\n MUT_PROB = 0.2\n MUT_RATE = 1 / GENOME_SIZE\n pop: List[Genome] = random_pop(POP_SIZE, GENOME_SIZE)\n for i in range(MAX_GEN):\n fits = [genome.fitness() for genome in pop]\n print(f'gen {i}: {max(fits)}')\n mating_pool = select(pop, fits, POP_SIZE)\n off = crossover(mating_pool, CROSS_PROB)\n off = mutation(off, MUT_RATE, MUT_PROB)\n pop = off[:]\n\n pprint.pprint(pop)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "sikupe/evolutionary-algorithms-execises", "sub_path": "lab-1/spa.py", "file_name": "spa.py", "file_ext": "py", "file_size_in_byte": 2535, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.List", "line_number": 8, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 11, "usage_type": "call"}, {"api_name": "random.random", "line_number": 16, "usage_type": "call"}, {"api_name": "random.random", "line_number": 17, "usage_type": "call"}, {"api_name": "random.random", "line_number": 29, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 30, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 47, "usage_type": "name"}, {"api_name": "random.choices", "line_number": 48, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 69, "usage_type": "name"}, {"api_name": "pprint.pprint", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "72444377333", "text": "\"\"\"Application main module.\"\"\"\nfrom typing import Any, Dict, List\n\nfrom demo.models import metadata, session_scope, Users, Orders\nfrom demo.utils import get_table_records\n\nfrom datetime import datetime, timedelta\nfrom sqlalchemy import extract, func\nfrom sqlalchemy.exc import IntegrityError\n\n\ndef load_records(orders: List[Dict[str, Any]]):\n \"\"\"Loads order records into database into the Orders Table\n and add a new User per new unique user email.\n\n Args:\n A list of dicts containing the orders and \n the table in which we want to import them.\n\n Returns:\n None \n\n \"\"\"\n with session_scope() as session:\n for record in orders:\n try:\n users = [user[\"account\"] for user in get_table_records(Users)]\n if record[\"account\"] not in users:\n user = {\n \"account\": record[\"account\"],\n \"active\": True,\n \"is_demo\": True,\n }\n row = Users(**user)\n session.add(row)\n session.commit()\n except IntegrityError:\n print(\"User is already in the database\")\n\n try:\n orders = [order[\"order_number\"] for order in get_table_records(Orders)]\n row = Orders(**record)\n session.add(row)\n session.commit()\n except IntegrityError:\n print(\"Order is already in the database\")\n\n\ndef export_records() -> List[Dict[str, Any]]:\n \"\"\"Export users accounts and total account value for the past 12 months.\n\n Args:\n None\n\n Returns:\n A List of Dictionary objects containing the required fields.\n\n \"\"\"\n return_val = []\n with session_scope() as session:\n filter_after = datetime.today() - timedelta(12 * 30)\n\n records = (\n session.query(Users, func.sum(Orders.cost).label(\"total_account_value\"))\n .join(Orders)\n .filter(\n extract(\"year\", Orders.date) >= filter_after.year,\n extract(\"month\", Orders.date) >= filter_after.month,\n extract(\"day\", Orders.date) >= filter_after.day,\n )\n .group_by(Users.account)\n .all()\n )\n\n for user_account, total_account_value in 
records:\n user_account = {\n \"account\": user_account.account,\n \"active\": user_account.active,\n \"is_demo\": user_account.is_demo,\n \"total_account_value\": total_account_value,\n }\n return_val.append(user_account)\n return return_val\n", "repo_name": "ivan-gerov/element_human_task_2", "sub_path": "demo/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2681, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.List", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 12, "usage_type": "name"}, {"api_name": "demo.models.session_scope", "line_number": 24, "usage_type": "call"}, {"api_name": "demo.utils.get_table_records", "line_number": 27, "usage_type": "call"}, {"api_name": "demo.models.Users", "line_number": 27, "usage_type": "argument"}, {"api_name": "demo.models.Users", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.IntegrityError", "line_number": 37, "usage_type": "name"}, {"api_name": "demo.utils.get_table_records", "line_number": 41, "usage_type": "call"}, {"api_name": "demo.models.Orders", "line_number": 41, "usage_type": "argument"}, {"api_name": "demo.models.Orders", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.IntegrityError", "line_number": 45, "usage_type": "name"}, {"api_name": "demo.models.session_scope", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 61, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 61, "usage_type": "call"}, {"api_name": "demo.models.Orders", "line_number": 65, "usage_type": "argument"}, {"api_name": "demo.models.Users", "line_number": 64, "usage_type": "argument"}, {"api_name": "sqlalchemy.func.sum", "line_number": 64, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 64, "usage_type": "name"}, {"api_name": "demo.models.Orders.cost", "line_number": 64, "usage_type": "attribute"}, {"api_name": "demo.models.Orders", "line_number": 64, "usage_type": "name"}, {"api_name": "sqlalchemy.extract", "line_number": 67, "usage_type": "call"}, {"api_name": "demo.models.Orders.date", "line_number": 67, "usage_type": "attribute"}, {"api_name": "demo.models.Orders", "line_number": 67, "usage_type": "name"}, {"api_name": "sqlalchemy.extract", "line_number": 68, "usage_type": "call"}, {"api_name": "demo.models.Orders.date", "line_number": 68, "usage_type": "attribute"}, {"api_name": "demo.models.Orders", "line_number": 68, "usage_type": "name"}, {"api_name": "sqlalchemy.extract", "line_number": 69, "usage_type": "call"}, {"api_name": "demo.models.Orders.date", "line_number": 69, "usage_type": "attribute"}, {"api_name": "demo.models.Orders", "line_number": 69, "usage_type": "name"}, {"api_name": "demo.models.Users.account", "line_number": 71, "usage_type": "attribute"}, {"api_name": "demo.models.Users", "line_number": 71, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "20663517932", "text": "import keras.backend as K\nimport tensorflow as tf\nfrom keras import Input, Model\nfrom keras.optimizers import SGD\nfrom keras.layers import Conv2D, ReLU, 
MaxPool2D, BatchNormalization, Reshape, Add\n\n\nclass SimGAN:\n \"\"\"\n Implementation of SimGAN model.\n\n Details can be found here: https://arxiv.org/pdf/1612.07828.pdf\n \"\"\"\n def __init__(self, input_shape, optimizer=SGD(2e-4, momentum=0.9), lambda_reg=10.0, train_mean=0.0, train_std=0.0):\n self.max_val = 2**14 - 1\n self.lambda_reg = lambda_reg\n self.train_mean = train_mean\n self.train_std = train_std\n\n # build discriminator network\n self.discriminator = self.discriminator_network(input_shape)\n self.discriminator.compile(loss=self.local_discrim_loss, optimizer=optimizer)\n self.discriminator.summary()\n\n # disable training for discriminator when training refiner\n self.discriminator.trainable = False\n\n # build refiner network\n self.refiner = self.refiner_network(input_shape)\n self.refiner.compile(loss=self.self_regularization_loss, optimizer=optimizer)\n self.refiner.summary()\n\n # refine a set of synthetic images and run them through the discriminator\n inputs = Input(shape=input_shape)\n refined_inputs = self.refiner(inputs)\n refined_probs = self.discriminator(refined_inputs)\n\n # build adversarial network\n self.adversarial = Model(inputs=inputs, outputs=[refined_inputs, refined_probs],\n name='adversarial_model')\n self.adversarial.compile(loss=[self.self_regularization_loss, self.local_discrim_loss],\n optimizer=optimizer)\n self.adversarial.summary()\n\n\n def refiner_network(self, refiner_input_shape):\n \"\"\"\n Refiner network of SimGAN meant for images of input size 224x224.\n \"\"\"\n def resnet_block(inputs):\n \"\"\"\n ResNet Block with skip connection.\n \"\"\"\n x = Conv2D(64, 3, strides=1, padding='same', activation='relu')(inputs)\n x = BatchNormalization()(x)\n x = Conv2D(64, 3, strides=1, padding='same')(x)\n skip = Add()([x, inputs])\n out = ReLU()(skip)\n out = BatchNormalization()(out)\n\n return out\n\n inputs = Input(shape=refiner_input_shape)\n\n net = Conv2D(64, 7, strides=1, padding='same', activation='relu')(inputs)\n net = BatchNormalization()(net)\n\n for _ in range(10):\n net = resnet_block(net)\n\n output_map = Conv2D(1, 1, strides=1, activation='tanh')(net)\n\n return Model(inputs=inputs, outputs=output_map, name='refiner')\n\n\n def discriminator_network(self, discriminator_input_shape):\n \"\"\"\n Discriminator network of SimGAN meant for images of input size 224x224.\n \"\"\"\n inputs = Input(shape=discriminator_input_shape)\n\n net = Conv2D(96, 7, strides=4, padding='same', activation='relu')(inputs)\n net = BatchNormalization()(net)\n net = Conv2D(64, 5, strides=2, padding='same', activation='relu')(net)\n net = BatchNormalization()(net)\n #net = MaxPool2D(pool_size=3, strides=2, padding='same')(net)\n net = Conv2D(64, 3, strides=2, padding='same', activation='relu')(net)\n net = BatchNormalization()(net)\n net = Conv2D(32, 3, strides=2, padding='same', activation='relu')(net)\n net = BatchNormalization()(net)\n net = Conv2D(32, 1, strides=1, activation='relu')(net)\n net = BatchNormalization()(net)\n\n output_map = Conv2D(2, 1, strides=1)(net)\n output_map = Reshape((-1, 2))(output_map) # reshape to [batch, H*W, C] for proper softmax crossentropy\n\n return Model(inputs=inputs, outputs=output_map, name='discriminator')\n\n\n # def discriminator_loss(self, D_R_x, D_y, epsilon=1e-12):\n # \"\"\"\n # Discriminator loss is Eq. 
2 in: https://arxiv.org/pdf/1612.07828.pdf\n # \"\"\"\n # loss = -1 * (K.mean(K.log(D_R_x + epsilon)) + K.mean(K.log(1 - D_y + epsilon)))\n #\n # return loss\n #\n #\n # def refiner_loss(self, x, R_x, D_R_x, lambda_reg=10.0, epsilon=1e-12):\n # \"\"\"\n # Refiner loss contains a realism loss and regularizer loss.\n #\n # Eq. 4 in: https://arxiv.org/pdf/1612.07828.pdf\n # \"\"\"\n # realism_loss = -1 * K.mean(K.log(1 - D_R_x + epsilon))\n # regularizer_loss = self.self_regularization_loss(x, R_x, lambda_reg=lambda_reg)\n #\n # return realism_loss + regularizer_loss\n\n def local_discrim_loss(self, y_true, y):\n y = K.reshape(y, (-1, 2))\n y_true = K.reshape(y_true, (-1, 2))\n\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true, logits=y))\n\n return loss\n\n\n def self_regularization_loss(self, x, R_x):\n \"\"\"\n L1 norm performed on simulated image, x, and the refined image, R(x).\n\n This loss helps to preserve the structure of the original simulated image\n while refining it.\n \"\"\"\n refined = ((self.max_val * ((R_x + 1.) / 2.)) - self.train_mean) / self.train_std\n\n #l1_norm = K.sum(K.abs(refined - x))\n l1_norm = K.mean(K.abs(refined - x))\n\n return self.lambda_reg * l1_norm\n", "repo_name": "wkhademi/SimGAN", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 5315, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "keras.optimizers.SGD", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.Input", "line_number": 34, "usage_type": "call"}, {"api_name": "keras.Model", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 55, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.Add", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.layers.ReLU", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.Input", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 71, "usage_type": "call"}, {"api_name": "keras.Model", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.Input", "line_number": 80, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 82, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 83, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 84, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 85, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 87, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 89, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 90, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 91, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 92, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 94, "usage_type": 
"call"}, {"api_name": "keras.layers.Reshape", "line_number": 95, "usage_type": "call"}, {"api_name": "keras.Model", "line_number": 97, "usage_type": "call"}, {"api_name": "keras.backend.reshape", "line_number": 121, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 121, "usage_type": "name"}, {"api_name": "keras.backend.reshape", "line_number": 122, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 122, "usage_type": "name"}, {"api_name": "tensorflow.reduce_mean", "line_number": 124, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits_v2", "line_number": 124, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 124, "usage_type": "attribute"}, {"api_name": "keras.backend.mean", "line_number": 139, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 139, "usage_type": "name"}, {"api_name": "keras.backend.abs", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "16945381100", "text": "import numpy as np\nimport networkx as nx\nfrom datetime import datetime\nfrom multiprocessing import Pool\nfrom os import walk\nimport time\nimport pandas as pd\n\n\n# step 1: get all files\npath = '/media/usr/ecosystem_01/data/coau_nets/graphml/'\nmetrics_path = '/media/usr/ecosystem_01/data/net_metrics/'\nfor (root, dirs, all_files) in walk(path):\n break\n\nfiles = [f.split('.')[0] for f in all_files]\n# step 2: splitting\nsplitted = np.array_split(files, 5) # split it into 5 batches\nbatches = [list(ary) for ary in splitted]\n\n# insert an element to each list for naming the list\nnum = 0\nfor bat in batches:\n bat.insert(0, str(num))\n num += 1\n\n\n# define read-calculate operations for each batch\n'''\nE: number of edges\nN: number of nodes\n\nThe average degree:\n<k>= 2E/N (total edges/total nodes)\n\nDensity:\na measure of the prevalence of dyadic linkage or direct tie within a social network;\nit is equal to the proportion of actual connections (edges) to potential connections (edges),\ndensity = E / [n(n-1)/2]\n\n:: Diameter and Distance require completely connected graphs.\n'''\n\ndef read_graph(grap_name_list): \n # input: contains a group of CORE_PROJECT_NUM, an example of them is 'C06AI058609'\n print('processing {} ...'.format(grap_name_list[0]))\n start = time.time()\n \n write_file = open(metrics_path+grap_name_list[0]+'batch.csv', 'w', encoding='utf-8')\n head = ['CORE_PROJECT_NUM', 'Nodes', 'Edges', 'Nodes_lcc', 'Edges_lcc',\n 'avg_degree', 'avg_weighted_degree', 'density', 'avg_clus_coeff',\n 'weighted_avg_clus_coeff\\n']\n # lcc denotes largest connected component\n write_file.write(','.join(head))\n \n flag = 0\n for gra in grap_name_list[1:]:\n g = nx.read_graphml(path+gra+'.graphml')\n \n # number of edges, nodes\n n, e = g.number_of_nodes(), g.number_of_edges()\n \n # largest connected components\n ccp = nx.connected_components(g)\n largest_cc = max(ccp, key=len)\n s = g.subgraph(largest_cc).copy()\n nlcc, elcc = str(len(s.nodes())), str(len(s.edges()))\n \n # average degree & average weighted degree\n # https://networkx.org/documentation/stable/_modules/networkx/classes/function.html#density\n d = (deg for (node,deg) in nx.classes.function.degree(g))\n avg_deg = str(round(sum(d)/n, 4))\n wd = (deg for (node,deg) in nx.classes.function.degree(g, weight='weight'))\n avg_weigh_deg = str(round(sum(wd)/n, 4))\n \n # density\n dens = nx.classes.function.density(g)\n dens = str(round(dens,4))\n \n # average clustering coefficient, see documentation for details\n avg_cc = 
str(round(nx.average_clustering(g), 4))\n avg_weighted_cc = str(round(nx.average_clustering(g, weight='weight'), 4))\n \n row = [gra, str(n), str(e), nlcc, elcc, avg_deg, avg_weigh_deg, dens, avg_cc, avg_weighted_cc+'\\n']\n write_file.write(','.join(row))\n \n flag += 1\n if flag % 500 == 0:\n end = time.time()\n print('batch {}: {}/{} processed, time elapsed: {} mins, current time: {}'.format(grap_name_list[0],\n flag, len(grap_name_list),\n round((end-start)/60, 4),\n datetime.now().strftime('%H:%M:%S')),\n end='\\r')\n \n write_file.close()\n end = time.time()\n print('\\n===batch {} spent {} mins, current time: {}==='.format(grap_name_list[0],\n round((end-start)/60, 4),\n datetime.now().strftime('%H:%M:%S')))\n\n\npool = Pool(5) \nwith pool:\n pool.map(read_graph, batches)\n\n\nprint('calculating finished at: {}'.format(datetime.now().strftime('%H:%M:%S')))\n\n# concatenate all outputs\nmetric_csvs = []\nfor (dirpath, dirname, filename) in walk(metrics_path):\n metric_csvs.extend(filename) # filename has a suffix '.graphml'\n break\n\ncandies = []\nfor csv in metric_csvs:\n data = pd.read_csv(metrics_path+csv)\n candies.append(data)\n \nconcated = pd.concat(candies)\nconcated.to_csv(metrics_path+'nets_metrics_merged.csv', index=False)\n\nprint('=== metrics concatenated ===')\n", "repo_name": "immorBen/code-usages", "sub_path": "Parallel_computing/python-multiprocessing/parall_cal_metrics.py", "file_name": "parall_cal_metrics.py", "file_ext": "py", "file_size_in_byte": 4491, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.walk", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.array_split", "line_number": 18, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "networkx.read_graphml", "line_number": 58, "usage_type": "call"}, {"api_name": "networkx.connected_components", "line_number": 64, "usage_type": "call"}, {"api_name": "networkx.classes.function.degree", "line_number": 71, "usage_type": "call"}, {"api_name": "networkx.classes", "line_number": 71, "usage_type": "attribute"}, {"api_name": "networkx.classes.function.degree", "line_number": 73, "usage_type": "call"}, {"api_name": "networkx.classes", "line_number": 73, "usage_type": "attribute"}, {"api_name": "networkx.classes.function.density", "line_number": 77, "usage_type": "call"}, {"api_name": "networkx.classes", "line_number": 77, "usage_type": "attribute"}, {"api_name": "networkx.average_clustering", "line_number": 81, "usage_type": "call"}, {"api_name": "networkx.average_clustering", "line_number": 82, "usage_type": "call"}, {"api_name": "time.time", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 93, "usage_type": "name"}, {"api_name": "time.time", "line_number": 97, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 100, "usage_type": "name"}, {"api_name": "multiprocessing.Pool", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 108, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 112, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 118, "usage_type": "call"}, {"api_name": "pandas.concat", 
"line_number": 121, "usage_type": "call"}]} +{"seq_id": "72299638774", "text": "#import openpyxl\r\nfrom openpyxl import load_workbook\r\n# https://openpyxl.readthedocs.io/en/stable/tutorial.html#loading-from-a-file\r\nfrom openpyxl.styles import PatternFill, Border, Side, Alignment, Protection, Font\r\n#from openpyxl.styles import colors\r\nfrom openpyxl.styles import Font, Color\r\n#from openpyxl import Workbook\r\nfrom openpyxl.utils import get_column_letter\r\n#import re\r\nfrom copy import copy\r\n\r\n\r\nclass formatter():\r\n def __init__(self,\r\n filename,\r\n sheet_name= None,\r\n font_name = \"Calibri\",\r\n font_size = 10,\r\n header_font_size = 14,\r\n header_bold = True,\r\n froze_first_row=True,\r\n add_fliter=True,\r\n header_height=25,\r\n add_border=True,\r\n alignment_center=True,\r\n specific_alignment_left = [],\r\n auto_fit_column_width=True,\r\n wrap_width=[],\r\n fill_color_header='95B3DF',\r\n hide_columns=[],\r\n column_width= {},\r\n output_filename=None,\r\n home_page_bottom= None,\r\n whiten_nontable_area= False,\r\n zoom_level = 85,\r\n alignment_center_horizon = [],\r\n alignment_center_vertical = []\r\n ):\r\n # load excel file and focus on one sheet\r\n if column_width is None: ## if sheet name is not defined, the app is default to apply format function to all sheets\r\n column_width = {}\r\n wb = load_workbook(filename)\r\n if sheet_name is None:\r\n sheet_name = wb.sheetnames\r\n elif type(sheet_name) == str:\r\n sheet_name = [sheet_name]\r\n # defind table, header, body\r\n for sheet_name_item in sheet_name:\r\n try:\r\n ws = wb[sheet_name_item]\r\n table = ws[ws.min_row:ws.max_row]\r\n header = ws[ws.min_row]\r\n if table == header: ## if there is only header in that sheet\r\n table = (table,)\r\n body = ws[ws.min_row + 1:ws.max_row]\r\n\r\n #font and font size\r\n font = Font(name=font_name,\r\n size=font_size,\r\n bold=False,\r\n italic=False,\r\n vertAlign=None,\r\n underline='none',\r\n strike=False,\r\n color='FF000000')\r\n\r\n for tr in table:\r\n for td in tr:\r\n td.font = font\r\n\r\n font = Font(name=font_name,\r\n size=header_font_size,\r\n bold=False,\r\n italic=False,\r\n vertAlign=None,\r\n underline='none',\r\n strike=False,\r\n color='FF000000')\r\n\r\n #font for header\r\n for td in header:\r\n td.font = font\r\n\r\n\r\n\r\n if header_bold:\r\n for td in header:\r\n font = copy(td.font)\r\n font.bold = True\r\n td.font = font\r\n\r\n\r\n ## frozen first row\r\n if froze_first_row:\r\n freeze_position = ws['A2']\r\n ws.freeze_panes = freeze_position\r\n ## add filter on header\r\n if add_fliter:\r\n ws.auto_filter.ref = ws.dimensions\r\n\r\n ## set header height\r\n header_obj = ws.row_dimensions[1]\r\n header_obj.height = header_height\r\n\r\n ## add_border\r\n if add_border:\r\n border = Border(left=Side(border_style='thin', color='FF000000'),\r\n right=Side(border_style='thin', color='FF000000'),\r\n top=Side(border_style='thin', color='FF000000'),\r\n bottom=Side(border_style='thin', color='FF000000'),\r\n diagonal=Side(border_style=None, color='FF000000'),\r\n diagonal_direction=0,\r\n outline=Side(border_style=None, color='FF000000'),\r\n vertical=Side(border_style=None, color='FF000000'),\r\n horizontal=Side(border_style=None, color='FF000000'))\r\n for tr in table:\r\n for td in tr:\r\n td.border = border\r\n\r\n ## alignment_center\r\n if alignment_center:\r\n alignment = Alignment(horizontal='center',\r\n vertical='center',\r\n text_rotation=0,\r\n wrap_text=False,\r\n shrink_to_fit=False,\r\n indent=0)\r\n for tr in 
table:\r\n for td in tr:\r\n td.alignment = alignment\r\n ## for those who have left top alignment\r\n for col in specific_alignment_left:\r\n column = ws[col]\r\n for td in column:\r\n alignment = copy(td.alignment)\r\n alignment.horizontal = 'left'\r\n alignment.vertical = 'top'\r\n td.alignment = alignment\r\n\r\n ## for those who have horizontal center alignment\r\n for col in alignment_center_horizon:\r\n column = ws[col]\r\n for td in column:\r\n alignment = copy(td.alignment)\r\n alignment.horizontal = 'center'\r\n td.alignment = alignment\r\n ## for those who have vertical center alignment\r\n for col in alignment_center_vertical:\r\n column = ws[col]\r\n for td in column:\r\n alignment = copy(td.alignment)\r\n alignment.vertical = 'center'\r\n td.alignment = alignment\r\n ###header do not change\r\n\r\n if alignment_center:\r\n for cell in header:\r\n alignment = copy(cell.alignment)\r\n alignment.horizontal = 'center'\r\n alignment.vertical = 'center'\r\n cell.alignment = alignment\r\n\r\n\r\n\r\n ## auto fit column width\r\n if auto_fit_column_width:\r\n column_widths = []\r\n for i, cell in enumerate(header):\r\n try:\r\n column_widths[i] = len(cell.value)\r\n except IndexError:\r\n column_widths.append(len(cell.value))\r\n for i, width in enumerate(column_widths):\r\n ws.column_dimensions[get_column_letter(i + 1)].width = width * 1.3 + 5\r\n\r\n ##choose column width (specific)\r\n for col in column_width:\r\n ws.column_dimensions[col].width = column_width[col]\r\n\r\n ## wrap\r\n for col in wrap_width:\r\n column = ws[col]\r\n for td in column:\r\n alignment = copy(td.alignment)\r\n #alignment.horizontal = 'left'\r\n #alignment.vertical = 'top'\r\n alignment.wrap_text = True\r\n td.alignment = alignment\r\n #ws.column_dimensions[col].width = 55\r\n ## header do not change\r\n alignment.horizontal ='center'\r\n alignment.vertical = 'center'\r\n ws[col + \"1\"].alignment=alignment\r\n\r\n ##fill header color\r\n if fill_color_header is not None:\r\n fill = PatternFill(start_color=fill_color_header, end_color=fill_color_header, fill_type=\"solid\")\r\n for cell in header:\r\n cell.fill = fill\r\n\r\n ##hide columns\r\n for col in hide_columns:\r\n ws.column_dimensions[col].hidden = True\r\n\r\n if home_page_bottom is not None:\r\n offset = 4\r\n No_row = ws.max_row + offset\r\n Home_Bottom = ws[get_column_letter(1) + str(No_row - 1)]\r\n Home_Bottom.value = f'=HYPERLINK(\"#{home_page_bottom}!A1\",\"Home Page\")'\r\n Home_Bottom.font = Font(size=15, bold=True, color='95B3DF')\r\n\r\n ##whiten_nontable_area\r\n if whiten_nontable_area:\r\n ### define area to whiten\r\n whiten_col = ws.max_column + 2\r\n whiten_row = ws.max_row + 2\r\n whiten_area1 = ws[get_column_letter(whiten_col) + str(1):\"AJ200\"]\r\n whiten_area2 = ws['A' + str(whiten_row): get_column_letter(whiten_col - 1) + str(200)]\r\n whiten_area_col = ws[get_column_letter(whiten_col - 1) + str(1):get_column_letter(whiten_col - 1) + str(200)]\r\n whiten_area_row = ws['A' + str(whiten_row - 1): get_column_letter(whiten_col - 1) + str(whiten_row - 1)]\r\n\r\n color = 'FFFFFFFF'\r\n font = Font(bold=False,\r\n italic=False,\r\n vertAlign=None,\r\n underline='none',\r\n strike=False,\r\n color=color)\r\n fill = PatternFill(start_color=color, end_color=color, fill_type=\"solid\")\r\n for tr in whiten_area1:\r\n for td in tr:\r\n td.font = font\r\n td.border = None\r\n td.fill = fill\r\n for tr in whiten_area2:\r\n for td in tr:\r\n td.font = font\r\n td.border = None\r\n td.fill = fill\r\n for tr in whiten_area_col:\r\n for 
td in tr:\r\n td.font = font\r\n # td.border = None\r\n td.fill = fill\r\n for tr in whiten_area_row:\r\n for td in tr:\r\n td.font = font\r\n # td.border = None\r\n td.fill = fill\r\n\r\n ## adjust zoom level\r\n for ws in wb.worksheets:\r\n ws.sheet_view.zoomScale = zoom_level\r\n except:\r\n pass\r\n\r\n if output_filename is None:\r\n wb.save(filename=filename)\r\n else:\r\n wb.save(filename=output_filename)\r\n\r\n#formatter('test.xlsx', sheet_name='raw_data', wrap_width=['J'],column_width={'J':50},header_height=36,output_filename='dsad.xlsx',home_page_bottom='Summary',whiten_nontable_area=True )\r\n#formatter(filename=\"test.xlsx\", hide_columns=[\"C\",\"D\",\"F\",\"D\",\"S\",\"U\",\"T\",\"W\"])\r\n#excel_file_name = 'test.xlsx'\r\n#formatter('test.xlsx',sheet_name = ['Sheet1','Sheet2','Sheet3'],output_filename='dsad.xlsx')\r\n#formatter('test.xlsx',output_filename='dsad.xlsx')\r\n#formatter('test.xlsx',sheet_name = 'Sheet1',output_filename='dsad1.xlsx')", "repo_name": "linleiwen/Max-useful-tools", "sub_path": "excel_formatter.py", "file_name": "excel_formatter.py", "file_ext": "py", "file_size_in_byte": 11875, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 42, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 58, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 71, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 88, "usage_type": "call"}, {"api_name": "openpyxl.styles.Border", "line_number": 107, "usage_type": "call"}, {"api_name": "openpyxl.styles.Side", "line_number": 107, "usage_type": "call"}, {"api_name": "openpyxl.styles.Side", "line_number": 108, "usage_type": "call"}, {"api_name": "openpyxl.styles.Side", "line_number": 109, "usage_type": "call"}, {"api_name": "openpyxl.styles.Side", "line_number": 110, "usage_type": "call"}, {"api_name": "openpyxl.styles.Side", "line_number": 111, "usage_type": "call"}, {"api_name": "openpyxl.styles.Side", "line_number": 113, "usage_type": "call"}, {"api_name": "openpyxl.styles.Side", "line_number": 114, "usage_type": "call"}, {"api_name": "openpyxl.styles.Side", "line_number": 115, "usage_type": "call"}, {"api_name": "openpyxl.styles.Alignment", "line_number": 122, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 135, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 144, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 151, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 158, "usage_type": "call"}, {"api_name": "openpyxl.utils.get_column_letter", "line_number": 174, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 184, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", "line_number": 197, "usage_type": "call"}, {"api_name": "openpyxl.utils.get_column_letter", "line_number": 208, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 210, "usage_type": "call"}, {"api_name": "openpyxl.utils.get_column_letter", "line_number": 217, "usage_type": "call"}, {"api_name": "openpyxl.utils.get_column_letter", "line_number": 218, "usage_type": "call"}, {"api_name": "openpyxl.utils.get_column_letter", "line_number": 219, "usage_type": "call"}, {"api_name": "openpyxl.utils.get_column_letter", "line_number": 220, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 223, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", 
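The excel_formatter.py record above repeatedly does copy(td.font) / copy(td.alignment), mutates the copy, and assigns it back; openpyxl treats style objects as shared between cells, so the documented idiom is copy-modify-assign rather than in-place mutation. A minimal sketch of that idiom (the workbook and header values are invented):

```python
# openpyxl styles are shared objects: copy, modify, then reassign.
from copy import copy

from openpyxl import Workbook

wb = Workbook()
ws = wb.active
ws.append(["id", "name", "score"])   # invented header row

for cell in ws[1]:                   # ws[1] is the header row
    font = copy(cell.font)           # never mutate cell.font directly
    font.bold = True
    cell.font = font
wb.save("styled_demo.xlsx")
```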
"line_number": 229, "usage_type": "call"}]} +{"seq_id": "72817655413", "text": "# Import the necessary libraries\nimport cv2\nimport os\nimport time\n\n# Function to collect faces from a video\ndef collect_faces(video_file, output_folder, frame_skip=5):\n # Create the output folder if it doesn't exist\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n # Load the video file\n video_capture = cv2.VideoCapture(video_file)\n\n # Load a pre-trained face detector\n face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')\n\n # Get the frames per second of the video\n fps = video_capture.get(cv2.CAP_PROP_FPS)\n\n # Get the title of the video (excluding file extension)\n video_title = os.path.splitext(os.path.basename(video_file))[0]\n\n # Initialize variables to keep track of the frame number and face count\n frame_number = 0\n face_count = 0\n\n # Variable to store the second of video\n current_second = -1\n\n # Loop through each frame in the video\n while True:\n # Read the next frame\n ret, frame = video_capture.read()\n \n # Break the loop if we have reached the end of the video\n if not ret:\n break\n\n # Increment the frame number\n frame_number += 1\n\n # Skip frames for faster processing\n if frame_number % frame_skip != 0:\n continue\n\n # Detect faces in the frame\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))\n\n # Calculate the second of video\n second = int(frame_number / fps)\n\n # Update face count if we are in a new second of video\n if current_second != second:\n current_second = second\n face_count = 0\n\n # Save the faces to the output folder\n for (x, y, w, h) in faces:\n face_count += 1\n face_image = frame[y:y+h, x:x+w]\n\n # Save the face using the specified naming format\n face_filename = f\"{video_title}_{second}_{face_count}.jpg\"\n face_filepath = os.path.join(output_folder, face_filename)\n cv2.imwrite(face_filepath, face_image)\n\n # Release the video capture object\n video_capture.release()\n\n# Define the path to the video file and output folder\n# Note: Please update the paths accordingly before running the code\nvideo_file = \"one_minute.mp4\"\noutput_folder = \"face_folder\"\n\n# Call the function to collect faces from the video\n# Note: If you want to skip more or fewer frames, you can specify the frame_skip parameter\ncollect_faces(video_file, output_folder, frame_skip=5)\n\n# Display a message indicating the successful execution of the code\n\"Faces collected and saved successfully with the specified naming format.\"", "repo_name": "anthonymiyoro/Video-Face-Extractor-and-Clusterer", "sub_path": "extract_faces_from_video.py", "file_name": "extract_faces_from_video.py", "file_ext": "py", "file_size_in_byte": 2797, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.path.exists", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.CascadeClassifier", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.data", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 22, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "13355191255", "text": "import subprocess\nfrom setuptools import setup\n\nwith open('requirements.txt', encoding=\"utf-16\") as f:\n requirements = f.read().splitlines()\n\nsetup(\n name='DataToolkit',\n url='https://github.com/coolneighbors/DataToolkit.git',\n author='Aaron Meisner',\n author_email='aaron.meisner@noirlab.edu',\n packages=['DataToolkit'],\n python_requires='>=3.9',\n install_requires=requirements,\n version='1.0',\n license='MIT',\n description='The Cool Neighbors data analysis GitHub repository.',\n long_description=open('README.md').read(),\n)\n\n# This fixes an import bug with the python-magic and python-magic-bin packages.\n# https://github.com/zooniverse/panoptes-python-client/issues/264\n\nimport platform\n\nif platform.system() == 'Windows':\n subprocess.call(['pip', 'install', 'python-magic'])\n subprocess.call(['pip', 'uninstall', 'python-magic-bin'])\n subprocess.call(['pip', 'install', 'python-magic-bin'])", "repo_name": "coolneighbors/DataToolkit", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 939, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "setuptools.setup", "line_number": 7, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 26, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 27, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 28, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "44277217470", "text": "import sys\nimport os\nfrom PIL import Image\n\n#grab the 1st and 2nd argument\nimage_folder = sys.argv[1]\noutput_folder = sys.argv[2]\n\nprint(image_folder, output_folder)\n\n#check if new folder exist, if not create it\nif not os.path.exists(output_folder):\n\tos.makedirs(output_folder)\n\n#loop through pokedex and convert each image to png\nfor filename in os.listdir(image_folder):\n\timg = Image.open(f'{image_folder}{filename}')\n\tclean_name = os.path.splitext(filename)[0]\n\timg.thumbnail((200, 200))\n\timg.save(f'{output_folder}{clean_name}.png', 'png')\n\tprint('all done!')\n\n#save to new folder", "repo_name": "techstylemom/JPG-to-PNG-converter", "sub_path": "JPGtoPNGconverter.py", "file_name": "JPGtoPNGconverter.py", "file_ext": "py", "file_size_in_byte": 584, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 13, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 16, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 17, "usage_type": 
"call"}, {"api_name": "PIL.Image", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}]} +{"seq_id": "4104660547", "text": "from __future__ import annotations\n\nimport asyncio\nimport logging\nfrom typing import TYPE_CHECKING, Any\n\nimport aiohttp\nfrom slack_sdk.socket_mode.aiohttp import SocketModeClient\n\nfrom newbial.core.events import ReadyEvent\n\nif TYPE_CHECKING:\n from newbial.core.bot import Bot\n\n__all__ = ('SocketClient',)\n\n\nclass SocketClient(SocketModeClient):\n if TYPE_CHECKING:\n _bot: Bot\n\n def __init__(self, bot: Bot) -> None:\n self._bot = bot\n self._logger = logging.getLogger(__name__)\n\n super().__init__(\n web_client=bot.web,\n app_token=bot.config.slack.socket_token,\n )\n\n self.message_listeners.append(self._message_callback)\n\n async def connect(self):\n if self.aiohttp_client_session.closed:\n self.aiohttp_client_session = aiohttp.ClientSession()\n\n await super().connect()\n\n self._logger.debug('Connected.')\n\n self._bot.events.dispatch(ReadyEvent())\n\n assert self.current_session_monitor is not None\n\n try:\n await self.current_session_monitor\n except asyncio.CancelledError:\n pass\n\n async def _message_callback(self, *args: Any) -> None:\n d: dict[str, Any] = args[1]\n\n envelope_id: int = d['envelope_id']\n\n self._logger.debug(f'Sending ack for envelope \"{envelope_id}\"')\n await self.send_socket_mode_response({'envelope_id': envelope_id})\n", "repo_name": "tinoy1336/mr-newbial", "sub_path": "newbial/slack/clients/socket_client.py", "file_name": "socket_client.py", "file_ext": "py", "file_size_in_byte": 1421, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 12, "usage_type": "name"}, {"api_name": "slack_sdk.socket_mode.aiohttp.SocketModeClient", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.TYPE_CHECKING", "line_number": 19, "usage_type": "name"}, {"api_name": "newbial.core.bot.Bot", "line_number": 20, "usage_type": "name"}, {"api_name": "newbial.core.bot.Bot", "line_number": 22, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 35, "usage_type": "call"}, {"api_name": "newbial.core.events.ReadyEvent", "line_number": 41, "usage_type": "call"}, {"api_name": "asyncio.CancelledError", "line_number": 47, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "73362822452", "text": "import joblib\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom imblearn.over_sampling import SMOTE\r\nfrom imblearn.pipeline import Pipeline\r\nfrom imblearn.under_sampling import RandomUnderSampler\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn import metrics\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.feature_selection import SelectKBest, f_classif\r\nfrom sklearn.metrics import roc_curve, auc\r\nfrom sklearn.model_selection import train_test_split, RepeatedStratifiedKFold, RandomizedSearchCV\r\n\r\n\r\ndef plot_confusion_matrix(y_test, model_test):\r\n cm = metrics.confusion_matrix(y_test, model_test)\r\n plt.figure(1)\r\n plt.clf()\r\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Wistia)\r\n classNames = ['Low', 'High']\r\n 
plt.title('Confusion Matrix')\r\n    plt.ylabel('True label')\r\n    plt.xlabel('Predicted label')\r\n    tick_marks = np.arange(len(classNames))\r\n    plt.xticks(tick_marks, classNames)\r\n    plt.yticks(tick_marks, classNames)\r\n    s = [['TN', 'FP'], ['FN', 'TP']]\r\n    for i in range(2):\r\n        for j in range(2):\r\n            plt.text(j, i, str(s[i][j]) + \" = \" + str(cm[i][j]))\r\n    plt.savefig('ConfusionMatrix.png', bbox_inches='tight')\r\n    print(\"\\nConfusion Matrix: \", cm)\r\n    total = sum(sum(cm))\r\n    specificity = cm[0, 0]/(cm[0, 0]+cm[0, 1])\r\n    sensitivity = cm[1, 1] / (cm[1, 0] + cm[1, 1])\r\n    print(\"\\nSensitivity: \", sensitivity)\r\n    print(\"\\nSpecificity: \", specificity)\r\n    plt.show()\r\n\r\n\r\ndef report_performance(model):\r\n    model_test = model.predict(X_test)\r\n    print(\"\\n\\nClassification Report: \")\r\n    print(metrics.classification_report(y_test, model_test))\r\n    plot_confusion_matrix(y_test, model_test)\r\n\r\n\r\ndef roc_curves(model):\r\n    predictions_test = model.predict(X_test)\r\n    fpr, tpr, thresholds = roc_curve(y_test, predictions_test)\r\n    roc_auc = auc(fpr, tpr)\r\n    print('AUROC = %.6f' % roc_auc)\r\n    plt.figure(2)\r\n    plt.plot(fpr, tpr, color='darkorange', lw=1, label='ROC curve (area = %0.2f)' % roc_auc)\r\n    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\r\n    plt.xlim([0.0, 1.0])\r\n    plt.ylim([0.0, 1.05])\r\n    plt.xlabel('False Positive Rate')\r\n    plt.ylabel('True Positive Rate')\r\n    plt.title('Receiver operating characteristic')\r\n    plt.legend(loc=\"lower right\")\r\n    plt.savefig('ROC.png', bbox_inches='tight')\r\n    plt.show()\r\n    y_probabilities = model.predict_proba(X_test)[:, 1]\r\n    pr, rc, thresholds = metrics.precision_recall_curve(y_test, y_probabilities)\r\n    plt.plot(pr, rc, color='darkorange')\r\n    plt.xlabel('Precision')\r\n    plt.ylabel('Recall')\r\n    plt.savefig('PRcurve.png', bbox_inches='tight')\r\n    plt.show()\r\n\r\n\r\ndef accuracy(model):\r\n    pred = model.predict(X_test)\r\n    accu = metrics.accuracy_score(y_test, pred)\r\n    print(\"\\nAccuracy Of the Model: \", accu, \"\\n\\n\")\r\n\r\n\r\ndata_path = \"./Data/preprocessed_trail_data_categorized.csv\"\r\ndata_set = pd.read_csv(data_path)\r\nfeatures = list(data_set.columns)\r\npredicted_class = ['dementia_risk']\r\nfeature_classes = list(set(features) - set(predicted_class))\r\n\r\nX = data_set[feature_classes].values\r\ny = data_set[predicted_class].values\r\n# y = preprocessing.label_binarize(y, classes=[0, 1, 2, 3, 4])\r\nprint(X.shape, y.shape)\r\n\r\n\r\ndef count_freq(x):\r\n    (unique, counts) = np.unique(np.array(x), return_counts=True)\r\n    frequencies = np.asarray((unique, counts)).T\r\n    return frequencies\r\n\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)\r\nprint(X_train.shape, y_train.shape)\r\nprint(X_test.shape, y_test.shape)\r\nprint(\"\\n\\nTrain Class:\")\r\nprint(count_freq(y_train))\r\nprint(\"\\n\\nTest Class:\")\r\nprint(count_freq(y_test))\r\n\r\nsmote = SMOTE(sampling_strategy=0.35)\r\nunder_sampling = RandomUnderSampler(sampling_strategy=0.35)\r\npipeline = Pipeline([('smote', smote), ('under_sampling', under_sampling)])\r\n# pipeline = Pipeline([('smote', smote)])\r\nX_train, y_train = pipeline.fit_resample(X_train, y_train)\r\nprint(X_train.shape, y_train.shape)\r\n\r\nprint(\"\\n\\nTrain Class:\")\r\nprint(count_freq(y_train))\r\nprint(\"\\n\\nTest Class:\")\r\nprint(count_freq(y_test))\r\n\r\nfs = SelectKBest(score_func=f_classif, k='all')\r\nclassifier = RandomForestClassifier(min_samples_leaf=2, min_samples_split=100, 
max_depth=100, bootstrap=True,\r\n n_jobs=-1, max_features='sqrt', class_weight={0: 1, 1: 1.25},\r\n n_estimators=1400, criterion=\"gini\", random_state=42)\r\npipeline = Pipeline([\r\n ('fs', fs),\r\n ('classifier', classifier)\r\n])\r\npipeline.fit(X_train, y_train.ravel())\r\ny_pred = pipeline.predict(X_test)\r\n\r\n# Cross Validation Pipeline for hyper-parameter tuning.\r\n# sm = SMOTE()\r\n# us = RandomUnderSampler()\r\n# fs = SelectKBest(score_func=f_classif)\r\n# rf = RandomForestClassifier(random_state=42)\r\n# pipeline = Pipeline([('sm', sm), ('us', us), ('fs', fs), ('rf', rf)])\r\n# kf = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=42)\r\n# n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]\r\n# max_features = ['auto', 'sqrt']\r\n# criterion = ['gini', 'entropy']\r\n# max_depth = [int(x) for x in np.linspace(10, 110, num=11)]\r\n# max_depth.append(None)\r\n# min_samples_split = [2, 5, 10, 15, 100]\r\n# min_samples_leaf = [1, 2, 5, 10]\r\n# bootstrap = [True, False]\r\n# k = [20,25,30,35,40,45,'all']\r\n# smote_sampling_strategy = [0.3, 0.35, 0.4, 0.45, 0.5]\r\n# under_sampling_strategy = [0.3, 0.35, 0.4, 0.45, 0.5]\r\n# class_weights = [{0: 1, 1: 1.5}, {0: 1, 1: 1.25}, {0: 1, 1: 1.75}, 'balanced_subsample']\r\n# hyperF = {'sm__sampling_strategy': smote_sampling_strategy,\r\n# 'us__sampling_strategy': under_sampling_strategy,\r\n# 'fs__k': k,\r\n# 'rf__n_estimators': n_estimators,\r\n# 'rf__max_features': max_features,\r\n# 'rf__max_depth': max_depth,\r\n# 'rf__min_samples_split': min_samples_split,\r\n# 'rf__criterion': criterion,\r\n# 'rf__min_samples_leaf': min_samples_leaf,\r\n# 'rf__bootstrap': bootstrap,\r\n# 'rf__class_weight': class_weights}\r\n#\r\n# rf_random = RandomizedSearchCV(pipeline, param_distributions=hyperF, scoring='roc_auc',\r\n# n_iter=1000, cv=kf, verbose=2, random_state=42, n_jobs=-1)\r\n# bestF = rf_random.fit(X_train, y_train.ravel())\r\n#\r\n# print(\"Best parameters set found on development set:\")\r\n# print(bestF.best_params_)\r\n# report_performance(bestF)\r\n# accuracy(bestF)\r\n# roc_curves(bestF)\r\n\r\nlabels = {0: \"Low\", 1: \"High\"}\r\npredictions = [labels[k] for k in y_pred]\r\nactual = [labels[k] for k in y_test.flatten()]\r\n\r\nfeature_importances = pd.DataFrame(classifier.feature_importances_, index=feature_classes,\r\n columns=['Importance']).sort_values('Importance', ascending=False)\r\nprint(feature_importances)\r\n\r\nfeature_importances.nlargest(13,'Importance').plot(kind='barh')\r\nplt.gca().invert_yaxis()\r\nplt.title('Feature Importances')\r\nplt.xlabel('Relative Importance')\r\nplt.ylabel(\"Features\")\r\nplt.title(\"Feature Importance of Random Forest Model\")\r\nplt.savefig('./Plots/wrt_MMSE/Feature_Import.png', bbox_inches='tight')\r\nplt.show()\r\n\r\nprint(\"\\n\\nPredicted Class:\")\r\nprint(count_freq(y_pred))\r\nprint(\"\\n\\nActual Class:\")\r\nprint(count_freq(y_test.flatten()))\r\nprint(\"\\n\\nConfusion Matrix:\")\r\nprint(pd.crosstab(np.array(predictions), np.array(actual), rownames=['Predicted Risk'], colnames=['Actual Risk']))\r\nreport_performance(pipeline)\r\naccuracy(pipeline)\r\nroc_curves(pipeline)\r\n# joblib.dump(classifier, 'random_forest_model.pkl')\r\n", "repo_name": "MaheshkumarSundaram/Dissertation", "sub_path": "Training/random_forest_implementation.py", "file_name": "random_forest_implementation.py", "file_ext": "py", "file_size_in_byte": 7526, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", 
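The roc_curves helper in this record builds its ROC from hard model.predict labels; a ranking metric like AUROC is normally computed from predict_proba scores instead, which trace out the full curve. A self-contained contrast on synthetic data (not the record's dementia dataset):

```python
# AUROC from graded probability scores vs. from hard 0/1 labels.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=600, weights=[0.8], random_state=42)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=42)
clf = RandomForestClassifier(random_state=42).fit(X_tr, y_tr)

scores = clf.predict_proba(X_te)[:, 1]   # graded scores: preferred input
labels = clf.predict(X_te)               # hard labels: two-point ROC only
print("AUROC from scores:", roc_auc_score(y_te, scores))
print("AUROC from labels:", roc_auc_score(y_te, labels))
```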
"api": [{"api_name": "sklearn.metrics.confusion_matrix", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 19, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 44, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.metrics.auc", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.metrics.auc", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", 
"line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "sklearn.metrics.precision_recall_curve", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 75, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 75, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 93, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 97, "usage_type": "call"}, {"api_name": "imblearn.over_sampling.SMOTE", "line_number": 105, "usage_type": "call"}, {"api_name": "imblearn.under_sampling.RandomUnderSampler", "line_number": 106, "usage_type": "call"}, {"api_name": "imblearn.pipeline.Pipeline", "line_number": 107, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectKBest", "line_number": 117, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.f_classif", "line_number": 117, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 118, "usage_type": "call"}, {"api_name": "imblearn.pipeline.Pipeline", "line_number": 121, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 181, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "pandas.crosstab", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 191, "usage_type": "call"}]} +{"seq_id": "39565437680", "text": "from Archivo import Archivo\nfrom Registro import Registro\nimport logging\nimport glob\nimport os\nimport cPickle\nimport subprocess\n\n\nclass ArchivoLog(Archivo):\n \"\"\"Lee y administra los archivos de log\"\"\"\n def __init__(self, path, path_historico):\n \"\"\"Constructor\"\"\"\n Archivo.__init__(self, path)\n self.logger = logging.getLogger(__name__)\n self.path = path\n self.accesos = {}\n self.ultimo = 0\n self.path_historico = path_historico\n\n self.where = 0\n self.tamano = 9 ** 100\n\n def getTime(self, ip):\n \"\"\"Obtiene los segundos desde 1970 hasta la ultima conexion\"\"\"\n if ip in self.accesos:\n registro = self.accesos[ip]\n registro = registro.getAsDict()\n return registro[\"time\"]\n else:\n return \"0.0\"\n\n #@profile\n def load(self):\n \"\"\"Cargo el archivo de log\"\"\"\n try:\n # obtengo el tamano del archivo\n tamano = os.stat(self.path)[6]\n with open(self.path, \"r\") as f:\n lineas = self._tailf(f, tamano)\n for f in lineas:\n if f == \"\" or not f:\n del f, lineas\n break\n # contenido linea\n linea = f.split()\n #self.logger.info(\"linea: %s\", repr(linea))\n register = Registro()\n register.ip = str(linea[2]).strip()\n register.time = float(linea[0])\n\n self.accesos[register.ip] = register\n\n except (IOError, OSError):\n self.logger.error(\"No se ha podido acceder \\\n al archivo %s\", self.path)\n raise\n\n def _tailf(self, archivo, tamano):\n \"\"\"Devuelve las lineas que van apareciendo en el log\"\"\"\n # si el tamano es menor al que tengo guardado, es que el log roto\n if tamano < self.tamano:\n self.tamano = tamano\n self.where = 0\n\n archivo.seek(self.where)\n while True:\n self.where = archivo.tell()\n line = archivo.readline()\n\n if not line:\n archivo.seek(self.where)\n del line\n break\n else:\n yield line\n\n def load_historico(self, marcar_leidos=True):\n \"\"\"Carga los dos tipos de historicos\"\"\"\n self.logger.info(\"Reviso los historicos\")\n\n # Filtro los archivos del estilo: access.log.N\n listaLogsHistoricos = glob.glob(self.path_historico + '/access.log.*')\n self._load_logs_historicos(listaLogsHistoricos, marcar_leidos)\n\n listaGzHistoricos = glob.glob(self.path_historico + '/access.log-*.gz')\n self._load_gz_historicos(listaGzHistoricos, marcar_leidos)\n\n def _load_logs_historicos(self, listadoArchivos, marcar_leidos):\n \"\"\"Verifico los logs historicos\"\"\"\n for logHistorico in listadoArchivos:\n if not self._log_ya_revisado(logHistorico):\n with open(logHistorico, 'r') as f:\n filas = list(f)\n cantidad = len(filas) - self.ultimo\n if cantidad > 0:\n self.logger.debug(\"Revisando %s historicos\",\n logHistorico)\n for f in filas[self.ultimo:]:\n # contenido linea\n linea = f.split()\n if linea[2] not in self.accesos:\n register = Registro()\n register.ip = linea[2]\n 
register.time = float(linea[0])\n self.accesos[register.ip] = register\n self.logger.info(\"Agregando %s a la db\",\n register.ip)\n\n else:\n # Actualizo la fecha, si es mas actual\n registro = self.accesos[linea[2]]\n fechaEnObjeto = registro.time\n fechaEnLog = float(linea[0])\n if fechaEnObjeto < fechaEnLog:\n registro.time = fechaEnLog\n self.logger.info(\"Actualizando aparicion \\\n de %s\", registro.ip)\n self.ultimo += cantidad\n if marcar_leidos:\n self._marcar_como_revisado(logHistorico)\n\n def _load_gz_historicos(self, listadoArchivos, marcar_leidos):\n \"\"\"Verifico los logs historicos\"\"\"\n #from pudb import set_trace; set_trace()\n for logHistorico in listadoArchivos:\n if not self._log_ya_revisado(logHistorico):\n comando = [\"nice -n 15 /bin/gzip -dc \"\n + logHistorico + \" | /bin/awk '{print $1\\\" \\\"$3}'\"]\n comprimido = subprocess.Popen(comando, shell=True,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n resultado = comprimido.communicate()\n\n # 1 es stderr\n stdout = resultado[0]\n filas = stdout.split(\"\\n\")\n cantidad = len(filas)\n if cantidad > 0:\n self.logger.debug(\"Revisando %s historicos\", logHistorico)\n for f in filas:\n if f:\n\n # contenido linea\n linea = f.split()\n if linea[1] not in self.accesos:\n register = Registro()\n register.ip = linea[1]\n register.time = float(linea[0])\n self.accesos[register.ip] = register\n self.logger.info(\"Agregando %s a la db\",\n register.ip)\n\n else:\n # Actualizo la fecha, si es mas actual\n registro = self.accesos[linea[1]]\n fechaEnObjeto = registro.time\n fechaEnLog = float(linea[0])\n if fechaEnObjeto < fechaEnLog:\n registro.time = fechaEnLog\n self.logger.info(\"Se han registrado %d nuevos registros\",\n cantidad)\n self.logger.info(\"Se proceso %s completamente.\", logHistorico)\n\n if marcar_leidos:\n self._marcar_como_revisado(logHistorico)\n\n def _log_ya_revisado(self, logHistorico):\n \"\"\"Verifica que logHistorico haya sido revisado\"\"\"\n self._touch(\"/tmp/__logs_revisados.log\")\n with open(\"/tmp/__logs_revisados.log\", 'r') as f:\n # Si el archivo esta vacio, este try me salva un EOFError\n try:\n procesados = cPickle.load(f)\n if logHistorico in procesados:\n return True\n except:\n return False\n\n def _marcar_como_revisado(self, logHistorico):\n \"\"\"Marco el log como ya revisado\"\"\"\n self._touch(\"/tmp/__logs_revisados.log\")\n with open(\"/tmp/__logs_revisados.log\", 'rb') as f:\n try:\n procesados = cPickle.load(f)\n except:\n procesados = []\n if logHistorico not in procesados:\n procesados.append(logHistorico)\n with open(\"/tmp/__logs_revisados.log\", 'wb') as f:\n cPickle.dump(procesados, f, protocol=2)\n\n def get(self, id_):\n \"\"\"Obtengo el registro\"\"\"\n return self.accesos[id_]\n", "repo_name": "FacundoAcevedo/squidban", "sub_path": "classes/ArchivoLog.py", "file_name": "ArchivoLog.py", "file_ext": "py", "file_size_in_byte": 7763, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "Archivo.Archivo", "line_number": 10, "usage_type": "name"}, {"api_name": "Archivo.Archivo.__init__", "line_number": 14, "usage_type": "call"}, {"api_name": "Archivo.Archivo", "line_number": 14, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 38, "usage_type": "call"}, {"api_name": "Registro.Registro", "line_number": 48, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 83, "usage_type": "call"}, 
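The _tailf generator in this ArchivoLog record keeps the last file offset (self.where) via seek/tell and resets it when the file shrinks, i.e. when the log has been rotated. The same idea reduced to a standalone function; the interface and path handling are illustrative, not the record's API:

```python
# Standalone sketch of the seek/tell tail pattern from ArchivoLog._tailf.
import os

def read_new_lines(path, offset, last_size):
    """Return (new_lines, new_offset, new_size); restart after log rotation."""
    size = os.stat(path).st_size
    if size < last_size:        # file shrank -> log was rotated, start over
        offset = 0
    with open(path, "r") as f:
        f.seek(offset)
        lines = f.readlines()   # only lines appended since the last call
        offset = f.tell()
    return lines, offset, size
```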
{"api_name": "glob.glob", "line_number": 86, "usage_type": "call"}, {"api_name": "Registro.Registro", "line_number": 103, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 130, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 131, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 131, "usage_type": "attribute"}, {"api_name": "Registro.Registro", "line_number": 146, "usage_type": "call"}, {"api_name": "cPickle.load", "line_number": 173, "usage_type": "call"}, {"api_name": "cPickle.load", "line_number": 184, "usage_type": "call"}, {"api_name": "cPickle.dump", "line_number": 190, "usage_type": "call"}]} +{"seq_id": "28241990280", "text": "import librosa\r\nimport numpy as np\r\nt=2\r\nsr=44100\r\nn=np.arange(int(t*sr))\r\nf=100+(1000-100)*n/(n.size-1)\r\nphase_delta=f/sr\r\nphase=np.zeros(n.size)\r\nfor i in range(1,n.size):\r\n phase[i]=phase[i-1]+phase_delta[i-1]\r\ny=np.sin(2*np.pi*phase)\r\n\r\nlibrosa.output.write_wav('vsine.wav',y.astype(np.float32),sr)\r\n\r\n", "repo_name": "jinzhaochaliang/Digital-audio-processing", "sub_path": "脉冲响应与卷积/vsine.py", "file_name": "vsine.py", "file_ext": "py", "file_size_in_byte": 309, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.arange", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 11, "usage_type": "attribute"}, {"api_name": "librosa.output.write_wav", "line_number": 13, "usage_type": "call"}, {"api_name": "librosa.output", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 13, "usage_type": "attribute"}]} +{"seq_id": "20226325513", "text": "from telethon import Button\nfrom userbot import BOT_USERNAME, CMD_HANDLER, CMD_HELP, bot\nfrom userbot.utils import flicks_cmd\n\nuser = bot.get_me()\nDEFAULTUSER = user.first_name\nCUSTOM_HELP_EMOJI = \"⚡\"\nmain_help_menu = [\n [\n Button.url(\"Settings ⚙️\", f\"t.me/{BOT_USERNAME}\"),\n Button.inline(\"Vc Plugin ⚙️\", data=\"flicks_inline\"),\n ],\n [\n Button.inline(\"Help Menu\", data=\"open\"),\n ],\n [Button.inline(\"Close\", data=\"close\")],\n]\n\n\n@flicks_cmd(pattern=\"help ?(.*)\")\nasync def cmd_list(event):\n args = event.pattern_match.group(1).lower()\n if args:\n if args in CMD_HELP:\n await event.edit(f\"**✘ Commands available in {args} ✘** \\n\\n\" + str(CMD_HELP[args]) + \"\\n\\n**💕 @TheFlicksUserbot**\")\n else:\n await event.edit(f\"**Module** `{args}` **Tidak tersedia!**\")\n else:\n try:\n results = await bot.inline_query( # pylint:disable=E0602\n BOT_USERNAME, \"@FlicksSupport\"\n )\n await results[0].click(\n event.chat_id, reply_to=event.reply_to_msg_id, hide_via=True\n )\n await event.delete()\n except BaseException:\n await event.edit(\n f\"** Sepertinya obrolan atau bot ini tidak mendukung inline mode.\\nUntuk alternatif, gunakan perintah\\n👉`{CMD_HANDLER}plugins`**\"\n )\n", "repo_name": "farizjs/Flicks-Userbot", "sub_path": "userbot/modules/_help.py", "file_name": "_help.py", "file_ext": "py", "file_size_in_byte": 1384, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "21", "api": [{"api_name": "userbot.bot.get_me", "line_number": 5, "usage_type": "call"}, {"api_name": "userbot.bot", "line_number": 5, "usage_type": "name"}, {"api_name": "telethon.Button.url", 
"line_number": 10, "usage_type": "call"}, {"api_name": "telethon.Button", "line_number": 10, "usage_type": "name"}, {"api_name": "userbot.BOT_USERNAME", "line_number": 10, "usage_type": "name"}, {"api_name": "telethon.Button.inline", "line_number": 11, "usage_type": "call"}, {"api_name": "telethon.Button", "line_number": 11, "usage_type": "name"}, {"api_name": "telethon.Button.inline", "line_number": 14, "usage_type": "call"}, {"api_name": "telethon.Button", "line_number": 14, "usage_type": "name"}, {"api_name": "telethon.Button.inline", "line_number": 16, "usage_type": "call"}, {"api_name": "telethon.Button", "line_number": 16, "usage_type": "name"}, {"api_name": "userbot.CMD_HELP", "line_number": 24, "usage_type": "name"}, {"api_name": "userbot.CMD_HELP", "line_number": 25, "usage_type": "name"}, {"api_name": "userbot.bot.inline_query", "line_number": 30, "usage_type": "call"}, {"api_name": "userbot.BOT_USERNAME", "line_number": 31, "usage_type": "argument"}, {"api_name": "userbot.bot", "line_number": 30, "usage_type": "name"}, {"api_name": "userbot.CMD_HANDLER", "line_number": 39, "usage_type": "name"}, {"api_name": "userbot.utils.flicks_cmd", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "19934896370", "text": "import os, argparse, time, sys\n\nparser = argparse.ArgumentParser(description=\"Archivo que genera procesos hijos como lineas tenga el archivo\")\n\nparser.add_argument(\"-f\", help=\"Escribir el path\")\nargs = parser.parse_args()\n\nr, w = os.pipe()\nr2, w2 = os.pipe()\n\ndef leer_archivo():\n file = open(args.f, 'r')\n return file.readlines()\n\nlineas_recibidas = []\ndef child(line):\n if not os.fork():\n os.write(w,line[::-1].encode('ascii'))\n os._exit(0)\n else:\n value = os.read(r, 100)\n lineas_recibidas.append(value.decode())\n\n\nif __name__ == '__main__':\n lines = leer_archivo()\n r, w = os.pipe()\n for line in lines:\n child(line)\n for line in lines:\n os.wait()\n for line in lineas_recibidas:\n print(line)\n\n\n\n", "repo_name": "bruno212121/Computaciondos_2022", "sub_path": "Processing/inversor.py", "file_name": "inversor.py", "file_ext": "py", "file_size_in_byte": 774, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 3, "usage_type": "call"}, {"api_name": "os.pipe", "line_number": 8, "usage_type": "call"}, {"api_name": "os.pipe", "line_number": 9, "usage_type": "call"}, {"api_name": "os.fork", "line_number": 17, "usage_type": "call"}, {"api_name": "os.write", "line_number": 18, "usage_type": "call"}, {"api_name": "os._exit", "line_number": 19, "usage_type": "call"}, {"api_name": "os.read", "line_number": 21, "usage_type": "call"}, {"api_name": "os.pipe", "line_number": 27, "usage_type": "call"}, {"api_name": "os.wait", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "6161181690", "text": "from datetime import datetime\nimport logging\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom os import path\nfrom urllib.error import HTTPError\nimport sys\n\nlogging.getLogger().setLevel(logging.INFO)\ndt = datetime.today().strftime('%Y-%m-%d')\nurl = 'https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-geographic-disbtribution-worldwide-{0}.xlsx'.format(dt)\nfileLocation = '/Users/nikolasgarcia/PycharmProjects/CoronaVirus/DailyDownload/'\nif path.exists(fileLocation) is False:\n logging.error('File directory does not exist.')\n sys.exit(1)\nfileName = url[url.find('COVID'):len(url)]\n\nif 
path.isfile(fileLocation+fileName):\n logging.info('\\nFile already exists at {0} \\nLoading file from that directory.\\n'.format(fileLocation))\n df = pd.read_excel(fileLocation+fileName)\n df['DateRep'] = pd.to_datetime(df['DateRep'], format='%Y-%m-%d')\n print(df.head())\nelse:\n try:\n df = pd.read_excel(url)\n df['DateRep'] = pd.to_datetime(df['DateRep'], format='%Y-%m-%d')\n logging.info('\\nSuccessfully pulled down file containing {0} records.'.format(len(df)))\n logging.info('\\nMoving {0} to \\n'.format(fileName) + fileLocation)\n df.to_excel(fileLocation + '{0}'.format(fileName), sheet_name='Data', index=True)\n except HTTPError as e:\n logging.error('\\nEncountered {0} error. Web Link {1}.'.format(e.code, e.reason))\n except FileNotFoundError as e:\n logging.error('\\nEncountered {0} while trying to download. \\nCheck URL.'.format(e))\n", "repo_name": "nkgarcia/CoronaVirus", "sub_path": "CoronaVirusDownloader.py", "file_name": "CoronaVirusDownloader.py", "file_ext": "py", "file_size_in_byte": 1537, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 10, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 27, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 29, "usage_type": "call"}, {"api_name": "urllib.error.HTTPError", "line_number": 31, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "29188326997", "text": "import dash\nfrom dash import html\nfrom dash import dcc\nimport plotly.graph_objects as go\nimport pandas as pd\nfrom dash.dependencies import Input, Output\nimport requests\nimport json\n\n# Initialize the app\napp = dash.Dash(suppress_callback_exceptions=True)\nwith open('../coin_api_key.json') as file:\n apikey = json.load(file).get('key')\nserver = app.server\n\napp.title = 'Africa Data School Crypto App'\napp.description = \"This is a test app for Africa Data School Jan 2022 Cohort\"\n\napp.layout = html.Div(children=[\n html.Link(rel='shortcut icon', type='favicon.ico', href='assets/btc.png'),\n html.Div([\n # Logo Div\n html.Div([\n # image\n html.Img(src=app.get_asset_url('btc.png'), id='ads-image', style={\n 'height': '60px',\n 'width': 'auto',\n 'margin-bottom': '25px'\n })\n\n ], className='one-third column'),\n\n # Adds heading DIV\n html.Div([\n # heading\n html.Div([\n 
html.H2('Africa Data School ', style={'margin-bottom': '0px', 'color': 'pink'}),\n html.H5('Cryptocurrency Prices', style={'margin-bottom': '0px', 'color': 'pink'})\n ])\n\n ], className='one-third column', id='title'),\n # date\n html.Div([], className='one-third column', id='title1')\n\n ], id='header', className='row flex-display', style={'margin-bottom': '25px'}),\n # DROPDOWN SECTION\n\n # Select crypto\n html.Div([\n html.Div([\n html.Label('Crypto Asset', style={'color': '#FF00BD'}),\n dcc.Dropdown(\n id='coin',\n options=[\n {'label': 'Bitcoin', 'value': 'BTC'},\n {'label': 'Ethereum', 'value': 'ETH'},\n {'label': 'Bitcoin Cash', 'value': 'BCH'},\n {'label': 'Litecoin', 'value': 'LTC'}\n ],\n value='BTC'\n ),\n\n ], className='card_container three columns'),\n\n # Select Time Period\n html.Div([\n html.Label('Time', style={'color': '#FF00BD'}),\n dcc.Dropdown(\n id='time',\n options=[\n {'label': 'Minute', 'value': '1MIN'},\n {'label': 'Day', 'value': '10DAY'},\n {'label': 'Month', 'value': '6MTH'},\n {'label': 'year', 'value': '5YRS'}\n ],\n value='10DAY'\n ),\n\n dcc.Interval(\n id='graph-update',\n interval=1 * 10,\n n_intervals=1\n ),\n\n ], className='card_container three columns'),\n ], className='row flex display'),\n\n # DISPLAY PRICE OPENING, PRICE CLOSING, PRICE HIGH and VOLUME TRADE.\n\n # Price Opening\n html.Div([\n html.Div([\n html.H6(children='Price Open',\n style={'textAlign': 'center',\n 'color': 'white'}),\n html.P(id='price_open',\n style={'textAlign': 'center',\n 'color': 'orange',\n 'fontSize': 40}),\n\n ], className='card_container three columns'),\n\n # Price Closing\n html.Div([\n html.H6(children='Price Close',\n style={'textAlign': 'center',\n 'color': 'white'}),\n html.P(id='price_close',\n style={'textAlign': 'center',\n 'color': 'orange',\n 'fontSize': 40}),\n\n ], className='card_container three columns'),\n\n # Price high\n html.Div([\n html.H6(children='Price High',\n style={'textAlign': 'center',\n 'color': 'white'}),\n html.P(id='price_high',\n style={'textAlign': 'center',\n 'color': 'orange',\n 'fontSize': 40}),\n\n ], className='card_container three columns'),\n\n # Volume Traded\n html.Div([\n html.H6(children='Volume Traded',\n style={'textAlign': 'center',\n 'color': 'white'}),\n html.P(id='Volume_traded',\n style={'textAlign': 'center',\n 'color': 'orange',\n 'fontSize': 40}),\n\n ], className='card_container three columns')\n\n ], className='row flex display'),\n # THE GRAPH\n html.Div([\n html.Div([\n dcc.Graph(id='graph', config={'displayModeBar': False}),\n ], className='card_container twelve columns')\n ], className='row flex display'),\n], id='mainContainer', style={'display': 'flex', 'flex-direction': 'column'})\n\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)", "repo_name": "nichmomanyi/Data-science-journey", "sub_path": "17 Real Time Crypto App (Dash)/Application/classwork.py", "file_name": "classwork.py", "file_ext": "py", "file_size_in_byte": 4853, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "dash.Dash", "line_number": 11, "usage_type": "call"}, {"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "dash.html.Div", "line_number": 19, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 19, "usage_type": "name"}, {"api_name": "dash.html.Link", "line_number": 20, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 20, "usage_type": "name"}, {"api_name": "dash.html.Div", 
"line_number": 21, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 21, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 23, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 23, "usage_type": "name"}, {"api_name": "dash.html.Img", "line_number": 25, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 25, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 34, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 34, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 36, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 36, "usage_type": "name"}, {"api_name": "dash.html.H2", "line_number": 37, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 37, "usage_type": "name"}, {"api_name": "dash.html.H5", "line_number": 38, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 38, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 43, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 43, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 49, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 49, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 50, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 50, "usage_type": "name"}, {"api_name": "dash.html.Label", "line_number": 51, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 51, "usage_type": "name"}, {"api_name": "dash.dcc.Dropdown", "line_number": 52, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 52, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 66, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 66, "usage_type": "name"}, {"api_name": "dash.html.Label", "line_number": 67, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 67, "usage_type": "name"}, {"api_name": "dash.dcc.Dropdown", "line_number": 68, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 68, "usage_type": "name"}, {"api_name": "dash.dcc.Interval", "line_number": 79, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 79, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 91, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 91, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 92, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 92, "usage_type": "name"}, {"api_name": "dash.html.H6", "line_number": 93, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 93, "usage_type": "name"}, {"api_name": "dash.html.P", "line_number": 96, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 96, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 104, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 104, "usage_type": "name"}, {"api_name": "dash.html.H6", "line_number": 105, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 105, "usage_type": "name"}, {"api_name": "dash.html.P", "line_number": 108, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 108, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 116, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 116, "usage_type": "name"}, {"api_name": "dash.html.H6", "line_number": 117, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 117, "usage_type": "name"}, {"api_name": "dash.html.P", "line_number": 120, "usage_type": "call"}, 
{"api_name": "dash.html", "line_number": 120, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 128, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 128, "usage_type": "name"}, {"api_name": "dash.html.H6", "line_number": 129, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 129, "usage_type": "name"}, {"api_name": "dash.html.P", "line_number": 132, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 132, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 141, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 141, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 142, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 142, "usage_type": "name"}, {"api_name": "dash.dcc.Graph", "line_number": 143, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 143, "usage_type": "name"}]} +{"seq_id": "40025952233", "text": "from tkinter import *\n\nfrom modelos import Persona, Visitante\nimport shelve\n\nfrom controladores import PersonaControlador\n\nfrom datetime import datetime, date\n\n# Info sobre cursores \t\thttp://www.tcl.tk/man/tcl8.4/TkCmd/cursors.htm\n# Documentacion basica \t\thttp://effbot.org/tkinterbook/\n# Estilo\t\t\t\t\thttp://infohost.nmt.edu/tcc/help/pubs/tkinter/web/ttk-style-layer.html\n# \t... mas Estilo ttk\t\thttps://docs.python.org/3/library/tkinter.ttk.html\n# validaciones en vista \thttps://www.packtpub.com/books/content/miscellaneous-tips\n\npersona_db = \"persona.db\"\nvisitante_db = \"visitante.db\"\n\ndef abrir_db():\n\ts = shelve.open(persona_db)\n\treturn s\n\ndef registrar_entrada():\n\tahora = datetime.now()\n\tfecha_hoy = ahora.date()\n\thora_actual = ahora.time()\n\n\tv = Visitante(fecha_visita=fecha_hoy,\n\t\t\t\thora_entrada=hora_actual)\n\n\t# s = shelve.open(visitante_db)\n\n\n\n\ndef registrar_persona():\n\t\n\tcedula = entry_cedula.get()\n\tnombre = entry_nombre.get()\n\tapellido = entry_apellido.get()\n\tsexo = sexo_var.get()\n\tedad = entry_edad.get()\n\textra = entry_extra.get()\n\n\tPersonaControlador().crear(cedula=cedula, nombre=nombre, \n\t\t\t\tapellido=apellido, sexo=sexo,\n\t\t\t\tedad=edad)\n\ndef buscar_persona():\n\n\tcedula = entry_cedula.get()\n\texiste = PersonaControlador().existe(cedula)\n\n\tif existe:\n\t\tprint(PersonaControlador().recuperar(cedula))\n\telse:\n\t\tprint(\"Ahora implementar carga de nueva persona\")\n\n\n\n\n\ndef borrar_persona():\n\ts = abrir_db()\n\tcedula = entry_borrar_cedula.get()\n\n\tif cedula in s.keys():\n\t\tprint(\"Borrando cedula: \" + cedula)\n\t\tdel s[cedula]\n\telse:\n\t\t# aca pedir datos de la persona, porque no existe en la BD todavía\n\t\tprint(\"No existe esa cedula.\")\n\n\ts.close()\n\ntop = Tk()\n\nlabel_cedula = Label(top, text=\"Cedula\")\nlabel_cedula.pack()\nentry_cedula = Entry(top, bd=0)\nentry_cedula.pack()\n\nboton_buscar = Button(top, text=\"Buscar\", width=10, command=buscar_persona)\nboton_buscar.pack()\n\n\n\nlabel_nombre = Label(top, text=\"Nombre\")\nlabel_nombre.pack()\nentry_nombre = Entry(top, bd=0)\nentry_nombre.pack()\n\nlabel_apellido = Label(top, text=\"Apellido\")\nlabel_apellido.pack()\nentry_apellido = Entry(top, bd=0)\nentry_apellido.pack()\n\nedad_var = IntVar()\n\nlabel_edad = Label(top, text=\"Edad\")\nlabel_edad.pack()\nentry_edad = Entry(top, textvariable=edad_var, bd=0)\nentry_edad.pack()\n\n\nlabel_extra = Label(top, text=\"extra\")\nlabel_extra.pack()\nentry_extra = Entry(top, bd=0)\nentry_extra.pack()\n\n\n\n\nsexo_var = 
StringVar()\n\nlabel_sexo = Label(top, text=\"Sexo\")\nlabel_sexo.pack()\n\nfem_radio = Radiobutton(top, text=\"femenino\", variable=sexo_var, value=\"F\", bd=0, indicatoron=0, cursor=\"hand2\", bg=\"red\")\nfem_radio.pack()\n\nmasc_radio = Radiobutton(top, text=\"masculino\", variable=sexo_var, value=\"M\",bd=0, indicatoron=0, cursor=\"hand2\", bg=\"blue\")\nmasc_radio.pack()\n\n# label_vacio = Label(top, text=\"\")\n# label_vacio.pack()\n\nboton_guardar = Button(top, text=\"Guardar\", width=10, command=registrar_persona)\nboton_guardar.pack()\n\n\nlabel_buscar_cedula = Label(top, text=\"Buscar por cedula\")\nlabel_buscar_cedula.pack()\nentry_buscar_cedula = Entry(top, bd=0)\nentry_buscar_cedula.pack()\n\nboton_buscar = Button(top, text=\"Buscar\", width=10, command=buscar_persona)\nboton_buscar.pack()\n\n\nlabel_borrar_cedula = Label(top, text=\"Borrar por cedula\")\nlabel_borrar_cedula.pack()\nentry_borrar_cedula = Entry(top, bd=0)\nentry_borrar_cedula.pack()\n\nboton_borrar = Button(top, text=\"Borrar\", width=10, command=borrar_persona)\nboton_borrar.pack()\n\ntop.mainloop()\n\n", "repo_name": "migueljoba/plp2014", "sub_path": "app/persona_form.py", "file_name": "persona_form.py", "file_ext": "py", "file_size_in_byte": 3423, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "shelve.open", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "modelos.Visitante", "line_number": 28, "usage_type": "call"}, {"api_name": "controladores.PersonaControlador", "line_number": 45, "usage_type": "call"}, {"api_name": "controladores.PersonaControlador", "line_number": 52, "usage_type": "call"}, {"api_name": "controladores.PersonaControlador", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "74036560054", "text": "#!/usr/bin/env python\n# http://www.rabbitmq.com/tutorials/tutorial-two-python.html\nimport pika\nimport sys\ncredentials = pika.PlainCredentials('oleg', '123456')\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\n host='node1', credentials=credentials))\nchannel = connection.channel()\n\nmessage = ' '.join(sys.argv[1:]) or \"Hello World!\"\nchannel.basic_publish(exchange='',\n routing_key='task_queue',\n body=message\n)\nprint(\" [x] Sent {0}\".format(message))\nconnection.close()\n", "repo_name": "olkurinnoy/rabbitmq_lab", "sub_path": "ex/new_task.py", "file_name": "new_task.py", "file_ext": "py", "file_size_in_byte": 533, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pika.PlainCredentials", "line_number": 5, "usage_type": "call"}, {"api_name": "pika.BlockingConnection", "line_number": 6, "usage_type": "call"}, {"api_name": "pika.ConnectionParameters", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}]} +{"seq_id": "31509495539", "text": "from selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nimport os\nimport time\n\nmore_pages = True\n\n#############################################################################\n\n\ndef next_page():\n \"\"\"moves to next page\"\"\"\n global more_pages, driver, job_links\n time.sleep(2)\n current_page = 
int(driver.find_element(By.CSS_SELECTOR, '.artdeco-pagination__indicator.active.selected').text)\n pages = driver.find_elements(By.CSS_SELECTOR, '.artdeco-pagination__indicator')\n total_pages = int(pages[len(pages) - 1].text)\n print(f\"current page is {current_page}, total pages is {total_pages}\")\n if total_pages == current_page:\n more_pages = False\n else:\n pages[current_page].click()\n time.sleep(5)\n\n\ndef job_list_adder(last_item):\n \"\"\"adds urls of each post to a list\"\"\"\n global job_links\n jobs = driver.find_elements(By.CSS_SELECTOR, '.job-card-container__link.job-card-list__title')\n total_jobs = len(jobs)\n print(f\"{total_jobs} total jobs in this list\")\n element = jobs[total_jobs - 1]\n driver.execute_script(\"arguments[0].scrollIntoView(true);\", element)\n for i in jobs:\n link = i.get_attribute(\"href\")\n if link not in job_links:\n job_links.append(link)\n print(len(job_links))\n if last_item == job_links[-1]:\n next_page()\n print(\"next page\")\n\n\ndef find_all_jobs():\n \"\"\"continues using above two functions\"\"\"\n global driver, more_pages, job_links\n while more_pages:\n try:\n last_item = job_links[-1]\n except IndexError:\n last_item = None\n\n # job_links = job_list_adder()\n job_list_adder(last_item)\n\n\ndef submit_application():\n \"\"\"get name of button\"\"\"\n global driver, havent_applied\n button = driver.find_element(By.CSS_SELECTOR, \"button.jobs-apply-button\") #clicks first button\n button.click()\n application_button = driver.find_element(By.CSS_SELECTOR, \"button.artdeco-button.artdeco-button--2.artdeco-button--primary\")\n ########fix to this after\n # application_button.click()\n # button_text = application_button.text\n # if button_text == \"Submit application\":\n # return\n\n button_text = application_button.text\n if button_text == \"Submit application\":\n dismiss = driver.find_element(By.CSS_SELECTOR, \"button.artdeco-modal__dismiss\")\n dismiss.click()\n save = driver.find_element(By.XPATH, \"/html/body/div[3]/div[2]/div/div[3]/button[2]\")\n time.sleep(1)\n save.click()\n print(\"would have been submitted but instead saved\")\n\n else:\n dismiss = driver.find_element(By.CSS_SELECTOR, \"button.artdeco-modal__dismiss\")\n dismiss.click()\n save = driver.find_element(By.XPATH, \"/html/body/div[3]/div[2]/div/div[3]/button[2]\")\n time.sleep(1)\n save.click()\n time.sleep(3)\n print(\"saved because multiple steps\")\n havent_applied = False\n\n\ndef check_if_applied():\n global driver, havent_applied, total_jobs\n page = driver.find_elements(By.CSS_SELECTOR, \".mt5\")\n for i in page:\n if i.text == \"\":\n print(\"already applied\")\n total_jobs -= 1\n havent_applied = False\n\n#########################\n#see how long program takes to apply to x jobs (x determined later)\nstart_time = time.time()\n##############################################################################################\n# pulls up URL which has set parameters already\n\nLINK = \"https://www.linkedin.com/jobs/search/?distance=100&f_AL=true&f_JT=F&f_WT=\" \\\n \"1%2C2%2C3&geoId=101140016&keywords=biomedical%20engineer&location=Williston%20Park%2C%\" \\\n \"20New%20York%2C%20United%20States\"\n\nser = Service(r\"C:\\Users\\Sean\\Development\\chromedriver.exe\")\nop = webdriver.ChromeOptions()\ndriver = webdriver.Chrome(service=ser, options=op)\n\n###################################################\n# logging in\n\nUSERNAME = os.getenv('EMAIL_17')\nPASSWORD = os.getenv('LINKEDIN_PASSWORD')\n\ndriver.get(LINK)\ntime.sleep(2)\n\nlogin_button = 
driver.find_element(By.XPATH, '/html/body/div[1]/header/nav/div/a[2]')\nlogin_button.click()\ntime.sleep(2)\n\nusername = driver.find_element(By.XPATH, '//*[@id=\"username\"]')\nusername.send_keys(f\"{USERNAME}{Keys.TAB}{PASSWORD}{Keys.ENTER}\")\ntime.sleep(7)\n\n################\n#create list with urls for every job\n\njob_links = []\nfind_all_jobs()\ntotal_jobs = len(job_links)\nfor i in range(total_jobs):\n print(i)\n driver.get(job_links[i])\n time.sleep(2)\n\n####################################\n #check to make sure application has not already been submitted:\n\n havent_applied = True\n check_if_applied()\n while havent_applied:\n\n#######################################\n # submitting application: saves applications with multiple steps\n\n submit_application()\n\n######################################\n\nprint(f\"It took {time.time() - start_time} seconds to save {total_jobs}\")\nprint((time.time() - start_time)/total_jobs)\n\n", "repo_name": "seaniomoran/AutoJobApply_Selenium", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5092, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "time.sleep", "line_number": 16, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 17, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 17, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 18, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 18, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 31, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 31, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 62, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 62, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 64, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 64, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 73, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 73, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 75, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 75, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 76, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 81, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 81, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 83, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 83, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 84, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 86, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 93, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 93, "usage_type": "name"}, {"api_name": "time.time", 
"line_number": 102, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.service.Service", "line_number": 110, "usage_type": "call"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 111, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 111, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 112, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 112, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 117, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 118, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 121, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 123, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 123, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 125, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 127, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 127, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.TAB", "line_number": 128, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 128, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.ENTER", "line_number": 128, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 129, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 140, "usage_type": "call"}, {"api_name": "time.time", "line_number": 156, "usage_type": "call"}, {"api_name": "time.time", "line_number": 157, "usage_type": "call"}]} +{"seq_id": "24672169210", "text": "#Create Setup.py file to make the module installable\r\nimport os\r\nimport configparser\r\nfrom setuptools import setup, find_packages\r\nfrom loko import LokoConfig, models\r\nfrom loko.utils import get_logger \r\nfrom loko.conf import CONFIG_FILENAME\r\nfrom loko.biz.xchange import XCHANGE_FILENAME_BASE\r\nfrom loko.tests.populate import populate_db\r\n\r\nCONFIG_FILENAME='loko_config.cfg'\r\nXCHANGE_FILENAME_BASE='Xchange_data'\r\n\r\nsetup(\r\n\tname = \"loko\",\r\n\tversion = \"0.2.0\",\r\n\tauthor = \"Mcflyhalf\",\r\n\tauthor_email = \"mcflyhalf@live.com\",\r\n\tdescription = (\"Store, send and receive money in different currencies\"),\r\n\tkeywords = \"currency exchange forex\",\r\n\tpackages= find_packages(),\r\n)\r\n\r\nlogger = get_logger(\"loko_setup\")\r\nconfig = configparser.ConfigParser()\r\n\r\nconfig['DEFAULT'] = {}\r\ndefault = config['DEFAULT']\r\n\r\ndefault['install location'] = str(os.getcwd())\t#Base install directory\r\ndefault['sqlite production db'] = os.path.join(config['DEFAULT']['Install Location'], 'loko','models', 'loko_prod.db')\r\ndefault['sqlite development db'] = os.path.join(config['DEFAULT']['Install Location'], 'loko','models', 'loko_dev.db')\r\ndefault['sqlite test db'] = os.path.join(config['DEFAULT']['Install Location'], 'loko','tests', 'loko_test.db')\r\ndefault['config_file'] = os.path.join(os.getcwd(), CONFIG_FILENAME)\r\ndefault['exchange rate base filename'] = XCHANGE_FILENAME_BASE\r\ndefault['exchange rate file location'] = os.path.join(config['DEFAULT']['Install Location'], 'loko','biz', 'xchange_data')\r\ndefault['exchange rate test response'] = os.path.join(config['DEFAULT']['Install Location'], 'loko','tests', 'test_xchange_resp.pkl')\r\n#Create config file\r\nwith open(CONFIG_FILENAME, 'w') as 
configfile:\r\n\tconfig.write(configfile)\r\n\r\nxchange_resp_pkl = default['exchange rate test response']\r\nprod_engine = models.production_engine\r\nLokoConfig(prod_engine, logger).newconfig(prod_engine,xchange_resp_pkl,drop_tables=False)\r\n\r\ndev_engine = models.development_engine\r\nLokoConfig(dev_engine, logger).newconfig(dev_engine,xchange_resp_pkl,drop_tables=False)\r\n\r\n#Populate the dev db for dev testing\r\n#For some reason `pip install -e .` seems to run this entire file twice over\r\n# logger.info(\"Preparing to populate dev database\")\r\n# xchange_resp_file = default['exchange rate test response']\r\n# dev_session = models.get_db_session(env=\"development\")\r\n# logger.info(\"Starting to populate dev database\")\r\n# populate_db(dev_session, xchange_resp_file)\r\n# logger.info(\"Finished populating dev database\")", "repo_name": "mcflyhalf/loko", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 2480, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "loko.conf.CONFIG_FILENAME", "line_number": 11, "usage_type": "name"}, {"api_name": "loko.biz.xchange.XCHANGE_FILENAME_BASE", "line_number": 12, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 14, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 21, "usage_type": "call"}, {"api_name": "loko.utils.get_logger", "line_number": 24, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 25, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "loko.conf.CONFIG_FILENAME", "line_number": 34, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 34, "usage_type": "call"}, {"api_name": "loko.biz.xchange.XCHANGE_FILENAME_BASE", "line_number": 35, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "loko.conf.CONFIG_FILENAME", "line_number": 39, "usage_type": "argument"}, {"api_name": "loko.models.production_engine", "line_number": 43, "usage_type": "attribute"}, {"api_name": "loko.models", "line_number": 43, "usage_type": "name"}, {"api_name": "loko.LokoConfig", "line_number": 44, "usage_type": "call"}, {"api_name": "loko.models.development_engine", "line_number": 46, "usage_type": "attribute"}, {"api_name": "loko.models", "line_number": 46, "usage_type": "name"}, {"api_name": "loko.LokoConfig", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "42825378662", "text": "from copy import deepcopy\nfrom tqdm import tqdm\nimport gym\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom rsrl.policy import LagrangianPIDController\nfrom 
rsrl.policy.onpolicy_base import OnPolicyBase\nfrom rsrl.policy.model.mlp_ac import (MLPCategoricalActor, MLPGaussianActor,\n EnsembleQCritic, mlp)\nfrom rsrl.util.logger import EpochLogger\nfrom rsrl.util.torch_util import (count_vars, get_device_name, to_device,\n to_ndarray, to_tensor)\nfrom rsrl.policy.adversary.adv_random import AdvUniform, AdvGaussian\nfrom rsrl.policy.adversary.adv_critic import AdvCritic, AdvCriticPPO\nfrom rsrl.policy.adversary.adv_mad import AdvMad, AdvMadPPO\nfrom rsrl.policy.adversary.adv_amad import AdvAmad, AdvAmadPPO\nfrom rsrl.policy.adversary.adv_base import Adv\nfrom torch.optim import Adam\nfrom torch.distributions.kl import kl_divergence\nfrom torch.nn.utils import clip_grad_norm_\n\n\nclass RobustPPOLagrangian(OnPolicyBase):\n attacker_cls = dict(uniform=AdvUniform,\n gaussian=AdvGaussian,\n mad=AdvMadPPO,\n amad=AdvAmadPPO,\n max_reward=AdvCriticPPO,\n min_reward=AdvCriticPPO,\n max_cost=AdvCriticPPO)\n\n def __init__(self, env: gym.Env, logger: EpochLogger, env_cfg: dict,\n adv_cfg: dict, rs_mode, episode_rerun_num, KP, KI, KD,\n per_state, use_adv_multiplier, clip_ratio, clip_ratio_adv,\n weight_adv, target_kl, train_actor_iters, train_critic_iters,\n update_adv_freq, kl_coef, decay_epoch, attacker_names,\n start_epoch, **kwargs) -> None:\n r'''\n Proximal Policy Optimization (PPO) with Lagrangian multiplier\n '''\n super().__init__(env, logger, env_cfg, **kwargs)\n\n self.adv_cfg = adv_cfg\n self.clip_ratio = clip_ratio\n self.clip_ratio_adv = clip_ratio_adv\n self.target_kl = target_kl\n self.train_actor_iters = train_actor_iters\n self.train_critic_iters = train_critic_iters\n self.update_adv_freq = update_adv_freq\n self.kl_coef = kl_coef\n self.weight_adv = weight_adv\n self.episode_rerun_num = episode_rerun_num\n self.use_adv_multiplier = use_adv_multiplier\n\n self.controller = LagrangianPIDController(KP, KI, KD, self.cost_limit,\n per_state)\n\n self.controller_adv = LagrangianPIDController(KP, KI, KD,\n self.cost_limit,\n per_state)\n\n #############################################################################\n ############################ for attacker usage #############################\n #############################################################################\n self._init_adversary_critic()\n self.mode = rs_mode\n self.attacker_names = attacker_names\n self.adv_training_modes = [\n \"mc\",\n \"mr\",\n \"mcv\",\n \"mrv\",\n \"mad\",\n \"madv\",\n \"uniform\",\n \"gaussian\",\n ]\n\n if self.mode in self.adv_training_modes:\n self.apply_adv_in_training = True\n self._init_adversary()\n else:\n self.apply_adv_in_training = False\n self._init_adversary_baseline()\n\n self.mode_to_loss = {\n \"vanilla\": self.vanilla_policy_loss,\n \"kl\": self.kl_regularized_policy_loss,\n \"klmc\": self.kl_regularized_policy_loss,\n \"klmr\": self.kl_regularized_policy_loss,\n }\n for m in self.adv_training_modes:\n self.mode_to_loss[m] = self.vanilla_policy_loss\n\n self.noise_scale_schedule = 0\n self.decay_epoch = decay_epoch\n self.start_epoch = start_epoch\n self.decay_func = lambda x: self.noise_scale - self.noise_scale * np.exp(\n -5. 
* x / self.decay_epoch)\n\n # Set up model saving\n self.save_model()\n\n def _post_epoch_process(self, epoch):\n self.epoch = epoch\n if self.epoch < self.start_epoch:\n self.noise_scale_schedule = 0\n else:\n self.noise_scale_schedule = self.decay_func(epoch -\n self.start_epoch)\n\n def _init_adversary(self):\n self.noise_scale = self.adv_cfg[\"noise_scale\"]\n self.attack_freq = self.adv_cfg[\"attack_freq\"]\n attacker_names_dict = {\n \"mc\": [\"max_cost\"],\n \"mcv\": [\"max_cost\", \"vanilla\"],\n \"mr\": [\"max_reward\"],\n \"mrv\": [\"max_reward\", \"vanilla\"],\n \"mad\": [\"mad\"],\n \"madv\": [\"mad\", \"vanilla\"],\n \"uniform\": [\"uniform\"],\n \"gaussian\": [\"gaussian\"],\n }\n self.attacker_names = attacker_names_dict[self.mode]\n self.adversary_dict = {}\n for attacker_name in self.attacker_names:\n if attacker_name in self.attacker_cls:\n cfg = self.adv_cfg[attacker_name + \"_cfg\"]\n adv_cls = self.attacker_cls[attacker_name]\n adversary = adv_cls(self.obs_dim, **cfg)\n else:\n adversary = None\n self.adversary_dict[attacker_name] = adversary\n self.adversary_num = len(self.attacker_names)\n\n def _init_adversary_baseline(self):\n self.noise_scale = self.adv_cfg[\"noise_scale\"]\n self.attack_freq = self.adv_cfg[\"attack_freq\"]\n\n ############## for seperate mode #################\n if self.mode == \"klmc\":\n cfg = self.adv_cfg[\"max_cost_cfg\"]\n self.adversary = self.attacker_cls[\"max_cost\"](self.obs_dim, **cfg)\n elif self.mode == \"klmr\":\n cfg = self.adv_cfg[\"max_reward_cfg\"]\n self.adversary = self.attacker_cls[\"max_reward\"](self.obs_dim,\n **cfg)\n elif self.mode == \"kl\":\n cfg = self.adv_cfg[\"mad_cfg\"]\n self.adversary = self.attacker_cls[\"mad\"](self.obs_dim, **cfg)\n elif self.mode == \"vanilla\":\n pass\n else:\n raise NotImplementedError\n\n def _init_adversary_critic(self,\n critic2_state_dict=None,\n critic2_optimizer_state_dict=None,\n qc2_state_dict=None,\n qc2_optimizer_state_dict=None):\n # off-policy Q network; for attacker usage\n critic2 = EnsembleQCritic(self.obs_dim,\n self.act_dim,\n self.hidden_sizes,\n nn.ReLU,\n num_q=2)\n qc2 = EnsembleQCritic(self.obs_dim,\n self.act_dim,\n self.hidden_sizes,\n nn.ReLU,\n num_q=1)\n if critic2_state_dict is not None:\n critic2.load_state_dict(critic2_state_dict)\n if qc2_state_dict is not None:\n qc2.load_state_dict(qc2_state_dict)\n\n self.critic2 = to_device(critic2)\n self.critic2_targ = deepcopy(self.critic2)\n\n self.qc2 = to_device(qc2)\n self.qc2_targ = deepcopy(self.qc2)\n\n # Freeze target networks with respect to optimizers (only update via polyak averaging)\n for p in self.critic2_targ.parameters():\n p.requires_grad = False\n for p in self.qc2_targ.parameters():\n p.requires_grad = False\n\n # Set up optimizers for safety critic\n self.qc2_optimizer = Adam(self.qc2.parameters(), lr=self.critic_lr)\n self.critic2_optimizer = Adam(self.critic2.parameters(),\n lr=self.critic_lr)\n if critic2_optimizer_state_dict is not None:\n self.critic2_optimizer.load_state_dict(\n critic2_optimizer_state_dict)\n if qc2_optimizer_state_dict is not None:\n self.qc2_optimizer.load_state_dict(qc2_optimizer_state_dict)\n\n def get_obs_adv(self, adv: Adv, obs: torch.Tensor):\n epsilon = adv.attack_batch(self, obs, self.noise_scale)\n obs_adv = (epsilon + obs).detach()\n return obs_adv\n\n def collect_data(self, warmup=False):\n '''\n Interact with the environment to collect data\n '''\n self.cost_list = []\n qc_list = []\n obs, ep_reward, ep_len, ep_cost = self.env.reset(), 0, 0, 0\n epoch_num = 0\n 
for i in range(self.interact_steps):\n epsilon = 0\n if self.apply_adv_in_training and self.noise_scale_schedule > 0:\n attacker_name = self.attacker_names[epoch_num %\n self.adversary_num]\n adversary = self.adversary_dict[attacker_name]\n if adversary is not None:\n epsilon = adversary.attack_at_eval(\n self, obs, self.noise_scale_schedule)\n\n obs = obs + epsilon\n\n action, value, log_prob = self.act(obs)\n obs_next, reward, done, info = self.env.step(action)\n\n cost_value = self.get_qc_v(obs)\n\n if done and \"TimeLimit.truncated\" in info:\n done = False\n timeout_env = True\n else:\n timeout_env = False\n\n cost = info[\"cost\"] if \"cost\" in info else 0\n self.buffer.store(obs, np.squeeze(action), reward, value, log_prob,\n done, cost, cost_value)\n self.cpp_buffer.add(obs=obs,\n act=np.squeeze(action),\n rew=reward,\n obs2=obs_next,\n done=done,\n cost=cost / self.cost_normalizer)\n self.logger.store(VVals=value, CostVVals=cost_value, tab=\"worker\")\n ep_reward += reward\n ep_cost += cost\n ep_len += 1\n obs = obs_next\n\n timeout = ep_len == self.timeout_steps - 1 or i == self.interact_steps - 1 or timeout_env and not done\n terminal = done or timeout\n if terminal:\n if timeout:\n _, value, _ = self.act(obs)\n cost_value = self.get_qc_v(obs)\n else:\n value = 0\n cost_value = 0\n self.buffer.finish_path(value, cost_value)\n if i < self.interact_steps - 1:\n self.logger.store(EpRet=ep_reward,\n EpLen=ep_len,\n EpCost=ep_cost,\n tab=\"worker\")\n\n self.cumulative_cost_adv += ep_cost\n if ep_reward > self.maximum_reward_adv:\n self.maximum_reward_adv = ep_reward\n self.logger.store(CumulativeCostAdv=self.cumulative_cost_adv,\n MaximumRewardAdv=self.maximum_reward_adv,\n tab=\"worker\")\n\n obs = self.env.reset()\n self.cost_list.append(ep_cost)\n ep_reward = 0\n ep_cost = 0\n ep_len = 0\n epoch_num += 1\n\n return self.interact_steps\n\n def train_one_epoch(self, warmup=False, verbose=False):\n '''\n Train one epoch, interact with the runner\n '''\n self.logger.store(CostLimit=self.cost_limit, tab=\"worker\")\n epoch_steps = 0\n\n if warmup and verbose:\n print(\"*** Warming up begin ***\")\n\n steps = self.collect_data(warmup=warmup)\n epoch_steps += steps\n\n training_range = range(self.episode_rerun_num)\n if verbose:\n training_range = tqdm(training_range,\n desc='Training steps: ',\n position=1,\n leave=False)\n for i in training_range:\n off_policy_data = self.get_sample2()\n self.train_Q_network(off_policy_data)\n\n data = self.get_sample()\n self.learn_on_batch(data)\n return epoch_steps\n\n def act(self, obs, deterministic=False, with_logprob=False):\n '''\n Given a single obs, return the action, value, logp.\n This API is used to interact with the env.\n\n @param obs, 1d ndarray\n @param eval, evaluation mode\n @return act, value, logp, 1d ndarray\n '''\n obs = to_tensor(obs).reshape(1, -1)\n with torch.no_grad():\n _, a, logp_a = self.actor_forward(obs, deterministic=deterministic)\n v = self.critic_forward(self.critic, obs)\n # squeeze them to the right shape\n a, v, logp_a = np.squeeze(to_ndarray(a), axis=0), np.squeeze(\n to_ndarray(v)), np.squeeze(to_ndarray(logp_a))\n return a, v, logp_a\n\n def get_risk_estimation(self, obs):\n '''\n Given an obs array (obs_dim), output a risk (qc) value, and the action\n '''\n obs = to_tensor(obs).reshape(1, -1)\n with torch.no_grad():\n _, a, logp_a = self.actor_forward(obs, deterministic=True)\n # vc = self.critic_forward(self.qc, obs)\n qc, _ = self.critic_forward2(self.qc2, obs, a)\n return torch.squeeze(qc).item(), 
np.squeeze(to_ndarray(a), axis=0)\n\n def train_Q_network(self, data: dict):\n self._update_critic2(data)\n self._update_qc2(data)\n self._polyak_update_target(self.critic2, self.critic2_targ)\n self._polyak_update_target(self.qc2, self.qc2_targ)\n\n def learn_on_batch(self, data: dict):\n self._update_actor(data)\n\n LossV, DeltaLossV = self._update_critic(self.critic, data[\"obs\"],\n data[\"ret\"],\n self.critic_optimizer)\n # Log critic update info\n self.logger.store(LossV=LossV, DeltaLossV=DeltaLossV)\n\n LossVQC, DeltaLossVQC = self._update_critic(self.qc, data[\"obs\"],\n data[\"cost_ret\"],\n self.qc_optimizer)\n # Log safety critic update info\n self.logger.store(LossVQC=LossVQC, DeltaLossVQC=DeltaLossVQC)\n\n def critic_forward2(self, critic, obs, act):\n # return the minimum q values and the list of all q_values\n return critic.predict(obs, act)\n\n def critic_forward(self, critic, obs):\n # Critical to ensure value has the right shape.\n # Without this, the training stability will be greatly affected!\n # For instance, shape [3] - shape[3,1] = shape [3, 3] instead of shape [3]\n return torch.squeeze(critic(obs), -1)\n\n def actor_forward(self, obs, act=None, deterministic=False):\n r''' \n Return action distribution and action log prob [optional].\n @param obs, [tensor], (batch, obs_dim)\n @param act, [tensor], (batch, act_dim). If None, log prob is None\n @return pi, [torch distribution], (batch,)\n @return a, [torch distribution], (batch, act_dim)\n @return logp, [tensor], (batch,)\n '''\n pi, a, logp = self.actor(obs, act, deterministic)\n return pi, a, logp\n\n def get_qc_v(self, obs):\n obs = to_tensor(obs).reshape(1, -1)\n with torch.no_grad():\n v = self.critic_forward(self.qc, obs)\n return np.squeeze(to_ndarray(v))\n\n def get_adv_epcost(self, obs, obs_adv, ep_cost):\n with torch.no_grad():\n pi, a_adv, _ = self.actor_forward(\n obs_adv, deterministic=True) # (batch, action space)\n qc_adv, _ = self.critic_forward2(self.qc2, obs, a_adv)\n pi, a, _ = self.actor_forward(\n obs, deterministic=True) # (batch, action space)\n qc, _ = self.critic_forward2(self.qc2, obs, a)\n ratio = torch.mean(qc_adv) / torch.mean(qc)\n\n # with torch.no_grad(): doesn't work!\n # v = self.critic_forward(self.qc, obs)\n # v_adv = self.critic_forward(self.qc, obs_adv)\n # ratio = torch.mean(v_adv) / torch.mean(v)\n return ratio * ep_cost\n\n def vanilla_policy_loss(self, obs, act, logp_old, advantage,\n cost_advantage, multiplier, *args, **kwargs):\n pi, _, logp = self.actor_forward(obs, act)\n ratio = torch.exp(logp - logp_old)\n clip_adv = torch.clamp(ratio, 1 - self.clip_ratio,\n 1 + self.clip_ratio) * advantage\n\n qc_penalty = (ratio * cost_advantage * multiplier).mean()\n loss_vallina = -(torch.min(ratio * advantage, clip_adv)).mean()\n loss_pi = loss_vallina + qc_penalty\n loss_pi /= 1 + multiplier\n # Useful extra info\n approx_kl = (logp_old - logp).mean().item()\n\n ent = pi.entropy().mean().item()\n clipped = ratio.gt(1 + self.clip_ratio) | ratio.lt(1 - self.clip_ratio)\n clipfrac = torch.as_tensor(clipped, dtype=torch.float32).mean().item()\n pi_info = dict(KL=approx_kl,\n Entropy=ent,\n ClipFrac=clipfrac,\n LossQcPenalty=to_ndarray(qc_penalty),\n LossVallina=to_ndarray(loss_vallina))\n\n return loss_pi, pi_info, pi\n\n def kl_regularized_policy_loss(self, obs, act, logp_old, advantage,\n cost_advantage, multiplier, *args,\n **kwargs):\n loss_pi, pi_info, pi = self.vanilla_policy_loss(\n obs, act, logp_old, advantage, cost_advantage, multiplier)\n with torch.no_grad():\n _, 
a_targ, _ = self.actor_forward(obs, deterministic=True)\n\n pi_adv, a_adv, _ = self.actor_forward(self.obs_adv, deterministic=True)\n kl_adv = ((a_targ.detach() - a_adv)**2).sum(axis=-1)\n kl_regularizer = torch.mean(kl_adv) * self.kl_coef\n\n loss_pi += kl_regularizer\n pi_info[\"LossKLAdv\"] = to_ndarray(kl_regularizer)\n return loss_pi, pi_info, pi\n\n def _update_obs_adv(self, obs):\n if \"kl\" in self.mode:\n self.obs_adv = self.get_obs_adv(self.adversary, obs)\n\n def _update_actor(self, data):\n '''\n Update the actor network\n '''\n obs, act, advantage, logp_old = data['obs'], data['act'], data[\n 'adv'], data['logp']\n\n # detach is very important here!\n # Otherwise the gradient will backprop through the multiplier.\n cost_ret = data[\"cost_ret\"]\n cost_advantage = data[\"cost_adv\"]\n ep_cost = data[\"ep_cost\"]\n\n policy_loss = self.mode_to_loss[self.mode]\n\n multiplier = self.controller.control(ep_cost).detach()\n\n self._update_obs_adv(obs)\n\n ep_cost_adv = ep_cost\n multiplier_adv = multiplier\n\n pi_l_old, pi_info_old, _ = policy_loss(obs, act, logp_old, advantage,\n cost_advantage, multiplier,\n multiplier_adv)\n\n # Train policy with multiple steps of gradient descent\n for i in range(self.train_actor_iters):\n self.actor_optimizer.zero_grad()\n # update adversarial obs for a fixed frequency\n if (i + 1) % self.update_adv_freq == 0:\n self._update_obs_adv(obs)\n\n loss_pi, pi_info, pi = policy_loss(obs, act, logp_old, advantage,\n cost_advantage, multiplier,\n multiplier_adv)\n if i == 0 and pi_info['KL'] >= 1e-7:\n print(\"**\" * 20)\n print(\"1st kl: \", pi_info['KL'])\n if pi_info['KL'] > 1.5 * self.target_kl:\n self.logger.log(\n 'Early stopping at step %d due to reaching max kl.' % i)\n break\n\n loss_pi.backward()\n clip_grad_norm_(self.actor.parameters(), 0.02)\n self.actor_optimizer.step()\n\n self.logger.store(StopIter=i,\n LossPi=to_ndarray(pi_l_old),\n ObservedCost=to_ndarray(ep_cost),\n CostLimit=self.cost_limit,\n ObservedCostAdv=to_ndarray(ep_cost_adv),\n Lagrangian=to_ndarray(multiplier),\n LagrangianAdv=to_ndarray(multiplier_adv),\n DeltaLossPi=(to_ndarray(loss_pi) -\n to_ndarray(pi_l_old)),\n QcThres=self.qc_thres,\n QcRet=torch.mean(data[\"cost_ret\"]).item(),\n **pi_info)\n\n def _update_critic(self, critic, obs, ret, critic_optimizer):\n '''\n Update the critic network\n '''\n obs, ret = to_tensor(obs), to_tensor(ret)\n\n def critic_loss():\n ret_pred = self.critic_forward(critic, obs)\n return ((ret_pred - ret)**2).mean()\n\n loss_old = critic_loss().item()\n\n # Value function learning\n for i in range(self.train_critic_iters):\n critic_optimizer.zero_grad()\n loss_critic = critic_loss()\n loss_critic.backward()\n critic_optimizer.step()\n\n return loss_old, to_ndarray(loss_critic) - loss_old\n\n def _update_critic2(self, data):\n '''\n Update the critic network\n '''\n def critic_loss():\n obs, act, reward, done = to_tensor(data['obs']), to_tensor(\n data['act']), to_tensor(data['rew']), to_tensor(data['done'])\n\n obs_next = to_tensor(data['obs2'])\n\n _, q_list = self.critic_forward2(self.critic2, obs, act)\n # Bellman backup for Q functions\n with torch.no_grad():\n # Target actions come from *current* policy\n pi_dist, act_next, _ = self.actor_forward(obs_next,\n deterministic=False)\n\n # Target Q-values\n q_pi_targ, _ = self.critic_forward2(self.critic2_targ,\n obs_next, act_next)\n backup = reward + self.gamma * (1 - done) * q_pi_targ\n # MSE loss against Bellman backup\n loss_q = self.critic2.loss(backup, q_list)\n # Useful info for 
logging\n q_info = dict()\n for i, q in enumerate(q_list):\n q_info[\"QVals\" + str(i)] = to_ndarray(q)\n return loss_q, q_info\n\n # First run one gradient descent step for Q1 and Q2\n self.critic2_optimizer.zero_grad()\n loss_critic, loss_q_info = critic_loss()\n loss_critic.backward()\n self.critic2_optimizer.step()\n\n # Log critic update info\n # Record things\n self.logger.store(LossQ=loss_critic.item(), **loss_q_info)\n\n def _update_qc2(self, data):\n '''\n Update the qc network\n '''\n def critic_loss():\n obs, act, reward, done = to_tensor(data['obs']), to_tensor(\n data['act']), to_tensor(data['cost']), to_tensor(data['done'])\n\n obs_next = to_tensor(data['obs2'])\n\n _, q_list = self.critic_forward2(self.qc2, obs, act)\n # Bellman backup for Q functions\n with torch.no_grad():\n # Target actions come from *current* policy\n pi_dist, act_next, _ = self.actor_forward(obs_next,\n deterministic=False)\n # Target Q-values\n q_pi_targ, _ = self.critic_forward2(self.qc2_targ, obs_next,\n act_next)\n backup = reward + self.gamma * (1 - done) * q_pi_targ\n # backup = reward + self.gamma * q_pi_targ\n # MSE loss against Bellman backup\n loss_q = self.qc2.loss(backup, q_list)\n # Useful info for logging\n q_info = dict()\n for i, q in enumerate(q_list):\n q_info[\"QCVals\" + str(i)] = to_ndarray(q)\n return loss_q, q_info\n\n # First run one gradient descent step for Q1 and Q2\n self.qc2_optimizer.zero_grad()\n loss_qc, loss_qc_info = critic_loss()\n loss_qc.backward()\n self.qc2_optimizer.step()\n\n # Log critic update info\n # Record things\n self.logger.store(LossQC=loss_qc.item(), **loss_qc_info)\n\n def save_model(self):\n actor, actor_optimizer = self.actor.state_dict(\n ), self.actor_optimizer.state_dict()\n critic, critic_optimizer = self.critic.state_dict(\n ), self.critic_optimizer.state_dict()\n critic2, critic2_optimizer = self.critic2.state_dict(\n ), self.critic2_optimizer.state_dict()\n qc2, qc2_optimizer = self.qc2.state_dict(\n ), self.qc2_optimizer.state_dict()\n model = {\n \"actor\": actor,\n \"actor_optimizer\": actor_optimizer,\n \"critic\": critic,\n \"critic_optimizer\": critic_optimizer,\n \"critic2\": critic2,\n \"critic2_optimizer\": critic2_optimizer,\n \"qc2\": qc2,\n \"qc2_optimizer\": qc2_optimizer\n }\n if self.safe_rl:\n qc, qc_optimizer = self.qc.state_dict(\n ), self.qc_optimizer.state_dict()\n model[\"qc\"] = qc\n model[\"qc_optimizer\"] = qc_optimizer\n self.logger.setup_pytorch_saver(model)\n\n def load_model(self, path):\n model = torch.load(path)\n assert type(model) is dict, \"The loaded model type can not be parsed.\"\n actor, actor_optimizer = model[\"actor\"], model[\"actor_optimizer\"]\n critic, critic_optimizer = model[\"critic\"], model[\"critic_optimizer\"]\n critic2, critic2_optimizer = model[\"critic2\"], model[\n \"critic2_optimizer\"]\n qc2, qc2_optimizer = model[\"qc2\"], model[\"qc2_optimizer\"]\n self._init_actor(actor, actor_optimizer)\n self._init_critic(critic, critic_optimizer)\n self._init_adversary_critic(critic2, critic2_optimizer, qc2,\n qc2_optimizer)\n if self.safe_rl:\n qc, qc_optimizer = model[\"qc\"], model[\"qc_optimizer\"]\n self._init_qc(qc, qc_optimizer)", "repo_name": "SteveZhangBit/STL-Robustness", "sub_path": "lib/robustness-of-safe-rl/rsrl/policy/robust_ppo.py", "file_name": "robust_ppo.py", "file_ext": "py", "file_size_in_byte": 26665, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "rsrl.policy.onpolicy_base.OnPolicyBase", 
"line_number": 24, "usage_type": "name"}, {"api_name": "rsrl.policy.adversary.adv_random.AdvUniform", "line_number": 25, "usage_type": "name"}, {"api_name": "rsrl.policy.adversary.adv_random.AdvGaussian", "line_number": 26, "usage_type": "name"}, {"api_name": "rsrl.policy.adversary.adv_mad.AdvMadPPO", "line_number": 27, "usage_type": "name"}, {"api_name": "rsrl.policy.adversary.adv_amad.AdvAmadPPO", "line_number": 28, "usage_type": "name"}, {"api_name": "rsrl.policy.adversary.adv_critic.AdvCriticPPO", "line_number": 29, "usage_type": "name"}, {"api_name": "rsrl.policy.adversary.adv_critic.AdvCriticPPO", "line_number": 30, "usage_type": "name"}, {"api_name": "rsrl.policy.adversary.adv_critic.AdvCriticPPO", "line_number": 31, "usage_type": "name"}, {"api_name": "gym.Env", "line_number": 33, "usage_type": "attribute"}, {"api_name": "rsrl.util.logger.EpochLogger", "line_number": 33, "usage_type": "name"}, {"api_name": "rsrl.policy.LagrangianPIDController", "line_number": 56, "usage_type": "call"}, {"api_name": "rsrl.policy.LagrangianPIDController", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 99, "usage_type": "call"}, {"api_name": "rsrl.policy.model.mlp_ac.EnsembleQCritic", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 167, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 167, "usage_type": "name"}, {"api_name": "rsrl.policy.model.mlp_ac.EnsembleQCritic", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 172, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 172, "usage_type": "name"}, {"api_name": "rsrl.util.torch_util.to_device", "line_number": 179, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 180, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_device", "line_number": 182, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 193, "usage_type": "call"}, {"api_name": "rsrl.policy.adversary.adv_base.Adv", "line_number": 201, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 201, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 241, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 299, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_tensor", "line_number": 320, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 325, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 325, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 326, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 326, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_tensor", "line_number": 333, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 334, "usage_type": "call"}, {"api_name": "torch.squeeze", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 338, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 338, "usage_type": "call"}, {"api_name": "torch.squeeze", "line_number": 369, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_tensor", "line_number": 
384, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 385, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 387, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 387, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 390, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 397, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 408, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 409, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 413, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 421, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 421, "usage_type": "attribute"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 425, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 426, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 435, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 440, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 443, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 495, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 499, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 500, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 502, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 503, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 504, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 505, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 506, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 508, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_tensor", "line_number": 515, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 530, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_tensor", "line_number": 537, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_tensor", "line_number": 538, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_tensor", "line_number": 540, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 544, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 558, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_tensor", "line_number": 576, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_tensor", "line_number": 577, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_tensor", "line_number": 579, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 583, "usage_type": "call"}, {"api_name": "rsrl.util.torch_util.to_ndarray", "line_number": 597, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 637, "usage_type": "call"}]} +{"seq_id": "24472447469", "text": "import requests\nimport datetime\nfrom bs4 import BeautifulSoup\nfrom django.core.management.base import BaseCommand\nfrom prices.src.timeseries_loader import TimeSeriesLoader\n\n\nURL = r\"https://app.energyquantified.com/static/data/ecb_reference_rates_90d.xml\"\nSERIES_NAME = \"EUR/USD\" # only one series here\n\n\nclass Command(TimeSeriesLoader, BaseCommand):\n help = \"Loads EUR/USD exchange rates from the 
European Central Bank.\"\n\n def _get_data(self):\n with requests.Session() as session:\n res = session.get(URL)\n\n return BeautifulSoup(res.content, \"xml\")\n\n def _parse_data(self, data):\n series, values = [], []\n root = data.find(\"Cube\")\n for timeseries in root.find_all(\"Cube\", recursive=False):\n dt = datetime.datetime.strptime(timeseries[\"time\"], \"%Y-%m-%d\").date()\n value = float(timeseries.find(\"Cube\", currency=\"USD\")[\"rate\"])\n values.append({\"series_name\": SERIES_NAME, \"dt\": dt, \"value\": value})\n\n return series, values\n\n def handle(self, *args, **options):\n _, values = self._parse_data(self._get_data())\n self._save_series([{\"name\": SERIES_NAME}])\n self._save_values(values)\n\n\n\n", "repo_name": "espensje/EQTask", "sub_path": "prices/management/commands/load_ecb_rates.py", "file_name": "load_ecb_rates.py", "file_ext": "py", "file_size_in_byte": 1204, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "prices.src.timeseries_loader.TimeSeriesLoader", "line_number": 12, "usage_type": "name"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 12, "usage_type": "name"}, {"api_name": "requests.Session", "line_number": 16, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "attribute"}]} +{"seq_id": "32019331327", "text": "#%%\nimport pandas as pd\nimport numpy as np\nimport yfinance\nfrom mplfinance.original_flavor import candlestick_ohlc\nimport matplotlib.dates as mpl_dates\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = [12, 7]\nplt.rc('font', size=14)\n\nname = 'SPY'\nticker = yfinance.Ticker(name)\ndf = ticker.history(interval=\"1d\",start=\"2020-03-15\", end=\"2020-07-15\")\ndf['Date'] = pd.to_datetime(df.index)\ndf['Date'] = df['Date'].apply(mpl_dates.date2num)\ndf = df.loc[:,['Date', 'Open', 'High', 'Low', 'Close']]\n\ndef isSupport(df,i):\n support = df['Low'][i] < df['Low'][i-1] and df['Low'][i] < df['Low'][i+1] and df['Low'][i+1] < df['Low'][i+2] and df['Low'][i-1] < df['Low'][i-2]\n return support\ndef isResistance(df,i):\n resistance = df['High'][i] > df['High'][i-1] and df['High'][i] > df['High'][i+1] and df['High'][i+1] > df['High'][i+2] and df['High'][i-1] > df['High'][i-2]\n return resistance\n\nlevels = []\nfor i in range(2,df.shape[0]-2):\n if isSupport(df,i):\n levels.append((i,df['Low'][i]))\n elif isResistance(df,i):\n levels.append((i,df['High'][i]))\n\n\n\ns = np.mean(df['High'] - df['Low'])\n\ndef isFarFromLevel(l):\n return np.sum([abs(l-x) < s for x in levels]) == 0\n\nlevels = []\nfor i in range(2,df.shape[0]-2):\n if isSupport(df,i):\n l = df['Low'][i]\n if isFarFromLevel(l):\n levels.append((i,l))\n elif isResistance(df,i):\n l = df['High'][i]\n if isFarFromLevel(l):\n levels.append((i,l))\ndef plot_all():\n fig, ax = plt.subplots()\n candlestick_ohlc(ax,df.values,width=0.6, \\\n colorup='green', colordown='red', alpha=0.8)\n date_format = mpl_dates.DateFormatter('%d %b %Y')\n ax.xaxis.set_major_formatter(date_format)\n fig.autofmt_xdate()\n fig.tight_layout()\n for level in levels:\n plt.hlines(level[1],xmin=df['Date'][level[0]],\\\n xmax=max(df['Date']),colors='blue')\n fig.show()\n\nplot_all()", "repo_name": "Kanav-Arora/Stock-Visuals", "sub_path": "files seperated/support_resistance.py", "file_name": 
"support_resistance.py", "file_ext": "py", "file_size_in_byte": 1874, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 8, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "yfinance.Ticker", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.dates.date2num", "line_number": 15, "usage_type": "attribute"}, {"api_name": "matplotlib.dates", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "mplfinance.original_flavor.candlestick_ohlc", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hlines", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "2134741269", "text": "import json\nfrom urllib.request import Request, urlopen\n\nfrom config import GITHUB_TOKEN\n\n\n# Parses repos and checks that they exist.\nclass ParseRepos:\n def __init__(self, user, repo):\n self.user_name = user\n self.repo = repo\n\n self.success = self._parse_repos()\n\n # Checks that repo exists and returns students githubs.\n def _parse_repos(self):\n if 'https://github.com/' not in self.repo['repo']:\n self.repo['repo'] = 'https://github.com/' + self.user_name + '/' + self.repo['repo']\n if self.repo['repo'].find('github.com') != -1:\n if '/pull/' in self.repo['repo']:\n api_link = self.repo['repo'].replace('github.com', 'api.github.com/repos')\n api_link = api_link.replace('/pull/', '/pulls/')\n commits_str = api_link.find('/commits/')\n\n if commits_str != -1:\n api_link = api_link[:commits_str]\n\n try:\n request = Request(api_link)\n request.add_header('Authorization', 'token %s' % GITHUB_TOKEN)\n with urlopen(request) as url:\n data = json.loads(url.read().decode())\n\n self.repo['repo'] = data['head']['repo']['html_url']\n self.repo['branch'] = data['head']['ref']\n except:\n print('Could not find ', self.user_name, '\\'s repo')\n return False\n return self.is_success()\n else:\n return False\n\n def is_success(self):\n return self.repo\n", "repo_name": "igorlyatskiy/CloneChecker", "sub_path": "src/parseRepos.py", "file_name": "parseRepos.py", "file_ext": "py", "file_size_in_byte": 1607, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "urllib.request.Request", "line_number": 29, "usage_type": "call"}, {"api_name": "config.GITHUB_TOKEN", "line_number": 30, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 31, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "7650088619", "text": "import flet as ft\nimport boto3\nimport subprocess\n\n\n\n\ndef main(page: 
ft.Page):\n    page.title = \"Basic text buttons\"\n    page.bgcolor = 'WHITE'\n    def cli_command(e):\n        command = \"aws s3 ls\"\n        output = subprocess.check_output(command, shell=True).decode().strip()\n        print(output)\n\n    def start_ec2_instance(instance_id):\n        ec2_client = boto3.client('ec2')\n        ec2 = boto3.resource('ec2')\n        instance_name = inst_name.value\n        instances = ec2.instances.filter(Filters=[{'Name': 'tag:Name', 'Values': [instance_name]}])\n        for instance in instances:\n            instance.start()\n            msg = f\"Starting EC2 instance with name: {instance_name} (Instance ID: {instance.id})\"\n            break\n        else:\n            msg = f\"No EC2 instances found with name: {instance_name}\"\n        cnt1.value = msg\n        page.snack_bar = ft.SnackBar(ft.Text(\"EC2 instances found\"), duration=1000)\n        page.snack_bar.open = True\n        page.update()\n    \n    def erase(e):\n        cnt1.value = \"\"\n        page.update()\n\n    \n    \n    inst_name = ft.TextField(label=\"EC2 ID\")\n    btn1 = ft.TextButton(text=\"AWS CLI\", on_click=start_ec2_instance)\n    btn2 = ft.OutlinedButton(text=\"EMPTY\", on_click=erase)\n    img = ft.Image(\n        src=f\"https://personal-golight-image-bucket.s3.ap-northeast-2.amazonaws.com/sample/000174400032_29.jpg\",\n        width=100,\n        height=100,\n        fit=ft.ImageFit.CONTAIN,\n    )\n    cnt1 = ft.TextField(width=1000, height=500, multiline=True)\n\n    \n    page.add(\n        inst_name,\n        btn1,\n        cnt1,\n        btn2\n    )\n\n\nft.app(port = 3456\n       , target=main\n       , view=ft.WEB_BROWSER\n       )", "repo_name": "Hyunsoo-Ryan-Lee/AWS-Training", "sub_path": "Flet/others/awscli.py", "file_name": "awscli.py", "file_ext": "py", "file_size_in_byte": 1706, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "flet.Page", "line_number": 8, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 13, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 17, "usage_type": "call"}, {"api_name": "boto3.resource", "line_number": 18, "usage_type": "call"}, {"api_name": "flet.SnackBar", "line_number": 28, "usage_type": "call"}, {"api_name": "flet.Text", "line_number": 28, "usage_type": "call"}, {"api_name": "flet.TextField", "line_number": 38, "usage_type": "call"}, {"api_name": "flet.TextButton", "line_number": 39, "usage_type": "call"}, {"api_name": "flet.OutlinedButton", "line_number": 40, "usage_type": "call"}, {"api_name": "flet.Image", "line_number": 41, "usage_type": "call"}, {"api_name": "flet.ImageFit", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flet.TextField", "line_number": 47, "usage_type": "call"}, {"api_name": "flet.app", "line_number": 58, "usage_type": "call"}, {"api_name": "flet.WEB_BROWSER", "line_number": 60, "usage_type": "attribute"}]} +{"seq_id": "41222388914", "text": "import cv2\r\nimport numpy as np\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\ndef selected_range(hsv,a,b):\r\n    lower_green = np.array(a)\r\n    upper_green = np.array(b)\r\n    mask = cv2.inRange(hsv, lower_green, upper_green)\r\n    kernel = np.ones((5, 5))\r\n    grad = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\r\n    cnts = cv2.findContours(grad.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\r\n    return cnts\r\n\r\nwhile 1:\r\n    ret, frame = cap.read()\r\n\r\n\r\n    hsv1 = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n    cnts1=selected_range(hsv1,[29,86,6],[100,255,255])\r\n    \r\n    cnts2=selected_range(hsv1,[0,100,100],[5,255,255])\r\n\r\n    \r\n    \r\n    \r\n    if cnts1 != []:    \r\n        cmax = max(cnts1, key=cv2.contourArea)\r\n        x, y, w, h = cv2.boundingRect(cmax)\r\n        cv2.rectangle(frame, (x, y), (x + w, y + h), 
(0, 0, 255), 3)\r\n a = len(cmax)\r\n sum1 = [[0, 0]]\r\n for i in range(a):\r\n sum1 += cmax[i]\r\n\r\n avg = sum1 / a\r\n b1 = int(avg[0][0])\r\n c1 = int((avg[0][1]))\r\n cv2.circle(frame, (b1, c1), 3, (0, 0, 255), 2)\r\n \r\n r1=(b1-240)*(b1-240) + (c1-320)*(c1-320)\r\n\r\n if cnts2 != []: \r\n cmax = max(cnts2, key=cv2.contourArea)\r\n x, y, w, h = cv2.boundingRect(cmax)\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 3)\r\n a = len(cmax)\r\n sum1 = [[0, 0]]\r\n for i in range(a):\r\n sum1 += cmax[i]\r\n\r\n avg = sum1 / a\r\n b2 = int(avg[0][0])\r\n c2 = int((avg[0][1]))\r\n cv2.circle(frame, (b2, c2), 3, (0, 0, 255), 2)\r\n \r\n r2=(b2-240)*(b2-240) + (c2-320)*(c2-320)\r\n\r\n cv2.imshow('frame', frame)\r\n \r\n \r\n if cv2.waitKey(1)==27:\r\n break\r\n\r\ncv2.destroyAllWindows()\r\n\r\n\r\n \r\n", "repo_name": "utkarsh153/envisage-iitm", "sub_path": "color_test.py", "file_name": "color_test.py", "file_ext": "py", "file_size_in_byte": 1913, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "cv2.VideoCapture", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.morphologyEx", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.MORPH_OPEN", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cv2.boundingRect", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.contourArea", "line_number": 45, "usage_type": "attribute"}, {"api_name": "cv2.boundingRect", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "22639204482", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 26 22:10:43 2020\n\n@author: himanshu teotia\n\"\"\"\n\n\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n\ndocumentA = 'the man went out for a walk'\ndocumentB = 'the children sat around the fire'\n\n\n\n\"\"\"\nMachine learning algorithms cannot work with raw text directly. Rather, the text must be converted into vectors of numbers. \nIn natural language processing, a common technique for extracting features from text is to place all of the words that occur \nin the text in a bucket. This aproach is called a bag of words model or BoW for short. 
It’s referred to as a “bag” of words \nbecause any information about the structure of the sentence is lost.\n\n\"\"\"\nbagOfWordsA = documentA.split(' ')\nbagOfWordsB = documentB.split(' ')\n\n\"\"\"\nBy casting the bag of words to a set, we can automatically remove any duplicate words.\n\n\"\"\"\n\nuniqueWords = set(bagOfWordsA).union(set(bagOfWordsB))\n\n\n\"\"\"\nNext, we’ll create a dictionary of words and their occurrence for each document in the corpus (collection of documents).\n\n\"\"\"\n\nnumOfWordsA = dict.fromkeys(uniqueWords, 0)\nfor word in bagOfWordsA:\n numOfWordsA[word] += 1\nnumOfWordsB = dict.fromkeys(uniqueWords, 0)\nfor word in bagOfWordsB:\n numOfWordsB[word] += 1\n \n \n\"\"\"\nAnother problem with the bag of words approach is that it doesn’t account for noise. In other words, certain words are used \nto formulate sentences but do not add any semantic meaning to the text. For example, the most commonly used word in the \nEnglish language is the, which represents 7% of all words written or spoken. You couldn’t deduce anything about a text \ngiven the fact that it contains the word the. On the other hand, words like good and awesome could be used to determine \nwhether a rating was positive or not.\n\nIn natural language processing, useless words are referred to as stop words. The Python natural language toolkit library\n provides a list of English stop words.\n\n\"\"\"\n\n\nfrom nltk.corpus import stopwords\nstopwords.words('english')\n\n\n\"\"\"\nOftentimes, when building a model with the goal of understanding text, you’ll see all of the stop words being removed. \n\nAnother strategy is to score the relative importance of words using TF-IDF.\n\n\"\"\"\n\n\n\"\"\"\nTerm Frequency (TF)\nThe number of times a word appears in a document divided by the total number of words in the document. \nEvery document has its own term frequency.\n\n\"\"\"\n\n\ndef computeTF(wordDict, bagOfWords):\n tfDict = {}\n bagOfWordsCount = len(bagOfWords)\n for word, count in wordDict.items():\n tfDict[word] = count / float(bagOfWordsCount)\n return tfDict\n\n#The following lines compute the term frequency for each of our documents.\ntfA = computeTF(numOfWordsA, bagOfWordsA)\ntfB = computeTF(numOfWordsB, bagOfWordsB)\n\n\n#The following code implements inverse document frequency in Python.\n\n\"\"\"\nInverse Document Frequency (IDF)\n\nThe log of the number of documents divided by the number of documents that contain the word w. 
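# Worked example of the TF formula above: documentA ("the man went out for
# a walk") has 7 words, each occurring once, so each word gets TF = 1/7.
docA = "the man went out for a walk".split(" ")
tf_man = docA.count("man") / float(len(docA))
assert abs(tf_man - 1.0 / 7.0) < 1e-12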
\nInverse document frequency determines the weight of rare words across all documents in the corpus.\n\n\"\"\"\n\ndef computeIDF(documents):\n import math\n N = len(documents)\n \n idfDict = dict.fromkeys(documents[0].keys(), 0)\n for document in documents:\n for word, val in document.items():\n if val > 0:\n idfDict[word] += 1\n \n for word, val in idfDict.items():\n idfDict[word] = math.log(N / float(val))\n return idfDict\n\n\n\n#The IDF is computed once for all documents.\n\nidfs = computeIDF([numOfWordsA, numOfWordsB])\n\n\n\"\"\"\n\nLastly, the TF-IDF is simply the TF multiplied by IDF.\n\n\"\"\"\n\ndef computeTFIDF(tfBagOfWords, idfs):\n tfidf = {}\n for word, val in tfBagOfWords.items():\n tfidf[word] = val * idfs[word]\n return tfidf\n\n\n#Finally, we can compute the TF-IDF scores for all the words in the corpus.\ntfidfA = computeTFIDF(tfA, idfs)\ntfidfB = computeTFIDF(tfB, idfs)\ndf = pd.DataFrame([tfidfA, tfidfB])\n\n\n\n\"\"\"\nRather than manually implementing TF-IDF ourselves, we could use the class provided by sklearn.\n\n\"\"\"\n\nvectorizer = TfidfVectorizer()\nvectors = vectorizer.fit_transform([documentA, documentB])\nfeature_names = vectorizer.get_feature_names()\ndense = vectors.todense()\ndenselist = dense.tolist()\ndf = pd.DataFrame(denselist, columns=feature_names)\n\n\n# reference doc https://towardsdatascience.com/natural-language-processing-feature-engineering-using-tf-idf-e8b9d00e7e76\n\n", "repo_name": "himanshuteotia/tensorflow_2.0-text-classification", "sub_path": "tfidf.py", "file_name": "tfidf.py", "file_ext": "py", "file_size_in_byte": 4464, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "nltk.corpus.stopwords.words", "line_number": 64, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 64, "usage_type": "name"}, {"api_name": "math.log", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 142, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 151, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 156, "usage_type": "call"}]} +{"seq_id": "3764442803", "text": "from __future__ import absolute_import, print_function, unicode_literals\nfrom email.utils import parseaddr\nfrom zope.cachedescriptors.property import Lazy\nfrom gs.group.list.command import (CommandABC, CommandResult)\nfrom Products.CustomUserFolder.interfaces import IGSUserInfo\nfrom .queries import SkipQuery\n\n\nclass StatusCommand(CommandABC):\n def __init__(self, supportGroup):\n self.group = self.supportGroup = supportGroup\n\n @Lazy\n def skipQuery(self):\n retval = SkipQuery()\n return retval\n\n def process(self, email, request):\n components = self.get_command_components(email)\n if components[0] != 'summary':\n m = 'Not a summary command: {0}'.format(email['Subject'])\n raise ValueError(m)\n\n retval = CommandResult.notACommand\n if (len(components) == 2):\n userInfo = self.get_user(email)\n if userInfo and (components[1] == 'on'):\n self.skipQuery.remove_skip(userInfo.id)\n retval = CommandResult.commandStop\n elif userInfo and (components[1] == 'off'):\n self.skipQuery.add_skip(userInfo.id)\n retval = CommandResult.commandStop\n elif not(userInfo):\n retval = CommandResult.commandContinue\n assert retval\n return retval\n\n @staticmethod\n def get_email_addr(emailMessage):\n retval = parseaddr(emailMessage['From'])[1]\n return retval\n\n def get_user(self, email):\n retval 
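# Worked example of the IDF formula above with N = 2 documents: a word in
# both documents gets idf = log(2/2) = 0, a word in only one gets log(2/1).
import math
assert math.log(2 / 2.0) == 0.0
assert abs(math.log(2 / 1.0) - 0.693147) < 1e-6
# Hedged note: sklearn's TfidfVectorizer defaults to a smoothed variant,
# idf = ln((1 + N) / (1 + df)) + 1, and L2-normalizes each row, so its
# scores differ from the manual ones; recent scikit-learn releases also
# rename get_feature_names() to get_feature_names_out().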
= None\n sr = self.group.site_root()\n addr = self.get_email_addr(email)\n user = sr.acl_users.get_userByEmail(addr)\n if user:\n retval = IGSUserInfo(user)\n return retval\n", "repo_name": "groupserver/gs.profile.status.change", "sub_path": "gs/profile/status/change/command.py", "file_name": "command.py", "file_ext": "py", "file_size_in_byte": 1725, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "gs.group.list.command.CommandABC", "line_number": 9, "usage_type": "name"}, {"api_name": "queries.SkipQuery", "line_number": 15, "usage_type": "call"}, {"api_name": "zope.cachedescriptors.property.Lazy", "line_number": 13, "usage_type": "name"}, {"api_name": "email.utils", "line_number": 19, "usage_type": "argument"}, {"api_name": "email.utils", "line_number": 21, "usage_type": "name"}, {"api_name": "gs.group.list.command.CommandResult.notACommand", "line_number": 24, "usage_type": "attribute"}, {"api_name": "gs.group.list.command.CommandResult", "line_number": 24, "usage_type": "name"}, {"api_name": "email.utils", "line_number": 26, "usage_type": "argument"}, {"api_name": "gs.group.list.command.CommandResult.commandStop", "line_number": 29, "usage_type": "attribute"}, {"api_name": "gs.group.list.command.CommandResult", "line_number": 29, "usage_type": "name"}, {"api_name": "gs.group.list.command.CommandResult.commandStop", "line_number": 32, "usage_type": "attribute"}, {"api_name": "gs.group.list.command.CommandResult", "line_number": 32, "usage_type": "name"}, {"api_name": "gs.group.list.command.CommandResult.commandContinue", "line_number": 34, "usage_type": "attribute"}, {"api_name": "gs.group.list.command.CommandResult", "line_number": 34, "usage_type": "name"}, {"api_name": "email.utils.parseaddr", "line_number": 40, "usage_type": "call"}, {"api_name": "email.utils", "line_number": 46, "usage_type": "argument"}, {"api_name": "Products.CustomUserFolder.interfaces.IGSUserInfo", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "3764442803", "text": "#!/usr/bin/python\n# + encoding: iso-8859-2 +\nimport MySQLdb,string,re\nimport locale\nimport ConfigParser\n\nhost=\"monitor\"\nrulesfhead=\"/tmp/offline\"\nconfig=ConfigParser.ConfigParser()\nconfig.read(rulesfhead)\n\nplikhosts=\"/etc/nagios/hosts.cfg.lms\"\nhosts=open(plikhosts,'w')\n\nplikservices=\"/etc/nagios/services.cfg.lms\"\nservices=open(plikservices,'w')\n\nplikhostgroups=\"/etc/nagios/hostgroups.cfg.lms\"\nhostgroups=open(plikhostgroups,'w')\n\nlocale.setlocale(locale.LC_ALL, 'pl_PL')\n\nlmsy=['lms.db']\n\nfor lms in lmsy:\n\tdb=MySQLdb.connect(host=\"lms.host\",user=\"lms.user\",passwd=\"lms.pass\",db=lms)\n\tc=db.cursor()\n\tc.execute(\"\"\"SELECT name,inet_ntoa(address),mask FROM networks WHERE inet_ntoa(address) NOT REGEXP '82.160' AND inet_ntoa(address) NOT REGEXP '192.168.8[2345678]' AND name NOT REGEXP 'ADDR-' AND name NOT REGEXP 'PRIV'\"\"\") \n\td=c.fetchall()\n\t\n\tfor net in d:\n\t\thostgroupswrite=\"\"\"define hostgroup {\n\thostgroup_name\t%s\n\talias\t\t%s\n\tmembers\t\t\"\"\"%(net[0],net[0])\n\t\thostgroups.write(hostgroupswrite)\n\t\tdlugoscsieci=re.split('\\.',net[2],4)\n\t\tdlugoscsieci=256-int(dlugoscsieci[3])\n\t\tc.execute(\"\"\"SELECT inet_aton(%s)\"\"\",net[1])\n\t\tstart=int(c.fetchone()[0])\n\t\tkoniec=start+dlugoscsieci\n\t\t#klasa=str(net[1])\n\t\tc.execute(\"\"\"SELECT name,inet_ntoa(ipaddr) FROM vnodes WHERE ipaddr BETWEEN %i AND %i AND (name LIKE 'BTS%%' OR name LIKE 'RTR%%') ORDER BY 
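# A hedged aside on the string-interpolated SQL above: MySQLdb can bind
# values itself, which handles quoting and avoids injection; identifiers
# (table or column names) still cannot be parameterized.
# query = ("SELECT name, inet_ntoa(ipaddr) FROM vnodes "
#          "WHERE ipaddr BETWEEN %s AND %s ORDER BY ipaddr")
# c.execute(query, (start, koniec))  # the driver performs the escaping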
ipaddr\"\"\"%(start,koniec))\n\t\tz=c.fetchall()\n\t\tfor j in z:\n\t\t\thostswrite=\"\"\"define host {\n\tuse\t\t\tgeneric-host\n\thost_name\t\t%s\n\talias\t\t\t%s\n\taddress\t\t\t%s\n\tcheck_command\t\tcheck-host-alive\n max_check_attempts\t20\n\tnotification_interval\t600\n\tnotification_period\t24x7\n\tnotification_options\td,u,r\n\tcontact_groups\t\tadmins\n}\n\n\"\"\" %(string.lower(j[0]),j[0],j[1])\n\t\t\thosts.write(hostswrite)\n\t\t\tserviceswrite=\"\"\"define service {\n\tuse\t\t\tgeneric-service\n host_name\t\t%s\n\tservice_description\tPING\n\tis_volatile\t\t0\n\tactive_checks_enabled\t1\n\tpassive_checks_enabled\t1\n\tnotifications_enabled\t1\n\tcheck_period\t\t24x7\n\tmax_check_attempts\t20\n\tnormal_check_interval\t5\n\tretry_check_interval\t1\n\tcontact_groups\t\tadmins\n\tnotification_interval\t600\n\tnotification_period\t24x7\n\tnotification_options\tc,r\n\tcheck_command\t\tcheck_ping!100.0,20%%!500.0,60%%\n}\n\n\"\"\" %string.lower(j[0])\n\t\t\tservices.write(serviceswrite)\n\t\t\thostgroupswrite=\"%s,\" %string.lower(j[0])\n\t\t\thostgroups.write(hostgroupswrite)\n\t\thostgroupswrite=\"\"\"gw-%s\\n}\\n\\n\"\"\"%string.lower(net[0])\n\t\thostgroups.write(hostgroupswrite)\n\t\tgwname=int(start)+1\n\t\tc.execute(\"\"\"SELECT inet_ntoa(%s)\"\"\",gwname)\n\t\tgwip=c.fetchone()[0]\n\t\thostswrite=\"\"\"define host {\n\tuse\t\t\tgeneric-host\n\thost_name\t\tgw-%s\n\talias\t\t\tgw-%s\n\taddress\t\t\t%s\n\tcheck_command\t\tcheck-host-alive\n\tmax_check_attempts\t10\n\tnotification_interval\t600\n\tnotification_period\t24x7\n\tnotification_options\td,u,r\n\tcontact_groups\t\tadmins\n}\n\"\"\" %(string.lower(net[0]),string.lower(net[0]),gwip)\n\t\thosts.write(hostswrite)\n\t\tserviceswrite=\"\"\"define service {\n use\t\t\tgeneric-service\n\thost_name gw-%s\n\tservice_description PING\n\tis_volatile 0\n\tactive_checks_enabled 1\n\tpassive_checks_enabled 1\n\tnotifications_enabled 1\n\tcheck_period 24x7\n\tmax_check_attempts 10\n\tnormal_check_interval 5\n\tretry_check_interval 1\n\tcontact_groups admins\n\tnotification_interval 600\n\tnotification_period 24x7\n\tnotification_options c,r\n\tcheck_command check_ping!100.0,20%%!500.0,60%%\n}\n\"\"\"%string.lower(net[0])\n\t\tservices.write(serviceswrite)\n\nhosts.close()\nservices.close()\nhostgroups.close()\n\ndb=MySQLdb.connect(host=\"lms.host\",user=\"lms.user\",passwd=\"lms.pass\",db=\"lms.db\")\nc=db.cursor()\ncfg=config.get(\"init\",\"timestamp\")\nselect=\"\"\"UPDATE reload SET %s=%s WHERE data=%s\"\"\"%(host,cfg,cfg)\nc.execute(select)\n\n", "repo_name": "chilek/lms", "sub_path": "contrib/LMS2Nagios/nagios-v6-gen.py", "file_name": "nagios-v6-gen.py", "file_ext": "py", "file_size_in_byte": 3688, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 115, "dataset": "github-code", "pt": "21", "api": [{"api_name": "ConfigParser.ConfigParser", "line_number": 9, "usage_type": "call"}, {"api_name": "locale.setlocale", "line_number": 21, "usage_type": "call"}, {"api_name": "locale.LC_ALL", "line_number": 21, "usage_type": "attribute"}, {"api_name": "MySQLdb.connect", "line_number": 26, "usage_type": "call"}, {"api_name": "re.split", "line_number": 37, "usage_type": "call"}, {"api_name": "string.lower", "line_number": 59, "usage_type": "call"}, {"api_name": "string.lower", "line_number": 80, "usage_type": "call"}, {"api_name": "string.lower", "line_number": 82, "usage_type": "call"}, {"api_name": "string.lower", "line_number": 84, "usage_type": "call"}, {"api_name": "string.lower", "line_number": 101, "usage_type": 
"call"}, {"api_name": "string.lower", "line_number": 121, "usage_type": "call"}, {"api_name": "MySQLdb.connect", "line_number": 128, "usage_type": "call"}]} +{"seq_id": "43792342199", "text": "import subprocess\nimport sys\nimport os\nimport yaml\nimport time\n\ndef init_check():\n\tif not sys.version.startswith(\"3\"):\n\t\tsys.exit(\"Use Python 3 Try run python3 arfedora-tweak.py\")\ninit_check()\n\ndef get_distro_name():\n\tresult=\"\"\n\tif not os.path.isfile(\"/etc/os-release\"):\n\t\treturn None\n\twith open(\"/etc/os-release\",\"r\") as myfile:\n\t\tfor l in myfile.readlines():\n\t\t\tif l.startswith(\"ID\"):\n\t\t\t\tresult=l.split(\"=\")[1].strip()\n\tif result.startswith(\"\\\"\") and result.endswith(\"\\\"\"):\n\t\treturn result[1:-1]\n\telif result.startswith(\"\\'\") and result.endswith(\"\\'\"):\n\t\treturn result[1:-1]\n\treturn result\n\ndistro_name = get_distro_name()\nhome=os.getenv(\"HOME\")\ndirname=os.path.abspath(os.path.dirname(__file__))\nplugins_location = dirname+\"/Plugins\"\nos.makedirs(plugins_location,exist_ok=True)\n\nif os.getuid() == 0:\n\tuser_id = \"root\"\nelse:\n\tuser_id = \"user\"\n\nif os.uname().machine == \"x86_64\":\n\tarch = \"64bit\"\nelse:\n\tarch = \"32bit\"\n\t\nprograms = {}\nfinally_programs = {}\nspeed = 1\n\ndef to_check(files):\n\tfor f in files:\n\t\tif not os.path.exists( os.path.expanduser(f)):\n\t\t\treturn \"(Install)\"\n\n\treturn \"(Installed)\"\n\n\n\n\ndef read_all_plugins():\n\tfor f in os.listdir(plugins_location):\n\t\tplugin_location = plugins_location+\"/{}\".format(f)\n\t\tif f.endswith(\".arfedora\") and os.path.isfile(plugin_location):\n\t\t\tplugin = yaml.load(open(plugin_location))['arfedora']\n\t\t\tprograms.update(plugin)\nread_all_plugins()\t\n\n\n\ndef f_p():\n\tcount = 1\n\tglobal finally_programs\n\tglobal distor_name\n\tfor k,v in programs.items():\n\t\tif v[3] == \"all\":\n\t\t\tv[3] = arch\n\t\tif v[4] == \"all\":\n\t\t\tv[4] = user_id\n\t\tif \"all\" in v[5]:\n\t\t\tv[5]=[distro_name]\n\t\t\t\n\t\tif v[3] == arch and v[4] == user_id and distro_name in v[5]:\n\t\t\tfinally_programs[str(count)]=[v[0] , v[1] , to_check(v[2])]\n\t\t\tcount+=1\nf_p()\n\ndef reload_(msg=\"\"):\n\tglobal programs\n\tglobal finally_programs\n\tprograms.clear()\n\tfinally_programs.clear()\n\tread_all_plugins()\n\tf_p()\n\tif len(msg) == 0:\n\t\treturn main()\n\telse:\n\t\treturn main(msg)\n\t\ndef y_o_n(m):\n\twhile True:\n\t\tsubprocess.call(\"clear\")\n\t\tprint()\n\t\tprint (m)\n\t\tprint(\"\\nY To Continue || N To Back || Q To Quit : \\n-\",end=\"\")\n\t\ty_n=input().strip()\n\t\tif y_n==\"Y\" or y_n==\"y\":\n\t\t\tbreak\n\t\telif y_n==\"N\" or y_n==\"n\":\n\t\t\treturn main()\n\t\telif y_n==\"q\" or y_n==\"Q\":\n\t\t\tsys.exit(\"\\nBye...\\n\")\n\n\ndef main(msg=\"\"):\n\twhile True:\n\t\tsubprocess.call(\"clear\")\n\t\tprint (\"Choice Task || q To Quit || r To Reload Plugins.\\n\")\n\t\tfor number in range(len(finally_programs.items())):\n\t\t\tprint ( \"{}-{} {}.\\n\".format(str(number+1),finally_programs[str(number+1)][0].title(),finally_programs[str(number+1)][2]) )\n\t\t\t\n\n\t\t\n\t\tif len(msg) != 0:\n\t\t\tprint (msg)\n\t\tmsg=\"\"\n\t\tanswer=input(\"-\").strip()\n\t\tif answer == \"q\" or answer == \"Q\":\n\t\t\tsys.exit(\"\\nbye...\")\n\t\telif answer == \"r\" or answer == \"R\":\n\t\t\treturn reload_()\n\t\t\t\n\t\telif answer in finally_programs.keys():\n\t\t\tprogram = finally_programs[answer]\n\t\t\tif program[2] != \"(Installed)\":\n\t\t\t\ty_o_n(program[0])\n\t\t\t\tfor command in program[1]:\n\t\t\t\t\tcheck = 
subprocess.call(command,shell=True)\n\t\t\t\t\tif check != 0:\n\t\t\t\t\t\treturn main(\"\\nTask ( {} ) Fail.\".format(program[0]))\n\t\t\t\t\ttime.sleep(1)\n\t\t\t\tif check == 0:\n\t\t\t\t\treturn reload_(\"\\nTask ( {} ) Success.\".format(program[0]))\n\t\t\t\t\t\n\t\t\telse:\n\t\t\t\treturn main(\"\\nNothing To Do.\\n\".format(program[0]))\nif __name__ == \"__main__\":\n\tmain()\n", "repo_name": "matrix-1996/arfedora-tweak", "sub_path": "arfedora-tweak.py", "file_name": "arfedora-tweak.py", "file_ext": "py", "file_size_in_byte": 3268, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.version.startswith", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.version", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 28, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 30, "usage_type": "call"}, {"api_name": "os.getuid", "line_number": 32, "usage_type": "call"}, {"api_name": "os.uname", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 48, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 60, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 97, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 107, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 112, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 124, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 133, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 136, "usage_type": "call"}]} +{"seq_id": "24580523603", "text": "from dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\n\nfrom playground.nlp.framework.torch.layers.embedding import PositionalEncoding\n\n\n@dataclass\nclass TransformerConfig:\n vocab_size: int\n n_model: int\n n_layer: int\n n_head: int\n dropout: float\n dim_out: int\n\n\nclass Transformers(nn.Module):\n\n def __init__(self,\n config: TransformerConfig,\n embedding_matrix):\n super(Transformers, self).__init__()\n\n c = config\n self.c = c\n\n self.embedding = nn.Embedding(\n c.vocab_size,\n c.n_model,\n _weight=torch.tensor(embedding_matrix, dtype=torch.float)\n )\n\n self.embedding.weight.requires_grad = False\n self.pos_embedding = PositionalEncoding(c.n_model, c.dropout, c.n_model)\n\n self.transformer_encoder_layer = nn.TransformerEncoderLayer(c.n_model, c.n_head, batch_first=True)  # batch-first (batch, seq, n_model) inputs; requires PyTorch >= 1.9\n self.transformer_encoder = nn.TransformerEncoder(self.transformer_encoder_layer, c.n_layer)\n\n self.fc = nn.Linear(c.n_model, c.dim_out)\n\n def forward(self, x):\n\n embedding = self.embedding(x)\n embedding = self.pos_embedding(embedding)\n\n encode = 
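# A sketch of the same step-runner loop with subprocess.run (Python 3.5+),
# where check=True turns a non-zero exit status into an exception:
import subprocess

def run_step(command):
    try:
        subprocess.run(command, shell=True, check=True)
        return True
    except subprocess.CalledProcessError:
        return False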
self.transformer_encoder(embedding)\n\n encode = torch.mean(encode, dim=1)\n\n logits = self.fc(encode)\n return logits\n", "repo_name": "charliemorning/mlws", "sub_path": "nlp/diagrams/neruel_network/classical/transformer.py", "file_name": "transformer.py", "file_ext": "py", "file_size_in_byte": 1329, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "dataclasses.dataclass", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 32, "usage_type": "attribute"}, {"api_name": "playground.nlp.framework.torch.layers.embedding.PositionalEncoding", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn.TransformerEncoderLayer", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoder", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "27545336689", "text": "#! /usr/bin/env python\n# coding: utf-8\n\nimport sys\nimport os\nimport argparse\ntry:\n from .help import g_help, error_and_exit, jy_input\nexcept ValueError:\n from help import g_help, error_and_exit, jy_input\nfrom jingyun_cli.jingd import request_jingd\n\n\ndef request_sample(method, url, data):\n return request_jingd(\"sample\", method, url, data)\n\n\ndef req_process():\n url = \"/sample/process/\"\n r_data = request_sample(\"GET\", url, None)\n all_process = r_data[\"data\"]\n return all_process\n\n\ndef req_process_detail(process_no):\n url = \"/sample/process/detail/\"\n r_data = request_sample(\"POST\", url, dict(process_no=process_no))\n params = r_data[\"data\"][\"params\"]\n return params\n\n\ndef req_sample_info(sample_no):\n url = \"/sample/info/\"\n r_data = request_sample(\"GET\", url, dict(sample_no=sample_no))\n seq_files = r_data[\"data\"][\"seq_files\"]\n return seq_files\n\n\ndef req_sample_right(sample_no):\n url = \"/sample/right/\"\n r_data = request_sample(\"GET\", url, dict(sample_no=sample_no))\n rights = r_data[\"data\"]\n return rights\n\n\ndef req_analysis(account, sample_no, seq_files, bucket, process_no):\n url = \"/sample/analysis/v2/\"\n data = dict(sample_no=sample_no, seq_files=seq_files, bucket=bucket, process_no=process_no, account=account)\n print(data)\n confirm = jy_input(\"Confirm?\").lower()\n if confirm in [\"y\", \"yes\"]:\n r_data = request_sample(\"POST\", url, data)\n\n\ndef re_run(sample_no, account=None):\n seq_files = req_sample_info(sample_no)\n if seq_files is None:\n error_and_exit(\"Not Found seq_files\")\n files = seq_files.split(\",\")\n if account is None:\n rights = req_sample_right(sample_no)\n for item in rights:\n if item[\"role\"] == 0:\n account = item[\"account\"]\n if account is None:\n error_and_exit(\"Auto Find Account Fail, Please Set Account\")\n process_no = -1\n all_process = req_process()\n for p in all_process:\n if 
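# A hedged sketch: torch.mean over dim=1 above also averages padding
# positions; a padding-aware mean (assuming pad token id 0) weights only
# real tokens.
import torch

def masked_mean(encode, x, pad_id=0):
    mask = (x != pad_id).unsqueeze(-1).float()  # (batch, seq, 1)
    return (encode * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1.0)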
p[\"process_name\"] == files[0]:\n process_no = p[\"process_no\"]\n if process_no == -1:\n error_and_exit(\"Not Found Process Name Is %s\" % files[0])\n params = req_process_detail(process_no)\n r_seq_files = dict()\n for p in params:\n r_seq_files[p[\"param_name\"]] = \"\"\n keys = r_seq_files.keys()\n if len(keys) != len(files) - 1:\n print(keys)\n print(files)\n error_and_exit(\"Please Check input file\")\n bucket = None\n for i in range(len(keys)):\n bucket, file_path = files[i + 1].split(\":\", 1)\n r_seq_files[keys[i]] = file_path\n req_analysis(account, sample_no, r_seq_files, bucket, process_no)\n\n\ndef cli_main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-a\", \"--account\", dest=\"account\", help=g_help(\"user\"))\n parser.add_argument(\"sample_no\", help=g_help(\"sample_no\"))\n if len(sys.argv) <= 1:\n sys.argv.append(\"-h\")\n\n args = parser.parse_args()\n sample_no = int(args.sample_no)\n re_run(sample_no, args.account)\n\n\nif __name__ == \"__main__\":\n sys.argv.extend([\"205\"])\n cli_main()\n", "repo_name": "meisanggou/jingyun", "sub_path": "jingyun_cli/jingd/sample.py", "file_name": "sample.py", "file_ext": "py", "file_size_in_byte": 3070, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "jingyun_cli.jingd.request_jingd", "line_number": 15, "usage_type": "call"}, {"api_name": "help.jy_input", "line_number": 50, "usage_type": "call"}, {"api_name": "help.error_and_exit", "line_number": 58, "usage_type": "call"}, {"api_name": "help.error_and_exit", "line_number": 66, "usage_type": "call"}, {"api_name": "help.error_and_exit", "line_number": 73, "usage_type": "call"}, {"api_name": "help.error_and_exit", "line_number": 82, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 91, "usage_type": "call"}, {"api_name": "help.g_help", "line_number": 92, "usage_type": "call"}, {"api_name": "help.g_help", "line_number": 93, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sys.argv.append", "line_number": 95, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 95, "usage_type": "attribute"}, {"api_name": "sys.argv.extend", "line_number": 103, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 103, "usage_type": "attribute"}]} +{"seq_id": "30569978246", "text": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport os\nimport sys\nimport time\nfrom itertools import groupby\nfrom operator import itemgetter\n\nimport pyhocon\nimport six\nimport yaml\nfrom colors import color\nfrom jsonschema import validate, ValidationError as JSONSchemaValidationError\nfrom jsonschema.validators import validator_for\nfrom pathlib import Path\nfrom pyparsing import ParseBaseException\n\nLINTER_URL = \"https://www.jsonschemavalidator.net/\"\n\n\nclass LocalStorage(object):\n def __init__(self, driver):\n self.driver = driver\n\n def __len__(self):\n return self.driver.execute_script(\"return window.localStorage.length;\")\n\n def items(self):\n return self.driver.execute_script(\n \"\"\"\n var ls = window.localStorage, items = {};\n for (var i = 0, k; i < ls.length; ++i)\n items[k = ls.key(i)] = ls.getItem(k);\n return items;\n \"\"\"\n )\n\n def keys(self):\n return self.driver.execute_script(\n \"\"\"\n var ls = window.localStorage, keys = [];\n for (var i = 0; i < ls.length; ++i)\n keys[i] = ls.key(i);\n return keys;\n \"\"\"\n )\n\n def get(self, 
key):\n return self.driver.execute_script(\n \"return window.localStorage.getItem(arguments[0]);\", key\n )\n\n def remove(self, key):\n self.driver.execute_script(\"window.localStorage.removeItem(arguments[0]);\", key)\n\n def clear(self):\n self.driver.execute_script(\"window.localStorage.clear();\")\n\n def __getitem__(self, key):\n value = self.get(key)\n if value is None:\n raise KeyError(key)\n return value\n\n def __setitem__(self, key, value):\n self.driver.execute_script(\n \"window.localStorage.setItem(arguments[0], arguments[1]);\", key, value\n )\n\n def __contains__(self, key):\n return key in self.keys()\n\n def __iter__(self):\n return iter(self.keys())\n\n def __repr__(self):\n return repr(self.items())\n\n\nclass ValidationError(Exception):\n\n def __init__(self, *args):\n super(ValidationError, self).__init__(*args)\n self.message = self.args[0]\n\n def report(self, schema_file):\n message = color(schema_file, fg='red')\n if self.message:\n message += \": {}\".format(self.message)\n print(message)\n\n\nclass InvalidFile(ValidationError):\n \"\"\"\n InvalidFile\n Wraps other exceptions that occur in file validation\n\n :param message: message to display\n \"\"\"\n\n def __init__(self, message):\n super(InvalidFile, self).__init__(message)\n exc_type, _, _ = self.exc_info = sys.exc_info()\n if exc_type:\n self.message = \"{}: {}\".format(exc_type.__name__, message)\n\n def raise_original(self):\n six.reraise(*self.exc_info)\n\n\ndef load_hocon(name):\n \"\"\"\n load_hocon\n load configuration from file\n\n :param name: file path\n \"\"\"\n return pyhocon.ConfigFactory.parse_file(name).as_plain_ordered_dict()\n\n\ndef validate_ascii_only(name):\n invalid_char = next(\n (\n (line_num, column, char)\n for line_num, line in enumerate(Path(name).read_text().splitlines())\n for column, char in enumerate(line)\n if ord(char) not in range(128)\n ),\n None,\n )\n if invalid_char:\n line, column, char = invalid_char\n raise ValidationError(\n \"file contains non-ascii character {!r} in line {} pos {}\".format(\n char, line, column\n )\n )\n\n\ndef validate_file(meta, name):\n \"\"\"\n validate_file\n validate file according to meta-scheme\n\n :param meta: meta-scheme\n :param name: file path\n \"\"\"\n validate_ascii_only(name)\n try:\n schema = load_hocon(name)\n except ParseBaseException as e:\n raise InvalidFile(repr(e))\n\n try:\n validate(schema, meta)\n return schema\n except JSONSchemaValidationError as e:\n path = \"->\".join(e.absolute_path)\n message = \"{}: {}\".format(path, e.args[0])\n raise InvalidFile(message)\n except Exception as e:\n raise InvalidFile(str(e))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"files\", nargs=\"+\")\n parser.add_argument(\n \"--linter\", \"-l\", action=\"store_true\", help=\"open jsonschema linter in browser\"\n )\n parser.add_argument(\n \"--raise\",\n \"-r\",\n action=\"store_true\",\n dest=\"raise_\",\n help=\"raise first exception encountered and print traceback\",\n )\n parser.add_argument(\n \"--detect-collisions\",\n action=\"store_true\",\n help=\"detect objects with the same name in different modules\",\n )\n return parser.parse_args()\n\n\ndef open_linter(driver, meta, schema):\n driver.maximize_window()\n driver.get(LINTER_URL)\n storage = LocalStorage(driver)\n storage[\"jsonText\"] = json.dumps(schema, indent=4)\n storage[\"schemaText\"] = json.dumps(meta, indent=4)\n driver.refresh()\n\n\nclass LazyDriver(object):\n def __init__(self):\n self._driver = None\n try:\n from selenium import 
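# Worked example of the jsonschema call used in validate_file above:
# validate() raises an error whose absolute_path walks to the offending
# element, which is what the InvalidFile message joins with "->".
from jsonschema import validate as js_validate, ValidationError as JSError

demo_schema = {"type": "object", "properties": {"name": {"type": "string"}}}
try:
    js_validate({"name": 1}, demo_schema)  # 1 is not a string
except JSError as e:
    assert "->".join(str(p) for p in e.absolute_path) == "name"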
webdriver, common\n except ImportError:\n webdriver = None\n common = None\n self.webdriver = webdriver\n self.common = common\n\n def __getattr__(self, item):\n return getattr(self.driver, item)\n\n @property\n def driver(self):\n if self._driver:\n return self._driver\n if not (self.webdriver and self.common):\n print(\"selenium not installed: linter unavailable\")\n return None\n\n for driver_type in self.webdriver.Chrome, self.webdriver.Firefox:\n try:\n self._driver = driver_type()\n break\n except self.common.exceptions.WebDriverException:\n pass\n else:\n print(\"No webdriver is found for chrome or firefox\")\n\n return self._driver\n\n def wait(self):\n if not self._driver:\n return\n try:\n while True:\n self._driver.title\n time.sleep(0.5)\n except self.common.exceptions.WebDriverException:\n pass\n\n\ndef remove_description(dct):\n dct.pop(\"description\", None)\n for value in dct.values():\n try:\n remove_description(value)\n except (TypeError, AttributeError):\n pass\n\n\ndef main(here: str):\n args = parse_args()\n meta = load_hocon(here + \"/meta.conf\")\n validator_for(meta).check_schema(meta)\n\n driver = LazyDriver()\n\n collisions = {}\n\n for schema_file in args.files:\n\n if Path(schema_file).name.startswith(\"_\"):\n continue\n\n try:\n schema = validate_file(meta, schema_file)\n except InvalidFile as e:\n if args.linter and driver.driver:\n open_linter(driver, meta, load_hocon(schema_file))\n elif args.raise_:\n e.raise_original()\n\n e.report(schema_file)\n except ValidationError as e:\n e.report(schema_file)\n else:\n for def_name, value in schema.get(\"_definitions\", {}).items():\n service_name = str(Path(schema_file).stem)\n remove_description(value)\n collisions.setdefault(def_name, {})[service_name] = value\n\n warning = color(\"warning\", fg=\"red\")\n\n if args.detect_collisions:\n for name, values in collisions.items():\n if len(values) <= 1:\n continue\n groups = [\n [service for (service, _) in pairs]\n for _, pairs in groupby(values.items(), itemgetter(1))\n ]\n if not groups:\n raise RuntimeError(\"Unknown error\")\n print(\n \"{}: collision for {}:\\n{}\".format(warning, name, yaml.dump(groups)),\n end=\"\",\n )\n\n driver.wait()\n\n\nif __name__ == \"__main__\":\n main(here=os.path.dirname(__file__))\n", "repo_name": "allegroai/clearml-server", "sub_path": "apiserver/schema/meta/validate.py", "file_name": "validate.py", "file_ext": "py", "file_size_in_byte": 8097, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 334, "dataset": "github-code", "pt": "21", "api": [{"api_name": "colors.color", "line_number": 90, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 106, "usage_type": "call"}, {"api_name": "six.reraise", "line_number": 111, "usage_type": "call"}, {"api_name": "pyhocon.ConfigFactory.parse_file", "line_number": 121, "usage_type": "call"}, {"api_name": "pyhocon.ConfigFactory", "line_number": 121, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 128, "usage_type": "call"}, {"api_name": "pyparsing.ParseBaseException", "line_number": 154, "usage_type": "name"}, {"api_name": "jsonschema.validate", "line_number": 158, "usage_type": "call"}, {"api_name": "jsonschema.ValidationError", "line_number": 160, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 169, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 193, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 194, "usage_type": "call"}, {"api_name": "selenium.webdriver", 
"line_number": 204, "usage_type": "name"}, {"api_name": "selenium.common", "line_number": 205, "usage_type": "name"}, {"api_name": "selenium.webdriver", "line_number": 206, "usage_type": "name"}, {"api_name": "selenium.common", "line_number": 207, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 237, "usage_type": "call"}, {"api_name": "jsonschema.validators.validator_for", "line_number": 254, "usage_type": "call"}, {"api_name": "{'webdriver': 'selenium.webdriver', 'common': 'selenium.common'}", "line_number": 256, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 262, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 278, "usage_type": "call"}, {"api_name": "colors.color", "line_number": 282, "usage_type": "call"}, {"api_name": "itertools.groupby", "line_number": 290, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 290, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 295, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 303, "usage_type": "call"}, {"api_name": "os.path", "line_number": 303, "usage_type": "attribute"}]} +{"seq_id": "10184612000", "text": "import time\nimport logging\nimport requests\n\n_LOGGER = logging.getLogger(__name__)\n\nclass OpenWeatherMap():\n\n CLOUDY_THRESHOLD = 75\n CLOUDY_DESCRIPTIONS = ['rain']\n SUNNY_DESCRIPTIONS = []\n MIN_UPDATE_INTERVAL = 10 * 60\n\n def __init__(self, api_key, lat, lon):\n self._API_KEY = api_key\n self._LAT = lat\n self._LON = lon\n self.update_interval = OpenWeatherMap.MIN_UPDATE_INTERVAL\n self.weather_data = None\n self._update_weather_data()\n\n\n def _update_weather_data(self):\n if not self._time_check():\n return\n try:\n payload = {'lat': self._LAT, 'lon': self._LON, 'units': 'imperial', 'appid': self._API_KEY}\n response = requests.get('https://api.openweathermap.org/data/2.5/weather', params=payload)\n except requests.exceptions.RequestException as e:\n _LOGGER.warning(e)\n else:\n if response.status_code == requests.codes.ok and response.json() is not None:\n self.weather_data = response.json()\n _LOGGER.info('Weather data updated')\n else:\n _LOGGER.warning(f'Unable to fetch weather data, status_code = {response.status_code}')\n\n def _time_check(self) -> bool:\n \"\"\"Test if update interval has been exceeded.\"\"\"\n if self.weather_data is None or (\n time.time() > (self.weather_data['dt'] + self.update_interval)):\n return True\n return False\n\n @property\n def is_cloudy(self) -> bool:\n self._update_weather_data()\n for sunny in OpenWeatherMap.SUNNY_DESCRIPTIONS:\n if sunny in self.weather_description:\n return False\n for cloudy in OpenWeatherMap.CLOUDY_DESCRIPTIONS:\n if cloudy in self.weather_description:\n return True \n return self.cloud_coverage > OpenWeatherMap.CLOUDY_THRESHOLD\n\n @property\n def weather_description(self) -> str:\n self._update_weather_data()\n return self.weather_data['weather'][0]['description'].lower()\n\n @property\n def cloud_coverage(self) -> int:\n self._update_weather_data()\n return self.weather_data['clouds']['all']\n\n @property\n def is_sun_up(self) -> bool:\n return self.is_sun_in_range()\n\n def is_sun_in_range(self, rise_offset=0, set_offset=0) -> bool:\n \"\"\"With no params this method checks if the sun is up. 
Optional offsets can be provided to check a custom range.\n        Offset values are in seconds.\"\"\"\n        self._update_weather_data()\n        if time.time() > self.weather_data['sys']['sunrise'] - rise_offset \\\n                and time.time() < self.weather_data['sys']['sunset'] + set_offset:\n            return True\n\n        return False\n", "repo_name": "Neil-Hancock/smarthome", "sub_path": "src/clients/weather.py", "file_name": "weather.py", "file_ext": "py", "file_size_in_byte": 2775, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 29, "usage_type": "attribute"}, {"api_name": "requests.codes", "line_number": 32, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 41, "usage_type": "call"}, {"api_name": "time.time", "line_number": 74, "usage_type": "call"}, {"api_name": "time.time", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "32875612041", "text": "from typing import List\n\n# Starting from the bottom-left corner, each comparison always eliminates one row or one column. O(n+m)\nclass Solution:\n    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n        n = len(matrix)\n        m = len(matrix[0])\n        i, j = n-1, 0\n        while i >= 0 and j < m:\n            val = matrix[i][j]\n            if val < target:\n                j += 1\n            elif val > target:\n                i -= 1\n            else:\n                return True\n        return False\n", "repo_name": "yeardream-high6/coding_test", "sub_path": "곽치영/Leetcode/2022.07/24/leetcode240.py", "file_name": "leetcode240.py", "file_ext": "py", "file_size_in_byte": 528, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.List", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "18588884851", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# pylint: disable=invalid-name, no-member\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose\n\nfrom astropy import units as u\nfrom astropy.modeling import models\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom astropy.wcs import wcs\n\n\n@pytest.mark.parametrize(\n    \"inp\",\n    [(0, 0), (4000, -20.56), (-2001.5, 45.9), (0, 90), (0, -90), (np.mgrid[:4, :6])],\n)\ndef test_against_wcslib(inp):\n    w = wcs.WCS()\n    crval = [202.4823228, 47.17511893]\n    w.wcs.crval = crval\n    w.wcs.ctype = [\"RA---TAN\", \"DEC--TAN\"]\n\n    lonpole = 180\n    tan = models.Pix2Sky_TAN()\n    n2c = models.RotateNative2Celestial(\n        crval[0] * u.deg, crval[1] * u.deg, lonpole * u.deg\n    )\n    c2n = models.RotateCelestial2Native(\n        crval[0] * u.deg, crval[1] * u.deg, lonpole * u.deg\n    )\n    m = tan | n2c\n    minv = c2n | tan.inverse\n\n    radec = w.wcs_pix2world(inp[0], inp[1], 1)\n    xy = w.wcs_world2pix(radec[0], radec[1], 1)\n\n    assert_allclose(m(*inp), radec, atol=1e-12)\n    assert_allclose(minv(*radec), xy, atol=1e-12)\n\n\n@pytest.mark.parametrize(\n    \"inp\", [(40 * u.deg, -0.057 * u.rad), (21.5 * u.arcsec, 45.9 * u.deg)]\n)\ndef test_roundtrip_sky_rotation(inp):\n    lon, lat, lon_pole = 42 * u.deg, (43 * u.deg).to(u.arcsec), (44 * u.deg).to(u.rad)\n    n2c = models.RotateNative2Celestial(lon, lat, lon_pole)\n    c2n = models.RotateCelestial2Native(lon, lat, lon_pole)\n    assert_quantity_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13 * u.deg)\n    assert_quantity_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13 * u.deg)\n\n\ndef test_Rotation2D():\n    model = models.Rotation2D(angle=90 * 
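# Worked trace of the staircase search above on [[1, 4], [2, 5]] with
# target 4: start at matrix[1][0] = 2 (bottom-left); 2 < 4 discards
# column 0 (j -> 1); matrix[1][1] = 5 > 4 discards row 1 (i -> 0);
# matrix[0][1] = 4 matches, so it returns True after three probes.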
u.deg)\n a, b = 1 * u.deg, 0 * u.deg\n x, y = model(a, b)\n assert_quantity_allclose([x, y], [0 * u.deg, 1 * u.deg], atol=1e-10 * u.deg)\n\n\ndef test_Rotation2D_inverse():\n model = models.Rotation2D(angle=234.23494 * u.deg)\n x, y = model.inverse(*model(1 * u.deg, 0 * u.deg))\n assert_quantity_allclose([x, y], [1 * u.deg, 0 * u.deg], atol=1e-10 * u.deg)\n\n\ndef test_euler_angle_rotations():\n ydeg = (90 * u.deg, 0 * u.deg)\n y = (90, 0)\n z = (0, 90)\n\n # rotate y into minus z\n model = models.EulerAngleRotation(0 * u.rad, np.pi / 2 * u.rad, 0 * u.rad, \"zxz\")\n assert_allclose(model(*z), y, atol=10**-12)\n model = models.EulerAngleRotation(0 * u.deg, 90 * u.deg, 0 * u.deg, \"zxz\")\n assert_quantity_allclose(model(*(z * u.deg)), ydeg, atol=10**-12 * u.deg)\n\n\n@pytest.mark.parametrize(\n \"params\",\n [\n (60, 10, 25),\n (60 * u.deg, 10 * u.deg, 25 * u.deg),\n ((60 * u.deg).to(u.rad), (10 * u.deg).to(u.rad), (25 * u.deg).to(u.rad)),\n ],\n)\ndef test_euler_rotations_with_units(params):\n x = 1 * u.deg\n y = 1 * u.deg\n phi, theta, psi = params\n\n urot = models.EulerAngleRotation(phi, theta, psi, axes_order=\"xyz\")\n a, b = urot(x.value, y.value)\n assert_allclose((a, b), (-23.614457631192547, 9.631254579686113))\n a, b = urot(x, y)\n assert_quantity_allclose(\n (a, b), (-23.614457631192547 * u.deg, 9.631254579686113 * u.deg)\n )\n a, b = urot(x.to(u.rad), y.to(u.rad))\n assert_quantity_allclose(\n (a, b), (-23.614457631192547 * u.deg, 9.631254579686113 * u.deg)\n )\n\n\ndef test_attributes():\n n2c = models.RotateNative2Celestial(20016 * u.arcsec, -72.3 * u.deg, np.pi * u.rad)\n assert_allclose(n2c.lat.value, -72.3)\n assert_allclose(n2c.lat._raw_value, -1.2618730491919001)\n assert_allclose(n2c.lon.value, 20016)\n assert_allclose(n2c.lon._raw_value, 0.09704030641088472)\n assert_allclose(n2c.lon_pole.value, np.pi)\n assert_allclose(n2c.lon_pole._raw_value, np.pi)\n assert n2c.lon.unit is u.Unit(\"arcsec\")\n assert n2c.lon.internal_unit is u.Unit(\"rad\")\n assert n2c.lat.unit is u.Unit(\"deg\")\n assert n2c.lat.internal_unit is u.Unit(\"rad\")\n assert n2c.lon_pole.unit is u.Unit(\"rad\")\n assert n2c.lon_pole.internal_unit is u.Unit(\"rad\")\n", "repo_name": "astropy/astropy", "sub_path": "astropy/modeling/tests/test_quantities_rotations.py", "file_name": "test_quantities_rotations.py", "file_ext": "py", "file_size_in_byte": 3967, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4015, "dataset": "github-code", "pt": "21", "api": [{"api_name": "astropy.wcs.wcs.WCS", "line_number": 19, "usage_type": "call"}, {"api_name": "astropy.wcs.wcs", "line_number": 19, "usage_type": "name"}, {"api_name": "astropy.modeling.models.Pix2Sky_TAN", "line_number": 25, "usage_type": "call"}, {"api_name": "astropy.modeling.models", "line_number": 25, "usage_type": "name"}, {"api_name": "astropy.modeling.models.RotateNative2Celestial", "line_number": 26, "usage_type": "call"}, {"api_name": "astropy.modeling.models", "line_number": 26, "usage_type": "name"}, {"api_name": "astropy.units.deg", "line_number": 27, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 27, "usage_type": "name"}, {"api_name": "astropy.modeling.models.RotateCelestial2Native", "line_number": 29, "usage_type": "call"}, {"api_name": "astropy.modeling.models", "line_number": 29, "usage_type": "name"}, {"api_name": "astropy.units.deg", "line_number": 30, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 30, "usage_type": "name"}, {"api_name": 
"numpy.testing.assert_allclose", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 39, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 14, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.mgrid", "line_number": 16, "usage_type": "attribute"}, {"api_name": "astropy.units.deg", "line_number": 46, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 46, "usage_type": "name"}, {"api_name": "astropy.units.arcsec", "line_number": 46, "usage_type": "attribute"}, {"api_name": "astropy.units.rad", "line_number": 46, "usage_type": "attribute"}, {"api_name": "astropy.modeling.models.RotateNative2Celestial", "line_number": 47, "usage_type": "call"}, {"api_name": "astropy.modeling.models", "line_number": 47, "usage_type": "name"}, {"api_name": "astropy.modeling.models.RotateCelestial2Native", "line_number": 48, "usage_type": "call"}, {"api_name": "astropy.modeling.models", "line_number": 48, "usage_type": "name"}, {"api_name": "astropy.tests.helper.assert_quantity_allclose", "line_number": 49, "usage_type": "call"}, {"api_name": "astropy.units.deg", "line_number": 49, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 49, "usage_type": "name"}, {"api_name": "astropy.tests.helper.assert_quantity_allclose", "line_number": 50, "usage_type": "call"}, {"api_name": "astropy.units.deg", "line_number": 50, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 50, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 42, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 42, "usage_type": "attribute"}, {"api_name": "astropy.units.deg", "line_number": 43, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 43, "usage_type": "name"}, {"api_name": "astropy.units.rad", "line_number": 43, "usage_type": "attribute"}, {"api_name": "astropy.units.arcsec", "line_number": 43, "usage_type": "attribute"}, {"api_name": "astropy.modeling.models.Rotation2D", "line_number": 54, "usage_type": "call"}, {"api_name": "astropy.modeling.models", "line_number": 54, "usage_type": "name"}, {"api_name": "astropy.units.deg", "line_number": 54, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 54, "usage_type": "name"}, {"api_name": "astropy.units.deg", "line_number": 55, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 55, "usage_type": "name"}, {"api_name": "astropy.tests.helper.assert_quantity_allclose", "line_number": 57, "usage_type": "call"}, {"api_name": "astropy.units.deg", "line_number": 57, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 57, "usage_type": "name"}, {"api_name": "astropy.modeling.models.Rotation2D", "line_number": 61, "usage_type": "call"}, {"api_name": "astropy.modeling.models", "line_number": 61, "usage_type": "name"}, {"api_name": "astropy.units.deg", "line_number": 61, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 61, "usage_type": "name"}, {"api_name": "astropy.units.deg", "line_number": 62, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 62, "usage_type": "name"}, {"api_name": "astropy.tests.helper.assert_quantity_allclose", "line_number": 63, "usage_type": "call"}, {"api_name": "astropy.units.deg", "line_number": 63, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 63, "usage_type": "name"}, 
{"api_name": "astropy.units.deg", "line_number": 67, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 67, "usage_type": "name"}, {"api_name": "astropy.modeling.models.EulerAngleRotation", "line_number": 72, "usage_type": "call"}, {"api_name": "astropy.modeling.models", "line_number": 72, "usage_type": "name"}, {"api_name": "astropy.units.rad", "line_number": 72, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 72, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 73, "usage_type": "call"}, {"api_name": "astropy.modeling.models.EulerAngleRotation", "line_number": 74, "usage_type": "call"}, {"api_name": "astropy.modeling.models", "line_number": 74, "usage_type": "name"}, {"api_name": "astropy.units.deg", "line_number": 74, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 74, "usage_type": "name"}, {"api_name": "astropy.tests.helper.assert_quantity_allclose", "line_number": 75, "usage_type": "call"}, {"api_name": "astropy.units.deg", "line_number": 75, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 75, "usage_type": "name"}, {"api_name": "astropy.units.deg", "line_number": 87, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 87, "usage_type": "name"}, {"api_name": "astropy.units.deg", "line_number": 88, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 88, "usage_type": "name"}, {"api_name": "astropy.modeling.models.EulerAngleRotation", "line_number": 91, "usage_type": "call"}, {"api_name": "astropy.modeling.models", "line_number": 91, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 93, "usage_type": "call"}, {"api_name": "astropy.tests.helper.assert_quantity_allclose", "line_number": 95, "usage_type": "call"}, {"api_name": "astropy.units.deg", "line_number": 96, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 96, "usage_type": "name"}, {"api_name": "astropy.units.rad", "line_number": 98, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 98, "usage_type": "name"}, {"api_name": "astropy.tests.helper.assert_quantity_allclose", "line_number": 99, "usage_type": "call"}, {"api_name": "astropy.units.deg", "line_number": 100, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 100, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 78, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 78, "usage_type": "attribute"}, {"api_name": "astropy.units.deg", "line_number": 82, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 82, "usage_type": "name"}, {"api_name": "astropy.units.deg", "line_number": 83, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 83, "usage_type": "name"}, {"api_name": "astropy.units.rad", "line_number": 83, "usage_type": "attribute"}, {"api_name": "astropy.modeling.models.RotateNative2Celestial", "line_number": 105, "usage_type": "call"}, {"api_name": "astropy.modeling.models", "line_number": 105, "usage_type": "name"}, {"api_name": "astropy.units.arcsec", "line_number": 105, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 105, "usage_type": "name"}, {"api_name": "astropy.units.deg", "line_number": 105, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 105, "usage_type": "attribute"}, {"api_name": 
"astropy.units.rad", "line_number": 105, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 111, "usage_type": "attribute"}, {"api_name": "astropy.units.Unit", "line_number": 112, "usage_type": "call"}, {"api_name": "astropy.units", "line_number": 112, "usage_type": "name"}, {"api_name": "astropy.units.Unit", "line_number": 113, "usage_type": "call"}, {"api_name": "astropy.units", "line_number": 113, "usage_type": "name"}, {"api_name": "astropy.units.Unit", "line_number": 114, "usage_type": "call"}, {"api_name": "astropy.units", "line_number": 114, "usage_type": "name"}, {"api_name": "astropy.units.Unit", "line_number": 115, "usage_type": "call"}, {"api_name": "astropy.units", "line_number": 115, "usage_type": "name"}, {"api_name": "astropy.units.Unit", "line_number": 116, "usage_type": "call"}, {"api_name": "astropy.units", "line_number": 116, "usage_type": "name"}, {"api_name": "astropy.units.Unit", "line_number": 117, "usage_type": "call"}, {"api_name": "astropy.units", "line_number": 117, "usage_type": "name"}]} +{"seq_id": "13127740995", "text": "from writers import foyer_xml_writer\nfrom writers.foyer_xml_writer import parmed_to_foyer_xml, mbuild_to_foyer_xml\nfrom bondwalk import bond_walk\nfrom bondwalk.bond_walk import MadAtom, MadBond, BondWalker\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport ele\nimport espaloma as esp\nimport forcefield_utilities as ffutils\nimport foyer\nimport gmso\nimport mbuild as mb\nfrom mbuild.lib.recipes import Polymer\nfrom mbuild.formats.hoomd_forcefield import create_hoomd_forcefield\nimport numpy as np\nimport torch\nfrom openff.toolkit.topology import Molecule\nfrom mbuild.formats.hoomd_forcefield import create_hoomd_forcefield\nimport hoomd\nimport gsd.hoomd\nimport matplotlib.pyplot as plt\nimport rdkit\nfrom rdkit import Chem\n\nimport os\n\nif not os.path.exists(\"espaloma_model.pt\"):\n os.system(\"wget http://data.wangyq.net/espaloma_model.pt\")\n\n\ndef openff_Molecule(MOL2FILEPATH,PDBFILEPATH):\n rdmol = Chem.MolFromMol2File(MOL2FILEPATH)\n from_rdmol = Molecule.from_rdkit(rdmol)\n atom_list = [a for a in from_rdmol.atoms if 1 not in [a.atomic_number]]\n for atom in atom_list:\n atom.formal_charge = 0\n b = BondWalker(from_rdmol)\n comp = b.fill_in_bonds()\n from_rdmol.to_file(PDBFILEPATH,file_format=\"pdb\")\n return comp\n\ndef espaloma(molecule):\n molecule_graph = esp.Graph(molecule)\n espaloma_model = torch.load(\"espaloma_model.pt\")\n espaloma_model(molecule_graph.heterograph)\n openmm_system = esp.graphs.deploy.openmm_system_from_graph(molecule_graph,charge_method=\"nn\")\n return openmm_system\n\n\ndef dictionaries(typemap,BondForces,AngleForces,TorsionForces,PairForces):\n bond_types = []\n bond_dict = dict() \n for i in range(BondForces.getNumBonds()):\n bond_parms = BondForces.getBondParameters(index=i)\n l0 = bond_parms[2]/bond_parms[2].unit\n k = bond_parms[3]/bond_parms[3].unit\n 
bond_dict[typemap[bond_parms[0]],typemap[bond_parms[1]]] = {'k':k,'l0':l0}\n \n angle_types = []\n angle_dict = dict()\n for i in range(AngleForces.getNumAngles()):\n angle_parms = AngleForces.getAngleParameters(index=i)\n k = angle_parms[4]/angle_parms[4].unit\n t0 = angle_parms[3]/angle_parms[3].unit \n angle_dict[typemap[angle_parms[0]],typemap[angle_parms[1]],typemap[angle_parms[2]]] = {'k':k,'t0':t0}\n \n dihedral_types = []\n dihedral_dict = {}\n \n for i in range(TorsionForces.getNumTorsions()):\n if i%6==0:\n periodicity=[]\n phase = []\n k = []\n dihedral_parms = TorsionForces.getTorsionParameters(index=i)\n periodicity.append(dihedral_parms[4]) \n phase.append( dihedral_parms[5]/dihedral_parms[5].unit)\n k.append(dihedral_parms[6]/dihedral_parms[6].unit)\n dt = (typemap[dihedral_parms[0]],typemap[dihedral_parms[1]],typemap[dihedral_parms[2]],\n typemap[dihedral_parms[3]])\n \n \n if periodicity[-1]==6:\n dihedral_dict[dt] = {'periodicity':periodicity,'k':k,'phase':phase}\n \n \n nonbonded_types = []\n nonbonded_dict = {}\n \n for i in range(PairForces.getNumParticles()):\n nonbonded_parms = PairForces.getParticleParameters(index=i)\n charge = nonbonded_parms[0]/nonbonded_parms[0].unit\n sigma = nonbonded_parms[1]/nonbonded_parms[1].unit\n epsilon = nonbonded_parms[2]/nonbonded_parms[2].unit\n nonbonded_types.append((charge,sigma,epsilon))\n nonbonded_dict[(typemap[i])]={'charge':charge,'sigma':sigma,'epsilon':epsilon}\n\n return bond_dict,angle_dict,dihedral_dict,nonbonded_dict\n\ndef typing(BondForces,PairForces,molecule,omm_system):\n import parmed as pmd\n topology = molecule.to_topology()\n openmm_topology = topology.to_openmm()\n structure = pmd.openmm.load_topology(topology=openmm_topology, system=omm_system)\n structure.bonds.sort(key=lambda x: x.atom1.idx)\n \n \n for i in range(len(molecule.atoms)):\n if molecule.atoms[i].atomic_number == 6:\n molecule.atoms[i].name = 'C'\n if molecule.atoms[i].atomic_number == 1:\n molecule.atoms[i].name = 'H'\n if molecule.atoms[i].atomic_number == 7:\n molecule.atoms[i].name = 'N'\n if molecule.atoms[i].atomic_number == 16:\n molecule.atoms[i].name = 'S'\n if molecule.atoms[i].atomic_number == 8:\n molecule.atoms[i].name = 'O'\n if molecule.atoms[i].atomic_number == 9:\n molecule.atoms[i].name = 'F'\n \n import networkx as nx\n Gopenmm = nx.Graph()\n Gparmed = nx.Graph()\n #openmm:\n for i in range(BondForces.getNumBonds()):\n Gopenmm.add_edge(BondForces.getBondParameters(index=i)[0],BondForces.getBondParameters(index=i)[1])\n #parmed\n for b in structure.bonds:\n Gparmed.add_edge(b.atom1.idx,b.atom2.idx)\n \n particle_types = []\n type_map = dict()\n \n #nx.rooted_tree_isomorphism\n #in here we still need to check that one known index on one corresponds to the same index on the other....\n tree_openmm = nx.bfs_tree(Gopenmm,0)\n tree_parmed = nx.bfs_tree(Gparmed,0)\n if nx.is_isomorphic(Gopenmm,Gparmed):\n #if nx.isomorphism.tree_isomorphism(tree_openmm,tree_parmed): <- want this work\n for i in range(PairForces.getNumParticles()):\n pair_parms = PairForces.getParticleParameters(index=i)\n sigma = pair_parms[1]/pair_parms[1].unit\n epsilon = pair_parms[2]/pair_parms[2].unit\n if (sigma, epsilon) not in particle_types: \n particle_types.append((sigma, epsilon))\n type_map[molecule.atoms[i].molecule_atom_index] = \"\".join([molecule.atoms[i].name , \n str(particle_types.index((sigma, epsilon)))])\n return type_map\n\ndef esp_to_xml(MOL2FILEPATH,XMLFILEPATH,PDBFILEPATH,TYPEDFILEPATH):\n off_molecule = 
openff_Molecule(MOL2FILEPATH=MOL2FILEPATH,PDBFILEPATH=PDBFILEPATH)\n    openmm_system = espaloma(molecule=off_molecule)\n    pair_forces = openmm_system.getForces()[1]\n    angle_forces = openmm_system.getForces()[3]\n    bond_forces = openmm_system.getForces()[2]\n    torsion_forces = openmm_system.getForces()[0]\n    type_map = typing(BondForces=bond_forces,PairForces=pair_forces,molecule=off_molecule,omm_system=openmm_system)\n    comp_rename = mb.load(PDBFILEPATH)\n    for index in type_map:\n        comp_rename[index].name = type_map[index]\n    # Build all four parameter dictionaries with a single call instead of\n    # recomputing them four times.\n    bond_dict, angle_dict, dihedral_dict, nonbonded_dict = dictionaries(\n        typemap=type_map,BondForces=bond_forces,AngleForces=angle_forces,\n        TorsionForces=torsion_forces,PairForces=pair_forces)\n    \n    mbuild_to_foyer_xml(\n        file_name=XMLFILEPATH, #change this to whatever you want to save your xml file as\n        compound=comp_rename,\n        bond_params=bond_dict,\n        angle_params=angle_dict,\n        dihedral_params=dihedral_dict,\n        dihedral_type=\"periodic\",\n        non_bonded_params=nonbonded_dict,\n        combining_rule=\"geometric\",\n        name=\"\",\n        version=\"\",\n        coulomb14scale=1.0,\n        lj14scale=1.0)\n    comp_rename.save(TYPEDFILEPATH, overwrite=True) #change this to match your molecule name. \n", "repo_name": "cmelab/forcefields", "sub_path": "functions/esp_functions.py", "file_name": "esp_functions.py", "file_ext": "py", "file_size_in_byte": 7720, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "warnings.filterwarnings", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 29, "usage_type": "call"}, {"api_name": "rdkit.Chem.MolFromMol2File", "line_number": 33, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 33, "usage_type": "name"}, {"api_name": "openff.toolkit.topology.Molecule.from_rdkit", "line_number": 34, "usage_type": "call"}, {"api_name": "openff.toolkit.topology.Molecule", "line_number": 34, "usage_type": "name"}, {"api_name": "bondwalk.bond_walk.BondWalker", "line_number": 38, "usage_type": "call"}, {"api_name": "espaloma.Graph", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 45, "usage_type": "call"}, {"api_name": "espaloma.graphs.deploy.openmm_system_from_graph", "line_number": 47, "usage_type": "call"}, {"api_name": "espaloma.graphs", "line_number": 47, "usage_type": "attribute"}, {"api_name": "parmed.openmm.load_topology", "line_number": 105, "usage_type": "call"}, {"api_name": "parmed.openmm", "line_number": 105, "usage_type": "attribute"}, {"api_name": "networkx.Graph", "line_number": 124, "usage_type": "call"}, {"api_name": "networkx.Graph", "line_number": 125, "usage_type": "call"}, {"api_name": "networkx.bfs_tree", "line_number": 138, "usage_type": "call"}, {"api_name": "networkx.bfs_tree", "line_number": 139, "usage_type": "call"}, {"api_name": "networkx.is_isomorphic", "line_number": 140, "usage_type": "call"}, {"api_name": "mbuild.load", "line_number": 160, 
"usage_type": "call"}, {"api_name": "writers.foyer_xml_writer.mbuild_to_foyer_xml", "line_number": 172, "usage_type": "call"}]} +{"seq_id": "40659643607", "text": "from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\n\nfrom myapp.models import Order, Review, Student, Topic\n\n\nclass SearchForm(forms.Form):\n LENGTH_CHOICES = [\n (8, '8 Weeks'),\n (10, '10 Weeks'),\n (12, '12 Weeks'),\n (14, '14 Weeks'),\n ]\n name = forms.CharField(max_length=100, required=False, label='Student Name')\n length = forms.TypedChoiceField(widget=forms.RadioSelect, choices=LENGTH_CHOICES, coerce=int,\n label='Preferred course duration', required=False)\n max_price = forms.DecimalField(label='Maximum Price', required=True, min_value=0)\n\n\nclass OrderForm(forms.ModelForm):\n class Meta:\n model = Order\n fields = ['courses', 'student', 'order_status']\n widgets = {'courses': forms.CheckboxSelectMultiple(), 'order_type': forms.RadioSelect}\n labels = {'student': u'Student Name', }\n\n\nclass ReviewForm(forms.ModelForm):\n class Meta:\n model = Review\n fields = ['reviewer', 'course', 'rating', 'comments']\n widgets = {'course': forms.RadioSelect}\n labels = {'reviewer': u'Please enter a valid email',\n 'rating': u'Rating: An integer between 1 (worst) and 5 (best)'}\n\n\nclass OrderForm(forms.ModelForm):\n class Meta:\n model = Order\n fields = ['courses', 'student', 'order_status']\n widgets = {'courses': forms.CheckboxSelectMultiple(), 'order_type': forms.RadioSelect}\n labels = {'student': u'Student Name', }\n\n\nclass RegisterForm(UserCreationForm):\n def __init__(self, *args, **kwargs):\n super(RegisterForm, self).__init__(*args, **kwargs)\n # do not require password confirmation\n del self.fields['password2']\n\n class Meta:\n model = Student\n fields = ['username', 'first_name', 'last_name', 'province', 'interested_in']\n\n widgets = {\n 'interested_in': forms.CheckboxSelectMultiple,\n }\n\n labels = {\n 'first_name': 'First Name',\n 'last_name': 'Last Name',\n 'interested_in': 'Interested In'\n }\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(max_length=100)\n password = forms.CharField(widget=forms.PasswordInput())\n\n\nclass UploadImageForm(forms.ModelForm):\n class Meta:\n model = Student\n fields = ['image']\n labels = {\n 'image': 'Upload Image'\n }\n", "repo_name": "parth-kamani/E-Novice", "sub_path": "F20Lab04T2/myapp/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 2478, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.forms.Form", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 8, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 15, "usage_type": "name"}, {"api_name": "django.forms.TypedChoiceField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 16, "usage_type": "name"}, {"api_name": "django.forms.RadioSelect", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.forms.DecimalField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 18, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 21, "usage_type": "name"}, {"api_name": "myapp.models.Order", "line_number": 
23, "usage_type": "name"}, {"api_name": "django.forms.CheckboxSelectMultiple", "line_number": 25, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 25, "usage_type": "name"}, {"api_name": "django.forms.RadioSelect", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.forms.ModelForm", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 29, "usage_type": "name"}, {"api_name": "myapp.models.Review", "line_number": 31, "usage_type": "name"}, {"api_name": "django.forms.RadioSelect", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 33, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 38, "usage_type": "name"}, {"api_name": "myapp.models.Order", "line_number": 40, "usage_type": "name"}, {"api_name": "django.forms.CheckboxSelectMultiple", "line_number": 42, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 42, "usage_type": "name"}, {"api_name": "django.forms.RadioSelect", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.forms.UserCreationForm", "line_number": 46, "usage_type": "name"}, {"api_name": "myapp.models.Student", "line_number": 53, "usage_type": "name"}, {"api_name": "django.forms.CheckboxSelectMultiple", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 57, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 67, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 67, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 68, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 68, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 69, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 69, "usage_type": "name"}, {"api_name": "django.forms.PasswordInput", "line_number": 69, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 72, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 72, "usage_type": "name"}, {"api_name": "myapp.models.Student", "line_number": 74, "usage_type": "name"}]} +{"seq_id": "32876827921", "text": "from typing import List\n\nclass Solution:\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n nums = sorted(nums1 + nums2)\n total_len = len(nums1) + len(nums2)\n i = total_len // 2\n if total_len % 2:\n return nums[i]\n else:\n return (nums[i - 1] + nums[i]) / 2\n\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n m = len(nums1)\n n = len(nums2)\n odd = (m + n) % 2\n middle = (m + n - 1) // 2\n\n ret = sorted(nums1+nums2)\n \n if odd:\n return ret[middle]\n else:\n return (ret[middle] + ret[middle + 1]) / 2 \n\nnums1 = [1,3]\nnums2 = [2]\n\nnums1 = [1,2]\nnums2 = [3,4]\nanswer = Solution().findMedianSortedArrays(nums1, nums2)\nprint(answer)", "repo_name": "yeardream-high6/coding_test", "sub_path": "이부경/LeetCode/100/4. Median of Two Sorted Arrays.py", "file_name": "4. 
Median of Two Sorted Arrays.py", "file_ext": "py", "file_size_in_byte": 834, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.List", "line_number": 4, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "27730037469", "text": "import nltk\nimport gensim\n\nfrom enum import Enum\nfrom client.client import Client\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import LancasterStemmer\n\nuri = \"<uri>\"\napi_key = \"<api_key>\"\nsecret_key = \"<secret_key>\"\nhealthServiceUrl = \"<url>\"\nlanguage = \"en-gb\"\nGender = Enum('Gender', 'Male Female')\n\nSelectorStatus = Enum('SelectorStatus', 'Man Woman Boy Girl')\n\nnltk.download('wordnet')\nnltk.download('punkt')\nnltk.download('stopwords')\nlemmer = nltk.WordNetLemmatizer()\nlancaster = LancasterStemmer()\nenglish_stopwords = set(nltk.corpus.stopwords.words('english'))\n\n\nclass Bot:\n\n def __init__(self, client):\n self._client = client\n raw_documents = self._client.loadSymptoms()\n self._symptoms = []\n self._ids = []\n for r in raw_documents:\n self._symptoms.append(r.get(\"Name\"))\n self._ids.append(r.get(\"ID\"))\n gen_docs = [self._get_tokens(text.lower()) for text in self._symptoms]\n stem_docs = self.stem_words(gen_docs)\n self._plain_dictionary = gensim.corpora.Dictionary(gen_docs)\n self._dictionary = gensim.corpora.Dictionary(stem_docs)\n self._corpus = [self._dictionary.doc2bow(gen_doc) for gen_doc in stem_docs]\n self._tf_idf = gensim.models.TfidfModel(self._corpus)\n self._sims = gensim.similarities.Similarity(\"C:\\\\TextMining\\\\\", self._tf_idf[self._corpus],\n num_features=len(self._dictionary))\n\n def _get_tokens(self, text):\n tokens = word_tokenize(text)\n return tokens\n\n def get_most_similiar(self, list):\n max = -1\n index = 0\n for i in list:\n if (i > max):\n max = i\n\n if max == 0:\n return -1\n for i in list:\n if i == max:\n return index\n else:\n index += 1\n\n def transform_to_words(self, list):\n result = \"\"\n for tuple in list:\n word = self._plain_dictionary[tuple[0]]\n result = result + \" \" + word\n return result.strip()\n\n def stem_words(self, docs):\n stem_docs = []\n\n for doc in docs:\n stem_doc = []\n for word in doc:\n stem_word = lancaster.stem(word)\n stem_doc.append(stem_word)\n stem_docs.append(stem_doc)\n return stem_docs\n\n def stem_query(self, query):\n query_doc = []\n for word in query:\n query_doc.append(lancaster.stem(word))\n return query_doc\n\n\n def talk_to_Bot(self, query):\n tokens = self._get_tokens(query)\n query_doc = self.stem_query(tokens)\n query_doc_bow = self._dictionary.doc2bow(query_doc)\n query_doc_tf_idf = self._tf_idf[query_doc_bow]\n index = self.get_most_similiar(self._sims[query_doc_tf_idf])\n if index == -1:\n return {'name': \"I did not understand, can you please rephrase?\",\n 'id': -1}\n symptom = self._symptoms[index].lower()\n id = self._ids[index]\n return {'name': symptom,\n 'id': id}\n", "repo_name": "cristure/Chansey-Server", "sub_path": "bot/bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 3118, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "enum.Enum", "line_number": 14, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 16, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 18, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 19, 
"usage_type": "call"}, {"api_name": "nltk.download", "line_number": 20, "usage_type": "call"}, {"api_name": "nltk.WordNetLemmatizer", "line_number": 21, "usage_type": "call"}, {"api_name": "nltk.stem.LancasterStemmer", "line_number": 22, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 23, "usage_type": "call"}, {"api_name": "nltk.corpus", "line_number": 23, "usage_type": "attribute"}, {"api_name": "client.client", "line_number": 29, "usage_type": "name"}, {"api_name": "gensim.corpora.Dictionary", "line_number": 38, "usage_type": "call"}, {"api_name": "gensim.corpora", "line_number": 38, "usage_type": "attribute"}, {"api_name": "gensim.corpora.Dictionary", "line_number": 39, "usage_type": "call"}, {"api_name": "gensim.corpora", "line_number": 39, "usage_type": "attribute"}, {"api_name": "gensim.models.TfidfModel", "line_number": 41, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 41, "usage_type": "attribute"}, {"api_name": "gensim.similarities.Similarity", "line_number": 42, "usage_type": "call"}, {"api_name": "gensim.similarities", "line_number": 42, "usage_type": "attribute"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "7383673883", "text": "import urllib.request\nimport zipfile\nimport os \nfrom pathlib import Path\nimport pandas as pd\nfrom pyparsing import col\n\n#import DataRetrieving as dr\ndef _retrieveDatas_ (pairs):\n market = 'spot'\n dataType = 'monthly'\n date = '2022-06'\n frequency = '5m'\n print (\"L =\", pairs)\n for qC in pairs: #quote currency\n QuoteCurrency = qC\n for bC in pairs[qC]: #base currency\n BaseCurrency = bC.replace(qC,'')\n retrieveBinanceDatas (market, dataType, BaseCurrency, QuoteCurrency, frequency, date)\n\n\ndef retrieveBinanceDatas (market, dataType, \n baseCurrency, quoteCurrency, \n frequency, date, futuresUSDTorCoin='um'):\n fileName = baseCurrency+quoteCurrency+'-'+frequency+'-'+date\n fileNameZip= fileName+'.zip'\n filenameCSV= fileName+'.csv'\n\n if market == \"futures\":\n market+'/'+futuresUSDTorCoin\n\n url = 'https://data.binance.vision/data/'\n url += market+'/'+dataType+'/klines/'+baseCurrency+quoteCurrency+'/'\n url += frequency+'/'+fileNameZip\n print (\"Retrieving url file = \", url)\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n directory_to_extract_to = dir_path+'\\datas\\\\'+market+'\\\\'+dataType+'\\\\'+baseCurrency+quoteCurrency+'\\\\'+frequency\n\n my_file = Path(directory_to_extract_to+'\\\\'+filenameCSV)\n if my_file.exists():\n print(filenameCSV)\n print('File already exists.')\n return\n\n try:\n filehandle, _ = urllib.request.urlretrieve(url)\n except Exception:\n print(\"url Not Valid ?\")\n return\n with zipfile.ZipFile(filehandle, 'r') as zip_ref:\n zip_ref.extractall(directory_to_extract_to)\n\n\ndef CSVtoDataFrame (file):\n hist_df = pd.read_csv(file)\n hist_df.columns = ['Open Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close Time', 'Quote Asset Volume', \n 'Number of Trades', 'TB Base Volume', 'TB Quote Volume', 'Ignore']\n hist_df['Open Time'] = pd.to_datetime(hist_df['Open Time']/1000, unit='s')\n hist_df['Close Time'] = pd.to_datetime(hist_df['Close Time']/1000, unit='s')\n numeric_columns = ['Open', 'High', 'Low', 'Close', 'Volume', 'Quote Asset Volume', 'TB Base Volume', 'TB Quote Volume']\n hist_df[numeric_columns] = hist_df[numeric_columns].apply(pd.to_numeric, axis=1)\n \n return hist_df\n\n# def main():\n # file = 
\"C:\\\\Users\\\\benig\\\\Documents\\\\Projects\\\\TradingBot\\\\datas\\\\Spot\\\\Daily\\\\BTCUSDT\\\\30m\\BTCUSDT-30m-2022-07-12.csv\"\n # df = dr.CSVtoDataFrame(file)\n #print (df)\n\n # market = 'spot'\n # dataType = 'monthly'\n # BaseCurrency = 'BTC'\n # QuoteCurrency = 'USDT'\n # date = '2022-06'\n\n # tmpinterval = Constants.daily_intervals\n # if dataType == 'monthly':\n # tmpinterval = Constants.monthly_intervals\n\n # for frequency in tmpinterval:\n # dr.retrieveBinanceDatas (market, dataType, BaseCurrency, QuoteCurrency, frequency, date)", "repo_name": "BeniGottesman/TradingBot", "sub_path": "Deprecated/DataRetrieving.py", "file_name": "DataRetrieving.py", "file_ext": "py", "file_size_in_byte": 2951, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.path.dirname", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 37, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 40, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 47, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 47, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 47, "usage_type": "name"}, {"api_name": "zipfile.ZipFile", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 60, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 62, "usage_type": "attribute"}]} +{"seq_id": "878449482", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCommand-line utility for monitoring text files.\n\nThis utility will recursively monitor a given directory for the presence\nof files ending in a specified extension for the presence of a given\ntext string. 
It is assumed that monitored files will only ever be\nadded, removed, or appended to (not edited),\nas in the case of typical log files.\n\"\"\"\n\n__author__ = \"techieben with signal help from Bryan Fernandez\"\n\nimport sys\nimport logging\nimport datetime\nimport time\nimport argparse\nimport os\nimport linecache\nimport signal\n\n# Python3 interpreter check:\nif sys.version_info[0] < 3:\n raise Exception(\"This program requires python3 interpreter\")\n\n# Global exit flag:\nexit_flag = False\n\n# Logger instantiation:\nlogger = logging.getLogger(__file__)\n\n\ndef signal_handler(sig_num, frame):\n \"\"\"\n This is a handler for SIGTERM and SIGINT.\n Other signals can be mapped here as well.\n Sets a global flag that notifies main() to exit its loop\n if the signal is trapped.\n :param sig_num: The integer signal number that was trapped from the OS.\n :param frame: Not used\n :return None\n \"\"\"\n global exit_flag\n logger.warning('Received ' + signal.Signals(sig_num).name)\n exit_flag = True\n\n\ndef watch_directory(args):\n \"\"\"\n Primary program method, consisting of a loop that repeatedly scans\n the requested directory for magic text and logs changes.\n :param args: arguments received from command line\n :return None\n \"\"\"\n # Current iteration of the main execution loop:\n loop_iter = 0\n\n # Dictionary of tracked files and current line for each:\n watching_filepaths = {}\n\n while not exit_flag:\n try:\n loop_iter += 1\n\n # Create/reset list of files matching requested extension:\n files_list = []\n\n # Implement polling interval:\n time.sleep(args.interval)\n\n # When in debug mode, ongoing indication of activity in terminal:\n logger.debug(f\"Directory: {args.path}, \"\n f\"Extension: {args.ext}, \"\n f\"Magic Word: {args.magic}, \"\n f\"Iteration: {loop_iter}, \"\n f\"PID: {os.getpid()}\")\n\n # Raise FileNotFound on every iteration if path does not exist:\n os.listdir(args.path)\n\n # Build files_list of files found with specified extension:\n for root, dirs, files in os.walk(args.path):\n for filename in files:\n if filename.endswith(args.ext):\n files_list.append(os.path.join(root, filename))\n\n # Add new items to watching_filepaths based on files_list:\n for filepath in files_list:\n if filepath not in watching_filepaths:\n logger.info(f\"{filepath} found\")\n watching_filepaths[filepath] = 1\n\n # Remove item from watching_filepaths if not in current files_list\n # else scan item for magic text and update current_line:\n for filepath in list(watching_filepaths.keys()):\n if filepath not in files_list:\n logger.info(f\"{filepath} removed\")\n del(watching_filepaths[filepath])\n else:\n current_line = watching_filepaths[filepath]\n while True:\n linecache.checkcache(filepath)\n line = linecache.getline(filepath, current_line)\n if (line == \"\"):\n break\n elif args.magic in line:\n logger.info(\n f\"Magic found in {filepath} \"\n f\"on line {current_line}\"\n )\n current_line += 1\n else:\n current_line += 1\n watching_filepaths[filepath] = current_line\n except FileNotFoundError as e:\n logger.info(e)\n\n\ndef create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--ext', type=str, default='.txt',\n help='Text file extension to watch')\n parser.add_argument('-i', '--interval', type=float,\n default=1.0, help='Number of seconds between polling')\n parser.add_argument('path', help='Directory path to watch')\n parser.add_argument('magic', help='String to watch for')\n return parser\n\n\ndef main():\n \"\"\"\n Initializes signal handling, 
parser, and logging,\n then scans until interrupted.\n :return None\n \"\"\"\n app_start_time = datetime.datetime.now()\n\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n\n parser = create_parser()\n args = parser.parse_args()\n\n logging.basicConfig(format='%(asctime)s.%(msecs)03d %(name)-12s '\n '%(levelname)-8s [%(threadName)-12s] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n handlers=[\n # logging.FileHandler(\"dirwatcher.log\", mode='a'),\n logging.StreamHandler()\n ]\n )\n\n logger.setLevel(logging.INFO)\n\n logger.info(\n '\\n'\n '-----------------------------------------------------------\\n'\n ' Running {0}\\n'\n ' PID: {1}\\n'\n ' Started on: {2}\\n'\n ' Watching Directory: {3}\\n'\n ' File Ext: {4}\\n'\n ' Polling Interval: {5}\\n'\n ' Magic Text: {6}\\n'\n '-----------------------------------------------------------\\n'\n .format(__file__, os.getpid(), app_start_time.isoformat(),\n args.path, args.ext, args.interval, args.magic)\n )\n\n while not exit_flag:\n try:\n watch_directory(args)\n except Exception as e:\n logger.info(e)\n finally:\n uptime = datetime.datetime.now()-app_start_time\n logger.info(\n '\\n'\n '-----------------------------------------------------------\\n'\n ' Stopped {0}\\n'\n ' Uptime: {1}\\n'\n '-----------------------------------------------------------\\n'\n .format(__file__, str(uptime))\n )\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "techieben/kaseq3-dirwatcher", "sub_path": "dirwatcher.py", "file_name": "dirwatcher.py", "file_ext": "py", "file_size_in_byte": 6494, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.version_info", "line_number": 26, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 33, "usage_type": "call"}, {"api_name": "signal.Signals", "line_number": 47, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 72, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 79, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 82, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "linecache.checkcache", "line_number": 105, "usage_type": "call"}, {"api_name": "linecache.getline", "line_number": 106, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 123, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 139, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 141, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 141, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 142, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 142, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 147, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 152, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 156, "usage_type": "attribute"}, {"api_name": "os.getpid", "line_number": 169, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 179, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 179, "usage_type": "attribute"}]} +{"seq_id": 
"26803106595", "text": "import unittest\nfrom pathlib import Path\n\nimport yaml\nfrom cooklang import parse\n\nCANONICAL_TESTS_FILE = Path(__file__).parent / \"canonical.yaml\"\n\n\nclass TestCanonical(unittest.TestCase):\n def test_canonical(self) -> None:\n tests = yaml.safe_load(CANONICAL_TESTS_FILE.read_text())\n for name, test in tests[\"tests\"].items():\n print(name)\n result = test[\"result\"]\n cooklang_result = parse(test[\"source\"])\n\n n_metadata_cooklang = 0\n line_index = 0\n for r in cooklang_result:\n if len(r) == 1 and \"type\" in r[0] and r[0][\"type\"] == \"metadata\":\n # metadata\n n_metadata_cooklang += 1\n name = r[0][\"key\"]\n self.assertTrue(name in result[\"metadata\"])\n self.assertEqual(result[\"metadata\"][name], r[0][\"value\"])\n else:\n # parser don't output empty text, canonical does: remove empty text from canonical\n canonical_steps = [\n e_canonical\n for e_canonical in result[\"steps\"][line_index]\n if not (\n \"type\" in e_canonical\n and \"value\" in e_canonical\n and e_canonical[\"type\"] == \"text\"\n and e_canonical[\"value\"].strip() == \"\"\n )\n ]\n\n self.assertEqual(len(canonical_steps), len(r))\n for e_canonical, e_parser in zip(canonical_steps, r):\n if e_canonical[\"type\"] == \"text\":\n self.assertTrue(\"text\" in e_parser)\n self.assertEqual(e_parser[\"text\"], e_canonical[\"value\"].strip())\n else:\n # quantity is not managed the same way between canonical and parser\n # - if quantity is not a string in canonical, transform it to string\n # - if quantity is a default value, remove the default value\n # - if quantity is a frac, then change it to string representation\n if \"quantity\" in e_canonical:\n e_canonical[\"quantity\"] = str(e_canonical[\"quantity\"])\n if e_parser[\"quantity\"] == \"\":\n self.assertIn(e_canonical[\"quantity\"], [\"1\", \"some\"])\n e_canonical[\"quantity\"] = \"\"\n if e_parser[\"quantity\"] != e_canonical[\"quantity\"]:\n # then probably fraction\n self.assertEqual(eval(e_parser[\"quantity\"]), eval(e_canonical[\"quantity\"]))\n e_canonical[\"quantity\"] = e_parser[\"quantity\"]\n self.assertEqual(e_canonical, e_parser)\n\n line_index += 1\n self.assertEqual(line_index, len(result[\"steps\"]))\n self.assertEqual(n_metadata_cooklang, len(result[\"metadata\"]))\n", "repo_name": "Net-Mist/cooklang-rs", "sub_path": "tests/test_canonicals.py", "file_name": "test_canonicals.py", "file_ext": "py", "file_size_in_byte": 3191, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pathlib.Path", "line_number": 7, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 12, "usage_type": "call"}, {"api_name": "cooklang.parse", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "7076583480", "text": "import csv\nimport os\nimport re\nimport sys\nimport time\nfrom dataclasses import dataclass\nfrom typing import Union\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.webdriver import WebDriver as ChromeWebDriver\nfrom selenium.webdriver.chrome.service import Service as ChromeService\nfrom selenium.webdriver.chrome.options import Options as ChromeOptions\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n\n@dataclass\nclass SiteFormat:\n name: str\n url: str\n\n\ndef get_webdriver() -> Union[ChromeWebDriver, None]:\n driver: Union[ChromeWebDriver, None] = None\n\n try:\n 
options = ChromeOptions()\n options.headless = True\n\n driver = webdriver.Chrome(\n service=ChromeService(executable_path=ChromeDriverManager().install()),\n options=options,\n )\n except Exception as err:\n print(err)\n\n return driver\n\n\ndef main() -> None:\n MIN_WIDTH = 320\n DEFAULT_WIDTH = 1920\n DEFAULT_HEIGHT = 1080\n\n now = time.strftime(\"%Y%m%d_%H%M%S\")\n base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n for arg in sys.argv[1:]:\n result = re.match(r\"^--width=([0-9]+)$\", arg)\n\n if result:\n parsed_width = int(result[1])\n\n if parsed_width >= MIN_WIDTH:\n DEFAULT_WIDTH = parsed_width\n\n with open(f\"{base_path}/src/capture_list.csv\") as csv_file:\n valid_sites: list[SiteFormat] = []\n csv_reader = csv.reader(csv_file, delimiter=\",\")\n\n for row in csv_reader:\n try:\n name = row[0]\n url = row[1]\n valid_sites.append(SiteFormat(name=name, url=url))\n except Exception as err:\n print(err)\n\n output_path = f\"{base_path}/screenshots/{now}\"\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n if len(valid_sites) == 0:\n f = open(f\"{output_path}/NO_VALID_SITES\", \"x\")\n f.close()\n return\n\n driver = get_webdriver()\n\n if not driver:\n f = open(f\"{output_path}/CHROME_BROWSER_REQUIRED\", \"x\")\n f.close()\n return\n\n for site in valid_sites:\n driver.set_window_size(DEFAULT_WIDTH, DEFAULT_HEIGHT)\n driver.get(site.url)\n time.sleep(1)\n\n site_height = driver.execute_script( # type: ignore\n \"\"\"return (() => {\n const elem = document.querySelector('html');\n\n elem.style.overflowY = 'visible';\n\n return elem.scrollHeight;\n })();\"\"\"\n )\n assert type(site_height) is int\n\n driver.set_window_size(DEFAULT_WIDTH, site_height)\n driver.find_element(by=By.TAG_NAME, value=\"html\").screenshot(\n f\"{output_path}/{site.name}.png\"\n )\n\n driver.quit()\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "ony3000/capture-screenshot", "sub_path": "src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2991, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "dataclasses.dataclass", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 24, "usage_type": "name"}, {"api_name": "selenium.webdriver.chrome.webdriver.WebDriver", "line_number": 24, "usage_type": "name"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 27, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 30, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 30, "usage_type": "name"}, {"api_name": "selenium.webdriver.chrome.service.Service", "line_number": 31, "usage_type": "call"}, {"api_name": "webdriver_manager.chrome.ChromeDriverManager", "line_number": 31, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 23, "usage_type": "name"}, {"api_name": "selenium.webdriver.chrome.webdriver.WebDriver", "line_number": 23, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 46, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 48, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 49, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 59, "usage_type": "call"}, 
{"api_name": "os.path.exists", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 72, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 89, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 103, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 103, "usage_type": "name"}]} +{"seq_id": "3287199140", "text": "from django import template\r\nfrom blo.models import Post\r\n\r\nregister = template.Library()\r\n\r\n@register.filter\r\ndef display_name_tag(nome):\r\n nome = nome.split(' ')\r\n nome = str(nome[0]) +' '+str(nome[-1])\r\n return nome\r\n\r\n@register.simple_tag\r\ndef display_curses_tag(valor):\r\n\r\n courses = Courses.objects.filter(value=valor).order_by('name')\r\n if courses:\r\n return courses\r\n return False\r\n\r\ndef DIA_SEMANA():\r\n DIAS = (\r\n ('2', 'Segunda-feira'),\r\n ('3', 'Terça-feira'),\r\n ('4', 'Quarta-feira'),\r\n ('5', 'Quinta-feira'),\r\n ('6', 'Sexta-feira'),\r\n ('7', 'Sábado'),\r\n )\r\n return DIAS\r\n\r\n\r\n@register.simple_tag\r\ndef display_dia_tag(value):\r\n return dict(DIA_SEMANA()).get(value,'')", "repo_name": "dennyerikson/appvg", "sub_path": "blo/templatetags/poll_extras.py", "file_name": "poll_extras.py", "file_ext": "py", "file_size_in_byte": 758, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.template.Library", "line_number": 4, "usage_type": "call"}, {"api_name": "django.template", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "6569486203", "text": "import requests\nfrom lxml import etree\n# 获取html文本\nheaders = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36\"\n}\nurl = \"https://tieba.baidu.com/f?kw=%E5%90%90%E6%A7%BD\"\nresponse = requests.get(url=url)\n\ntext = response.content.decode(\"utf8\")\nprint(text)\n# print(text)\nhtml = etree.HTML(text)\naaa = html.xpath(\"//div[@class='threadlist_abs threadlist_abs_onlyline ']/text()\")\nprint(aaa)\n\nfor i in aaa:\n item = {}\n item[\"front\"] = i\n print(item)\n", "repo_name": "Aloof-0/codesr", "sub_path": "爬虫/lxml/实战.py", "file_name": "实战.py", "file_ext": "py", "file_size_in_byte": 540, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 13, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "36990039758", "text": "from django.urls import path\nfrom nurseryapp import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('login', views.user_login, name='login'),\n path('registration', views.user_registration, name='registration'),\n path('logout', views.user_logout, name='logout'),\n path('home', views.nursery_home, name='Nursery Home'),\n path('addplants', views.addplants, name='Add Plants'),\n path('nursery', views.nursery, name='Nursery'),\n path('shop', views.shop, name='Shop'),\n path('shop_details', views.shop_details, name='Shop Details'),\n path('checkout', views.checkout, name='Checkout'),\n path('nurseries@', views.nurseries_registration, name='Nurseries Registration'),\n path('order_accept', views.order_accept, name='Order Accept'),\n 
path('order_done', views.order_done, name='Order Done'),\n]\n", "repo_name": "sksubhash/nursery_application", "sub_path": "nurseryapp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 841, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "nurseryapp.views.home", "line_number": 5, "usage_type": "attribute"}, {"api_name": "nurseryapp.views", "line_number": 5, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "nurseryapp.views.user_login", "line_number": 6, "usage_type": "attribute"}, {"api_name": "nurseryapp.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "nurseryapp.views.user_registration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "nurseryapp.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "nurseryapp.views.user_logout", "line_number": 8, "usage_type": "attribute"}, {"api_name": "nurseryapp.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "nurseryapp.views.nursery_home", "line_number": 9, "usage_type": "attribute"}, {"api_name": "nurseryapp.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "nurseryapp.views.addplants", "line_number": 10, "usage_type": "attribute"}, {"api_name": "nurseryapp.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "nurseryapp.views.nursery", "line_number": 11, "usage_type": "attribute"}, {"api_name": "nurseryapp.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "nurseryapp.views.shop", "line_number": 12, "usage_type": "attribute"}, {"api_name": "nurseryapp.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "nurseryapp.views.shop_details", "line_number": 13, "usage_type": "attribute"}, {"api_name": "nurseryapp.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "nurseryapp.views.checkout", "line_number": 14, "usage_type": "attribute"}, {"api_name": "nurseryapp.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "nurseryapp.views.nurseries_registration", "line_number": 15, "usage_type": "attribute"}, {"api_name": "nurseryapp.views", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "nurseryapp.views.order_accept", "line_number": 16, "usage_type": "attribute"}, {"api_name": "nurseryapp.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "nurseryapp.views.order_done", "line_number": 17, "usage_type": "attribute"}, {"api_name": "nurseryapp.views", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "73097484854", "text": "import os\nfrom setuptools import setup\n\n\n# Utility 
function to read the README file.\n# Used for the long_description. It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\nsetup(\n name='bls_pyutils',\n version='0.4',\n packages=['bls_pyutils'],\n url='https://github.com/blacklanternsecurity/BLSPyUtils',\n license='',\n author='Chuck Woodraska',\n author_email='chuck@blacklanternsecurity.com',\n description=read('README.md'),\n install_requires=['PyMySQL', 'jwcrypto', 'stringcase'],\n long_description=read('README.md'),\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Topic :: Utilities\"\n ]\n)\n", "repo_name": "blacklanternsecurity/BLSPyUtils", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 827, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "31447840736", "text": "import requests\nfrom bs4 import BeautifulSoup\n\n# URL of the website to scrape\nurl = \"https://www.bbc.com/news\"\n\n# Send an HTTP GET request to the URL\nresponse = requests.get(url)\n\n# Check if the request was successful (status code 200)\nif response.status_code == 200:\n # Parse the HTML content of the page using BeautifulSoup\n soup = BeautifulSoup(response.text, \"html.parser\")\n \n # Find and print the titles and links of top news articles\n articles = soup.find_all(\"div\", class_=\"gs-c-promo\")\n \n for article in articles:\n headline = article.find(\"h3\", class_=\"gs-c-promo-heading__title\")\n link = article.find(\"a\", class_=\"gs-c-promo-heading gs-o-faux-block-link__overlay-link gel-pica-bold nw-o-link-split__anchor\")\n \n if headline and link:\n article_title = headline.get_text(strip=True)\n article_link = link[\"href\"]\n print(f\"Title: {article_title}\")\n print(f\"Link: {article_link}\")\n print(\"\\n\")\nelse:\n print(\"Failed to retrieve the web page. 
Status code:\", response.status_code)\n", "repo_name": "shannonpieternella/Webscraper-Python", "sub_path": "webscraper.py", "file_name": "webscraper.py", "file_ext": "py", "file_size_in_byte": 1088, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "32707323120", "text": "#!/usr/bin/env pypy3\nimport copy\nfrom collections import Counter\nimport itertools\n\nWIRES = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\"]\n\nDIGITS = {\n \"0\": frozenset(\"abcefg\"),\n \"1\": frozenset(\"cf\"),\n \"2\": frozenset(\"acdeg\"),\n \"3\": frozenset(\"acdfg\"),\n \"4\": frozenset(\"bcdf\"),\n \"5\": frozenset(\"abdfg\"),\n \"6\": frozenset(\"abdefg\"),\n \"7\": frozenset(\"acf\"),\n \"8\": frozenset(\"abcdefg\"),\n \"9\": frozenset(\"abcdfg\"),\n}\n\nREVERSE_DIGITS = {v: k for k, v in DIGITS.items()}\n\n\nLENGTHS = Counter(len(x) for x in DIGITS.values())\n\n\ndef updated_mappings(current_mappings, updates):\n next_mappings = current_mappings.copy()\n for src, tgt in updates:\n existing = next_mappings.get(src)\n if existing == tgt or existing is None:\n next_mappings[src] = tgt\n else:\n raise ValueError(f\"{src}: {tgt} != {existing}\")\n return next_mappings\n\n\ndef solutions_inner(remaining_input, current_mappings):\n if len(remaining_input) == 0:\n yield current_mappings\n return\n\n current_digit = remaining_input[0]\n for word in DIGITS.values():\n if len(word) != len(current_digit):\n continue\n\n for perm in itertools.permutations(word):\n try:\n next_mappings = updated_mappings(current_mappings, zip(current_digit, perm))\n except ValueError as e:\n continue\n else:\n yield from solutions_inner(remaining_input[1:], next_mappings)\n\n\ndef solutions(line):\n yield from solutions_inner(sorted(line, key=lambda x: LENGTHS[len(x)]), {})\n\n\ndef solve_line(line):\n input, output = line.split(\" | \")\n input = input.split(\" \")\n output = output.split(\" \")\n combined = input + output\n for mapping in solutions(combined):\n return int(\"\".join(REVERSE_DIGITS[frozenset(mapping[c] for c in word)] for word in output))\n\n\ndef main():\n result = 0\n with open(\"/Users/zee/Downloads/input.txt\") as f:\n for line in f:\n line = line.strip()\n output = solve_line(line)\n print(output)\n result += output\n print(f\"total: {result}\")\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "technocoreai/aoc2021", "sub_path": "aoc08.py", "file_name": "aoc08.py", "file_ext": "py", "file_size_in_byte": 2176, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "collections.Counter", "line_number": 24, "usage_type": "call"}, {"api_name": "itertools.permutations", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "13043982554", "text": "#!/usr/bin/python3\n\"\"\" Routes for State responses \"\"\"\nfrom api.v1.views import app_views\nfrom flask import jsonify, abort, request\nfrom models import storage\nfrom models.review import Review\nfrom models.place import Place\nfrom models.user import User\n\n\n@app_views.route('/places/<place_id>/reviews', methods=[\"GET\"],\n strict_slashes=False)\ndef all_reviews(place_id=None):\n \"\"\"retrieves a list of all reviews in a place\"\"\"\n place = storage.get(Place, place_id)\n if place is None:\n abort(404)\n reviews = place.reviews\n reviews_list = 
[review.to_dict() for review in reviews]\n return (jsonify(reviews_list), 200)\n\n\n@app_views.route('/reviews/<review_id>', methods=[\"GET\"], strict_slashes=False)\ndef specific_review(review_id=None):\n \"\"\"Get review by id\"\"\"\n review = storage.get(Review, review_id)\n if review is None:\n abort(404)\n return (jsonify(review.to_dict()), 200)\n\n\n@app_views.route('/reviews/<review_id>', methods=[\"DELETE\"],\n strict_slashes=False)\ndef delete_review(review_id):\n \"\"\" Deletes a place by ID \"\"\"\n review = storage.get(Review, review_id)\n if review is None:\n abort(404)\n storage.delete(review)\n storage.save()\n return (jsonify({}), 200)\n\n\n@app_views.route('places/<place_id>/reviews', methods=[\"POST\"],\n strict_slashes=False)\ndef create_review(place_id):\n \"\"\" Creates new review from a place\"\"\"\n req = request.get_json()\n if req is None:\n abort(400, \"Not a JSON\")\n if \"text\" not in req:\n abort(400, \"Missing text\")\n if storage.get(Place, place_id) is None:\n abort(404)\n if \"user_id\" not in req:\n abort(400, \"Missing user_id\")\n user = storage.get(User, req.get('user_id'))\n if not user:\n abort(404)\n\n new_review = Review()\n new_review.place_id = place_id\n for key, value in req.items():\n setattr(new_review, key, value)\n new_review.save()\n return (jsonify(new_review.to_dict()), 201)\n\n\n@app_views.route('reviews/<review_id>', methods=[\"PUT\"], strict_slashes=False)\ndef update_review(review_id):\n \"\"\" Update a review \"\"\"\n review = storage.get(Review, review_id)\n if review is None:\n abort(404)\n req = request.get_json()\n if req is None:\n abort(400, \"Not a JSON\")\n for key, value in req.items():\n if key in ['id', 'user_id', 'place_id', 'created_at', 'updated_at']:\n continue\n else:\n setattr(review, key, value)\n review.save()\n return (jsonify(review.to_dict()), 200)\n", "repo_name": "Frank-Grijalba/AirBnB_clone_v3", "sub_path": "api/v1/views/places_reviews.py", "file_name": "places_reviews.py", "file_ext": "py", "file_size_in_byte": 2554, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "models.storage.get", "line_number": 15, "usage_type": "call"}, {"api_name": "models.place.Place", "line_number": 15, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 20, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 11, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 11, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 26, "usage_type": "call"}, {"api_name": "models.review.Review", "line_number": 26, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 29, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 23, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 23, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 36, "usage_type": "call"}, {"api_name": "models.review.Review", "line_number": 36, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 38, "usage_type": "call"}, {"api_name": 
"models.storage.delete", "line_number": 39, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 39, "usage_type": "name"}, {"api_name": "models.storage.save", "line_number": 40, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 41, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 32, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 52, "usage_type": "call"}, {"api_name": "models.storage.get", "line_number": 53, "usage_type": "call"}, {"api_name": "models.place.Place", "line_number": 53, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 56, "usage_type": "call"}, {"api_name": "models.storage.get", "line_number": 57, "usage_type": "call"}, {"api_name": "models.user.User", "line_number": 57, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 59, "usage_type": "call"}, {"api_name": "models.review.Review", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 66, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 44, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 44, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 72, "usage_type": "call"}, {"api_name": "models.review.Review", "line_number": 72, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 84, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 69, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "72678744374", "text": "import librosa\nimport numpy as np\nimport soundfile as sf\nimport time\nimport os\nimport sys\nimport shutil\n\n\"\"\"\nReturns an item from the list if it exists, otherwise returns None\n\n@param my_list: the list to check\n@param index_to_check: the index to check\n@return: the item at the index or None\n\"\"\"\ndef get_item_or_none(my_list, index_to_check):\n if index_to_check < len(my_list):\n return my_list[index_to_check]\n else:\n return None\n\n\"\"\"\nReturns the file name without the extension\n\n@param path: the path to the file\n@return: the file name without the extension\n\"\"\"\ndef get_file_name(path):\n file_name = os.path.basename(path)\n file_name_without_extension = os.path.splitext(os.path.splitext(file_name)[0])[0]\n return file_name_without_extension\n\n\"\"\"\nSplits an audio file into segments of the specified duration\n\n@param audio_path: the path to the audio file\n@param segment_duration: the 
duration of each segment (in seconds)\n@param output_directory: the directory to save the segments to\n\"\"\"\ndef split_audio(audio_path, segment_duration, output_directory):\n # Load the audio file\n audio, sr = librosa.load(audio_path, sr=16000)\n\n # Calculate the number of samples for the desired segment duration\n segment_samples = int(segment_duration * sr)\n\n # Calculate the number of segments\n num_segments = len(audio) // segment_samples\n\n fileName = get_file_name(audio_path)\n\n # Split the audio file into segments\n for i in range(num_segments):\n # Calculate the start and end samples for the segment\n start_sample = i * segment_samples\n end_sample = (i + 1) * segment_samples\n\n # Extract the segment\n segment = audio[start_sample:end_sample]\n\n # Save the segment to a file\n segment_path = os.path.join(output_directory, f\"{fileName}-{i+1}.wav\")\n\n # write to file\n sf.write(segment_path, segment, 16000, format=\"wav\", subtype='PCM_16')\n\n print(f\"{num_segments} segments created in the '{output_directory}' directory.\")\n\n\"\"\"\nSplits an audio file into segments of the specified duration\n\"\"\"\ndef main():\n # Get command line arguments\n audioSize = get_item_or_none(sys.argv, 3)\n fileName = get_item_or_none(sys.argv, 1)\n folderName = get_item_or_none(sys.argv, 2)\n\n #simple checks for quality of life improvements\n if fileName == None:\n print(\"Specify a name please\")\n sys.exit(0)\n\n if folderName == None:\n print(\"Specify a name folder please\")\n sys.exit(0)\n\n if audioSize == None:\n print(\"Specify audio size please\")\n sys.exit(0)\n\n folder_path = os.path.join(os.getcwd(), folderName)\n # Check if the folder exists\n if os.path.exists(folder_path):\n # Remove the existing folder\n shutil.rmtree(folder_path)\n\n # Create the folder\n folder = os.mkdir(folder_path)\n\n\n split_audio(fileName, int(audioSize), folder_path)\n\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n main()\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n", "repo_name": "VivianhNguyen/bachelorthesis", "sub_path": "microphone only/microphone only/microphone/Python/AudioDecimator/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3079, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.path.basename", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "librosa.load", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "soundfile.write", "line_number": 65, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 74, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 75, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 76, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 81, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 85, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.getcwd", 
"line_number": 91, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 95, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "25423723017", "text": "import gym\nimport numpy as np\n\nfrom keras.layers import *\nfrom keras.models import Model\n\n# Configurations\n\nenvName = \"CartPole-v0\"\nenv = gym.make(envName)\n\nn_states = env.observation_space.shape\nn_actions = env.action_space.n\n\nprint(\"number of states:\", n_states)\nprint(\"number of actions:\", n_actions)\n\n\n# Hyper parametres\nmemory_size = 10000\nupdate_rate = 30\nmemroy_count = 0\n\nbatch_size = 64\n\n\nn_episodes = 100\n\nepsilon = 1\nmin_epsilon = 0.1\ndecay_rate = 0.001\n\ngamma = 0.999 # Discount factor\n\n\n\nclass Agent():\n\n def __init__(self, n_states, epsilon,\n min_epsilon, decay_rate,\n memory_size, memory_count,\n batch_size, update_rate, gamma):\n\n self.n_states = n_states[0]\n self.batch_size = batch_size\n self.q_valueModel = self.build_model(do_compile=True)\n self.target_qValueModel = self.build_model(do_compile=False)\n self.epsilon = epsilon\n self.min_epsilon = min_epsilon\n self.decay_rate = decay_rate\n self.memory_size = memory_size\n self.memory = np.zeros(shape=(self.memory_size, 11))\n self.memory_count = memory_count\n self.gamma = gamma\n self.update_rate = update_rate\n self.train_counter = 0\n\n\n def build_model(self, do_compile):\n\n inputs = Input(shape=(self.n_states,))\n\n x = inputs\n\n x = Dense(units=256,\n activation=\"relu\",\n kernel_initializer=\"he_normal\")(x)\n x = Dense(units=256,\n activation=\"relu\",\n kernel_initializer=\"he_normal\")(x)\n x = Dense( units=256,\n activation=\"relu\",\n kernel_initializer=\"he_normal\" )(x)\n outputs = Dense(2,activation=\"linear\")(x)\n\n model = Model(inputs, outputs)\n\n if do_compile:\n\n model.compile(loss=\"mse\",\n optimizer=\"Adam\",\n metrics=[\"accuracy\"])\n return model\n\n def act(self, state):\n\n if self.epsilon < self.min_epsilon:\n\n random_index = np.argmax(self.q_valueModel.predict([[state]]))\n return random_index\n\n else:\n random_index = np.random.randint(low=0, high=1)\n self.epsilon = self.epsilon - self.decay_rate\n return random_index\n\n def remember(self, state, action, reward, next_state, done):\n\n self.memory[self.memory_count % self.memory_size] = np.array(list(state)+[action]+[reward]+list(next_state)+[done])\n self.memory_count +=1\n\n\n def do_train(self):\n\n self.train_counter +=1\n\n if self.memory_count > self.batch_size:\n random_indices = np.random.randint(0 ,min(self.memory_count, self.memory_size), size=self.batch_size)\n data = self.memory[random_indices]\n\n state = data[:, :self.n_states]\n action = data[:, self.n_states].astype(\"int\")\n done = data[:, -1]\n next_state = data[:,self.n_states + 2:-1]\n reward = data[:, self.n_states + 1]\n x_train = state\n\n y = np.max(self.target_qValueModel.predict([next_state]), axis=1)\n y = reward + self.gamma*y*(1-done)\n\n y_train = self.q_valueModel.predict([state])\n\n y_train[np.arange(self.batch_size),action] = y\n\n if self.train_counter % self.update_rate == 0:\n self.target_qValueModel.set_weights(self.q_valueModel.get_weights())\n\n self.q_valueModel.train_on_batch(x_train, y_train)\n\n\n\n\nagent = Agent( n_states, epsilon, min_epsilon,\n decay_rate,memory_size, memroy_count,\n batch_size, update_rate, gamma )\n\n\nfor episode in 
range(1, n_episodes + 1):\n\n state = env.reset()\n done = False\n totalRewards = 0\n\n while not done:\n action = agent.act(state)\n next_state, reward, done, _ = env.step(action)\n agent.remember(state, action, reward, next_state, done)\n state = next_state\n totalRewards += reward\n agent.do_train()\n\n\n\n\n\n\n", "repo_name": "alirezakazemipour/DeepLearning-Collection", "sub_path": "ReinforcementLearning/DQN.py", "file_name": "DQN.py", "file_ext": "py", "file_size_in_byte": 4051, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "gym.make", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "1143764270", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom .models import Contact, Create_Classroom, join_student, Question3, Question2, Question1\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.utils.crypto import get_random_string\nfrom datetime import datetime\n\n\n# Create your views here.\ndef index(request):\n return render(request, 'home.html')\n\n\ndef base(request):\n return render(request, 'base.html')\n\n\ndef base1(request):\n return render(request, 'base1.html')\n\n\ndef que1(request):\n return render(request, 'que1.html')\n\n\ndef que2(request):\n return render(request, 'que2.html')\n\n\ndef que3(request):\n return render(request, 'que3.html')\n\n\ndef que4(request):\n return render(request, 'que4.html')\n\n\ndef que5(request):\n return render(request, 'que5.html')\n\n\ndef scores(request):\n return render(request, 'scores.html')\n\n\ndef about(request):\n return render(request, 'about.html')\n\n\ndef pricing(request):\n return render(request, 'pricing.html')\n\n\ndef test1(request, class_id):\n t=Create_Classroom.objects.get(class_id=class_id)\n param = {'t': t}\n return render(request, 'test1.html', param)\n\n\ndef test1confirm(request):\n if request.method=='POST':\n que1=request.POST['que1']\n ans1=request.POST['ans1']\n que2=request.POST['que2']\n ans2=request.POST['ans2']\n que3=request.POST['que3']\n ans3=request.POST['ans3']\n que4=request.POST['que4']\n ans4=request.POST['ans4']\n que5=request.POST['que5']\n ans5=request.POST['ans5']\n test_code=request.POST['test_code']\n h=Create_Classroom.objects.get(class_code=test_code)\n con=Question1(testcode1=h, que1=que1, ans1=ans1, que2=que2, ans2=ans2, que3=que3, ans3=ans3, que4=que4,ans4=ans4, que5=que5, ans5=ans5)\n con.save()\n x = Create_Classroom.objects.filter(admin_name = request.user)\n params = {'msg': 'Dear User Your Quiz has been Successfully Uploaded', 'x': x}\n return render(request, 'myclassroom.html', params)\n 
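# Reached only when the request was not a POST (presumably a direct GET); fall through to a generic error message.\n 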
param={'msg': 'Sorry for the inconvenience. Please contact the developer Pranay Kavishwar - 8982161715.'}\n    return render(request, 'myclassroom.html', param)\n\n\ndef test2(request, class_id):\n    t=Create_Classroom.objects.get(class_id=class_id)\n    param={'t': t}\n    return render(request, 'test2.html', param)\n\n\ndef test2confirm(request):\n    if request.method == 'POST':\n        que1 = request.POST['que1']\n        ans1 = request.POST['ans1']\n        que2 = request.POST['que2']\n        ans2 = request.POST['ans2']\n        que3 = request.POST['que3']\n        ans3 = request.POST['ans3']\n        que4 = request.POST['que4']\n        ans4 = request.POST['ans4']\n        que5 = request.POST['que5']\n        ans5 = request.POST['ans5']\n        test_code = request.POST['test_code']\n        h = Create_Classroom.objects.get(class_code = test_code)\n        con = Question2(testcode2 = h, que1 = que1, ans1 = ans1, que2 = que2, ans2 = ans2, que3 = que3, ans3 = ans3, que4 = que4, ans4 = ans4, que5 = que5, ans5 = ans5)\n        con.save()\n        x = Create_Classroom.objects.filter(admin_name = request.user)\n        params = {'msg': 'Dear User Your Quiz has been Successfully Uploaded ', 'x': x}\n        return render(request, 'myclassroom.html', params)\n    param={'msg': 'Sorry for the inconvenience. Please contact the developer Pranay Kavishwar - 8982161715.'}\n    return render(request, 'myclassroom.html', param)\n\n\ndef test3(request, class_id):\n    t = Create_Classroom.objects.get(class_id = class_id)\n    param={'t': t}\n    return render(request, 'test3.html', param)\n\n\ndef test3confirm(request):\n    if request.method == 'POST':\n        que1 = request.POST['que1']\n        ans1 = request.POST['ans1']\n        que2 = request.POST['que2']\n        ans2 = request.POST['ans2']\n        que3 = request.POST['que3']\n        ans3 = request.POST['ans3']\n        que4 = request.POST['que4']\n        ans4 = request.POST['ans4']\n        que5 = request.POST['que5']\n        ans5 = request.POST['ans5']\n        test_code = request.POST['test_code']\n        h = Create_Classroom.objects.get(class_code = test_code)\n        con = Question3(testcode3 = h, que1 = que1, ans1 = ans1, que2 = que2, ans2 = ans2, que3 = que3, ans3 = ans3, que4 = que4,ans4 = ans4, que5 = que5, ans5 = ans5)\n        con.save()\n        x = Create_Classroom.objects.filter(admin_name = request.user)\n        params = {'msg': 'Dear User Your Quiz has been Successfully Uploaded ', 'x': x}\n        return render(request, 'myclassroom.html', params)\n    param={'msg': 'Sorry for the inconvenience. Please contact the developer Pranay Kavishwar - 8982161715.'}\n    return render(request, 'myclassroom.html', param)\n\n\n\ndef createtest(request, class_id):\n    x = Create_Classroom.objects.get(class_id = class_id)\n    params={'x': x}\n    return render(request, 'createtest.html', params)\n\n\ndef joinedclasses(request):\n    z = join_student.objects.filter(user = request.user).count()\n    if z>0:\n        x = join_student.objects.filter(user = request.user)\n        params = {'x': x}\n        return render(request, 'joinedclasses.html', params)\n    else:\n        param = {'msg': \"Dear user, it seems you have not joined any classroom yet\"}\n        return render(request, 'base1.html', param)\n\ndef startlearning(request, class_id):\n    x=Create_Classroom.objects.filter(class_id=class_id)\n    print(class_id)\n    print(x)\n    param={'x': x}\n    return render(request, 'startlearning.html', param)\n\n\ndef join(request):\n    if request.method == 'POST':\n        join_code = request.POST.get('join_code')\n        user = request.POST.get('user_name')\n        y = Create_Classroom.objects.get(class_code = join_code)\n        x = join_student(join_code = y, user = user)\n        x.save()\n        params={'y': y}\n        return render(request, 'confirmclassroom.html', params)\n\n\ndef myclassroom(request):\n    
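# Show all classrooms administered by the logged-in user; when there are none yet, the user presumably needs to create one first, so fall back to a hint page.\n    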
z = Create_Classroom.objects.filter(admin_name = request.user).count()\n    if z > 0:\n        x = Create_Classroom.objects.filter(admin_name = request.user)\n        params = {'x': x}\n        return render(request, 'myclassroom.html', params)\n    else:\n        param={'msg': \"Dear user, please create a classroom first\"}\n        return render(request, 'base.html', param)\n\n\ndef create(request):\n    class_code = get_random_string(length = 8)\n    if request.method == 'POST':\n        class_name = request.POST['classname']\n        admin_name =request.POST['admin_name']\n        section = request.POST['section']\n        subject = request.POST['subject']\n        desc = request.POST['desc']\n        Con = Create_Classroom(class_code = class_code, admin_name = admin_name, class_name = class_name, section = section, subject = subject, desc = desc, date1 = datetime.today())\n        Con.save()\n        x = Create_Classroom.objects.filter(class_code = class_code).first()\n        params = {\"x\": x}\n        return render(request, 'confirmcreate.html', params)\n    else:\n        return HttpResponse('404 page Not Found')\n\n\n\ndef contact(request):\n    if request.method == \"POST\":\n        name=request.POST.get('name')\n        email = request.POST.get('email')\n        phone = request.POST.get('phone')\n        desc = request.POST.get('desc')\n        contact = Contact(name = name, email = email, phone = phone, desc = desc, date = datetime.today())\n        contact.save()\n        args = {'user': request.user}\n        return render(request, 'contact.html', args)\n\n    return render(request, 'contact.html')\n\n\ndef handleSignup(request):\n    if request.method == 'POST':\n        # Get the post parameters\n        username = request.POST['username']\n        fname = request.POST['fname']\n        lname = request.POST['lname']\n        email = request.POST['email']\n\n        pass1 = request.POST['pass1']\n        pass2 = request.POST['pass2']\n\n        # check for erroneous inputs\n        if len(username) > 10:\n            messages.error(request, \"Username must be under 10 characters\")\n            return redirect('home')\n\n        if pass1 != pass2:\n            messages.error(request, \"Passwords do not match\")\n            return redirect('home')\n\n        # create the user\n        myuser = User.objects.create_user(username, email, pass1)\n        myuser.first_name = fname\n        myuser.last_name = lname\n\n        myuser.save()\n        param={'msg': 'Successfully signed up as an Examino user!'}\n        return render(request, 'home.html', param)\n\n    else:\n        return HttpResponse('404 - Not found')\n\n\ndef handlelogin(request):\n    if request.method == 'POST':\n        # Get the post parameters\n        loginusername = request.POST['loginusername']\n        loginpass = request.POST['loginpass']\n\n        user = authenticate(username = loginusername, password = loginpass)\n        if user is not None:\n            login(request, user)\n            return redirect('home')\n\n\n\n\n\n    return HttpResponse('404 page Not Found')\n\n\ndef handlelogout(request):\n    logout(request)\n    messages.success(request, \"\")\n    return redirect('home')\n\n\n\n\n\n\n", "repo_name": "Pranu91/myExamino", "sub_path": "Examportalquiz1/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 9166, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.shortcuts.render", "line_number": 14, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 26, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 34, 
"usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 42, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 46, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 50, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects.get", "line_number": 58, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "models.Create_Classroom", "line_number": 58, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 60, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects.get", "line_number": 76, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects", "line_number": 76, "usage_type": "attribute"}, {"api_name": "models.Create_Classroom", "line_number": 76, "usage_type": "name"}, {"api_name": "models.Question1", "line_number": 77, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects.filter", "line_number": 79, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "models.Create_Classroom", "line_number": 79, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 81, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 83, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects.get", "line_number": 87, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "models.Create_Classroom", "line_number": 87, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 89, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects.get", "line_number": 105, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects", "line_number": 105, "usage_type": "attribute"}, {"api_name": "models.Create_Classroom", "line_number": 105, "usage_type": "name"}, {"api_name": "models.Question2", "line_number": 106, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects.filter", "line_number": 108, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects", "line_number": 108, "usage_type": "attribute"}, {"api_name": "models.Create_Classroom", "line_number": 108, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 110, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 112, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects.get", "line_number": 116, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "models.Create_Classroom", "line_number": 116, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 118, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects.get", "line_number": 134, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects", "line_number": 134, "usage_type": "attribute"}, {"api_name": "models.Create_Classroom", "line_number": 134, "usage_type": "name"}, {"api_name": "models.Question3", "line_number": 135, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects.filter", "line_number": 137, "usage_type": "call"}, {"api_name": 
"models.Create_Classroom.objects", "line_number": 137, "usage_type": "attribute"}, {"api_name": "models.Create_Classroom", "line_number": 137, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 139, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 141, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects.get", "line_number": 146, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "models.Create_Classroom", "line_number": 146, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 148, "usage_type": "call"}, {"api_name": "models.join_student.objects.filter", "line_number": 152, "usage_type": "call"}, {"api_name": "models.join_student.objects", "line_number": 152, "usage_type": "attribute"}, {"api_name": "models.join_student", "line_number": 152, "usage_type": "name"}, {"api_name": "models.join_student.objects.filter", "line_number": 154, "usage_type": "call"}, {"api_name": "models.join_student.objects", "line_number": 154, "usage_type": "attribute"}, {"api_name": "models.join_student", "line_number": 154, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 156, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 159, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects.filter", "line_number": 162, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects", "line_number": 162, "usage_type": "attribute"}, {"api_name": "models.Create_Classroom", "line_number": 162, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 166, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects.get", "line_number": 173, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects", "line_number": 173, "usage_type": "attribute"}, {"api_name": "models.Create_Classroom", "line_number": 173, "usage_type": "name"}, {"api_name": "models.join_student", "line_number": 174, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 177, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects.filter", "line_number": 181, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects", "line_number": 181, "usage_type": "attribute"}, {"api_name": "models.Create_Classroom", "line_number": 181, "usage_type": "name"}, {"api_name": "models.Create_Classroom.objects.filter", "line_number": 183, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects", "line_number": 183, "usage_type": "attribute"}, {"api_name": "models.Create_Classroom", "line_number": 183, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 185, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 188, "usage_type": "call"}, {"api_name": "django.utils.crypto.get_random_string", "line_number": 192, "usage_type": "call"}, {"api_name": "models.Create_Classroom", "line_number": 199, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 199, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 199, "usage_type": "name"}, {"api_name": "models.Create_Classroom.objects.filter", "line_number": 201, "usage_type": "call"}, {"api_name": "models.Create_Classroom.objects", "line_number": 201, "usage_type": "attribute"}, {"api_name": "models.Create_Classroom", "line_number": 201, "usage_type": "name"}, {"api_name": 
"django.shortcuts.render", "line_number": 203, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 205, "usage_type": "call"}, {"api_name": "models.Contact", "line_number": 215, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 215, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 215, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 218, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 220, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 236, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 236, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 237, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 240, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 240, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 241, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 244, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 244, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 244, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 250, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 253, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 262, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 264, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 265, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 271, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 275, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 276, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 276, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 277, "usage_type": "call"}]} +{"seq_id": "16619280297", "text": "import discord\nimport asyncio\nimport time\nfrom sys import argv, executable\n\nfrom globalvars import *\n\nfrom get_help import get_help\nfrom change_prefix import change_prefix\nfrom add_blacklist import add_blacklist\nfrom create_issue import create_issue\nfrom autoclear import autoclear\nfrom clear_channel import clear_channel\nfrom update import update\n\n\nasync def blacklist_msg(message):\n msg = await client.send_message(message.channel, ':x: This text channel has been blacklisted :x:')\n await client.delete_message(message)\n await asyncio.sleep(2)\n await client.delete_message(msg)\n\ncommand_map = {\n 'help' : get_help,\n 'blacklist' : add_blacklist,\n 'suggestion' : create_issue,\n 'issue' : create_issue,\n 'autoclear' : autoclear,\n 'clear' : clear_channel,\n 'update' : update\n}\n\nasync def validate_cmd(message):\n if message.server != None and message.server.id in prefix.keys():\n pref = prefix[message.server.id]\n else:\n pref = '&'\n\n if message.content[0] != pref: ## These functions call if the prefix isnt present\n if message.content.startswith('mbprefix'):\n\n if message.channel.id in channel_blacklist:\n await blacklist_msg(message)\n return\n\n await change_prefix(message)\n\n return\n\n cmd = message.content.split(' ')[0][1:] # extract the 
keyword\n if cmd in command_map.keys():\n\n if message.channel.id in channel_blacklist and cmd != 'help':\n await blacklist_msg(message)\n\n return\n\n else:\n await command_map[cmd](message)\n\n return\n\n@client.event ## print some stuff to console when the bot is activated\nasync def on_ready():\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n\n await client.change_presence(game=discord.Game(name='&help'))\n\n@client.event\nasync def on_message(message): ## when a message arrives at the bot ##\n\n if message.content in [None, '']:\n return\n\n await validate_cmd(message)\n\n if message.channel.id in autoclears.keys():\n await asyncio.sleep(autoclears[message.channel.id])\n await client.delete_message(message)\n\n if message.author.id in users.keys():\n if time.time() - users[message.author.id] < 1:\n\n if message.author.id in warnings.keys():\n\n warnings[message.author.id] += 1\n if warnings[message.author.id] == 4:\n await client.send_message(message.channel, 'Please slow down {}'.format(message.author.mention))\n\n elif warnings[message.author.id] == 6:\n\n overwrite = discord.PermissionOverwrite()\n overwrite.send_messages = False\n await client.edit_channel_permissions(message.channel, message.author, overwrite)\n await client.send_message(message.channel, '{}, you\\'ve been muted for spam. Please contact an admin to review your status.'.format(message.author.mention))\n\n else:\n print('user added to warning list')\n warnings[message.author.id] = 1\n\n users[message.author.id] = time.time()\n\n else:\n users[message.author.id] = time.time()\n warnings[message.author.id] = 0\n\n else:\n print('registered user for auto-muting')\n users[message.author.id] = time.time()\n\ntry:\n with open('token','r') as token_f:\n token = token_f.read().strip('\\n')\n\nexcept FileNotFoundError:\n if len(argv) < 2:\n print('Please remember you need to enter a token for the bot as an argument, or create a file called \\'token\\' and enter your token into it.')\n else:\n token = argv[1]\n\nelse:\n try:\n client.run(token)\n except:\n print('Error detected. 
Restarting in 15 seconds.')\n time.sleep(15)\n\n os.execl(executable, executable, *argv)\n", "repo_name": "JellyWX/manager-bot", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3616, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "asyncio.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "get_help.get_help", "line_number": 24, "usage_type": "name"}, {"api_name": "add_blacklist.add_blacklist", "line_number": 25, "usage_type": "name"}, {"api_name": "create_issue.create_issue", "line_number": 26, "usage_type": "name"}, {"api_name": "create_issue.create_issue", "line_number": 27, "usage_type": "name"}, {"api_name": "autoclear.autoclear", "line_number": 28, "usage_type": "name"}, {"api_name": "clear_channel.clear_channel", "line_number": 29, "usage_type": "name"}, {"api_name": "update.update", "line_number": 30, "usage_type": "name"}, {"api_name": "change_prefix.change_prefix", "line_number": 46, "usage_type": "call"}, {"api_name": "discord.Game", "line_number": 70, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 81, "usage_type": "call"}, {"api_name": "time.time", "line_number": 85, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 95, "usage_type": "call"}, {"api_name": "time.time", "line_number": 104, "usage_type": "call"}, {"api_name": "time.time", "line_number": 107, "usage_type": "call"}, {"api_name": "time.time", "line_number": 112, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 119, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 122, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 129, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 131, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 131, "usage_type": "name"}]} +{"seq_id": "36027974705", "text": "import pygame\nimport copy\nimport csv\nimport os\n\n\nclass AISpawn:\n def __init__(self, image, position, type_id):\n self.typeID = type_id\n self.position = [0, 0]\n self.position[0] = position[0]\n self.position[1] = position[1]\n\n self.world_position = [0, 0]\n self.world_position[0] = position[0]\n self.world_position[1] = position[1]\n self.tileImage = image\n self.sprite = pygame.sprite.Sprite()\n self.sprite.image = self.tileImage\n self.sprite.rect = self.tileImage.get_rect()\n self.sprite.rect.center = self.position\n\n def update_offset_position(self, offset):\n self.position[0] = self.world_position[0] - offset[0]\n self.position[1] = self.world_position[1] - offset[1]\n self.sprite.rect.center = self.position\n\n\nclass TileData:\n\n def __init__(self, file_path, tile_map):\n self.filePath = file_path\n self.tileMap = tile_map\n self.tileID = os.path.splitext(os.path.basename(file_path))[0]\n self.collidable = False\n self.collideRadius = 26\n self.collisionShapes = []\n self.image_coords = (0, 0)\n self.tileImage = None\n\n def load_tile_data(self):\n if os.path.isfile(self.filePath):\n with open(self.filePath, \"r\") as tileFile:\n reader = csv.reader(tileFile)\n for line in reader:\n data_type = line[0]\n if data_type == \"isCollidable\":\n self.collidable = bool(int(line[1]))\n elif data_type == \"tileImageCoords\":\n self.image_coords = (int(line[1]), int(line[2]))\n self.tileImage = self.tileMap[int(line[1])][int(line[2])]\n elif data_type == \"rect\":\n top_left_tile_offset = [int(line[1]), int(line[2])]\n self.collisionShapes.append([\"rect\", 
top_left_tile_offset,\n pygame.Rect(int(line[1]),\n int(line[2]),\n int(line[3])-int(line[1]),\n int(line[4])-int(line[2]))])\n elif data_type == \"circle\":\n self.collisionShapes.append([\"circle\", int(line[1])])\n self.collideRadius = int(line[1])\n\n def copy(self):\n tile_data_copy = TileData(self.filePath, self.tileMap)\n tile_data_copy.tileID = copy.deepcopy(self.tileID)\n tile_data_copy.collidable = copy.deepcopy(self.collidable)\n tile_data_copy.collideRadius = copy.deepcopy(self.collideRadius)\n tile_data_copy.collisionShapes = copy.deepcopy(self.collisionShapes)\n self.tileImage = self.tileMap[self.image_coords[0]][self.image_coords[1]]\n return tile_data_copy\n \n\nclass Tile:\n def __init__(self, position, tile_angle, tile_data):\n self.groupTileData = tile_data\n self.tileData = tile_data.copy()\n self.world_position = [position[0], position[1]]\n self.position = [position[0], position[1]]\n self.angle = tile_angle\n self.collideRadius = self.groupTileData.collideRadius\n self.collidable = self.groupTileData.collidable\n self.tileID = self.groupTileData.tileID\n self.tileImage = pygame.transform.rotate(self.groupTileData.tileImage, self.angle)\n self.sprite = pygame.sprite.Sprite()\n self.sprite.image = self.tileImage\n self.sprite.rect = self.tileImage.get_rect()\n self.sprite.rect.center = self.position\n self.isVisible = False\n\n def update_collision_shapes_position(self):\n for shape in self.tileData.collisionShapes:\n if shape[0] == \"rect\":\n shape[2].left = self.sprite.rect.left + shape[1][0]\n shape[2].top = self.sprite.rect.top + shape[1][1]\n\n def update_offset_position(self, offset, screen_data):\n should_update = False\n should_add_to_visible_tiles = False\n should_add_to_visible_collidable_tiles = False\n self.position[0] = self.world_position[0] - offset[0]\n self.position[1] = self.world_position[1] - offset[1]\n self.sprite.rect.center = self.position\n self.update_collision_shapes_position()\n if -32 <= self.position[0] <= screen_data.screenSize[0] + 32:\n if -32 <= self.position[1] <= screen_data.screenSize[1] + 32:\n if not self.isVisible:\n should_update = True\n self.isVisible = True\n should_add_to_visible_tiles = True\n if self.collidable:\n should_add_to_visible_collidable_tiles = True\n else:\n self.isVisible = False\n else:\n self.isVisible = False\n return should_update, should_add_to_visible_tiles, should_add_to_visible_collidable_tiles\n \n def draw_collision_shapes(self, screen):\n for shape in self.tileData.collisionShapes:\n if shape[0] == \"circle\":\n self.draw_radius_circle(screen, shape[1])\n elif shape[0] == \"rect\":\n self.draw_collision_rect(screen, shape[2])\n \n @staticmethod\n def draw_collision_rect(screen, rect):\n ck = (180, 100, 100)\n s = pygame.Surface((rect.width, rect.height))\n s.fill(ck)\n s.set_alpha(75)\n screen.blit(s, rect)\n \n def draw_radius_circle(self, screen, radius):\n ck = (127, 33, 33)\n int_position = [0, 0]\n int_position[0] = int(self.position[0]-radius)\n int_position[1] = int(self.position[1]-radius)\n s = pygame.Surface((radius*2, radius*2))\n\n # first, \"erase\" the surface by filling it with a color and\n # setting this color as colorkey, so the surface is empty\n s.fill(ck)\n s.set_colorkey(ck)\n\n pygame.draw.circle(s, pygame.Color(180, 100, 100), (radius, radius), radius)\n\n # after drawing the circle, we can set the \n # alpha value (transparency) of the surface\n s.set_alpha(75)\n screen.blit(s, int_position)\n\n def test_projectile_collision(self, projectile_rect):\n collided = False\n if 
self.sprite.rect.colliderect(projectile_rect):\n for collisionShape in self.tileData.collisionShapes:\n if collisionShape[0] == \"circle\":\n if self.test_rect_in_circle(projectile_rect, collisionShape[1]):\n collided = True\n elif collisionShape[0] == \"rect\":\n if collisionShape[2].colliderect(projectile_rect):\n collided = True\n return collided\n\n @staticmethod\n def test_point_in_circle(point, circle_pos, circle_radius):\n return (point[0] - circle_pos[0]) ** 2 + (point[1] - circle_pos[1]) ** 2 < circle_radius ** 2\n\n def test_rect_in_circle(self, rect, circle_radius):\n tl_in = self.test_point_in_circle(rect.topleft, self.position, circle_radius)\n tr_in = self.test_point_in_circle(rect.topright, self.position, circle_radius)\n bl_in = self.test_point_in_circle(rect.bottomleft, self.position, circle_radius)\n br_in = self.test_point_in_circle(rect.bottomright, self.position, circle_radius)\n return tl_in or tr_in or bl_in or br_in\n\n def rotate_tile_right(self):\n self.angle -= 90\n if self.angle < 0:\n self.angle = 270\n self.tileImage = pygame.transform.rotate(self.tileImage, -90)\n self.sprite.image = self.tileImage\n\n def rotate_tile_left(self):\n self.angle += 90\n if self.angle > 270:\n self.angle = 0\n self.tileImage = pygame.transform.rotate(self.tileImage, 90)\n self.sprite.image = self.tileImage\n", "repo_name": "MyreMylar/scrolling_shooter", "sub_path": "game/tile.py", "file_name": "tile.py", "file_ext": "py", "file_size_in_byte": 7995, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pygame.sprite.Sprite", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 55, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 65, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 66, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 67, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 68, "usage_type": "call"}, {"api_name": "pygame.transform.rotate", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 128, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 138, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 145, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 145, "usage_type": "call"}, {"api_name": "pygame.transform.rotate", "line_number": 179, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 179, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 186, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 186, "usage_type": 
"attribute"}]} +{"seq_id": "6438977991", "text": "############################################################################\n############################################################################\n# THIS IS THE ONLY FILE YOU SHOULD EDIT\n#\n#\n# Agent must always have these five functions:\n# __init__(self)\n# has_finished_episode(self)\n# get_next_action(self, state)\n# set_next_state_and_distance(self, next_state, distance_to_goal)\n# get_greedy_action(self, state)\n#\n#\n# You may add any other functions as you wish\n############################################################################\n############################################################################\n\nimport time\nimport numpy as np\nimport torch\nfrom collections import deque\nimport random\nimport matplotlib.pyplot as plt\n\n\nclass Agent:\n\n # Function to initialise the agent\n def __init__(self):\n # Set the episode length\n self.episode_length = 3000\n # Reset the total number of steps which the agent has taken\n self.num_steps_taken = 0\n # The state variable stores the latest state of the agent in the environment\n self.state = None\n # The action variable stores the latest action which the agent has applied to the environment\n self.action = None\n self.discrete_action = None\n\n self.num_episodes = 0\n self.steps_in_episode = 0\n\n self.replaybuffer = ReplayBuffer(capacity=20000, epsilon=0.1, alpha=0.7)\n\n self.dqn = DQN()\n self.target = DQN()\n self.target.q_network.load_state_dict(self.dqn.q_network.state_dict())\n\n self.epsilon_init = 1\n self.epsilon = self.epsilon_init\n self.epsilon_decay = 0.1 ** (1 / 100)\n self.episode_len_decay = 0.8\n self.epsilon_min = 0.3\n self.gamma = 0.9\n self.batch_size = 1000\n self.target_swap = 200\n\n self._greedy = False\n self._found_greedy = False\n self._birthday = time.time()\n self._last_distance = 100 # This is only used for logging purposes\n self._found_goal_in_episode = False\n self._next_episode_length = self.episode_length\n\n\n # map discrete to continuous actions\n self._action_map = {\n 0: np.array([0.02, 0], dtype=np.float), # RIGHT\n 1: np.array([0, 0.02], dtype=np.float), # UP\n 2: np.array([-0.02, 0], dtype=np.float), # LEFT\n 3: np.array([0, -0.02], dtype=np.float) # DOWN\n }\n\n # Function to check whether the agent has reached the end of an episode\n def has_finished_episode(self):\n has_finished = self.steps_in_episode % self.episode_length == 0\n\n if has_finished:\n print(f\"Finished episode {self.num_episodes} after {self.num_steps_taken} steps, episode_length={self.episode_length}, epsilon={self._current_epsilon()}, greedy={self._greedy}, last_distance={self._last_distance}\")\n self.num_episodes += 1\n self.steps_in_episode = 0\n self._found_goal_in_episode = False\n self.epsilon *= self.epsilon_decay\n self.episode_length = self._next_episode_length\n\n return has_finished\n\n\n # Function to get the next action, using whatever method you like\n def get_next_action(self, state):\n # Update the number of steps which the agent has taken\n self.num_steps_taken += 1\n self.steps_in_episode += 1\n\n # try out the greedy policy every 5 episodes\n self._greedy = (self.num_episodes % 5 == 0 and self.steps_in_episode <= 100 and time.time() - self._birthday >= 480) or self._found_greedy\n \n if self._greedy:\n return self.get_greedy_action(state, False)\n\n discrete_action = self._choose_next_action(state)\n action = self._discrete_action_to_continuous(discrete_action)\n # Store the state; this will be used later, when storing the 
transition\n self.state = state\n # Store the action; this will be used later, when storing the transition\n self.action = action\n self.discrete_action = discrete_action\n\n return action\n\n # Function for the agent to choose its next action\n def _choose_next_action(self, state):\n q = self.dqn.q_network.forward(torch.tensor([state]).float())\n best = torch.argmax(q).item()\n epsilon = self._current_epsilon()\n probs = np.full(4, epsilon / 4)\n probs[best] = 1 - epsilon + epsilon / 4\n return np.random.choice(range(4), p=probs)\n \n def _current_epsilon(self):\n return min(1, max(self.epsilon, self.epsilon_min))\n\n\n # Function to convert discrete action (as used by a DQN) to a continuous action (as used by the environment).\n def _discrete_action_to_continuous(self, discrete_action):\n return self._action_map[discrete_action]\n\n # Function to set the next state and distance, which resulted from applying action self.action at state self.state\n def set_next_state_and_distance(self, next_state, distance_to_goal):\n\n # check if we're on a greedy policy and have found the goal\n if self._greedy and self.steps_in_episode <= 100 and distance_to_goal < 0.03:\n self._found_greedy = True\n\n if distance_to_goal < 0.03 and not self._found_goal_in_episode:\n self._found_goal_in_episode = True\n self._next_episode_length = max(200, int(self.episode_length * self.episode_len_decay))\n print(f\"Found goal, reducing next episode length to {self._next_episode_length}\")\n \n # Convert the distance to a reward\n reward = 1 - distance_to_goal\n if abs(next_state[0] - self.state[0]) < 0.0001 or abs(next_state[1] - self.state[1]) < 0.0001:\n reward -= 0.5\n \n # Only used for logging\n self._last_distance = distance_to_goal\n\n # Create a transition\n transition = (self.state, self.discrete_action, reward, next_state)\n\n self.replaybuffer.append(transition)\n\n # Only train when not trying greedy policy\n if not self._greedy:\n if len(self.replaybuffer) >= self.batch_size:\n batch = self.replaybuffer.sample(self.batch_size)\n self.dqn.batch_train_q_network(batch, self.gamma, self.target, self.replaybuffer)\n \n if self.num_steps_taken % self.target_swap == 0:\n self.target.q_network.load_state_dict(self.dqn.q_network.state_dict())\n\n\n\n\n\n # Function to get the greedy action for a particular state\n def get_greedy_action(self, state, p=True):\n q = self.dqn.q_network.forward(torch.tensor([state]).float())\n if p:\n print(\"state {}, q {}\".format(state, q))\n best = torch.argmax(q).item()\n return self._discrete_action_to_continuous(best)\n\n\n# The Network class inherits the torch.nn.Module class, which represents a neural network.\nclass Network(torch.nn.Module):\n\n # The class initialisation function. This takes as arguments the dimension of the network's input (i.e. the dimension of the state), and the dimension of the network's output (i.e. the dimension of the action).\n def __init__(self, input_dimension, output_dimension):\n # Call the initialisation function of the parent class.\n super(Network, self).__init__()\n # Define the network layers. 
This example network has two hidden layers, each with 100 units.\n self.layer_1 = torch.nn.Linear(in_features=input_dimension, out_features=100)\n self.layer_2 = torch.nn.Linear(in_features=100, out_features=100)\n self.layer_3 = torch.nn.Linear(in_features=100, out_features=100)\n self.output_layer = torch.nn.Linear(in_features=100, out_features=output_dimension)\n\n # Function which sends some input data through the network and returns the network's output. In this example, a ReLU activation function is used for both hidden layers, but the output layer has no activation function (it is just a linear layer).\n def forward(self, input):\n layer_1_output = torch.nn.functional.relu(self.layer_1(input))\n layer_2_output = torch.nn.functional.relu(self.layer_2(layer_1_output))\n layer_3_output = torch.nn.functional.relu(self.layer_3(layer_2_output))\n output = self.output_layer(layer_3_output)\n return output\n\n\n# The DQN class determines how to train the above neural network.\nclass DQN:\n\n # The class initialisation function.\n def __init__(self):\n # Create a Q-network, which predicts the q-value for a particular state.\n self.q_network = Network(input_dimension=2, output_dimension=4)\n # Define the optimiser which is used when updating the Q-network. The learning rate determines how big each gradient step is during backpropagation.\n self.optimiser = torch.optim.Adam(self.q_network.parameters(), lr=0.001)\n\n \n # Train on batch of transitions\n def batch_train_q_network(self, batch, gamma, target_network, replaybuffer=None):\n self.optimiser.zero_grad()\n loss = self._calculate_batch_loss(batch, gamma, target_network, replaybuffer)\n loss.backward()\n self.optimiser.step()\n return loss.item()\n\n def _calculate_batch_loss(self, batch, gamma, target_network, replaybuffer):\n states = batch[:,:2]\n actions = batch[:,2]\n rewards = batch[:,3]\n next_states = batch[:,4:]\n\n q_target = target_network.q_network.forward(torch.tensor(next_states).float())\n q_max_indices = torch.argmax(q_target, dim=1)\n\n q_max = self.q_network.forward(torch.tensor(next_states).float()).gather(1, q_max_indices.unsqueeze(1))\n r_tensor = torch.tensor(rewards).reshape((len(rewards), 1)) + gamma * q_max.reshape(len(rewards), 1)\n\n s_tensor = torch.tensor(states).float()\n a_tensor = torch.tensor(actions, dtype=torch.int64)\n\n prediction = self.q_network.forward(s_tensor).gather(1, a_tensor.unsqueeze(1))\n\n if replaybuffer is not None:\n replaybuffer.update_deltas(torch.abs(prediction - r_tensor.float()).detach().numpy())\n\n return torch.nn.MSELoss()(prediction, r_tensor.float())\n\nclass ReplayBuffer:\n def __init__(self, capacity=5000, epsilon=0.1, alpha=1):\n self._capacity = capacity\n self._epsilon = epsilon\n self._alpha = alpha\n self._buffer = np.zeros((capacity, 6), dtype=np.float)\n self._index = 0\n self._size = 0\n self._last_indices_returned = None\n self._weights = np.zeros(capacity)\n self._sampling_probs = np.zeros(capacity)\n self._can_append = True\n \n def append(self, transition):\n assert self._can_append\n state = [coord for coord in transition[0]]\n action = [transition[1]]\n reward = [transition[2]]\n next_state = [coord for coord in transition[3]]\n self._buffer[self._index] = np.array(state + action + reward + next_state)\n self._size = min(self._size + 1, self._capacity)\n\n self._weights[self._index] = np.max(self._weights)\n\n self._renormalize_weights()\n\n self._index = (self._index + 1) % self._capacity\n\n def update_deltas(self, deltas):\n self._can_append = True\n 
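# Assumption: deltas holds the absolute TD errors of the batch returned by the last sample() call; storing them as per-transition weights implements proportional prioritized experience replay.\n        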
self._weights[self._last_indices_returned] = deltas.flatten()\n self._renormalize_weights()\n\n def _renormalize_weights(self):\n weights = (self._weights[:self._size] + self._epsilon) ** self._alpha\n self._sampling_probs[:self._size] = weights / np.sum(weights)\n\n def sample(self, n):\n self._can_append = False\n indices = np.random.choice(range(self._size), n, p=self._sampling_probs[:self._size])\n self._last_indices_returned = indices\n return self._buffer[indices]\n\n def __len__(self):\n return self._size", "repo_name": "seboko/rl-maze", "sub_path": "Coursework/agent.py", "file_name": "agent.py", "file_ext": "py", "file_size_in_byte": 11755, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "time.time", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 71, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 118, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 176, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 183, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 184, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 185, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 186, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.relu", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 190, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.relu", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 191, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.relu", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 192, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 205, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 222, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 
226, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 228, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 229, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 229, "usage_type": "attribute"}, {"api_name": "torch.abs", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 236, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 243, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 277, "usage_type": "attribute"}]} +{"seq_id": "40973526253", "text": "#!/usr/bin/env python\n\"\"\"\n Title: get_records.py - extract a given record from a series of records\n Author: Craig Arthur, craig.arthur@ga.gov.au\n CreationDate: 2013-04-29\n Description: Extracts values from an array of lists that each contain data\n for a specified field.\n\n Version: 0.1\n\n (C) Commonwealth of Australia (Geoscience Australia) 2012\n This product is released under the Creative Commons Attribution 3.0\n Australia Licence\n\n http://creativecommons.org/licenses/by/3.0/au/legalcode\n\"\"\"\nimport numpy\nimport logging\n\nLOG = logging.getLogger(__name__)\n\ndef getField(fieldname, fields, records, dtype=float):\n \"\"\"\n Extract from the records the value of the field corresponding\n to fieldname.\n \"\"\"\n LOG.debug(\"Extracting {0} from records\".format(fieldname))\n nrecords = len(records)\n fieldnames = [fields[i][0] for i in range(len(fields))]\n\n # Check the field is in the records:\n if fieldname not in fieldnames:\n LOG.warn(\"No field '{0}' in the list of fieldnames\" .\n format(fieldname))\n\n LOG.warn(\"Unable to proceed with processing\")\n raise ValueError\n\n # Get the index of the required field name:\n idx = fieldnames.index(fieldname)\n if dtype != str:\n # For non-string data, return a numpy array:\n output = numpy.array([records[rec][idx] for rec in xrange(nrecords)],\n dtype=dtype)\n\n else:\n # Otherwise, return a list:\n output = [records[rec][idx] for rec in xrange(nrecords)]\n\n\n return output\n\ndef removeField(fieldname, fields, records):\n \"\"\"\n Remove from the records the given field\n \"\"\"\n LOG.debug(\"Removing {0} from records\".format(fieldname))\n nrecords = len(records)\n fieldnames = [fields[i][0] for i in range(len(fields))]\n\n # Check the field is in the records:\n if fieldname not in fieldnames:\n LOG.warn(\"No field '{0}' in the list of fieldnames\" .\n format(fieldname))\n\n LOG.warn(\"Unable to proceed with processing\")\n raise ValueError\n\n # Get the index of the required field name:\n idx = fieldnames.index(fieldname)\n for rec in xrange(nrecords):\n del records[rec][idx]\n\n del fields[idx]\n\n return records, fields\n", "repo_name": "wcarthur/gmma", "sub_path": "get_records.py", "file_name": "get_records.py", "file_ext": "py", "file_size_in_byte": 2265, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": 
[{"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "43025242971", "text": "from .models import User\nimport jwt\nfrom rest_framework import authentication, exceptions\nfrom django.conf import settings\nfrom rest_framework.response import Response\n\n\nclass JWTAuthentication(authentication.BaseAuthentication):\n\n def authenticate(self, request):\n auth_data = authentication.get_authorization_header(request)\n\n if not auth_data:\n return None\n\n try:\n _, token = auth_data.decode('utf-8').split(' ')\n except:\n raise exceptions.AuthenticationFailed(\n 'Please insert \\'Bearer\\' before the inserted token.')\n \n try:\n payload = jwt.decode(token, settings.JWT_SECRET_KEY, algorithms=\"HS256\")\n\n user = User.objects.get(email=payload['email'])\n return (user, token)\n\n except jwt.DecodeError as identifier:\n raise exceptions.AuthenticationFailed(\n 'Your token is invalid,login')\n except jwt.ExpiredSignatureError as identifier:\n raise exceptions.AuthenticationFailed(\n 'Your token is expired,login')\n", "repo_name": "Carolinemagdy/Multivendor_Fashion_website", "sub_path": "accounts/authentication.py", "file_name": "authentication.py", "file_ext": "py", "file_size_in_byte": 1104, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "rest_framework.authentication.BaseAuthentication", "line_number": 8, "usage_type": "attribute"}, {"api_name": "rest_framework.authentication", "line_number": 8, "usage_type": "name"}, {"api_name": "rest_framework.authentication.get_authorization_header", "line_number": 11, "usage_type": "call"}, {"api_name": "rest_framework.authentication", "line_number": 11, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.AuthenticationFailed", "line_number": 19, "usage_type": "call"}, {"api_name": "rest_framework.exceptions", "line_number": 19, "usage_type": "name"}, {"api_name": "jwt.decode", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.settings.JWT_SECRET_KEY", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 23, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 25, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 25, "usage_type": "name"}, {"api_name": "jwt.DecodeError", "line_number": 28, "usage_type": "attribute"}, {"api_name": "rest_framework.exceptions.AuthenticationFailed", "line_number": 29, "usage_type": "call"}, {"api_name": "rest_framework.exceptions", "line_number": 29, "usage_type": "name"}, {"api_name": "jwt.ExpiredSignatureError", "line_number": 31, "usage_type": "attribute"}, {"api_name": "rest_framework.exceptions.AuthenticationFailed", "line_number": 32, "usage_type": "call"}, {"api_name": "rest_framework.exceptions", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "19141930193", "text": "import json\nimport re\nfrom datetime import datetime\nfrom io import BytesIO\nfrom pathlib import Path\nfrom textwrap import dedent\nfrom zipfile import ZipFile\n\nimport networkx as nx\nimport numpy as np\nfrom openpyxl import Workbook\nfrom openpyxl.cell import WriteOnlyCell\nfrom scipy import interpolate\n\nimport Qiber3D\nfrom Qiber3D import helper, config\n\n\nclass IO:\n\n class load:\n def __new__(cls, path, **kwargs):\n \"\"\"\n Returns a new 
:class:`Qiber3D.Network` from file.\n\n Supports: :file:`.qiber`, :file:`.json`, :file:`.mv3d`, :file:`.tif`, :file:`.nd2`, :file:`.swc`, :file:`.ntr`\n\n :param path: file path to load\n :type path: str, Path\n :param kwargs: key-word arguments are passed down to the individual IO functions\n \"\"\"\n\n path = Path(path)\n if path.suffix == '':\n path = path.with_suffix('.qiber')\n\n if path.suffix == '.qiber':\n return cls.binary(path)\n elif path.suffix == '.json':\n return cls.json(path)\n elif path.suffix == '.mv3d':\n return cls.mv3d(path)\n elif path.suffix == '.nd2':\n return cls.nd2(path, **kwargs)\n elif path.suffix == '.ntr':\n return cls.ntr(path)\n elif path.suffix == '.swc':\n return cls.swc(path, **kwargs)\n else:\n return cls.image(path, **kwargs)\n\n @staticmethod\n def binary(path):\n \"\"\"\n Create a :class:`Qiber3D.Network` from a :file:`.qiber` file, created by :meth:`Qiber3D.Network.save`\n\n :param path: file path to load\n :type path: str, Path\n :return: :class:`Qiber3D.Network`\n \"\"\"\n path = Path(path)\n with ZipFile(path, mode='r') as save_file:\n net = IO.load.json(path, data=json.loads(save_file.read('network.json').decode('utf-8')))\n try:\n extractor_steps = helper.NumpyMemoryManager.load(\n fileobj=BytesIO(save_file.read('extractor_steps.tar')))\n except KeyError:\n extractor_steps = None\n net.extractor_steps = extractor_steps\n return net\n\n @staticmethod\n def image(path, channel=None, voxel_size=None):\n \"\"\"\n Create a :class:`Qiber3D.Network` from a image file.\n\n :param path: file path to load\n :type path: str, Path\n :param channel: either index or name of image channel\n :type channel: int, str\n :param tuple(float) voxel_size: physical size of voxel in (x,y,z)\n :return: :class:`Qiber3D.Network`\n\n \"\"\"\n path = Path(path)\n\n if voxel_size is None:\n voxel_size = config.extract.voxel_size\n\n if voxel_size is None:\n print('Please set the physical size of the voxels')\n vsx = float(input('\\tx: '))\n vsy = float(input('\\ty: '))\n vsz = float(input('\\tz: '))\n voxel_size = (vsx, vsy, vsz)\n ex = Qiber3D.Extractor(path, channel=channel, voxel_size=voxel_size)\n return ex.get_network()\n\n @staticmethod\n def nd2(path, channel=None):\n \"\"\"\n Create a :class:`Qiber3D.Network` from a :file:`.nd2` file.\n\n :param path: file path to load\n :type path: str, Path\n :param channel: either index or name of image channel\n :type channel: int, str\n :return: :class:`Qiber3D.Network`\n \"\"\"\n path = Path(path)\n ex = Qiber3D.Extractor(path, channel=channel)\n return ex.get_network()\n\n @staticmethod\n def mv3d(path):\n \"\"\"\n Create a :class:`Qiber3D.Network` from a :file:`.mv3d` file.\n\n :param path: file path to load\n :type path: str, Path\n :return: :class:`Qiber3D.Network`\n \"\"\"\n path = Path(path)\n\n network_data = np.loadtxt(path, delimiter='\\t')\n segments = {}\n available_segments = [int(sid) for sid in (set(network_data[:, 0].astype(int)))]\n\n for seg_id in available_segments:\n segments[seg_id] = dict(\n seg_id=seg_id,\n points=network_data[network_data[:, 0] == seg_id][:, 1:4],\n radius=network_data[network_data[:, 0] == seg_id][:, 4] / 2.0\n )\n data = {\n 'path': path,\n 'name': path.with_suffix('').name,\n 'segments': segments}\n return Qiber3D.Network(data)\n\n @staticmethod\n def network(net, scale=1, input_path=None, segment_list=None):\n \"\"\"\n Create a new :class:`Qiber3D.Network` from a :class:`Qiber3D.Network`.\n\n :param Qiber3D.Network net: original network\n :param float scale: scale all points and radii 
by this value\n :param input_path: set this path as new imput path of the returning network\n :type input_path: str, Path\n :param tuple segment_list: limit the new network to this list of segments (sid)\n :return: :class:`Qiber3D.Network`\n \"\"\"\n\n segments = {}\n if segment_list is None:\n for seg_id, segment in enumerate(net.segment.values()):\n segments[seg_id] = {\n 'seg_id': seg_id,\n 'points': segment.point * scale,\n 'radius': segment.radius * scale\n }\n else:\n for new_seg_id, old_seg_id in enumerate(segment_list):\n segments[new_seg_id] = {\n 'seg_id': new_seg_id,\n 'points': net.segment[old_seg_id].point * scale,\n 'radius': net.segment[old_seg_id].radius * scale\n }\n if input_path is not None:\n data = {\n 'path': input_path,\n 'name': input_path.with_suffix('').name,\n 'segments': segments}\n else:\n data = {\n 'path': net.input_file,\n 'name': net.name,\n 'segments': segments}\n return Qiber3D.Network(data)\n\n @staticmethod\n def ntr(path):\n \"\"\"\n Create a :class:`Qiber3D.Network` from a :file:`.ntr` file.\n\n :param path: file path to load\n :type path: str, Path\n :return: :class:`Qiber3D.Network`\n \"\"\"\n path = Path(path)\n content = path.read_text()\n data_re = re.compile(r'(.*),(.*),(.*),(.*),(.*),(.*)\\n')\n\n search = data_re.findall(content)\n data = np.array(search, dtype=np.float32)\n\n seg_id = 0\n segments = {seg_id: {'seg_id': seg_id, 'points': [], 'radius': []}}\n unfinished_bp = []\n for n, (x, y, z, d, pt, pd) in enumerate(data):\n segments[seg_id]['points'].append((x, y, z))\n segments[seg_id]['radius'].append(d/2.0)\n if int(pt) in [7, 8]:\n seg_id += 1\n segments[seg_id] = {'seg_id': seg_id, 'points': [], 'radius': []}\n if int(pt) == 7:\n unfinished_bp.append((x, y, z, d))\n segments[seg_id]['points'].append((x, y, z))\n segments[seg_id]['radius'].append(d / 2.0)\n if int(pt) == 8:\n if unfinished_bp:\n x, y, z, d = unfinished_bp.pop()\n segments[seg_id]['points'].append((x, y, z))\n segments[seg_id]['radius'].append(d / 2.0)\n pass\n\n to_remove = []\n for seg_id in segments:\n if segments[seg_id]['points']:\n segments[seg_id]['points'] = np.array(segments[seg_id]['points'])\n segments[seg_id]['radius'] = np.array(segments[seg_id]['radius'])\n else:\n to_remove.append(seg_id)\n if len(segments[seg_id]['points']) == 2:\n if np.all(segments[seg_id]['points'][0] == segments[seg_id]['points'][1]):\n to_remove.append(seg_id)\n for seg_id in to_remove:\n segments.__delitem__(seg_id)\n\n data = {\n 'path': path,\n 'name': path.with_suffix('').name,\n 'segments': segments}\n return Qiber3D.Network(data)\n\n @staticmethod\n def _from_graph(graph, point_lookup, radius_lookup, scale=1.0, ravel=False):\n segment_data = {}\n segments = []\n for fiber in (graph.subgraph(c).copy() for c in nx.connected_components(graph)):\n start_points = None\n stop_points = []\n for node in fiber:\n if len(fiber.adj[node]) == 1:\n start_points = [(node,)]\n break\n if start_points is None:\n start_node = list(fiber.nodes)[0]\n start_points = [list(graph.edges(start_node))[0]]\n\n while start_points:\n start = start_points.pop()\n if len(start) == 1:\n new_segment = [start[0]]\n start = start[0]\n else:\n if not fiber.has_edge(start[0], start[1]):\n continue\n new_segment = [start[0], start[1]]\n fiber.remove_edge(start[0], start[1])\n start = start[1]\n f = start\n while True:\n t = list(fiber.neighbors(f))\n if len(t) == 1:\n new_segment.append(t[0])\n fiber.remove_edge(f, t[0])\n f = t[0]\n if t[0] in stop_points:\n break\n elif len(t) > 1:\n for paths in t:\n 
start_points.append((f, paths))\n stop_points.append(f)\n break\n else:\n break\n segments.append(new_segment)\n\n for sid, seg in enumerate(segments):\n if ravel:\n points = np.array(np.unravel_index(seg, point_lookup)).T * scale\n points[:] = points[:, (2, 1, 0)]\n radius = radius_lookup.flat[seg] * scale\n else:\n points = [point_lookup[pid] for pid in seg]\n radius = np.array([radius_lookup[pid]*scale for pid in seg])\n points = np.array([(round(x*scale, 4), round(y*scale, 4), round(z*scale, 4)) for (x, y, z) in points])\n\n segment_data[sid] = dict(\n points=points,\n radius=radius,\n seg_id=sid\n )\n return segment_data\n\n @staticmethod\n def swc(path, allowed_types=None):\n \"\"\"\n Create a :class:`Qiber3D.Network` from a :file:`.swc` file.\n\n :param path: file path to load\n :type path: str, Path\n :param tuple allowed_types: limit the returned network to these SEGMENT_TYPEs\n :return: :class:`Qiber3D.Network`\n \"\"\"\n\n path = Path(path)\n raw_data = np.loadtxt(path, comments='#')\n point_lookup = helper.PointLookUp(places=1)\n id_mapper = {}\n radius_lookup = {}\n graph = nx.Graph()\n for n, pi, x, y, z, r, parent in raw_data:\n if allowed_types is not None:\n if int(pi) not in allowed_types:\n continue\n pid = point_lookup[(x, y, z)]\n id_mapper[int(n)] = pid\n radius_lookup[pid] = r\n if int(parent) != -1 and int(parent) in id_mapper:\n graph.add_edge(pid, id_mapper[int(parent)])\n segment_data = IO.load._from_graph(graph, point_lookup, radius_lookup)\n data = {\n 'path': path,\n 'name': path.with_suffix('').name,\n 'segments': segment_data}\n return Qiber3D.Network(data)\n\n @staticmethod\n def json(path, data=None):\n \"\"\"\n Create a :class:`Qiber3D.Network` from a :file:`.json` file.\n\n :param path: file path to load\n :type path: str, Path\n :param dict data: load network from a ``dict`` representation of the :file:`.json` file directly\n :return: :class:`Qiber3D.Network`\n \"\"\"\n path = Path(path)\n if data is None:\n data = json.loads(path.read_text())\n segment_data = {}\n for segment in data['network']['segment'].values():\n segment_data[segment['sid']] = dict(\n seg_id=segment['sid'],\n points=np.array(segment['point']),\n radius=np.array(segment['radius'])\n )\n network_data = {\n 'path': path,\n 'name': data['meta']['name'],\n 'segments': segment_data}\n net = Qiber3D.Network(network_data)\n net.extractor_data = data['network'].get('extractor_data')\n return net\n\n @staticmethod\n def __fill_in_segment(segment, resolution=200):\n base_points = np.array(segment).T\n tck, u = interpolate.splprep(base_points)\n u_fine = np.linspace(0, 1, resolution)\n x, y, z, r = interpolate.splev(u_fine, tck)\n points = np.stack((x, y, z)).T\n return points, r\n\n @classmethod\n def synthetic_network(cls):\n \"\"\"\n Create the synthetic test network.\n\n :return: :class:`Qiber3D.Network`\n \"\"\"\n resolution = 200\n raw_segments = []\n\n # Tree\n tree_start = ((200, 100, 0, 5), (300, 500, 100, 8), (600, 600, 250, 15),\n (800, 800, 300, 10), (1100, 900, 480, 6.5), (1300, 1000, 500, 6))\n tree_start = cls.__fill_in_segment(tree_start, resolution)\n raw_segments.append(tree_start)\n x, y, z = tree_start[0][2 * resolution // 10]\n r = tree_start[1][2 * resolution // 10]\n tree_first_branch = ((x, y, z, r), (400, 300, 150, 5), (600, 200, 180, 8),\n (900, 100, 200, 5), (1280, 110, 400, 4))\n raw_segments.append(cls.__fill_in_segment(tree_first_branch, resolution))\n x, y, z = tree_start[0][4 * resolution // 10]\n r = tree_start[1][4 * resolution // 10]\n tree_second_branch = 
((x, y, z, r), (800, 500, 260, 18), (1200, 300, 270, 17),\n (1500, 600, 290, 10), (1500, 900, 210, 9))\n raw_segments.append(cls.__fill_in_segment(tree_second_branch, resolution))\n\n # Tree with loop\n tree_loop_start = ((100, 1100, 300, 5), (200, 950, 330, 13), (900, 800, 250, 20),\n (950, 600, 200, 22), (1100, 200, 100, 8), (1310, 90, 50, 6))\n tree_loop_start = cls.__fill_in_segment(tree_loop_start, resolution)\n raw_segments.append(tree_loop_start)\n s_x, s_y, s_z = tree_loop_start[0][1 * resolution // 10]\n s_r = tree_loop_start[1][1 * resolution // 10]\n f_x, f_y, f_z = tree_loop_start[0][9 * resolution // 10]\n f_r = tree_loop_start[1][9 * resolution // 10]\n tree_loop_branch = ((s_x, s_y, s_z, s_r), (600, 500, 250, 6), (800, 400, 130, 5), (f_x, f_y, f_z, f_r))\n raw_segments.append(cls.__fill_in_segment(tree_loop_branch, resolution))\n\n # No branch point\n no_branch = ((1500, 100, 450, 6), (1500, 200, 500, 11), (1400, 300, 500, 14), (1450, 500, 450, 11),\n (1300, 800, 300, 14), (1200, 900, 0, 9), (1100, 1100, 50, 8))\n raw_segments.append(cls.__fill_in_segment(no_branch, resolution))\n\n # Circle\n circle = ((780, 700, 428.8, 8), (650, 200, 250, 12), (600, 400, 100, 15),\n (700, 800, 100, 10), (750, 1100, 250, 7), (780, 700, 428.8, 8))\n raw_segments.append(cls.__fill_in_segment(circle, resolution))\n\n node_lookup = helper.PointLookUp(places=0)\n radius_lookup = {}\n raw_network = nx.Graph()\n for sid, (points, radii) in enumerate(raw_segments):\n last_node = None\n for point, r in zip(points, radii):\n pid = node_lookup[point]\n radius_lookup[pid] = r\n if last_node is not None:\n raw_network.add_edge(last_node, pid)\n last_node = pid\n\n segment_data = cls._from_graph(raw_network, node_lookup, radius_lookup, scale=0.1)\n\n data = {\n 'path': None,\n 'name': 'synthetic',\n 'segments': segment_data}\n return Qiber3D.Network(data)\n\n\n class export:\n\n def __new__(cls, net, out_path='.', overwrite=False, mode=None, **kwargs):\n \"\"\"\n Export a :class:`Qiber3D.Network` to file. 
Selecting the appropriate format based on the file suffix.\n\n Supports: :file:`.qiber`, :file:`.json`, :file:`.mv3d`, :file:`.tif`, :file:`.nd2`, :file:`.swc`,\n :file:`.csv`, :file:`.tsv`, :file:`.xlsx`, :file:`.x3d`\n\n :param Qiber3D.Network net: network to export\n :param out_path: file or folder path where to save the network\n :type out_path: str, Path\n :param bool overwrite: allow file overwrite\n :param str mode: select the file format ignoring the file suffix.\n Choose from ['binary', 'json', 'mv3d', 'x3d', 'swc', 'xlsx', 'csv', 'tsv', 'tif']\n :param kwargs: key-word arguments are passed down to the individual IO functions\n :return: path to saved file\n :rtype: Path\n \"\"\"\n if mode is None:\n path = Path(out_path)\n if path.suffix == '.json':\n mode = 'json'\n elif path.suffix == '.qiber':\n mode = 'binary'\n elif path.suffix == '.mv3d':\n mode = 'mv3d'\n elif path.suffix == '.x3d':\n mode = 'x3d'\n elif path.suffix == '.swc':\n mode = 'swc'\n elif path.suffix == '.xlsx':\n mode = 'xlsx'\n elif path.suffix == '.csv':\n mode = 'csv'\n elif path.suffix == '.tsv':\n mode = 'tsv'\n elif path.suffix in ['.tif', '.tiff']:\n mode = 'tif'\n else:\n mode = 'binary'\n\n if mode == 'binary':\n return cls.binary(net, out_path=out_path, overwrite=overwrite)\n elif mode == 'json':\n return cls.json(net, out_path=out_path, overwrite=overwrite)\n elif mode == 'mv3d':\n return cls.mv3d(net, out_path=out_path, overwrite=overwrite)\n elif mode == 'swc':\n return cls.swc(net, out_path=out_path, overwrite=overwrite, **kwargs)\n elif mode == 'x3d':\n return cls.x3d(net, out_path=out_path, overwrite=overwrite, **kwargs)\n elif mode == 'xlsx':\n return cls.xlsx(net, out_path=out_path, overwrite=overwrite)\n elif mode == 'csv':\n return cls.csv(net, out_path=out_path, overwrite=overwrite, **kwargs)\n elif mode == 'tsv':\n return cls.csv(net, out_path=out_path, overwrite=overwrite, separator='\\t')\n elif mode == 'tif':\n return cls.tif(net, out_path=out_path, overwrite=overwrite, **kwargs)\n else:\n net.logger.warn(f'Could not find mode \"{mode}\"')\n return None\n\n @staticmethod\n def binary(net, out_path='.', overwrite=False, save_steps=False):\n \"\"\"\n Export :class:`Qiber3D.Network` as binary file (:file:`.qiber`).\n\n :param Qiber3D.Network net: network to export\n :param out_path: file or folder path where to save the network\n :type out_path: str, Path\n :param bool overwrite: allow file overwrite\n :param bool save_steps: save extraction steps image stacks\n :return: path to saved file\n :rtype: Path\n \"\"\"\n out_path, needs_unlink = helper.out_path_check(out_path, network=net, prefix='', suffix='.qiber',\n overwrite=overwrite, logger=net.logger)\n if out_path is None:\n return\n\n work_dir = Path(out_path.with_suffix(''))\n work_dir.mkdir(parents=True, exist_ok=True)\n\n add_paths = [IO.export.json(net, out_path=work_dir / 'network.json', overwrite=True)]\n if save_steps:\n if isinstance(net.extractor_steps, helper.NumpyMemoryManager):\n net.extractor_steps.save(work_dir / 'extractor_steps.tar')\n add_paths.append(work_dir / 'extractor_steps.tar')\n with ZipFile(out_path, mode='w') as save_file:\n for add_path in add_paths:\n save_file.write(add_path, arcname=add_path.name)\n add_path.unlink()\n work_dir.rmdir()\n\n return out_path\n\n @staticmethod\n def xlsx(net, out_path='.', overwrite=False):\n \"\"\"\n Export :class:`Qiber3D.Network` as Excel file (:file:`.xlsx`).\n\n :param Qiber3D.Network net: network to export\n :param out_path: file or folder path where to save the network\n 
:type out_path: str, Path\n            :param bool overwrite: allow file overwrite\n            :return: path to saved file\n            :rtype: Path\n            \"\"\"\n            out_path, needs_unlink = helper.out_path_check(out_path, network=net, prefix='', suffix='.xlsx',\n                                                           overwrite=overwrite, logger=net.logger)\n            if out_path is None:\n                return\n\n            net_properties = {\n                'average_radius': 'Average radius',\n                'max_radius': 'Max radius',\n                'cylinder_radius': 'Equal cylinder radius',\n                'length': 'Length',\n                'volume': 'Volume',\n                'bbox_volume': 'Bounding box volume',\n                'bbox': 'Bounding box',\n                'bbox_size': 'Bounding box size',\n                'center': 'Bounding box center'\n            }\n            fiber_seg_properties = {\n                'average_radius': 'Average radius',\n                'max_radius': 'Max radius',\n                'cylinder_radius': 'Equal cylinder radius',\n                'length': 'Length',\n                'volume': 'Volume',\n                # 'raster_volume',\n            }\n\n            wb = Workbook(write_only=True)\n            ws = wb.create_sheet('Network')\n            title = WriteOnlyCell(ws, f'{net.name}')\n            ws.column_dimensions['A'].width = 21\n            ws.column_dimensions['B'].width = 21\n            title.style = 'Title'\n            ws.append([title])\n            ws.append([config.app_name, config.version])\n            ws.append([])\n            subtitle = WriteOnlyCell(ws, 'Metadata')\n            empty_subtitle = WriteOnlyCell(ws, '')\n            subtitle.style = 'Headline 3'\n            empty_subtitle.style = 'Headline 3'\n            ws.append([subtitle, empty_subtitle])\n            if isinstance(net.input_file, Path):\n                ws.append(['Source file', str(net.input_file.absolute())])\n            else:\n                ws.append(['Source file', '-'])\n            ws.append(['Creation date', datetime.now()])\n\n            ws.append([])\n            subtitle = WriteOnlyCell(ws, 'Network measurements')\n            empty_subtitle = WriteOnlyCell(ws, '')\n            subtitle.style = 'Headline 3'\n            empty_subtitle.style = 'Headline 3'\n            ws.append([subtitle, empty_subtitle])\n            ws.append(['Number of fibers', len(net.fiber)])\n            ws.append(['Number of segments', len(net.segment)])\n            ws.append(['Number of points', len(net.point)])\n            ws.append(['Number of branch points', len(net.cross_point_dict)])\n            for key, description in net_properties.items():\n                value = getattr(net, key)\n                if type(value) == np.ndarray:\n                    value = str(value.tolist())\n                if isinstance(value, (np.floating, float)):\n                    value = WriteOnlyCell(ws, value=value)\n                    value.number_format = '0.00'\n                ws.append([description, value])\n\n            ws = wb.create_sheet('Fibers')\n            ws.column_dimensions['A'].width = 21\n            ws.column_dimensions['B'].width = 21\n            for fid, fiber in net.fiber.items():\n                subtitle = WriteOnlyCell(ws, f'Fiber {fid} measurements')\n                empty_subtitle = WriteOnlyCell(ws, '')\n                subtitle.style = 'Headline 3'\n                empty_subtitle.style = 'Headline 3'\n                ws.append([subtitle, empty_subtitle])\n                ws.append(['Number of segments', len(fiber.segment)])\n                ws.append(['Number of points', sum([len(seg) for seg in fiber.segment.values()])])\n                branch_points_raw = sum((list(a) for a in fiber.graph.edges), [])\n                check = []\n                bp_set = set()\n                for bp in branch_points_raw:\n                    if bp in check:\n                        bp_set.add(bp)\n                    else:\n                        check.append(bp)\n                ws.append(['Number of branch points', len(bp_set)])\n                for key, description in fiber_seg_properties.items():\n                    value = getattr(fiber, key)\n                    if type(value) == np.ndarray:\n                        value = str(value.tolist())\n                    if isinstance(value, (np.floating, float)):\n                        value = WriteOnlyCell(ws, value=value)\n                        value.number_format = '0.00'\n                    ws.append([description, value])\n                ws.append(['Segment list'] + [sid for sid in fiber.segment.keys()])\n                ws.append([])\n\n            ws = wb.create_sheet('Segments')\n            ws.column_dimensions['A'].width = 21\n            ws.column_dimensions['B'].width = 21\n            for sid, segment in net.segment.items():\n                subtitle = 
WriteOnlyCell(ws, f'Segment {sid} measurements')\n                empty_subtitle = WriteOnlyCell(ws, '')\n                subtitle.style = 'Headline 3'\n                empty_subtitle.style = 'Headline 3'\n                ws.append([subtitle, empty_subtitle])\n                ws.append(['Number of points', len(segment)])\n                for key, description in fiber_seg_properties.items():\n                    value = getattr(segment, key)\n                    if type(value) == np.ndarray:\n                        value = str(value.tolist())\n                    if isinstance(value, (np.floating, float)):\n                        value = WriteOnlyCell(ws, value=value)\n                        value.number_format = '0.00'\n                    ws.append([description, value])\n                ws.append([])\n\n            ws = wb.create_sheet('Points')\n            ws.append(['FID', 'SID', 'X', 'Y', 'Z', 'Radius'])\n            for fid, fiber in net.fiber.items():\n                for sid, segment in fiber.segment.items():\n                    for n, (x, y, z) in enumerate(segment.point):\n                        x = WriteOnlyCell(ws, value=x)\n                        x.number_format = '0.000'\n                        y = WriteOnlyCell(ws, value=y)\n                        y.number_format = '0.000'\n                        z = WriteOnlyCell(ws, value=z)\n                        z.number_format = '0.000'\n                        r = WriteOnlyCell(ws, value=segment.radius[n])\n                        r.number_format = '0.000'\n                        ws.append([fid, sid, x, y, z, r])\n\n            wb.save(out_path)\n            return out_path\n\n        @staticmethod\n        def csv(net, out_path='.', overwrite=False, separator=';'):\n            \"\"\"\n            Export :class:`Qiber3D.Network` as :file:`.csv` file.\n\n            :param Qiber3D.Network net: network to export\n            :param out_path: file or folder path where to save the network\n            :type out_path: str, Path\n            :param bool overwrite: allow file overwrite\n            :param str separator: char to separate values\n            :return: path to saved file\n            :rtype: Path\n            \"\"\"\n            out_path, needs_unlink = helper.out_path_check(out_path, network=net, prefix='', suffix='.csv',\n                                                           overwrite=overwrite, logger=net.logger)\n            if out_path is None:\n                return\n            out_text = separator.join(('FID', 'SID', 'X', 'Y', 'Z', 'Radius')) + '\\n'\n            for fid, fiber in net.fiber.items():\n                for sid, segment in fiber.segment.items():\n                    for n, (x, y, z) in enumerate(segment.point):\n                        out_text += separator.join([str(fid), str(sid), f'{x:.3f}', f'{y:.3f}', f'{z:.3f}',\n                                                    f'{segment.radius[n]:.3f}']) + '\\n'\n\n            out_path.write_text(out_text)\n            return out_path\n\n        @staticmethod\n        def swc(net, out_path='.', overwrite=False, multiple_files=False):\n            \"\"\"\n            Export :class:`Qiber3D.Network` as :file:`.swc` file.\n\n            :param Qiber3D.Network net: network to export\n            :param out_path: file or folder path where to save the network\n            :type out_path: str, Path\n            :param bool overwrite: allow file overwrite\n            :param bool multiple_files: save each fiber as separate swc file\n            :return: path to saved file\n            :rtype: Path\n            \"\"\"\n            out_path, needs_unlink = helper.out_path_check(out_path, network=net, prefix='', suffix='.swc',\n                                                           overwrite=overwrite, logger=net.logger)\n            if out_path is None:\n                return\n            if isinstance(net.input_file, Path):\n                raw = str(net.input_file.absolute())\n            else:\n                raw = str(net.input_file)\n            header = dedent(f\"\"\"\\\n                # ORIGINAL_SOURCE created by {config.app_name} {config.version}\n                # CREATURE\n                # REGION\n                # FIELD/LAYER\n                # TYPE {net.name}\n                # CONTRIBUTOR\n                # REFERENCE \n                # RAW {raw} \n                # EXTRAS \n                # SOMA_AREA \n                # SHRINKAGE_CORRECTION\n                # VERSION_NUMBER \n                # VERSION_DATE \n                # SCALE 1.0 1.0 1.0 micrometers\n                # SEGMENT_TYPE 1\t {net.cylinder_radius:.2f} UNSPECIFIC\n                \"\"\")\n\n            graph = nx.Graph()\n            point_lookup = helper.PointLookUp()\n            radius_lookup = {}\n            center = net.center\n            for segment in net.segment.values():\n                last_pid = None\n                for n, raw_point in enumerate(segment.point):\n                    point = raw_point - center\n                    if point in point_lookup:\n                        current_pid = point_lookup[point]\n                    
else:\n                        current_pid = point_lookup[point]\n                        radius_lookup[current_pid] = segment.radius[n]\n                    if last_pid is not None:\n                        graph.add_edge(current_pid, last_pid)\n                    last_pid = current_pid\n\n            network_data = []\n            for fiber in (graph.subgraph(c).copy() for c in nx.connected_components(graph)):\n                fiber_data = []\n                start_points = None\n                stop_points = []\n                for node in fiber:\n                    if len(fiber.adj[node]) == 1:\n                        start_points = [(node,)]\n                        break\n                if start_points is None:\n                    start_points = [(list(fiber.nodes)[0],)]\n                while start_points:\n                    start = start_points.pop()\n                    if len(start) == 1:\n                        x, y, z = point_lookup[start[0]]\n                        fiber_data.append((start[0], 1, x, y, z, radius_lookup[start[0]], -1))\n                        start = start[0]\n                    else:\n                        if not fiber.has_edge(start[0], start[1]):\n                            continue\n                        x, y, z = point_lookup[start[1]]\n                        fiber_data.append((start[1], 1, x, y, z, radius_lookup[start[1]], start[0]))\n                        fiber.remove_edge(start[0], start[1])\n                        start = start[1]\n                    for f, t in nx.dfs_successors(fiber, start).items():\n                        if len(t) == 1:\n                            x, y, z = point_lookup[t[0]]\n                            fiber_data.append((t[0], 1, x, y, z, radius_lookup[t[0]], f))\n                            fiber.remove_edge(f, t[0])\n                            if t[0] in stop_points:\n                                break\n                        elif len(t) > 1:\n                            for paths in t:\n                                start_points.append((f, paths))\n                            stop_points.append(f)\n                            break\n                network_data.append(fiber_data)\n\n            if multiple_files:\n                base_dir = out_path.with_suffix('')\n                base_dir.mkdir(exist_ok=True)\n                for n, point_list in enumerate(network_data):\n                    part_out_path = base_dir / f'{out_path.stem}_{n+1:04}{out_path.suffix}'\n                    local_point_lookup = {-1: -1}\n                    for pid, entry in enumerate(point_list):\n                        local_point_lookup[entry[0]] = pid + 1\n                    point_text = [f'{local_point_lookup[entry[0]]:d} {entry[1]:4d} {entry[2]:8.1f} {entry[3]:8.1f} '\n                                  f'{entry[4]:8.1f} {entry[5]:6.2f} {local_point_lookup[entry[6]]:4d}'\n                                  for entry in point_list]\n                    out_text = header + '\\n'.join(point_text)\n                    part_out_path.write_text(out_text)\n            else:\n                offset = 0\n                point_text = []\n                for n, point_list in enumerate(network_data):\n                    local_point_lookup = {-1: -1}\n                    for pid, entry in enumerate(point_list):\n                        local_point_lookup[entry[0]] = pid + 1 + offset\n                    point_text += [f'{local_point_lookup[entry[0]]:d} {entry[1]:4d} {entry[2]:8.1f} '\n                                   f'{entry[3]:8.1f} {entry[4]:8.1f} {entry[5]:6.2f} {local_point_lookup[entry[6]]:4d}'\n                                   for entry in point_list]\n                    offset += len(point_list)\n                out_text = header + '\\n'.join(point_text)\n                out_path.write_text(out_text)\n\n            return out_path\n\n        @staticmethod\n        def mv3d(net, out_path='.', overwrite=False):\n            \"\"\"\n            Export :class:`Qiber3D.Network` as :file:`.mv3d` file.\n\n            :param Qiber3D.Network net: network to export\n            :param out_path: file or folder path where to save the network\n            :type out_path: str, Path\n            :param bool overwrite: allow file overwrite\n            :return: path to saved file\n            :rtype: Path\n            \"\"\"\n            out_path, needs_unlink = helper.out_path_check(out_path, network=net, prefix='', suffix='.mv3d',\n                                                           overwrite=overwrite, logger=net.logger)\n            if out_path is None:\n                return\n\n            number_of_points = 0\n            data_part = \"\"\n            for seg in net.segment.values():\n                seg_data = []\n                for n, (x, y, z) in enumerate(seg.point):\n                    seg_data.append(f\"{seg.sid}\\t{x:.3f}\\t{y:.3f}\\t{z:.3f}\\t{seg.d[n]:.3f}\")\n                data_part += \"\\n\".join(seg_data) + \"\\n\\n\"\n                number_of_points += len(seg.d)\n            header = dedent(f\"\"\"\\\n                # MicroVisu3D file (created by {config.app_name} {config.version})\n                # Number of lines {len(net.segment)}\n                # Number of points {number_of_points}\n                # Number of inter. 
{len(net.cross_point_dict)}\n #\n # No\\tx\\ty\\tz\\td\n #\\n\"\"\")\n out_path.write_text(header + data_part)\n return out_path\n\n @staticmethod\n def x3d(net, out_path='.', overwrite=False,\n color_mode='flat', color_map='jet', color=None, object_type=None, segment_list=None,\n azimuth=None, elevation=None, roll=None):\n \"\"\"\n Export :class:`Qiber3D.Network` as :file:`.x3d` file.\n\n :param Qiber3D.Network net: network to export\n :param out_path: file or folder path where to save the network\n :type out_path: str, Path\n :param bool overwrite: allow file overwrite\n :param str color_mode: sets the way to color the network\n choose one of ['flat', 'fiber', 'fiber_length', 'fiber_volume', 'segment',\n 'segment_length', 'segment_volume', 'fiber_segment_ratio']\n :param str color_map: name of a matplotlib colormap\n :param tuple(float) color: color if color_mode is `'flat'`\n :param str object_type: when set to `'line'` render center line\n :param tuple segment_list: limit the visualisation to certain segment (use sid)\n :param float azimuth: change camera azimuth\n :param float elevation: change camera elevation\n :param float roll: roll camera\n :return: path to saved file\n :rtype: Path\n \"\"\"\n out_path = net.render.export_x3d(out_path=out_path, overwrite=overwrite,\n color_mode=color_mode, color_map=color_map, color=color,\n object_type=object_type, segment_list=segment_list,\n azimuth=azimuth, elevation=elevation, roll=roll)\n return out_path\n\n @staticmethod\n def tif(net, out_path='.', overwrite=False, voxel_resolution=None, segment_list=None):\n \"\"\"\n Export :class:`Qiber3D.Network` as :file:`.tif` image stack.\n\n :param Qiber3D.Network net: network to export\n :param out_path: file or folder path where to save the network, if `None` show the plot.\n :type out_path: str, Path\n :param bool overwrite: allow file overwrite\n :param float voxel_resolution: number of voxels per unit length\n :param tuple segment_list: limit the visualisation to certain segment (use sid)\n :return: path to saved file\n :rtype: Path\n \"\"\"\n\n out_path = net.render.export_image_stack(out_path=out_path, overwrite=overwrite,\n voxel_resolution=voxel_resolution, segment_list=segment_list)\n\n return out_path\n\n @staticmethod\n def json(net, out_path='.', overwrite=False):\n \"\"\"\n Export :class:`Qiber3D.Network` as :file:`.json` file.\n\n :param Qiber3D.Network net: network to export\n :param out_path: file or folder path where to save the network\n :type out_path: str, Path\n :param bool overwrite: allow file overwrite\n :return: path to saved file\n :rtype: Path\n \"\"\"\n out_path, needs_unlink = helper.out_path_check(out_path, network=net, prefix='', suffix='.json',\n overwrite=overwrite, logger=net.logger)\n if out_path is None:\n return\n\n def clean_value(value):\n if type(value) == np.ndarray:\n value = value.tolist()\n elif type(value) in [np.float32, np.float64]:\n value = float(value)\n elif isinstance(value, int):\n value = int(value)\n return value\n\n data = dict(\n meta=dict(\n created=datetime.now().isoformat(),\n app_name=config.app_name,\n app_version=config.version\n ),\n network=dict(\n fiber=dict(),\n extractor_data=None\n )\n )\n\n if net.extractor_data:\n data['network']['extractor_data'] = dict()\n for key, value in net.extractor_data.items():\n if 'processing_data' == key:\n data['network']['extractor_data'][key] = dict()\n for pro_key in net.extractor_data[key]:\n data['network']['extractor_data'][key][pro_key] = dict()\n for sub_key, sub_value in 
net.extractor_data[key][pro_key].items():\n data['network']['extractor_data'][key][pro_key][sub_key] = clean_value(sub_value)\n else:\n data['network']['extractor_data'][key] = clean_value(value)\n\n if net.input_file:\n data['meta']['source'] = str(net.input_file.absolute())\n else:\n data['meta']['source'] = None\n\n if net.name:\n data['meta']['name'] = net.name\n else:\n data['meta']['name'] = None\n\n for key in ['bbox', 'bbox_size', 'bbox_volume', 'center', 'volume', 'average_radius',\n 'max_radius', 'cylinder_radius', 'length']:\n data['network'][key] = clean_value(getattr(net, key))\n\n for fid, fiber in net.fiber.items():\n data['network']['fiber'][fid] = dict()\n for key in ['fid', 'volume', 'average_radius', 'cylinder_radius', 'length', 'sid_list']:\n data['network']['fiber'][fid][key] = clean_value(getattr(fiber, key))\n\n data['network']['segment'] = dict()\n for sid, segment in net.segment.items():\n data['network']['segment'][sid] = dict()\n for key in ['sid', 'volume', 'average_radius', 'cylinder_radius', 'length',\n 'direction', 'point', 'radius']:\n data['network']['segment'][sid][key] = clean_value(getattr(segment, key))\n\n out_path.write_text(json.dumps(data))\n return out_path\n", "repo_name": "theia-dev/Qiber3D", "sub_path": "Qiber3D/io.py", "file_name": "io.py", "file_ext": "py", "file_size_in_byte": 43963, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pathlib.Path", "line_number": 33, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 61, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 62, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 63, "usage_type": "call"}, {"api_name": "Qiber3D.helper.NumpyMemoryManager.load", "line_number": 65, "usage_type": "call"}, {"api_name": "Qiber3D.helper.NumpyMemoryManager", "line_number": 65, "usage_type": "attribute"}, {"api_name": "Qiber3D.helper", "line_number": 65, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 66, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 85, "usage_type": "call"}, {"api_name": "Qiber3D.config.extract", "line_number": 88, "usage_type": "attribute"}, {"api_name": "Qiber3D.config", "line_number": 88, "usage_type": "name"}, {"api_name": "Qiber3D.Extractor", "line_number": 96, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 110, "usage_type": "call"}, {"api_name": "Qiber3D.Extractor", "line_number": 111, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 125, "usage_type": "call"}, {"api_name": "Qiber3D.Network", "line_number": 139, "usage_type": "call"}, {"api_name": "Qiber3D.Network", "line_number": 179, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 190, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 195, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 225, "usage_type": "call"}, {"api_name": "Qiber3D.Network", "line_number": 234, "usage_type": "call"}, {"api_name": "networkx.connected_components", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 
282, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 288, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 309, "usage_type": "call"}, {"api_name": "Qiber3D.helper.PointLookUp", "line_number": 310, "usage_type": "call"}, {"api_name": "Qiber3D.helper", "line_number": 310, "usage_type": "name"}, {"api_name": "networkx.Graph", "line_number": 313, "usage_type": "call"}, {"api_name": "Qiber3D.Network", "line_number": 328, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 340, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 348, "usage_type": "call"}, {"api_name": "Qiber3D.Network", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 360, "usage_type": "call"}, {"api_name": "scipy.interpolate.splprep", "line_number": 361, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 361, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 362, "usage_type": "call"}, {"api_name": "scipy.interpolate.splev", "line_number": 363, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 363, "usage_type": "name"}, {"api_name": "numpy.stack", "line_number": 364, "usage_type": "call"}, {"api_name": "Qiber3D.helper.PointLookUp", "line_number": 415, "usage_type": "call"}, {"api_name": "Qiber3D.helper", "line_number": 415, "usage_type": "name"}, {"api_name": "networkx.Graph", "line_number": 417, "usage_type": "call"}, {"api_name": "Qiber3D.Network", "line_number": 433, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 456, "usage_type": "call"}, {"api_name": "Qiber3D.helper.out_path_check", "line_number": 513, "usage_type": "call"}, {"api_name": "Qiber3D.helper", "line_number": 513, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 518, "usage_type": "call"}, {"api_name": "Qiber3D.helper.NumpyMemoryManager", "line_number": 523, "usage_type": "attribute"}, {"api_name": "Qiber3D.helper", "line_number": 523, "usage_type": "name"}, {"api_name": "zipfile.ZipFile", "line_number": 526, "usage_type": "call"}, {"api_name": "Qiber3D.helper.out_path_check", "line_number": 546, "usage_type": "call"}, {"api_name": "Qiber3D.helper", "line_number": 546, "usage_type": "name"}, {"api_name": "openpyxl.Workbook", "line_number": 571, "usage_type": "call"}, {"api_name": "openpyxl.cell.WriteOnlyCell", "line_number": 573, "usage_type": "call"}, {"api_name": "Qiber3D.config.app_name", "line_number": 578, "usage_type": "attribute"}, {"api_name": "Qiber3D.config", "line_number": 578, "usage_type": "name"}, {"api_name": "Qiber3D.config.version", "line_number": 578, "usage_type": "attribute"}, {"api_name": "openpyxl.cell.WriteOnlyCell", "line_number": 580, "usage_type": "call"}, {"api_name": "openpyxl.cell.WriteOnlyCell", "line_number": 581, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 585, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 589, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 589, "usage_type": "name"}, {"api_name": "openpyxl.cell.WriteOnlyCell", "line_number": 592, "usage_type": "call"}, {"api_name": 
"openpyxl.cell.WriteOnlyCell", "line_number": 593, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 603, "usage_type": "attribute"}, {"api_name": "numpy.floating", "line_number": 605, "usage_type": "attribute"}, {"api_name": "openpyxl.cell.WriteOnlyCell", "line_number": 606, "usage_type": "call"}, {"api_name": "openpyxl.cell.WriteOnlyCell", "line_number": 614, "usage_type": "call"}, {"api_name": "openpyxl.cell.WriteOnlyCell", "line_number": 615, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 632, "usage_type": "attribute"}, {"api_name": "numpy.floating", "line_number": 634, "usage_type": "attribute"}, {"api_name": "openpyxl.cell.WriteOnlyCell", "line_number": 635, "usage_type": "call"}, {"api_name": "openpyxl.cell.WriteOnlyCell", "line_number": 645, "usage_type": "call"}, {"api_name": "openpyxl.cell.WriteOnlyCell", "line_number": 646, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 653, "usage_type": "attribute"}, {"api_name": "numpy.floating", "line_number": 655, "usage_type": "attribute"}, {"api_name": "openpyxl.cell.WriteOnlyCell", "line_number": 656, "usage_type": "call"}, {"api_name": "openpyxl.cell.WriteOnlyCell", "line_number": 666, "usage_type": "call"}, {"api_name": "openpyxl.cell.WriteOnlyCell", "line_number": 668, "usage_type": "call"}, {"api_name": "openpyxl.cell.WriteOnlyCell", "line_number": 670, "usage_type": "call"}, {"api_name": "openpyxl.cell.WriteOnlyCell", "line_number": 672, "usage_type": "call"}, {"api_name": "Qiber3D.helper.out_path_check", "line_number": 692, "usage_type": "call"}, {"api_name": "Qiber3D.helper", "line_number": 692, "usage_type": "name"}, {"api_name": "Qiber3D.helper.out_path_check", "line_number": 719, "usage_type": "call"}, {"api_name": "Qiber3D.helper", "line_number": 719, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 723, "usage_type": "argument"}, {"api_name": "textwrap.dedent", "line_number": 727, "usage_type": "call"}, {"api_name": "Qiber3D.config.app_name", "line_number": 728, "usage_type": "attribute"}, {"api_name": "Qiber3D.config", "line_number": 728, "usage_type": "name"}, {"api_name": "Qiber3D.config.version", "line_number": 728, "usage_type": "attribute"}, {"api_name": "networkx.Graph", "line_number": 745, "usage_type": "call"}, {"api_name": "Qiber3D.helper.PointLookUp", "line_number": 746, "usage_type": "call"}, {"api_name": "Qiber3D.helper", "line_number": 746, "usage_type": "name"}, {"api_name": "networkx.connected_components", "line_number": 763, "usage_type": "call"}, {"api_name": "networkx.dfs_successors", "line_number": 786, "usage_type": "call"}, {"api_name": "Qiber3D.helper.out_path_check", "line_number": 841, "usage_type": "call"}, {"api_name": "Qiber3D.helper", "line_number": 841, "usage_type": "name"}, {"api_name": "textwrap.dedent", "line_number": 854, "usage_type": "call"}, {"api_name": "Qiber3D.config.app_name", "line_number": 855, "usage_type": "attribute"}, {"api_name": "Qiber3D.config", "line_number": 855, "usage_type": "name"}, {"api_name": "Qiber3D.config.version", "line_number": 855, "usage_type": "attribute"}, {"api_name": "Qiber3D.helper.out_path_check", "line_number": 927, "usage_type": "call"}, {"api_name": "Qiber3D.helper", "line_number": 927, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 933, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 935, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 935, "usage_type": "attribute"}, {"api_name": 
"datetime.datetime.now", "line_number": 943, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 943, "usage_type": "name"}, {"api_name": "Qiber3D.config.app_name", "line_number": 944, "usage_type": "attribute"}, {"api_name": "Qiber3D.config", "line_number": 944, "usage_type": "name"}, {"api_name": "Qiber3D.config.version", "line_number": 945, "usage_type": "attribute"}, {"api_name": "Qiber3D.config", "line_number": 945, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 991, "usage_type": "call"}]} +{"seq_id": "22347463139", "text": "from cvxpy.expressions import cvxtypes\nfrom cvxpy.constraints.leq_constraint import LeqConstraint\nfrom cvxpy.constraints.semidefinite import SDP\nimport cvxpy.lin_ops.lin_utils as lu\n\n\nclass PSDConstraint(LeqConstraint):\n \"\"\"Constraint X >> Y that z.T(X - Y)z >= 0 for all z.\n \"\"\"\n OP_NAME = \">>\"\n\n def __init__(self, lh_exp, rh_exp):\n # Arguments must be square matrices or scalars.\n if (lh_exp.size[0] != lh_exp.size[1]) or \\\n (rh_exp.size[0] != rh_exp.size[1]):\n raise ValueError(\n \"Non-square matrix in positive definite constraint.\"\n )\n super(PSDConstraint, self).__init__(lh_exp, rh_exp)\n\n def is_dcp(self):\n \"\"\"Both sides must be affine.\n \"\"\"\n return self._expr.is_affine()\n\n @property\n def residual(self):\n \"\"\"The residual of the constraint.\n\n Returns\n -------\n Expression\n \"\"\"\n min_eig = cvxtypes.lambda_min()(self._expr + self._expr.T)/2\n return cvxtypes.neg()(min_eig)\n\n def canonicalize(self):\n \"\"\"Returns the graph implementation of the object.\n\n Marks the top level constraint as the dual_holder,\n so the dual value will be saved to the EqConstraint.\n\n Returns:\n A tuple of (affine expression, [constraints]).\n \"\"\"\n obj, constraints = self._expr.canonical_form\n half = lu.create_const(0.5, (1, 1))\n symm = lu.mul_expr(half, lu.sum_expr([obj, lu.transpose(obj)]),\n obj.size)\n dual_holder = SDP(symm, enforce_sym=False, constr_id=self.id)\n return (None, constraints + [dual_holder])\n", "repo_name": "TomHeaven/cvxpy", "sub_path": "cvxpy/constraints/psd_constraint.py", "file_name": "psd_constraint.py", "file_ext": "py", "file_size_in_byte": 1663, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "cvxpy.constraints.leq_constraint.LeqConstraint", "line_number": 7, "usage_type": "name"}, {"api_name": "cvxpy.expressions.cvxtypes.lambda_min", "line_number": 34, "usage_type": "call"}, {"api_name": "cvxpy.expressions.cvxtypes", "line_number": 34, "usage_type": "name"}, {"api_name": "cvxpy.expressions.cvxtypes.neg", "line_number": 35, "usage_type": "call"}, {"api_name": "cvxpy.expressions.cvxtypes", "line_number": 35, "usage_type": "name"}, {"api_name": "cvxpy.lin_ops.lin_utils.create_const", "line_number": 47, "usage_type": "call"}, {"api_name": "cvxpy.lin_ops.lin_utils", "line_number": 47, "usage_type": "name"}, {"api_name": "cvxpy.lin_ops.lin_utils.mul_expr", "line_number": 48, "usage_type": "call"}, {"api_name": "cvxpy.lin_ops.lin_utils", "line_number": 48, "usage_type": "name"}, {"api_name": "cvxpy.lin_ops.lin_utils.sum_expr", "line_number": 48, "usage_type": "call"}, {"api_name": "cvxpy.lin_ops.lin_utils.transpose", "line_number": 48, "usage_type": "call"}, {"api_name": "cvxpy.constraints.semidefinite.SDP", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "4521156116", "text": "# Development settings file\nimport json\nimport os\nimport dj_database_url\nfrom 
django.core.exceptions import ImproperlyConfigured\nfrom os.path import exists\nfrom .base import *\n\n\n\"\"\"\nConfigure environment and environment file location.\nNote: The environment file (which contains secrets) should be stored outside of VCS.\n\"\"\"\n\nON_HEROKU_SERVER = 'ON_HEROKU_SERVER'\nON_PRODUCTION = 'ON_PRODUCTION'\nHEROKU_ENV_KEY = 'DEVELOPMENT_ENV'\nLOCAL_ENV_LOCATION = ''\n\nON_HEROKU = False\nif ON_HEROKU_SERVER in os.environ:\n ON_HEROKU = True\n\nif ON_PRODUCTION in os.environ:\n raise ImproperlyConfigured(\"The development settings are trying to run on the production environment!\")\n\nif ON_HEROKU:\n ENV_JSON = json.loads(os.environ.get(HEROKU_ENV_KEY, None))\nelse:\n LOCAL_ENV_LOCATION = os.environ.get(LOCAL_ENV_LOCATION, dirname(dirname(dirname(dirname(__file__)))))\n ENV_FILE = join(LOCAL_ENV_LOCATION, 'django_email_hunter_local.env.json')\n if not exists(ENV_FILE):\n raise ImproperlyConfigured(\"No local environment file was found in directory: {0}\".format(LOCAL_ENV_LOCATION))\n with open(ENV_FILE) as data_file:\n ENV_JSON = json.load(data_file)\nif not ENV_JSON:\n raise ImproperlyConfigured(\"No environment variables were found\")\n\n\n\"\"\"\nDjango debug toolbar settings\n\"\"\"\n\nENABLE_DEBUG_TOOLBAR = False\nif ENABLE_DEBUG_TOOLBAR:\n INSTALLED_APPS += [\n 'debug_toolbar',\n ]\n\n MIDDLEWARE += [\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n ]\n\n\n\"\"\"\nDjango SSL Development Server Configuration\nInstead of using ngrok (which has request limits) we may use the SSLServer development server for https.\n\"\"\"\n\nif not ON_HEROKU:\n INSTALLED_APPS = ['sslserver'] + INSTALLED_APPS\n\n\n\"\"\"\nGeneral Django Debug settings.\n\"\"\"\n\nDEBUG = True\nTEMPLATES[0]['OPTIONS'].update({'debug': DEBUG})\n\nif not ON_HEROKU:\n INSTALLED_APPS.insert(0, 'whitenoise.runserver_nostatic')\n\nALLOWED_HOSTS = ['*']\nCORS_ORIGIN_ALLOW_ALL = True\n\nSECRET_KEY = ENV_JSON.get('DJANGO_SECRET_KEY', None)\n\n\n\"\"\"\nDatabase configuration.\nWe need the multi-tenant backend for schema routing.\nTODO: Resolve this issue: https://github.com/tomturner/django-tenants/issues/149\n\"\"\"\n\nif ON_HEROKU:\n DATABASES = {}\n DATABASES['default'] = dj_database_url.config(conn_max_age=500, engine='django_tenants.postgresql_backend')\nelse:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': ENV_JSON.get('DATABASE_NAME'),\n 'USER': ENV_JSON.get('DATABASE_USER'),\n 'PASSWORD': ENV_JSON.get('DATABASE_PW'),\n 'HOST': ENV_JSON.get('DATABASE_HOST'),\n 'PORT': '5432',\n }\n }\n\n\n\"\"\"\nDjango Storage AWS Configuration settings\n\"\"\"\n\n# AWS_ACCESS_KEY_ID = ENV_JSON.get('AWS_ACCESS_KEY_ID', None)\n# AWS_SECRET_ACCESS_KEY = ENV_JSON.get('AWS_SECRET_ACCESS_KEY', None)\n# AWS_STORAGE_BUCKET_NAME = \"charity-returns-storage\"\n# AWS_DEFAULT_ACL = \"private\"\n# AWS_S3_ENCRYPTION = True\n# AWS_S3_FILE_OVERWRITE = True\n# AWS_S3_OBJECT_PARAMETERS = {\n# 'CacheControl': 'max-age=86400',\n# }\n# AWS_LOCATION = ''\n# DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\n\n\n\"\"\"\nDjango Task Queue settings.\n\"\"\"\n\nif ON_HEROKU:\n CELERY_BROKER_URL = os.environ.get('REDIS_URL', None)\nelse:\n CELERY_BROKER_URL = ENV_JSON.get('REDIS_URL', None)\n\n\n\n\"\"\"\nLogging Configuration\n\"\"\"\n\nimport logging.config\nfrom django.utils.log import DEFAULT_LOGGING\nLOGLEVEL = os.environ.get('LOGLEVEL', 'debug').upper()\nLOGGING_CONFIG = None", "repo_name": "ecmascriptguru/hunter", "sub_path": "config/settings/development.py", "file_name": 
"development.py", "file_ext": "py", "file_size_in_byte": 3512, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 25, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 28, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 30, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 32, "usage_type": "call"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 33, "usage_type": "call"}, {"api_name": "json.load", "line_number": 35, "usage_type": "call"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 37, "usage_type": "call"}, {"api_name": "dj_database_url.config", "line_number": 88, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 124, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 124, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 136, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 136, "usage_type": "attribute"}]} +{"seq_id": "26061000246", "text": "from utils.null_object import Null\n\n\nclass Rating:\n NULL = Null()\n\n def __init__(self, rating_id, associated_id, associated_type, user_id, rating_type, date, value):\n self.rating_id = rating_id\n self.associated_id = associated_id\n self.associated_type = associated_type\n self.user_id = user_id\n self.rating_type = rating_type\n self.date = date\n self.value = value\n", "repo_name": "HOP-Ubiquitous/walk-a-story-backend", "sub_path": "ratings/entities/rating.py", "file_name": "rating.py", "file_ext": "py", "file_size_in_byte": 419, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "utils.null_object.Null", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "9244882021", "text": "from typing import List\n\nclass Solution:\n def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:\n normalized_str = ''.join([c.lower() if c.isalnum() else ' ' for c in paragraph])\n \n words = normalized_str.split()\n \n word_count = {}\n banned_words = set(banned)\n \n for word in words:\n if word not in banned_words:\n word_count[word] += 1\n\n return max(word_count, key=word_count.get)", "repo_name": "fredzhangziji/Leetcode", "sub_path": "Python/String/819_Most_Common_Word.py", "file_name": "819_Most_Common_Word.py", "file_ext": "py", "file_size_in_byte": 484, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.List", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "37395129462", "text": "from pyrogram import Client, filters, enums\nfrom cr import *\nfrom message import *\nimport asyncio\nimport random\nimport string\n\nbot = Client(\"Tg Earning Bot\", api_id=api_id, api_hash=api_hash, bot_token=bot_token)\n\n@bot.on_message(filters.command(\"start\"))\nasync def send_welcome(client, message):\n create_user_name = lambda user_first, user_last: f\"{user_first} {user_last}\" if user_last else 
f\"{user_first}\"\n user_first = message.from_user.first_name\n user_last = message.from_user.last_name\n user_name = create_user_name(user_first, user_last)\n tg_id = message.chat.id\n if collection.find_one({\"telegram_id\": tg_id}):\n await message.reply(welcome_message.format(user_name))\n else:\n collection.insert_one({\"telegram_id\": message.chat.id, \"bank_account\": 0, \"ifsc_code\": 0, \"balance\": 0, \"code\": 0})\n await message.reply(welcome_message.format(user_name))\n\n\n@bot.on_message(filters.command(\"help\"))\nasync def send_welcome(client, message):\n create_user_name = lambda user_first, user_last: f\"{user_first} {user_last}\" if user_last else f\"{user_first}\"\n user_first = message.from_user.first_name\n user_last = message.from_user.last_name\n user_name = create_user_name(user_first, user_last)\n tg_id = message.chat.id\n if collection.find_one({\"telegram_id\": tg_id}):\n await message.reply(welcome_message.format(user_name))\n else:\n collection.insert_one({\"telegram_id\": message.chat.id, \"bank_account\": 0, \"ifsc_code\": 0, \"balance\": 0, \"code\": 0})\n await message.reply(welcome_message.format(user_name))\n\n\n@bot.on_message(filters.command(\"wallet\"))\nasync def wallet(client, message):\n create_user_name = lambda user_first, user_last: f\"{user_first} {user_last}\" if user_last else f\"{user_first}\"\n user_first = message.from_user.first_name\n user_last = message.from_user.last_name\n user_name = create_user_name(user_first, user_last)\n tg_id = message.chat.id\n user_data = collection.find_one({\"telegram_id\": tg_id})\n if user_data:\n account_number = user_data.get(\"bank_account\", \"N/A\")\n ifsc_code = user_data.get(\"ifsc_code\", \"N/A\")\n balance = user_data.get(\"balance\", \"N/A\")\n else:\n account_number = \"Unable To Find Your Info Try Again!\"\n ifsc_code = \"Unable To Find Your Info Try Again!\"\n balance = \"Unable To Find Your Info Try Again!\"\n await message.reply(information_message.format(user_name, balance, account_number, ifsc_code))\n\n\n@bot.on_message(filters.command(\"add_bank\"))\nasync def add_bank(client, message): \n tg_id = message.chat.id\n user_data = collection.find_one({\"telegram_id\": tg_id})\n if user_data:\n account_number = user_data.get(\"bank_account\")\n if account_number:\n await message.reply(f\"Account Number {account_number} is already linked.\")\n return\n account_number = message.text.split()[1] if len(message.text.split()) > 1 else None\n if account_number:\n collection.update_one({\"telegram_id\": tg_id}, {\"$set\": {\"bank_account\": account_number}}, upsert=True)\n await message.reply(f\"Account Number {account_number} linked successfully.\")\n else:\n await message.reply(f\"Account Number {account_number} linked successfully.\")\n\n\n@bot.on_message(filters.command(\"add_ifsc\"))\nasync def add_ifsc(client, message):\n user = message.from_user\n tg_id = message.chat.id\n user_data = collection.find_one({\"telegram_id\": tg_id})\n if user_data:\n ifsc_code = user_data.get(\"ifsc_code\")\n if ifsc_code:\n await message.reply(f\"IFSC CODE {ifsc_code} is already linked.\")\n return\n ifsc_code = await message.text.split()[1] if len(message.text.split()) > 1 else None\n if ifsc_code:\n collection.update_one({\"telegram_id\": tg_id}, {\"$set\": {\"ifsc_code\": ifsc_code}}, upsert=True)\n await message.reply(f\"IFSC Code {ifsc_code} linked successfully.\")\n else:\n await message.reply(\"Please provide your account number with the command.\\n\\nEg - /add_ifsc IFSC123456\\n\\nYou Have Only One 
Chance To Enter Your IFSC Code\")\n\n\n@bot.on_message(filters.command(\"earn\"))\nasync def earn(client, message):\n    code = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))\n    tg_id = message.chat.id\n    collection.update_one({\"telegram_id\": tg_id}, {\"$set\": {\"code\": code}})\n    await message.reply(earn_message.format(tg_id))\n\n\n@bot.on_message(filters.command(\"add_point\"))\nasync def add_point(client, message):\n    tg_id = message.chat.id\n    message_text = message.text.split('/add_point ')[1].strip()\n    user_data = collection.find_one({\"telegram_id\": tg_id})\n    code = user_data.get(\"code\", \"N/A\")\n    collection.update_one({\"telegram_id\": tg_id}, {\"$set\": {\"code\": 0}})\n    if message_text == code:\n        add_balance = 1\n        collection.update_one({\"telegram_id\": tg_id}, {\"$inc\": {\"balance\": add_balance}})\n        await message.reply(\"1 points added to your balance!\")\n    else:\n        await message.reply(\"Sorry, try again with a valid code.\nClick Here -> /earn\")\n    \n    if user_data:\n        account_number = user_data.get(\"bank_account\", \"N/A\")\n        ifsc_code = user_data.get(\"ifsc_code\", \"N/A\")\n        balance = user_data.get(\"balance\", \"N/A\")\n    \n    if balance > 0:\n        await client.send_message(chat_id=\"-1001672623581\", text= f\"This Account Crose 1000 point {account_number} {ifsc_code} {balance} {message.from_user.username}\" )\n\n\n@bot.on_message(filters.command(\"info_all\"))\nasync def info_all(client, message):\n    user_tg_id = message.chat.id\n    admins = [5397992078, ]\n    if user_tg_id in admins:\n        # Retrieve the chat IDs for all users in the database\n        chat_ids = [user['telegram_id'] for user in collection.find()]\n\n        #admin Message\n        admin_message = message.text.split('/info_all ')[1].strip()\n        \n        # Loop through each chat ID and send the message\n        for chat_id in chat_ids:\n            try:\n                await client.send_message(chat_id=chat_id, text=admin_message)\n            except Exception as e:\n                # Handle any errors that occur while sending the message\n                print(f\"Error sending message to chat ID {chat_id}: {e}\")\n    else:\n        await message.reply(f\"Sorry!\n{message.from_user.full_name}\ni can't Understand You\nTry These Command\n/help -> To Understand Our System\n/wallet -> For Known Your Account And Balance Information\n/add_bank -> For add your Bank Account\n/add_ifsc -> For add Ifsc Code\n/earn -> For Start Earning\")\n\n\n@bot.on_message(filters.text & filters.private)\nasync def echo_all(client, message):\n    create_user_name = lambda user_first, user_last: f\"{user_first} {user_last}\" if user_last else f\"{user_first}\"\n    user_first = message.from_user.first_name\n    user_last = message.from_user.last_name\n    user_name = create_user_name(user_first, user_last)\n    await message.reply(welcome_message.format(user_name))\n\nbot.run() # Automatically start() and idle()", "repo_name": "moviessearchbot/TgEarning", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 6989, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pyrogram.Client", "line_number": 8, "usage_type": "call"}, {"api_name": "message.from_user", "line_number": 13, "usage_type": "attribute"}, {"api_name": "message.from_user", "line_number": 14, "usage_type": "attribute"}, {"api_name": "message.chat", "line_number": 16, "usage_type": "attribute"}, {"api_name": "message.reply", "line_number": 18, "usage_type": "call"}, {"api_name": "message.chat", "line_number": 20, "usage_type": "attribute"}, {"api_name": "message.reply", 
"line_number": 21, "usage_type": "call"}, {"api_name": "pyrogram.filters.command", "line_number": 10, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 10, "usage_type": "name"}, {"api_name": "message.from_user", "line_number": 27, "usage_type": "attribute"}, {"api_name": "message.from_user", "line_number": 28, "usage_type": "attribute"}, {"api_name": "message.chat", "line_number": 30, "usage_type": "attribute"}, {"api_name": "message.reply", "line_number": 32, "usage_type": "call"}, {"api_name": "message.chat", "line_number": 34, "usage_type": "attribute"}, {"api_name": "message.reply", "line_number": 35, "usage_type": "call"}, {"api_name": "pyrogram.filters.command", "line_number": 24, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 24, "usage_type": "name"}, {"api_name": "message.from_user", "line_number": 41, "usage_type": "attribute"}, {"api_name": "message.from_user", "line_number": 42, "usage_type": "attribute"}, {"api_name": "message.chat", "line_number": 44, "usage_type": "attribute"}, {"api_name": "message.reply", "line_number": 54, "usage_type": "call"}, {"api_name": "pyrogram.filters.command", "line_number": 38, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 38, "usage_type": "name"}, {"api_name": "message.chat", "line_number": 59, "usage_type": "attribute"}, {"api_name": "message.reply", "line_number": 64, "usage_type": "call"}, {"api_name": "message.text.split", "line_number": 66, "usage_type": "call"}, {"api_name": "message.text", "line_number": 66, "usage_type": "attribute"}, {"api_name": "message.reply", "line_number": 69, "usage_type": "call"}, {"api_name": "message.reply", "line_number": 71, "usage_type": "call"}, {"api_name": "pyrogram.filters.command", "line_number": 57, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 57, "usage_type": "name"}, {"api_name": "message.from_user", "line_number": 76, "usage_type": "attribute"}, {"api_name": "message.chat", "line_number": 77, "usage_type": "attribute"}, {"api_name": "message.reply", "line_number": 82, "usage_type": "call"}, {"api_name": "message.text.split", "line_number": 84, "usage_type": "call"}, {"api_name": "message.text", "line_number": 84, "usage_type": "attribute"}, {"api_name": "message.reply", "line_number": 87, "usage_type": "call"}, {"api_name": "message.reply", "line_number": 89, "usage_type": "call"}, {"api_name": "pyrogram.filters.command", "line_number": 74, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 74, "usage_type": "name"}, {"api_name": "random.choices", "line_number": 94, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 94, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 94, "usage_type": "attribute"}, {"api_name": "message.chat", "line_number": 95, "usage_type": "attribute"}, {"api_name": "message.reply", "line_number": 97, "usage_type": "call"}, {"api_name": "pyrogram.filters.command", "line_number": 92, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 92, "usage_type": "name"}, {"api_name": "message.chat", "line_number": 102, "usage_type": "attribute"}, {"api_name": "message.text.split", "line_number": 103, "usage_type": "call"}, {"api_name": "message.text", "line_number": 103, "usage_type": "attribute"}, {"api_name": "message.reply", "line_number": 110, "usage_type": "call"}, {"api_name": "message.reply", "line_number": 112, "usage_type": "call"}, {"api_name": "message.send_message", "line_number": 120, 
"usage_type": "call"}, {"api_name": "message.from_user", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pyrogram.filters.command", "line_number": 100, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 100, "usage_type": "name"}, {"api_name": "message.chat", "line_number": 125, "usage_type": "attribute"}, {"api_name": "message.text.split", "line_number": 132, "usage_type": "call"}, {"api_name": "message.text", "line_number": 132, "usage_type": "attribute"}, {"api_name": "message.reply", "line_number": 137, "usage_type": "call"}, {"api_name": "message.reply", "line_number": 142, "usage_type": "call"}, {"api_name": "message.from_user", "line_number": 142, "usage_type": "attribute"}, {"api_name": "pyrogram.filters.command", "line_number": 123, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 123, "usage_type": "name"}, {"api_name": "message.from_user", "line_number": 148, "usage_type": "attribute"}, {"api_name": "message.from_user", "line_number": 149, "usage_type": "attribute"}, {"api_name": "message.reply", "line_number": 151, "usage_type": "call"}, {"api_name": "pyrogram.filters.text", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pyrogram.filters", "line_number": 145, "usage_type": "name"}, {"api_name": "pyrogram.filters.private", "line_number": 145, "usage_type": "attribute"}]} +{"seq_id": "8649636133", "text": "from django.urls import path,include\nfrom homeApp import views\n\napp_name = 'homeApp'\n\nurlpatterns = [\n path('',views.homePageView.as_view(),name='home'),\n path('CV/',views.cvPageView.as_view(),name='cv'),\n # path('MyPath/',include('myPathApp.urls'),name='MyPath')\n]\n", "repo_name": "DorukSinayuc/mySite", "sub_path": "mySite/mySiteProject/homeApp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 275, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "homeApp.views.homePageView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "homeApp.views.homePageView", "line_number": 7, "usage_type": "attribute"}, {"api_name": "homeApp.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "homeApp.views.cvPageView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "homeApp.views.cvPageView", "line_number": 8, "usage_type": "attribute"}, {"api_name": "homeApp.views", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "5145190383", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\n# Create your models here.\n\nclass Post(models.Model):\n POST_TYPES = (\n (\"E\", \"Event\"),\n (\"C\", \"Charity\"),\n )\n user = models.ForeignKey(User, on_delete=models.CASCADE, related_name=\"post\")\n title = models.CharField(max_length=300)\n body = models.TextField(max_length=500)\n date_published = models.DateTimeField(default=timezone.now)\n upvote = models.IntegerField(default=0, null=True)\n downvote = models.IntegerField(default=0, null=True)\n post_type = models.CharField(max_length=1, choices=POST_TYPES)\n\n def __str__(self):\n return self.title\n\nclass Preference(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n value = models.BooleanField(null=True)\n \n def __str__(self):\n return 
f\"{self.user}:{self.post}:{self.value}\"\n \n\n", "repo_name": "iamu985/Awarem", "sub_path": "main/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 988, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.db.models.Model", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 6, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.db.models.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.db.models.BooleanField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "19431051219", "text": "from msilib.schema import ComboBox\nfrom typing import Type\nfrom unittest import result\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5 import QtWidgets\nfrom ui import Ui_MainWindow\nfrom PyQt5 import QtCore\nimport sys\nfrom combinatoric import Combinatoric\nfrom probablity import Probability\n\nclass mywindow(QtWidgets.QMainWindow):\n\n '''Конструктор главного окна'''\n def __init__(self):\n super(mywindow, self).__init__()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n\n # Устанавливаем изображение формулы\n self.ui.setupUi(self)\n pixmap = QPixmap(\"img/0.png\")\n self.ui.formulaLabel.setPixmap(pixmap)\n\n # Устанавливаем коннекты\n 
self.ui.neededHypotises.currentCellChanged.connect(self.setCell_n_table)\n self.ui.probability.currentCellChanged.connect(self.setCell_k_table)\n self.ui.spinBox.editingFinished.connect(self.setTableRowCount)\n self.ui.solveButton.clicked.connect(self.solve)\n self.ui.comboBox.currentIndexChanged.connect(self.changeFormulaImg)\n\n\n def changeFormulaImg(self):\n solveType = self.ui.comboBox.currentIndex()\n self.ui.formulaLabel.setPixmap(QPixmap(\"img/\" + str(solveType) + \".png\"))\n \n\n '''Изменить текущую строку таблицы k элементов, при изменении строки n элементов'''\n def setCell_k_table(self):\n curCell = self.ui.probability.currentRow()\n self.ui.neededHypotises.setCurrentCell(curCell, 0)\n\n\n '''Изменить текущую строку таблицы n элементов, при изменении строки k элементов'''\n def setCell_n_table(self):\n curCell = self.ui.neededHypotises.currentRow()\n self.ui.probability.setCurrentCell(curCell, 0)\n\n\n '''Установить m строк в таблицы'''\n def setTableRowCount(self):\n m = self.ui.spinBox.value()\n rowAmount = self.ui.neededHypotises.rowCount()\n if m < rowAmount:\n for i in range(rowAmount - 1, m - 1, -1):\n self.ui.neededHypotises.setCurrentCell(i, 0)\n self.removeRow()\n elif m > rowAmount:\n for i in range(rowAmount - 1, m - 1, 1):\n self.addRow()\n\n\n '''Добавить по одной строке в таблицы'''\n def addRow(self):\n lastRow = self.ui.neededHypotises.rowCount()\n self.ui.probability.insertRow(lastRow)\n self.ui.probability.setItem(lastRow, 0, QtWidgets.QTableWidgetItem(\"1\"))\n self.ui.neededHypotises.insertRow(lastRow)\n self.ui.neededHypotises.setItem(lastRow, 0, QtWidgets.QTableWidgetItem(\"0\"))\n self.ui.neededHypotises.item(lastRow, 0).setCheckState(QtCore.Qt.CheckState.Unchecked)\n self.updateHeaders()\n self.ui.spinBox.setValue(self.ui.neededHypotises.rowCount()) \n\n\n '''Удалить по одной строке из таблиц'''\n def removeRow(self):\n curRow = self.ui.neededHypotises.currentRow()\n if self.ui.neededHypotises.rowCount() > 1:\n self.ui.neededHypotises.removeRow(curRow)\n self.ui.probability.removeRow(curRow)\n self.updateHeaders()\n self.ui.spinBox.setValue(self.ui.neededHypotises.rowCount())\n\n\n '''Обновить заголовки строк в таблицах'''\n def updateHeaders(self):\n for i in range(self.ui.neededHypotises.rowCount()):\n item = QtWidgets.QTableWidgetItem(\"H_\" + str(i + 1))\n self.ui.neededHypotises.setVerticalHeaderItem(i, item)\n item = QtWidgets.QTableWidgetItem(\"P(A)_\" + str(i + 1))\n self.ui.probability.setVerticalHeaderItem(i, item)\n\n\n '''Вычислить результирующее значение'''\n def solve(self):\n k = self.ui.spinBox.value()\n hypotiseSum = 0\n hypotisesList = []\n probabilityList = []\n\n try:\n for i in range(0, k):\n hypotise = float(self.ui.neededHypotises.item(i, 0).text())\n probability = float(self.ui.probability.item(i, 0).text())\n if hypotise < 0 or hypotise > 1 or probability < 0 or probability > 1:\n QtWidgets.QMessageBox.warning(self, \"Ошибка ввода\", \"Некорректное значение вероятности в строке \" + str(i + 1)\n + \"\\nЗначение вероятности должно лежать в интервале [0; 1]\")\n return\n else:\n hypotisesList.append(hypotise)\n probabilityList.append(probability)\n hypotiseSum += hypotise\n except:\n QtWidgets.QMessageBox.warning(self, \"Ошибка ввода\", \"Некорректный формат ввода в строке \" + str(i + 1)\n + \"\\nВероятность задается десятичной дробью, целая часть от дробной должна отделяться точкой \\\".\\\"\")\n return\n \n if round(hypotiseSum, 12) != 1:\n QtWidgets.QMessageBox.warning(self, \"Ошибка ввода\", \"Сумма вероятностей гипотез 
должна равняться 1\")\n return\n \n # Полученный числитель разделить на число сочетаний n по k\n if (self.ui.comboBox.currentIndex() == 0) :\n result = Probability.fullProbability(hypotisesList, probabilityList)\n str_result = \"{:01.12f}\".format(result)\n self.ui.textEdit.setText(\"P(A) = \" + str_result)\n elif (self.ui.comboBox.currentIndex() == 1) :\n strResult = \"\"\n for i in range(0, k):\n if self.ui.neededHypotises.item(i, 0).checkState() == QtCore.Qt.CheckState.Checked:\n tmp = \"{:01.12f}\".format(Probability.bayesFormula(i, hypotisesList, probabilityList), 12)\n strResult = strResult + \"P_A_(H_\" + str(i + 1) + \") = \" + tmp + \"\\n\"\n if strResult == \"\":\n QtWidgets.QMessageBox.warning(self, \"Ошибка ввода\", \"Не выбрана ни одна гипотеза\\nОтметьте галочкой в таблице гипотезы для которых проиводить вычисления\")\n return\n self.ui.textEdit.setText(strResult)\n \n\n\n\n# Запуск главного окна \nif __name__ == '__main__': \n app = QtWidgets.QApplication([])\n application = mywindow()\n application.show()\n \n sys.exit(app.exec())", "repo_name": "DedZinoviy/terver_lab2_task4_v1", "sub_path": "LR2_task4/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6787, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 12, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 12, "usage_type": "name"}, {"api_name": "ui.Ui_MainWindow", "line_number": 17, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 22, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 35, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 67, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 67, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 69, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 69, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 70, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 70, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 88, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 88, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 90, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 90, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.warning", "line_number": 106, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 106, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 106, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.warning", "line_number": 114, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 114, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 114, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.warning", "line_number": 119, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 119, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 119, "usage_type": "name"}, {"api_name": "unittest.result", "line_number": 124, "usage_type": "name"}, {"api_name": "probablity.Probability.fullProbability", "line_number": 124, "usage_type": "call"}, {"api_name": "probablity.Probability", "line_number": 
124, "usage_type": "name"}, {"api_name": "unittest.result", "line_number": 125, "usage_type": "argument"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 130, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 130, "usage_type": "name"}, {"api_name": "probablity.Probability.bayesFormula", "line_number": 131, "usage_type": "call"}, {"api_name": "probablity.Probability", "line_number": 131, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.warning", "line_number": 134, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 134, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 134, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 143, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 143, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "31689120462", "text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pickle\r\n\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nfrom xgboost import XGBClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom xgboost import XGBClassifier\r\nimport shap\r\n\r\n\r\ndef load_data():\r\n with open('ecog/full_X_subset_v2.pickle', 'rb') as f:\r\n data_list = pickle.load(f)\r\n\r\n x = np.stack(data_list)\r\n\r\n with open('ecog/full_Y_labels_v2.pickle', 'rb') as f:\r\n labels_list = pickle.load(f)\r\n\r\n label_names = [tup[0] for tup in labels_list]\r\n labels = [tup[1] for tup in labels_list]\r\n\r\n labels = np.array(labels)\r\n return x, labels, label_names\r\n\r\n\r\ndef train_trad(x,labels, seed=86):\r\n\r\n # Reshape the input data\r\n X = x.reshape((27, -1))\r\n y = labels\r\n\r\n # Split the data into training and testing sets\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=seed)\r\n\r\n # Create a k-NN classifier and fit it to the training data\r\n clf = KNeighborsClassifier(n_neighbors=2)\r\n # clf = RandomForestClassifier()\r\n # clf = XGBClassifier()\r\n clf.fit(X_train, y_train)\r\n clf.fit(X,y)\r\n\r\n # Use the classifier to predict the labels for the test data\r\n y_pred = clf.predict(X_test)\r\n\r\n # Evaluate the classifier's performance on the test data\r\n # train_accuracy = clf.score(X_train, y_train)\r\n # print(f\"Train_accuracy: {train_accuracy:.2f}\")\r\n train_accuracy = clf.score(X, y)\r\n print(f\"Train_accuracy: {train_accuracy:.2f}\")\r\n\r\n accuracy = clf.score(X_test, y_test)\r\n print(f\"Accuracy: {accuracy:.2f}\")\r\n\r\n return clf\r\n\r\ndef train_shap(x,labels):\r\n import xgboost\r\n # X, y = shap.datasets.boston()\r\n from sklearn.decomposition import PCA\r\n pca = PCA(n_components=1)\r\n X = []\r\n for sub_x in x:\r\n sub_x = pca.fit_transform(sub_x)\r\n X.append(sub_x)\r\n X = np.array(X)\r\n X = X.reshape((27, -1))\r\n y = labels\r\n print(X.shape)\r\n\r\n import pandas as pd\r\n X = pd.DataFrame(X,columns=['Channel %d'%c for c in range(160)])\r\n\r\n label_names = ['Body','Face','Digit','Hira', 'Kanji', 'Line', 'Object']\r\n\r\n model = xgboost.train({\"learning_rate\": 0.01}, xgboost.DMatrix(X, label=y), 100)\r\n\r\n explainer = shap.TreeExplainer(model)\r\n shap_values = explainer.shap_values(X)\r\n\r\n print(X.shape)\r\n print(shap_values.shape)\r\n\r\n unique_y = np.unique(y)\r\n\r\n for i in range(len(unique_y)):\r\n 
plt.figure(figsize=(5,3))\r\n        label_name = label_names[unique_y[i]]\r\n        plt.title(f\"{label_name}\", fontsize=25)\r\n        idx = [ii for ii in range(len(y)) if y[ii] == unique_y[i]]\r\n        plt.tight_layout()\r\n        shap.summary_plot(shap_values[idx, :], X.iloc[idx, :])\r\n\r\n    shap_interaction_values = explainer.shap_interaction_values(X)\r\n    shap.summary_plot(shap_interaction_values, X)\r\n\r\n\r\n\r\ndef explain_in_train(final_data, final_labels):\r\n\r\n    from sklearn.model_selection import ShuffleSplit\r\n    cv = ShuffleSplit(10, test_size=0.1)\r\n\r\n    clf = XGBClassifier(n_estimators=100, seed=86)\r\n    # scores = cross_val_score(clf, final_data, final_labels, cv=cv)\r\n    # print(scores)\r\n\r\n    clf.fit(final_data, final_labels)\r\n    explainer = shap.TreeExplainer(clf)\r\n    shap_values = explainer.shap_values(final_data)\r\n    shap.force_plot(explainer.expected_value, shap_values, final_data)\r\n\r\n    shap_interaction_values = explainer.shap_interaction_values(final_data)\r\n    shap.summary_plot(shap_interaction_values, final_data)\r\n\r\ndef main():\r\n    # load data\r\n    x, labels, label_names = load_data()\r\n    print(x.shape)\r\n    print(labels.shape)\r\n\r\n    # for i in range(10):\r\n    #     # get a random seed\r\n    #     seed = np.random.randint(0, 100)\r\n    #     print(f\"seed: {seed}\")\r\n    #     train_trad(x, labels,seed=seed)\r\n\r\n    train_trad(x, labels, seed=86)\r\n\r\n    # train_shap(x, labels)\r\n    # train_trad(x, labels, seed=86)\r\n\r\n    # # Reshape the input data\r\n    # X = x.reshape((27, -1))\r\n    # y = labels\r\n    #\r\n    # explain_in_train(X, y)\r\n    #\r\n    # # Split the data into training and testing sets\r\n    # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\r\n\r\n\r\n\r\n\r\ndef model_compare():\r\n    models = ['KNN','SVM', 'RandomForest','XGBoost']\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()", "repo_name": "yjdeng9/EcogVideo", "sub_path": "classify.py", "file_name": "classify.py", "file_ext": "py", "file_size_in_byte": 4428, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pickle.load", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 18, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 75, "usage_type": "call"}, {"api_name": "xgboost.train", "line_number": 79, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 79, "usage_type": "call"}, {"api_name": "shap.TreeExplainer", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": 
"name"}, {"api_name": "shap.summary_plot", "line_number": 95, "usage_type": "call"}, {"api_name": "shap.summary_plot", "line_number": 98, "usage_type": "call"}, {"api_name": "sklearn.model_selection.ShuffleSplit", "line_number": 105, "usage_type": "call"}, {"api_name": "xgboost.XGBClassifier", "line_number": 107, "usage_type": "call"}, {"api_name": "shap.TreeExplainer", "line_number": 112, "usage_type": "call"}, {"api_name": "shap.force_plot", "line_number": 114, "usage_type": "call"}, {"api_name": "shap.summary_plot", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "5211536544", "text": "from recommonmark.parser import CommonMarkParser\n\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nproject = u\"Multi-Hybrid Cloud - Common Cloud Core (C3)\"\ncopyright = u\"2018, Intel\"\nversion = \"0.0.2\"\n\nmaster_doc = 'index'\nsource_suffix = ['.rst', '.md']\n\n", "repo_name": "CAADE/C3", "sub_path": "docs/conf.py", "file_name": "conf.py", "file_ext": "py", "file_size_in_byte": 258, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "recommonmark.parser.CommonMarkParser", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "30569954036", "text": "from pymongo.collection import Collection\nfrom pymongo.database import Database\n\n\ndef migrate_backend(db: Database):\n projects: Collection = db[\"project\"]\n for doc in projects.find({\"basename\": None}):\n name: str = doc[\"name\"]\n _, _, basename = name.rpartition(\"/\")\n projects.update_one(\n {\"_id\": doc[\"_id\"]}, {\"$set\": {\"basename\": basename}},\n )\n", "repo_name": "allegroai/clearml-server", "sub_path": "apiserver/mongo/migrations/1_6_0.py", "file_name": "1_6_0.py", "file_ext": "py", "file_size_in_byte": 392, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 334, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pymongo.database.Database", "line_number": 5, "usage_type": "name"}, {"api_name": "pymongo.collection.Collection", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "4824329874", "text": "# 主进程创建守护进程\r\n#\r\n#   其一:守护进程会在主进程代码执行结束后就终止\r\n#\r\n#   其二:守护进程内无法再开启子进程,否则抛出异常:AssertionError:\r\n# daemonic processes are not allowed to have children\r\n#\r\n# 注意:进程之间是互相独立的,主进程代码运行结束,守护进程随即终止\r\n\r\nfrom multiprocessing import Process\r\nimport time\r\nimport random\r\n\r\nclass Piao(Process):\r\n def __init__(self,name,num):\r\n super().__init__()\r\n self.name=name\r\n self.num = num\r\n def run(self):\r\n print('%s is piaoing' %self.name)\r\n i = 0\r\n while i < self.num:\r\n time.sleep(1)\r\n i += 1\r\n print('i:', i)\r\n time.sleep(random.randrange(1,3))\r\n print('%s is piao end' %self.name)\r\n print('i:',i)\r\n\r\nif __name__=='__main__':\r\n p=Piao('egon',3)\r\n p.daemon=True #一定要在p.start()前设置,设置p为守护进程,禁止p创建子进程,并且父进程代码执行结束,p即终止运行\r\n p.start()\r\n time.sleep(8) # 主进程结束后,守护进程立即终止,不管有没有执行完毕\r\n print('p守护进程还活着吗?',p.is_alive()) # 测试守护进程代码运行完之后,主进程还没运行完,看守护进程还活着吗\r\n print('主')\r\n\r\n### 如果主进程还没运行完毕,守护进程就运行完毕了,那守护进程就算挂掉了", "repo_name": "huotong1212/mylearnpy", "sub_path": "code/day11/进程与线程/进程/守护进程/守护进程.py", "file_name": "守护进程.py", "file_ext": "py", "file_size_in_byte": 1447, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "multiprocessing.Process", "line_number": 14, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": 
"call"}, {"api_name": "random.randrange", "line_number": 26, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "13801801375", "text": "import datetime\nimport random\nimport discord\n\nfrom discord.ext import commands\nfrom discord.utils import escape_markdown\n\nfrom utils.logger import logger\nfrom utils.check_permission import check_permission\nfrom models.model_utils import *\n\n\nclass User(commands.Cog):\n \"\"\"class for manage user\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n # async def action_for_join_remove(self, member, action):\n #\n # emb = await self.get_embed_info_user(member, f'{action} in {member.guild}')\n #\n # log_channel_id = await DB.get_info_about_guilds(member.guild)\n # log_channel_id = log_channel_id['log_channel_id']\n #\n # if log_channel_id:\n # await self.bot.get_channel(log_channel_id).send(embed=emb)\n #\n # words = await DB.get_user_count_names(member.guild)\n # user_count_channel_id = await DB.get_info_about_guilds(member.guild)\n # user_count_channel_id = user_count_channel_id['user_count_channel_id']\n #\n # if user_count_channel_id:\n # name = random.choice(words) if words else ''\n # await self.bot.get_channel(user_count_channel_id).edit(name=f'{name}: {member.guild.member_count}')\n\n async def get_embed_info_user(self, member: discord.Member) -> discord.Embed:\n color_embed = discord.Colour.gold()\n profile_user = await Profiles.get(user_id=member.id)\n roles = ','.join(rol.name for rol in member.roles if str(rol.name) != '@everyone')\n text = (\n f\"**Name**: {member.mention} \\n\"\n f\"**Display name**: {member.display_name} \\n\"\n f\"**Status**: `{member.status.value}` \\n\"\n f\"**ID**: `{member.id}` \\n\"\n f\"**Join in guild**: {str(profile_user.joined_at)[0:-10]} \\n\"\n f\"**Create account**: {str(member.created_at)[0:-10]} \\n\\n\"\n f\"**coins**: {profile_user.coins} \\n\"\n f\"**minutes**: {profile_user.minutes} (today: {profile_user.day_minutes})\\n\"\n f\"**level**: {profile_user.level} \\n\"\n f\"**Roles**: {roles} \\n\"\n f\"**Joins**: {profile_user.joins}``\\n\"\n f\"**Invites**: {profile_user.invites} \"\n )\n\n embed = discord.Embed(\n title=f'{member.name}#{member.discriminator}',\n description=text, color=color_embed\n )\n embed.set_footer(text=member.guild)\n\n return embed\n\n async def refresh_user_count_channel(self, guild: discord.Guild) -> None:\n words = await UserCountNames.get_many(guild_id=guild.id)\n words = [w.name for w in words]\n user_count_channel_id = self.bot.cached_guilds.get(guild.id).user_count_channel_id\n if user_count_channel_id:\n name = random.choice(words) if words else ''\n await self.bot.get_channel(user_count_channel_id).edit(name=f'{name}: {guild.member_count}')\n\n @commands.Cog.listener()\n async def on_member_update(self, before: discord.Member, after: discord.Member) -> None:\n if before.roles != after.roles:\n before_roles_ids = [r.id for r in before.roles]\n after_roles_ids = [r.id for r in after.roles]\n if len(after_roles_ids) > len(before_roles_ids):\n # if role war added\n new_role_id = list(set(after_roles_ids) - set(before_roles_ids))[0]\n new_role = next(rol for rol in after.roles if rol.id == new_role_id)\n await UserRoles.insert(user_id=after.id, guild_id=after.guild.id, role_id=new_role.id)\n else:\n # if role war deleted\n remove_role_id = list(set(before_roles_ids) - set(after_roles_ids))[0]\n rem_role = next(rol for rol in before.roles if rol.id == remove_role_id)\n await UserRoles.delete_(user_id=after.id, 
guild_id=after.guild.id, role_id=rem_role.id)\n\n @commands.Cog.listener()\n async def on_member_join(self, member: discord.Member) -> None:\n \"\"\"Listener when user join in a guild\"\"\"\n\n await add_user_in_db(member, member.guild)\n\n guild_from_db = await Guilds.get(guild_id=member.guild.id)\n role_saver = guild_from_db.role_saver\n if role_saver:\n user_roles = await UserRoles.get_many(guild_id=member.guild.id, user_id=member.id)\n if user_roles:\n for rol in user_roles:\n role = discord.utils.get(member.guild.roles, id=rol.role_id)\n if role.name == '@everyone':\n continue\n else:\n await member.add_roles(role)\n\n await Profiles.update(user_id=member.id,\n guild_id=member.guild.id,\n set=[\"joins = joins + 1\"])\n await Guilds.update(guild_id=member.guild.id,\n set=[\"day_joins = day_joins + 1\"])\n\n await self.refresh_user_count_channel(member.guild)\n\n @commands.Cog.listener()\n async def on_member_remove(self, member):\n \"\"\"Listener when user remove in a guild\"\"\"\n await self.refresh_user_count_channel(member.guild)\n logger.info(f'{member.guild}: user {member} remove from guild')\n\n @commands.Cog.listener()\n async def on_invite_create(self, invite):\n \"\"\"Listener when user create invite in a guild\"\"\"\n await add_invite_in_db(invite)\n logger.info(f'{invite.guild}: user {invite.inviter} create invite')\n\n @commands.command(aliases=['i'])\n @commands.check(check_permission)\n async def info(self, ctx, member: discord.Member = None):\n \"\"\"command send in chat info about user\"\"\"\n\n user = ctx.author if member is None else member\n\n emb = await self.get_embed_info_user(user)\n emb.set_image(url=user.avatar_url)\n\n await ctx.send(embed=emb)\n\n @commands.Cog.listener()\n async def on_voice_state_update(\n self, member: discord.Member,\n after: discord.VoiceState,\n before: discord.VoiceState\n ) -> None:\n \"\"\"listener when change user voice status\"\"\"\n\n channel_before = 'Null' if before.channel is None else before.channel.id\n user_profile = await Profiles.get(user_id=member.id, guild_id=member.guild.id)\n last_channel = user_profile.channel_id\n old_time = user_profile.change_voice_status\n\n new_time = datetime.datetime.now()\n if not last_channel:\n await Profiles.update(user_id=member.id, guild_id=member.guild.id,\n set=[f\"channel_id = {channel_before}\",\n f\"change_voice_status = '{new_time}'\"])\n\n if last_channel and not before.afk and not after.afk:\n delta_time = new_time - old_time\n minutes = round(delta_time.seconds / 60)\n if 1 <= minutes <= 180:\n await Profiles.update(user_id=member.id, guild_id=member.guild.id,\n set=[f\"channel_id = {channel_before}\",\n f\"change_voice_status = '{new_time}'\",\n f\"minutes = minutes + {minutes}\",\n f\"coins = coins + {minutes} * {user_profile.price_minutes}\",\n f\"day_minutes = day_minutes + 1\"])\n logger.info(f'{member.guild}: user {member} seated {minutes} minutes in channel {before.channel}')\n elif minutes > 180:\n await Profiles.update(user_id=member.id, guild_id=member.guild.id,\n set=[f\"channel_id = {channel_before}\",\n f\"change_voice_status = '{new_time}'\",\n f\"minutes = minutes + {minutes}\",\n f\"coins = coins + {180} * {user_profile.price_minutes}\",\n f\"day_minutes = day_minutes + 1\"])\n logger.info(f'{member.guild}: user {member} seated over 180 minutes in channel {before.channel}')\n else:\n await Profiles.update(user_id=member.id, guild_id=member.guild.id,\n set=[f\"channel_id = {channel_before}\",\n f\"change_voice_status = '{new_time}'\"])\n 
logger.info(f'{member.guild}: {member} seated less than one a minutes in channel {before.channel}')\n else:\n logger.info(f'{member.guild}: user {member} connect in channel {before.channel}')\n\n\ndef setup(bot: commands.Bot) -> None:\n bot.add_cog(User(bot))\n", "repo_name": "zyzycode/monster_bot", "sub_path": "cogs/user.py", "file_name": "user.py", "file_ext": "py", "file_size_in_byte": 8648, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 13, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 13, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 37, "usage_type": "attribute"}, {"api_name": "discord.Colour.gold", "line_number": 38, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 38, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 56, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 37, "usage_type": "attribute"}, {"api_name": "discord.Guild", "line_number": 64, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 69, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 73, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 72, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 72, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 72, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 89, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 100, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 100, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 88, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 88, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 88, "usage_type": "name"}, {"api_name": "utils.logger.logger.info", "line_number": 118, "usage_type": "call"}, {"api_name": "utils.logger.logger", "line_number": 118, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 114, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 114, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 114, "usage_type": "name"}, {"api_name": "utils.logger.logger.info", "line_number": 124, "usage_type": "call"}, {"api_name": "utils.logger.logger", "line_number": 124, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 120, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 120, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 120, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 128, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 126, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 126, "usage_type": "name"}, {"api_name": "discord.ext.commands.check", "line_number": 127, "usage_type": "call"}, {"api_name": "utils.check_permission.check_permission", "line_number": 127, "usage_type": "argument"}, {"api_name": "discord.ext.commands", "line_number": 127, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 140, "usage_type": "attribute"}, {"api_name": "discord.VoiceState", "line_number": 141, 
"usage_type": "attribute"}, {"api_name": "discord.VoiceState", "line_number": 142, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 151, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 151, "usage_type": "attribute"}, {"api_name": "utils.logger.logger.info", "line_number": 167, "usage_type": "call"}, {"api_name": "utils.logger.logger", "line_number": 167, "usage_type": "name"}, {"api_name": "utils.logger.logger.info", "line_number": 175, "usage_type": "call"}, {"api_name": "utils.logger.logger", "line_number": 175, "usage_type": "name"}, {"api_name": "utils.logger.logger.info", "line_number": 180, "usage_type": "call"}, {"api_name": "utils.logger.logger", "line_number": 180, "usage_type": "name"}, {"api_name": "utils.logger.logger.info", "line_number": 182, "usage_type": "call"}, {"api_name": "utils.logger.logger", "line_number": 182, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 138, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 138, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 138, "usage_type": "name"}, {"api_name": "discord.ext.commands.Bot", "line_number": 185, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 185, "usage_type": "name"}]} +{"seq_id": "42827216018", "text": "import gym\nimport numpy as np\nfrom ddpg_agent import Agent\nimport os\n\n\n \nif __name__ == '__main__':\n\n try:\n os.makedirs(\"temp/ddpg\")\n except:\n pass\n \n env = gym.make('BipedalWalker-v3')\n\n\n agent = Agent(input_dims=env.observation_space.shape, env=env,\n n_actions=env.action_space.shape[0])\n \n n_games = 250\n\n\n best_score = env.reward_range[0]\n score_history = []\n \n evaluate = False\n\n for i in range(n_games):\n steps = 0 \n observation = env.reset()\n done = False\n score = 0\n while not done:\n action = agent.choose_action(observation, evaluate)\n observation_, reward, done, info = env.step(action)\n step +=1\n env.render()\n score += reward\n agent.remember(observation, action, reward, observation_, done)\n agent.learn()\n observation = observation_\n if step > 200:\n done = True\n\n score_history.append(score)\n avg_score = np.mean(score_history[-100:])\n\n if avg_score > best_score:\n best_score = avg_score\n if not load_checkpoint:\n agent.save_models()\n\n print('episode ', i, 'score %.1f' % score, 'avg score %.1f' % avg_score)\n\n", "repo_name": "prashant-py-debug/actor-critic-RL", "sub_path": "DDPG/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1301, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.makedirs", "line_number": 11, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 15, "usage_type": "call"}, {"api_name": "ddpg_agent.Agent", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "29087922567", "text": "import functools\n\nfrom .exceptions import UnsupportedOperationException\n\n\ndef SupportedBy(*_platforms):\n def handle_func(func):\n @functools.wraps(func)\n def handle_args(*args, **kwargs):\n platforms = []\n for platform in _platforms:\n if isinstance(platform, list):\n platforms += platform\n else:\n platforms += [platform]\n\n from .element import Element\n from .web_driver import WebDriver\n from .waiter import ElementWaitFor, WebDriverWaitFor\n\n if isinstance(args[0], 
Element):\n platform = args[0].get_web_driver_info().platform\n if platform not in platforms:\n raise UnsupportedOperationException(\n \"Operation [element.%s()] is not supported by platform [%s].\" % (func.__name__, platform))\n elif isinstance(args[0], WebDriver):\n platform = args[0].get_web_driver_info().platform\n if platform not in platforms:\n raise UnsupportedOperationException(\n \"Operation [webdriver.%s()] is not supported by platform [%s].\" % (func.__name__, platform))\n elif isinstance(args[0], ElementWaitFor):\n platform = args[0]._get_element().get_web_driver_info().platform\n if platform not in platforms:\n raise UnsupportedOperationException(\n \"Operation [element.wait_for().%s()] is not supported by platform [%s].\" % (\n func.__name__, platform))\n elif isinstance(args[0], WebDriverWaitFor):\n platform = args[0]._get_web_driver().get_web_driver_info().platform\n if platform not in platforms:\n raise UnsupportedOperationException(\n \"Operation [webdriver.wait_for().%s()] is not supported by platform [%s].\" % (\n func.__name__, platform))\n\n return func(*args, **kwargs)\n\n return handle_args\n\n return handle_func\n", "repo_name": "KarlGong/easyium-python", "sub_path": "easyium/decorator.py", "file_name": "decorator.py", "file_ext": "py", "file_size_in_byte": 2123, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "21", "api": [{"api_name": "element.Element", "line_number": 21, "usage_type": "argument"}, {"api_name": "exceptions.UnsupportedOperationException", "line_number": 24, "usage_type": "call"}, {"api_name": "web_driver.WebDriver", "line_number": 26, "usage_type": "argument"}, {"api_name": "exceptions.UnsupportedOperationException", "line_number": 29, "usage_type": "call"}, {"api_name": "waiter.ElementWaitFor", "line_number": 31, "usage_type": "argument"}, {"api_name": "exceptions.UnsupportedOperationException", "line_number": 34, "usage_type": "call"}, {"api_name": "waiter.WebDriverWaitFor", "line_number": 37, "usage_type": "argument"}, {"api_name": "exceptions.UnsupportedOperationException", "line_number": 40, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "12535830793", "text": "#!/usr/bin/env python\n\nimport requests\nimport sys\nimport string\nimport random\nimport json\nfrom urllib.parse import quote\n\nrequests.packages.urllib3.disable_warnings()\ndef list_urls(file_urls):\n with open(file_urls, 'r') as content_file:\n content = content_file.readlines()\n content = [x.strip() for x in content]\n return content\n\ndef random_string(length=8):\n\tchars = string.ascii_letters + string.digits\n\trandom_string = ''.join(random.choice(chars) for x in range(length))\n\treturn random_string\n\ndef create_session(base_url, session):\n\turl = '{0}/pcidss/report'.format(base_url)\n\n\tparams = {\n\t\t'type':'allprofiles',\n\t\t'sid':'loginchallengeresponse1requestbody',\n\t\t'username':'nsroot',\n\t\t'set':'1'\n\t}\n\n\theaders = {\n\t\t'Content-Type':'application/xml',\n\t\t'X-NITRO-USER':random_string(),\n\t\t'X-NITRO-PASS':random_string(),\n\t}\n\n\tdata = '<appfwprofile><login></login></appfwprofile>'\n\tsession.post(url=url, params=params, headers=headers, data=data, verify=False)\n\treturn session\n\ndef main(file_urls):\n\turls = list_urls(file_urls)\n\tfor url in urls:\n\t\ttry:\n\t\t\tsession = requests.Session()\n\t\t\tcreate_session(url, session)\n\t\t\tif len(session.cookies.get_dict()['SESSID']) > 1:\n\t\t\t\tprint(url + ' - 
Vulnerable')\n\t\texcept KeyError:\n\t\t\tprint(url + ' - Not Vulnerable')\n\t\texcept:\n\t\t\tprint(url + ' - URL Parse failed')\n\nif __name__ == '__main__':\n\tfile_urls = sys.argv[1]\n\tmain(file_urls)\n", "repo_name": "PR3R00T/CVE-2020-8193-Citrix-Scanner", "sub_path": "scanner.py", "file_name": "scanner.py", "file_ext": "py", "file_size_in_byte": 1426, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "21", "api": [{"api_name": "requests.packages.urllib3.disable_warnings", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.packages", "line_number": 10, "usage_type": "attribute"}, {"api_name": "string.ascii_letters", "line_number": 18, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 18, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 46, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 56, "usage_type": "attribute"}]} +{"seq_id": "74533980531", "text": "import re\nimport codecs\nimport pymysql\nimport sys\nimport datetime\nimport html\n\ndatasource_id = int(sys.argv[1])\npw = sys.argv[2]\nforge_id = 65\n\nif datasource_id and pw:\n # connect to db (once at local , and once at remote)\n db1 = pymysql.connect(host=\"grid6.cs.elon.edu\",\n user=\"megan\",\n passwd=pw,\n db=\"ossmole_merged\",\n use_unicode=True,\n charset=\"utf8\")\n \n cursor1 = db1.cursor()\n\n\n db2 = pymysql.connect(host=\"grid6.cs.elon.edu\",\n user=\"megan\",\n passwd=pw,\n db=\"irc\",\n use_unicode=True,\n charset=\"utf8\")\n \n cursor2 = db2.cursor()\n \n db3 = pymysql.connect(host=\"flossdata.syr.edu\",\n user=\"megan\",\n passwd=pw,\n db=\"irc\",\n use_unicode=True,\n charset=\"utf8\")\n cursor3 = db3.cursor()\n\n # get the file list from the 'comments' field in the datasources table \n cursor1.execute('SELECT datasource_id, comments \\\n FROM datasources \\\n WHERE datasource_id >= %s \\\n AND forge_id = %s',\n (datasource_id, forge_id))\n \n rows = cursor1.fetchall()\n\n for row in rows : \n ds = row[0]\n fileLoc = row[1]\n print (\"==================\\n\")\n # date is in the filename, in the format:\n # 51255/20150406\n datelog = \"\"\n formatting = re.search(\"^(.*?)\\/(.*?)$\",fileLoc)\n \n if formatting:\n tempdate = formatting.group(2);\n print(\"got \", tempdate, \" for date\")\n \n date = re.search(\"^(\\d\\d\\d\\d)(\\d\\d)(\\d\\d)$\",tempdate) \n \n if (date):\n datelog = date.group(1)+ \"-\" + date.group(2) + \"-\" + date.group(3)\n \n # open the file\n print(\"opening file: \" + fileLoc)\n \n log = codecs.open(fileLoc, 'r', encoding='utf-8', errors='ignore')\n line = log.read()\n line = line[2:]\n line = line[:-1]\n table = line\n \n # the perl6 data is in an html table\n # (there's a plaintext version but it only has mention & action, not system messages) \n regularLOG = re.search('<table id=\\\"log\\\"(.*?)<\\/table>',table)\n \n \n if (regularLOG):\n table = regularLOG.group(1)\n trs = table.split(\"</tr>\")\n \n line_num = 0\n for tr in trs:\n send_user = \"\"\n timelog = \"\"\n line_message = \"\"\n messageType = \"\"\n line_num += 1\n \n # here is the pattern for a system message:\n #<tr id=\"id_l2\" class=\"new special dark\">\n #<td class=\"time\" id=\"i_-799999\"><a href=\"/perl6/2005-02-26#i_-799999\">13:45</a></td>\n #<td style=\"color: 0\" class=\"nick\"></td>\n #<td class=\"msg ''\">ilogger starts logging <a href=\"/perl6/today\">#perl6</a> at Sat Feb 26 13:45:34 2005</td>\n #</tr>\n 
\n # here is the pattern for a regular message:\n #<tr id=\"id_l4\" class=\"new nick nick_feb\">\n #<td class=\"time\" id=\"i_-799997\"><a href=\"/perl6/2005-02-26#i_-799997\">13:46</a></td>\n #<td style=\"color: #04000e\" class=\"nick\">feb</td>\n #<td class=\"msg ''\">autrijus: you're welcome</td>\n #</tr>\n \n # here is the pattern for an action message:\n #<tr id=\"id_l15\" class=\"new nick nick_Odin- dark\">\n #<td class=\"time\" id=\"i_-799986\"><a href=\"/perl6/2005-02-26#i_-799986\">13:55</a></td>\n #<td style=\"color: #010002\" class=\"nick\">* Odin-</td>\n #<td class=\"msg act ''\">places a sane-o-meter on the channel, wondering if it'll score above zero.</td>\n #</tr>\n \n systemMessage = re.search(\"class\\=\\\"nick\\\"\\>\\<\\/td\\>\",tr)\n regMessage = re.search(\"\\<td class\\=\\\"msg \\&\",tr)\n actionMessage = re.search(\"\\<td class\\=\\\"msg act\",tr)\n regUsername = re.search(\"class=\\\"nick\\\">(.*?)<\\/td>\",tr)\n regTimelog = re.search('td class=\\\"time\\\"(.*?)\\>\\<(.*?)\\>(.*?)\\<\\/a\\>',tr)\n regLineMessage = re.search('td class=\\\"msg(.*?)\\>(.*?)<\\/td\\>',tr)\n \n # first case: system message (blank nick td)\n if (systemMessage):\n send_user = None\n messageType = \"system\"\n \n # second case: regular message\n elif(regMessage):\n messageType = \"message\"\n if (regUsername):\n send_user=regUsername.group(1)\n \n # third case: action message\n elif(actionMessage):\n messageType = \"action\"\n if (regUsername):\n send_user=regUsername.group(1)[9:]\n \n # grab timelog: \n # <td class=\"time\" id=\"i_-799986\"><a href=\"/perl6/2005-02-26#i_-799986\">13:55</a></td>\n if (regTimelog):\n timelog = regTimelog.group(3)\n \n # grab message\n # <td class=\"msg act ''\">places a sane-o-meter on the channel, wondering if it'll score above zero.</td>\n if (regLineMessage):\n line_message = regLineMessage.group(2)\n # clean up html\n line_message = html.unescape(line_message)\n \n insertQuery=\"INSERT IGNORE INTO perl6_irc \\\n (datasource_id,line_num,\\\n line_message,\\\n date_of_entry,\\\n time_of_entry,\\\n type,\\\n send_user,\\\n last_updated)\\\n VALUES(%s,%s,%s,%s,%s,%s,%s,%s)\"\n \n dataValues=(ds,line_num,line_message,datelog,timelog,messageType,send_user,datetime.datetime.now()) \n \n if messageType != \"\":\n try:\n cursor2.execute(insertQuery,dataValues)\n db2.commit()\n except pymysql.Error as error:\n print(error)\n db2.rollback()\n try:\n cursor3.execute(insertQuery,dataValues)\n except pymysql.Error as error:\n print(error)\n db3.rollback() \n \n cursor1.close() \n cursor2.close()\n cursor3.close()\n db1.close()\n db2.close()\n db3.close()\n print(\"done\")\n\nelse:\n\tprint (\"You need both a datasource_id and a date to start on your commandline.\")\n", "repo_name": "FLOSSmole/irc", "sub_path": "2parsePerl6IRCLogs.py", "file_name": "2parsePerl6IRCLogs.py", "file_ext": "py", "file_size_in_byte": 7374, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pymysql.connect", "line_number": 14, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 24, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 33, "usage_type": "call"}, {"api_name": "re.search", "line_number": 57, "usage_type": "call"}, {"api_name": "re.search", "line_number": 63, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 
71, "usage_type": "call"}, {"api_name": "re.search", "line_number": 79, "usage_type": "call"}, {"api_name": "re.search", "line_number": 115, "usage_type": "call"}, {"api_name": "re.search", "line_number": 116, "usage_type": "call"}, {"api_name": "re.search", "line_number": 117, "usage_type": "call"}, {"api_name": "re.search", "line_number": 118, "usage_type": "call"}, {"api_name": "re.search", "line_number": 119, "usage_type": "call"}, {"api_name": "re.search", "line_number": 120, "usage_type": "call"}, {"api_name": "html.unescape", "line_number": 149, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 161, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 161, "usage_type": "attribute"}, {"api_name": "pymysql.Error", "line_number": 167, "usage_type": "attribute"}, {"api_name": "pymysql.Error", "line_number": 172, "usage_type": "attribute"}]} +{"seq_id": "13683419765", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\nfrom copy import copy\nimport datetime\n\nfrom google.appengine.ext import ndb\n\nfrom backend.parcelamento.model import Parcelamento\nfrom backend.transacao.model import Transacao\n\n\ndef crie_parcelamento(tipo_parcelamento, qt_parcelas, confirmacao_automatica):\n parcelamento = Parcelamento(tipo_parcelamento=tipo_parcelamento, qt_parcelas=qt_parcelas)\n\n if confirmacao_automatica:\n parcelamento.confirmacao_automatica = confirmacao_automatica\n parcelamento.put()\n parcelamento = parcelamento.to_dict()\n return ndb.Key(Parcelamento, int(parcelamento['id']))\n\n\ndef atualiza_parcelas(transacao, tipo_parcelamento, qt_parcelas, confirmacao_automatica):\n parcelamento = Parcelamento.get_by_id(transacao.parcelamento.id())\n if qt_parcelas:\n parcelamento.qt_parcelas = qt_parcelas\n if tipo_parcelamento:\n parcelamento.tipo_parcelamento = tipo_parcelamento\n if confirmacao_automatica:\n parcelamento.confirmacao_automatica = confirmacao_automatica\n parcelamento.put()\n\n query = Transacao.query(Transacao.parcelamento == parcelamento.key, Transacao.data > transacao.data)\n transacoes = query.fetch(keys_only=True)\n for transacao_excluir in transacoes:\n transacao_excluir.delete()\n\n qt_parcelas = parcelamento.qt_parcelas\n if parcelamento.tipo_parcelamento == 'I':\n qt_parcelas = 12\n\n parcela_inicial = transacao.nu_parcela\n if not parcela_inicial:\n parcela_inicial = 0\n\n for nu_parcela in range(parcela_inicial + 1, qt_parcelas):\n nova_transacao = copy(transacao)\n nova_transacao.confirmada = False\n nova_transacao.nu_parcela = nu_parcela\n nova_transacao.key = None\n qt_dias = (nu_parcela - parcela_inicial) * 30\n nova_transacao.data = transacao.data + datetime.timedelta(days=qt_dias)\n nova_transacao.put()\n\n\ndef apagar(objeto_id):\n chave = ndb.Key(Parcelamento, int(objeto_id))\n chave.delete()\n", "repo_name": "cidacio/ganheValor", "sub_path": "src/backend/parcelamento/parcelamento_negocio.py", "file_name": "parcelamento_negocio.py", "file_ext": "py", "file_size_in_byte": 2020, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "backend.parcelamento.model.Parcelamento", "line_number": 13, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 19, "usage_type": "call"}, {"api_name": "backend.parcelamento.model.Parcelamento", "line_number": 19, "usage_type": "argument"}, {"api_name": "google.appengine.ext.ndb", "line_number": 19, "usage_type": "name"}, {"api_name": 
"backend.parcelamento.model.Parcelamento.get_by_id", "line_number": 23, "usage_type": "call"}, {"api_name": "backend.parcelamento.model.Parcelamento", "line_number": 23, "usage_type": "name"}, {"api_name": "backend.transacao.model.Transacao.query", "line_number": 32, "usage_type": "call"}, {"api_name": "backend.transacao.model.Transacao", "line_number": 32, "usage_type": "name"}, {"api_name": "backend.transacao.model.Transacao.parcelamento", "line_number": 32, "usage_type": "attribute"}, {"api_name": "backend.transacao.model.Transacao.data", "line_number": 32, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 51, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 56, "usage_type": "call"}, {"api_name": "backend.parcelamento.model.Parcelamento", "line_number": 56, "usage_type": "argument"}, {"api_name": "google.appengine.ext.ndb", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "6786574582", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy\nimport nibabel as nib\nimport os\nfrom sklearn.decomposition import PCA\nimport math\nfrom skimage import io\nimport plotly.plotly as py\nfrom plotly.grid_objs import Grid, Column\nimport time\n#get_ipython().run_line_magic('matplotlib', 'notebook')\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.ndimage.interpolation import rotate as rot\nimport glob\nfrom scipy import ndimage\nfrom scipy.ndimage.interpolation import zoom\nimport pickle\n\n\n\n# In[2]:\n\n\ndef read_mutant_txt(path):\n name_list = []\n fo = open(path)\n for line in fo:\n striped_line = line.strip('\\n')\n if striped_line != '':\n name_list.append(striped_line)\n return name_list\n\ndef l2norm(a,b):\n return np.sqrt(a**2+b**2)\n\n\ndef align_img(bv_label):\n x,y,z = np.where(bv_label==1)\n points = np.vstack((x,y,z))\n points = points.T\n #print(points.shape)\n pca = PCA(n_components = 3)\n pca.fit(points)\n pc1 = pca.components_[0,:]\n #print(pc1)\n pc2 = pca.components_[1,:]\n if pc1[0] >=0 and pc1[1] >=0:\n azimuth = -np.arctan(pc1[1]/pc1[0])*(360/(2*np.pi))\n #print('case1')\n# clockwise = 1\n\n elif pc1[0] <=0 and pc1[1] <=0:\n azimuth = (180 - np.arctan(pc1[1]/pc1[0])*(360/(2*np.pi)))\n # print('case2')\n # anticlockwise = 1\n\n elif pc1[0]>=0 and pc1[1]<=0:\n azimuth = np.arctan(np.abs(pc1[1])/np.abs(pc1[0]))*(360/(2*np.pi))\n # print('case3')\n # anticlockwise = 1\n else:\n azimuth = -(90 + np.arctan(np.abs(pc1[0])/np.abs(pc1[1]))*(360/(2*np.pi)))\n # print('case4')\n # clockwise = 1\n\n # finding the elevation angle\n if pc1[2]>=0:\n elevation = -np.arctan(pc1[2]/l2norm(pc1[0],pc1[1]))*(360/(2*np.pi))\n # print('case1')\n else:\n elevation = np.arctan(np.abs(pc1[2])/l2norm(pc1[0],pc1[1]))*(360/(2*np.pi))\n # print('case2')\n \n bv_label_copy = bv_label.copy()\n label_rot1 = rot(bv_label_copy,angle=azimuth,axes=(0,1))\n label_rot = rot(label_rot1,angle=elevation,axes=(0,2))\n \n xr,yr,zr = np.where(label_rot==1)\n points_rot = np.vstack((xr,yr,zr))\n points_rot = points_rot.T\n #print(points_rot.shape)\n pca_rot = PCA(n_components = 3)\n pca_rot.fit(points_rot)\n pc1r = pca_rot.components_[0,:]\n pc2r = pca_rot.components_[1,:]\n \n angle3 = np.arctan(pc2r[2]/pc2r[1])*(360/(2*np.pi))\n \n label_rot = rot(label_rot,angle=-angle3,axes=(1,2))\n \n return azimuth, elevation, angle3, label_rot\n\ndef save_img(img, label, count):\n img_nft = 
nib.Nifti1Image(img,np.eye(4))\n img_save_data_path = './resize_img/img{}.nii'.format(count)\n nib.save(img_nft,img_save_data_path)\n \n img_nft = nib.Nifti1Image(label,np.eye(4))\n img_save_data_path = './resize_img/label{}.nii'.format(count)\n nib.save(img_nft,img_save_data_path)\n \n\n\n# In[3]:\n\n\ndata_base_path = '/scratch/zq415/grammar_cor/Localization/data'\ndata_folder_list = ['20180419_newdata_nii_with_filtered', 'new_data_20180522_nii']\ndata_folder_list2 = 'fix_organized_data_nii'\n\nall_BVs = []\nfor cur_floder in data_folder_list:\n cur_folder_path = os.path.join(data_base_path,cur_floder)\n all_BVs += glob.glob(cur_folder_path+'/*/*/*[Bb][Vv]*')\nprint(len(all_BVs))\n\ncur_folder_path = os.path.join(data_base_path,data_folder_list2)\nall_BVs += glob.glob(cur_folder_path+'/*[Bb][Vv]*')\nprint(len(all_BVs))\n\nall_data_list = []\nsame_name_num = 0\nfor full_bv_path in all_BVs:\n if 'BV' in full_bv_path:\n all_data_list.append((full_bv_path[:-14] + '.nii', full_bv_path))\n else:\n all_data_list.append((full_bv_path[:-9]+ '_2' + '.nii', full_bv_path))\n\n\n# In[4]:\n\n\nmutant_names = read_mutant_txt('mutant_imgs.txt')\n\n\n# In[5]:\n\n\ncount = 0\nmutant_label = {}\n\nfor i,img_path in enumerate(all_data_list):\n img = nib.load(img_path[0])\n img = np.float32(img.get_data())\n \n img_label = nib.load(img_path[1])\n img_label = np.uint8(img_label.get_data())\n img_label[img_label>0] = 1\n \n print(np.shape(img),np.shape(img_label))\n \n azimuth, elevation, angle3, label_rot = align_img(img_label)\n \n img_rot1 = rot(img,angle=azimuth,axes=(0,1))\n img_rot2 = rot(img_rot1,angle=elevation,axes=(0,2))\n img_rot = rot(img_rot2,angle=-angle3,axes=(1,2))\n \n x_slice,y_slice,z_slice = ndimage.find_objects(label_rot)[0]\n print(count, 'bv size: ', x_slice.stop-x_slice.start, y_slice.stop-y_slice.start, z_slice.stop-z_slice.start)\n \n img_slice = img_rot[x_slice.start:x_slice.stop, y_slice.start:y_slice.stop, z_slice.start:z_slice.stop]\n label_slice = label_rot[x_slice.start:x_slice.stop, y_slice.start:y_slice.stop, z_slice.start:z_slice.stop]\n \n x, y, z = np.shape(label_slice)\n label_resized = zoom(label_slice, (112.0/x, 64.0/y, 64.0/z))\n label_resized[label_resized>=0.5] = 1\n label_resized[label_resized<0.5] = 0\n \n img_resized = zoom(img_slice, (112.0/x, 64.0/y, 64.0/z))\n \n save_img(img_resized, label_resized, count)\n count += 1\n \n if 'BV' in img_path[1]:\n bv_base_name = os.path.basename(img_path[0])[:-4]\n else:\n bv_base_name = os.path.basename(img_path[0])[:-6]\n \n if bv_base_name in mutant_names:\n mutant_label[i] = (i, 0, bv_base_name, label_resized, img_resized, img_path[1])\n else:\n mutant_label[i] = (i, 1, bv_base_name, label_resized, img_resized, img_path[1])\n \n\n\n# In[6]:\n\n\nsave_name = 'All_data_112_64_64.pickle'\nsave_file = open(os.path.join(os.getcwd(),'data',save_name),'wb')\npickle.dump(mutant_label,save_file)\nsave_file.close()\n\n\n", "repo_name": "ziming-qiu/mutant_cla", "sub_path": "Rotation_resize.py", "file_name": "Rotation_resize.py", "file_ext": "py", "file_size_in_byte": 5649, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.sqrt", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.arctan", "line_number": 55, 
"usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.arctan", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.arctan", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.arctan", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.arctan", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 75, "usage_type": "attribute"}, {"api_name": "numpy.arctan", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 78, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.interpolation.rotate", "line_number": 82, "usage_type": "call"}, {"api_name": "scipy.ndimage.interpolation.rotate", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 86, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.arctan", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 94, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.interpolation.rotate", "line_number": 96, "usage_type": "call"}, {"api_name": "nibabel.Nifti1Image", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 101, "usage_type": "call"}, {"api_name": "nibabel.save", "line_number": 103, "usage_type": "call"}, {"api_name": "nibabel.Nifti1Image", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 105, "usage_type": "call"}, {"api_name": "nibabel.save", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 125, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 151, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 157, "usage_type": "call"}, {"api_name": "scipy.ndimage.interpolation.rotate", "line_number": 161, "usage_type": "call"}, {"api_name": "scipy.ndimage.interpolation.rotate", "line_number": 162, "usage_type": "call"}, {"api_name": "scipy.ndimage.interpolation.rotate", "line_number": 163, "usage_type": "call"}, {"api_name": "scipy.ndimage.find_objects", "line_number": 165, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 165, "usage_type": "name"}, {"api_name": "numpy.shape", "line_number": 171, "usage_type": "call"}, {"api_name": "scipy.ndimage.interpolation.zoom", "line_number": 172, "usage_type": "call"}, {"api_name": "scipy.ndimage.interpolation.zoom", "line_number": 
176, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path", "line_number": 182, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path", "line_number": 184, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 197, "usage_type": "call"}, {"api_name": "os.path", "line_number": 197, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 197, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 198, "usage_type": "call"}]} +{"seq_id": "33053162880", "text": "import pygame # 2D graphics for creating games\r\nimport os\r\npygame.font.init() # Set the font that will be used\r\npygame.mixer.init() # Opens the sound library\r\n\r\n#Surface - Basically the window\r\nWIDTH,HEIGHT = 900,500\r\n\r\nWIN = pygame.display.set_mode((WIDTH, HEIGHT)) # Constant values must be written in UPPERCASE\r\npygame.display.set_caption(\"First Game in PYGAME!\") # Window title\r\n\r\nWHITE = (255,255,255)\r\nBLACK = (0,0,0)\r\nRED = (255,0,0)\r\nYELLOW = (255,255,0)\r\n\r\nBORDER = pygame.Rect(WIDTH//2 - 5,0,10,HEIGHT) # Positioned in the middle, at the top; thickness and height\r\n\r\nBULLET_HIT_SOUND = pygame.mixer.Sound(os.path.join(\"Assets\", \"Grenade+1.mp3\"))\r\nBULLET_FIRE_SOUND = pygame.mixer.Sound(os.path.join(\"Assets\", \"Gun+Silencer.mp3\"))\r\n\r\nHEALTH_FONT = pygame.font.SysFont(\"comicsans\", 40)\r\nWINNER_FONT = pygame.font.SysFont(\"comicsans\", 100)\r\n\r\nFPS = 60\r\nVEL = 5\r\nBULLET_VEL = 7\r\nMAX_BULLETS = 3\r\nSPACESHIP_WIDTH, SPACESHIP_HEIGHT = 55, 40\r\n\r\nYELLOW_HIT = pygame.USEREVENT + 1 # Event number, required for separate new events\r\nRED_HIT = pygame.USEREVENT + 2 # If both were + 1, they would have the same value and be the same event\r\n\r\nYELLOW_SPACESHIP_IMAGE = pygame.image.load(os.path.join(\"Assets\",\"spaceship_yellow.png\"))\r\nYELLOW_SPACESHIP = pygame.transform.rotate(pygame.transform.scale(YELLOW_SPACESHIP_IMAGE,(SPACESHIP_WIDTH, SPACESHIP_HEIGHT)), 90) # Resize\r\nRED_SPACESHIP_IMAGE = pygame.image.load(os.path.join(\"Assets\",\"spaceship_red.png\"))\r\nRED_SPACESHIP = pygame.transform.rotate(pygame.transform.scale(RED_SPACESHIP_IMAGE,(SPACESHIP_WIDTH, SPACESHIP_HEIGHT)), 270)\r\n\r\nSPACE = pygame.transform.scale(pygame.image.load(os.path.join(\"Assets\", \"space.png\")),(WIDTH,HEIGHT))\r\n
def draw_window(red, yellow, red_bullets, yellow_bullets, red_health, yellow_health):\r\n    WIN.blit(SPACE,(0,0)) # Color for the whole screen, in RGB; needs an UPDATE (Fill)\r\n    pygame.draw.rect(WIN, BLACK, BORDER)\r\n\r\n    red_health_text = HEALTH_FONT.render(f\"Health: {red_health}\", 1, WHITE)#Sets the font, the text, always 1, and the color\r\n    yellow_health_text = HEALTH_FONT.render(f\"Health: {yellow_health}\", 1, WHITE)\r\n    WIN.blit(red_health_text, (WIDTH - red_health_text.get_width() -10, 10))\r\n    WIN.blit(yellow_health_text, (10,10))\r\n\r\n    WIN.blit(YELLOW_SPACESHIP, (yellow.x,yellow.y))#Use blit when you want to place a surface onto the screen (text or images)\r\n    WIN.blit(RED_SPACESHIP, (red.x,red.y))\r\n\r\n    for bullet in red_bullets:\r\n        pygame.draw.rect(WIN, RED, bullet)\r\n    for bullet in yellow_bullets:\r\n        pygame.draw.rect(WIN, YELLOW, bullet)\r\n    # The top left is (0,0)\r\n    pygame.display.update()\r\n\r\ndef yellow_handle_movement(keys_pressed, yellow):\r\n    if keys_pressed[pygame.K_a] and yellow.x - VEL > 0: # Left # Do not go beyond the screen\r\n        yellow.x -= VEL\r\n    if keys_pressed[pygame.K_d] and yellow.x + VEL + yellow.width < BORDER.x: # Right\r\n        yellow.x += VEL\r\n    if keys_pressed[pygame.K_w] and yellow.y - VEL > 0: # Up\r\n        yellow.y -= VEL\r\n    if keys_pressed[pygame.K_s] and yellow.y + VEL + yellow.height < HEIGHT - 20: # Down\r\n        yellow.y += VEL\r\n\r\ndef red_handle_movement(keys_pressed, red):\r\n    if keys_pressed[pygame.K_LEFT] and red.x - VEL > BORDER.x + BORDER.width: # Left\r\n        red.x -= VEL\r\n    if keys_pressed[pygame.K_RIGHT] and red.x + VEL + red.width < WIDTH: # Right\r\n        red.x += VEL\r\n    if keys_pressed[pygame.K_UP] and red.y - VEL > 0: # Up\r\n        red.y -= VEL\r\n    if keys_pressed[pygame.K_DOWN] and red.y + VEL + red.height < HEIGHT - 20: # Down\r\n        red.y += VEL\r\n
def handle_bullets(yellow_bullets, red_bullets, yellow, red): # Move the bullets, check collisions and remove them when they leave the screen or hit\r\n    for bullet in yellow_bullets:\r\n        bullet.x += BULLET_VEL\r\n        if red.colliderect(bullet): # Yellow collided with a rectangle at some point\r\n            pygame.event.post(pygame.event.Event(RED_HIT))\r\n            yellow_bullets.remove(bullet) # Post an event from inside the function as a notice, since there is no way to access this directly\r\n        if bullet.x > WIDTH:\r\n            yellow_bullets.remove(bullet)\r\n    for bullet in red_bullets:\r\n        bullet.x -= BULLET_VEL\r\n        if yellow.colliderect(bullet): \r\n            pygame.event.post(pygame.event.Event(YELLOW_HIT))\r\n            red_bullets.remove(bullet)\r\n        if bullet.x < 0:\r\n            red_bullets.remove(bullet)\r\n\r\ndef draw_winner(text):\r\n    draw_text = WINNER_FONT.render(text,1, WHITE)\r\n    WIN.blit(draw_text,(WIDTH/2 - draw_text.get_width()/2, HEIGHT/2 - draw_text.get_height()/2))\r\n    pygame.display.update()\r\n    pygame.time.delay(5000)\r\n\r\ndef main():\r\n    yellow = pygame.Rect(100,300, SPACESHIP_WIDTH, SPACESHIP_HEIGHT) # Since we keep updating the X position, and it is always being updated\r\n    red = pygame.Rect(700,300, SPACESHIP_WIDTH, SPACESHIP_HEIGHT) # Basically used to change the position, since drawing is done from the parameters\r\n\r\n    yellow_bullets = []\r\n    red_bullets = []\r\n\r\n    yellow_health = 10\r\n    red_health = 10\r\n\r\n    clock = pygame.time.Clock()\r\n    run = True\r\n    while run:\r\n        clock.tick(FPS) # Controls the speed of the while loop\r\n        for event in pygame.event.get(): # Check each of the events happening in Pygame\r\n            if event.type == pygame.QUIT:\r\n                run = False\r\n                pygame.quit()\r\n\r\n            if event.type == pygame.KEYDOWN: # Instead of checking if it is held down, check if it was clicked once, to avoid spamming\r\n                if event.key == pygame.K_LCTRL and len(yellow_bullets) < MAX_BULLETS:\r\n                    bullet = pygame.Rect(yellow.x + yellow.width, yellow.y + yellow.height//2 -2, 10, 5) # Make the shot leave from the middle of the ship's front\r\n                    yellow_bullets.append(bullet)\r\n                    BULLET_FIRE_SOUND.play()\r\n                \r\n                if event.key == pygame.K_RCTRL and len(red_bullets) < MAX_BULLETS:\r\n                    bullet = pygame.Rect(red.x, red.y + red.height//2 -2, 10, 5) # Make the shot leave from the middle of the ship's front\r\n                    red_bullets.append(bullet)\r\n                    BULLET_FIRE_SOUND.play()\r\n\r\n            winner_text = \"\"\r\n            if event.type == RED_HIT:\r\n                red_health -= 1\r\n                BULLET_HIT_SOUND.play()\r\n\r\n\r\n            if event.type == YELLOW_HIT:\r\n                yellow_health -= 1\r\n                BULLET_HIT_SOUND.play()\r\n\r\n        if red_health <= 0:\r\n            winner_text = \"Yellow Wins!\"\r\n        if yellow_health <= 0:\r\n            winner_text = \"Red Wins!\"\r\n        if winner_text != \"\":\r\n            draw_winner(winner_text)\r\n            break\r\n\r\n        keys_pressed = pygame.key.get_pressed() # While this line runs, pygame keeps watching the keyboard and returns the list of pressed keys\r\n        yellow_handle_movement(keys_pressed, yellow)\r\n        red_handle_movement(keys_pressed, red)\r\n\r\n        handle_bullets(yellow_bullets, red_bullets, yellow, red) # Check whether any of the bullets collide with one of the characters\r\n\r\n        draw_window(red, yellow, red_bullets, yellow_bullets, red_health, yellow_health)\r\n    main()\r\n\r\nif __name__ == \"__main__\": # Ensures this \"main\" function runs only if this file is the entry point; it cannot be executed from elsewhere\r\n    main() # It cannot be imported from just anywhere", "repo_name": "seven-renato/first-game-pygame", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7358, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21",
"api": [{"api_name": "pygame.font.init", "line_number": 3, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 3, "usage_type": "attribute"}, {"api_name": "pygame.mixer.init", "line_number": 4, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 4, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.mixer.Sound", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.USEREVENT", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.USEREVENT", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 37, "usage_type": "attribute"}, {"api_name": 
"pygame.transform.scale", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.transform.scale", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.event.post", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.event.Event", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.event.post", "line_number": 91, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pygame.event.Event", "line_number": 91, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 99, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 100, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 104, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 112, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 116, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 116, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 119, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 121, "usage_type": "attribute"}, {"api_name": "pygame.K_LCTRL", "line_number": 122, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 123, "usage_type": "call"}, {"api_name": "pygame.K_RCTRL", "line_number": 127, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 128, "usage_type": "call"}, {"api_name": "pygame.key.get_pressed", "line_number": 150, "usage_type": 
"call"}, {"api_name": "pygame.key", "line_number": 150, "usage_type": "attribute"}]} +{"seq_id": "74447051571", "text": "import pylustrator\npylustrator.start()\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom copy import deepcopy\n\ndatasets = [\"mnist\", \"fashion_mnist\", \"cifar10\", \"letter_mnist\"]\n\nexperiment = \"no_bias_gaussian_weights_128\"\n\ndecays = [15, 30, 45, 60, 75, 90, 180, 285, 390, 480, 600] # last one (600) is for placing the binned uniform result\n\n\nfor dataset in datasets:\n df_const = pd.read_csv(f\"./results/{dataset}/{experiment}/metrics_constant.csv\")\n df_binned = pd.read_csv(f\"./results/{dataset}/{experiment}/metrics_distributions.csv\")\n\n if dataset == \"mnist\":\n dataset_name = \" MNIST\"\n if dataset == \"mnist_with_brightness_offset\":\n dataset_name = \"MNIST adj.\"\n if dataset == \"fashion_mnist\":\n dataset_name = \"Fashion-MNIST\"\n if dataset == \"fashion_mnist_with_brightness_offset\":\n dataset_name = \"Fashion-MNIST adj.\"\n if dataset == \"cifar10\":\n dataset_name = \" CIFAR-10\"\n if dataset == \"letter_mnist\":\n dataset_name = \"EMNIST/Letters\"\n if dataset == \"letter_mnist_with_brightness_offset\":\n dataset_name = \"EMNIST/Letters adj.\"\n\n plt_idx = 1\n for metric in [\"acc\", \"F1\", \"AUC\"]:\n plt.subplot(1, 3, plt_idx)\n plt_idx += 1\n\n means = list(deepcopy(df_const[f\"mean_{metric}\"]))\n stds = list(deepcopy(df_const[f\"std_{metric}\"]))\n\n means.append(deepcopy(df_binned[f\"mean_{metric}\"]))\n stds.append(deepcopy(df_binned[f\"std_{metric}\"]))\n\n means, stds, decays = np.asarray(means), np.asarray(stds), np.asarray(decays)\n\n plt.scatter(decays, means)\n plt.grid(True)\n for i in range(len(decays)):\n plt.errorbar(decays[i], means[i], yerr=stds[i], fmt=\"o\", c='#1f77b4')\n\n ylim = plt.gca().get_ylim()\n plt.xlabel(\"decay time (ms)\")\n if experiment == \"no_bias_gaussian_weights_128_decay120\":\n plt.plot([150, 150], [ylim[0], ylim[1]], color='gray', linestyle='dashed')\n plt.xticks(decays, ['15', '', '43', '', '71', '', '99', '', '120', 'binned'])\n else:\n plt.plot([540, 540], [ylim[0], ylim[1]], color='gray', linestyle='dashed')\n plt.xticks(decays, ['15', '', '', '', '', '90', '180', '285', '390', '480', 'binned'])\n\n #% start: automatic generated code from pylustrator\n plt.figure(1).ax_dict = {ax.get_label(): ax for ax in plt.figure(1).axes}\n import matplotlib as mpl\n plt.figure(1).set_size_inches(21.000000/2.54, 6.950000/2.54, forward=True)\n plt.figure(1).axes[0].set_position([0.069112, 0.158634, 0.262884, 0.692059])\n plt.figure(1).axes[1].set_position([0.390110, 0.158631, 0.262884, 0.692059])\n plt.figure(1).axes[2].set_position([0.721261, 0.158631, 0.262884, 0.692059])\n plt.figure(1).text(0.5, 0.5, f'{dataset_name}', transform=plt.figure(1).transFigure, weight='bold') # id=plt.figure(1).texts[0].new\n plt.figure(1).texts[0].set_position([0.445177, 0.927288])\n plt.figure(1).text(0.5, 0.5, 'accuracy', transform=plt.figure(1).transFigure) # id=plt.figure(1).texts[1].new\n plt.figure(1).texts[1].set_position([0.166822, 0.860950])\n plt.figure(1).text(0.5, 0.5, 'macro f1', transform=plt.figure(1).transFigure) # id=plt.figure(1).texts[2].new\n plt.figure(1).texts[2].set_position([0.485215, 0.860950])\n plt.figure(1).text(0.5, 0.5, 'AUC', transform=plt.figure(1).transFigure) # id=plt.figure(1).texts[3].new\n plt.figure(1).texts[3].set_position([0.844598, 0.860950])\n plt.figure(1).text(0.5, 0.5, 'a', transform=plt.figure(1).transFigure, weight='bold') # 
id=plt.figure(1).texts[4].new\n plt.figure(1).texts[4].set_position([0.032888, 0.875371])\n plt.figure(1).text(0.5, 0.5, 'b', transform=plt.figure(1).transFigure, weight='bold') # id=plt.figure(1).texts[5].new\n plt.figure(1).texts[5].set_position([0.352234, 0.875371])\n plt.figure(1).text(0.5, 0.5, 'c', transform=plt.figure(1).transFigure, weight='bold') # id=plt.figure(1).texts[6].new\n plt.figure(1).texts[6].set_position([0.681112, 0.875371])\n #% end: automatic generated code from pylustrator\n #plt.show()\n\n if not os.path.exists(\"./results/figures\"):\n os.makedirs(\"./results/figures\")\n\n plt.savefig(f\"./results/figures/{dataset}_performance_metrics.pdf\")\n plt.close(\"all\")\n", "repo_name": "andistoll/coincidence_detection_and_integration_behavior_in_SNNs", "sub_path": "plots/plot_performance_metrics.py", "file_name": "plot_performance_metrics.py", "file_ext": "py", "file_size_in_byte": 4305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pylustrator.start", "line_number": 2, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 41, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 42, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 44, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.figure", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}]} +{"seq_id": "32345254416", "text": "import nltk\nimport codecs\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\n\ndef collect_parsed_categories(parsed_df, category=\"RESULT\"):\n \"\"\"Post-processing the result of parsing the abstracts through Prabhakaran's method;\n collecting out the extracted sentences that 
belong to the \"results\" into a dictionary\n with keys as the distinct PMIDs and values as the extracted findings of them\n \"\"\"\n\n cats = [\"BACKGROUND\", \"OBJECTIVE\", \"METHOD\", \"RESULT\", \"CONCLUSION\"]\n\n assert category in cats+['ALL'], \"Input is not among the available categories.\"\n \n results = {**{'paperid': []}, **{x: [] for x in cats}}\n tqdm_list = tqdm(range(len(parsed_df)), position=0, leave=True)\n for i in tqdm_list:\n row = parsed_df.iloc[i]\n rtype = row[0]\n\n if rtype == \"ABSTRACT\":\n if i > 0:\n # insert the results into the main dictionary\n results['paperid'] += [paperid]\n for cat in cats:\n results[cat] += [entry_dict[cat].strip()]\n paperid = row[1]\n entry_dict = {x:'' for x in cats}\n elif rtype not in cats:\n # continue if the row-type is not among the possible ones\n # For instance, there might be 'O'\n continue\n\n else:\n # if category is 'ALL' consider all the statements, and\n # enter them into the place in the result dictionary\n if category == \"ALL\":\n entry_dict[rtype] += \" \" + row[1]\n elif rtype == category:\n entry_dict[rtype] += \" \" + row[1]\n\n if category != \"ALL\":\n for cat in set(cats) - {category}:\n del results[cat]\n\n return results\n\n\ndef measure_uncertainty_df_abstracts(\n df, sub_unc, block_size, text_column=\"abstract\", save_path=None\n):\n \"\"\"Measuring subjective uncertainty in the abstracts/findings saved within a dataframe\n\n The input dataframe should have at least columns 'abstract' and 'pmid'. The\n input `block_size` determines the number of rows to be considered altogether\n as the input batch when running the uncertainty model.\n \"\"\"\n\n lb = 0\n pmids_lst = []\n unc_lst = []\n tqdm_list = tqdm(range(len(df)), position=0, leave=True)\n\n agg = []\n ignored_pids = []\n for lb in range(0, df.shape[0], block_size):\n block = df.iloc[lb : lb + block_size].copy()\n block = block[\n (block[text_column].apply(lambda x: (x != \"\")))\n & (block[text_column].notnull())\n ].copy()\n abst = block[text_column]\n pids = block[\"paperid\"]\n\n sents = [[(j, y) for j, y in enumerate(nltk.sent_tokenize(x))] for x in abst]\n flat_list = [\n (pid, j, y) for pid, sublist in zip(pids, sents) for j, y in sublist\n ]\n\n # removing long sentences that are untokenized for any reason\n thr = 1500\n ignored_pids += [x[0] for x in flat_list if len(x[2])>=thr]\n flat_list = [x for x in flat_list if len(x[2])<thr]\n \n enu_flat_list = pd.DataFrame(\n flat_list,\n columns=[\"paperid\", \"iphrase\", \"sentence\"],\n )\n unc = sub_unc.estimator(enu_flat_list[\"sentence\"].to_list())\n dfa = pd.concat([enu_flat_list, unc], axis=1)\n\n agg += [dfa]\n tqdm_list.update(block.shape[0])\n\n if save_path is not None:\n header = True if lb==0 else False\n dfa.to_csv(save_path, mode='a', sep='\\t', header=header, index=False)\n\n if (len(ignored_pids)>0) and (save_path is not None):\n ignored_pids_path = save_path.split('.')[0]+'_ignored_pids.txt'\n np.savetxt(ignored_pids_path, ignored_pids, fmt='%d')\n \n \n dft = pd.concat(agg)\n return dft\n\n\ndef eval_zero_shot_biocertainty(sub_unc):\n\n TRAIN_FILE = \"Complete_statements_training_set__ML_model.csv\"\n\n stopwords = nltk.corpus.stopwords.words(\"english\")\n\n texts = [] # list of text samples\n labels_index = {} # dictionary mapping label name to numeric id\n labels = [] # list of label ids\n fin = codecs.open(TRAIN_FILE, \"r\", encoding=\"utf8\")\n for line in fin:\n sent, certain = line.strip().split(\"\\t\")\n # sent = [x for x in nltk.word_tokenize(sent) if x not in 
stopwords]\n # texts.append(' '.join(sent))\n texts.append(sent)\n labels.append(certain)\n\n P = sub_unc.estimator(texts)\n\n intervals = np.linspace(0, 1, len(np.unique(labels)))\n inferred_labels = [\n np.min([i for i in range(len(intervals)) if p < intervals[i]]) for p in P\n ]\n\n return texts, labels, inferred_labels\n\n\ndef savitzky_golay(y, window_size, order, deriv=0, rate=1):\n \"\"\"Smooth (and optionally differentiate) data with a Savitzky-Golay filter.\n The Savitzky-Golay filter removes high frequency noise from data.\n It has the advantage of preserving the original shape and\n features of the signal better than other types of filtering\n approaches, such as moving averages techniques.\n Parameters\n ----------\n y : array_like, shape (N,)\n the values of the time history of the signal.\n window_size : int\n the length of the window. Must be an odd integer number.\n order : int\n the order of the polynomial used in the filtering.\n Must be less then `window_size` - 1.\n deriv: int\n the order of the derivative to compute (default = 0 means only smoothing)\n Returns\n -------\n ys : ndarray, shape (N)\n the smoothed signal (or it's n-th derivative).\n Notes\n -----\n The Savitzky-Golay is a type of low-pass filter, particularly\n suited for smoothing noisy data. The main idea behind this\n approach is to make for each point a least-square fit with a\n polynomial of high order over a odd-sized window centered at\n the point.\n Examples\n --------\n t = np.linspace(-4, 4, 500)\n y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)\n ysg = savitzky_golay(y, window_size=31, order=4)\n import matplotlib.pyplot as plt\n plt.plot(t, y, label='Noisy signal')\n plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')\n plt.plot(t, ysg, 'r', label='Filtered signal')\n plt.legend()\n plt.show()\n References\n ----------\n .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of\n Data by Simplified Least Squares Procedures. Analytical\n Chemistry, 1964, 36 (8), pp 1627-1639.\n .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing\n W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. 
Flannery\n       Cambridge University Press ISBN-13: 9780521880688\n    \"\"\"\n    import numpy as np\n    from math import factorial\n    \n    try:\n        window_size = np.abs(int(window_size))\n        order = np.abs(int(order))\n    except ValueError:\n        raise ValueError(\"window_size and order have to be of type int\")\n    if window_size % 2 != 1 or window_size < 1:\n        raise TypeError(\"window_size size must be a positive odd number\")\n    if window_size < order + 2:\n        raise TypeError(\"window_size is too small for the polynomials order\")\n    order_range = range(order+1)\n    half_window = (window_size -1) // 2\n    # precompute coefficients\n    b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])\n    m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)\n    # pad the signal at the extremes with\n    # values taken from the signal itself\n    firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )\n    lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])\n    y = np.concatenate((firstvals, y, lastvals))\n    return np.convolve( m[::-1], y, mode='valid')\n
", "repo_name": "jsourati/subjective-uncertainty", "sub_path": "subj_unc/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 7874, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "tqdm.tqdm", "line_number": 19, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 65, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 88, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 104, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 107, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 115, "usage_type": "call"}, {"api_name": "nltk.corpus", "line_number": 115, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.mat", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 202, "usage_type": "attribute"}, {"api_name": "math.factorial", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.convolve", "line_number": 208, "usage_type": "call"}]} +{"seq_id": "3206388934", "text": "# https://pypi.org/project/audioread/\n\nimport audioread\n\ndef load_file(filename):\n    with audioread.audio_open(filename) as f:\n        print(f.channels, f.samplerate, f.duration)\n    return f\n\nf = load_file(\"../test.mp3\")", "repo_name": "LucasTakanori/DA_programV2", "sub_path": "src/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 226, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "audioread.audio_open", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "19108420865", "text": "import numpy as np\nimport torch\n\ndef rand_bbox(size, lam):\n    W = size[2]\n    H = size[3]\n    cut_rat = np.sqrt(1. - lam)\n    cut_w = int(W * cut_rat)\n    cut_h = int(H * cut_rat)\n\n    # uniform\n    cx = np.random.randint(W)\n    cy = np.random.randint(H)\n\n    bbx1 = np.clip(cx - cut_w // 2, 0, W)\n    bby1 = np.clip(cy - cut_h // 2, 0, H)\n    bbx2 = np.clip(cx + cut_w // 2, 0, W)\n    bby2 = np.clip(cy + cut_h // 2, 0, H)\n\n    return bbx1, bby1, bbx2, bby2\n\ndef cutout_data(data, alpha):\n    bbx1, bby1, bbx2, bby2 = rand_bbox(data.size(), alpha)\n    new_data = data.clone()\n    new_data[:, :, bby1:bby2, bbx1:bbx2] = torch.zeros_like(data)[:, :, bby1:bby2, bbx1:bbx2]\n    # adjust lambda to exactly match pixel ratio\n\n    return new_data\n\n\n", "repo_name": "Evergreen0929/Mid-Term-Project", "sub_path": "pytorch-cifar-models/cutout.py", "file_name": "cutout.py", "file_ext": "py", "file_size_in_byte": 750, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.sqrt", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "4480908244", "text": "import numpy as np\r\nfrom scipy.stats import norm\r\nfrom random import randint, uniform\r\nimport pandas as pd\r\n\r\n# Parameters\r\nnumberOfSamples = 100000\r\nS = []\r\nK = []\r\nt = []\r\nr = []\r\nsigma = []\r\nbS = []\r\n\r\n# continuously compounded dividend yield (q) = 0\r\n\r\ndef blackScholesEquation(S, K, t, r, sigma, q=0):\r\n\r\n    d1 = (np.log(S / K) + t * (r - q + (sigma**2 / 2)))/(sigma * np.sqrt(t))\r\n    d2 = d1 - sigma*np.sqrt(t)\r\n\r\n    # Black-Scholes Equation\r\n    # For increased efficiency, given q=0, e^-qt = 1\r\n    callPrice = S * norm.cdf(d1) - K * np.exp(-r * t)*norm.cdf(d2)\r\n\r\n    # Delta\r\n    delta = norm.cdf(d1)\r\n\r\n    # Gamma\r\n    gamma = norm.pdf(d1) / (S * sigma * np.sqrt(t))\r\n\r\n    # Theta\r\n\r\n    theta = -S * norm.pdf(d1) * sigma / (2 * np.sqrt(t)) - r * K * np.exp(-r * t) * norm.cdf(d2)\r\n    theta = theta / 365\r\n\r\n    # Vega\r\n    vega = S * norm.pdf(d1) * np.sqrt(t)\r\n    vega = vega * 0.01\r\n\r\n    # Rho\r\n    rho = K * t * np.exp(-r * t) * norm.cdf(d2)\r\n    rho = rho * 0.01\r\n\r\n    return S, K, t, r, sigma, callPrice, delta, gamma, theta, vega, rho\r\n\r\ndef createData(numberOfSamples):\r\n    for i in range(numberOfSamples):\r\n        # Stock prices between $10 and $500\r\n        S.append(randint(10, 500))\r\n        # Keeping the strike price within the range of the Stock price\r\n        K.append(S[i]+uniform(-2, 2))\r\n        # 1/365 denotes 1 day, 3 denotes 3 years\r\n        
t.append(uniform(1/365, 3))\r\n # Risk free rate as a percentage\r\n r.append(uniform(0.01, 0.03))\r\n # Volatility as a percentage\r\n sigma.append(uniform(0.05, 0.9))\r\n # Calculate Call Price\r\n bS.append(blackScholesEquation(S[i], K[i], t[i], r[i], sigma[i]))\r\n\r\n option_df = pd.DataFrame(bS, columns=['Stock Price', 'Strike Price', 'Time to Maturity', 'Risk Free Rate', 'Implied Volatility', 'Call Price', 'Delta', 'Gamma', 'Theta', 'Vega', 'Rho'])\r\n option_df.to_csv('optionsData.csv', index=False)\r\n\r\ncreateData(numberOfSamples)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "repo_name": "LoganCyp/quant_portfolio", "sub_path": "Data_Generator.py", "file_name": "Data_Generator.py", "file_ext": "py", "file_size_in_byte": 1995, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.log", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 27, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 27, "usage_type": "name"}, {"api_name": "scipy.stats.norm.pdf", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.stats.norm.pdf", "line_number": 34, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 34, "usage_type": "call"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 34, "usage_type": "call"}, {"api_name": "scipy.stats.norm.pdf", "line_number": 38, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 42, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 50, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 52, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 54, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 56, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "2714237298", "text": "#!/usr/bin/env python3\n\n# Program based on https://dev.to/willamesoares/how-to-integrate-spotify-and-genius-api-to-easily-crawl-song-lyrics-with-python-4o62\n\nimport json\nimport pprint\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef scrap_song_url(url):\n\tpage = requests.get(url)\n\thtml = BeautifulSoup(page.text, 'html.parser')\n\tlyrics = html.find('div', class_='lyrics').get_text()\n\n\treturn lyrics\n\ndef request_song_info(song_title, artist_name):\n\tbase_url = 
'https://api.genius.com'\n\theaders = {'Authorization': 'Bearer ' + 'AukQPwELBzVMPWHBd0oHwCFYBEIGLqOlM1tlcrqxmO32GcirKneEmJlT3aqKSVTP'}\n\tsearch_url = base_url + '/search'\n\tdata = {'q': song_title + ' ' + artist_name}\n\tresponse = requests.get(search_url, data=data, headers=headers)\n\n\treturn response\n\nif __name__ == '__main__':\n\t# Search for matches in the request response\n\n\tfinal = {}\n\tinput_name = 'training_data.json' # also 'training_data.json'\n\tinput_fp = open('data/' + input_name, 'r')\n\tsamples = json.load(input_fp)\n\tprint(len(samples))\n\ti=0\n\tfor sample in samples:\n\t\ti += 1\n\t\tif 'name' in sample['values']:\n\t\t\tsong_title = sample['values']['name']\n\t\t\tartist_name = sample['values']['artists'][0]['name']\n\t\t\tprint(song_title,artist_name,i)\n\t\t\tresponse = request_song_info(song_title, artist_name)\n\t\t\tJSON = response.json()\n\t\t\tremote_song_info = None\n\n\t\t\tfor hit in JSON['response']['hits']:\n\t\t\t\tif artist_name.lower() in hit['result']['primary_artist']['name'].lower():\n\t\t\t\t\tremote_song_info = hit\n\t\t\t\t\tbreak\n\n\t\t\tif remote_song_info:\n\t\t\t\tsong_url = remote_song_info['result']['url']\n\t\t\t\tfinal[song_title+'::'+artist_name] = scrap_song_url(song_url)\n\n\toutput_fp = open('data/lyrics_' + input_name, 'w')\n\toutput_fp.write(json.dumps(final))\n\toutput_fp.close()\n", "repo_name": "bzanardo/data-science-project", "sub_path": "lyrics.py", "file_name": "lyrics.py", "file_ext": "py", "file_size_in_byte": 1790, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 26, "usage_type": "call"}, {"api_name": "json.load", "line_number": 36, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "36050417287", "text": "from math import inf\nimport random\nfrom typing import Callable\n\nfrom policies.policy import Policy\n\nfrom valuereps.value_representation import ValueRepresentation\nfrom utils.type_aliases import TypeValidState, TypeActions, TypeAction\n\n\nclass EpsilonGreedy(Policy):\n\n def __init__(self, *, value_representation: ValueRepresentation, actions: TypeActions,\n epsilon_getter: Callable[...,float]):\n\n super().__init__(value_representation, actions)\n\n self._get_epsilon = epsilon_getter\n\n\n def select_action_by_behavior_policy(self, state: TypeValidState, iteration: int) -> TypeAction:\n\n epsilon = self._get_state_epsilon(state, iteration)\n action = self._get_action_epsilon_greedy(state, epsilon)\n\n return action\n\n\n def select_action_by_target_policy(self, state: TypeValidState) -> TypeAction:\n\n action = self._valuerep.get_greedy_action(state)\n\n return action\n\n\n def _get_state_epsilon(self, state: TypeValidState, iteration: int) -> float:\n\n if hasattr(self._valuerep, 'get_state_visit_count'):\n visit_count = self._valuerep.get_state_visit_count(state) # type: ignore\n else:\n visit_count=None\n\n epsilon = self._get_epsilon(visit_count=visit_count, iteration=iteration)\n\n return epsilon\n\n\n def _get_action_epsilon_greedy(self, state: TypeValidState, epsilon: float) -> TypeAction:\n\n random_value = random.random()\n\n if random_value <= epsilon:\n action = self._valuerep.get_random_action() # explore\n else:\n action = self._valuerep.get_greedy_action(state) # choose greedy 
action\n\n return action\n\n\n def get_action_probability(self, state: TypeValidState, action: TypeAction, iteration: int) -> float:\n\n epsilon = self._get_state_epsilon(state, iteration)\n prob = self._get_action_probability(state, action, epsilon)\n return prob\n\n\n def _get_action_probability(self, state: TypeValidState, action: TypeAction,\n epsilon: float) -> float:\n\n label_best_action, label_tied_best, label_not_best = (0,1,2)\n\n prob = - inf\n\n my_value = self._valuerep.get_value(state, action)\n\n action_type = label_best_action\n tied_count = 1\n\n for this_action in self._actions:\n\n if this_action == action:\n continue\n\n state_action_value_q = self._valuerep.get_value(state, this_action)\n\n if state_action_value_q > my_value:\n action_type = label_not_best\n break\n elif state_action_value_q == my_value:\n action_type = label_tied_best\n tied_count += 1\n\n if action_type == label_best_action:\n prob = 1 - epsilon + epsilon / len(self._actions)\n\n elif action_type == label_tied_best:\n prob = (1 - epsilon) / tied_count + epsilon / len(self._actions)\n\n elif action_type == label_not_best:\n prob = epsilon / len(self._actions)\n\n return prob\n\n\n def get_state_value(self, state: TypeValidState, iteration: int) -> float:\n\n expected_value: float = 0\n\n epsilon = self._get_state_epsilon(state, iteration)\n\n for this_action in self._actions:\n state_action_value_q = self._valuerep.get_value(state, this_action)\n probability = self._get_action_probability(state, this_action, epsilon)\n expected_value += probability * state_action_value_q\n\n return expected_value\n", "repo_name": "mmakipaa/rl", "sub_path": "policies/epsilon_greedy.py", "file_name": "epsilon_greedy.py", "file_ext": "py", "file_size_in_byte": 3548, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "policies.policy.Policy", "line_number": 11, "usage_type": "name"}, {"api_name": "valuereps.value_representation.ValueRepresentation", "line_number": 13, "usage_type": "name"}, {"api_name": "utils.type_aliases.TypeActions", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 14, "usage_type": "name"}, {"api_name": "utils.type_aliases.TypeValidState", "line_number": 21, "usage_type": "name"}, {"api_name": "utils.type_aliases.TypeAction", "line_number": 21, "usage_type": "name"}, {"api_name": "utils.type_aliases.TypeValidState", "line_number": 29, "usage_type": "name"}, {"api_name": "utils.type_aliases.TypeAction", "line_number": 29, "usage_type": "name"}, {"api_name": "utils.type_aliases.TypeValidState", "line_number": 36, "usage_type": "name"}, {"api_name": "utils.type_aliases.TypeValidState", "line_number": 48, "usage_type": "name"}, {"api_name": "random.random", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.type_aliases.TypeAction", "line_number": 48, "usage_type": "name"}, {"api_name": "utils.type_aliases.TypeValidState", "line_number": 60, "usage_type": "name"}, {"api_name": "utils.type_aliases.TypeAction", "line_number": 60, "usage_type": "name"}, {"api_name": "utils.type_aliases.TypeValidState", "line_number": 67, "usage_type": "name"}, {"api_name": "utils.type_aliases.TypeAction", "line_number": 67, "usage_type": "name"}, {"api_name": "math.inf", "line_number": 72, "usage_type": "name"}, {"api_name": "utils.type_aliases.TypeValidState", "line_number": 105, "usage_type": "name"}]} +{"seq_id": "15711028782", "text": "import numpy as np\nfrom scipy.integrate import 
odeint\n\n\nclass FitzHughNagumo:\n \"\"\"\n Creates a FitzHugh-Nagumo model.\n \"\"\"\n def __init__(self, a=-0.7, b=0.8, phi=12.5):\n \"\"\"\n Initializes the model.\n\n Args:\n a (int, float): Variable a.\n b (int, float): Variable b.\n phi (int, float): Variable phi.\n \"\"\"\n self.a = a\n self.b = b\n self.phi = phi\n self.V = None\n self.W = None\n self.current = None\n self.t = None\n self.dt = None\n self.tvec = None\n\n def __repr__(self):\n \"\"\"\n Visualize model parameters when printing.\n \"\"\"\n return f'FitzHughNagumo(a={self.a}, b={self.b}, phi={self.phi})'\n\n def _system_equations(self, X, t, current):\n \"\"\"\n Defines the equations of the dynamical system for integration.\n \"\"\"\n return [X[0] - (X[0]**3) / 3 - X[1] + current,\n self.phi * (X[0] + self.a - self.b * X[1])]\n\n def run(self, X0=[0, 0], current=1, t=100, dt=0.01):\n \"\"\"\n Runs the model.\n\n Args:\n X0 (list, optional): Initial values of V and W. Defaults to [0, 0, 0].\n current (int, optional): External current. Defaults to 1.\n t (int, optional): Total time for the simulation. Defaults to 100.\n dt (float, optional): Simulation step. Defaults to 0.01.\n \"\"\"\n self.current = current\n self.t = t\n self.dt = dt\n self.tvec = np.arange(0, self.t, self.dt)\n X = odeint(self._system_equations, X0, self.tvec, (current, ))\n self.V, self.W = X[:, 0], X[:, 1]\n", "repo_name": "sgalella/BiologicalNeuralModels", "sub_path": "neural_models/fitzhugh_nagumo.py", "file_name": "fitzhugh_nagumo.py", "file_ext": "py", "file_size_in_byte": 1664, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.arange", "line_number": 54, "usage_type": "call"}, {"api_name": "scipy.integrate.odeint", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "34813332023", "text": "from django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.shortcuts import redirect, render\nfrom django.utils.html import format_html\nfrom django.utils.translation import gettext_lazy as _\nfrom esi.decorators import token_required\nfrom eveuniverse.models import EveEntity\n\nfrom allianceauth.eveonline.models import EveCharacter\nfrom allianceauth.services.hooks import get_extension_logger\nfrom app_utils.logging import LoggerAddTag\nfrom app_utils.messages import messages_plus\n\nfrom .. 
import __title__\nfrom ..app_settings import SR_CORPORATIONS_ENABLED\nfrom ..core import BaseConfig, MainOrganizations\nfrom ..decorators import token_required_by_state\nfrom ..helpers.evecorporation import EveCorporation\nfrom ..models import ContactSet, StandingRequest, StandingRevocation\nfrom ..tasks import update_all\nfrom .helpers import DEFAULT_ICON_SIZE, add_common_context\n\nlogger = LoggerAddTag(get_extension_logger(__name__), __title__)\n\n\n@login_required\n@permission_required(StandingRequest.REQUEST_PERMISSION_NAME)\ndef index_view(request):\n \"\"\"index page is used as dispatcher\"\"\"\n app_count = (\n StandingRequest.objects.pending_requests().count()\n + StandingRevocation.objects.pending_requests().count()\n )\n if app_count > 0 and request.user.has_perm(\"standingsrequests.affect_standings\"):\n return redirect(\"standingsrequests:manage\")\n else:\n return redirect(\"standingsrequests:create_requests\")\n\n\n@login_required\n@permission_required(StandingRequest.REQUEST_PERMISSION_NAME)\ndef create_requests(request):\n organization = BaseConfig.standings_source_entity()\n context = {\n \"corporations_enabled\": SR_CORPORATIONS_ENABLED,\n \"organization\": organization,\n \"organization_image_url\": organization.icon_url(size=DEFAULT_ICON_SIZE),\n \"authinfo\": {\"main_char_id\": request.user.profile.main_character.character_id},\n }\n return render(\n request,\n \"standingsrequests/create_requests.html\",\n add_common_context(request, context),\n )\n\n\n@login_required\n@permission_required(StandingRequest.REQUEST_PERMISSION_NAME)\ndef request_characters(request):\n logger.debug(\"Start request_characters request\")\n try:\n contact_set = ContactSet.objects.latest()\n except ContactSet.DoesNotExist:\n return render(\n request, \"standingsrequests/error.html\", add_common_context(request, {})\n )\n\n eve_characters_qs = EveCharacter.objects.filter(\n character_ownership__user=request.user\n ).select_related(\"character_ownership__user\")\n eve_characters = {obj.character_id: obj for obj in eve_characters_qs}\n characters_with_standing = {\n contact[\"eve_entity_id\"]: contact[\"standing\"]\n for contact in (\n contact_set.contacts.filter(\n eve_entity_id__in=list(eve_characters.keys())\n ).values(\"eve_entity_id\", \"standing\")\n )\n }\n characters_standings_requests = {\n obj.contact_id: obj\n for obj in (\n StandingRequest.objects.select_related(\"user\")\n .filter(contact_id__in=eve_characters.keys())\n .annotate_is_pending()\n .annotate_is_actioned()\n )\n }\n characters_standing_revocation = {\n obj.contact_id: obj\n for obj in (\n StandingRevocation.objects.filter(\n contact_id__in=eve_characters.keys()\n ).annotate_is_pending()\n )\n }\n characters_data = list()\n for character in eve_characters.values():\n character_id = character.character_id\n standing = characters_with_standing.get(character_id)\n has_pending_request = (\n character_id in characters_standings_requests\n and characters_standings_requests[character_id].is_pending_annotated\n )\n has_pending_revocation = (\n character_id in characters_standing_revocation\n and characters_standing_revocation[character_id].is_pending_annotated\n )\n has_actioned_request = (\n character_id in characters_standings_requests\n and characters_standings_requests[character_id].is_actioned_annotated\n )\n has_standing = (\n character_id in characters_standings_requests\n and characters_standings_requests[character_id].is_effective\n and characters_standings_requests[character_id].user == request.user\n )\n 
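# Assemble the flags computed above, plus membership and scope checks, into one row per character\n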
        characters_data.append(\n            {\n                \"character\": character,\n                \"standing\": standing,\n                \"pendingRequest\": has_pending_request,\n                \"pendingRevocation\": has_pending_revocation,\n                \"requestActioned\": has_actioned_request,\n                \"inOrganisation\": MainOrganizations.is_character_a_member(character),\n                \"hasRequiredScopes\": StandingRequest.has_required_scopes_for_request(\n                    character, user=request.user, quick_check=True\n                ),\n                \"hasStanding\": has_standing,\n            }\n        )\n\n    context = {\"characters\": characters_data}\n    return render(\n        request,\n        \"standingsrequests/partials/_request_characters.html\",\n        add_common_context(request, context),\n    )\n\n\n@login_required\n@permission_required(StandingRequest.REQUEST_PERMISSION_NAME)\ndef request_corporations(request):\n    logger.debug(\"Start request_corporations request\")\n    try:\n        contact_set = ContactSet.objects.latest()\n    except ContactSet.DoesNotExist:\n        return render(\n            request, \"standingsrequests/error.html\", add_common_context(request, {})\n        )\n\n    eve_characters_qs = EveCharacter.objects.filter(\n        character_ownership__user=request.user\n    ).select_related(\"character_ownership__user\")\n    corporation_ids = set(\n        eve_characters_qs.exclude(corporation_id__in=MainOrganizations.corporation_ids)\n        .exclude(alliance_id__in=MainOrganizations.alliance_ids)\n        .values_list(\"corporation_id\", flat=True)\n    )\n    corporations_standing_requests = {\n        obj.contact_id: obj\n        for obj in (\n            StandingRequest.objects.select_related(\"user\")\n            .filter(contact_id__in=corporation_ids)\n            .annotate_is_pending()\n            .annotate_is_actioned()\n        )\n    }\n    corporations_revocation_requests = {\n        obj.contact_id: obj\n        for obj in (\n            StandingRevocation.objects.filter(contact_id__in=corporation_ids)\n            .annotate_is_pending()\n            .annotate_is_actioned()\n        )\n    }\n    corporation_contacts = {\n        obj.eve_entity_id: obj\n        for obj in (contact_set.contacts.filter(eve_entity_id__in=corporation_ids))\n    }\n    corporations_data = list()\n    for corporation in EveCorporation.get_many_by_id(corporation_ids):\n        if corporation and not corporation.is_npc:\n            corporation_id = corporation.corporation_id\n            try:\n                standing = corporation_contacts[corporation_id].standing\n            except KeyError:\n                standing = None\n            has_pending_request = (\n                corporation_id in corporations_standing_requests\n                and corporations_standing_requests[corporation_id].is_pending_annotated\n            )\n            has_pending_revocation = (\n                corporation_id in corporations_revocation_requests\n                and corporations_revocation_requests[\n                    corporation_id\n                ].is_pending_annotated\n            )\n            has_actioned_request = (\n                corporation_id in corporations_standing_requests\n                and corporations_standing_requests[corporation_id].is_actioned_annotated\n            )\n            has_standing = (\n                corporation_id in corporations_standing_requests\n                and corporations_standing_requests[corporation_id].is_effective\n                and corporations_standing_requests[corporation_id].user == request.user\n            )\n            corporations_data.append(\n                {\n                    \"token_count\": corporation.member_tokens_count_for_user(\n                        request.user, quick_check=True\n                    ),\n                    \"corp\": corporation,\n                    \"standing\": standing,\n                    \"pendingRequest\": has_pending_request,\n                    \"pendingRevocation\": has_pending_revocation,\n                    \"requestActioned\": has_actioned_request,\n                    \"hasStanding\": has_standing,\n                }\n            )\n\n    corporations_data.sort(key=lambda x: x[\"corp\"].corporation_name)\n    context = {\"corps\": corporations_data}\n    return render(\n        request,\n        \"standingsrequests/partials/_request_corporations.html\",\n        add_common_context(request, context),\n    
)\n\n\n@login_required\n@permission_required(StandingRequest.REQUEST_PERMISSION_NAME)\ndef request_character_standing(request, character_id: int):\n \"\"\"For a user to request standings for their own characters\"\"\"\n logger.debug(\n \"Standings request from user %s for characterID %d\", request.user, character_id\n )\n try:\n character = (\n EveCharacter.objects.select_related(\"character_ownership__user\")\n .filter(character_ownership__user=request.user)\n .get(character_id=character_id)\n )\n except EveCharacter.DoesNotExist:\n success = False\n else:\n success = StandingRequest.objects.create_character_request(\n request.user, character\n )\n if not success:\n messages_plus.warning(\n request,\n \"An unexpected error occurred when trying to process \"\n \"your standing request for %s. Please try again.\"\n % EveEntity.objects.resolve_name(character_id),\n )\n\n return redirect(\"standingsrequests:create_requests\")\n\n\n@login_required\n@permission_required(StandingRequest.REQUEST_PERMISSION_NAME)\ndef remove_character_standing(request, character_id: int):\n \"\"\"\n Handles both removing requests and removing existing standings\n \"\"\"\n logger.debug(\n \"remove_character_standing called by %s for character %d\",\n request.user,\n character_id,\n )\n try:\n req = StandingRequest.objects.filter(user=request.user).get(\n contact_id=character_id\n )\n except StandingRequest.DoesNotExist:\n success = False\n else:\n success = req.remove()\n if not success:\n messages_plus.warning(\n request,\n \"An unexpected error occurred when trying to process \"\n \"your request to revoke standing for %s. Please try again.\"\n % EveEntity.objects.resolve_name(character_id),\n )\n\n return redirect(\"standingsrequests:create_requests\")\n\n\n@login_required\n@permission_required(StandingRequest.REQUEST_PERMISSION_NAME)\ndef request_corp_standing(request, corporation_id):\n \"\"\"\n For a user to request standings for their own corp\n \"\"\"\n corporation_id = int(corporation_id)\n logger.debug(\n \"Standings request from user %s for corpID %d\", request.user, corporation_id\n )\n if not StandingRequest.objects.create_corporation_request(\n request.user, corporation_id\n ):\n messages_plus.warning(\n request,\n \"An unexpected error occurred when trying to process \"\n \"your standing request for %s. Please try again.\"\n % EveEntity.objects.resolve_name(corporation_id),\n )\n\n return redirect(\"standingsrequests:create_requests\")\n\n\n@login_required\n@permission_required(StandingRequest.REQUEST_PERMISSION_NAME)\ndef remove_corp_standing(request, corporation_id: int):\n \"\"\"\n Handles both removing corp requests and removing existing standings\n \"\"\"\n logger.debug(\"remove_corp_standing called by %s\", request.user)\n try:\n req = StandingRequest.objects.filter(user=request.user).get(\n contact_id=corporation_id\n )\n except StandingRequest.DoesNotExist:\n success = False\n else:\n success = req.remove()\n if not success:\n messages_plus.warning(\n request,\n \"An unexpected error occurred when trying to process \"\n \"your request to revoke standing for %s. 
Please try again.\"\n % EveEntity.objects.resolve_name(corporation_id),\n )\n\n return redirect(\"standingsrequests:create_requests\")\n\n\n@login_required\n@permission_required(\"standingsrequests.affect_standings\")\n@token_required(new=False, scopes=ContactSet.required_esi_scope())\ndef view_auth_page(request, token):\n source_entity = BaseConfig.standings_source_entity()\n char_name = EveEntity.objects.resolve_name(BaseConfig.owner_character_id)\n if not source_entity:\n messages_plus.error(\n request,\n format_html(\n _(\n \"The configured character <strong>%s</strong> does not belong \"\n \"to an alliance and can therefore not be used \"\n \"to setup alliance standings. \"\n \"Please configure a character that has an alliance.\"\n )\n % char_name,\n ),\n )\n elif token.character_id == BaseConfig.owner_character_id:\n update_all.delay(user_pk=request.user.pk)\n messages_plus.success(\n request,\n format_html(\n _(\n \"Token for character <strong>%s</strong> has been setup \"\n \"successfully and the app has started pulling standings \"\n \"from <strong>%s</strong>.\"\n )\n % (char_name, source_entity.name),\n ),\n )\n else:\n messages_plus.error(\n request,\n _(\n \"Failed to setup token for configured character \"\n \"%(char_name)s (id:%(standings_api_char_id)s). \"\n \"Instead got token for different character: \"\n \"%(token_char_name)s (id:%(token_char_id)s)\"\n )\n % {\n \"char_name\": char_name,\n \"standings_api_char_id\": BaseConfig.owner_character_id,\n \"token_char_name\": EveEntity.objects.resolve_name(token.character_id),\n \"token_char_id\": token.character_id,\n },\n )\n return redirect(\"standingsrequests:index\")\n\n\n@login_required\n@permission_required(StandingRequest.REQUEST_PERMISSION_NAME)\n@token_required_by_state(new=False)\ndef view_requester_add_scopes(request, token):\n messages_plus.success(\n request,\n _(\"Successfully added token with required scopes for %(char_name)s\")\n % {\"char_name\": EveEntity.objects.resolve_name(token.character_id)},\n )\n return redirect(\"standingsrequests:create_requests\")\n\n\n@login_required\n@staff_member_required\ndef admin_changeset_update_now(request):\n update_all.delay(user_pk=request.user.pk)\n messages_plus.info(\n request,\n _(\n \"Started updating contacts and affiliations. 
\"\n \"You will receive a notification when completed.\"\n ),\n )\n return redirect(\"admin:standingsrequests_contactset_changelist\")\n", "repo_name": "staropera/standingsrequests", "sub_path": "standingsrequests/views/views_1.py", "file_name": "views_1.py", "file_ext": "py", "file_size_in_byte": 15325, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "app_utils.logging.LoggerAddTag", "line_number": 23, "usage_type": "call"}, {"api_name": "allianceauth.services.hooks.get_extension_logger", "line_number": 23, "usage_type": "call"}, {"api_name": "models.StandingRequest.objects.pending_requests", "line_number": 31, "usage_type": "call"}, {"api_name": "models.StandingRequest.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 31, "usage_type": "name"}, {"api_name": "models.StandingRevocation.objects.pending_requests", "line_number": 32, "usage_type": "call"}, {"api_name": "models.StandingRevocation.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.StandingRevocation", "line_number": 32, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 37, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 26, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 27, "usage_type": "call"}, {"api_name": "models.StandingRequest.REQUEST_PERMISSION_NAME", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 27, "usage_type": "name"}, {"api_name": "core.BaseConfig.standings_source_entity", "line_number": 43, "usage_type": "call"}, {"api_name": "core.BaseConfig", "line_number": 43, "usage_type": "name"}, {"api_name": "app_settings.SR_CORPORATIONS_ENABLED", "line_number": 45, "usage_type": "name"}, {"api_name": "helpers.DEFAULT_ICON_SIZE", "line_number": 47, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 50, "usage_type": "call"}, {"api_name": "helpers.add_common_context", "line_number": 53, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 40, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 41, "usage_type": "call"}, {"api_name": "models.StandingRequest.REQUEST_PERMISSION_NAME", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 41, "usage_type": "name"}, {"api_name": "models.ContactSet.objects.latest", "line_number": 62, "usage_type": "call"}, {"api_name": "models.ContactSet.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "models.ContactSet", "line_number": 62, "usage_type": "name"}, {"api_name": "models.ContactSet.DoesNotExist", "line_number": 63, "usage_type": "attribute"}, {"api_name": "models.ContactSet", "line_number": 63, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 64, "usage_type": "call"}, {"api_name": "helpers.add_common_context", "line_number": 65, "usage_type": "call"}, {"api_name": "allianceauth.eveonline.models.EveCharacter.objects.filter", "line_number": 68, "usage_type": "call"}, {"api_name": "allianceauth.eveonline.models.EveCharacter.objects", "line_number": 68, "usage_type": "attribute"}, {"api_name": 
"allianceauth.eveonline.models.EveCharacter", "line_number": 68, "usage_type": "name"}, {"api_name": "models.StandingRequest.objects.select_related", "line_number": 83, "usage_type": "call"}, {"api_name": "models.StandingRequest.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 83, "usage_type": "name"}, {"api_name": "models.StandingRevocation.objects.filter", "line_number": 92, "usage_type": "call"}, {"api_name": "models.StandingRevocation.objects", "line_number": 92, "usage_type": "attribute"}, {"api_name": "models.StandingRevocation", "line_number": 92, "usage_type": "name"}, {"api_name": "core.MainOrganizations.is_character_a_member", "line_number": 125, "usage_type": "call"}, {"api_name": "core.MainOrganizations", "line_number": 125, "usage_type": "name"}, {"api_name": "models.StandingRequest.has_required_scopes_for_request", "line_number": 126, "usage_type": "call"}, {"api_name": "models.StandingRequest", "line_number": 126, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 134, "usage_type": "call"}, {"api_name": "helpers.add_common_context", "line_number": 137, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 57, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 58, "usage_type": "call"}, {"api_name": "models.StandingRequest.REQUEST_PERMISSION_NAME", "line_number": 58, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 58, "usage_type": "name"}, {"api_name": "models.ContactSet.objects.latest", "line_number": 146, "usage_type": "call"}, {"api_name": "models.ContactSet.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "models.ContactSet", "line_number": 146, "usage_type": "name"}, {"api_name": "models.ContactSet.DoesNotExist", "line_number": 147, "usage_type": "attribute"}, {"api_name": "models.ContactSet", "line_number": 147, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 148, "usage_type": "call"}, {"api_name": "helpers.add_common_context", "line_number": 149, "usage_type": "call"}, {"api_name": "allianceauth.eveonline.models.EveCharacter.objects.filter", "line_number": 152, "usage_type": "call"}, {"api_name": "allianceauth.eveonline.models.EveCharacter.objects", "line_number": 152, "usage_type": "attribute"}, {"api_name": "allianceauth.eveonline.models.EveCharacter", "line_number": 152, "usage_type": "name"}, {"api_name": "core.MainOrganizations.corporation_ids", "line_number": 156, "usage_type": "attribute"}, {"api_name": "core.MainOrganizations", "line_number": 156, "usage_type": "name"}, {"api_name": "core.MainOrganizations.alliance_ids", "line_number": 157, "usage_type": "attribute"}, {"api_name": "core.MainOrganizations", "line_number": 157, "usage_type": "name"}, {"api_name": "models.StandingRequest.objects.select_related", "line_number": 163, "usage_type": "call"}, {"api_name": "models.StandingRequest.objects", "line_number": 163, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 163, "usage_type": "name"}, {"api_name": "models.StandingRevocation.objects.filter", "line_number": 172, "usage_type": "call"}, {"api_name": "models.StandingRevocation.objects", "line_number": 172, "usage_type": "attribute"}, {"api_name": "models.StandingRevocation", "line_number": 172, "usage_type": "name"}, {"api_name": "helpers.evecorporation.EveCorporation.get_many_by_id", "line_number": 
182, "usage_type": "call"}, {"api_name": "helpers.evecorporation.EveCorporation", "line_number": 182, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 224, "usage_type": "call"}, {"api_name": "helpers.add_common_context", "line_number": 227, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 141, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 142, "usage_type": "call"}, {"api_name": "models.StandingRequest.REQUEST_PERMISSION_NAME", "line_number": 142, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 142, "usage_type": "name"}, {"api_name": "allianceauth.eveonline.models.EveCharacter.objects.select_related", "line_number": 240, "usage_type": "call"}, {"api_name": "allianceauth.eveonline.models.EveCharacter.objects", "line_number": 240, "usage_type": "attribute"}, {"api_name": "allianceauth.eveonline.models.EveCharacter", "line_number": 240, "usage_type": "name"}, {"api_name": "allianceauth.eveonline.models.EveCharacter.DoesNotExist", "line_number": 244, "usage_type": "attribute"}, {"api_name": "allianceauth.eveonline.models.EveCharacter", "line_number": 244, "usage_type": "name"}, {"api_name": "models.StandingRequest.objects.create_character_request", "line_number": 247, "usage_type": "call"}, {"api_name": "models.StandingRequest.objects", "line_number": 247, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 247, "usage_type": "name"}, {"api_name": "app_utils.messages.messages_plus.warning", "line_number": 251, "usage_type": "call"}, {"api_name": "app_utils.messages.messages_plus", "line_number": 251, "usage_type": "name"}, {"api_name": "eveuniverse.models.EveEntity.objects.resolve_name", "line_number": 255, "usage_type": "call"}, {"api_name": "eveuniverse.models.EveEntity.objects", "line_number": 255, "usage_type": "attribute"}, {"api_name": "eveuniverse.models.EveEntity", "line_number": 255, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 258, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 231, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 232, "usage_type": "call"}, {"api_name": "models.StandingRequest.REQUEST_PERMISSION_NAME", "line_number": 232, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 232, "usage_type": "name"}, {"api_name": "models.StandingRequest.objects.filter", "line_number": 273, "usage_type": "call"}, {"api_name": "models.StandingRequest.objects", "line_number": 273, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 273, "usage_type": "name"}, {"api_name": "models.StandingRequest.DoesNotExist", "line_number": 276, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 276, "usage_type": "name"}, {"api_name": "app_utils.messages.messages_plus.warning", "line_number": 281, "usage_type": "call"}, {"api_name": "app_utils.messages.messages_plus", "line_number": 281, "usage_type": "name"}, {"api_name": "eveuniverse.models.EveEntity.objects.resolve_name", "line_number": 285, "usage_type": "call"}, {"api_name": "eveuniverse.models.EveEntity.objects", "line_number": 285, "usage_type": "attribute"}, {"api_name": "eveuniverse.models.EveEntity", "line_number": 285, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 288, 
"usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 261, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 262, "usage_type": "call"}, {"api_name": "models.StandingRequest.REQUEST_PERMISSION_NAME", "line_number": 262, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 262, "usage_type": "name"}, {"api_name": "models.StandingRequest.objects.create_corporation_request", "line_number": 301, "usage_type": "call"}, {"api_name": "models.StandingRequest.objects", "line_number": 301, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 301, "usage_type": "name"}, {"api_name": "app_utils.messages.messages_plus.warning", "line_number": 304, "usage_type": "call"}, {"api_name": "app_utils.messages.messages_plus", "line_number": 304, "usage_type": "name"}, {"api_name": "eveuniverse.models.EveEntity.objects.resolve_name", "line_number": 308, "usage_type": "call"}, {"api_name": "eveuniverse.models.EveEntity.objects", "line_number": 308, "usage_type": "attribute"}, {"api_name": "eveuniverse.models.EveEntity", "line_number": 308, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 311, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 291, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 292, "usage_type": "call"}, {"api_name": "models.StandingRequest.REQUEST_PERMISSION_NAME", "line_number": 292, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 292, "usage_type": "name"}, {"api_name": "models.StandingRequest.objects.filter", "line_number": 322, "usage_type": "call"}, {"api_name": "models.StandingRequest.objects", "line_number": 322, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 322, "usage_type": "name"}, {"api_name": "models.StandingRequest.DoesNotExist", "line_number": 325, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 325, "usage_type": "name"}, {"api_name": "app_utils.messages.messages_plus.warning", "line_number": 330, "usage_type": "call"}, {"api_name": "app_utils.messages.messages_plus", "line_number": 330, "usage_type": "name"}, {"api_name": "eveuniverse.models.EveEntity.objects.resolve_name", "line_number": 334, "usage_type": "call"}, {"api_name": "eveuniverse.models.EveEntity.objects", "line_number": 334, "usage_type": "attribute"}, {"api_name": "eveuniverse.models.EveEntity", "line_number": 334, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 337, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 314, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 315, "usage_type": "call"}, {"api_name": "models.StandingRequest.REQUEST_PERMISSION_NAME", "line_number": 315, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 315, "usage_type": "name"}, {"api_name": "core.BaseConfig.standings_source_entity", "line_number": 344, "usage_type": "call"}, {"api_name": "core.BaseConfig", "line_number": 344, "usage_type": "name"}, {"api_name": "eveuniverse.models.EveEntity.objects.resolve_name", "line_number": 345, "usage_type": "call"}, {"api_name": "eveuniverse.models.EveEntity.objects", "line_number": 345, "usage_type": "attribute"}, {"api_name": 
"eveuniverse.models.EveEntity", "line_number": 345, "usage_type": "name"}, {"api_name": "core.BaseConfig.owner_character_id", "line_number": 345, "usage_type": "attribute"}, {"api_name": "core.BaseConfig", "line_number": 345, "usage_type": "name"}, {"api_name": "app_utils.messages.messages_plus.error", "line_number": 347, "usage_type": "call"}, {"api_name": "app_utils.messages.messages_plus", "line_number": 347, "usage_type": "name"}, {"api_name": "django.utils.html.format_html", "line_number": 349, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 350, "usage_type": "call"}, {"api_name": "core.BaseConfig.owner_character_id", "line_number": 359, "usage_type": "attribute"}, {"api_name": "core.BaseConfig", "line_number": 359, "usage_type": "name"}, {"api_name": "tasks.update_all.delay", "line_number": 360, "usage_type": "call"}, {"api_name": "tasks.update_all", "line_number": 360, "usage_type": "name"}, {"api_name": "app_utils.messages.messages_plus.success", "line_number": 361, "usage_type": "call"}, {"api_name": "app_utils.messages.messages_plus", "line_number": 361, "usage_type": "name"}, {"api_name": "django.utils.html.format_html", "line_number": 363, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 364, "usage_type": "call"}, {"api_name": "app_utils.messages.messages_plus.error", "line_number": 373, "usage_type": "call"}, {"api_name": "app_utils.messages.messages_plus", "line_number": 373, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 375, "usage_type": "call"}, {"api_name": "core.BaseConfig.owner_character_id", "line_number": 383, "usage_type": "attribute"}, {"api_name": "core.BaseConfig", "line_number": 383, "usage_type": "name"}, {"api_name": "eveuniverse.models.EveEntity.objects.resolve_name", "line_number": 384, "usage_type": "call"}, {"api_name": "eveuniverse.models.EveEntity.objects", "line_number": 384, "usage_type": "attribute"}, {"api_name": "eveuniverse.models.EveEntity", "line_number": 384, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 388, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 340, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 341, "usage_type": "call"}, {"api_name": "esi.decorators.token_required", "line_number": 342, "usage_type": "call"}, {"api_name": "models.ContactSet.required_esi_scope", "line_number": 342, "usage_type": "call"}, {"api_name": "models.ContactSet", "line_number": 342, "usage_type": "name"}, {"api_name": "app_utils.messages.messages_plus.success", "line_number": 395, "usage_type": "call"}, {"api_name": "app_utils.messages.messages_plus", "line_number": 395, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 397, "usage_type": "call"}, {"api_name": "eveuniverse.models.EveEntity.objects.resolve_name", "line_number": 398, "usage_type": "call"}, {"api_name": "eveuniverse.models.EveEntity.objects", "line_number": 398, "usage_type": "attribute"}, {"api_name": "eveuniverse.models.EveEntity", "line_number": 398, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 400, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 391, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 392, "usage_type": "call"}, 
{"api_name": "models.StandingRequest.REQUEST_PERMISSION_NAME", "line_number": 392, "usage_type": "attribute"}, {"api_name": "models.StandingRequest", "line_number": 392, "usage_type": "name"}, {"api_name": "decorators.token_required_by_state", "line_number": 393, "usage_type": "call"}, {"api_name": "tasks.update_all.delay", "line_number": 406, "usage_type": "call"}, {"api_name": "tasks.update_all", "line_number": 406, "usage_type": "name"}, {"api_name": "app_utils.messages.messages_plus.info", "line_number": 407, "usage_type": "call"}, {"api_name": "app_utils.messages.messages_plus", "line_number": 407, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 409, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 414, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 403, "usage_type": "name"}, {"api_name": "django.contrib.admin.views.decorators.staff_member_required", "line_number": 404, "usage_type": "name"}]} +{"seq_id": "33126513805", "text": "\nimport logging\n\nfrom aiogram import types\nfrom aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton\nfrom aiogram.dispatcher import FSMContext\n\nfrom weedly_bot.loader import dp, api_client\nfrom weedly_bot.mystates import My_states\nfrom weedly_bot import utils\n\nlogger = logging.getLogger(__name__)\n\n@dp.message_handler(text_contains='Добавить источники')\n@dp.callback_query_handler(text_contains='add_rss', state='*')\nasync def add_rss(call: types.CallbackQuery):\n    \"\"\"Ask the user to send an RSS link; enter the typing state and wait for the link.\"\"\"\n\n    await call.message.answer(text='пришли ссылку на rss-поток')\n    await My_states.typing_rss.set()\n    logger.debug('set state typing_rss')\n\n    await call.answer(cache_time=0)\n\n\n@dp.message_handler(state=My_states.typing_rss)\nasync def typing_rss(message: types.Message, state: FSMContext):\n    \"\"\"Catch the link and validate it. Add the feed to the DB if it is not there yet,\n    subscribe the user to the feed, then leave the link-catching state.\"\"\"\n\n    await message.answer(f'Проверяем ссылку {message.text.strip()}')\n\n    rss_link = message.text\n    check_rss = utils.check_if_valid_rss_url(rss_link)\n    user_id = message.from_user.id\n\n    if check_rss['res']:\n\n        kb = InlineKeyboardMarkup(inline_keyboard=[[InlineKeyboardButton(text='Добавить еще источник', callback_data='add_rss')],\n                                                   [InlineKeyboardButton(text='< Назад', callback_data='choose_what_to_add')]])\n\n        api_client.feeds.add_rss_source(rss_link)\n        feed = api_client.feeds.get_by_url(rss_link)\n        logger.debug('got feed %s', feed)\n\n        api_client.users.subscrbe_user_to_rss(uid=user_id, feed_id=feed['uid'])\n\n    else:\n\n        kb = InlineKeyboardMarkup(inline_keyboard=[[InlineKeyboardButton(text='Отправить другую ссылку', callback_data='add_rss')],\n                                                   [InlineKeyboardButton(\n                                                       text='< Назад', callback_data='choose_what_to_add')]\n                                                   ]\n                                  )\n\n    await state.reset_state()\n    await message.answer(text=check_rss['msg'], reply_markup=kb)\n", "repo_name": "kbondar17/pocket-rss", "sub_path": "tg-bot/weedly_bot/paths/add_feeds/add_rss.py", "file_name": "add_rss.py", "file_ext": "py", "file_size_in_byte": 2508, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "aiogram.types.CallbackQuery", "line_number": 16, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 16, "usage_type": "name"}, {"api_name": "weedly_bot.mystates.My_states.typing_rss.set", "line_number": 20, "usage_type": "call"}, {"api_name": "weedly_bot.mystates.My_states.typing_rss", "line_number": 20, "usage_type": "attribute"}, {"api_name": "weedly_bot.mystates.My_states", "line_number": 20, "usage_type": "name"}, {"api_name": "weedly_bot.loader.dp.message_handler", "line_number": 14, "usage_type": "call"}, {"api_name": "weedly_bot.loader.dp", "line_number": 14, "usage_type": "name"}, {"api_name": "weedly_bot.loader.dp.callback_query_handler", "line_number": 15, "usage_type": "call"}, {"api_name": "weedly_bot.loader.dp", "line_number": 15, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 27, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 27, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 27, "usage_type": "name"}, {"api_name": "weedly_bot.utils.check_if_valid_rss_url", "line_number": 34, "usage_type": "call"}, {"api_name": "weedly_bot.utils", "line_number": 34, "usage_type": "name"}, {"api_name": "aiogram.types.InlineKeyboardMarkup", "line_number": 39, "usage_type": "call"}, {"api_name": "aiogram.types.InlineKeyboardButton", "line_number": 39, "usage_type": "call"}, {"api_name": "aiogram.types.InlineKeyboardButton", "line_number": 40, "usage_type": "call"}, {"api_name": "weedly_bot.loader.api_client.feeds.add_rss_source", "line_number": 42, "usage_type": "call"}, {"api_name": "weedly_bot.loader.api_client.feeds", "line_number": 42, "usage_type": "attribute"}, {"api_name": "weedly_bot.loader.api_client", "line_number": 42, "usage_type": "name"}, {"api_name": "weedly_bot.loader.api_client.feeds.get_by_url", "line_number": 43, "usage_type": "call"}, {"api_name": "weedly_bot.loader.api_client.feeds", "line_number": 43, "usage_type": "attribute"}, {"api_name": "weedly_bot.loader.api_client", "line_number": 43, "usage_type": "name"}, {"api_name": "weedly_bot.loader.api_client.users.subscrbe_user_to_rss", "line_number": 46, "usage_type": 
"call"}, {"api_name": "weedly_bot.loader.api_client.users", "line_number": 46, "usage_type": "attribute"}, {"api_name": "weedly_bot.loader.api_client", "line_number": 46, "usage_type": "name"}, {"api_name": "aiogram.types.InlineKeyboardMarkup", "line_number": 50, "usage_type": "call"}, {"api_name": "aiogram.types.InlineKeyboardButton", "line_number": 50, "usage_type": "call"}, {"api_name": "aiogram.types.InlineKeyboardButton", "line_number": 51, "usage_type": "call"}, {"api_name": "weedly_bot.loader.dp.message_handler", "line_number": 26, "usage_type": "call"}, {"api_name": "weedly_bot.loader.dp", "line_number": 26, "usage_type": "name"}, {"api_name": "weedly_bot.mystates.My_states.typing_rss", "line_number": 26, "usage_type": "attribute"}, {"api_name": "weedly_bot.mystates.My_states", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "37532071979", "text": "import numpy as np\nimport matplotlib\nimport matplotlib.patches as patches\nimport matplotlib.pyplot as plt\n\nclass Bounds(object):\n def __init__(self, x0, y0, x1, y1):\n self.x0 = x0\n self.y0 = y0\n self.x1 = x1\n self.y1 = y1\n\n def size(self):\n return np.array([self.x1 - self.x0, self.y1 - self.y0])\n\n def rescale(self, scale, save=False):\n new_size = self.size() * scale\n x1 = self.x0 + new_size[0]\n y1 = self.y0 + new_size[1]\n return Bounds(self.x0, self.y0, x1, y1)\n\n def to_list(self):\n return np.array([self.x0, self.y0, self.x1, self.y1])\n\n def __repr__(self):\n return \"Rect[(x0, y0)=(%g, %g) -> (x1, y1)=(%g, %g)]\" % (self.x0, self.y0, self.x1, self.y1)\n\n def __str__(self):\n return self.__repr__()\n\ndef get_axis_bounds(fig, ax, scaled=False):\n children = ax.get_children()\n\n # initial est based on ax itself\n p0, p1 = ax.bbox.get_points()\n xmax, ymax = p1\n xmin, ymin = p0\n\n\n for child in children:\n if isinstance(child, matplotlib.axis.XAxis):\n text_obj = filter(lambda x: isinstance(x, matplotlib.text.Text), child.get_children()) ## Axis labels?\n text_obj_y = [x.get_window_extent(renderer=fig.canvas.renderer).p0[1] for x in text_obj]\n ymin_label = np.min(text_obj_y)\n if ymin_label < ymin:\n ymin = ymin_label\n elif isinstance(child, matplotlib.axis.YAxis):\n text_obj = filter(lambda x: isinstance(x, matplotlib.text.Text), child.get_children())\n text_obj_x = [x.get_window_extent(renderer=fig.canvas.renderer).p0[0] for x in text_obj]\n xmin_label = np.min(text_obj_x)\n if xmin_label < xmin:\n xmin = xmin_label\n elif hasattr(child, 'get_window_extent'):\n bb = child.get_window_extent(renderer=fig.canvas.renderer)\n if xmax < bb.p1[0]:\n xmax = bb.p1[0]\n if xmin > bb.p0[0]:\n xmin = bb.p0[0]\n if ymin > bb.p0[1]:\n ymin = bb.p0[1]\n if ymax < bb.p1[1]:\n ymax = bb.p1[1]\n\n # special handler for ticklabels, which don't work in the same way as above for some reason..\n for l in ax.get_xticklabels():\n bb = l.get_window_extent(renderer=fig.canvas.renderer)\n if xmax < bb.p1[0]:\n xmax = bb.p1[0]\n if xmin > bb.p0[0]:\n xmin = bb.p0[0]\n if ymin > bb.p0[1]:\n ymin = bb.p0[1]\n if ymax < bb.p1[1]:\n ymax = bb.p1[1]\n\n if scaled:\n rect_bounds = np.array([xmin, ymin, xmax, ymax])\n fig_size_x, fig_size_y = fig.get_size_inches() * fig.dpi\n rect_bounds /= np.array([fig_size_x, fig_size_y, fig_size_x, fig_size_y])\n return rect_bounds\n else:\n return np.array([xmin, ymin, xmax, ymax])\n\ndef frame_axis(fig, ax, color='red'):\n fig_size_x, fig_size_y = fig.get_size_inches() * fig.dpi\n x0, y0, x1, y1 = get_axis_bounds(fig, ax)\n width = x1 - x0\n height = y1 - y0\n\n rect = patches.Rectangle([x0, 
y0], width, height, linewidth=1, edgecolor=color, facecolor='none')\n\n fig.patches.extend([rect])\n\ndef get_plot_bounds(fig, ax):\n fig_size_x, fig_size_y = fig.get_size_inches() * fig.dpi\n plot_bounds = ax.bbox.get_points() / np.array([fig_size_x, fig_size_y])\n return plot_bounds.ravel()\n\ndef is_overlapping(bounds1, bounds2):\n xmin1, ymin1, xmax1, ymax1 = bounds1\n xmin2, ymin2, xmax2, ymax2 = bounds2\n x_overlapping_km = (xmin1 >= xmin2 and xmin1 <= xmax2) or \\\n (xmax1 >= xmin2 and xmax1 <= xmax2) or \\\n (xmax1 >= xmax2 and xmin1 <= xmin2) or \\\n (xmax1 <= xmax2 and xmin1 >= xmin2)\n y_overlapping_km = (ymin1 >= ymin2 and ymin1 <= ymax2) or \\\n (ymax1 >= ymin2 and ymax1 <= ymax2) or \\\n (ymax1 >= ymax2 and ymin1 <= ymin2) or \\\n (ymax1 <= ymax2 and ymin1 >= ymin2)\n return x_overlapping_km and y_overlapping_km\n\ndef detect_overlapping_axes(fig, axes, ret_full=False, verbose=False):\n axes_bounds = [get_axis_bounds(fig, ax) for ax in axes]\n plot_bounds = [get_plot_bounds(fig, ax) for ax in axes]\n\n if verbose:\n print(\"Axes bounds\")\n print(axes_bounds)\n print(\"plot bounds\")\n print(plot_bounds)\n\n overlapping = False\n N = len(axes)\n overlapping_mat = np.zeros([N, N], dtype=bool)\n\n for k, ax1 in enumerate(axes):\n xmin1, ymin1, xmax1, ymax1 = axes_bounds[k]\n\n for m,ax2 in enumerate(axes):\n if m <= k: continue\n\n overlapping_km_with_text = is_overlapping(axes_bounds[k], axes_bounds[m])\n overlapping_km_plot_only = is_overlapping(plot_bounds[k], plot_bounds[m])\n\n overlapping_km = overlapping_km_with_text and not overlapping_km_plot_only\n overlapping_mat[k,m] = overlapping_km\n overlapping = overlapping or overlapping_km\n\n if verbose:\n print(\"k={}, m={}, overlapping with text={}, overlapping plot only={}\".format(k, m, overlapping_km_with_text, overlapping_km_plot_only))\n\n if ret_full:\n return overlapping_mat\n else:\n return overlapping\n\ndef calc_scale_factor_horizontal(fig, ax1, ax2):\n axes = [ax1, ax2]\n axes_bounds = [get_axis_bounds(fig, ax, scaled=True) for ax in axes]\n plot_bounds = [get_plot_bounds(fig, ax) for ax in axes]\n\n if plot_bounds[0][0] < plot_bounds[1][0]:\n left_idx, right_idx = 0, 1\n else:\n left_idx, right_idx = 1, 0\n return _calc_scale_factor(plot_bounds[left_idx], plot_bounds[right_idx],\n axes_bounds[left_idx], axes_bounds[right_idx], 'horizontal')\n\ndef _calc_scale_factor(pb_left, pb_right, ab_left, ab_right, axis):\n \"\"\"Helper function for scaling, axis-object free calculations\"\"\"\n if not isinstance(pb_left, Bounds):\n pb_right = Bounds(*pb_right)\n pb_left = Bounds(*pb_left)\n ab_right = Bounds(*ab_right)\n ab_left = Bounds(*ab_left)\n\n if axis == 'horizontal':\n if pb_left.x0 == pb_right.x0:\n return 0 # there's no horizontal space between the axes (aligned vertically), so shrinking can't help\n\n extra_space_needed = ab_left.x1 - ab_right.x0\n scaling_dim_size = pb_left.size()[0]\n elif axis == 'vertical':\n if pb_left.y0 == pb_right.y0:\n return 0 # no vertical space between axes so you can't shrink\n\n extra_space_needed = ab_left.y1 - ab_right.y0\n scaling_dim_size = pb_left.size()[1]\n else:\n raise ValueError(\"Unrecognized axis: %s\" % axis)\n\n # if the space needed is negative, then there is already enough so no scaling is necessary\n if extra_space_needed <= 0:\n return 1.0\n\n # shrink ab_left by an amount such that extra_space_needed == 0\n scale_factor = 1 - extra_space_needed / scaling_dim_size\n return scale_factor\n\ndef calc_scale_factor_vertical(fig, ax1, ax2):\n axes = [ax1, ax2]\n 
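# Gather the text-inclusive bounds and the plot-only bounds (both as figure fractions) for the two axes\n    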
axes_bounds = [get_axis_bounds(fig, ax, scaled=True) for ax in axes]\n plot_bounds = [get_plot_bounds(fig, ax) for ax in axes]\n\n if plot_bounds[0][1] < plot_bounds[1][1]:\n idx_bottom, idx_top = 0, 1\n # reorder\n # return calc_scale_factor_vertical(fig, ax2, ax1)\n else:\n idx_bottom, idx_top = 1, 0\n\n return _calc_scale_factor(plot_bounds[idx_bottom], plot_bounds[idx_top],\n axes_bounds[idx_bottom], axes_bounds[idx_top], 'vertical')\n\ndef calc_scale_factor_pairwise(fig, ax1, ax2):\n sf_h = calc_scale_factor_horizontal(fig, ax1, ax2)\n sf_v = calc_scale_factor_vertical(fig, ax1, ax2)\n return np.max([sf_h, sf_v])\n\ndef calc_scale_factor(fig, axes, verbose=False):\n overlapping_mat = detect_overlapping_axes(fig, axes, ret_full=True)\n inds1, inds2 = np.nonzero(overlapping_mat)\n\n scale_factor = np.ones(overlapping_mat.shape)\n\n for k in inds1:\n for m in inds2:\n scale_factor[k,m] = calc_scale_factor_pairwise(fig, axes[0], axes[1])\n\n if verbose:\n print(\"pairwise scale factors\")\n print(scale_factor)\n\n pairwise_scale_factor = np.min(scale_factor)\n\n # calculate any collisions with the horizontal and vertical maximum\n boundary_scale_factor = 1.0\n for ax in axes:\n ax_bounds = Bounds(*get_axis_bounds(fig, ax, scaled=True))\n plot_bounds = Bounds(*get_plot_bounds(fig, ax))\n if ax_bounds.x1 > 1.0:\n extra_space_needed = ax_bounds.x1 - 1.0\n scaling_dim_size = plot_bounds.size()[0]\n\n scale_factor = 1 - extra_space_needed / scaling_dim_size\n if scale_factor < boundary_scale_factor:\n boundary_scale_factor = scale_factor\n\n if ax_bounds.y1 > 1.0:\n extra_space_needed = ax_bounds.y1 - 1.0\n scaling_dim_size = plot_bounds.size()[1]\n\n scale_factor = 1 - extra_space_needed / scaling_dim_size\n if scale_factor < boundary_scale_factor:\n boundary_scale_factor = scale_factor\n\n return min(pairwise_scale_factor, boundary_scale_factor)\n\ndef rescale(fig, reposition_axes=False):\n \"\"\"Shrink axes until there is no overlap\"\"\"\n axes = fig.get_axes()\n if reposition_axes:\n reposition(fig)\n scale_factor = calc_scale_factor(fig, axes)\n\n for ax in axes:\n bbox = ax.get_position()\n x0, y0 = bbox.x0, bbox.y0\n x1, y1 = bbox.x1, bbox.y1\n width = x1 - x0\n height = y1 - y0\n\n ax.set_position([x0, y0, width*scale_factor, height*scale_factor])\n\ndef _rescale(ax, scale_factor, anchor_top_left=True):\n \"\"\"Helper function to rescale an axis\"\"\"\n bbox = ax.get_position()\n x0, y0 = bbox.x0, bbox.y0\n x1, y1 = bbox.x1, bbox.y1\n width = x1 - x0\n height = y1 - y0\n\n if anchor_top_left:\n ax.set_position([x0, y0, width*scale_factor, height*scale_factor])\n else:\n ax.set_position([x0*scale_factor, y0*scale_factor, width*scale_factor, height*scale_factor])\n\ndef _extended_bounding_box(axes_bounds):\n \"\"\"Calculate a box which encompasses all the input boxes\"\"\"\n x0, y0 = np.inf, np.inf\n x1, y1 = -np.inf, -np.inf\n for k in range(len(axes_bounds)):\n ab = axes_bounds[k]\n if ab[0] < x0:\n x0 = ab[0]\n if ab[1] < y0:\n y0 = ab[1]\n if ab[2] > x1:\n x1 = ab[2]\n if ab[3] > y1:\n y1 = ab[3]\n\n height = y1 - y0\n width = x1 - x0\n return np.array([x0, y0, x1, y1])\n\ndef calc_translation_to_top_left(axes_bounds, margin=0):\n x0, y0, _, _ = _extended_bounding_box(axes_bounds)\n\n # translate to reference point (lower left corner = origin)\n ref_point = np.array([margin, margin])\n transl = ref_point - np.array([x0, y0])\n return transl\n\ndef reposition(fig, margin=0.02):\n \"\"\"Reposition axes of a figure to the top left\"\"\"\n axes = fig.get_axes()\n axes_bounds = 
[get_axis_bounds(fig, ax, scaled=True) for ax in axes]\n    plot_bounds = [get_plot_bounds(fig, ax) for ax in axes]\n\n    transl = calc_translation_to_top_left(axes_bounds, margin=margin)\n\n    # redraw axes; apply transformation to plot boundaries because that's the input to `set_position`\n    for k,ax in enumerate(axes):\n        pb = plot_bounds[k]\n        width, height = pb[2:] - pb[:2]\n        x0, y0 = pb[:2] + transl\n        ax.set_position([x0, y0, width, height])\n\n    plt.draw()\n\n", "repo_name": "sgowda/autodrag", "sub_path": "autodrag/mpl_resize.py", "file_name": "mpl_resize.py", "file_ext": "py", "file_size_in_byte": 11356, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.axis", "line_number": 41, "usage_type": "attribute"}, {"api_name": "matplotlib.text", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.axis", "line_number": 47, "usage_type": "attribute"}, {"api_name": "matplotlib.text", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 90, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 280, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 281, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 302, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 320, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 320, "usage_type": "name"}]} +{"seq_id": "42262012048", "text": "\"\"\"\nHere we'll use Monte Carlo simulations to approximate the value of π\n\nImagine a square canvas containing a circular dartboard, centered and tangent to the sides of the square.\nA dart thrower has an equal probability of hitting anywhere on the canvas.\n\nWe can calculate the probability of hitting the circular board:\n\np(hit) = number of darts in circle/total number of darts thrown\n\nSince the thrower always hits the canvas, we can rewrite this as:\n\np(hit) = area of circle/area of square\n\nIf the circle has radius r, the square has side 2r, so we can rewrite the equation:\n\np(hit) = πr^2/(2r)^2 = πr^2/4r^2\np(hit) = π/4\nπ = 4*p(hit)\n\n---------------------------------------------------------\nπ = 4 * num of darts in circle/total num of darts thrown\n---------------------------------------------------------\n\nIf we throw an infinite number of darts at the canvas, we can obtain a good approximation for the value of π\nWe'll use Monte Carlo simulations to simulate throw outcomes.\n\n\"\"\"\n\n# Importing libraries\nimport random\nimport numpy as np\nfrom numpy import random\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n\n\nnum_sims = 10000 # Number of simulations\n\nall_pis = []\n\ndef estimate_pi(simulations):\n    # Simulating dart throws on a board with radius = 1\n    hits = 0 # Counter for the number of \"darts that fall inside the circle\"\n    inside_outside = [] # Array to contain all the dart outcomes; used for plotting\n\n    all_x = []\n    all_y = []\n\n    for sim in range(1, simulations + 1):  # use the argument so exactly `simulations` darts are thrown\n        x = np.random.uniform(-1, 1)\n        all_x.append(x)\n        y = np.random.uniform(-1, 1)\n        all_y.append(y)\n\n        d = np.sqrt(x**2 + y**2) # We can use Pythagoras' formula to calculate the distance of the dot from the center of the circle\n        if d < 1:\n            hits += 1\n            inside_outside.append(1)\n        else:\n            inside_outside.append(0)\n\n        temp_pi = 4 * hits / sim\n        all_pis.append(temp_pi)\n\n    hit_or_miss = np.where(np.array(inside_outside) == 1, 'hit', 'miss')\n\n    pi = 4 * hits / simulations\n    print(f'The estimated value of π is {pi}')\n\n    # Plotting the results\n\n    fig = plt.figure(figsize=(5, 10))\n    plt.style.use('fivethirtyeight')\n\n    ax1 = fig.add_subplot(211)\n    ax1.set_title('Monte Carlo Simulation to Estimate Value of Pi',\n                  fontsize=14)\n    ax1 = sns.scatterplot(x=all_x, y=all_y, hue=hit_or_miss, legend=False)  # keyword args required by newer seaborn\n\n    ax2 = fig.add_subplot(212)\n    ax2 = plt.plot(all_pis)\n    plt.xlabel('Number of Simulations', fontsize=12)\n    plt.ylabel('Value of Pi', fontsize=12)\n\n    plt.axhline(y=3.14, color='g', linestyle='-')\n    plt.annotate(f'Estimated value of pi = {pi}',\n                 xy=(1, 0),\n                 xycoords='axes fraction',\n                 xytext=(-20, 150),\n                 textcoords='offset pixels',\n                 horizontalalignment='right',\n                 verticalalignment='top')\n    plt.xticks(fontsize=10)\n    plt.yticks(fontsize=10)\n    # ax2.ylabel('Value of Pi')\n    # ax2.xlabel('Number of Simulations')\n\n    plt.tight_layout()\n    plt.show()\n    fig.savefig('images/monte_carlo_pi_estimation.png', dpi=300)\n\n\nestimate_pi(num_sims)", "repo_name": "tiroger/monte-carlo-demo", "sub_path": "pi.py", "file_name": "pi.py", "file_ext": "py", "file_size_in_byte": 3139, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.random.uniform", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 76, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "seaborn.scatterplot", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}]} +{"seq_id": "72937185654", "text": "# This script should invest money on a consistent basis for you into one stock at the best possible time within a window\nimport datetime\nimport math\nimport random\nimport time\nimport apirequest\n\nstart_date = datetime.date.today() - datetime.timedelta(days=30)  # assumed 30-day lookback; start_date must be defined before the API call below\nhourly_moving_average = apirequest.generate_hourly_moving_average_between_dates(\n    start_date, datetime.date.today()\n)\n\n\ndef is_the_number():\n    random_number = random.randint(1, 3)\n    print(random_number)\n    if random_number == 2:\n        print(\"return true\")\n        return True\n    else:\n        print(\"return false\")\n        return False\n\n\ndef check_position_is_good(stock):\n    return is_the_number()\n\n\ndef get_position():\n    pass\n\n\ndef check_if_in_investment_window_days(\n    days_between_investments,\n    number_of_hours_to_make_investment,\n    last_investment_date=None,\n):\n    if last_investment_date:\n        next_investment_date = datetime.datetime.strftime(\n            datetime.datetime.strptime(last_investment_date, \"%d-%m-%Y\")\n            + datetime.timedelta(days=days_between_investments),\n            \"%d-%m-%Y\",\n        )\n        print(f\"Next investment date is : {next_investment_date}\")\n        print(f\"Today is : {datetime.datetime.today().strftime('%d-%m-%Y')}\")\n        if next_investment_date == datetime.datetime.today().strftime(\"%d-%m-%Y\"):\n            return True\n        else:\n            return False\n    else:\n        return True\n\n\ndef make_buy(amount_to_invest):\n    print(\"buying!\")\n    pass\n\n\ndef main(\n    days_between_investments,\n    number_of_hours_to_make_investment,\n    stock,\n    amount_to_invest,\n    last_investment_date=None,\n):\n    if check_if_in_investment_window_days(\n        days_between_investments,\n        number_of_hours_to_make_investment,\n        last_investment_date,\n    ):\n        while True:\n            if check_position_is_good(stock):\n                make_buy(amount_to_invest)\n                break\n            else:\n                print(\"I'm sleeping\")\n                time.sleep(1)\n    else:\n        print(\"will check if we're in the window in an hour\")\n        time.sleep(3600)\n\n\nmain(2, 2, \"BTC\", 400)\n", "repo_name": "AGeeson/binance_stuff", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2074, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "apirequest.generate_hourly_moving_average_between_dates", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 10, "usage_type": "call"}, 
{"api_name": "datetime.date", "line_number": 10, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 46, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 77, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "27927717677", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom utils import algorithms, datasets_maps, dicts, survey\nfrom matplotlib.font_manager import _rebuild\n_rebuild() \nconfig = {\n \"font.family\":'serif',\n \"mathtext.fontset\":'stix',\n \"font.serif\": ['SimHei'],\n}\nplt.rcParams.update(config)\nbase_size = 12\n\nstep_maps = {\n 'Collection': '收集',\n 'Aggregation': '聚集',\n 'Messaging': '消息传递',\n 'Updating': '向量更新'\n}\n\ndef survey(labels, data, category_names, ax=None, color_dark2=False): # stages, layers, steps,算子可以通用\n for i, c in enumerate(category_names):\n if c[0] == '_':\n category_names[i] = c[1:]\n\n data_cum = data.cumsum(axis=1)\n if color_dark2:\n category_colors = plt.get_cmap('Dark2')(\n np.linspace(0.15, 0.85, data.shape[1]))\n else:\n category_colors = plt.get_cmap('RdYlGn')(\n np.linspace(0.15, 0.85, data.shape[1])) \n \n if ax is None:\n fig, ax = plt.subplots(figsize=(7/1.5, 4.5/1.5), tight_layout=True)\n else:\n fig = None\n ax.invert_yaxis()\n ax.set_xlim(0, np.sum(data, axis=1).max())\n\n for i, (colname, color) in enumerate(zip(category_names, category_colors)):\n widths = data[:, i]\n starts = data_cum[:, i] - widths\n ax.barh(labels, widths, left=starts, height=0.5,\n label=step_maps[colname], color=color)\n xcenters = starts + widths / 2\n\n r, g, b, _ = color\n text_color = 'white' if r * g * b < 0.5 else 'darkgrey'\n for y, (x, c) in enumerate(zip(xcenters, widths)):\n ax.text(x, y, '%.1f' % c, ha='center', va='center',\n color=text_color, fontsize=base_size-4)\n ax.legend(ncol=4, bbox_to_anchor=(-0.1, 1),\n loc='lower left', fontsize=base_size-4)\n\n return fig, ax\n\ndef pic_others_propogation(label, file_name, file_type, dir_out=\"exp3_thesis_figs/time\", dir_work=\"paper_exp2_time_break\"):\n algs = [\"gcn\", \"ggnn\", \"gat\", \"gaan\"]\n columns = dicts[label]\n for alg in algs:\n file_path = dir_work + \"/config_exp/\" + label + \"/\" + alg + \".csv\"\n df = pd.read_csv(file_path, index_col=0)\n print(df)\n data = 100 * df.values / df.values.sum(axis=0)\n fig, ax = survey([datasets_maps[i] for i in df.columns], data.T, columns, color_dark2=True)\n ax.set_title(algorithms[alg], loc=\"right\", fontsize=base_size+2)\n ax.set_xlabel(\"比例 (%)\", fontsize=base_size+2)\n ax.set_ylabel(\"数据集\", fontsize=base_size+2)\n plt.xticks(fontsize=base_size)\n plt.yticks(fontsize=base_size)\n plt.tight_layout()\n fig.savefig(dir_out + \"/\"+ file_name + alg + \".\" + file_type, dpi=400) \n \n\ndef 
pic_inference_others_propogation(label, file_name, file_type, dir_out=\"exp3_thesis_figs/time\", dir_work=\"paper_exp5_inference_full\"):\n algs = [\"gcn\", \"ggnn\", \"gat\", \"gaan\"]\n columns = dicts[label]\n for alg in ['gcn']:\n file_path = dir_work + \"/config_exp/\" + label + \"/\" + alg + \".csv\"\n df = pd.read_csv(file_path, index_col=0)\n print(df)\n data = 100 * df.values / df.values.sum(axis=0)\n fig, ax = survey([datasets_maps[i] for i in df.columns], data.T, columns, color_dark2=True)\n ax.set_title(algorithms[alg], loc=\"right\", fontsize=base_size+2)\n ax.set_xlabel(\"比例 (%)\", fontsize=base_size+2)\n ax.set_ylabel(\"数据集\", fontsize=base_size+2)\n plt.xticks(fontsize=base_size)\n plt.yticks(fontsize=base_size)\n plt.tight_layout()\n fig.savefig(dir_out + \"/\"+ file_name + alg + \".\" + file_type, dpi=400) \n\n\npic_others_propogation('edge_cal', 'exp_edge_calc_decomposition_', \"png\")\npic_inference_others_propogation('edge_cal', 'exp_inference_full_edge_calc_decomposition_', \"png\")\n", "repo_name": "AugF/pyg-analysis", "sub_path": "pics_exp2_edge_cal_proportion_thesis.py", "file_name": "pics_exp2_edge_cal_proportion_thesis.py", "file_ext": "py", "file_size_in_byte": 3837, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "matplotlib.font_manager._rebuild", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 12, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.get_cmap", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.get_cmap", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 40, "usage_type": "call"}, {"api_name": "utils.dicts", "line_number": 61, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 64, "usage_type": "call"}, {"api_name": "utils.survey", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.datasets_maps", "line_number": 67, "usage_type": "name"}, {"api_name": "utils.algorithms", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "utils.dicts", "line_number": 79, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 82, "usage_type": "call"}, {"api_name": "utils.survey", "line_number": 85, "usage_type": "call"}, {"api_name": "utils.datasets_maps", "line_number": 85, "usage_type": "name"}, {"api_name": "utils.algorithms", 
"line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}]} +{"seq_id": "39288459235", "text": "from flask import Flask, request, jsonify, render_template\nimport llm\nimport datetime\nimport json\nimport os\n\napp = Flask(__name__)\n\nlog_folder = './stored'\n\ndef create_log_file():\n \n # 디렉토리가 존재하지 않으면 생성\n if not os.path.exists(log_folder):\n os.makedirs(log_folder)\n \n now = datetime.datetime.now()\n filename = now.strftime('%Y-%m-%d_%H-%M-%S') + '.json'\n filepath = os.path.join(log_folder, filename)\n\n # 빈 딕셔너리 생성\n empty_dict = []\n\n # 빈 JSON 파일 생성\n with open(filepath, 'w') as f:\n json.dump(empty_dict, f)\n\n return filename\n\ndef store_question_answer(question, answer, filename):\n # 저장할 데이터 생성\n now = datetime.datetime.now()\n data = {\n 'time': now.strftime('%Y-%m-%d %H:%M:%S'),\n 'question': question,\n 'answer': answer\n }\n\n filepath = os.path.join(log_folder, filename)\n\n if os.path.exists(filepath):\n with open(filepath, 'r') as f:\n loaded_data = json.load(f)\n else:\n loaded_data = []\n \n # 새로운 질문과 답변을 추가\n loaded_data.append(data)\n\n # JSON 파일로 저장\n with open(filepath, 'w') as f:\n json.dump(loaded_data, f, ensure_ascii=False, indent=4)\n\n@app.route('/')\ndef home():\n filename = create_log_file()\n return render_template('index.html', value=filename) # templates 폴더에서 index.html 파일을 찾습니다.\n\n@app.route('/get_answer', methods=['POST'])\ndef get_answer():\n data = request.get_json()\n question = data['question']\n filename = data['logfile']\n\n # print(question)\n # print(filename)\n\n # 답변 생성 코드 \n answer = llm.gptanswer(question) # generate_answer는 답변을 생성하는 함수\n # 로그작성\n store_question_answer(question, answer, filename)\n\n return jsonify({'answer': answer})\n", "repo_name": "alphalef/ai-education", "sub_path": "backup/app2.py", "file_name": "app2.py", "file_ext": "py", "file_size_in_byte": 1889, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, 
{"api_name": "json.load", "line_number": 43, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "llm.gptanswer", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "32118995820", "text": "import json\nimport os\nimport pandas as pd\nimport csv\nfrom neo4j import GraphDatabase\nfrom collections import OrderedDict\n\nclass Neo4jDB:\n URI = \"\"\n AUTH = \"\"\n driver = \"\"\n session = \"\"\n\n def __init__(self,URI,AUTH,db):\n self.URI = URI\n self.AUTH = AUTH\n self.driver = GraphDatabase.driver(URI, auth=AUTH)\n self.session = self.driver.session(database=db)\n \n #This method will check if node is present or not. \n #There are 4 types of nodes, it will return the no.of nodes of pesent in db. \n #If no node is there it will return zero.\n def check_if_node_present(self,tx,node):\n if node[\"type\"] == \"Car\":\n result = tx.run(\"Match (n:Car) where n.Name=$name return n\",name=node[\"Name\"])\n return len([ele[\"n\"] for ele in result])\n \n elif node[\"type\"] == \"Model\":\n result = tx.run(\"Match (n:Model) where n.Name=$name return n\",name=node[\"Name\"])\n return len([ele[\"n\"] for ele in result])\n \n elif node[\"type\"] == \"Possible_Problem\":\n result = tx.run(\"Match (n:Possible_Problem) where n.name=$name return n\",name=node[\"name\"])\n return len([ele[\"n\"] for ele in result])\n else:\n result = tx.run(\"Match (n:Problem) where n.desc=$name return n\",name=node[\"desc\"])\n return len([ele[\"n\"] for ele in result])\n \n #This method will check if a relationship is present between two nodes. 
\n def check_if_relationship_present(self,tx,data):\n #if relation is of type Leads_to then the relationship will have weight and name as attribute.\n if data[1][\"type\"] == \"Leads_to\":\n res = tx.run(\"Match (n) where id(n)=$node1 Match (m) where id(m)=$node2 Match p = (n)-[r:\"+str(data[1][\"type\"])+\"]->(m) where r.relation=$rel AND r.weight=$weight return p\",node1=data[0],node2=data[2],rel=data[1][\"name\"],weight=data[1][\"weight\"])\n ans = [ele[\"p\"] for ele in res]\n return len(ans)\n #other wise, the relation will only have name as attribute\n else:\n res = tx.run(\"Match (n) where id(n)=$node1 Match (m) where id(m)=$node2 Match p = (n)-[r:\"+str(data[1][\"type\"])+\"]->(m) where r.relation=$rel return p\",node1=data[0],node2=data[2],rel=data[1][\"name\"])\n ans = [ele[\"p\"] for ele in res]\n return len(ans)\n \n #This method will check if there is any model has a relationship with any question and relationshp named being logictree name.\n #Returns 0 if logictree exist \n #Returns 1 if logictree do not exist \n #Returns 2 if car_make or model do not exist.\n def isLogicTreePresent(self,tx,data):\n car = data[\"Car\"].capitalize()\n model = data[\"Model\"].capitalize()\n tree = data[\"tree\"].capitalize()\n \n \n if len([x[\"id\"] for x in tx.run(\"Match (n:Car) where n.Name=$car return id(n) as id\",car=car)]) != 0:\n if len([ele[\"id\"] for ele in tx.run(\"Match (n:Car) where n.Name=$car Match (m:Model) where m.Name=$model Match (n)-[r:Model]->(m) return id(m) as id\",car=car,model=model)]) != 0:\n ans = tx.run(\"Match (n:Model) where n.Name=$name Match (n)-[r]->(m) where r.relation=$relation return id(m) as id,r.relation as rel\",name=model,relation=tree)\n temp = [e[\"id\"] for e in ans]\n if len(temp) != 0:\n return 0\n else:\n return 1\n else:\n return 2\n else:\n return 2\n \n\n #The method will return the id of a node.\n #Node being Car,Model,Problem,Possible_Problem\n def getNodeId(self,tx,node):\n #print(node)\n if node[\"type\"] == \"Car\":\n res = tx.run('Match (n:Car) where n.Name=$name return id(n) as id',name=node[\"Name\"])\n return [ele[\"id\"] for ele in res]\n if node[\"type\"] == \"Model\":\n res = tx.run('Match (n:Model) where n.Name=$name return id(n) as id',name=node[\"Name\"])\n return [ele[\"id\"] for ele in res]\n if node[\"type\"] == \"Problem\":\n res = tx.run('Match (n:Problem) where n.desc=$desc return id(n) as id',desc=node[\"desc\"])\n return [ele[\"id\"] for ele in res]\n if node[\"type\"] == \"Possible_Problem\":\n res = tx.run('Match (n:Possible_Problem) where n.name=$name return id(n) as id',name=node[\"name\"])\n return [ele[\"id\"] for ele in res]\n else:\n return None\n \n #This method will traverse the whole tree using depth first search.\n #Although this method is in use due to time complexity issue.\n #It will return all list of all node ids present in logic tree.\n #Logic tree basically kind of graph start with car, -> model -> question and then possible_problems. \n def traverse(self,tx,node,l=[]): \n l.append(node)\n result = tx.run(\"Match (n) where id(n)=$ids Match (n)-[r]->(m) return id(m) as id,r.relation as rel\",ids=node)\n nodes = [[res[\"id\"],res[\"rel\"]] for res in result]\n if len(nodes) == 0:\n #print(\"exit\")\n return \n #print(nodes)\n for n in nodes:\n if n not in l:\n l.append(n[0])\n self.traverse(tx,n[0])\n \n return l \n\n def traverse_(self,node):\n res = self.session.execute_read(self.traverse,node)\n print(set(res))\n \n #The add_nodes function will add a node in database. 
\n #Returns the node id.\n def add_nodes(self,tx,node):\n if node[\"type\"] == \"Car\":\n result = tx.run(\"Create (n:Car) SET n.Name=$name SET n.type=$type_ Return id(n) as id \",name=node[\"Name\"],type_=node[\"type\"])\n #print(\"Car Created\")\n return result.single()[0]\n \n elif node[\"type\"] == \"Model\":\n result = tx.run(\"Create (n:Model) SET n.Name=$name SET n.type=$type_ Return id(n) as id \",name=node[\"Name\"],type_=node[\"type\"])\n #print(\"Model Created\")\n return result.single()[0]\n \n elif node[\"type\"] == \"Possible_Problem\":\n result = tx.run(\"Create (n:Possible_Problem) set n.name=$name SET n.type=$type_ Return id(n) as id \",name=node[\"name\"],type_=node[\"type\"])\n #print(\"PP Created\")\n return result.single()[0]\n else:\n result = tx.run(\"Create (n:Problem) set n.desc=$desc SET n.type=$type_ Return id(n) as id \",desc=node[\"desc\"],type_=node[\"type\"])\n #print(\"Problem Created\")\n return result.single()[0]\n \n #This method take two nodes, and a relation as input and creates a relationship between them.\n #node1 -> Node no.1 (eg.Car,Model,Problem)\n #node2 -> Relationship (eg. model,labels(yes,no,...),leads_to)\n #node3 -> Node no.2 (eg.Model,Problem,Possible_Problem)\n def add_relationship(self,tx,data):\n print(data)\n node1 = data[0]\n node2 = data[1]\n node3 = data[2]\n rel_type = node2[\"type\"]\n relation = node2[\"name\"] \n \n \n #if weight is in node2, then node3 or Node no.2 is Possible_Problem and Node no.1 is Problem.\n #While creating new relationship between problem and possible_problems, an addition property of relation i.e weight is added along with name.\n if \"weight\" in node2.keys():\n weight = node2[\"weight\"]\n result = tx.run(\"Match (n) where id(n) = $node1 Match (m) where id(m) = $node3 Create (n)-[r:\"+str(rel_type)+\"]->(m) SET r.relation=$relation SET r.weight=$weight Return 'Relation Created'\",node1=node1,node3=node3,relation=relation,weight=weight) \n \n return result.single()[0]\n \n #while creating other relationship, they all have same property i.e (name).\n else:\n result = tx.run(\"Match (n) where id(n) = $node1 Match (m) where id(m) = $node3 Create (n)-[r:\"+str(rel_type)+\"]->(m) SET r.relation=$relation Return 'Relation Created'\",node1=node1,node3=node3,relation=relation)\n \n return result.single()[0]\n\n\n #This method will take node no.1 and node no.2 and relation as input.\n #First it will check if each node is present or not. \n #If not it will create the node and get their ids.\n #If present then it will fetch there ids.\n #Then it will check if they have a relationship, if not it will be created otherwise nothing will happen.\n #For eg. 
Node no.1 (type:Car, Name:Honda) Node no.2 (type:Model,Name:City) Relationship (type:Problem,name:logic tree name)\n \n def add_logic_tree(self,data,Model):\n node_set = []\n res = self.session.execute_read(self.isLogicTreePresent,Model)\n #res == 2, means node 1 and node 2 do not exist in databse.\n if res == 2:\n node1,node2 = None,None\n for i in data:\n #print(i)\n node1 = self.CreateNode(i[0],node_set)\n node_set.append(node1)\n node2 = self.CreateNode(i[2],node_set)\n node_set.append(node2)\n if self.session.execute_read(self.check_if_relationship_present,[node1,i[1],node2]) == 0:\n res = self.session.execute_write(self.add_relationship,[node1,i[1],node2])\n else:\n print(\"Relation already present \",[node1,i[1],node2])\n #print(res)\n print(\"Tree Created\")\n return \"Tree Created Successfully\"\n #res ==1 means car and model exist but node logic tree is present.\n elif res == 1:\n \n model = {}\n model[\"type\"] = \"Model\"\n model[\"Name\"] = Model[\"Model\"]\n \n node = self.session.execute_read(self.getNodeId,model)\n #node_set = self.session.execute_read(self.traverse,node[0])\n #if node_set is None:\n node_set = []\n \n #print(\"set:\",node_set)\n node1,node2 = None,None\n for i in data:\n #print(i)\n node1 = self.CreateNode(i[0],node_set)\n node_set.append(node1)\n node2 = self.CreateNode(i[2],node_set)\n node_set.append(node2)\n if self.session.execute_read(self.check_if_relationship_present,[node1,i[1],node2]) == 0:\n res = self.session.execute_write(self.add_relationship,[node1,i[1],node2])\n else:\n print(\"Relation already present \",[node1,i[1],node2])\n print(\"Tree Created\")\n return \"Tree Created Successfully\" \n else:\n print(\"Already Exist\")\n return \"Logic Tree Already Exist.\"\n\n #This method will create the nodes if they don't exist.\n #Now there may be case if for different logic tree same Problem Node i.e(Question) would be there. 
\n #This method will check if node type is (Car,Model or Possible_Problem) then no duplicates node will be created and existing node id will be returned.\n #But if node type is Problem then a duplicate node will be created.(It will reduce the complexity of graph and avoid formation of cyclic graph structure.)\n \n def CreateNode(self,node,node_set):\n node1 = None\n #print(node)\n if self.session.execute_read(self.check_if_node_present,node) == 0:\n node1 = self.session.execute_write(self.add_nodes,node)\n return node1\n \n ele = self.session.execute_read(self.getNodeId,node)\n if node[\"type\"] == \"Problem\":\n for e in ele:\n if e in node_set:\n return e\n \n node1 = self.session.execute_write(self.add_nodes,node)\n return node1\n \n \n return ele[0] \n \n \n \n #This method will take a dataframe as input and read each rows and creates relationship between them.\n def CreateLogicTree(self,df,data):\n\n relations = []\n nodes = []\n car = []\n problem = []\n pp = []\n model = []\n all_pp = []\n for index, row in df.iterrows():\n node1 = row[\"Node1\"].split(\":\",1)[0]\n name1 = row[\"Node1\"].split(\":\",1)[1].strip().capitalize()\n node2 = row[\"Node2\"].split(\":\",1)[0]\n name2 = row[\"Node2\"].split(\":\",1)[1].strip().capitalize()\n \n \n if node1 == \"Car\" and node2 == \"Model\":\n n1 = {}\n n2 = {}\n r = {}\n n1[\"type\"] = node1\n n1[\"Name\"] = name1\n n2[\"type\"] = node2 \n n2[\"Name\"] = name2\n r[\"type\"] = \"Model\"\n r[\"name\"] = row['Relationship'].capitalize()\n relations.append([n1,r,n2]) \n \n if node1 == \"Model\" and node2 == \"Problem\":\n n1 = {}\n n2 = {}\n r = {}\n n1[\"type\"] = node1\n n1[\"Name\"] = name1 \n n2[\"type\"] = node2 \n n2[\"desc\"] = name2 \n r[\"type\"] = \"Problem\"\n r[\"name\"] = row['Relationship'].capitalize()\n relations.append([n1,r,n2]) \n \n if node1 == \"Car\" and node2 == \"Problem\":\n n1 = {}\n n2 = {}\n r = {}\n n1[\"type\"] = node1\n n1[\"Name\"] = name1 \n n2[\"type\"] = node2 \n n2[\"desc\"] = name2 \n r[\"type\"] = \"Problem\"\n r[\"name\"] = row['Relationship'].capitalize()\n relations.append([n1,r,n2])\n \n if node1 == \"Problem\" and node2 == \"Problem\":\n n1 = {}\n n2 = {}\n r = {}\n n1[\"type\"] = node1\n n1[\"desc\"] = name1.replace(\"\\n\",\" \")\n n2[\"type\"] = node2 \n n2[\"desc\"] = name2.replace(\"\\n\",\" \")\n r[\"type\"] = \"Problem\"\n r[\"name\"] = row['Relationship'].capitalize()\n relations.append([n1,r,n2]) \n \n \n\n\n if node1 == \"Problem\" and node2 == \"Possible_Problem\":\n arr = name2.split('\\n')\n for e in range(len(arr)):\n if \"%\" not in arr[e]:\n arr[e] = \"% \"+arr[e]\n for e in arr:\n n1 = {}\n n2 = {}\n r = {}\n n1[\"type\"] = node1\n n1[\"desc\"] = name1.replace(\"\\n\",\"\")\n n2[\"type\"] = node2 \n n2[\"name\"] = e.split(\"%\")[1].strip().capitalize()\n r[\"type\"] = \"Leads_to\"\n r[\"weight\"] = str(e.split(\"%\")[0])\n r[\"name\"] = row['Relationship']\n relations.append([n1,r,n2]) \n\n if node1 == \"Model\" and node2 == \"Possible_Problem\":\n arr = name2.split('\\n')\n for e in range(len(arr)):\n if \"%\" not in arr[e]:\n arr[e] = \"% \"+arr[e]\n for e in arr:\n n1 = {}\n n2 = {}\n r = {}\n n1[\"type\"] = node1\n n1[\"Name\"] = name1 \n n2[\"type\"] = node2 \n n2[\"name\"] = e.split(\"%\")[1].strip().capitalize()\n r[\"type\"] = \"Leads_to\"\n r[\"weight\"] = str(e.split(\"%\")[0])\n r[\"name\"] = row['Relationship']\n relations.append([n1,r,n2]) \n \n \n \n \n if node1 == \"Car\" and name1 not in car:\n ele = {}\n ele[\"type\"] = node1\n ele[\"Name\"] = name1\n 
nodes.append(ele)\n car.append(name1)\n \n if node1 == \"Model\" and name1 not in model:\n ele = {}\n ele[\"type\"] = node1\n ele[\"Name\"] = name1\n nodes.append(ele)\n model.append(name1)\n \n if node1 == \"Possible_Problem\":\n arr = name1.split('\\n')\n for e in range(len(arr)):\n if \"%\" not in arr[e]:\n arr[e] = \"% \"+arr[e]\n for e in arr:\n if e.split(\"%\")[1].strip().capitalize() not in pp:\n ele = {}\n ele[\"type\"] = node1\n ele[\"name\"] = e.split(\"%\")[1].strip().capitalize()\n nodes.append(ele)\n pp.append(e.split(\"%\")[1].strip().capitalize())\n \n \n if node1 == \"Problem\" and name1 not in problem:\n ele = {}\n ele[\"type\"] = node1\n ele[\"desc\"] = name1.replace(\"\\n\",\"\")\n nodes.append(ele)\n problem.append(name1)\n \n if node2 == \"Car\" and name2 not in car:\n ele = {}\n ele[\"type\"] = node2\n ele[\"Name\"] = name2\n nodes.append(ele)\n car.append(name1)\n if node2 == \"Model\" and name2 not in model:\n ele = {}\n ele[\"type\"] = node2\n ele[\"Name\"] = name2\n nodes.append(ele)\n model.append(name2)\n \n \n if node2 == \"Possible_Problem\":\n arr = name2.split('\\n')\n for e in range(len(arr)):\n if \"%\" not in arr[e]:\n arr[e] = \"% \"+arr[e]\n for e in arr:\n all_pp.append(e.split(\"%\")[1].strip().capitalize())\n if e.split(\"%\")[1].strip().capitalize() not in pp:\n ele = {}\n ele[\"type\"] = node2\n ele[\"name\"] = e.split(\"%\")[1].strip().capitalize()\n nodes.append(ele)\n pp.append(e.split(\"%\")[1].strip().capitalize())\n \n if node2 == \"Problem\" and name2 not in problem:\n ele = {}\n ele[\"type\"] = node2\n ele[\"desc\"] = name2.replace(\"\\n\",\"\")\n nodes.append(ele)\n problem.append(name2)\n\n\n\n res_ = self.add_logic_tree(relations,{\"Car\":data[0],\"Model\":data[1],\"tree\":data[2]})\n \n print(\"Done\")\n return res_\n \n #---------------------------------------\n #Purpose:\n #This method will take model name and logic tree name as input and return the first question.\n \n #Why,What it does\n #Basically the Graph consist of Nodes and Edges. And Graph Starts from Car Node -> Model Node \n #The Model Node have multiple edges (which represnt logic tree names)\n #Now the Model Node with logic tree named edge will give us a Question Node in response.\n \n #Attributes:\n #tx.run() => returns a list which matches query.\n #List has elements => id , Relation_type( eg. problem), Node_type(eg.Problem), Node Description(eg.Questions to be ask)\n #In return of the function it will return a dictionary containing id,question.\n \n #Query Explaination:\n #Match (n:Model) where n.Name=$model => it gets the model node and store it in Variable 'n'.\n #Match (n)-[r]->(m) where r.relation=$rel return id(m) as id,m.type as node_type,m.desc as node_desc,r.relation as rel\n #The above query will map Node 'n' with relation 'r' and find next node 'm' and returns 'm' node details as m.id,m.desc ... 
.\n \n def getLogicTree_(self,tx,data):\n res = tx.run(\"Match (n:Model) where n.Name=$model Match (n)-[r]->(m) where r.relation=$rel return id(m) as id,m.type as node_type,m.desc as node_desc,r.relation as rel\",model=data[\"Model\"],rel=data[\"tree\"]) \n ans = [[ele[\"id\"],ele[\"rel\"],ele[\"node_type\"],ele[\"node_desc\"]] for ele in res][0]\n return {\"id\":ans[0],\"Question\":ans[3]}\n \n def getLogicTree(self,data):\n '''\n data = {}\n data[\"Car\"] = \"Dodge\"\n data[\"Model\"] = \"Grand caravan\"\n data[\"tree\"] = \"No heat from heater\"\n '''\n res = self.session.execute_read(self.isLogicTreePresent,data)\n if res == 0:\n res = self.session.execute_read(self.getLogicTree_,data)\n qid = res[\"id\"]\n res_ = self.getPossibleCause(qid)\n res[\"Possible_Problem\"] = res_\n return res\n else:\n return {\"message\":\"No Logic Tree Present\"}\n #----------------------------------------\n \n #----------------------------------------\n #This method will get all the possible causes for a question id.\n \n #Query Explaination\n #Match (n) where id(n)=$nodeId => this query will get Node and store it in 'n'.\n #Match (n)-[r]->(m) where r.relation=$pp return id(m) as id,m.type as node_type ...\n #The above query will map node 'n' and r with relation name \"Possible_Problem\" and would return list of all nodes 'm'.\n #The 'm' node have => possible_problems node details i.e id,name,weight\n def getPossibleCause_(self,tx,node_id):\n res = tx.run(\"Match (n) where id(n)=$nodeId Match (n)-[r]->(m) where r.relation=$pp return id(m) as id,m.type as node_type,m.name as node_pp,r.relation as rel,r.weight as weight\",nodeId=int(node_id),pp=\"Possible_Problem\")\n arr = [{\"id\":ele[\"id\"],\"name\":ele[\"node_pp\"],\"weight\":ele[\"weight\"]} for ele in res]\n ans = {}\n for i in arr:\n ans[i[\"name\"]] = int(i[\"weight\"])\n sorted_ele = OrderedDict(sorted(list(ans.items()), key = lambda x: x[1],reverse=True))\n print(sorted_ele)\n return sorted_ele\n \n \n def getPossibleCause(self,nodeid):\n res = self.session.execute_read(self.getPossibleCause_,nodeid) \n return res \n \n #----------------------------------------\n #this method will get next leading nodes for a nodes id.\n #The leading node can be a Question/Problem Node and Possible_Problem Node.\n #getNextProblem_ function will return list all possible result nodes.\n #The getNextProblem calls the getNextProblem_ and filtered out the possible_problems nodes and problem/Question nodes.\n #\n #Query Explaination:\n #Match (n) where id(n)=$nodeId => searchs the node and store it in 'n'.\n #Match (n)-[r]->(m) return id(m) as id,m.type as node_type,m.desc as node_desc,m.name as node_pp,r.relation as rel,r.weight as weight\n #The above query will return list of object such as :-\n #Result=> list of [{id:\"\",node_type:\"\",node_desc:\"\",node_pp:\"\",rel:\"\",weight:\"\"},...]\n #If Node type is Problem then result object will be {id:101,node_type:Problem,node_desc:Question...,node_pp:None/Null,rel:Problem,weight:None}\n #for Problem Node, node_pp,weight will be null.\n #for Possible_Problem Node, node_desc will be Null.\n #Possible_Problem nodes i.e => Possible_Causes or final causes.\n #eg. 
{id:101,node_type:Possible_Problem,node_desc:None/Null,node_pp:possible_problems/high/low/,rel:possible_problems,weight:20}\n \n def getNextProblem_(self,tx,data):\n node_id = data[0]\n res = tx.run(\"Match (n) where id(n)=$nodeId Match (n)-[r]->(m) return id(m) as id,m.type as node_type,m.desc as node_desc,m.name as node_pp,r.relation as rel,r.weight as weight\",nodeId=node_id)\n return [[ele[\"id\"],ele[\"rel\"],ele[\"node_type\"],ele[\"node_desc\"],ele[\"node_pp\"],ele[\"weight\"]] for ele in res]\n #this method will call the getNextProblem_ methods and restructurize the output and returns the result.\n def getNextProblem(self,nodeid,rel):\n res = self.session.execute_read(self.getNextProblem_,[int(nodeid)])\n arr = []\n for ele in res:\n e = {}\n if ele[2] == \"Possible_Problem\" and ele[3] is None:\n e[\"id\"] = ele[0]\n e[\"Type\"] = \"Possible_Problem\"\n e[\"Node\"] = ele[4]\n e[\"Relation\"] = ele[1]\n e[\"weight\"] = ele[5]\n else:\n e[\"id\"] = ele[0]\n e[\"Type\"] = \"Problem\"\n e[\"Node\"] = ele[3]\n e[\"Relation\"] = ele[1]\n arr.append(e)\n ans = {} \n \n for ele in arr:\n \n if ele[\"Relation\"] != \"Possible_Problem\": \n if ele[\"Relation\"] in ans.keys():\n e = {}\n if ele[\"Type\"] == \"Possible_Problem\":\n e[\"weight\"] = ele[\"weight\"]\n e[\"Node\"] = ele[\"Node\"]\n e[\"id\"] = ele[\"id\"]\n e[\"Type\"] = ele[\"Type\"]\n ans[ele[\"Relation\"]].append(e)\n else:\n ans[ele[\"Relation\"]] = []\n e = {}\n if ele[\"Type\"] == \"Possible_Problem\":\n e[\"weight\"] = ele[\"weight\"]\n e[\"Node\"] = ele[\"Node\"]\n e[\"id\"] = ele[\"id\"]\n e[\"Type\"] = ele[\"Type\"]\n ans[ele[\"Relation\"]].append(e)\n \n print(ans)\n for k,v in ans.items():\n \n ele = {}\n \n for e in v:\n \n #pp\n if e[\"Type\"] == \"Possible_Problem\":\n #print(ele,\"Possible_Problem\" in ele.keys())\n if \"Possible_Problem\" in ele.keys():\n ele[e[\"Type\"]].append({\"id\":e[\"id\"],\"Name\":e[\"Node\"],\"Weigth\":e[\"weight\"]})\n else:\n ele[e[\"Type\"]] = [{\"id\":e[\"id\"],\"Name\":e[\"Node\"],\"Weigth\":e[\"weight\"]}]\n #p\n if e[\"Type\"] == \"Problem\":\n if \"Problem\" in ele.keys():\n ele[e[\"Type\"]].append({\"id\":e[\"id\"],\"Name\":e[\"Node\"]})\n else:\n ele[e[\"Type\"]] = [{\"id\":e[\"id\"],\"Name\":e[\"Node\"]}]\n #print(ele)\n #print(\"---------\")\n ans[k] = ele\n \n #print(ans)\n for k,v in ans.items():\n for key,value in v.items():\n ele = {}\n if key == \"Possible_Problem\":\n for e in value:\n ele[e[\"Name\"]] = int(e[\"Weigth\"])\n sorted_ele = dict(sorted(list(ele.items()), key = lambda x: x[1]))\n print(\"-##--\")\n print(sorted_ele)\n print(\"-----\")\n v[key] = sorted_ele\n \n \n return ans\n \n #This method will give all the logic tree name associated with a model. \n def getLogicTrees_(self,tx,model):\n print(model)\n res = tx.run(\"Match (n:Model) where n.Name=$model Match (n)-[r]->() return r.relation as rel\",model=model)\n ans = [ele[\"rel\"] for ele in res]\n return ans\n \n def getLogicTrees(self,model):\n res = self.session.execute_read(self.getLogicTrees_,model)\n return res\n #This method will return all the Cars present in db. \n def get_all_car_make(self,tx):\n res = tx.run(\"Match (n:Car) return n.Name as Name\")\n ans = [ele[\"Name\"] for ele in res]\n return ans\n def Cars(self):\n res = self.session.execute_read(self.get_all_car_make)\n return res \n #This method will give all the models associated with a Car Make. 
\n def get_models_for_carMake(self,tx,make):\n res = tx.run(\"Match (n:Car) where n.Name=$make Match (n)-[:Model]->(m) return m.Name as Name\",make=make)\n ans = [ele[\"Name\"] for ele in res]\n return ans\n \n def getModel(self,make):\n res = self.session.execute_read(self.get_models_for_carMake,make)\n return res\n \n \n\n\n\n\n \n ", "repo_name": "Shreyash2704/AmberfluxNeo4j", "sub_path": "Neo4jDB.py", "file_name": "Neo4jDB.py", "file_ext": "py", "file_size_in_byte": 27959, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "neo4j.GraphDatabase.driver", "line_number": 17, "usage_type": "call"}, {"api_name": "neo4j.GraphDatabase", "line_number": 17, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 490, "usage_type": "call"}]} +{"seq_id": "2670172492", "text": "import cv2\nimport pywt\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport os\nfrom pywt._doc_utils import wavedec2_keys, draw_2d_wp_basis\n\ndir = os.path.dirname(__file__)\nfoldername = os.path.join(dir, 'Images')\nos.chdir(foldername)\n\nimage = cv2.imread('BPF.jpg')\ngray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nprint(gray_image.shape)\ncv2.imwrite('gray_image.png',gray_image)\nx=np.asarray(Image.open(\"gray_image.png\").convert(\"L\"))\n\nshape = x.shape\nmax_lev = 3 # how many levels of decomposition to draw\nlabel_levels = 3 # how many levels to explicitly label on the plots\n\nfor level in range(1, max_lev + 1):\n # compute the 2D DWT\n c = pywt.wavedec2(x, 'db2', mode='sym', level=level)\n # normalize each coefficient array independently for better visibility\n c[0] /= np.abs(c[0]).max()\n for detail_level in range(level):\n c[detail_level + 1] = [d/np.abs(d).max() for d in c[detail_level + 1]]\n\n plt.imshow(c[0], cmap=plt.cm.gray)\n plt.title('Coefficients\\n({} level)'.format(level))\n plt.show()\n\n print(c[0].shape)\n wp = pywt.WaveletPacket2D(data=c[0], wavelet='db1', mode='sym')\n limith = 0\n limitv = 0\n limitd = 0\n limitf = 25\n\n zh=wp['h'].data\n zh[zh<limith]=0.0\n print(zh.shape)\n plt.title(\"Horizontal\")\n plt.imshow(zh,plt.cm.gray) # plot horizontal decomposition of image\n plt.show()\n\n zv=wp['v'].data\n zv[zv<limitv]=0.0\n plt.title(\"Vertical\")\n plt.imshow(zv,plt.cm.gray) # plot vertical decomposition of image\n plt.show()\n\n zd=wp['d'].data\n zd[zd<limitd]=0.0\n plt.title(\"Diagonal\")\n plt.imshow(zd,plt.cm.gray) # plot diagonal decomposition of image\n plt.show()\n\n plt.gray()\n zf = zh + zv + zd\n zf[zf<limitf] = 0.0\n print(zf.shape)\n res = cv2.resize (zf, dsize= (418, 240), interpolation=cv2.INTER_CUBIC)\n plt.title(\"ORed Image\")\n plt.imshow(res) # plot final decomposition of image\n plt.show()", "repo_name": "aditiawas/Defencing-Inpainting", "sub_path": "Extra trials/trial.py", "file_name": "trial.py", "file_ext": "py", "file_size_in_byte": 1873, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 14, "usage_type": 
"call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 17, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 17, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 17, "usage_type": "name"}, {"api_name": "pywt.wavedec2", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 31, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "pywt.WaveletPacket2D", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 46, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 52, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 58, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gray", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "cv2.resize", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 65, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", 
"line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}]} +{"seq_id": "314402131", "text": "from django.db import models\nfrom django.utils.timezone import datetime\nfrom django.contrib.auth.models import User\n# Create your models here.\nclass ReferenceBook(models.Model):\n\n # article_manager = models.ForeignKey(User, on_delete=models.CASCADE, default=None)\n article_title = models.CharField(max_length=500)\n topicType = (\n ('Epiplex', 'Epiplex'),\n ('UiPath', 'UiPath'),\n ('Python','Python'),\n ('C#.net','C#.net'),\n ('HTML','HTML'),\n ('Bootstrap', 'Bootstrap'),\n ('Javascript', 'Javascript'),\n )\n article_topic_type = models.CharField(max_length=100, choices=topicType, default='Epiplex')\n article_content = models.TextField(editable=True)\n\n updated = (\n ('admin','Arun Kesavan'),\n ('user','Others'),\n )\n article_updated_by = models.CharField(max_length=500, choices=updated, default='admin')\n last_updated_date = models.DateTimeField(auto_now_add=True)\n likes = models.IntegerField(default=0)\n dislikes = models.IntegerField(default=0)\n\n def __str__(self):\n return self.article_title", "repo_name": "kottilukkalarun/CodeGenLib", "sub_path": "CodeLibrary/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1100, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.db.models.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "29293947154", "text": "import sys\nimport os\nimport random\nfrom pathlib import Path\nsys.path.append(f\"{Path.home()}/usd_scene_construction_utils\") # use your install path\nsys.path.append(f\"{Path.home()}/usd_scene_construction_utils/examples/pallet_with_boxes\") # use your install path\n\nfrom usd_scene_construction_utils import *\n\nPALLET_URIS = [\n \"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Shipping/Pallets/Wood/Block_A/BlockPallet_A01_PR_NVD_01.usd\",\n \"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Shipping/Pallets/Wood/Block_B/BlockPallet_B01_PR_NVD_01.usd\",\n 
\"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Shipping/Pallets/Wood/Wing_A/WingPallet_A01_PR_NVD_01.usd\"\n]\n\nCARDBOARD_BOX_URIS = [\n \"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Shipping/Cardboard_Boxes/Cube_A/CubeBox_A02_16cm_PR_NVD_01.usd\",\n \"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Shipping/Cardboard_Boxes/Flat_A/FlatBox_A05_26x26x11cm_PR_NVD_01.usd\",\n \"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Shipping/Cardboard_Boxes/Printer_A/PrintersBox_A05_23x28x25cm_PR_NVD_01.usd\"\n]\n\ndef add_pallet(stage, path: str):\n prim = add_usd_ref(stage, path, random.choice(PALLET_URIS))\n add_semantics(prim, \"class\", \"pallet\")\n return prim\n\ndef add_cardboard_box(stage, path: str):\n prim = add_usd_ref(stage, path, random.choice(CARDBOARD_BOX_URIS))\n add_semantics(prim, \"class\", \"box\")\n return prim\n\n\ndef add_pallet_with_box(stage, path: str):\n container = add_xform(stage, path)\n pallet = add_pallet(stage, os.path.join(path, \"pallet\"))\n box = add_cardboard_box(stage, os.path.join(path, \"box\"))\n pallet_bbox = compute_bbox(pallet)\n box_bbox = compute_bbox(box)\n translate(box,(0, 0, pallet_bbox[1][2] - box_bbox[0][2]))\n rotate_z(pallet, random.uniform(-25, 25))\n return container\n\n\ndef add_tree(stage, path: str):\n url = \"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Vegetation/Trees/American_Beech.usd\"\n return add_usd_ref(stage, path, url)\n\n\nstage = new_omniverse_stage()\n\nbrick = add_mdl_material(stage, \"/scene/brick\", \"http://omniverse-content-production.s3-us-west-2.amazonaws.com/Materials/Base/Masonry/Brick_Pavers.mdl\")\npallet_box = add_pallet_with_box(stage, \"/scene/pallet\")\nfloor = add_plane(stage, \"/scene/floor\", size=(1000, 1000), uv=(20., 20.))\ntree = add_tree(stage, \"/scene/tree\")\ntranslate(tree, (100, -150, 0))\n\nbind_material(floor, brick)\nlight = add_dome_light(stage, \"/scene/dome_light\")", "repo_name": "NVIDIA-Omniverse/usd_scene_construction_utils", "sub_path": "examples/pallet_with_boxes/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2766, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.path.append", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pathlib.Path.home", "line_number": 5, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 5, "usage_type": "name"}, {"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pathlib.Path.home", "line_number": 6, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 6, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 23, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "random.uniform", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "19095898743", "text": 
"import os\nimport re\nimport sys\nimport time\nimport logging\nimport requests\n\nfrom tqdm import tqdm\n\nif __name__ == \"__main__\":\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n handler = logging.FileHandler(\"id_mapping.log\")\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n id_map = {}\n \n gene_regex = re.compile(r'<gene><name type=\"primary\">(\\w*)</name>.*</gene>')\n pdb_ids = os.listdir(\"/home/wkg/complex_essentiality/complexes\")\n pdb_ids = [item.split(\".\")[0] for item in pdb_ids]\n\n for pdb_id in tqdm(pdb_ids):\n start_time = time.perf_counter()\n req_url = f\"https://www.ebi.ac.uk/proteins/api/proteins/PDB:{pdb_id}?offset=0&size=100\"\n result = requests.get(req_url, headers={\"Accept\": \"application/xml\"})\n if result.ok:\n match = gene_regex.search(result.text)\n if match:\n id_map[pdb_id] = match.group(1)\n else:\n logger.info(f\"no match for: {pdb_id}\")\n else:\n logger.info(f\"error with id: {pdb_id}\")\n\n if time.perf_counter() - start_time < .2:\n time.sleep(.1 - (time.perf_counter() - start_time))\n\n with open(\"pdb_id_map\", \"w\") as out:\n for key in id_map:\n out.write(\",\".join([key, id_map[key]]))\n out.write(\"\\n\")\n", "repo_name": "wigasper/ddi-planarity", "sub_path": "data-aggregation/get_id_map.py", "file_name": "get_id_map.py", "file_ext": "py", "file_size_in_byte": 1431, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 14, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 20, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 21, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 24, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 25, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 27, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 37, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "13128157945", "text": "from __future__ import annotations\n\nimport json\nfrom copy import deepcopy\nfrom typing import Union\n\nfrom boto3.session import Session\nfrom compose_x_common.aws import get_session\n\n\nclass SsmParameter:\n def __init__(self, config: dict, session: Session = None):\n self._config = deepcopy(config)\n self.name = config[\"name\"]\n self.session = get_session(session)\n\n @property\n def current(self) -> dict:\n return self.session.client(\"ssm\").get_parameter(ParameterName=self.name)\n\n @property\n def current_value(self) -> str:\n return self.current[\"Value\"]\n\n @current_value.setter\n def current_value(self, value: Union[str, dict, list]):\n if not isinstance(value, (str, list, dict)):\n raise TypeError(\n f\"Unsupported type {type(value)}. 
Expected one of\", (str, list, dict)\n )\n client = self.session.client(\"ssm\")\n if isinstance(value, str):\n client.put_parameter(\n Name=self.name, Value=value, Type=\"String\", Overwrite=True\n )\n elif isinstance(value, (dict, list)):\n client.put_parameter(\n Name=self.name, Value=json.dumps(value), Type=\"String\", Overwrite=True\n )\n", "repo_name": "compose-x/s3-autosync", "sub_path": "aws_s3_files_autosync/ssm_management.py", "file_name": "ssm_management.py", "file_ext": "py", "file_size_in_byte": 1250, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "boto3.session.Session", "line_number": 12, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 13, "usage_type": "call"}, {"api_name": "compose_x_common.aws.get_session", "line_number": 15, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 26, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "74216294453", "text": "import psycopg2\r\nimport json\r\nfrom config import config\r\n\r\n\r\ndef playersLoad():\r\n f = open('playerInfo.json')\r\n playerData = json.load(f)\r\n row = 0\r\n\r\n conn = None\r\n try:\r\n # read the connection parameters\r\n params = config()\r\n # connect to the PostgreSQL server\r\n conn = psycopg2.connect(**params)\r\n cur = conn.cursor()\r\n # create table one by one\r\n for row in playerData:\r\n players = playerData[row] \r\n cur.execute(\"INSERT INTO nhldb.players(playerid, namelast, namefirst, primarynumber, birthdate, birthcity, birthcountry, nationality, height, weight, shootscatches, primaryposition) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\", (players['playerId'], players['lastName'], players['firstName'], players['primaryNumber'], players['birthDate'], players['birthCity'], players['birthCountry'], players['nationality'], players['height'], players['weight'], players['shootsCatches'], players['primaryPosition']))\r\n \r\n # close communication with the PostgreSQL database server\r\n cur.close()\r\n # commit the changes\r\n conn.commit()\r\n except (Exception, psycopg2.DatabaseError) as error:\r\n print(error)\r\n finally:\r\n if conn is not None:\r\n conn.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n playersLoad()\r\n\r\n\r\n\r\n\r\n", "repo_name": "cyngerian/hockeydb", "sub_path": "build_database/needsWork/playersLoad.py", "file_name": "playersLoad.py", "file_ext": "py", "file_size_in_byte": 1380, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "json.load", "line_number": 8, "usage_type": "call"}, {"api_name": "config.config", "line_number": 14, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "psycopg2.DatabaseError", "line_number": 27, "usage_type": "attribute"}]} +{"seq_id": "13126474010", "text": "import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5 import uic\n\nfrom_class = uic.loadUiType(\"textbrowser2.ui\")[0]\n\n\nclass WindowClass(QMainWindow, from_class) :\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.setWindowTitle(\"TextBrowser2, Qt!\")\n \n self.Add.clicked.connect(self.addText)\n self.Clear.clicked.connect(self.clearText)\n self.FontUbuntu.clicked.connect(lambda: self.setFont(\"Ubuntu\"))\n self.FontNanumGothic.clicked.connect(lambda: self.setFont(\"NanumGothic\"))\n\n self.Red.clicked.connect(lambda: 
self.setTextColor(255, 0, 0))\n self.Green.clicked.connect(lambda: self.setTextColor(0, 255, 0))\n self.Blue.clicked.connect(lambda: self.setTextColor(0, 0, 255))\n\n self.SetFontSize.clicked.connect(self.setFontSize)\n\n self.FontSize.textChanged.connect(self.checkDigit)\n self.FontSize.returnPressed.connect(self.setFontSize)\n # self.FontSize.setValidator(QIntValidator())\n \n\n def clearText(self):\n self.Input.clear()\n self.Output.clear()\n\n def addText(self):\n input = self.Input.toPlainText()\n self.Input.clear()\n self.Output.append(input)\n \n def setFont(self, fontName):\n font = QFont(fontName, 11)\n self.Output.setFont(font)\n\n def setTextColor(self, r, g, b):\n color = QColor(r, g, b)\n self.Output.selectAll()\n self.Output.setTextColor(color)\n self.Output.moveCursor(QTextCursor.End)\n\n def setFontSize(self):\n size = int(self.FontSize.text())\n self.Output.selectAll()\n self.Output.setFontPointSize(size)\n self.Output.moveCursor(QTextCursor.End)\n\n def checkDigit(self):\n text = self.FontSize.text()\n if text.isdigit() == False:\n self.FontSize.setText(text[:-1])\n\n\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n myWindows = WindowClass()\n myWindows.show()\n sys.exit(app.exec_())", "repo_name": "haneol0415/pyqt_study", "sub_path": "textBrowser2.py", "file_name": "textBrowser2.py", "file_ext": "py", "file_size_in_byte": 2017, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "PyQt5.uic.loadUiType", "line_number": 6, "usage_type": "call"}, {"api_name": "PyQt5.uic", "line_number": 6, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 65, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "27071643828", "text": "import json\n#from serialcomtest import bringthedrink\nimport RPi.GPIO as gpio\nimport time\n\ngpio.setwarnings(False)\ngpio.setmode(gpio.BCM)\ngpio.setup(18,gpio.OUT)\n\nwith open('word.json') as json_data:\n d = json.load(json_data)\nif d == 0:\n# bringthedrink(0)\n gpio.output(18,1)\n time.sleep(1)\n gpio.output(18,0)\nelif d == 1:\n# bringthedrink(1)\n gpio.output(18,1)\n time.sleep(200)\n gpio.output(18,0)\n#elif d == 2:\n# bringthedrink(2)\n#else:\n# print \"Error: INVALID DRINK TYPE\"\n\n\n\n\n", "repo_name": "ahmetakif/TASHIMASU", "sub_path": "TASHIMASU CODES/my-awesome-project-old/program.py", "file_name": "program.py", "file_ext": "py", "file_size_in_byte": 508, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "RPi.GPIO.setwarnings", "line_number": 6, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 6, "usage_type": "name"}, {"api_name": "RPi.GPIO.setmode", "line_number": 7, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 7, "usage_type": "name"}, {"api_name": "RPi.GPIO.BCM", "line_number": 7, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 8, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 8, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 8, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 11, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 14, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 14, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 15, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 16, "usage_type": "call"}, 
{"api_name": "RPi.GPIO", "line_number": 16, "usage_type": "name"}, {"api_name": "RPi.GPIO.output", "line_number": 19, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 19, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 21, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "37937317872", "text": "import numpy as np\n\nfrom .environment import ParserContext\n\nimport logging\n\nlogger = logging.getLogger(\"nart.modules.tensorrt\")\n\n\ndef to_const_tensor(array, name=None):\n \"\"\"convert a numpy array to const itensor\n Args:\n array: a numpy array\n Returns:\n a created itensor\n \"\"\"\n if array.dtype == np.int64:\n logger.warning(\"tensorrt doesnot support int64 array, converting to int32\")\n array = array.astype(np.int32)\n elif array.dtype == np.float64:\n logger.warning(\"tensorrt doesnot support float64 array, converting to float32\")\n array = array.astype(np.float32)\n ctx = ParserContext.get_current()\n from tensorrt import tensorrt as trt\n\n shape = trt.Dims(array.shape)\n weights = trt.Weights(array)\n constant = ctx.network.add_constant(shape, weights)\n if name is not None:\n constant.name = name\n constant.get_output(0).name = name\n return constant.get_output(0)\n\n\ndef get_nb_dims(tensor):\n \"\"\"get the number of dimension of a tensorrt.itensor or weights.\"\"\"\n return len(tensor.shape)\n\n\nclass ShapeTensor(object):\n \"\"\"represents the shape of a tensor or weight.\"\"\"\n\n def __init__(self, shape_vals, shape_tensor=None, shape_tensor_getter=None):\n shape_vals = [int(x) for x in shape_vals]\n self._shape_vals = shape_vals\n is_dynamic = any(x < 0 for x in shape_vals)\n self._is_dynamic = is_dynamic\n self._shape_tensor = shape_tensor\n self._shape_tensor_getter = shape_tensor_getter\n\n @property\n def is_dynamic(self):\n return self._is_dynamic\n\n def is_dim_dynamic(self, axis):\n return self._shape_vals[axis] < 0\n\n @property\n def shape_vals(self):\n return self._shape_vals.copy()\n\n @property\n def shape_tensor(self):\n if self._shape_tensor is not None:\n return self._shape_tensor\n elif self._shape_tensor_getter is not None:\n self._shape_tensor = self._shape_tensor_getter()\n return self._shape_tensor\n else:\n # set shape_tensor if possible\n assert (\n not self.is_dynamic\n ), \"creating ShapeTensor of dynamic shape without shape_tensor provided\"\n shape_vals = self.shape_vals\n array = np.array(shape_vals, dtype=np.int32)\n self._shape_tensor = to_const_tensor(array)\n return self._shape_tensor\n\n @property\n def nb_dim(self):\n return len(self.shape_vals)\n\n @property\n def is_empty(self):\n return self.nb_dim == 0\n\n def __getitem__(self, arg):\n ret = self.shape_vals[arg]\n if isinstance(ret, (list, tuple)):\n assert not any(\n x < 0 for x in ret\n ), \"accessing part of shape which contains dynamic dimension\"\n else:\n assert (\n not ret < 0\n ), \"accessing part of shape which contains dynamic dimension\"\n return ret\n\n def __len__(self):\n return len(self.shape_vals)\n\n def slice(self, arg):\n vals = self.shape_vals[arg]\n vals = vals if isinstance(vals, (list, tuple)) else [vals]\n if not any(x < 0 for x in vals):\n # if the slice is static\n return ShapeTensor(vals, None)\n else:\n # use gather layer to select the range\n # first create a indices tensor\n if isinstance(arg, slice):\n indices = range(arg.start or 0, arg.stop or len(self), arg.step or 1)\n else:\n indices = 
[arg]\n            indices = np.array(indices, dtype=np.int32)\n            indice_tensor = to_const_tensor(indices)\n            ctx = ParserContext.get_current()\n            # create gather layer. shape_tensor is a 1-dimensional tensor, so gather on axis 0.\n            gather = ctx.network.add_gather(self.shape_tensor, indice_tensor, axis=0)\n            gather.num_elementwise_dims = 0\n            return ShapeTensor(vals, gather.get_output(0))\n\n    def gather(self, indices):\n        vals = [self._shape_vals[index] for index in indices]\n        if all(x >= 0 for x in vals):\n            # all dimensions static\n            return ShapeTensor(vals)\n\n        indices = np.array(indices, dtype=np.int32)\n        indice_tensor = to_const_tensor(indices)\n        ctx = ParserContext.get_current()\n        # create gather layer. shape_tensor is a 1-dimensional tensor, so gather on axis 0.\n        gather = ctx.network.add_gather(self.shape_tensor, indice_tensor, axis=0)\n        gather.num_elementwise_dims = 0\n        return ShapeTensor(vals, gather.get_output(0))\n\n\ndef reduce_mul(shape_tensor):\n    assert isinstance(shape_tensor, ShapeTensor)\n    if not shape_tensor.is_dynamic:\n        # for static shape, simply reduce multiply\n        from functools import reduce\n\n        res = reduce(lambda x, y: x * y, shape_tensor.shape_vals, 1)\n        return ShapeTensor([res], None)\n    elif shape_tensor.nb_dim == 1:\n        # dynamic, but has only one element\n        return shape_tensor\n    else:\n        # for dynamic shape, use reduce layer to multiply it up\n        ctx = ParserContext.get_current()\n        from tensorrt import tensorrt as trt\n\n        layer = ctx.network.add_reduce(\n            shape_tensor.shape_tensor,\n            op=trt.ReduceOperation.PROD,\n            axes=(1 << 0),\n            keep_dims=True,\n        )\n        return ShapeTensor([-1], layer.get_output(0))\n\n\ndef concat_shape(a, b):\n    import tensorrt.tensorrt as trt\n\n    if isinstance(a, trt.ITensor) and isinstance(b, trt.ITensor):\n        ctx = ParserContext.get_current()\n        concat = ctx.network.add_concatenation([a, b])\n        concat.axis = 0\n        return concat.get_output(0)\n    if a.is_empty:\n        return b\n    if b.is_empty:\n        return a\n    if not a.is_dynamic and not b.is_dynamic:\n        # both are static, so simply concat it up\n        vals = tuple(a.shape_vals) + tuple(b.shape_vals)\n        return ShapeTensor(vals, None)\n    else:\n        # use concat layer to concatenate two parts up\n        ctx = ParserContext.get_current()\n        concat = ctx.network.add_concatenation([a.shape_tensor, b.shape_tensor])\n        concat.axis = 0  # the axis HAS TO BE ZERO.\n        vals = tuple(a.shape_vals) + tuple(b.shape_vals)\n        return ShapeTensor(vals, concat.get_output(0))\n\n\ndef concat_shapes(shapes):\n    ret = shapes[0]\n    for item in shapes[1:]:\n        ret = concat_shape(ret, item)\n    return ret\n\n\ndef get_shape(tensor):\n    \"\"\"get the shape of a tensorrt.itensor or weights.\n    Returns:\n        for weights or numpy.array: returns tensor.shape.\n        for tensorrt.itensor: a ShapeTensor which wraps the tensor shape.\n    \"\"\"\n    shape_vals = tensor.shape\n    is_dynamic = any(x < 0 for x in shape_vals)\n    if not is_dynamic:\n        return ShapeTensor(shape_vals, None)\n    else:\n\n        def shape_getter():\n            ctx = ParserContext.get_current()\n            shape_layer = ctx.network.add_shape(tensor)\n            return shape_layer.get_output(0)\n\n        return ShapeTensor(shape_vals, shape_getter(), None)\n\n\ndef add_shuffle(tensor, shape):\n    import tensorrt.tensorrt as trt\n\n    ctx = ParserContext.get_current()\n    reshape = ctx.network.add_shuffle(tensor)\n    if isinstance(shape, trt.ITensor):\n        reshape.set_input(1, shape)\n        return reshape\n    if not shape.is_dynamic:\n        # for static shape, use reshape_dims parameter\n        reshape.reshape_dims = shape.shape_vals\n    else:\n        # for dynamic shape, use shape tensor\n        reshape.set_input(1, 
shape.shape_tensor)\n    return reshape\n\n\n\"\"\" some utility functions about network creation\n\"\"\"\n\n\ndef flatten_tensor(tensor, axis):\n    shape = get_shape(tensor)\n\n    d0 = reduce_mul(shape.slice(slice(axis)))\n    d1 = reduce_mul(shape.slice(slice(axis, None)))\n    reshape = add_shuffle(tensor, concat_shape(d0, d1))\n    return reshape.get_output(0)\n\n\ndef cast_to(tensor, dtype):\n    ctx = ParserContext.get_current()\n    identity_layer = ctx.network.add_identity(tensor)\n    # identity_layer.name = node.name\n    identity_layer.set_output_type(0, dtype)\n    return identity_layer.get_output(0)\n\n\ndef dtype_from_art_dtype(art_dtype):\n    from ...core.art import Dtype\n    import tensorrt.tensorrt as trt\n\n    if art_dtype == Dtype.Float32:\n        return trt.float32\n    if art_dtype == Dtype.Int32:\n        return trt.int32\n    if art_dtype == Dtype.Bool:\n        return trt.bool\n    raise ValueError(f\"unexpected dtype {art_dtype}\")\n\n\ndef art_dtype_from_dtype(dtype):\n    from ...core.art import Dtype\n    import tensorrt.tensorrt as trt\n\n    if dtype == trt.float32:\n        return Dtype.Float32\n    if dtype == trt.int32:\n        return Dtype.Int32\n    if dtype == trt.bool:\n        return Dtype.Bool\n    raise ValueError(f\"unexpected dtype {dtype}\")\n", "repo_name": "ModelTC/NART", "sub_path": "python/nart/modules/trt_utils/parse_utils.py", "file_name": "parse_utils.py", "file_ext": "py", "file_size_in_byte": 8822, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 34, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 22, "usage_type": "attribute"}, {"api_name": "environment.ParserContext.get_current", "line_number": 23, "usage_type": "call"}, {"api_name": "environment.ParserContext", "line_number": 23, "usage_type": "name"}, {"api_name": "tensorrt.tensorrt.Dims", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorrt.tensorrt", "line_number": 26, "usage_type": "name"}, {"api_name": "tensorrt.tensorrt.Weights", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorrt.tensorrt", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 75, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 115, "usage_type": "attribute"}, {"api_name": "environment.ParserContext.get_current", "line_number": 117, "usage_type": "call"}, {"api_name": "environment.ParserContext", "line_number": 117, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 129, "usage_type": "attribute"}, {"api_name": "environment.ParserContext.get_current", "line_number": 131, "usage_type": "call"}, {"api_name": "environment.ParserContext", "line_number": 131, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 144, "usage_type": "call"}, {"api_name": "environment.ParserContext.get_current", "line_number": 151, "usage_type": "call"}, {"api_name": "environment.ParserContext", "line_number": 151, "usage_type": "name"}, {"api_name": "tensorrt.tensorrt.ReduceOperation", "line_number": 156, "usage_type": "attribute"}, {"api_name": 
"tensorrt.tensorrt", "line_number": 156, "usage_type": "name"}, {"api_name": "tensorrt.tensorrt.ITensor", "line_number": 166, "usage_type": "attribute"}, {"api_name": "tensorrt.tensorrt", "line_number": 166, "usage_type": "name"}, {"api_name": "environment.ParserContext.get_current", "line_number": 167, "usage_type": "call"}, {"api_name": "environment.ParserContext", "line_number": 167, "usage_type": "name"}, {"api_name": "environment.ParserContext.get_current", "line_number": 181, "usage_type": "call"}, {"api_name": "environment.ParserContext", "line_number": 181, "usage_type": "name"}, {"api_name": "environment.ParserContext.get_current", "line_number": 208, "usage_type": "call"}, {"api_name": "environment.ParserContext", "line_number": 208, "usage_type": "name"}, {"api_name": "environment.ParserContext.get_current", "line_number": 218, "usage_type": "call"}, {"api_name": "environment.ParserContext", "line_number": 218, "usage_type": "name"}, {"api_name": "tensorrt.tensorrt.ITensor", "line_number": 220, "usage_type": "attribute"}, {"api_name": "tensorrt.tensorrt", "line_number": 220, "usage_type": "name"}, {"api_name": "environment.ParserContext.get_current", "line_number": 246, "usage_type": "call"}, {"api_name": "environment.ParserContext", "line_number": 246, "usage_type": "name"}, {"api_name": "core.art.Dtype.Float32", "line_number": 257, "usage_type": "attribute"}, {"api_name": "core.art.Dtype", "line_number": 257, "usage_type": "name"}, {"api_name": "tensorrt.tensorrt.float32", "line_number": 258, "usage_type": "attribute"}, {"api_name": "tensorrt.tensorrt", "line_number": 258, "usage_type": "name"}, {"api_name": "core.art.Dtype.Int32", "line_number": 259, "usage_type": "attribute"}, {"api_name": "core.art.Dtype", "line_number": 259, "usage_type": "name"}, {"api_name": "tensorrt.tensorrt.int32", "line_number": 260, "usage_type": "attribute"}, {"api_name": "tensorrt.tensorrt", "line_number": 260, "usage_type": "name"}, {"api_name": "core.art.Dtype.Bool", "line_number": 261, "usage_type": "attribute"}, {"api_name": "core.art.Dtype", "line_number": 261, "usage_type": "name"}, {"api_name": "tensorrt.tensorrt.bool", "line_number": 262, "usage_type": "attribute"}, {"api_name": "tensorrt.tensorrt", "line_number": 262, "usage_type": "name"}, {"api_name": "tensorrt.tensorrt.float32", "line_number": 270, "usage_type": "attribute"}, {"api_name": "tensorrt.tensorrt", "line_number": 270, "usage_type": "name"}, {"api_name": "core.art.Dtype.Float32", "line_number": 271, "usage_type": "attribute"}, {"api_name": "core.art.Dtype", "line_number": 271, "usage_type": "name"}, {"api_name": "tensorrt.tensorrt.int32", "line_number": 272, "usage_type": "attribute"}, {"api_name": "tensorrt.tensorrt", "line_number": 272, "usage_type": "name"}, {"api_name": "core.art.Dtype.Int32", "line_number": 273, "usage_type": "attribute"}, {"api_name": "core.art.Dtype", "line_number": 273, "usage_type": "name"}, {"api_name": "tensorrt.tensorrt.bool", "line_number": 274, "usage_type": "attribute"}, {"api_name": "tensorrt.tensorrt", "line_number": 274, "usage_type": "name"}, {"api_name": "core.art.Dtype.Bool", "line_number": 275, "usage_type": "attribute"}, {"api_name": "core.art.Dtype", "line_number": 275, "usage_type": "name"}]} +{"seq_id": "29361829215", "text": "import unittest\nfrom unittest import mock\nfrom datetime import datetime\n\nimport dask.array as da\nimport numpy as np\nimport pytest\nimport xarray as xr\n\n\nclass TestSunZenithCorrector(unittest.TestCase):\n \"\"\"Test case for the zenith 
corrector.\"\"\"\n\n def setUp(self):\n \"\"\"Create test data.\"\"\"\n from pyresample.geometry import AreaDefinition\n area = AreaDefinition('test', 'test', 'test',\n {'proj': 'merc'}, 2, 2,\n (-2000, -2000, 2000, 2000))\n bigger_area = AreaDefinition('test', 'test', 'test',\n {'proj': 'merc'}, 4, 4,\n (-2000, -2000, 2000, 2000))\n attrs = {'area': area,\n 'start_time': datetime(2018, 1, 1, 18),\n 'modifiers': tuple(),\n 'name': 'test_vis'}\n ds1 = xr.DataArray(da.ones((2, 2), chunks=2, dtype=np.float64),\n attrs=attrs, dims=('y', 'x'),\n coords={'y': [0, 1], 'x': [0, 1]})\n self.ds1 = ds1\n ds2 = xr.DataArray(da.ones((4, 4), chunks=2, dtype=np.float64),\n attrs=attrs, dims=('y', 'x'),\n coords={'y': [0, 0.5, 1, 1.5], 'x': [0, 0.5, 1, 1.5]})\n ds2.attrs['area'] = bigger_area\n self.ds2 = ds2\n self.sza = xr.DataArray(\n np.rad2deg(np.arccos(da.from_array([[0.0149581333, 0.0146694376], [0.0150812684, 0.0147925727]],\n chunks=2))),\n attrs={'area': area},\n dims=('y', 'x'),\n coords={'y': [0, 1], 'x': [0, 1]},\n )\n\n def test_basic_default_not_provided(self):\n \"\"\"Test default limits when SZA isn't provided.\"\"\"\n from satpy.modifiers.geometry import SunZenithCorrector\n comp = SunZenithCorrector(name='sza_test', modifiers=tuple())\n res = comp((self.ds1,), test_attr='test')\n np.testing.assert_allclose(res.values, np.array([[22.401667, 22.31777], [22.437503, 22.353533]]))\n self.assertIn('y', res.coords)\n self.assertIn('x', res.coords)\n ds1 = self.ds1.copy().drop_vars(('y', 'x'))\n res = comp((ds1,), test_attr='test')\n np.testing.assert_allclose(res.values, np.array([[22.401667, 22.31777], [22.437503, 22.353533]]))\n self.assertNotIn('y', res.coords)\n self.assertNotIn('x', res.coords)\n\n def test_basic_lims_not_provided(self):\n \"\"\"Test custom limits when SZA isn't provided.\"\"\"\n from satpy.modifiers.geometry import SunZenithCorrector\n comp = SunZenithCorrector(name='sza_test', modifiers=tuple(), correction_limit=90)\n res = comp((self.ds1,), test_attr='test')\n np.testing.assert_allclose(res.values, np.array([[66.853262, 68.168939], [66.30742, 67.601493]]))\n\n def test_basic_default_provided(self):\n \"\"\"Test default limits when SZA is provided.\"\"\"\n from satpy.modifiers.geometry import SunZenithCorrector\n comp = SunZenithCorrector(name='sza_test', modifiers=tuple())\n res = comp((self.ds1, self.sza), test_attr='test')\n np.testing.assert_allclose(res.values, np.array([[22.401667, 22.31777], [22.437503, 22.353533]]))\n\n def test_basic_lims_provided(self):\n \"\"\"Test custom limits when SZA is provided.\"\"\"\n from satpy.modifiers.geometry import SunZenithCorrector\n comp = SunZenithCorrector(name='sza_test', modifiers=tuple(), correction_limit=90)\n res = comp((self.ds1, self.sza), test_attr='test')\n np.testing.assert_allclose(res.values, np.array([[66.853262, 68.168939], [66.30742, 67.601493]]))\n\n def test_imcompatible_areas(self):\n \"\"\"Test sunz correction on incompatible areas.\"\"\"\n from satpy.composites import IncompatibleAreas\n from satpy.modifiers.geometry import SunZenithCorrector\n comp = SunZenithCorrector(name='sza_test', modifiers=tuple(), correction_limit=90)\n with pytest.raises(IncompatibleAreas):\n comp((self.ds2, self.sza), test_attr='test')\n\n\nclass TestNIRReflectance(unittest.TestCase):\n \"\"\"Test NIR reflectance compositor.\"\"\"\n\n def setUp(self):\n \"\"\"Set up the test case for the NIRReflectance compositor.\"\"\"\n self.get_lonlats = mock.MagicMock()\n self.lons, self.lats = 1, 2\n self.get_lonlats.return_value = 
(self.lons, self.lats)\n area = mock.MagicMock(get_lonlats=self.get_lonlats)\n\n self.start_time = 1\n self.metadata = {'platform_name': 'Meteosat-11',\n 'sensor': 'seviri',\n 'name': 'IR_039',\n 'area': area,\n 'start_time': self.start_time}\n\n nir_arr = np.random.random((2, 2))\n self.nir = xr.DataArray(da.from_array(nir_arr), dims=['y', 'x'])\n self.nir.attrs.update(self.metadata)\n\n ir_arr = 100 * np.random.random((2, 2))\n self.ir_ = xr.DataArray(da.from_array(ir_arr), dims=['y', 'x'])\n self.ir_.attrs['area'] = area\n\n self.sunz_arr = 100 * np.random.random((2, 2))\n self.sunz = xr.DataArray(da.from_array(self.sunz_arr), dims=['y', 'x'])\n self.sunz.attrs['standard_name'] = 'solar_zenith_angle'\n self.sunz.attrs['area'] = area\n self.da_sunz = da.from_array(self.sunz_arr)\n\n refl_arr = np.random.random((2, 2))\n self.refl = da.from_array(refl_arr)\n self.refl_with_co2 = da.from_array(np.random.random((2, 2)))\n self.refl_from_tbs = mock.MagicMock()\n self.refl_from_tbs.side_effect = self.fake_refl_from_tbs\n\n def fake_refl_from_tbs(self, sun_zenith, da_nir, da_tb11, tb_ir_co2=None):\n \"\"\"Fake refl_from_tbs.\"\"\"\n del sun_zenith, da_nir, da_tb11\n if tb_ir_co2 is not None:\n return self.refl_with_co2\n return self.refl\n\n @mock.patch('satpy.modifiers.spectral.sun_zenith_angle')\n @mock.patch('satpy.modifiers.NIRReflectance.apply_modifier_info')\n @mock.patch('satpy.modifiers.spectral.Calculator')\n def test_provide_sunz_no_co2(self, calculator, apply_modifier_info, sza):\n \"\"\"Test NIR reflectance compositor provided only sunz.\"\"\"\n calculator.return_value = mock.MagicMock(\n reflectance_from_tbs=self.refl_from_tbs)\n sza.return_value = self.da_sunz\n from satpy.modifiers.spectral import NIRReflectance\n\n comp = NIRReflectance(name='test')\n info = {'modifiers': None}\n res = comp([self.nir, self.ir_], optional_datasets=[self.sunz], **info)\n\n assert self.metadata.items() <= res.attrs.items()\n assert res.attrs['units'] == '%'\n assert res.attrs['sun_zenith_threshold'] is not None\n assert np.allclose(res.data, self.refl * 100).compute()\n\n @mock.patch('satpy.modifiers.spectral.sun_zenith_angle')\n @mock.patch('satpy.modifiers.NIRReflectance.apply_modifier_info')\n @mock.patch('satpy.modifiers.spectral.Calculator')\n def test_no_sunz_no_co2(self, calculator, apply_modifier_info, sza):\n \"\"\"Test NIR reflectance compositor with minimal parameters.\"\"\"\n calculator.return_value = mock.MagicMock(\n reflectance_from_tbs=self.refl_from_tbs)\n sza.return_value = self.da_sunz\n from satpy.modifiers.spectral import NIRReflectance\n\n comp = NIRReflectance(name='test')\n info = {'modifiers': None}\n res = comp([self.nir, self.ir_], optional_datasets=[], **info)\n\n self.get_lonlats.assert_called()\n sza.assert_called_with(self.start_time, self.lons, self.lats)\n self.refl_from_tbs.assert_called_with(self.da_sunz, self.nir.data, self.ir_.data, tb_ir_co2=None)\n assert np.allclose(res.data, self.refl * 100).compute()\n\n @mock.patch('satpy.modifiers.spectral.sun_zenith_angle')\n @mock.patch('satpy.modifiers.NIRReflectance.apply_modifier_info')\n @mock.patch('satpy.modifiers.spectral.Calculator')\n def test_no_sunz_with_co2(self, calculator, apply_modifier_info, sza):\n \"\"\"Test NIR reflectance compositor provided extra co2 info.\"\"\"\n calculator.return_value = mock.MagicMock(\n reflectance_from_tbs=self.refl_from_tbs)\n from satpy.modifiers.spectral import NIRReflectance\n sza.return_value = self.da_sunz\n\n comp = NIRReflectance(name='test')\n info = {'modifiers': 
None}\n        co2_arr = np.random.random((2, 2))\n        co2 = xr.DataArray(da.from_array(co2_arr), dims=['y', 'x'])\n        co2.attrs['wavelength'] = [12.0, 13.0, 14.0]\n        co2.attrs['units'] = 'K'\n        res = comp([self.nir, self.ir_], optional_datasets=[co2], **info)\n\n        self.refl_from_tbs.assert_called_with(self.da_sunz, self.nir.data, self.ir_.data, tb_ir_co2=co2.data)\n        assert np.allclose(res.data, self.refl_with_co2 * 100).compute()\n\n    @mock.patch('satpy.modifiers.spectral.sun_zenith_angle')\n    @mock.patch('satpy.modifiers.NIRReflectance.apply_modifier_info')\n    @mock.patch('satpy.modifiers.spectral.Calculator')\n    def test_provide_sunz_and_threshold(self, calculator, apply_modifier_info, sza):\n        \"\"\"Test NIR reflectance compositor provided sunz and a sunz threshold.\"\"\"\n        calculator.return_value = mock.MagicMock(\n            reflectance_from_tbs=self.refl_from_tbs)\n        from satpy.modifiers.spectral import NIRReflectance\n        sza.return_value = self.da_sunz\n\n        comp = NIRReflectance(name='test', sunz_threshold=84.0)\n        info = {'modifiers': None}\n        res = comp([self.nir, self.ir_], optional_datasets=[self.sunz], **info)\n\n        self.assertEqual(res.attrs['sun_zenith_threshold'], 84.0)\n        calculator.assert_called_with('Meteosat-11', 'seviri', 'IR_039',\n                                      sunz_threshold=84.0, masking_limit=NIRReflectance.MASKING_LIMIT)\n\n    @mock.patch('satpy.modifiers.spectral.sun_zenith_angle')\n    @mock.patch('satpy.modifiers.NIRReflectance.apply_modifier_info')\n    @mock.patch('satpy.modifiers.spectral.Calculator')\n    def test_sunz_threshold_default_value_is_not_none(self, calculator, apply_modifier_info, sza):\n        \"\"\"Check that sun_zenith_threshold is not None.\"\"\"\n        from satpy.modifiers.spectral import NIRReflectance\n\n        comp = NIRReflectance(name='test')\n        info = {'modifiers': None}\n        calculator.return_value = mock.MagicMock(\n            reflectance_from_tbs=self.refl_from_tbs)\n        comp([self.nir, self.ir_], optional_datasets=[self.sunz], **info)\n\n        assert comp.sun_zenith_threshold is not None\n\n    @mock.patch('satpy.modifiers.spectral.sun_zenith_angle')\n    @mock.patch('satpy.modifiers.NIRReflectance.apply_modifier_info')\n    @mock.patch('satpy.modifiers.spectral.Calculator')\n    def test_provide_masking_limit(self, calculator, apply_modifier_info, sza):\n        \"\"\"Test NIR reflectance compositor provided sunz and a masking limit.\"\"\"\n        calculator.return_value = mock.MagicMock(\n            reflectance_from_tbs=self.refl_from_tbs)\n        from satpy.modifiers.spectral import NIRReflectance\n        sza.return_value = self.da_sunz\n\n        comp = NIRReflectance(name='test', masking_limit=None)\n        info = {'modifiers': None}\n        res = comp([self.nir, self.ir_], optional_datasets=[self.sunz], **info)\n\n        self.assertIsNone(res.attrs['sun_zenith_masking_limit'])\n        calculator.assert_called_with('Meteosat-11', 'seviri', 'IR_039',\n                                      sunz_threshold=NIRReflectance.TERMINATOR_LIMIT, masking_limit=None)\n\n    @mock.patch('satpy.modifiers.spectral.sun_zenith_angle')\n    @mock.patch('satpy.modifiers.NIRReflectance.apply_modifier_info')\n    @mock.patch('satpy.modifiers.spectral.Calculator')\n    def test_masking_limit_default_value_is_not_none(self, calculator, apply_modifier_info, sza):\n        \"\"\"Check that masking_limit is not None.\"\"\"\n        from satpy.modifiers.spectral import NIRReflectance\n\n        comp = NIRReflectance(name='test')\n        info = {'modifiers': None}\n        calculator.return_value = mock.MagicMock(\n            reflectance_from_tbs=self.refl_from_tbs)\n        comp([self.nir, self.ir_], optional_datasets=[self.sunz], **info)\n\n        assert comp.masking_limit is not None\n\n\nclass 
TestNIREmissivePartFromReflectance(unittest.TestCase):\n \"\"\"Test the NIR Emissive part from reflectance compositor.\"\"\"\n\n @mock.patch('satpy.modifiers.spectral.sun_zenith_angle')\n @mock.patch('satpy.modifiers.NIRReflectance.apply_modifier_info')\n @mock.patch('satpy.modifiers.spectral.Calculator')\n def test_compositor(self, calculator, apply_modifier_info, sza):\n \"\"\"Test the NIR emissive part from reflectance compositor.\"\"\"\n from satpy.modifiers.spectral import NIRReflectance\n\n refl_arr = np.random.random((2, 2))\n refl = da.from_array(refl_arr)\n\n refl_from_tbs = mock.MagicMock()\n refl_from_tbs.return_value = refl\n calculator.return_value = mock.MagicMock(reflectance_from_tbs=refl_from_tbs)\n\n emissive_arr = np.random.random((2, 2))\n emissive = da.from_array(emissive_arr)\n emissive_part = mock.MagicMock()\n emissive_part.return_value = emissive\n calculator.return_value = mock.MagicMock(emissive_part_3x=emissive_part)\n\n from satpy.modifiers.spectral import NIREmissivePartFromReflectance\n\n comp = NIREmissivePartFromReflectance(name='test', sunz_threshold=86.0)\n info = {'modifiers': None}\n\n platform = 'NOAA-20'\n sensor = 'viirs'\n chan_name = 'M12'\n\n get_lonlats = mock.MagicMock()\n lons, lats = 1, 2\n get_lonlats.return_value = (lons, lats)\n area = mock.MagicMock(get_lonlats=get_lonlats)\n\n nir_arr = np.random.random((2, 2))\n nir = xr.DataArray(da.from_array(nir_arr), dims=['y', 'x'])\n nir.attrs['platform_name'] = platform\n nir.attrs['sensor'] = sensor\n nir.attrs['name'] = chan_name\n nir.attrs['area'] = area\n ir_arr = np.random.random((2, 2))\n ir_ = xr.DataArray(da.from_array(ir_arr), dims=['y', 'x'])\n ir_.attrs['area'] = area\n\n sunz_arr = 100 * np.random.random((2, 2))\n sunz = xr.DataArray(da.from_array(sunz_arr), dims=['y', 'x'])\n sunz.attrs['standard_name'] = 'solar_zenith_angle'\n sunz.attrs['area'] = area\n sunz2 = da.from_array(sunz_arr)\n sza.return_value = sunz2\n\n res = comp([nir, ir_], optional_datasets=[sunz], **info)\n self.assertEqual(res.attrs['sun_zenith_threshold'], 86.0)\n self.assertEqual(res.attrs['units'], 'K')\n self.assertEqual(res.attrs['platform_name'], platform)\n self.assertEqual(res.attrs['sensor'], sensor)\n self.assertEqual(res.attrs['name'], chan_name)\n calculator.assert_called_with('NOAA-20', 'viirs', 'M12', sunz_threshold=86.0,\n masking_limit=NIRReflectance.MASKING_LIMIT)\n\n\nclass TestPSPAtmosphericalCorrection(unittest.TestCase):\n \"\"\"Test the pyspectral-based atmospheric correction modifier.\"\"\"\n\n def setUp(self):\n \"\"\"Patch in-class imports.\"\"\"\n self.orbital = mock.MagicMock()\n modules = {\n 'pyspectral.atm_correction_ir': mock.MagicMock(),\n 'pyorbital.orbital': self.orbital,\n }\n self.module_patcher = mock.patch.dict('sys.modules', modules)\n self.module_patcher.start()\n\n def tearDown(self):\n \"\"\"Unpatch in-class imports.\"\"\"\n self.module_patcher.stop()\n\n @mock.patch('satpy.modifiers.PSPAtmosphericalCorrection.apply_modifier_info')\n @mock.patch('satpy.modifiers.atmosphere.get_satpos')\n def test_call(self, get_satpos, *mocks):\n \"\"\"Test atmospherical correction.\"\"\"\n from satpy.modifiers import PSPAtmosphericalCorrection\n\n # Patch methods\n get_satpos.return_value = 'sat_lon', 'sat_lat', 12345678\n self.orbital.get_observer_look.return_value = 0, 0\n area = mock.MagicMock()\n area.get_lonlats.return_value = 'lons', 'lats'\n band = mock.MagicMock(attrs={'area': area,\n 'start_time': 'start_time',\n 'name': 'name',\n 'platform_name': 'platform',\n 'sensor': 'sensor'}, 
dims=['y'])\n\n        # Perform atmospherical correction\n        psp = PSPAtmosphericalCorrection(name='dummy')\n        psp(projectables=[band])\n\n        # Check arguments of get_observer_look() call, especially the altitude\n        # unit conversion from meters to kilometers\n        self.orbital.get_observer_look.assert_called_with(\n            'sat_lon', 'sat_lat', 12345.678, 'start_time', 'lons', 'lats', 0)\n\n\nclass TestPSPRayleighReflectance(unittest.TestCase):\n    \"\"\"Test the pyspectral-based Rayleigh correction modifier.\"\"\"\n\n    def setUp(self):\n        \"\"\"Patch in-class imports.\"\"\"\n        self.astronomy = mock.MagicMock()\n        self.orbital = mock.MagicMock()\n        modules = {\n            'pyorbital.astronomy': self.astronomy,\n            'pyorbital.orbital': self.orbital,\n        }\n        self.module_patcher = mock.patch.dict('sys.modules', modules)\n        self.module_patcher.start()\n\n    def tearDown(self):\n        \"\"\"Unpatch in-class imports.\"\"\"\n        self.module_patcher.stop()\n\n    @mock.patch('satpy.modifiers.atmosphere.get_satpos')\n    def test_get_angles(self, get_satpos):\n        \"\"\"Test sun and satellite angle calculation.\"\"\"\n        from satpy.modifiers import PSPRayleighReflectance\n\n        # Patch methods\n        get_satpos.return_value = 'sat_lon', 'sat_lat', 12345678\n        self.orbital.get_observer_look.return_value = 0, 0\n        self.astronomy.get_alt_az.return_value = 0, 0\n        area = mock.MagicMock()\n        lons = np.zeros((5, 5))\n        lons[1, 1] = np.inf\n        lons = da.from_array(lons, chunks=5)\n        lats = np.zeros((5, 5))\n        lats[1, 1] = np.inf\n        lats = da.from_array(lats, chunks=5)\n        area.get_lonlats.return_value = (lons, lats)\n        vis = mock.MagicMock(attrs={'area': area,\n                                    'start_time': 'start_time'})\n\n        # Compute angles\n        psp = PSPRayleighReflectance(name='dummy')\n        psp.get_angles(vis)\n\n        # Check arguments of get_observer_look() call, especially the altitude\n        # unit conversion from meters to kilometers\n        self.orbital.get_observer_look.assert_called_once()\n        args = self.orbital.get_observer_look.call_args[0]\n        self.assertEqual(args[:4], ('sat_lon', 'sat_lat', 12345.678, 'start_time'))\n        self.assertIsInstance(args[4], da.Array)\n        self.assertIsInstance(args[5], da.Array)\n        self.assertEqual(args[6], 0)\n", "repo_name": "Mervolt/pp", "sub_path": "venv/Lib/site-packages/satpy/tests/test_modifiers.py", "file_name": "test_modifiers.py", "file_ext": "py", "file_size_in_byte": 18707, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pyresample.geometry.AreaDefinition", "line_number": 17, "usage_type": "call"}, {"api_name": "pyresample.geometry.AreaDefinition", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "call"}, {"api_name": "xarray.DataArray", "line_number": 27, "usage_type": "call"}, {"api_name": "dask.array.ones", "line_number": 27, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.float64", "line_number": 27, "usage_type": "attribute"}, {"api_name": "xarray.DataArray", "line_number": 31, "usage_type": "call"}, {"api_name": "dask.array.ones", "line_number": 31, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 31, "usage_type": "name"}, {"api_name": "numpy.float64", "line_number": 31, "usage_type": "attribute"}, {"api_name": "xarray.DataArray", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.rad2deg", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 
37, "usage_type": "call"}, {"api_name": "dask.array.from_array", "line_number": 37, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 37, "usage_type": "name"}, {"api_name": "satpy.modifiers.geometry.SunZenithCorrector", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "satpy.modifiers.geometry.SunZenithCorrector", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "satpy.modifiers.geometry.SunZenithCorrector", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "satpy.modifiers.geometry.SunZenithCorrector", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "satpy.modifiers.geometry.SunZenithCorrector", "line_number": 83, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 84, "usage_type": "call"}, {"api_name": "satpy.composites.IncompatibleAreas", "line_number": 84, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 88, "usage_type": "attribute"}, {"api_name": "unittest.mock.MagicMock", "line_number": 93, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 93, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 96, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 96, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 105, "usage_type": "attribute"}, {"api_name": "xarray.DataArray", "line_number": 106, "usage_type": "call"}, {"api_name": "dask.array.from_array", "line_number": 106, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 106, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 109, "usage_type": "attribute"}, {"api_name": "xarray.DataArray", "line_number": 110, "usage_type": "call"}, {"api_name": "dask.array.from_array", "line_number": 110, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 110, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 113, "usage_type": "attribute"}, {"api_name": "xarray.DataArray", "line_number": 114, "usage_type": "call"}, {"api_name": "dask.array.from_array", "line_number": 114, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 114, "usage_type": 
"name"}, {"api_name": "dask.array.from_array", "line_number": 117, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 117, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 119, "usage_type": "attribute"}, {"api_name": "dask.array.from_array", "line_number": 120, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 120, "usage_type": "name"}, {"api_name": "dask.array.from_array", "line_number": 121, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 121, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 121, "usage_type": "attribute"}, {"api_name": "unittest.mock.MagicMock", "line_number": 122, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 122, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 137, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 137, "usage_type": "name"}, {"api_name": "satpy.modifiers.spectral.NIRReflectance", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 149, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 132, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 132, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 133, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 133, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 134, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 134, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 156, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 156, "usage_type": "name"}, {"api_name": "satpy.modifiers.spectral.NIRReflectance", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 168, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 151, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 151, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 152, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 152, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 153, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 153, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 175, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 175, "usage_type": "name"}, {"api_name": "satpy.modifiers.spectral.NIRReflectance", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 182, "usage_type": "attribute"}, {"api_name": "xarray.DataArray", "line_number": 183, "usage_type": "call"}, {"api_name": "dask.array.from_array", "line_number": 183, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 183, "usage_type": "name"}, {"api_name": "numpy.allclose", "line_number": 189, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 170, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 170, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 171, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 171, "usage_type": "name"}, {"api_name": "unittest.mock.patch", 
"line_number": 172, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 172, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 196, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 196, "usage_type": "name"}, {"api_name": "satpy.modifiers.spectral.NIRReflectance", "line_number": 201, "usage_type": "call"}, {"api_name": "satpy.modifiers.spectral.NIRReflectance.MASKING_LIMIT", "line_number": 207, "usage_type": "attribute"}, {"api_name": "satpy.modifiers.spectral.NIRReflectance", "line_number": 207, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 191, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 191, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 192, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 192, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 193, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 193, "usage_type": "name"}, {"api_name": "satpy.modifiers.spectral.NIRReflectance", "line_number": 216, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 218, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 218, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 209, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 209, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 210, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 210, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 211, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 211, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 229, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 229, "usage_type": "name"}, {"api_name": "satpy.modifiers.spectral.NIRReflectance", "line_number": 234, "usage_type": "call"}, {"api_name": "satpy.modifiers.spectral.NIRReflectance.TERMINATOR_LIMIT", "line_number": 240, "usage_type": "attribute"}, {"api_name": "satpy.modifiers.spectral.NIRReflectance", "line_number": 240, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 224, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 224, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 225, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 225, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 226, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 226, "usage_type": "name"}, {"api_name": "satpy.modifiers.spectral.NIRReflectance", "line_number": 249, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 251, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 251, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 242, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 242, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 243, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 243, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 244, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 244, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 258, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 268, "usage_type": 
"call"}, {"api_name": "numpy.random", "line_number": 268, "usage_type": "attribute"}, {"api_name": "dask.array.from_array", "line_number": 269, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 269, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 271, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 271, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 273, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 273, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 275, "usage_type": "attribute"}, {"api_name": "dask.array.from_array", "line_number": 276, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 276, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 277, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 277, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 279, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 279, "usage_type": "name"}, {"api_name": "satpy.modifiers.spectral.NIREmissivePartFromReflectance", "line_number": 283, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 290, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 290, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 293, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 293, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 295, "usage_type": "attribute"}, {"api_name": "xarray.DataArray", "line_number": 296, "usage_type": "call"}, {"api_name": "dask.array.from_array", "line_number": 296, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 296, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 301, "usage_type": "attribute"}, {"api_name": "xarray.DataArray", "line_number": 302, "usage_type": "call"}, {"api_name": "dask.array.from_array", "line_number": 302, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 302, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 305, "usage_type": "attribute"}, {"api_name": "xarray.DataArray", "line_number": 306, "usage_type": "call"}, {"api_name": "dask.array.from_array", "line_number": 306, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 306, "usage_type": "name"}, {"api_name": "dask.array.from_array", "line_number": 309, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 309, "usage_type": "name"}, {"api_name": "satpy.modifiers.spectral.NIRReflectance.MASKING_LIMIT", "line_number": 319, "usage_type": "attribute"}, {"api_name": "satpy.modifiers.spectral.NIRReflectance", "line_number": 319, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 261, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 261, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 262, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 262, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 263, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 263, 
"usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 322, "usage_type": "attribute"}, {"api_name": "unittest.mock.MagicMock", "line_number": 327, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 327, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 329, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 329, "usage_type": "name"}, {"api_name": "unittest.mock.patch.dict", "line_number": 332, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 332, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 332, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 348, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 348, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 350, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 350, "usage_type": "name"}, {"api_name": "satpy.modifiers.PSPAtmosphericalCorrection", "line_number": 357, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 339, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 339, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 340, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 340, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 366, "usage_type": "attribute"}, {"api_name": "unittest.mock.MagicMock", "line_number": 371, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 371, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 372, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 372, "usage_type": "name"}, {"api_name": "unittest.mock.patch.dict", "line_number": 377, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 377, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 377, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 393, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 393, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 394, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 395, "usage_type": "attribute"}, {"api_name": "dask.array.from_array", "line_number": 396, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 396, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 398, "usage_type": "attribute"}, {"api_name": "dask.array.from_array", "line_number": 399, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 399, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 401, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 401, "usage_type": "name"}, {"api_name": "satpy.modifiers.PSPRayleighReflectance", "line_number": 405, "usage_type": "call"}, {"api_name": "dask.array.Array", "line_number": 413, "usage_type": "attribute"}, {"api_name": "dask.array", "line_number": 413, "usage_type": "name"}, {"api_name": "dask.array.Array", "line_number": 414, "usage_type": "attribute"}, {"api_name": "dask.array", "line_number": 414, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 384, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 384, "usage_type": "name"}]} +{"seq_id": "42110134562", "text": "import pandas as pd\r\nimport matplotlib.pyplot as 
plt\r\nimport numpy as np\r\nfrom pyparsing import line\r\nimport pt100_lookuptable as pt100\r\n\r\n'''\r\nhttps://pythonbasics.org/read-excel/\r\nRead Excel files (extensions:.xlsx, .xls) with Python Pandas. To read an excel file as a DataFrame, use the pandas read_excel() method.\r\nYou can specify the sheet to read with the argument sheet_name. Specify by number (starting at 0). Or Specify by sheet name.\r\nIt is also possible to specify a list in the argumentsheet_name. It is OK even if it is a number of 0 starting or the sheet name.\r\n'''\r\n\r\nexcel_data = pd.read_excel('ET2_test_TemperatureSensor_test2.xlsx', sheet_name='Sheet2')\r\n# print(excel_data)\r\n\r\nTempSetting = excel_data['TempSetting']\r\n\r\nTS_testOut4_1 = excel_data['TS_testOUT4_1']\r\nTS_Out4_1 = excel_data['TS_OUT4_1']\r\nTS_testOut4_2 = excel_data['TS_testOUT4_2']\r\nTS_Out4_2 = excel_data['TS_OUT4_2']\r\nTS_testOut4_3 = excel_data['TS_testOUT4_3']\r\nTS_Out4_3 = excel_data['TS_OUT4_3']\r\n\r\nTS_testOut5_1 = excel_data['TS_testOUT5_1']\r\nTS_Out5_1 = excel_data['TS_OUT5_1']\r\nTS_testOut5_2 = excel_data['TS_testOUT5_2']\r\nTS_Out5_2 = excel_data['TS_OUT5_2']\r\nTS_testOut5_3 = excel_data['TS_testOUT5_3']\r\nTS_Out5_3 = excel_data['TS_OUT5_3']\r\n\r\nTS_testOut6_1 = excel_data['TS_testOUT6_1']\r\nTS_Out6_1 = excel_data['TS_OUT6_1']\r\nTS_testOut6_2 = excel_data['TS_testOUT6_2']\r\nTS_Out6_2 = excel_data['TS_OUT6_2']\r\nTS_testOut6_3 = excel_data['TS_testOUT6_3']\r\nTS_Out6_3 = excel_data['TS_OUT6_3']\r\n\r\nTS_testOut7_1 = excel_data['TS_testOUT7_1']\r\nTS_Out7_1 = excel_data['TS_OUT7_1']\r\nTS_testOut7_2 = excel_data['TS_testOUT7_2']\r\nTS_Out7_2 = excel_data['TS_OUT7_2']\r\nTS_testOut7_3 = excel_data['TS_testOUT7_3']\r\nTS_Out7_3 = excel_data['TS_OUT7_3']\r\n\r\nTS_testOut8_1 = excel_data['TS_testOUT8_1']\r\nTS_Out8_1 = excel_data['TS_OUT8_1']\r\nTS_testOut8_2 = excel_data['TS_testOUT8_2']\r\nTS_Out8_2 = excel_data['TS_OUT8_2']\r\nTS_testOut8_3 = excel_data['TS_testOUT8_3']\r\nTS_Out8_3 = excel_data['TS_OUT8_3']\r\n\r\n# Data processing\r\n# tempReal = []\r\n# for i in range(len(Res_pt100)):\r\n# tempReal += [pt100.interp_resist_to_temp_naive(Res_pt100[i])]\r\n\r\n# Create figure\r\nfig = plt.figure()\r\n# Add subplot to figure\r\nax = fig.add_subplot(111)\r\n\r\nTS_Measure = 1\r\nif (TS_Measure == 1):\r\n # Plot data\r\n ax.plot(TempSetting, TS_testOut4_1, linestyle = '--', marker='1', c='blue', label = 'B4 ET2_test TS_testOUT')\r\n ax.plot(TempSetting, TS_Out4_1, linestyle = '-', marker='1', c='blue', label = 'B4 ET2_test TS_OUT')\r\n\r\n ax.plot(TempSetting, TS_testOut5_1, linestyle = '--', marker='*', c='green', label = 'B5 ET2_test TS_testOUT')\r\n ax.plot(TempSetting, TS_Out5_1, linestyle = '-', marker='*', c='green', label = 'B5 ET2_test TS_OUT')\r\n\r\n ax.plot(TempSetting, TS_testOut6_1, linestyle = '--', marker='o', c='cyan', label = 'B6 ET2_test TS_testOUT')\r\n ax.plot(TempSetting, TS_Out6_1, linestyle = '-', marker='o', c='cyan', label = 'B6 ET2_test TS_OUT')\r\n\r\n ax.plot(TempSetting, TS_testOut7_1, linestyle = '--', marker='>', c='magenta', label = 'B7 ET2_test TS_testOUT')\r\n ax.plot(TempSetting, TS_Out7_1, linestyle = '-', marker='>', c='magenta', label = 'B7 ET2_test TS_OUT')\r\n\r\n ax.plot(TempSetting, TS_testOut8_1, linestyle = '--', marker='s', c='brown', label = 'B8 ET2_test TS_testOUT')\r\n ax.plot(TempSetting, TS_Out8_1, linestyle = '-', marker='s', c='brown', label = 'B8 ET2_test TS_OUT')\r\nelse:\r\n if (TS_Measure == 2):\r\n ax.plot(TempSetting, TS_testOut4_2, linestyle = '--', 
marker='1', c='blue', label = 'B4 ET2_test TS_testOUT')\r\n        ax.plot(TempSetting, TS_Out4_2, linestyle = '-', marker='1', c='blue', label = 'B4 ET2_test TS_OUT')\r\n\r\n        ax.plot(TempSetting, TS_testOut5_2, linestyle = '--', marker='*', c='green', label = 'B5 ET2_test TS_testOUT')\r\n        ax.plot(TempSetting, TS_Out5_2, linestyle = '-', marker='*', c='green', label = 'B5 ET2_test TS_OUT')\r\n\r\n        ax.plot(TempSetting, TS_testOut6_2, linestyle = '--', marker='o', c='cyan', label = 'B6 ET2_test TS_testOUT')\r\n        ax.plot(TempSetting, TS_Out6_2, linestyle = '-', marker='o', c='cyan', label = 'B6 ET2_test TS_OUT')\r\n\r\n        ax.plot(TempSetting, TS_testOut7_2, linestyle = '--', marker='>', c='magenta', label = 'B7 ET2_test TS_testOUT')\r\n        ax.plot(TempSetting, TS_Out7_2, linestyle = '-', marker='>', c='magenta', label = 'B7 ET2_test TS_OUT')\r\n\r\n        ax.plot(TempSetting, TS_testOut8_2, linestyle = '--', marker='s', c='brown', label = 'B8 ET2_test TS_testOUT')\r\n        ax.plot(TempSetting, TS_Out8_2, linestyle = '-', marker='s', c='brown', label = 'B8 ET2_test TS_OUT')\r\n    else:\r\n        if (TS_Measure == 3):\r\n            ax.plot(TempSetting, TS_testOut4_3, linestyle = '--', marker='1', c='blue', label = 'B4 ET2_test TS_testOUT')\r\n            ax.plot(TempSetting, TS_Out4_3, linestyle = '-', marker='1', c='blue', label = 'B4 ET2_test TS_OUT')\r\n\r\n            ax.plot(TempSetting, TS_testOut5_3, linestyle = '--', marker='*', c='green', label = 'B5 ET2_test TS_testOUT')\r\n            ax.plot(TempSetting, TS_Out5_3, linestyle = '-', marker='*', c='green', label = 'B5 ET2_test TS_OUT')\r\n\r\n            ax.plot(TempSetting, TS_testOut6_3, linestyle = '--', marker='o', c='cyan', label = 'B6 ET2_test TS_testOUT')\r\n            ax.plot(TempSetting, TS_Out6_3, linestyle = '-', marker='o', c='cyan', label = 'B6 ET2_test TS_OUT')\r\n\r\n            ax.plot(TempSetting, TS_testOut7_3, linestyle = '--', marker='>', c='magenta', label = 'B7 ET2_test TS_testOUT')\r\n            ax.plot(TempSetting, TS_Out7_3, linestyle = '-', marker='>', c='magenta', label = 'B7 ET2_test TS_OUT')\r\n\r\n            ax.plot(TempSetting, TS_testOut8_3, linestyle = '--', marker='s', c='brown', label = 'B8 ET2_test TS_testOUT')\r\n            ax.plot(TempSetting, TS_Out8_3, linestyle = '-', marker='s', c='brown', label = 'B8 ET2_test TS_OUT') \r\n\r\n\r\nax.yaxis.get_ticklocs(minor=True)\r\nax.minorticks_on()\r\n\r\n# Set axes\r\nax.set_xlim(-45, 55)\r\nax.set_ylim(0.475, 0.775)\r\n\r\n# Set axis labels\r\nax.set_xlabel('Temp setting [$^o$C]')\r\nax.set_ylabel('Voltage [V]')\r\n\r\n# Add legend\r\nax.legend()\r\n# Add legend - loc is a tuple specifying the bottom left corner\r\n# ax.legend(loc=(1.02, 0.65))\r\n\r\n# Grid on setting\r\nax.grid(True)\r\n\r\n# Save as fig\r\nfig.savefig('ET2_test_Temp_v3.png', transparent = True)\r\n\r\n# Show plot\r\nplt.show()\r\n\r\n\r\n\r\n\r\n", "repo_name": "royxicer/ET2_test-test", "sub_path": "TemperatureSensor_test/ET2_test_TS_v3.py", "file_name": "ET2_test_TS_v3.py", "file_ext": "py", "file_size_in_byte": 6329, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pandas.read_excel", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}]} +{"seq_id": "35323861916", "text": "from bs4 import BeautifulSoup\nfrom typing import Union, 
List\nfrom datetime import datetime\n\nfrom ....config import (\n    MINUTES_PER_HOUR,\n    SECONDS_PER_DAY,\n    SECONDS_PER_HOUR,\n    SECONDS_PER_MINUTE\n)\n\nMISSION_TYPES = {\n    \"Collect Food\": \"food\",\n    \"Battle Dragons\": \"battle\",\n    \"Hatch Eggs\": \"hatch\",\n    \"Breed Dragons\": \"breed\",\n    \"Feed Dragons\": \"feed\",\n    \"League Battles\": \"pvp\",\n    \"Collect Gold\": \"gold\",\n}\n\ndef pool_time_to_seconds(pool_time: str) -> int:\n    pool_time = pool_time.lower()\n\n    if pool_time == \"instant\" or pool_time == \"no minimum\":\n        return 0\n    \n    if \"minutes\" in pool_time:\n        if \"60\" in pool_time:\n            return 60 * SECONDS_PER_MINUTE\n        \n        minutes = datetime.strptime(pool_time, \"%M minutes\").minute\n        return minutes * SECONDS_PER_MINUTE\n\n    elif \"hours\" in pool_time:\n        hours = datetime.strptime(pool_time, \"%H hours\").hour\n        return hours * SECONDS_PER_HOUR\n\n    elif \"hr\" in pool_time and \"min\" in pool_time:\n        hours_and_minutes = datetime.strptime(pool_time, \"%Hhr %Mmin\")\n        hours = hours_and_minutes.hour\n        minutes = hours_and_minutes.minute\n\n        return (hours * SECONDS_PER_HOUR) + (minutes * SECONDS_PER_MINUTE)\n\n    elif \"day\" in pool_time:\n        days_and_hours = datetime.strptime(pool_time, \"%d day %H hrs\")\n        days = days_and_hours.day\n        hours = days_and_hours.hour\n        return (days * SECONDS_PER_DAY) + (hours * SECONDS_PER_HOUR)\n\nclass MissionParser:\n    def __init__(\n        self,\n        mission_soup: BeautifulSoup\n    ) -> None:\n        self.mission_soup = mission_soup\n\n        self.info_divs = mission_soup.select(\"div.m2\")\n\n    def get_type(self) -> str:\n        name = self.get_name()\n\n        return MISSION_TYPES[name]\n\n    def get_name(self) -> str:\n        name = self.mission_soup.select_one(\"div.mh\").text\n        return name\n\n    def get_goal_items(self) -> int:\n        goal_items = self.info_divs[0].text\n        return int(goal_items)\n\n    def get_pool_size(self) -> int:\n        pool = self.info_divs[1].text\n        return int(pool)\n    \n    def get_pool_time(self) -> int:\n        pool_time = self.info_divs[2].text\n        pool_time_seconds = pool_time_to_seconds(pool_time)\n        return pool_time_seconds\n\n    def get_item_drop_chance(self) -> str:\n        drop_chance = self.info_divs[3].text\n        return drop_chance\n\n    def get_total_pool_time(self) -> int:\n        total_pool_time = self.info_divs[4].text\n        total_pool_time_seconds = pool_time_to_seconds(total_pool_time)\n        return total_pool_time_seconds\n\n    def get_all(self) -> dict:\n        mission_type = self.get_type()\n        mission_name = self.get_name()\n        goal_items = self.get_goal_items()\n        pool_size = self.get_pool_size()\n        pool_time = self.get_pool_time()\n        total_pool_time = self.get_total_pool_time()\n        item_drop_chance = self.get_item_drop_chance()\n\n        return {\n            \"type\": mission_type,\n            \"name\": mission_name,\n            \"goal_items\": goal_items,\n            \"pool_size\": pool_size,\n            \"pool_time\": {\n                \"per_item\": pool_time,\n                \"total\": total_pool_time\n            },\n            \"item_drop_chance\": item_drop_chance\n        }\n", "repo_name": "1Marcuth/wcdeetlist", "sub_path": "wcdeetlist/parser/islands/heroic_race/mission.py", "file_name": "mission.py", "file_ext": "py", "file_size_in_byte": 3374, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "config.SECONDS_PER_MINUTE", "line_number": 30, "usage_type": "name"}, {"api_name": 
"datetime.datetime.strptime", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "name"}, {"api_name": "config.SECONDS_PER_HOUR", "line_number": 37, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "name"}, {"api_name": "config.SECONDS_PER_HOUR", "line_number": 44, "usage_type": "name"}, {"api_name": "config.SECONDS_PER_MINUTE", "line_number": 44, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "name"}, {"api_name": "config.SECONDS_PER_DAY", "line_number": 50, "usage_type": "name"}, {"api_name": "config.MINUTES_PER_HOUR", "line_number": 50, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "23170312616", "text": "from Models.Test import Test\nfrom Utils.ReverseReader import ReverseReader\nimport logging\nimport re\n\n\nclass TestDescriptionParser:\n def __init__(self) -> None:\n self._version = 1\n\n def parse(self, test: Test, fullPath: str) -> str:\n try:\n if test.is_dimm_error():\n return self.extract_error_description(fullPath)\n elif test.is_chk_serialuart_error():\n return self.extract_chk_serialuart_description(fullPath)\n else:\n return \"\"\n except Exception as e:\n logging.error(str(e))\n return \"\"\n\n def extract_error_description(self, fullPath: str):\n with open(fullPath, \"r\") as fp:\n dims: \"list[str]\" = []\n voltageError = None\n for l_no, line in enumerate(fp):\n if voltageError == None and self.search(\n \".*Voltage SYS.*Deasserted.*\", line\n ):\n voltageError = line[31:].strip()\n continue\n\n if self.search(\"Occur Memory Error at\", line):\n dims.append(self.extract_dim(line))\n continue\n\n description = \"\"\n description = self.append(description, voltageError)\n\n if len(dims) > 0:\n description = self.append(\n description, \"Occur Memory Error at: \" + \", \".join(list(set(dims)))\n )\n\n return description\n\n def extract_chk_serialuart_description(self, fullPath: str):\n for line in ReverseReader().reverse_readline(fullPath):\n if self.search(\".*UART.*TTYUSB.*Fail.*\", line):\n return line.strip()\n return \"\"\n\n def append(self, txt1, txt2):\n if txt1 == None and txt2 == None:\n return \"\"\n elif txt1 != None and txt2 == None:\n return txt1\n elif txt1 != None and txt2 != None:\n return txt2\n elif txt1 != None and txt2 != None:\n return txt1 + \"\\n\" + txt2\n\n def search(self, pattern: str, string: str) -> bool:\n return re.search(pattern, string, re.IGNORECASE) != None\n\n def extract_dim(self, line: str) -> str:\n match = re.search(\"CPU\\d+_DIMM\\d+_C\\d+\", line)\n if match:\n return match.group(0).strip()\n return \"\"\n", "repo_name": "David1906/Xandra", "sub_path": "DataAccess/TestDescriptionParser.py", "file_name": "TestDescriptionParser.py", "file_ext": "py", "file_size_in_byte": 2354, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "Models.Test.Test", "line_number": 11, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 20, "usage_type": "call"}, {"api_name": "Utils.ReverseReader.ReverseReader", "line_number": 49, "usage_type": "call"}, {"api_name": "re.search", "line_number": 65, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 65, "usage_type": 
"attribute"}, {"api_name": "re.search", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "72258613173", "text": "from typing import Any, Generator, List\n\nfrom .queues import SimpleQueue\n\n\nclass Task:\n def __init__(self, task: Generator):\n if not isinstance(task, Generator):\n raise TypeError(f'task is expected to be <Generator>, but got {type(task)}')\n self._task = task\n self._result = None\n\n def get_result(self) -> Any:\n return self._result\n\n\nclass Loop:\n \"\"\"Event loop\n\n Example\n >>> def func1():\n ... for i in range(5):\n ... yield print(f'step {i}')\n ... return 'stop'\n >>> def func2():\n ... for i in range(10, 13):\n ... yield print(f'jump {i}')\n ... return 'stop'\n >>> print(Loop([func1(), func2(),]).run())\n step 0\n jump 10\n step 1\n jump 11\n step 2\n jump 12\n step 3\n step 4\n ['stop', 'stop']\n \"\"\"\n def __init__(self, tasks: Generator | List[Generator]) -> None:\n if isinstance(tasks, Generator):\n tasks = [tasks]\n self._tasks = [Task(t) for t in tasks]\n self._queue = SimpleQueue(self._tasks)\n \n def run(self):\n for curr_task in self._queue:\n try:\n next(curr_task._task)\n self._queue.push(curr_task)\n except StopIteration as e:\n curr_task._result = e.value\n return [t.get_result() for t in self._tasks]", "repo_name": "ma-petrov/toy-aio", "sub_path": "src/aio.py", "file_name": "aio.py", "file_ext": "py", "file_size_in_byte": 1363, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.Generator", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.Generator", "line_number": 8, "usage_type": "argument"}, {"api_name": "typing.Any", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Generator", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Generator", "line_number": 41, "usage_type": "argument"}, {"api_name": "queues.SimpleQueue", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "71117402613", "text": "import pandas as pd\nimport numpy as np\nimport librosa as lr\n\nfrom librosa import feature\nfrom maad import sound, features\nfrom maad.util import power2dB\nfrom joblib import load\n\n\ntarget_fs = 10000\n\n\ndef transform(s, fs, wtype='hann', nperseg=512, noverlap=0, db_range=60, db_gain=30):\n \"\"\" Compute decibel spectrogram \"\"\"\n Sxx, tn, fn, ext = sound.spectrogram(s, fs, wtype, nperseg, noverlap) \n #Sxx = util.power2dB(Sxx, db_range, db_gain)\n Sxx = power2dB(Sxx, db_range, db_gain)\n return Sxx, tn, fn, ext\n\ndef preprocessing_audio(y, sr):\n\n \"\"\"\n Reading and extracting features\n \"\"\"\n\n s, fs = y, sr\n # resample\n s_resamp = sound.resample(s, fs, target_fs, res_type='kaiser_fast')\n\n # Mel-frequency cepstral coefficients (MFCCs)\n mfcc = feature.mfcc(y=s_resamp, sr=target_fs, n_mfcc=20, n_fft=1024, \n win_length=1024, hop_length=512, htk=True)\n mfcc = np.median(mfcc, axis=1)\n # format dataframe\n idx_names = ['mfcc_' + str(idx).zfill(2) for idx in range(1,mfcc.size+1)]\n row = pd.Series(mfcc, index=idx_names)\n \n # Format the output as an array for decomposition\n Sxx, tn, fn, ext = transform(s, fs, nperseg=1024, noverlap=512) #db_range, db_gain)\n # compute shape features\n shape, params = features.shape_features(Sxx)\n all_features = row.append(shape.squeeze(axis=0))\n\n return pd.DataFrame(all_features).T\n\ndef classification(instace):\n\n model_file_rain = \"apps/models/Rain_gbrt_20220703.pkl\"\n 
classifier_rain = load(model_file_rain)\n    result_rain = classifier_rain.predict(instace)\n\n    model_file_insects = \"apps/models/Insects_gbrt_20220703.pkl\"\n    classifier_insects = load(model_file_insects)\n    result_insects = classifier_insects.predict(instace)\n\n    model_file_birds = \"apps/models/Birds_gbrt_20220703.pkl\"\n    classifier_birds = load(model_file_birds)\n    result_birds = classifier_birds.predict(instace)\n\n    model_file_anthro = \"apps/models/Anthrophony_gbrt_20220703.pkl\"\n    classifier_anthro = load(model_file_anthro)\n    result_anthro = classifier_anthro.predict(instace)\n\n    if result_rain == 1:\n        return 'Geophony --> Rain 🌧️ 🌧️ 🌧️ 🌧️'\n    elif result_insects == 1:\n        return 'Biophony --> Insects 🦗 🐜 🦗 🐛'\n    elif result_birds == 1:\n        return 'Biophony --> Birds 🐦 🦜 🐦 '\n    elif result_anthro == 1:\n        return 'Anthropophony --> 🛵 🛩️ 🛵 🛩️'\n    else:\n        return 'Sound not detected ❌ 😖 ❌ 😖'\n\n", "repo_name": "maarojasga/FE_DS4A", "sub_path": "apps/classification.py", "file_name": "classification.py", "file_ext": "py", "file_size_in_byte": 2531, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "maad.sound.spectrogram", "line_number": 16, "usage_type": "call"}, {"api_name": "maad.sound", "line_number": 16, "usage_type": "name"}, {"api_name": "maad.util.power2dB", "line_number": 18, "usage_type": "call"}, {"api_name": "maad.sound.resample", "line_number": 29, "usage_type": "call"}, {"api_name": "maad.sound", "line_number": 29, "usage_type": "name"}, {"api_name": "librosa.feature.mfcc", "line_number": 32, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.median", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 37, "usage_type": "call"}, {"api_name": "maad.features.shape_features", "line_number": 42, "usage_type": "call"}, {"api_name": "maad.features", "line_number": 42, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 45, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 50, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 54, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 58, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "34700244593", "text": "#!/usr/bin/env python\n\n\"\"\"\nScript to clean up stale/failed correlator runs created by launchJobs.py.\n\"\"\"\n\nimport os\nimport re\nimport sys\nimport shlex\nimport argparse\nimport subprocess\n\nfrom lsl.misc import parser as aph\n\n\ndef run_command(cmd, node=None, cwd=None, quiet=False):\n    if node is None:\n        if type(cmd) is list:\n            pcmd = cmd\n        else:\n            pcmd = shlex.split(cmd)\n    elif cwd is None:\n        pcmd = ['ssh', '-t', '-t', node, 'bash -c \"%s\"' % cmd]\n    else:\n        pcmd = ['ssh', '-t', '-t', node, 'bash -c \"cd %s && %s\"' % (cwd, cmd)]\n    \n    outdev = subprocess.PIPE\n    if quiet:\n        outdev = open(os.devnull, 'wb')\n    p = subprocess.Popen(pcmd, stdout=outdev, stderr=outdev)\n    stdout, stderr = p.communicate()\n    status = p.returncode\n    if quiet:\n        outdev.close()\n    \n    return status, stdout, stderr\n\n\ndef get_directories(node):\n    status, dirnames, errors = run_command('ls -d -1 /tmp/correlator-*', node=node)\n    if status != 0:\n        dirnames = []\n    else:\n        dirnames = dirnames.decode(encoding='ascii', errors='ignore')\n        dirnames = dirnames.split('\\n')[:-1]\n        dirnames = [dirname.strip().rstrip() for dirname in dirnames]\n    return 
dirnames\n\n\ndef get_processes(node):\n status, processes, errors = run_command('ps aux | grep -e superCorrelator -e superPulsarCorrelator | grep bash | grep -v grep | grep -v ssh', node=node)\n if status != 0:\n processes = []\n else:\n processes = processes.decode(encoding='ascii', errors='ignore')\n processes = processes.split('\\n')[:-1]\n processes = [process.strip().rstrip() for process in processes]\n return processes\n\n\ndef get_directory_contents(node, dirname):\n status, filenames, errors = run_command('ls -d -1 %s/*' % dirname, node=node)\n if status != 0:\n filenames = []\n else:\n filenames = filenames.decode(encoding='ascii', errors='ignore')\n filenames = filenames.split('\\n')[:-1]\n filenames = [filename.strip().rstrip() for filename in filenames]\n return filenames\n\n\ndef remove_directory(node, dirname):\n status, _, errors = run_command('rm -rf %s' % dirname, node=node)\n return True if status == 0 else False\n\n\ndef main(args):\n for node in args.nodes:\n ## Find out which directories exist\n dirnames = get_directories(node)\n if len(dirnames) == 0:\n continue\n \n ## Create an entry for this node since there seems to be\n ## something to report\n status = {'dirnames' :[], \n 'processes':[],\n 'active' :{},\n 'progress' :{}}\n status['dirnames'] = dirnames\n \n ## Get running superCorrelator.py/superPulsarCorrelator.py processes\n status['processes'] = get_processes(node)\n \n ## For each process, get the configuration file being\n ## processes\n for process in status['processes']:\n dirname, cmdname = process.rsplit('&&', 1)\n _, dirname = dirname.split('cd', 1)\n dirname = dirname.strip().rstrip()\n cmdname, _ = cmdname.split('>', 1)\n _, configname = cmdname.rsplit(None, 1)\n status['active'][dirname] = configname\n \n ## For each directory, get the progress toward completion\n ## (the number of NPZ files) and the latest values from \n ## the logfile for the average time per integration and the\n ## estimated time remaining\n for dirname in status['dirnames']:\n ### Filenames inside the directory\n filenames = get_directory_contents(node, dirname)\n \n ### Count the number of .npz files and find the .log\n ### file\n nNPZ = 0\n logname = None\n configname = None\n for filename in filenames:\n filename = filename.strip().rstrip()\n _, ext = os.path.splitext(filename)\n if ext == '.npz':\n if filename.find('-vis2-bin') == -1:\n ## Standard files\n nNPZ += 1\n elif filename.find('-vis2-bin000') != -1:\n ## Binning mode files - we only want one bin\n nNPZ += 1\n elif ext == '.log':\n logname = filename\n elif ext[:7] == '.config':\n configname = os.path.basename(filename)\n \n ### Save\n status['progress'][dirname] = nNPZ\n \n ## Clean\n for dirname in status['dirnames']:\n nFiles = status['progress'][dirname]\n if dirname not in status['active']:\n if nFiles == 0:\n print(\"%s @ %s -> stale and empty\" % (node, dirname))\n if not args.dry_run:\n if remove_directory(node, dirname):\n print(\" removed\")\n else:\n print(\"%s @ %s -> stale and *not* empty\" % (node, dirname))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='removed stale/failed correlator runs', \n epilog=\"NOTE: The -n/--nodes option also supports numerical node ranges using the '~' character to indicate a decimal range. For example, 'lwaucf1~2' is expanded to 'lwaucf1' and 'lwaucf2'. 
The range expansion can also be combined with other comma separated entries to specify more complex node lists.\", \n        formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n        )\n    parser.add_argument('-n', '--nodes', type=aph.csv_hostname_list, default='localhost', \n                        help='comma separated lists of nodes to examine')\n    parser.add_argument('-d', '--dry-run', action='store_true', \n                        help='dry run; report but do not clean')\n    args = parser.parse_args()\n    main(args)\n    \n", "repo_name": "lwa-project/eLWA", "sub_path": "cleanFailedRuns.py", "file_name": "cleanFailedRuns.py", "file_ext": "py", "file_size_in_byte": 5986, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "21", "api": [{"api_name": "shlex.split", "line_number": 22, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.devnull", "line_number": 30, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 151, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 154, "usage_type": "attribute"}, {"api_name": "lsl.misc.parser.csv_hostname_list", "line_number": 156, "usage_type": "attribute"}, {"api_name": "lsl.misc.parser", "line_number": 156, "usage_type": "name"}]} +{"seq_id": "27707228957", "text": "from flask import Flask, render_template, request, redirect\nfrom threading import Thread\nimport sqlite3 as sql\nimport csv\nfrom Helpers.Helpers import server_begin\nfrom SlaveContainer import SlaveContainer\nimport logging\n\napp = Flask(__name__)\n\nstop_run = False\ntest_false = True\nsocket_usage = False\n\n\ndef socket_function(container: SlaveContainer):\n    container.start()\n    # first_copy\n    container.first_copy(container.master.get_open_orders())\n    # set variable for stop socket\n    set_stop_run.container = container\n    global socket_usage\n    socket_usage = True\n\n\ndef manual_run():\n    container = server_begin()\n    t1 = Thread(target=socket_function, args=(container,))\n    t1.start()\n    return \"Processing\"\n\n\n@app.route(\"/stop\", methods=['GET'])\ndef set_stop_run():\n    logger = logging.getLogger('cct')\n    global stop_run\n    if not stop_run:\n        logger.warning('You cannot stop without starting. 
Think about it :)')\n        return redirect(\"/\")\n    stop_run = False\n    set_stop_run.container.stop()\n    logger.info('WebSocket closed')\n    return redirect(\"/\", code=302)\n\n\n@app.route(\"/run\", methods=['GET'])\ndef run_process():\n    global stop_run\n    if stop_run:\n        logger = logging.getLogger('cct')\n        logger.warning('The Program already has been running')\n        return redirect(\"/\")\n    stop_run = True\n    manual_run()\n    return redirect(\"/\", code=302)\n\n\n@app.route('/master', methods=['POST'])\ndef master_form():\n    print(request.form['comment_content'])\n    print(request.form['comment_content2'])\n    print(request.form['comment_content3'])\n    with sql.connect(\"database.db\") as con:\n        cur = con.cursor()\n        cur.execute(\"INSERT INTO keys (name,key,secret,type) VALUES (?,?,?,?)\", (\n            request.form['comment_content3'], request.form['comment_content'], request.form['comment_content2'],\n            \"master\"))\n        con.commit()\n        print(\"Record successfully added\")\n\n    con.close()\n\n    return redirect(\"/\", code=302)\n\n\n@app.route('/delete_master')\ndef delete_master():\n    with sql.connect(\"database.db\") as con:\n        cur = con.cursor()\n        cur.execute(\"delete from keys where type='master'\")\n        con.commit()\n        print(\"Record successfully deleted\")\n    con.close()\n    return redirect(\"/\", code=302)\n\n\n@app.route('/delete_slave')\ndef delete_slave():\n    with sql.connect(\"database.db\") as con:\n        cur = con.cursor()\n        cur.execute(\"delete from keys where type='slave'\")\n        con.commit()\n        print(\"Record successfully deleted\")\n    con.close()\n    return redirect(\"/\", code=302)\n\n\n@app.route('/slave', methods=['POST'])\ndef slave_form():\n    print(request.form['comment_content'])\n    print(request.form['comment_content2'])\n    print(request.form['comment_content3'])\n    with sql.connect(\"database.db\") as con:\n        cur = con.cursor()\n        cur.execute(\"INSERT INTO keys (name,key,secret,type) VALUES (?,?,?,?)\", (\n            request.form['comment_content3'], request.form['comment_content'], request.form['comment_content2'],\n            \"slave\"))\n        con.commit()\n        print(\"Record successfully added\")\n    con.close()\n    return redirect(\"/\", code=302)\n\n\n@app.route('/')\ndef homepage():\n    global test_false\n\n    if test_false == True:\n        test_false = False\n\n    final = bool(test_false) ^ bool(stop_run)\n\n    con = sql.connect(\"database.db\")\n    con.row_factory = sql.Row\n\n    cur = con.cursor()\n    cur.execute(\"select * from keys where type='slave'\")\n    rows = cur.fetchall()\n\n    cur = con.cursor()\n    cur.execute(\"select * from keys where type='master'\")\n    rows2 = cur.fetchall()\n\n    slave_keys = []\n    slave_sec = []\n    master_key = []\n    master_sec = []\n\n    for row in rows:\n        slave_keys.append(row[\"key\"])\n        slave_sec.append(row[\"secret\"])\n\n    for row in rows2:\n        master_key.append(row[\"key\"])\n        master_sec.append(row[\"secret\"])\n\n    with open('config_files/config.csv', mode='w', newline='') as file:\n        writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n        writer.writerow(['Master API Key'] + master_key + [\"\"])\n        writer.writerow(['Master API Keys'] + master_sec + [\"\"])\n        writer.writerow(['Slave API Keys'] + slave_keys + [\"\"])\n        writer.writerow(['Slave API Secrets'] + slave_sec + [\"\"])\n\n    final_str = \"Yes\" if final else \"No\"\n    return render_template(\"home.html\", isRunning=\"Is App Running ? 
: \" + final_str, rows=rows, rows2=rows2)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', debug=True)\n", "repo_name": "MohammedRashad/Crypto-Copy-Trader", "sub_path": "api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 4547, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 174, "dataset": "github-code", "pt": "21", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "SlaveContainer.SlaveContainer", "line_number": 16, "usage_type": "name"}, {"api_name": "Helpers.Helpers.server_begin", "line_number": 27, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 73, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 84, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 100, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 100, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 101, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 101, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 102, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 102, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 106, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 106, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 111, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 123, "usage_type": "call"}, {"api_name": "sqlite3.Row", "line_number": 124, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 148, "usage_type": "call"}, {"api_name": "csv.QUOTE_MINIMAL", "line_number": 148, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 155, "usage_type": "call"}]} +{"seq_id": "28091027482", "text": "from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom . 
import views\nfrom django.contrib.auth import views as auth_views\n\n\nurlpatterns = [\n url(r'weather/(\\d+)$', views.weather, name='weather'),\n url(r'find_city$', views.find_city, name='find_city'),\n url(r'sign_up$', views.sign_up, name='sign_up'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'connexion$', views.connexion, name='connexion'),\n url(r'^sign_out$', views.sign_out, name='sign_out'),\n url(r'alerts$', views.alerts, name='alerts'),\n url(r'^delete_alert/(\\d+)$', views.delete_alert, name='delete_alert'),\n url(r'^create_alert/(\\d+)/([\\w\\ \\-]+)$', views.create_alert, name='create_alert'),\n url(r'^sign_in$', views.sign_in, name='sign_in'),\n\n]\n", "repo_name": "emmanuelderevel/meteo", "sub_path": "city/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 768, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "74682487411", "text": "#!/usr/bin/env python\nfrom __future__ import print_function\nfrom __future__ import division\n\n__author__ = 'Nick Dickens and Samantha Campbell'\n__copyright__ = 'Copyright 2015, Nicholas J. Dickens and Samantha J. Campbell'\n\n'''\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n'''\n\nimport pysam\nimport sys\nimport os.path\nimport argparse\n\n\nfrom numpy import random\nfrom numpy import mean\nfrom numpy import std\n\n#commandline input\nparser = argparse.ArgumentParser(description='Generates multiple bam files with simulated origin regions')\nparser.add_argument('--infile', required=True, help='the BAM file to use as input')\nparser.add_argument('--prefix', required=False, help='the prefix of the output file (outfiles will be prefix_region.bam), defaults to simulated')\nparser.add_argument('--window', required=False, help='the window size in bases (tile) for binning the read data, for speed defaults to 100')\nparser.add_argument('--bumpsize', required=False, help='the size of an origin bump (rather than just the SSR), defaults to 30kb')\n\nargs = parser.parse_args()\n\nfileE = args.infile\n\noriginBumpSize = 30000\nprobabilityWindow = 100\noutPrefix = 'simulated'\n\nif args.window:\n    probabilityWindow = int(args.window)\n\nif args.bumpsize:\n    originBumpSize = int(args.bumpsize)\n\nif args.prefix:\n    outPrefix = str(args.prefix)\n\n\n# connect to the bam files\n\ntry:\n    samfile = pysam.Samfile(fileE, \"rb\")\nexcept IOError as e:\n    print (\"Error: cannot open the sam file: %d %s\" % (e.errno, e.strerror ))\n    sys.exit(1)\nexcept:\n    print (\"Error opening the bam file: %s\" % sys.exc_info()[0])\n    sys.exit(1)\n\n# check the file is indexed\n# TO DO: Call the samtools indexing pysam function if it isn't indexed\nif not os.path.exists(fileE + \".bai\"):\n    sys.exit('ERROR: There is no index for ' + fileE + '!')\n\n\ndef writeAlignments(alignmentList, bamFileOut):\n    for alignment in alignmentList:\n        bamFileOut.write(alignment)\n    return\n\n\n# do original classification\n\n#TO DO: this was hard coded for the paper, add bedfile reader code and a comparison region\norigin = {'chr' : 'LmjF.36', 'start': 1110127, 'end':1116528}\noriginLength = (origin.get('end') - origin.get('start')) + 1\n\n#get all reads from the origin coordinates\n#originAlignmentsIterator = samfile.fetch(origin.get('chr') , origin.get('start')-1, origin.get('end')-1)\n\ndef countAlignmentStarts(alignments, regionStart, regionEnd):\n    counter = 0\n    for alignment in alignments:\n        if alignment.reference_start>=regionStart and alignment.reference_start<=regionEnd:\n            counter+=1\n    return counter\n\n\n\ndef probabilityReader (chr, start, end, samfile, window):\n    theseAlignments = samfile.fetch(chr, start, end)\n    totalStartsForRegion = countAlignmentStarts(theseAlignments, start, end)\n    if totalStartsForRegion == 0:\n        return 0\n\n    probabilityList = []\n    for base in range(start,end,window):\n        baseAlignments = samfile.fetch(chr, base, base+window)\n        thisCount = countAlignmentStarts(baseAlignments,base,base+window)\n        probabilityList.append(thisCount/totalStartsForRegion)\n    return probabilityList\n\ndef reweightProbabilityList (originProbabilities, genomeProbabilities):\n    #check both lists are the same length\n    if len(originProbabilities) != len(genomeProbabilities):\n        print (\">>>ERROR: \" + str(len(originProbabilities)) + \"!=\" + str(len(genomeProbabilities)) + \"\")\n        return 0\n    newProbabilities = []\n    #reweight to background genome\n    for i,j in zip(originProbabilities, genomeProbabilities):\n        #newProbabilities.append(i * j)\n        newProbabilities.append(i + j) # adding the probabilities is a better way to do it\n\n    newTotal = sum(newProbabilities)\n    #rescale so the total for the region is always 1\n    for x in range(0, len(newProbabilities),1):\n        newProbabilities[x] = newProbabilities[x]/newTotal\n\n    
return newProbabilities\n\ndef createFakeAlignment (alignmentList,probabilityList,fakeRegionStart,window,outfile):\n totalAlignments = len(alignmentList)\n spareAlignment = alignmentList[0]\n print (\"Name:\" +spareAlignment.query_name)\n for pIndex in range(0,len(probabilityList),1):\n newCoordinate = fakeRegionStart + (pIndex*window)\n howManyToFix = int((probabilityList[pIndex]*totalAlignments)+0.5)\n for i in range(0,howManyToFix,1):\n newAlignment = spareAlignment\n newAlignment.reference_start=newCoordinate\n #used the outfile because I had a problem with the iterator\n #TO DO: fix this\n outfile.write(newAlignment)\n return\n\n\n\noriginCentre = int(0.5+(origin.get('start')+(origin.get('end')-origin.get('start'))/2))\noriginStart = originCentre - int(0.5+(originBumpSize/2))\noriginAlignmentsIterator = samfile.fetch(origin.get('chr') , originStart, originStart+originBumpSize)\noriginAlignmentsList = []\n\nfor alignment in originAlignmentsIterator:\n originAlignmentsList.append(alignment)\n\n#print (\"???? OR ???? \" + str(len(originAlignmentsList)) + \" ?????????\")\n\n\n\n#list of target (non-origin strand-switch) regions\nnonOrigins = [\n {'chr' : 'LmjF.36', 'start': 155190, 'end':156279},\n {'chr' : 'LmjF.36', 'start': 778681, 'end' :779754},\n {'chr' : 'LmjF.36', 'start': 1413340, 'end':1414163},\n {'chr' : 'LmjF.36', 'start': 1607311, 'end':1608348},\n {'chr' : 'LmjF.36', 'start': 2078727, 'end':2082697},\n {'chr' : 'LmjF.36', 'start': 2468381, 'end':2470493}\n]\n\n\n#origin probabilites\noriginProbabilities = probabilityReader (origin.get('chr'),originStart, originStart+originBumpSize, samfile, probabilityWindow)\n\n\n# iterate through target regions\nfor location in nonOrigins:\n print(location)\n #open an output file\n outfileName = 'simulated_' + location.get('chr') + '_' + str(location.get('start')) + '-' + str(location.get('end')) + '.bam'\n outfile = pysam.AlignmentFile(outfileName, \"wb\", template=samfile)\n\n thisCentre = int(0.5+(location.get('start')+(location.get('end')-location.get('start'))/2))\n newStart = thisCentre - int(0.5+(originBumpSize/2))\n if newStart < 0:\n newStart = 0\n newEnd = newStart + originBumpSize\n #create a list of new alignments\n\n realRegionProbabilities = probabilityReader (location.get('chr'), newStart, newEnd, samfile, probabilityWindow)\n probabilityList = reweightProbabilityList (originProbabilities, realRegionProbabilities)\n createFakeAlignment (originAlignmentsList,probabilityList,newStart, probabilityWindow, outfile)\n\n for chr in samfile.references:\n if chr != origin.get('chr'):\n currentAlignments = samfile.fetch(chr)\n writeAlignments(currentAlignments, outfile)\n #pass\n\n else:\n currentAlignments = samfile.fetch(chr, 0, newStart)\n writeAlignments(currentAlignments , outfile )\n #writeAlignments(newAlignmentsList , outfile )\n\n\n chromEnd = samfile.lengths[samfile.references.index(chr)]\n if newEnd > chromEnd:\n newEnd = chromEnd\n currentAlignments = samfile.fetch(origin.get('chr'), newEnd, chromEnd)\n writeAlignments(currentAlignments , outfile )\n #close, sort and index the outfile\n outfile.close()\n pysam.sort(outfileName,outfileName+\"srt\")\n pysam.index(outfileName+\"srt.bam\")\n\n#this is for single file output\n#close it\n#outfile.close()\n#sort it\n#pysam.sort(outfileName,outfileName+\"srt\")\n#index it\n#pysam.index(outfileName+\"srt.bam\")\n\n\n", "repo_name": "CampbellSam/MFAseq-sensitivity", "sub_path": "generate_simulated_origins.py", "file_name": "generate_simulated_origins.py", "file_ext": "py", 
"file_size_in_byte": 7984, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 34, "usage_type": "call"}, {"api_name": "pysam.Samfile", "line_number": 61, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 64, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 66, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.path.exists", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 71, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 72, "usage_type": "call"}, {"api_name": "pysam.AlignmentFile", "line_number": 179, "usage_type": "call"}, {"api_name": "pysam.sort", "line_number": 211, "usage_type": "call"}, {"api_name": "pysam.index", "line_number": 212, "usage_type": "call"}]} +{"seq_id": "42566560482", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nnp.set_printoptions(precision=8)\n# set up the fiber values\na1 = 4 # input (\"fiber graded index core radius(um)\")\nalpha = 2.2 # input (\"profile power index\")\nGeO2_SiO2_dop = 0.05 # input (\"maximum dopant concentration % (GeSiO2)\")\nsteps = 100 # number of steps in the core area\n\nm = -GeO2_SiO2_dop / (steps ** alpha) # \"pendiente\" ecuacion\n\nx = np.arange(0, steps) # variable x ecuacion parabola\ny = np.zeros(steps) # variable y parabola (porcentajes)\n\nfor j in range(steps):\n y[j] = (m * (x[j] ** alpha) + GeO2_SiO2_dop) # porcentaje j\n\nplt.plot(x, y)\nplt.show()\n", "repo_name": "marioannier/FOdesign_optimization", "sub_path": "Graded_Index_Gen_dopPerc.py", "file_name": "Graded_Index_Gen_dopPerc.py", "file_ext": "py", "file_size_in_byte": 626, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.set_printoptions", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "29627781355", "text": "#!/usr/bin/env python3\n\"\"\"feedfinder: Find the Web feed for a Web page\nhttp://www.aaronsw.com/2002/feedfinder/\n\nUsage:\n feed(uri) - returns feed found for a URI\n feeds(uri) - returns all feeds found for a URI\n\n >>> import feedfinder\n >>> feedfinder.feed('scripting.com')\n 'http://scripting.com/rss.xml'\n >>>\n >>> feedfinder.feeds('scripting.com')\n ['http://delong.typepad.com/sdj/atom.xml',\n 'http://delong.typepad.com/sdj/index.rdf',\n 'http://delong.typepad.com/sdj/rss.xml']\n >>>\n\nCan also use from the command line. Feeds are returned one per line:\n\n $ python feedfinder.py diveintomark.org\n http://diveintomark.org/xml/atom.xml\n\nHow it works:\n 0. At every step, feeds are minimally verified to make sure they are really feeds.\n 1. If the URI points to a feed, it is simply returned; otherwise\n the page is downloaded and the real fun begins.\n 2. Feeds pointed to by LINK tags in the header of the page (autodiscovery)\n 3. 
<A> links to feeds on the same server ending in \".rss\", \".rdf\", \".xml\", or\n \".atom\"\n 4. <A> links to feeds on the same server containing \"rss\", \"rdf\", \"xml\", or \"atom\"\n 5. <A> links to feeds on external servers ending in \".rss\", \".rdf\", \".xml\", or\n \".atom\"\n 6. <A> links to feeds on external servers containing \"rss\", \"rdf\", \"xml\", or \"atom\"\n 7. Try some guesses about common places for feeds (index.xml, atom.xml, etc.).\n\"\"\"\n\n__version__ = \"2.0\"\n__date__ = \"2021-12-01\"\n__maintainer__ = \"Dominique Hazael-Massieux (dom@w3.org)\"\n__author__ = \"Mark Pilgrim (http://diveintomark.org)\"\n__copyright__ = \"Copyright 2002-4, Mark Pilgrim; 2006 Aaron Swartz, 2021 Dominique Hazael-Massieux\"\n__license__ = \"Python\"\n__credits__ = \"\"\"Abe Fettig for a patch to sort Syndic8 feeds by popularity\nAlso Jason Diamond, Brian Lalor for bug reporting and patches\"\"\"\n\n_debug = 0\n\nfrom html5lib import HTMLParser\nfrom lxml import etree\nimport html5lib, urllib.request, urllib.error, urllib.parse, re, sys, urllib.robotparser\n\nimport threading\nclass TimeoutError(Exception): pass\n\ndef timelimit(timeout):\n def internal(function):\n def internal2(*args, **kw):\n \"\"\"\n from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/473878\n \"\"\"\n class Calculator(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.result = None\n self.error = None\n\n def run(self):\n try:\n self.result = function(*args, **kw)\n except:\n self.error = sys.exc_info()\n\n c = Calculator()\n c.setDaemon(True) # don't hold up exiting\n c.start()\n c.join(timeout)\n if c.is_alive():\n raise TimeoutError\n if c.error:\n raise c.error[0](c.error[1])\n return c.result\n return internal2\n return internal\n\nif not dict:\n def dict(aList):\n rc = {}\n for k, v in aList:\n rc[k] = v\n return rc\n\ndef _debuglog(message):\n if _debug: print(message)\n\nclass URLGatekeeper:\n \"\"\"a class to track robots.txt rules across multiple servers\"\"\"\n def __init__(self):\n self.rpcache = {} # a dictionary of RobotFileParser objects, by domain\n self.urlopener = urllib.request.build_opener()\n self.version = \"feedfinder/\" + __version__ + \" https://github.com/w3c/feedvalidator/blob/main/feedfinder.py\"\n _debuglog(self.version)\n self.urlopener.addheaders = [('User-agent', self.version)]\n\n def _getrp(self, url):\n protocol, domain = urllib.parse.urlparse(url)[:2]\n if domain in self.rpcache:\n return self.rpcache[domain]\n baseurl = '%s://%s' % (protocol, domain)\n robotsurl = urllib.parse.urljoin(baseurl, 'robots.txt')\n _debuglog('fetching %s' % robotsurl)\n rp = urllib.robotparser.RobotFileParser()\n with self.urlopener.open(robotsurl) as response:\n rp.parse(response.read().decode(\"utf-8\").splitlines())\n try:\n rp.read()\n except:\n pass\n self.rpcache[domain] = rp\n return rp\n\n def can_fetch(self, url):\n rp = self._getrp(url)\n allow = rp.can_fetch(self.version, url)\n _debuglog(\"gatekeeper of %s says %s\" % (url, allow))\n return allow\n\n @timelimit(10)\n def get(self, url):\n if not self.can_fetch(url): return ''\n try:\n return self.urlopener.open(url).read()\n except:\n return ''\n\n_gatekeeper = URLGatekeeper()\n\nclass BaseParser(HTMLParser):\n def __init__(self, baseuri):\n HTMLParser.__init__(self, namespaceHTMLElements=False)\n self.links = []\n self.baseuri = baseuri\n\n def feed(self, data):\n root = self.parse(data)\n for child in root.iter('*'):\n if isinstance(child.tag, str):\n try:\n handler = getattr(self, \"do_\" + 
child.tag)\n                    handler(child)\n                except AttributeError:\n                    pass\n\n    def do_base(self, el):\n        if el.get('href'):\n            self.baseuri = el.get('href').strip()\n\n    def error(self, *a, **kw): pass # we're not picky\n\nclass LinkParser(BaseParser):\n    FEED_TYPES = ('application/rss+xml',\n                  'text/xml',\n                  'application/atom+xml',\n                  'application/x.atom+xml',\n                  'application/x-atom+xml')\n    def do_link(self, el):\n        if not el.get('rel'): return\n        rels = el.get('rel').lower().split()\n        if 'alternate' not in rels: return\n        if el.get('type').lower().strip() not in self.FEED_TYPES: return\n        if not el.get('href'): return\n        self.links.append(urllib.parse.urljoin(self.baseuri, el.get('href').strip()))\n\nclass ALinkParser(BaseParser):\n    def do_a(self, el):\n        if not el.get('href'): return\n        self.links.append(urllib.parse.urljoin(self.baseuri, el.get('href').strip()))\n\ndef makeFullURI(uri):\n    if uri.startswith('feed://'):\n        uri = 'http://' + uri.split('feed://', 1).pop()\n    for x in ['http', 'https']:\n        if uri.startswith('%s://' % x):\n            return uri\n    return 'http://%s' % uri\n\ndef getLinks(data, baseuri):\n    p = LinkParser(baseuri)\n    p.feed(data)\n    return p.links\n\ndef getALinks(data, baseuri):\n    p = ALinkParser(baseuri)\n    p.feed(data)\n    return p.links\n\ndef getLocalLinks(links, baseuri):\n    baseuri = baseuri.lower()\n    urilen = len(baseuri)\n    return [l for l in links if l.lower().startswith(baseuri)]\n\ndef isFeedLink(link):\n    if link.startswith('http://feeds.feedburner.com/'): return True\n    if link.endswith('/feeds/posts/default'): return True\n    return link[-4:].lower() in ('.rss', '.rdf', '.xml') or link[-5:].lower() in ('.atom', 'atom/', '/atom', '/feed')\n\ndef isXMLRelatedLink(link):\n    link = link.lower()\n    return link.count('rss') + link.count('rdf') + link.count('xml') + link.count('atom') + link.count('feed')\n\ndef couldBeFeedData(data):\n    data = data.lower()\n    if isinstance(data, str):\n        data = data.encode(\"utf-8\")\n    if data.count(b'<html'): return 0\n    return data.count(b'<rss') + data.count(b'<rdf') + data.count(b'<feed')\n\ndef isFeed(uri):\n    _debuglog('seeing if %s is a feed' % uri)\n    protocol = urllib.parse.urlparse(uri)\n    if protocol[0] not in ('http', 'https'): return 0\n    try:\n        data = _gatekeeper.get(uri)\n    except TimeoutError: # server down, give up\n        return False\n    return couldBeFeedData(data)\n\ndef sortFeeds(feed1Info, feed2Info):\n    # cmp() no longer exists in Python 3; this preserves its semantics\n    return (feed2Info['headlines_rank'] > feed1Info['headlines_rank']) - (feed2Info['headlines_rank'] < feed1Info['headlines_rank'])\n\ndef feeds(uri, all=False):\n    fulluri = makeFullURI(uri)\n    try:\n        data = _gatekeeper.get(fulluri)\n    except:\n        return []\n    # is this already a feed?\n    if couldBeFeedData(data):\n        return [fulluri]\n    # nope, it's a page, try LINK tags first\n    _debuglog('looking for LINK tags')\n    try:\n        feeds = getLinks(data, fulluri)\n    except:\n        feeds = []\n    _debuglog('found %s feeds through LINK tags' % len(feeds))\n    feeds = list(filter(isFeed, feeds))\n    if all or not feeds:\n        # no LINK tags, look for regular <A> links that point to feeds\n        _debuglog('no LINK tags, looking at A tags')\n        try:\n            links = getALinks(data, fulluri)\n        except:\n            links = []\n        locallinks = getLocalLinks(links, fulluri)\n        # look for obvious feed links on the same server\n        feeds.extend(list(filter(isFeed, list(filter(isFeedLink, locallinks)))))\n        if all or not feeds:\n            # look harder for feed links on the same server\n            feeds.extend(list(filter(isFeed, list(filter(isXMLRelatedLink, locallinks)))))\n        if all or not feeds:\n            # look for obvious feed links on another server\n            feeds.extend(list(filter(isFeed, list(filter(isFeedLink, links)))))\n        if all 
or not feeds:\n # look harder for feed links on another server\n feeds.extend(list(filter(isFeed, list(filter(isXMLRelatedLink, links)))))\n if all and not feeds:\n _debuglog('no A tags, guessing')\n suffixes = [ # filenames used by popular software:\n 'atom.xml', # blogger, TypePad\n 'index.atom', # MT, apparently\n 'index.rdf', # MT\n 'rss.xml', # Dave Winer/Manila\n 'index.xml', # MT\n 'index.rss' # Slash\n ]\n feeds.extend(list(filter(isFeed, [urllib.parse.urljoin(fulluri, x) for x in suffixes])))\n if hasattr(__builtins__, 'set') or 'set' in __builtins__:\n feeds = list(set(feeds))\n return feeds\n\ngetFeeds = feeds # backwards-compatibility\n\ndef feed(uri):\n #todo: give preference to certain feed formats\n feedlist = feeds(uri)\n if feedlist:\n return feedlist[0]\n else:\n return None\n\n##### test harness ######\n\ndef test():\n failed = []\n count = 0\n filename = 'html4-001.html'\n while 1:\n uri = 'http://diveintomark.org/tests/client/autodiscovery/' + filename\n with open(\"feedfinder-tests/%s\" % filename, 'rb') as f:\n data = f.read()\n if data.find(b'Atom autodiscovery test') == -1: break\n sys.stdout.write('.')\n sys.stdout.flush()\n count += 1\n links = getLinks(data, uri)\n if not links:\n print('\\n*** FAILED ***', uri, 'could not find link')\n failed.append(uri)\n elif len(links) > 1:\n print('\\n*** FAILED ***', uri, 'found too many links')\n failed.append(uri)\n else:\n feedfilename = links[0].split('/')[-1]\n if links[0].startswith('http://www.ragingplatypus.com/'):\n feedfilename = \"ragingplatypus/\" + feedfilename\n with open(\"feedfinder-tests/%s\" % feedfilename, 'rb') as atomf:\n atomdata = atomf.read()\n if atomdata.find(b'<link rel=\"alternate\"') == -1:\n print('\\n*** FAILED ***', uri, 'retrieved something that is not a feed')\n failed.append(uri)\n else:\n backlink = atomdata.split(b'href=\"').pop().split(b'\"')[0].decode('utf-8')\n if backlink != uri:\n print('\\n*** FAILED ***', uri, 'retrieved wrong feed, backlink set to ', backlink)\n failed.append(uri)\n if data.find(b'<link rel=\"next\" href=\"') == -1: break\n filename = data.split(b'<link rel=\"next\" href=\"').pop().split(b'\"')[0].split(b'/')[-1].decode('us-ascii')\n print()\n print(count, 'tests executed,', len(failed), 'failed')\n\nif __name__ == '__main__':\n args = sys.argv[1:]\n if args and args[0] == '--debug':\n _debug = 1\n args.pop(0)\n if args:\n uri = args[0]\n else:\n uri = 'http://diveintomark.org/'\n if uri == 'test':\n test()\n else:\n print(\"\\n\".join(getFeeds(uri)))\n", "repo_name": "w3c/feedvalidator", "sub_path": "feedfinder.py", "file_name": "feedfinder.py", "file_ext": "py", "file_size_in_byte": 12140, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 65, "dataset": "github-code", "pt": "21", "api": [{"api_name": "threading.Thread", "line_number": 62, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 64, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 72, "usage_type": "call"}, {"api_name": "urllib.request.request.build_opener", "line_number": 100, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 100, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 100, "usage_type": "name"}, {"api_name": "urllib.request.parse.urlparse", "line_number": 106, "usage_type": "call"}, {"api_name": "urllib.request.parse", "line_number": 106, "usage_type": "attribute"}, {"api_name": 
"urllib.request", "line_number": 106, "usage_type": "name"}, {"api_name": "urllib.request.parse.urljoin", "line_number": 110, "usage_type": "call"}, {"api_name": "urllib.request.parse", "line_number": 110, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 110, "usage_type": "name"}, {"api_name": "urllib.request.robotparser.RobotFileParser", "line_number": 112, "usage_type": "call"}, {"api_name": "urllib.request.robotparser", "line_number": 112, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 112, "usage_type": "name"}, {"api_name": "html5lib.HTMLParser", "line_number": 138, "usage_type": "name"}, {"api_name": "html5lib.HTMLParser.__init__", "line_number": 140, "usage_type": "call"}, {"api_name": "html5lib.HTMLParser", "line_number": 140, "usage_type": "name"}, {"api_name": "urllib.request.parse.urljoin", "line_number": 172, "usage_type": "call"}, {"api_name": "urllib.request.parse", "line_number": 172, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 172, "usage_type": "name"}, {"api_name": "urllib.request.parse.urljoin", "line_number": 177, "usage_type": "call"}, {"api_name": "urllib.request.parse", "line_number": 177, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 177, "usage_type": "name"}, {"api_name": "urllib.request.parse.urlparse", "line_number": 220, "usage_type": "call"}, {"api_name": "urllib.request.parse", "line_number": 220, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 220, "usage_type": "name"}, {"api_name": "urllib.request.parse.urljoin", "line_number": 277, "usage_type": "call"}, {"api_name": "urllib.request.parse", "line_number": 277, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 277, "usage_type": "name"}, {"api_name": "sys.stdout.write", "line_number": 303, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 303, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 304, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 304, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 333, "usage_type": "attribute"}]} +{"seq_id": "9804090153", "text": "import logging\nimport sys\n\nimport zmq\n\nlogging.basicConfig(stream=sys.stderr, level=logging.INFO)\n\n\nif __name__ == \"__main__\":\n context = zmq.Context()\n\n reader = context.socket(zmq.SUB)\n reader.connect(\"tcp://localhost:8100\")\n reader.setsockopt(zmq.SUBSCRIBE, b\"\")\n\n logging.info(\"reading from lb proxy\")\n while True:\n message = reader.recv()\n logging.info(\"processing message: %s\", message)\n", "repo_name": "tinylambda/keep", "sub_path": "module_zmq/zmq_pubsub_lb/reader_from_proxy.py", "file_name": "reader_from_proxy.py", "file_ext": "py", "file_size_in_byte": 425, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.basicConfig", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 6, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 6, "usage_type": "attribute"}, {"api_name": "zmq.Context", "line_number": 10, "usage_type": "call"}, {"api_name": "zmq.SUB", "line_number": 12, "usage_type": "attribute"}, {"api_name": "zmq.SUBSCRIBE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "25067601988", 
"text": "import random\nfrom flask_cors import CORS\nfrom flask import Flask, request\nfrom geopy.distance import geodesic\nimport mysql.connector\nfrom dotenv import load_dotenv\nimport os\nimport json\nimport copy\n\n\nload_dotenv(\".env\")\n\nconnection = mysql.connector.connect(\n host='127.0.0.1',\n port=3306,\n database=os.environ.get(\"database\"),\n user=os.environ.get(\"user\"),\n password=os.environ.get(\"password\"),\n autocommit=True\n)\ncursor = connection.cursor(buffered=True)\n\n\nclass Player:\n def __init__(self, nimi, airport):\n self.id = 1\n if len(players_list) > 0:\n self.id = players_list[-1].id + 1\n\n self.nimi = nimi\n self.airport = airport\n self.co2 = 0\n self.score = 0\n self.co2max = 100000\n self.matka = 0\n\n\napp = Flask(__name__)\nCORS(app)\napp.config[\"cors_headers\"] = \"content-type\"\n\n\ndef add_score_column():\n sql = 'SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = \"game\" AND COLUMN_NAME = \"score\";'\n cursor.execute(sql)\n result = cursor.fetchall()\n if len(result) == 0: # If score column does not exist, create it\n cursor.execute(\"delete from goal_reached\")\n cursor.execute(\"delete from game\")\n cursor.execute(\"alter table game add score int\")\n\n\n@app.route(\"/matkusta\", methods=[\"POST\"])\ndef matkustaa():\n if request.method == \"POST\":\n player_data = request.get_json(force=True)\n # print(player_data[\"playerName\"])\n # print(player_data)\n\n player_object = None\n\n for player in players_list:\n if player_data[\"playerId\"] == player.id:\n player_object = player\n break\n if player_object != None:\n player_object.matka += player_data[\"matka\"]\n player_object.co2 += player_data[\"co2\"]\n player_object.score = round(\n player_object.score + player_data[\"score\"], 1)\n player_object.airport = player_data[\"airport\"]\n return player_object.__dict__\n\n return {\"status\": \"Error\"}\n\n\n@app.route(\"/tarkista-maakoodi/<maakoodi>\")\ndef check_country(maakoodi):\n sql = f\"SELECT iso_country FROM airport WHERE iso_country ='{maakoodi}';\"\n cursor.execute(sql)\n result = cursor.fetchone()\n return {\"result\": result != None, \"isocode\": maakoodi}\n\n\n@app.route(\"/code/<maakoodi>\")\ndef get_country(maakoodi):\n sql = f\"select country.name, airport.name, ident, latitude_deg, longitude_deg, type from airport, country where airport.iso_country = country.iso_country and airport.iso_country='{maakoodi}';\"\n cursor.execute(sql)\n sql_answer = cursor.fetchall()\n if len(sql_answer) == 0:\n result = random.choice(all_airports)\n return list(result)\n result = random.choice(sql_answer)\n return list(result)\n\n\nplayers_list = []\n\n\n@app.route(\"/newplayer\", methods=[\"POST\"])\ndef new_player():\n\n if request.method == \"POST\":\n player_data = request.get_json(force=True)\n player = Player(player_data[\"playerName\"], player_data[\"airport\"])\n players_list.append(player)\n\n print(player_data[\"playerName\"])\n print(player_data)\n print(players_list)\n return player.__dict__\n\n\n@app.route(\"/laske-lennon-tiedot\", methods=[\"POST\"])\ndef laske_matka():\n if request.method == \"POST\":\n json_response = request.get_json(force=True)\n alkukentta = json_response[\"alkuLentokentta\"][3:5]\n nykynen_kentta = json_response[\"loppuLentokentta\"][3:5]\n airport_type = json_response[\"loppuLentokentta\"][5]\n return calculate_flight_info(alkukentta, nykynen_kentta, airport_type)\n\n\ndef calculate_flight_info(start_airport, end_airport, type_of_airport):\n matka = geodesic(start_airport, end_airport).km\n 
points_by_type = {\n        \"small_airport\": 10,\n        \"heliport\": 15,\n        \"closed\": -15,\n        \"medium_airport\": 20,\n        \"seaplane_base\": 30,\n        \"large_airport\": 45,\n        \"balloonport\": 90,\n    }\n\n    conversion = matka / 1000\n    score = points_by_type[type_of_airport] - conversion\n    co2 = 2 * matka\n\n    return {\"matka\": round(matka), \"score\": round(score, 2), \"co2\": round(co2)}\n\ncursor.execute(\"\"\"\nselect country.name, airport.name, ident, \n    latitude_deg, longitude_deg, type from \n    airport, country where \n    airport.iso_country = country.iso_country\"\"\")\nall_airports = cursor.fetchall()\n\n@app.route(\"/reset-score/<id>\")\ndef delete_player(id):\n    for player in players_list:\n        if player.id == int(id):\n            players_list.remove(player)\n            break\n    return {\"status\":\"Ok\"}\n\n\n\n@app.route(\"/airport/<numero>\")\ndef choose_airport(numero):\n    airport_buttons = []\n    while len(airport_buttons) < int(numero):\n        airport = random.choice(all_airports)\n        for s_airport in airport_buttons:\n            if s_airport[0] == airport[0]:\n                break\n            if airport[5] == \"closed\" and s_airport[5] == \"closed\":\n                break\n            alkukentta = airport_buttons[-1][3:5]\n            nykynen_kentta = airport[3:5]\n            matka = geodesic(alkukentta, nykynen_kentta).km\n            if matka > 5000:\n                break\n        else:\n            airport_buttons.append(airport)\n    return airport_buttons\n\n@app.route(\"/best-flight-path\", methods=[\"POST\"])\ndef best_flight_path():\n    if request.method == \"POST\":\n        json_response = request.get_json(force=True)\n        length = len(json_response[\"flightPaths\"]) - 2\n        for i in range(length):\n            global max_score\n            global best_path\n            max_score = None\n            best_path = None\n\n            def loop(arr, current, score, path):\n                global max_score \n                global best_path \n                if len(arr) == 0:\n                    if max_score == None or score > max_score:\n                        max_score = score\n                        best_path = path\n                    return\n                for rivi in arr[0]:\n                    end = rivi[0:2]\n                    stats = calculate_flight_info(current, end, rivi[2])\n                    # copy the path for each branch so every alternative is explored\n                    new_path = copy.deepcopy(path)\n                    new_path.append(rivi)\n                    loop(arr[1:], end, score + stats[\"score\"], new_path)\n\n            loop(json_response[\"flightPaths\"][i+1:i+3], json_response[\"flightPaths\"][i][0][0:2], 0, [])\n            if len(best_path) > 1: json_response[\"flightPaths\"][i+1] = [best_path[1]]\n\n        score = None\n        for airport in json_response[\"flightPaths\"][-1]:\n            stats = calculate_flight_info(json_response[\"flightPaths\"][-2][0][0:2], airport[0:2], airport[2])\n            if score == None or score < stats[\"score\"]:\n                score = stats[\"score\"]\n                json_response[\"flightPaths\"][-1] = [airport]\n\n        return json_response\n    \n\n@app.route(\"/save\", methods=[\"POST\"])\ndef update_sql():\n    if request.method == \"POST\":\n        id = 1\n        json_response = request.get_json(force=True)\n        cursor.execute(\"SELECT max(CAST(id AS INT)) FROM game\")\n        id_result = cursor.fetchone()[0]\n        if id_result != None:\n            id = int(id_result) + 1\n        \n        cursor.execute( f\"\"\"insert into game(id, screen_name, score, co2_consumed)\n                        value ({id}, \"{json_response[\"nimi\"]}\", {json_response[\"score\"]}, {json_response[\"co2\"]})\"\"\")\n\n        return {\"status\":\"Ok\", \"id\":id}\n\n\n@app.route(\"/scoreboard/\")\ndef send_scoreboard():\n    sql = \"select id, screen_name, score, co2_consumed from game order by score desc limit 100;\"\n    cursor.execute(sql)\n    result = cursor.fetchall()\n    return result\n\n@app.route(\"/scoreboard/<id>\")\ndef scoreboard_by_id(id):\n    sql = \"select id, screen_name, score, co2_consumed from game order by score desc;\"\n    cursor.execute(sql)\n    result = cursor.fetchall()\n    for riviNumero in range(len(result)):\n        if 
result[riviNumero][0] == id:\n players = result[max(riviNumero-20, 0):riviNumero+21]\n return {\"playerList\": players, \"startIndex\": riviNumero}\n return {\"error\": \"id not found\"}\n\n\nadd_score_column()\n\nif __name__ == \"__main__\":\n app.run(use_reloader=True, host=\"127.0.0.1\", port=3000)\n", "repo_name": "kassu11/flight-game-2", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8151, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 12, "usage_type": "call"}, {"api_name": "mysql.connector.connector.connect", "line_number": 14, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 14, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 14, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 17, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 18, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 19, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 39, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 92, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 104, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 104, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 117, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 117, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 118, "usage_type": "name"}, {"api_name": "geopy.distance.geodesic", "line_number": 126, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 164, "usage_type": "call"}, {"api_name": "geopy.distance.geodesic", "line_number": 172, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 181, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 181, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 182, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 182, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 201, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 220, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 220, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 222, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 222, "usage_type": "name"}]} +{"seq_id": "41705036781", "text": "from PIL import Image\n\ndef 
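Editor's note: the flight-game record above prices each leg from the geodesic distance between two airports. A minimal sketch of that calculation, assuming `(lat, lon)` tuples and reusing the record's own point table and its rough 2 kg CO2 per km factor; the sample coordinates are approximate and for illustration only:

```python
from geopy.distance import geodesic

# Subset of the record's points_by_type table.
POINTS_BY_TYPE = {"small_airport": 10, "large_airport": 45}

def flight_info(start, end, airport_type):
    matka = geodesic(start, end).km                      # great-circle distance in km
    score = POINTS_BY_TYPE[airport_type] - matka / 1000  # longer flights cost points
    co2 = 2 * matka                                      # the record's rough emission model
    return {"matka": round(matka), "score": round(score, 2), "co2": round(co2)}

# Approximate Helsinki -> Tallinn coordinates.
print(flight_info((60.317, 24.963), (59.413, 24.833), "large_airport"))
```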
find_dominant_color(filename):\n #Resizing parameters\n width, height = 150,150\n image = Image.open(filename)\n image = image.resize((width, height),resample = 0)\n #Get colors from image object\n pixels = image.getcolors(width * height)\n #Sort them by count number(first element of tuple)\n sorted_pixels = sorted(pixels, key=lambda t: t[0])\n #Get the most frequent color\n dominant_color = sorted_pixels[-1][1]\n return dominant_color\n\nprint(find_dominant_color('image.jpg'))", "repo_name": "sprasadik2010/colordetector", "sub_path": "dominentcolorcore.py", "file_name": "dominentcolorcore.py", "file_ext": "py", "file_size_in_byte": 529, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "PIL.Image.open", "line_number": 6, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "29412471073", "text": "import cv2\nimport numpy as np\nfrom simple_facerec import SimpleFacerec\nimport sys\n\n\nimport time\n\nimport sqlite3\n\nfrom tkinter import *\nfrom PIL import ImageTk,Image\nimport tkinter.messagebox\n\nimport smtplib\nimport os\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\nimport datetime\n\nroot=Tk()\nroot.geometry('500x570')\nframe = Frame(root, relief=RIDGE, borderwidth=2)\nframe.pack(fill=BOTH,expand=1)\nroot.title('Reconnaissance Facial')\nframe.config(background='light pink')\nlabel = Label(frame, text=\"Reconnaissance Facial\",bg='light pink',font=('Times 35 bold'))\nlabel.pack(side=TOP)\nfilename = ImageTk.PhotoImage(Image.open(\"C:\\\\miniprojet\\\\demo4.jpg\"))\nbackground_label = Label(frame,image=filename)\nbackground_label.pack(side=TOP)\n\n\ndef hel():\n help(cv2)\n\ndef Contri():\n tkinter.messagebox.showinfo(\"Contributors\",\"\\n1.Mayur Kadam\\n2. Abhishek Ezhava \\n3. 
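Editor's note: `Image.getcolors()` in the record above returns `None` when the image holds more distinct colors than `maxcolors`, which the record sidesteps by passing `width * height`. A `Counter` over the raw pixel data is an equivalent sketch without that edge case, assuming an RGB image:

```python
from collections import Counter
from PIL import Image

def dominant_color(filename, size=(150, 150)):
    # Downscale first so the count stays cheap, as the record does.
    image = Image.open(filename).convert("RGB").resize(size)
    counts = Counter(image.getdata())      # maps (r, g, b) -> frequency
    return counts.most_common(1)[0][0]     # never None, unlike getcolors()
```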
Rajendra Patil \\n\")\n\n\ndef anotherWin():\n tkinter.messagebox.showinfo(\"About\",'Driver Cam version v1.0\\n Made Using\\n-OpenCV\\n-Numpy\\n-Tkinter\\n In Python 3')\n \n \n\nmenu = Menu(root)\nroot.config(menu=menu)\n\nsubm1 = Menu(menu)\nmenu.add_cascade(label=\"Tools\",menu=subm1)\nsubm1.add_command(label=\"Open CV Docs\",command=hel)\n\nsubm2 = Menu(menu)\nmenu.add_cascade(label=\"About\",menu=subm2)\nsubm2.add_command(label=\"Driver Cam\",command=anotherWin)\nsubm2.add_command(label=\"Contributors\",command=Contri)\n\n\n\ndef exitt():\n exit()\n\n \ndef web():\n capture =cv2.VideoCapture(0)\n while True:\n ret,frame=capture.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF ==ord('q'):\n break\n capture.release()\n cv2.destroyAllWindows()\n\ndef webrec():\n cap = cv2.VideoCapture(0)\n\n sfr = SimpleFacerec()\n sfr.load_encoding_images(\"images/\")\n\n while True:\n _,frame = cap.read()\n\n\n face_locations, face_names = sfr.detect_known_faces(frame)\n\n for face_loc, name in zip(face_locations, face_names):\n y1, x2, y2, x1 = face_loc[0], face_loc[1], face_loc[2], face_loc[3]\n\n cv2.putText(frame, name,(x1, y1 - 10), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 200), 2)\n cv2.rectangle(frame, (x1,y1), (x2,y2), (0, 0, 200), 4)\n\n cv2.imshow(\"Frame\", frame)\n\n\n key = cv2.waitKey(1)\n if key == 27:\n break\n \n cap.release()\n cv2.destroyAllWindows()\n \n\ndef webdet():\n cap = cv2.VideoCapture(0)\n sfr = SimpleFacerec()\n sfr.load_encoding_images(\"images/\")\n\n while True:\n _,frame = cap.read()\n\n\n face_locations, face_names = sfr.detect_known_faces(frame)\n\n for face_loc, name in zip(face_locations, face_names):\n y1, x2, y2, x1 = face_loc[0], face_loc[1], face_loc[2], face_loc[3]\n\n cv2.putText(frame, name,(x1, y1 - 10), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 200), 2)\n cv2.rectangle(frame, (x1,y1), (x2,y2), (0, 0, 200), 4)\n\n cv2.imshow(\"Frame\", frame)\n\n print(\"press q to capture\", frame)\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n cv2.imwrite(\"capture image opencv.jpg\", frame)\n print(\"image captured\")\n break\n if key == 27: \n break\n \n cap.release()\n cv2.destroyAllWindows()\n\ndef send():\n dateTimeNow = datetime.datetime.now()\n\n message = MIMEMultipart()\n message['from'] = \"ayabenkhedher84@gmail.com\" \n message['to'] = \"daoudwissal2000@gmail.com \"\n Password = \"Wissalpqt1\"\n message['Subject'] = \"Face recognition app\"\n body = \"Welcome to my Face recognition app \" + str(dateTimeNow)\n \n image_open = open('capture image opencv.jpg' ,'rb').read()\n\n message.attach(MIMEText(body, 'html'))\n message.attach(MIMEImage(image_open, 'jpg', name= 'aya.jpg'))\n \n\n\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.starttls()\n server.login(message['From'], Password)\n server.sendmail(message['From'], message['To'], message.as_string())\n server.quit()\n\n os.system('face recognition')\n cv2.destroyAllWindows()\n\ndef data():\n try:\n sqliteConnection = sqlite3.connect('SQLite_data.db')\n cursor = sqliteConnection.cursor()\n print(\"Connected to SQLite\")\n\n cursor.execute(\"SELECT * from Person\")\n record = cursor.fetchall()\n profil = 0\n for row in record:\n First_name = row[0]\n Last_name = row[1]\n image = row[2]\n mail = row[3]\n print(\"First_name =\", row[0], \"Last_name =\", row[1], \"img = \" , row[2], \"mail =\" , row[3])\n print(\"Storing Person image on disk \\n\")\n\n cursor.close()\n\n except sqlite3.Error as error:\n print(\"Failed to read blob data from sqlite 
table\", error)\n finally:\n if sqliteConnection:\n sqliteConnection.close()\n print(\"sqlite connection is closed\")\n\n\n\n\n\n \nbut1=Button(frame,padx=5,pady=5,width=39,bg='white',fg='black',relief=GROOVE,command=web,text='Open Cam',font=('helvetica 15 bold'))\nbut1.place(x=5,y=104)\n\nbut2=Button(frame,padx=5,pady=5,width=39,bg='white',fg='black',relief=GROOVE,command=webrec,text='Open Cam & Detect',font=('helvetica 15 bold'))\nbut2.place(x=5,y=176)\n\nbut3=Button(frame,padx=5,pady=5,width=39,bg='white',fg='black',relief=GROOVE,command=webdet,text='Open Cam & Capture',font=('helvetica 15 bold'))\nbut3.place(x=5,y=250)\n\nbut4=Button(frame,padx=5,pady=5,width=39,bg='white',fg='black',relief=GROOVE,command=send,text='Send Email',font=('helvetica 15 bold'))\nbut4.place(x=5,y=322)\n\nbut5=Button(frame,padx=5,pady=5,width=39,bg='white',fg='black',relief=GROOVE,command=data,text='Read From Database',font=('helvetica 15 bold'))\nbut5.place(x=5,y=400)\n\nbut6=Button(frame,padx=5,pady=5,width=5,bg='white',fg='black',relief=GROOVE,text='EXIT',command=exitt,font=('helvetica 15 bold'))\nbut6.place(x=210,y=478)\n\n\n\nroot.mainloop()\n", "repo_name": "AyaBenKhedher/Reconnaissance-Facial", "sub_path": "Réalisation with Tkinter.py", "file_name": "Réalisation with Tkinter.py", "file_ext": "py", "file_size_in_byte": 5810, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "PIL.ImageTk.PhotoImage", "line_number": 30, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 30, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 30, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 30, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 39, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 43, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 43, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 69, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 77, "usage_type": "call"}, {"api_name": "simple_facerec.SimpleFacerec", "line_number": 79, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 91, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 91, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 92, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 94, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 97, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 106, "usage_type": "call"}, {"api_name": "simple_facerec.SimpleFacerec", "line_number": 107, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 119, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 120, "usage_type": "call"}, {"api_name": 
"cv2.imshow", "line_number": 122, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 125, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 127, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 137, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 137, "usage_type": "attribute"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 139, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 148, "usage_type": "call"}, {"api_name": "email.mime.image.MIMEImage", "line_number": 149, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 153, "usage_type": "call"}, {"api_name": "os.system", "line_number": 159, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 160, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 164, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 181, "usage_type": "attribute"}]} +{"seq_id": "42518043551", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom collections import namedtuple\nimport json\nimport logging\nimport time\nfrom collections import namedtuple\n\nfrom slackclient import SlackClient\n\n\nclass SlackBot(object):\n def __init__(self, settingsFilePath='settings.json'):\n self.s = {}\n self.sclient = {}\n ### Not necessary, moved to settings?\n # self.botcheck = '' #<@' + s['bot']['id'] + '>: '\n with file(settingsFilePath, 'r') as settingsfile:\n self.s = json.load(settingsfile)\n\n logging.debug(self.s)\n self.lastping = int(time.time())\n self.sclient = SlackClient(self.s[\"token\"])\n\n self.CommandBody = namedtuple('CommandBody', 'action help')\n self.botcheck = '' # <@' + s['bot']['id'] + '>: '\n self.commands = {}\n\n#\"\"\"{\t u'channel': u'G1FS1CJ84',\n# u'team': u'T05311JTT',\n# u'text': u'<@U1FRJ3WMU>: lol',\n# u'ts': u'1465583194.000034',\n# u'type': u'message',\n# u'user': u'U0LJ6Q4S0'}\"\"\" ### Typical structure of a command packet\n\n\n def help(self, msg):\n output = self.sclient.api_call('chat.postMessage',\n as_user='true',\n channel=msg['channel'],\n text=self.commands[\"help\"].help)\n logging.debug(output)\n\n def generateHelp(self):\n helptext = 'Commands are:\\n'\n for c in self.commands:\n helptext += \"\\t\" + self.botcheck + self.commands[c].help + \"\\n\"\n helptext += \"\\t\" + self.botcheck + \"help [this help text]\\n\"\n self.commands['help'] = self.CommandBody(action=self.help, help=helptext)\n\n\n def get_bot_id(self):\n api_call = self.sclient.api_call(\"users.list\")\n if api_call.get('ok'):\n # retrieve all users so we can find our bot\n users = api_call.get('members')\n for user in users:\n if 'name' in user and user.get('name') == self.s[\"bot\"][\"name\"]:\n self.s[\"bot\"][\"id\"] = user.get('id')\n self.botcheck = '<@' + self.s['bot']['id'] + '>: '\n return ({user['name']: user.get('id')})\n else:\n return \"could not find bot user with the name \" + s[\"bot\"][\"name\"]\n\n\n def autoping(self,last):\n ### hardcode the interval to 3 seconds\n now = int(time.time())\n if last + 3 < now:\n self.sclient.server.ping()\n return now\n\n\n def addCommand(self, command, action, help):\n self.commands[command] = self.CommandBody(action=action, help=help)\n\n def sendReply(self, msg):\n text = msg['text'][len(self.botcheck):]\n pos = len(text)\n try:\n pos = text.index(' ')\n except 
ValueError:\n pass\n\n cmd = text[:pos] # grab command string up to first space\n logging.info('cmd =\"' + cmd + '\"')\n if cmd in self.commands:\n self.commands[cmd].action(msg)\n\n\n def monitor(self):\n #self.sclient = SlackClient(s[\"token\"])\n\n logging.info(\"Connecting as \" + self.s[\"bot\"][\"name\"])\n if self.sclient.rtm_connect():\n logging.info(\"...Connected!\")\n logging.debug(self.get_bot_id())\n self.generateHelp()\n last_ping = int(time.time())\n while True:\n messages = self.sclient.rtm_read()\n # logging.debug(messages)\n last_ping = self.autoping(last_ping)\n for message in messages:\n if all(k in message for k in ('type', 'text')) \\\n and message['type'] == 'message' \\\n and 'bot_id' not in message \\\n and self.botcheck in message['text']:\n logging.debug(message)\n self.sendReply(message)\n time.sleep(1)\n else:\n logging.info(\"Connection Failed, invalid token?\")\n", "repo_name": "mightypenguin/qotd", "sub_path": "slackbot.py", "file_name": "slackbot.py", "file_ext": "py", "file_size_in_byte": 4015, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "json.load", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 23, "usage_type": "call"}, {"api_name": "time.time", "line_number": 24, "usage_type": "call"}, {"api_name": "slackclient.SlackClient", "line_number": 25, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 27, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 44, "usage_type": "call"}, {"api_name": "time.time", "line_number": 70, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 88, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 96, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 98, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 99, "usage_type": "call"}, {"api_name": "time.time", "line_number": 101, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 111, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 113, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "70180986292", "text": "from django.contrib import admin\nfrom .models import Order\n\n\nclass OrderAdmin(admin.ModelAdmin):\n list_display = ('order_number', 'status', 'user_products', 'notes', 'full_name', 'address', 'address2', 'city', 'state', 'zip_code',\n 'email', 'country', 'created_at', 'total')\n list_per_page = 10\n search_fields = ['email']\n empty_value_display = 'NA'\n list_filter = ('status', 'email',)\n\nadmin.site.register(Order, OrderAdmin)\n\nadmin.site.site_header = 'UprintMate管理后台' # 设置header\nadmin.site.site_title = 'UprintMate管理后台' # 设置title\nadmin.site.index_title = 'UprintMate管理后台'\n", "repo_name": "rockking00/printshirtdiy", "sub_path": "cart/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 642, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 5, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Order", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 13, "usage_type": 
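Editor's note: SlackBot above keeps its handlers in a dict of namedtuples and dispatches on the first token of the message text. That pattern in isolation, stdlib only:

```python
from collections import namedtuple

CommandBody = namedtuple("CommandBody", "action help")

commands = {
    "ping": CommandBody(action=lambda msg: "pong", help="ping [liveness check]"),
}

def dispatch(text):
    cmd = text.split(" ", 1)[0]          # the first word picks the handler
    entry = commands.get(cmd)
    return entry.action(text) if entry else None

assert dispatch("ping some argument") == "pong"
```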
"attribute"}, {"api_name": "django.contrib.admin", "line_number": 13, "usage_type": "name"}, {"api_name": "django.contrib.admin.site", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 15, "usage_type": "name"}, {"api_name": "django.contrib.admin.site", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 16, "usage_type": "name"}, {"api_name": "django.contrib.admin.site", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "13422177637", "text": "import argparse\nimport yaml\nimport munch\nimport jittor as jt\nimport dataset_svr.dataset_shapenet as dataset_shapenet\n\n\n\ndef build_dataset(args):\n # Create Datasets\n dataset_train = dataset_shapenet.ShapeNet(args, train=True)\n dataset_test = dataset_shapenet.ShapeNet(args, train=False)\n\n # Create dataloaders\n dataset_train.set_attrs(\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=int(args.workers)\n )\n\n dataset_test.set_attrs(\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=int(args.workers)\n )\n\n len_dataset = dataset_train.total_len\n len_dataset_test = dataset_test.total_len\n print('Length of train dataset:%d', len_dataset)\n print('Length of test dataset:%d', len_dataset_test)\n\n return dataset_train, dataset_test\n\ndef build_dataset_val(args):\n\n # Create Datasets\n dataset_test = dataset_shapenet.ShapeNet_val(args, train=False)\n\n # Create dataloaders\n dataset_test.set_attrs(\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=int(args.workers)\n )\n\n len_dataset_test = dataset_test.total_len\n print('Length of test dataset:%d', len_dataset_test)\n\n return dataset_test\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Train config file')\n parser.add_argument('-c', '--config', help='path to config file', required=True)\n parser.add_argument('-gpu', '--gpu_id', help='gpu_id', required=True)\n arg = parser.parse_args()\n config_path = arg.config\n args = munch.munchify(yaml.safe_load(open(config_path)))\n dataloader_train, dataloader_test = build_dataset(args)\n", "repo_name": "AllenXiangX/SPD_jittor", "sub_path": "svr/dataset_svr/trainer_dataset.py", "file_name": "trainer_dataset.py", "file_ext": "py", "file_size_in_byte": 1667, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "21", "api": [{"api_name": "dataset_svr.dataset_shapenet.ShapeNet", "line_number": 11, "usage_type": "call"}, {"api_name": "dataset_svr.dataset_shapenet", "line_number": 11, "usage_type": "name"}, {"api_name": "dataset_svr.dataset_shapenet.ShapeNet", "line_number": 12, "usage_type": "call"}, {"api_name": "dataset_svr.dataset_shapenet", "line_number": 12, "usage_type": "name"}, {"api_name": "dataset_svr.dataset_shapenet.ShapeNet_val", "line_number": 37, "usage_type": "call"}, {"api_name": "dataset_svr.dataset_shapenet", "line_number": 37, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 52, "usage_type": "call"}, {"api_name": "munch.munchify", "line_number": 57, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "1129786859", "text": "import requests\nimport json\n\nAPI_KEY = 'api_key'\nAPI_ID = 'api_id'\n\nLANGUAGE = 'en-gb'\nURL = 'https://od-api.oxforddictionaries.com/api/v2/entries/en-gb/%s' \n\nMINUTE_LIMIT = 60\n\ndef search_word(word):\n\n\tdefinitions = 
[]\n\n\tr = requests.get(URL %(word.lower()), headers = {'app_id' : API_ID, 'app_key' : API_KEY})\n\n\tif r.status_code == 200:\n\t\tresp = r.json()\n\n\t\tfor result in resp[\"results\"]:\n\t\t\tfor lex in result[\"lexicalEntries\"]:\n\t\t\t\tfor entry in lex[\"entries\"]:\n\t\t\t\t\tfor sense in entry[\"senses\"]:\n\t\t\t\t\t\tif \"definitions\" in sense:\n\t\t\t\t\t\t\tfor definition in sense[\"definitions\"]:\n\t\t\t\t\t\t\t\tdefinitions.append(definition)\n\n\tlean_definitions = definitions[:3] if len(definitions) >= 3 else definitions\n\tdefinition_str = '; '.join(lean_definitions)\n\n\treturn definition_str\n\n", "repo_name": "dergenc/kindle-clipper", "sub_path": "dictionary_api.py", "file_name": "dictionary_api.py", "file_ext": "py", "file_size_in_byte": 770, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "requests.get", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "22488182594", "text": "from keras.models import Sequential, Model\nfrom keras import layers\nfrom keras import Input\nimport numpy as np\n\n\n# 前面学过的Sequential 模型\nseq_model = Sequential()\nseq_model.add(layers.Dense(32, activation='relu', input_shape=(64,)))\nseq_model.add(layers.Dense(32, activation='relu'))\nseq_model.add(layers.Dense(10, activation='softmax'))\n\n# 对应的函数式API 实现\ninput_tensor = Input(shape=(64,))\nx = layers.Dense(32, activation='relu')(input_tensor)\nx = layers.Dense(32, activation='relu')(x)\noutput_tensor = layers.Dense(10, activation='softmax')(x)\nmodel = Model(input_tensor, output_tensor) # Model 类将输入张量和输出张量转换为一个模型\nprint(model.summary())\n\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy')\nx_train = np.random.random((1000, 64))\ny_train = np.random.random((1000, 10))\nmodel.fit(x_train, y_train, epochs=10, batch_size=128)\nscore = model.evaluate(x_train, y_train)\nprint(score)", "repo_name": "ThomasCai/ML-books-code-inAction", "sub_path": "deep-learning-with-python/ch07/7.1.1_intro_api.py", "file_name": "7.1.1_intro_api.py", "file_ext": "py", "file_size_in_byte": 953, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "keras.models.Sequential", "line_number": 8, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 9, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 9, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 10, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 10, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 11, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 11, "usage_type": "name"}, {"api_name": "keras.Input", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 15, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 15, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 16, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 16, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 17, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 17, "usage_type": "name"}, {"api_name": "keras.models.Model", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 23, 
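Editor's note: the dictionary record above walks Oxford's v2 response through results → lexicalEntries → entries → senses → definitions. The same walk compacted into one comprehension; the credentials are placeholders to be supplied by the caller:

```python
import requests

def first_definitions(word, app_id, app_key, limit=3):
    url = f"https://od-api.oxforddictionaries.com/api/v2/entries/en-gb/{word.lower()}"
    r = requests.get(url, headers={"app_id": app_id, "app_key": app_key})
    if r.status_code != 200:
        return ""
    definitions = [d
                   for result in r.json()["results"]
                   for lex in result["lexicalEntries"]
                   for entry in lex["entries"]
                   for sense in entry["senses"]
                   for d in sense.get("definitions", [])]  # some senses lack definitions
    return "; ".join(definitions[:limit])
```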
"usage_type": "call"}, {"api_name": "numpy.random", "line_number": 23, "usage_type": "attribute"}]} +{"seq_id": "39137628158", "text": "import pymysql.cursors\nimport izyrtm_prop\n\ndef getConn():\n connection = pymysql.connect(host=izyrtm_prop.db_host,\n user=izyrtm_prop.db_user,\n password=izyrtm_prop.db_pw,\n db=izyrtm_prop.db_datadbase,\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n return connection\n\ndef getBotList():\n conn = getConn()\n\n try:\n with conn.cursor() as cursor:\n #sql = 'SELECT * FROM tb_bot WHERE email = %s'\n sql = 'SELECT * FROM tb_bot'\n #cursor.execute(sql, ('test@test.com',))\n cursor.execute(sql)\n result = cursor.fetchall()\n #print(result)\n # (1, 'test@test.com', 'my-passwd')\n #print('select list start')\n #for i in result:\n # print(i)\n #seq_no,bot_key,bot_token,bot_type,bot_title,topic_name,user_list,use_yn,reg_dt,mod_dt\n #print(str(i['seq_no'])+' / '+str(i['bot_key'])+' / '+str(i['bot_token'])+' / '+str(i['bot_type'])+' / '+str(i['bot_title'])+' / '+str(i['topic_name'])+' / '+str(i['user_list'])+' / '+str(i['use_yn']))\n #print(\"\\n\")\n #print('select list end')\n return result\n finally:\n conn.close()\n\n\nif __name__ == '__main__':\n getBotList()", "repo_name": "izyrtm/izyrtm-server", "sub_path": "rtmBot/izyrtm_db.py", "file_name": "izyrtm_db.py", "file_ext": "py", "file_size_in_byte": 1296, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pymysql.cursors.connect", "line_number": 5, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 5, "usage_type": "name"}, {"api_name": "izyrtm_prop.db_host", "line_number": 5, "usage_type": "attribute"}, {"api_name": "izyrtm_prop.db_user", "line_number": 6, "usage_type": "attribute"}, {"api_name": "izyrtm_prop.db_pw", "line_number": 7, "usage_type": "attribute"}, {"api_name": "izyrtm_prop.db_datadbase", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pymysql.cursors.cursors", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pymysql.cursors", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "74027515252", "text": "import requests\nimport logging\nimport json\n\nfrom os import environ, makedirs\nfrom datetime import datetime\nfrom time import sleep\nfrom uuid import uuid4\n\n\nAPI_URL = environ.get('API_URL', 'https://api.chucknorris.io/jokes/random')\nLAKE_PATH = environ.get('LAKE_PATH', './output/jokes')\nLOG_PATH = environ.get('LOG_PATH', './logs')\nHOW_MANY = int(environ.get('HOW_MANY', '10'))\nSLEEP_TIME = int(environ.get('SLEEP_TIME', '1'))\n\n\ndef init():\n makedirs(LOG_PATH, exist_ok=True)\n makedirs(LAKE_PATH, exist_ok=True)\n logging.basicConfig(\n filename=log_file(),\n filemode='a',\n format='[%(asctime)s][%(name)s][%(levelname)s] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.DEBUG\n )\n\n\ndef log_file():\n app_name, app_version = 'app-extractor', '1.0.0'\n date = datetime.utcnow().strftime(\"%Y%m%d%H%M%S\")\n return f'{LOG_PATH}/{app_name}.{app_version}.{date}.log'\n\n\ndef save(data):\n if data:\n file_name = f'{LAKE_PATH}/{uuid4()}.json'\n logging.debug(f'Writing file \"{file_name}\"')\n with open(file_name, 'w') as outfile:\n json.dump(data, outfile)\n\n\ndef request():\n data = requests.get(API_URL).json()\n if data:\n data['persisted_at'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')\n logging.debug(f'Response Data: {data}')\n return data\n\n\nif __name__ == '__main__':\n\n init()\n logging.info(f'Starting {HOW_MANY} 
executions...')\n\n for i in range(HOW_MANY):\n count = f'({i+1}/{HOW_MANY})'\n\n logging.info(f'{count} Requesting data...')\n data = request()\n\n logging.info(f'{count} Saving data...')\n save(data)\n\n logging.info(f'{count} Done!')\n sleep(SLEEP_TIME)\n", "repo_name": "avcaliani/airflow-app", "sub_path": "jobs/app-extractor/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1730, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.environ.get", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 19, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 26, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 38, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 39, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 41, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 55, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 63, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 66, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "74068545333", "text": "import sqlite3\nfrom sqlite3 import Error\n\nimport pandas as pd\n\n# For help:\n# https://realpython.com/python-sql-libraries/#using-python-sql-libraries-to-connect-to-a-database\n# https://docs.python.org/2/library/sqlite3.html\n\nclass SQLiteDB:\n\n def __init__(self, path_to_db):\n self.path_to_db = path_to_db\n self.connection = self.create_connection()\n\n\n def create_connection(self):\n connection = None\n try:\n connection = sqlite3.connect(self.path_to_db)\n print(\"Connection to SQLite DB successful\")\n except Error as e:\n print(f\"The error '{e}' occurred\")\n\n return connection\n\n\n def create_cursor(self):\n cursor = None\n try:\n cursor = self.connection.cursor()\n print(\"SQLite DB cursor created successfully\")\n except Error as e:\n print(f\"The error '{e}' occurred\")\n \n return cursor\n\n\n def create_table(self, cursor, table_name, table_columns):\n\n formatted_table_columns = ''\n\n # Convert table_columns dict to string 
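Editor's note: the extractor above stamps each payload with `persisted_at` and writes it under a uuid4 filename so repeated runs never collide. The save path condensed into a standalone sketch:

```python
import json
import os
from datetime import datetime
from uuid import uuid4

def save(data, lake_path="./output/jokes"):
    os.makedirs(lake_path, exist_ok=True)
    data["persisted_at"] = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
    file_name = f"{lake_path}/{uuid4()}.json"   # random name avoids collisions
    with open(file_name, "w") as outfile:
        json.dump(data, outfile)
    return file_name

print(save({"value": "example payload"}))
```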
with format '(item_1 type, item_2 type, ..., item_n type)' \n for column_name, column_type in table_columns.items():\n column_name_type = column_name + ' ' + column_type\n formatted_table_columns += column_name_type + ','\n formatted_table_columns = '(' + formatted_table_columns[:-1] + ')'\n\n # Create table in DB\n try:\n cursor.execute(f\"CREATE TABLE {table_name} {formatted_table_columns}\")\n print(f\"SQLite DB table '{table_name}' created successfully\")\n\n except Error as e:\n print(f\"The error '{e}' occurred\")\n\n\n def insert_row(self, cursor, table_name, row_items):\n\n formatted_row_items = ''\n\n # Convert row_items list to string with format '(item_1,item_2, ..., item_n)' \n for item in row_items:\n if isinstance(item, str): # Keep quotes on strings: \"'string'\"\n formatted_row_items += f\"'{item}',\"\n else:\n formatted_row_items += f\"{item},\"\n formatted_row_items = '(' + formatted_row_items[:-1] + ')'\n\n try:\n cursor.execute(f\"INSERT INTO {table_name} VALUES {formatted_row_items}\")\n print(f\"SQLite DB row in '{table_name}' created successfully\")\n except Error as e:\n print(f\"The error '{e}' occurred\")\n\n\n def commit_changes(self):\n self.connection.commit()\n\n \n def close_connection(self):\n self.connection.close()\n\n \n def export_table_to_excel(self, table_name):\n data = pd.read_sql_query(f\"SELECT * FROM {table_name}\", self.connection)\n exported = False\n while exported == False:\n try:\n data.to_excel(f\"{table_name}.xlsx\", index=False)\n exported = True\n except PermissionError:\n input(f\"PermissionError. Close the book '{table_name}.xlsx' and press any key:\")\n", "repo_name": "FARBEX97/coffeece", "sub_path": "coffeece/SQLiteDB.py", "file_name": "SQLiteDB.py", "file_ext": "py", "file_size_in_byte": 2995, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sqlite3.connect", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlite3.Error", "line_number": 33, "usage_type": "name"}, {"api_name": "sqlite3.Error", "line_number": 54, "usage_type": "name"}, {"api_name": "sqlite3.Error", "line_number": 73, "usage_type": "name"}, {"api_name": "pandas.read_sql_query", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "4116639120", "text": "#!/usr/bin/env python3\nimport os\nimport base64\nimport json\nimport dotenv\nfrom alpaca_news import AlpacaNews\nfrom alpaca_ticker import AlpacaTicker\nfrom prediction import Prediction\nfrom sentiment import Sentiment\nfrom typing import Any, Dict, List, TypeVar\nfrom azure.cosmos import CosmosClient\nfrom delta import Delta\n\nT = TypeVar('T')\n\nclass Cosmos():\n \"\"\"Cosmos DB wrapper\"\"\"\n\n def __init__(self):\n self._internalClient = self._cosmosCreateInstance()\n database_name = 'sentiments'\n self._database = self._internalClient.get_database_client(database_name)\n self.prediction_container_name = \"predictions\"\n self.article_container_name = \"articles\"\n self.sentiment_container_name = \"sentiments\"\n self.ticker_container_name = \"tickers\"\n self.delta_name = \"deltas\"\n # container_name = 'Items'\n # self._container = self._database.get_container_client(container_name)\n\n def _cosmosCreateInstance(self) -> CosmosClient:\n \"\"\"[PRIVATE] Return new instance of cosmosDB - reading api credentials from environment\n\n Returns:\n CosmosClient: An instance of CosmosDB Api Client\n \"\"\"\n dotenv.load_dotenv()\n encrypted_creds: str = 
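Editor's note: SQLiteDB.insert_row above quotes string values by hand while building the INSERT statement. sqlite3's `?` placeholders handle quoting and typing for you; a sketch of the safer variant:

```python
import sqlite3

connection = sqlite3.connect(":memory:")
cursor = connection.cursor()
cursor.execute("CREATE TABLE coffee (name TEXT, price REAL)")

row = ("arabica", 9.5)
placeholders = ",".join("?" * len(row))     # one "?" per column
cursor.execute(f"INSERT INTO coffee VALUES ({placeholders})", row)
connection.commit()
print(cursor.execute("SELECT * FROM coffee").fetchall())
```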
os.environ[\"COSMOS_CREDS\"]\n decoded: bytes = base64.b64decode(encrypted_creds)\n json_dict = json.loads(decoded)\n return CosmosClient(\n json_dict[\"url\"],\n json_dict[\"primary_key\"],\n )\n\n def write(self, items: List[any]) -> None:\n \"\"\"Write items of the generic type T in the database\n this function will handle contain resolution based on\n the type of T and return the result \n\n Args:\n items (List[Generic[T]]): The list of items to be written.\n\n Unit Tests:\n \"\"\"\n container = self._getContainer(item_type=type(items[0]))\n client = self._database.get_container_client(container)\n for item in items:\n client.upsert_item(item.__dict__)\n\n def read(\n self,\n item_type: type,\n filter_injection: str=None,\n skip: int=None,\n limit: int=None,\n ) -> List[T]:\n \"\"\"Read items of Generic type [T] from the database\n\n Args:\n injection (str): injected SQL query\n type (Generic[T]): type of item to read\n\n Pagination\n skip (int): start at \"skip\" entries from the beginning\n limit (int): return \"limit\" number of results from the database \n\n Returns:\n List[Generic[T]]: A list of desired items\n \n Unit Tests:\n \"\"\"\n \n # Get container details to read data from\n container = self._getContainer(item_type)\n container_client = self._database.get_container_client(container)\n\n whereClause = \"\"\n if filter_injection:\n whereClause = f\"WHERE {filter_injection}\"\n \n offset = \"\"\n if offset:\n offset = f\"OFFSET {skip}\"\n\n range = \"\"\n if limit:\n range = f\"LIMIT {limit}\"\n\n results: Any = container_client.query_items(\n f\"\"\"\n SELECT * FROM {container}\n {whereClause}\n {offset}\n {range}\n \"\"\",\n enable_cross_partition_query=True \n )\n\n converted = []\n for result in results:\n result = {res: result[res] for res in result if not res.startswith(\"_\")}\n c = item_type(**result)\n converted.append(c)\n\n return converted\n\n def _getContainer(self, item_type: any) -> str:\n \"\"\"Get container name from passed type\n\n Args:\n item_type (Generic[T]): type of the item that you want to do\n\n Raises:\n Exception: Exception if type name does not resolve to a container\n\n Returns:\n str: container name\n\n Unit Test:\n >>> c = Cosmos()\n >>> actual = c.getContainer(AlpacaNews)\n >>> expected = c.article_container_name\n >>> actual == expected\n True\n >>> actual = c.getContainer(Prediction)\n >>> expected = c.prediction_container_name\n >>> actual == expected\n True\n >>> actual = c.getContainer(Sentiment)\n >>> expected = c.sentiment_container_name\n >>> actual == expected\n True\n \"\"\"\n if item_type == AlpacaNews:\n container: str = self.article_container_name\n elif item_type == Prediction:\n container: str = self.prediction_container_name\n elif item_type == Sentiment:\n container: str = self.sentiment_container_name\n elif item_type == AlpacaTicker:\n container: str = self.ticker_container_name\n elif item_type == Delta:\n container: str = self.delta_name\n else: \n raise Exception(f\"Cannot read {item_type} from database\")\n return container\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n wrapper = CosmosClient()\n", "repo_name": "a-wallen/stm-toolkit", "sub_path": "models/cosmos.py", "file_name": "cosmos.py", "file_ext": "py", "file_size_in_byte": 5098, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.TypeVar", "line_number": 14, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 37, "usage_type": 
"call"}, {"api_name": "os.environ", "line_number": 38, "usage_type": "attribute"}, {"api_name": "base64.b64decode", "line_number": 39, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 40, "usage_type": "call"}, {"api_name": "azure.cosmos.CosmosClient", "line_number": 41, "usage_type": "call"}, {"api_name": "azure.cosmos.CosmosClient", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 100, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 67, "usage_type": "name"}, {"api_name": "alpaca_news.AlpacaNews", "line_number": 145, "usage_type": "name"}, {"api_name": "prediction.Prediction", "line_number": 147, "usage_type": "name"}, {"api_name": "sentiment.Sentiment", "line_number": 149, "usage_type": "name"}, {"api_name": "alpaca_ticker.AlpacaTicker", "line_number": 151, "usage_type": "name"}, {"api_name": "delta.Delta", "line_number": 153, "usage_type": "name"}, {"api_name": "doctest.testmod", "line_number": 162, "usage_type": "call"}, {"api_name": "azure.cosmos.CosmosClient", "line_number": 163, "usage_type": "call"}]} +{"seq_id": "40799628426", "text": "import time\nimport os\nfrom AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient\n\nrootCA = os.path.abspath(\"../../certs/AmazonRootCA1.pem\")\nprivateKey = os.path.abspath(\"../../certs/private.pem.key\")\ncert = os.path.abspath(\"../../certs/device-certificate.pem.crt\")\n\ndef helloworld(self, params, packet):\n print ('Receved Message from AWS IoT Core')\n print ('Topic: ' + packet.topic)\n print (\"Payload: \", (packet.payload))\n \nmyMQTTClient = AWSIoTMQTTClient(\"testChrisFeltClientID\")\nmyMQTTClient.configureEndpoint(\"a2rfrvyjik0srb-ats.iot.us-east-2.amazonaws.com\", 8883)\n\nmyMQTTClient.configureCredentials(rootCA, privateKey, cert)\n\nmyMQTTClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing\nmyMQTTClient.configureDrainingFrequency(2) # Draining: 2 Hz\nmyMQTTClient.configureConnectDisconnectTimeout(10) # 10 sec\nmyMQTTClient.configureMQTTOperationTimeout(5) #5 sec\n\nprint ('Initiating IoT Core Topic ...')\nmyMQTTClient.connect()\nmyMQTTClient.subscribe(\"test/topic\", 1, helloworld)\n\nwhile True:\n time.sleep(5)\n\n", "repo_name": "christopherfelt/aws-iot-samples-and-notes", "sub_path": "test1.py", "file_name": "test1.py", "file_ext": "py", "file_size_in_byte": 1042, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.path.abspath", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "AWSIoTPythonSDK.MQTTLib.AWSIoTMQTTClient", "line_number": 14, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "38899350563", "text": "import os\nimport h5py\nimport xlrd\nfrom tqdm import tqdm\nimport numpy as np\nimport pandas as pd\nimport pickle\n\nclass DrugClassifier():\n\n def __init__(self):\n self.threashold = 0.5\n self.words_list = []\n self.train_pVec = []\n\n\n def train(self, vocab_file):\n with open(vocab_file,'r',encoding = 'utf-8') as f: \n vocab = f.read().splitlines()\n std_word_dict = 
self.createWordDict(vocab)\n sort_dict = sorted(std_word_dict.items(),key=lambda x:x[1],reverse=True)\n self.words_list = [i for i,_ in sort_dict]\n\n pVect = np.zeros([len(vocab),len(self.words_list)])\n pDenom = np.zeros([len(vocab),1])\n for i, words_i in tqdm(enumerate(vocab)):\n c = i\n word_vec = self.bagOfWords2VecMN(words_i)\n pVect[c] += word_vec\n pDenom[c] += np.sum(word_vec)\n self.train_pVec = np.zeros_like(pVect)\n for i in range(len(pVect)):\n if pDenom[i] == 0:\n pVect[i] = 0\n continue\n self.train_pVec[i] = pVect[i]/pDenom[i]\n\n def createWordDict(self, dataList):\n dataDict = {}\n for item in dataList:\n for character in item:\n if character in ['\\u3000']:\n continue\n if character not in dataDict.keys():\n dataDict[character] = 1\n else:\n dataDict[character] +=1\n return dataDict\n\n def is_drug(self, item):\n index_vec = self.index_in_wordList(item)\n if len(index_vec) == 0:\n return False\n p1 = np.sum(self.train_pVec[:,index_vec],1)\n if p1.max() > self.threashold:\n return True\n else:\n return False\n\n def bagOfWords2VecMN(self, inputSet):\n returnVec = np.zeros([len(self.words_list)])\n for word in inputSet:\n try:\n returnVec[self.words_list.index(word)] += 1\n except:\n continue\n return returnVec\n\n def index_in_wordList(self, inputWord):\n indexVec = []\n for word in inputWord:\n try:\n indexVec.append(self.words_list.index(word))\n except:\n continue\n return np.unique(indexVec)\n\ndef load(file_path):\n with open(file_path, 'rb') as f:\n c = pickle.load(f)\n return c\n\ndef save(c, file_path):\n with open(file_path, 'wb') as f:\n pickle.dump(c, f)\n\n\n\n\n", "repo_name": "Zipper-KG/Zipper-KG", "sub_path": "NER/HMM/drug_classifier.py", "file_name": "drug_classifier.py", "file_ext": "py", "file_size_in_byte": 2485, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 76, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 80, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "33683166267", "text": "from PyQt5.QtWidgets import (\n QWidget, QVBoxLayout, QPushButton, \n QDesktopWidget\n)\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtCore import pyqtSignal, Qt\nfrom constants import *\n\nclass OptionsWindow(QWidget):\n\n pressedBackspace = pyqtSignal()\n\n def __init__(self, *args, **kwargs):\n super(OptionsWindow, self).__init__(*args, **kwargs)\n self.setupUI()\n self.center()\n \n def setupUI(self):\n self.setWindowTitle(WINDOW_TITLE)\n # self.resize(*WINDOW_SIZE)\n\n self.font = QFont()\n self.font.setPointSize(18)\n self.font.setFamily(\"Roboto Mono\")\n\n self.font = QFont()\n self.font.setPointSize(18)\n self.font.setFamily(\"Roboto Mono\")\n \n self.mainlayout = QVBoxLayout()\n self.btns_layout = QVBoxLayout()\n\n # Create widgets\n self.monitor_btn = QPushButton(\"Monitor\")\n self.publish_plain_txt_btn = QPushButton(\"Publish Plain Text\")\n self.set_preferences_btn = QPushButton(\"Set 
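Editor's note: bagOfWords2VecMN above calls list.index() once per token, an O(vocabulary) scan wrapped in a bare except. An equivalent sketch with a precomputed dict index, giving O(1) lookups and explicit out-of-vocabulary handling; the toy character vocabulary is made up:

```python
import numpy as np

words_list = ["阿", "司", "匹", "林"]               # toy character vocabulary
index = {w: i for i, w in enumerate(words_list)}   # built once, O(1) per lookup

def bag_of_words(tokens):
    vec = np.zeros(len(words_list))
    for tok in tokens:
        if tok in index:            # skip out-of-vocabulary tokens explicitly
            vec[index[tok]] += 1
    return vec

print(bag_of_words(["阿", "司", "司", "x"]))        # -> [1. 2. 0. 0.]
```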
Preferences\")\n self.changee_interval_btn = QPushButton(\"Change Interval\")\n self.disconnect_btn = QPushButton(\"Disconnect\")\n\n self.monitor_btn.setFont(self.font)\n self.publish_plain_txt_btn.setFont(self.font)\n self.set_preferences_btn.setFont(self.font)\n self.changee_interval_btn.setFont(self.font)\n self.disconnect_btn.setFont(self.font)\n\n self.btns_layout.addWidget(self.monitor_btn)\n self.btns_layout.addWidget(self.publish_plain_txt_btn)\n self.btns_layout.addWidget(self.set_preferences_btn)\n self.btns_layout.addWidget(self.changee_interval_btn)\n self.btns_layout.addWidget(self.disconnect_btn)\n\n self.mainlayout.addLayout(self.btns_layout)\n\n self.setLayout(self.mainlayout)\n\n def center(self):\n width, height = self.sizeHint().width(), self.sizeHint().height()\n centerPoint = QDesktopWidget().availableGeometry().center()\n self.move(centerPoint.x() - width // 2, centerPoint.y() - height // 2)\n\n def keyPressEvent(self, event):\n if event.key() == Qt.Key.Key_Backspace:\n self.pressedBackspace.emit()", "repo_name": "zEuS0390/mqtt_gui_test", "sub_path": "windows/options.py", "file_name": "options.py", "file_ext": "py", "file_size_in_byte": 2158, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 9, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 11, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 22, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 26, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 30, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 31, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 34, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 35, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 36, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 37, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 38, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDesktopWidget", "line_number": 58, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.Key", "line_number": 62, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "38702886410", "text": "import scrapy\nfrom scrapy.exceptions import CloseSpider\n\n\nclass ProductsSpider(scrapy.Spider):\n name = \"products\"\n allowed_domains = [\"www.tinydeals.co\"]\n start_urls = [\"https://www.tinydeals.co/recommended/\"]\n\n # optional\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/113.0',\n 'Accept': '*/*',\n 'Accept-Language': 'en-US,en;q=0.5',\n # 'Accept-Encoding': 'gzip, deflate, br',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Origin': 'https://www.tinydeals.co',\n 'Connection': 'keep-alive',\n 'Referer': 'https://www.tinydeals.co/recommended/',\n # 'Cookie': 'tk_or=%22https%3A%2F%2Fwww.google.com%2F%22; tk_r3d=%22https%3A%2F%2Fwww.google.com%2F%22; tk_lr=%22https%3A%2F%2Fwww.google.com%2F%22; PHPSESSID=fnj26lvvu6egj4f9kgma6rk5hr; _ga_KSNDB8SNFZ=GS1.1.1684335201.2.1.1684335208.0.0.0; _ga=GA1.2.374695317.1684263582; _gid=GA1.2.1689830523.1684263582; gist_identified_xzgks4hj=0; 
gist_id_xzgks4hj=3b130238-8f6c-99f3-104e-e4f41fbb04c2; _fbp=fb.1.1684263584935.42552601; woocommerce_recently_viewed=240428%7C240228; sc_is_visitor_unique=rx12829943.1684335205.7732F19FC3AC4F01C148703460FB09C1.2.2.2.2.2.2.1.1.1; _gat_gtag_UA_186231599_1=1',\n 'Sec-Fetch-Dest': 'empty',\n 'Sec-Fetch-Mode': 'cors',\n 'Sec-Fetch-Site': 'same-origin',\n }\n\n data = {\n 'action': 're_filterpost',\n 'filterargs[post_type]': 'product',\n 'filterargs[posts_per_page]': '50',\n 'filterargs[orderby]': '',\n 'filterargs[order]': '',\n 'filterargs[tax_query][0][0][taxonomy]': 'product_visibility',\n 'filterargs[tax_query][0][0][field]': 'name',\n 'filterargs[tax_query][0][0][terms]': 'exclude-from-catalog',\n 'filterargs[tax_query][0][0][operator]': 'NOT IN',\n 'filterargs[tax_query][0][relation]': 'AND',\n 'template': 'woogridcompact',\n # 'containerid': 'rh_woogrid_236485417',\n 'offset': '0',\n 'innerargs[columns]': '6_col',\n 'innerargs[woolinktype]': '',\n 'innerargs[disable_thumbs]': '',\n 'innerargs[gridtype]': 'compact',\n 'innerargs[soldout]': '',\n 'innerargs[attrelpanel]': '',\n # 'security': '9c1ade18eb',\n 'security': 'lul',\n }\n\n # use this to control the number of items to scrape per request\n product_per_request_count = 50\n\n paginator = 0\n\n def parse(self, response):\n # Update the security key used for template calls (the security key is updated each 24 I guess)\n self.data['security'] = response.xpath(\"//script[@id='rehub-js-extra']/text()\").re_first(\n r'\"filternonce\":\"(\\w+)\"')\n self.data['posts_per_page'] = str(self.product_per_request_count)\n # start from 0\n self.data['offset'] = '0'\n\n data = self.data\n\n yield scrapy.FormRequest(\"https://www.tinydeals.co/wp-admin/admin-ajax.php\",\n formdata=data,\n callback=self.parse_products)\n\n def parse_products(self, response):\n\n no_more_products = response.xpath(\"//span[@class='no_more_posts']\")\n if no_more_products:\n raise CloseSpider(\"no more products to scrape!\")\n\n products = response.xpath(\"//div[@class='grid_desc_and_btn']\")\n\n for product in products:\n # get the price as a string\n price = \"-\".join(product.xpath(\".//bdi/text()\").getall())\n # check if the price is empty and return \"Out of stock\" if so\n price = price if price else \"Out of stock\"\n yield {\n \"Name\": product.xpath(\".//h3/a[@href]/text()\").get(),\n \"Price\": price\n }\n\n if self.paginator < 300:\n\n self.paginator = int(self.data['offset']) + self.product_per_request_count\n\n self.data['offset'] = str(self.paginator)\n\n yield scrapy.FormRequest(\"https://www.tinydeals.co/wp-admin/admin-ajax.php\",\n formdata=self.data,\n callback=self.parse_products)\n", "repo_name": "A-M-Amine/web-scraping-projects", "sub_path": "tinydeal/tinydeal/spiders/products.py", "file_name": "products.py", "file_ext": "py", "file_size_in_byte": 4209, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "scrapy.Spider", "line_number": 5, "usage_type": "attribute"}, {"api_name": "scrapy.FormRequest", "line_number": 66, "usage_type": "call"}, {"api_name": "scrapy.exceptions.CloseSpider", "line_number": 74, "usage_type": "call"}, {"api_name": "scrapy.FormRequest", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "71011314614", "text": "#!/usr/bin/python3\n# *-* coding: utf-8 *-*\n\nimport pathlib\n\nspaceName = \"dataExampleSpace\"\nlookupLocators = \"EC2AMAZ-PUUQMQH\"\nlookupGroups = \"xap-16.2.1\"\nspaceHostsFileName = \"spaceHosts.txt\"\nredoLogScriptName = 
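Editor's note: the spider above pages by re-posting the same form with a growing offset until the server returns a no_more_posts marker. A skeleton of that loop with the form payload trimmed down to the offset field; `cb_kwargs` requires Scrapy 1.7 or newer:

```python
import scrapy

class OffsetSpider(scrapy.Spider):
    name = "offset_demo"
    page_size = 50

    def start_requests(self):
        yield scrapy.FormRequest(
            "https://www.tinydeals.co/wp-admin/admin-ajax.php",
            formdata={"offset": "0"},               # the real payload carries more fields
            callback=self.parse_products,
            cb_kwargs={"offset": 0},
        )

    def parse_products(self, response, offset):
        if response.xpath("//span[@class='no_more_posts']"):
            return                                   # server says we're done
        # ... yield scraped items here ...
        offset += self.page_size
        yield scrapy.FormRequest(
            response.url,
            formdata={"offset": str(offset)},
            callback=self.parse_products,
            cb_kwargs={"offset": offset},
        )
```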
\"copyRedoLogScript.bat\"\ndeserializeScriptName = \"deserializeScript.py\"\njarFileName = 'redolog-client-1.0-SNAPSHOT-jar-with-dependencies.jar'\n\n# setting paths accordding to this script location\nraHome = pathlib.PurePath(__file__).parent.parent\ngsHome = pathlib.PurePath(raHome).parent\nscriptLocation = raHome.joinpath('scripts')\nresourceLocation = raHome.joinpath('resources')\nresourceAdminShare = str(resourceLocation.drive).replace(':','$')\nresourceNetPath = f\"{resourceAdminShare}{str(resourceLocation).replace(resourceLocation.drive , '')}\"\njarFilePath = resourceLocation.joinpath(jarFileName)\nsourcePath = gsHome.joinpath('Work/redo-log', spaceName)\ntargetPath = gsHome.joinpath('backup/work/redo-log', spaceName)\ntargetPathBaseDir = gsHome.joinpath('backup')\ndeserializeFullPath = gsHome.joinpath('backup')\n\nredoLogYaml = str(gsHome.joinpath('backup','AllDeserializedFiles'))\nassemblyFileName = str(gsHome.joinpath('Deploy','DataProcessor','GigaSpaces.Examples.ProcessingUnit.Common.dll'))\n", "repo_name": "GigaSpaces-ProfessionalServices/CSM-Magic-Tools", "sub_path": "Recovery Assistant/scripts/setenvredolog.py", "file_name": "setenvredolog.py", "file_ext": "py", "file_size_in_byte": 1207, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pathlib.PurePath", "line_number": 15, "usage_type": "call"}, {"api_name": "pathlib.PurePath", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "6053581693", "text": "from rest_framework import serializers\nfrom rest_framework.exceptions import APIException\n\nfrom .models import Doctor, Appointments, Patient\nfrom libs.time_utils import time_slots\n\nVALID_TIME_SLOTS = time_slots()\n\n\nclass DoctorSerializer(serializers.ModelSerializer):\n class Meta:\n model = Doctor\n fields = \"__all__\"\n\n\nclass PatientSerializer(serializers.ModelSerializer):\n class Meta:\n model = Patient\n fields = \"__all__\"\n\n\nclass AppointmentSerializer(serializers.ModelSerializer):\n patient = PatientSerializer()\n\n class Meta:\n model = Appointments\n fields = \"__all__\"\n\n def validate_appt_time(self, time):\n if time in VALID_TIME_SLOTS:\n return time\n else:\n raise serializers.ValidationError('Invalid Time Value')\n\n def create(self, validated_data, *args, **kwargs):\n doc_appts_count = Appointments.objects.filter(\n doctor=validated_data['doctor'],\n appt_date=validated_data['appt_date'],\n appt_time=validated_data['appt_time'],\n ).count()\n\n if doc_appts_count >= 3:\n raise APIException(\"Appointment time unavailable\")\n\n patient, created = Patient.objects.get_or_create(**validated_data.get('patient'))\n if created:\n kind = Appointments.NEW_APNTMNT\n else:\n kind = Appointments.FLW_UP_APTMNT\n validated_data['patient'] = patient\n validated_data['kind'] = kind\n\n return Appointments.objects.create(**validated_data)\n\n\n\n\n\n\n", "repo_name": "sahithi403/notable_test", "sub_path": "appointments/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 1546, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "libs.time_utils.time_slots", "line_number": 7, "usage_type": "call"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 10, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 10, "usage_type": "name"}, {"api_name": "models.Doctor", "line_number": 12, "usage_type": "name"}, 
{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 16, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 16, "usage_type": "name"}, {"api_name": "models.Patient", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 22, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 22, "usage_type": "name"}, {"api_name": "models.Appointments", "line_number": 26, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ValidationError", "line_number": 33, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 33, "usage_type": "name"}, {"api_name": "models.Appointments.objects.filter", "line_number": 36, "usage_type": "call"}, {"api_name": "models.Appointments.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.Appointments", "line_number": 36, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.APIException", "line_number": 43, "usage_type": "call"}, {"api_name": "models.Patient.objects.get_or_create", "line_number": 45, "usage_type": "call"}, {"api_name": "models.Patient.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.Patient", "line_number": 45, "usage_type": "name"}, {"api_name": "models.Appointments.NEW_APNTMNT", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.Appointments", "line_number": 47, "usage_type": "name"}, {"api_name": "models.Appointments.FLW_UP_APTMNT", "line_number": 49, "usage_type": "attribute"}, {"api_name": "models.Appointments", "line_number": 49, "usage_type": "name"}, {"api_name": "models.Appointments.objects.create", "line_number": 53, "usage_type": "call"}, {"api_name": "models.Appointments.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "models.Appointments", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "11657677516", "text": "from gensim.utils import simple_preprocess\nfrom gensim.parsing.preprocessing import STOPWORDS\nfrom nltk.stem import SnowballStemmer, WordNetLemmatizer\nimport re\n\n\n# nltk.download('wordnet')\n\n\ndef stemming(words):\n stemmer = SnowballStemmer(\"english\")\n stemmed_words = []\n for word in words:\n stemmed_words.append(stemmer.stem(word))\n return stemmed_words\n\n\ndef lemmatize(words):\n lemmatized_words = []\n for word in words:\n lemmatized_words.append(WordNetLemmatizer().lemmatize(word, pos=\"v\"))\n return lemmatized_words\n\n\ndef lemmatize_stemming(words):\n return stemming(lemmatize(words))\n\n\ndef cleanup_text(text):\n text = re.sub('[^a-zA-Z]', ' ', text)\n text = re.sub(\"</?.*?>\", \" <> \", text)\n return text\n\n\ndef preprocess(text, extra_cleanup=False):\n processed_words = []\n if extra_cleanup:\n text = cleanup_text(text)\n for token in simple_preprocess(text, min_len=3):\n if token not in STOPWORDS:\n processed_words.extend(lemmatize_stemming([token]))\n return processed_words\n", "repo_name": "EduardoSaverin/Machine-Learning-APIs", "sub_path": "nlp/stemming.py", "file_name": "stemming.py", "file_ext": "py", "file_size_in_byte": 1068, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "nltk.stem.SnowballStemmer", "line_number": 11, "usage_type": "call"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 21, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 30, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 
31, "usage_type": "call"}, {"api_name": "gensim.utils.simple_preprocess", "line_number": 39, "usage_type": "call"}, {"api_name": "gensim.parsing.preprocessing.STOPWORDS", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "3970515805", "text": "\"\"\" goulash._inspect\n\"\"\"\nimport os\nimport inspect\nfrom addict import Dict\nfrom setuptools import find_packages\n\n\ndef _main_package(src_root):\n \"\"\" typically find_packages() returns something like\n ['foo','foo.bar','foo.baz']. in that case, \"foo\"\n is the main pasckage.\n \"\"\"\n packages = find_packages(where=src_root, exclude=('tests',))\n packages = set([p.split('.')[0] for p in packages])\n if len(packages) == 0 or 1 < len(packages):\n err = 'cannot guess pkg_root and it was not provided. '\n err += 'setuptools.find_packages() returns \"{0}\", '\n err += 'working dir is \"{1}\"'\n err = err.format(packages, os.getcwd())\n raise RuntimeError(err)\n else:\n pkg_root = list(packages)[0]\n return pkg_root\n\n\ndef getcaller(level=2):\n \"\"\" \"\"\"\n x = inspect.stack()[level]\n frame = x[0]\n file_name = x[1]\n flocals = frame.f_locals\n fglobals = frame.f_globals\n func_name = x[3]\n himself = flocals.get('self', None)\n try:\n kls = himself and himself.__class__\n except AttributeError:\n # python uses self only by convention, so it's\n # possible there is a \"himself\" local but it's\n # not actually an object.\n kls = None\n kls_func = getattr(kls, func_name, None)\n if type(kls_func) == property:\n func = kls_func\n else:\n try:\n func = himself and getattr(himself, func_name)\n except AttributeError:\n func = func_name + '[nested]'\n out = dict(file=file_name,\n self=himself,\n locals=flocals,\n globals=fglobals,\n func=func,\n func_name=func_name)\n out.update({'class': kls})\n return Dict(out)\nget_caller = getcaller\n", "repo_name": "mattvonrocketstein/goulash", "sub_path": "goulash/_inspect.py", "file_name": "_inspect.py", "file_ext": "py", "file_size_in_byte": 1769, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "setuptools.find_packages", "line_number": 14, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 20, "usage_type": "call"}, {"api_name": "inspect.stack", "line_number": 29, "usage_type": "call"}, {"api_name": "addict.Dict", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "2974750343", "text": "# encoding: utf-8\n\nimport sys\nfrom datetime import datetime\nimport requests\nimport json\nimport time\nimport os\nfrom links import icons, config\nfrom links.util import workflow\nfrom links.models.preferences import Preferences\nimport logging\nfrom logging.config import fileConfig\nfileConfig('logging_config.ini')\nlog = logging.getLogger('links')\n\n\ndef getAlfredVersion(wf):\n # alfred workflow version\n v = wf.alfred_version.tuple\n return \"{0}.{1}.{2}\".format(v[0], v[1], v[2])\n\n\ndef getWorkflowVersion():\n with open('version', 'r') as file:\n ver = file.readlines()[0]\n return ver.strip()\n\n\ndef search(query, offset, size):\n localResult = {\n 'statusCode': 0,\n 'message': '',\n 'data': []\n }\n\n appKey = workflow().get_password(config.KC_OAUTH_TOKEN)\n log.info('appKey: %s' % (appKey))\n\n # query the keyword from web server\n prefs = Preferences.current_prefs()\n session = requests.session()\n formData = {\n 'keyword': query,\n 'from': offset,\n 'size': size\n }\n resp = session.post(\n url=config.LK_SEARCH_APP_URL,\n headers={\n 'User-Agent': 'alfred/{0} 
workflow/{1}'.format(getAlfredVersion(workflow()), getWorkflowVersion()),\n            'Authorization': appKey,\n            'Content-Type': 'application/json; charset=UTF-8'\n        },\n        data=json.dumps(formData),\n        timeout=60)\n    if resp.status_code == 200:\n        log.info('search respond success')\n        result = json.loads(resp.text)\n        return result\n    else:\n        log.info('search respond failed')\n        localResult['message'] = u'Network error, please try again later'\n        return localResult\n", "repo_name": "tickstep/alfred-links-workflow", "sub_path": "links/api/search_api.py", "file_name": "search_api.py", "file_ext": "py", "file_size_in_byte": 1681, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.config.fileConfig", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "links.util.workflow", "line_number": 37, "usage_type": "call"}, {"api_name": "links.config.KC_OAUTH_TOKEN", "line_number": 37, "usage_type": "attribute"}, {"api_name": "links.config", "line_number": 37, "usage_type": "name"}, {"api_name": "links.models.preferences.Preferences.current_prefs", "line_number": 41, "usage_type": "call"}, {"api_name": "links.models.preferences.Preferences", "line_number": 41, "usage_type": "name"}, {"api_name": "requests.session", "line_number": 42, "usage_type": "call"}, {"api_name": "links.config.LK_SEARCH_APP_URL", "line_number": 49, "usage_type": "attribute"}, {"api_name": "links.config", "line_number": 49, "usage_type": "name"}, {"api_name": "links.util.workflow", "line_number": 51, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 55, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "33551243198", "text": "import cv2\nimport sys\nimport JSON_Control\n\ndef capture(file_path):\n    # To use the built-in camera, set the argument to 0\n    cap = cv2.VideoCapture(0)\n    # Grab a single frame from the camera\n    ret, frame = cap.read()\n    \n    # Abort if the frame from the camera is invalid\n    if not ret:\n        return False\n    cv2.imwrite(file_path,frame) \n    # Display the frame\n    #cv2.imshow('frame', frame)\n    # Close all windows\n    #cv2.destroyAllWindows()\n    cap.release()\n\n    return True\n\nif __name__ == '__main__':\n\n    try:\n        json_str = sys.argv[1]\n    except IndexError:\n        # If no argument is given, print the message below\n        print(\"Please specify a JSON-formatted string\")\n        sys.exit()\n\n    json_dictionary = JSON_Control.JsonToDictionary(json_str)\n    \n    file_path = json_dictionary.get(\"file_path\",\"\")\n    if(file_path != \"\"):\n        capture(file_path)\n", "repo_name": "kouzimiso/study", "sub_path": "Project/AutoClick/Sources/Test/old/test_camera.py", "file_name": "test_camera.py", "file_ext": "py", "file_size_in_byte": 1031, "program_lang": "python", "lang": "ja", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "cv2.VideoCapture", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 30, "usage_type": "call"}, {"api_name": "JSON_Control.JsonToDictionary", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "38537050597", "text": "from collections import deque\n\ndef BFS(n, info):\n    queue = deque([(0, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])])\n    max_value = 0\n    answer = []\n\n    while queue:\n        pre_arrow, board = queue.popleft()\n        hitted_arrows = sum(board)\n        if hitted_arrows == n:\n            # check the current score\n            lion, appeach = (0, 0)\n            for i in range(11):\n                if board[i] > 
info[i]:\n                    lion += 10 - i\n                elif info[i] > 0:\n                    appeach += 10 - i\n            \n            if max_value < lion - appeach:\n                max_value = lion - appeach\n                answer.clear()\n                answer.append(board)\n            elif lion - appeach and max_value == lion - appeach:\n                answer.append(board)\n            continue\n\n        elif hitted_arrows > n:\n            continue\n\n        elif pre_arrow == 10:\n            temp = board.copy()\n            temp[10] += n - hitted_arrows\n            queue.append([11, temp])\n\n        else:\n            temp = board.copy()\n            temp[pre_arrow] = info[pre_arrow] + 1\n            queue.append([pre_arrow + 1, temp])\n\n            temp2 = board.copy()\n            temp2[pre_arrow] = 0\n            queue.append([pre_arrow + 1, temp2])\n    \n    return answer\n\ndef solution(n, info):\n    answer = BFS(n, info)\n    \n    if answer:\n        return answer[-1]\n    else:\n        return [-1]\n    \n\nprint(solution(9, [0, 0, 1, 2, 0, 1, 1, 1, 1, 1, 1]))", "repo_name": "oo009pbh/Today-I-learn", "sub_path": "7월 18일~/양궁 대회.py", "file_name": "양궁 대회.py", "file_ext": "py", "file_size_in_byte": 1477, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "collections.deque", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "20028919857", "text": "import pytest\nfrom flask_migrate import upgrade\nfrom sqlalchemy import text\nfrom sqlalchemy_utils.functions import create_database\nfrom sqlalchemy_utils.functions import database_exists\nfrom sqlalchemy_utils.functions import drop_database\n\n\ndef prep_db(reuse_db=False):\n    \"\"\"\n    Determines whether the target database already exists; if it\n    doesn't, it is created.\n    If it exists but reuse_db is False, then the DB is dropped and recreated.\n    upgrade() is always run to make sure the schema is up to date.\n    \"\"\"\n\n    from config import Config\n\n    no_db = not database_exists(Config.SQLALCHEMY_DATABASE_URI)\n    refresh_db = not reuse_db\n\n    if no_db:\n        create_database(Config.SQLALCHEMY_DATABASE_URI)\n\n    elif refresh_db:\n        drop_database(Config.SQLALCHEMY_DATABASE_URI)\n        create_database(Config.SQLALCHEMY_DATABASE_URI)\n\n    upgrade()\n\n\n@pytest.fixture(scope=\"session\")\ndef _db(app, request):\n    \"\"\"\n    Fixture to supply tests with direct access to the database\n    \"\"\"\n    yield app.extensions[\"sqlalchemy\"]\n\n\n@pytest.fixture(scope=\"session\")\ndef recreate_db(request, _db, app):\n    \"\"\"\n    Fixture to determine whether we need to clear out the existing DB\n    (based on cache values) and then call prep_db.\n    This is session scoped so we only drop/create the db\n    at the start of a test session\n    \"\"\"\n    print(\"Recreating db\")\n    reuse_db = bool(request.config.cache.get(\"reuse_db\", False))\n    with app.app_context():\n        prep_db(reuse_db)\n    request.config.cache.set(\"reuse_db\", True)\n    yield\n\n\n@pytest.fixture(scope=\"session\")\ndef clear_test_data(app, _db, request, recreate_db):\n    \"\"\"\n    Fixture to clean up the database after the test session.\n\n    This fixture reads preserve_test_data from the cache\n    (see enable_preserve_test_data below),\n    and if we are not preserving the data it clears the database\n    by deleting all data from all tables.\n\n    This is session scoped, so the cleanup runs once at the end of\n    the test session.\n\n    \"\"\"\n    with app.app_context():\n        yield\n        preserve_test_data = request.config.cache.get(\"preserve_test_data\", None)\n        if not preserve_test_data:\n            # roll back in case of any errors during the test session\n            _db.session.rollback()\n            # disable foreign key checks\n            _db.session.execute(text(\"SET session_replication_role = replica\"))\n            # delete all data from tables\n            for table in reversed(_db.metadata.sorted_tables):\n                
_db.session.execute(table.delete())\n # reset foreign key checks\n _db.session.execute(text(\"SET session_replication_role = DEFAULT\"))\n _db.session.commit()\n else:\n # If test requests 'preserve test data' make sure\n # on the next run we clear out the DB completely.\n request.config.cache.set(\"reuse_db\", False)\n request.config.cache.set(\"preserve_test_data\", False)\n\n\n@pytest.fixture(scope=\"function\")\ndef enable_preserve_test_data(request):\n \"\"\"\n Fixture to read the markers on a test and if preserve_test_data is\n set, it sets it in the cache.\n This can't be combined with clear_test_data due to conflicts on the fixture\n scope, so this function reads the test markers and clear_test_data\n reads the cache.\n \"\"\"\n marker = request.node.get_closest_marker(\"preserve_test_data\")\n if marker is not None:\n request.config.cache.set(\"preserve_test_data\", True)\n", "repo_name": "communitiesuk/funding-service-design-utils", "sub_path": "fsd_test_utils/fixtures/db_fixtures.py", "file_name": "db_fixtures.py", "file_ext": "py", "file_size_in_byte": 3468, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sqlalchemy_utils.functions.database_exists", "line_number": 19, "usage_type": "call"}, {"api_name": "config.Config.SQLALCHEMY_DATABASE_URI", "line_number": 19, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 19, "usage_type": "name"}, {"api_name": "sqlalchemy_utils.functions.create_database", "line_number": 23, "usage_type": "call"}, {"api_name": "config.Config.SQLALCHEMY_DATABASE_URI", "line_number": 23, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 23, "usage_type": "name"}, {"api_name": "sqlalchemy_utils.functions.drop_database", "line_number": 26, "usage_type": "call"}, {"api_name": "config.Config.SQLALCHEMY_DATABASE_URI", "line_number": 26, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 26, "usage_type": "name"}, {"api_name": "sqlalchemy_utils.functions.create_database", "line_number": 27, "usage_type": "call"}, {"api_name": "config.Config.SQLALCHEMY_DATABASE_URI", "line_number": 27, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 27, "usage_type": "name"}, {"api_name": "flask_migrate.upgrade", "line_number": 29, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 32, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.text", "line_number": 77, "usage_type": "call"}, {"api_name": "sqlalchemy.text", "line_number": 82, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 56, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "22358780059", "text": "import frappe\nfrom frappe import _, scrub\nfrom frappe.database.query import OPERATOR_MAP\nfrom frappe.utils import add_days, add_to_date, flt, getdate\n\nfrom erpnext.accounts.utils import get_fiscal_year\n\n\ndef execute(filters=None):\n\treturn DiagnosisTrends(filters).run()\n\n\nclass DiagnosisTrends(object):\n\t\"\"\"\n\tDiagnosis Trends Report.\n\t\"\"\"\n\n\tdef __init__(self, filters=None):\n\t\tself.data = []\n\t\tself.periodic_daterange = []\n\t\tself.filters = frappe._dict(filters or {})\n\t\tself.months = 
[\n\t\t\t\"Jan\",\n\t\t\t\"Feb\",\n\t\t\t\"Mar\",\n\t\t\t\"Apr\",\n\t\t\t\"May\",\n\t\t\t\"Jun\",\n\t\t\t\"Jul\",\n\t\t\t\"Aug\",\n\t\t\t\"Sep\",\n\t\t\t\"Oct\",\n\t\t\t\"Nov\",\n\t\t\t\"Dec\",\n\t\t]\n\t\tself.get_period_date_ranges()\n\n\tdef run(self):\n\t\tself.get_columns()\n\t\tself.get_data()\n\t\tself.get_chart_data()\n\n\t\treturn self.columns, self.data, None, self.chart\n\n\tdef get_period_date_ranges(self):\n\t\tfrom dateutil.relativedelta import MO, relativedelta\n\n\t\tfrom_date, to_date = getdate(self.filters.from_date), getdate(self.filters.to_date)\n\n\t\tincrement = {\"Monthly\": 1, \"Quarterly\": 3, \"Half-Yearly\": 6, \"Yearly\": 12}.get(\n\t\t\tself.filters.range, 1\n\t\t)\n\n\t\tif self.filters.range in [\"Monthly\", \"Quarterly\"]:\n\t\t\tfrom_date = from_date.replace(day=1)\n\t\telif self.filters.range == \"Yearly\":\n\t\t\tfrom_date = get_fiscal_year(from_date)[1]\n\t\telse:\n\t\t\tfrom_date = from_date + relativedelta(from_date, weekday=MO(-1))\n\n\t\tfor _ in range(1, 53): # noqa\n\t\t\tif self.filters.range == \"Weekly\":\n\t\t\t\tperiod_end_date = add_days(from_date, 6)\n\t\t\telse:\n\t\t\t\tperiod_end_date = add_to_date(from_date, months=increment, days=-1)\n\n\t\t\tif period_end_date > to_date:\n\t\t\t\tperiod_end_date = to_date\n\n\t\t\tself.periodic_daterange.append(period_end_date)\n\n\t\t\tfrom_date = add_days(period_end_date, 1)\n\t\t\tif period_end_date == to_date:\n\t\t\t\tbreak\n\n\tdef get_columns(self):\n\t\tself.columns = []\n\n\t\tself.columns.append(\n\t\t\t{\n\t\t\t\t\"label\": _(\"Diagnosis\"),\n\t\t\t\t\"fieldname\": \"diagnosis\",\n\t\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\t\"options\": \"Diagnosis\",\n\t\t\t\t\"width\": 150,\n\t\t\t}\n\t\t)\n\n\t\tfor end_date in self.periodic_daterange:\n\t\t\tperiod = self.get_period(end_date)\n\t\t\tself.columns.append(\n\t\t\t\t{\"label\": _(period), \"fieldname\": scrub(period), \"fieldtype\": \"Int\", \"width\": 120}\n\t\t\t)\n\n\t\tself.columns.append(\n\t\t\t{\"label\": _(\"Total\"), \"fieldname\": \"total\", \"fieldtype\": \"Int\", \"width\": 120}\n\t\t)\n\n\tdef get_data(self):\n\t\tpe_diagnosis = frappe.qb.DocType(\"Patient Encounter Diagnosis\")\n\t\tquery = (\n\t\t\tfrappe.qb.from_(pe_diagnosis)\n\t\t\t.select(\"name\", \"creation\", \"diagnosis\")\n\t\t\t.where(pe_diagnosis.creation[self.filters.from_date : self.filters.to_date])\n\t\t)\n\n\t\tdepartment = self.filters.get(\"department\")\n\n\t\tif department:\n\t\t\tencounters = frappe.get_all(\n\t\t\t\t\"Patient Encounter\", filters={\"medical_department\": department}, pluck=\"name\"\n\t\t\t)\n\t\t\tif encounters:\n\t\t\t\t_operator = OPERATOR_MAP[\"in\"]\n\t\t\t\tquery = query.where(_operator(pe_diagnosis.parent, encounters))\n\n\t\tself.entries = query.run(as_dict=True)\n\t\tself.get_rows()\n\n\tdef get_period(self, appointment_date):\n\t\tif self.filters.range == \"Weekly\":\n\t\t\tperiod = \"Week \" + str(appointment_date.isocalendar()[1])\n\t\telif self.filters.range == \"Monthly\":\n\t\t\tperiod = str(self.months[appointment_date.month - 1])\n\t\telif self.filters.range == \"Quarterly\":\n\t\t\tperiod = \"Quarter \" + str(((appointment_date.month - 1) // 3) + 1)\n\t\telse:\n\t\t\tyear = get_fiscal_year(appointment_date, company=self.filters.company)\n\t\t\tperiod = str(year[0])\n\n\t\tif getdate(self.filters.from_date).year != getdate(self.filters.to_date).year:\n\t\t\tperiod += \" \" + str(appointment_date.year)\n\n\t\treturn period\n\n\tdef get_rows(self):\n\t\tself.get_periodic_data()\n\n\t\tfor entity, period_data in 
self.appointment_periodic_data.items():\n\t\t\trow = {\"diagnosis\": entity}\n\n\t\t\ttotal = 0\n\t\t\tfor end_date in self.periodic_daterange:\n\t\t\t\tperiod = self.get_period(end_date)\n\t\t\t\tamount = flt(period_data.get(period, 0.0))\n\t\t\t\trow[scrub(period)] = amount\n\t\t\t\ttotal += amount\n\n\t\t\trow[\"total\"] = total\n\n\t\t\tself.data.append(row)\n\n\tdef get_periodic_data(self):\n\t\tself.appointment_periodic_data = frappe._dict()\n\n\t\tfor d in self.entries:\n\t\t\tperiod = self.get_period(d.get(\"creation\"))\n\t\t\tself.appointment_periodic_data.setdefault(d.diagnosis, frappe._dict()).setdefault(period, 0.0)\n\t\t\tself.appointment_periodic_data[d.diagnosis][period] += 1\n\n\tdef get_chart_data(self):\n\t\tlength = len(self.columns)\n\t\tlabels = [d.get(\"label\") for d in self.columns[1 : length - 1]]\n\t\tself.chart = {\"data\": {\"labels\": labels, \"datasets\": []}, \"type\": \"line\"}\n", "repo_name": "frappe/health", "sub_path": "healthcare/healthcare/report/diagnosis_trends/diagnosis_trends.py", "file_name": "diagnosis_trends.py", "file_ext": "py", "file_size_in_byte": 4376, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 190, "dataset": "github-code", "pt": "21", "api": [{"api_name": "frappe._dict", "line_number": 21, "usage_type": "call"}, {"api_name": "frappe.utils.getdate", "line_number": 48, "usage_type": "call"}, {"api_name": "erpnext.accounts.utils.get_fiscal_year", "line_number": 57, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 59, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.MO", "line_number": 59, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 61, "usage_type": "name"}, {"api_name": "frappe.utils.add_days", "line_number": 63, "usage_type": "call"}, {"api_name": "frappe.utils.add_to_date", "line_number": 65, "usage_type": "call"}, {"api_name": "frappe.utils.add_days", "line_number": 72, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 81, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 92, "usage_type": "call"}, {"api_name": "frappe.scrub", "line_number": 92, "usage_type": "call"}, {"api_name": "frappe._", "line_number": 96, "usage_type": "call"}, {"api_name": "frappe.qb.DocType", "line_number": 100, "usage_type": "call"}, {"api_name": "frappe.qb", "line_number": 100, "usage_type": "attribute"}, {"api_name": "frappe.qb.from_", "line_number": 102, "usage_type": "call"}, {"api_name": "frappe.qb", "line_number": 102, "usage_type": "attribute"}, {"api_name": "frappe.get_all", "line_number": 110, "usage_type": "call"}, {"api_name": "frappe.database.query.OPERATOR_MAP", "line_number": 114, "usage_type": "name"}, {"api_name": "erpnext.accounts.utils.get_fiscal_year", "line_number": 128, "usage_type": "call"}, {"api_name": "frappe.utils.getdate", "line_number": 131, "usage_type": "call"}, {"api_name": "frappe.utils.flt", "line_number": 145, "usage_type": "call"}, {"api_name": "frappe.scrub", "line_number": 146, "usage_type": "call"}, {"api_name": "frappe._dict", "line_number": 154, "usage_type": "call"}, {"api_name": "frappe._dict", "line_number": 158, "usage_type": "call"}]} +{"seq_id": "29747782436", "text": "from __future__ import print_function\nfrom __future__ import absolute_import\nimport logging\n\n# iTrade system\nimport itrade_config\nfrom itrade_logging import setLevel, debug\nfrom itrade_defs import QList, QTag\nfrom itrade_ext import gListSymbolRegistry\nfrom itrade_connection import ITradeConnection\nfrom six.moves import 
map\nfrom six.moves import range\n\n# ============================================================================\n# Import_ListOfQuotes_NZE()\n# ============================================================================\ndef removeCarriage(s):\n if s[-1] == '\\r':\n return s[:-1]\n else:\n return s\n\n\ndef splitLines(buf):\n lines = buf.split('\\n')\n lines = [x for x in lines if x]\n\n lines = [removeCarriage(l) for l in lines]\n return lines\n\n\ndef Import_ListOfQuotes_NZE(quotes, market='NEW ZEALAND EXCHANGE', dlg=None, x=0):\n if itrade_config.verbose:\n print(u'Update {} list of symbols'.format(market))\n connection = ITradeConnection(proxy=itrade_config.proxyHostname,\n proxyAuth=itrade_config.proxyAuthentication)\n\n if market == 'NEW ZEALAND EXCHANGE':\n url = u'https://www.findata.co.nz/Markets/NZX/{}.htm'\n else:\n return False\n\n select_alpha = list(map(chr, list(range(65, 91)))) # A to Z\n\n count = 0\n isin = ''\n\n for letter in select_alpha:\n if dlg:\n dlg.Update(x, u\" NZX : {} to Z\".format(letter))\n\n try:\n data = connection.getDataFromUrl(url.format(letter))\n except Exception:\n debug('Import_ListOfQuotes_NZE unable to connect :-(')\n return False\n\n # returns the data\n lines = splitLines(data)\n\n for line in lines:\n if '\"hideInfo();\">' in line:\n tickername = line[line.find('\"hideInfo();\">')+14:line.find('</td><td align=right>')]\n if not 'Index' in tickername:\n ticker = tickername[:tickername.index('<')]\n if not '0' in ticker[-1:]:\n name = tickername[tickername.index('<td>')+4:]\n\n count = count + 1\n\n # ok to proceed\n quotes.addQuote(isin=isin, name=name,\n ticker=ticker, market='NEW ZEALAND EXCHANGE', currency='NZD', place='NZE', country='NZ')\n if itrade_config.verbose:\n print(u'Imported {:d} lines from NEW ZEALAND EXCHANGE'.format(count))\n\n return True\n\n# ============================================================================\n# Export me\n# ============================================================================\n\ngListSymbolRegistry.register('NEW ZEALAND EXCHANGE', 'NZE', QList.any, QTag.list, Import_ListOfQuotes_NZE)\n\n# ============================================================================\n# Test ME\n# ============================================================================\n\nif __name__ == '__main__':\n setLevel(logging.INFO)\n\n from itrade_quotes import quotes\n\n Import_ListOfQuotes_NZE(quotes)\n quotes.saveListOfQuotes()\n\n# ============================================================================\n# That's all folks !\n# ============================================================================\n", "repo_name": "eternallyBaffled/itrade", "sub_path": "ext/itrade_quotes_nze.py", "file_name": "itrade_quotes_nze.py", "file_ext": "py", "file_size_in_byte": 3291, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "21", "api": [{"api_name": "itrade_config.verbose", "line_number": 33, "usage_type": "attribute"}, {"api_name": "itrade_connection.ITradeConnection", "line_number": 35, "usage_type": "call"}, {"api_name": "itrade_config.proxyHostname", "line_number": 35, "usage_type": "attribute"}, {"api_name": "itrade_config.proxyAuthentication", "line_number": 36, "usage_type": "attribute"}, {"api_name": "six.moves.map", "line_number": 43, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 43, "usage_type": "call"}, {"api_name": "itrade_logging.debug", "line_number": 55, "usage_type": "call"}, {"api_name": "itrade_config.verbose", 
"line_number": 74, "usage_type": "attribute"}, {"api_name": "itrade_ext.gListSymbolRegistry.register", "line_number": 83, "usage_type": "call"}, {"api_name": "itrade_ext.gListSymbolRegistry", "line_number": 83, "usage_type": "name"}, {"api_name": "itrade_defs.QList.any", "line_number": 83, "usage_type": "attribute"}, {"api_name": "itrade_defs.QList", "line_number": 83, "usage_type": "name"}, {"api_name": "itrade_defs.QTag.list", "line_number": 83, "usage_type": "attribute"}, {"api_name": "itrade_defs.QTag", "line_number": 83, "usage_type": "name"}, {"api_name": "itrade_logging.setLevel", "line_number": 90, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 90, "usage_type": "attribute"}, {"api_name": "itrade_quotes.quotes", "line_number": 94, "usage_type": "argument"}, {"api_name": "itrade_quotes.quotes.saveListOfQuotes", "line_number": 95, "usage_type": "call"}, {"api_name": "itrade_quotes.quotes", "line_number": 95, "usage_type": "name"}]} +{"seq_id": "38597825854", "text": "import pytest\nimport time\n\nfrom django.contrib.auth.models import User\n\nfrom deposit.hal.models import HALDepositPreferences\nfrom deposit.models import DepositRecord\nfrom deposit.models import UserPreferences\nfrom deposit.osf.models import OSFDepositPreferences\nfrom website.utils import merge_users\n\n\n@pytest.mark.usefixtures('db')\nclass TestUserMerge:\n \"\"\"\n Groups tests about merging of User objects\n \"\"\"\n\n @pytest.fixture(autouse=True)\n def preparation(self, db):\n \"\"\"\n Sets up two users that can be merged\n \"\"\"\n self.user_1 = User.objects.create_user(\n username='user_1',\n )\n # Some time between creation, to have measurable different joined dates\n time.sleep(1)\n self.user_2 = User.objects.create_user(\n username='user_2',\n email='user_2@dissem.in',\n first_name='Iulius',\n last_name='Caesar',\n )\n\n def test_monitor_relations(self):\n \"\"\"\n Monitors the relations of the User model. Of particual interest are reverse relations.\n If this test fails, then a new field or relation has been added. 
Make sure to adjust the merging\n \"\"\"\n field_names = {\n 'date_joined',\n 'depositrecord',\n 'email',\n 'emailaddress',\n 'first_name',\n 'groups',\n 'haldepositpreferences',\n 'id',\n 'inbox',\n 'is_active',\n 'is_staff',\n 'is_superuser',\n 'last_login',\n 'last_name',\n 'logentry',\n 'notificationarchive',\n 'osfdepositpreferences',\n 'paper',\n 'password',\n 'researcher',\n 'shibboleth_account',\n 'socialaccount',\n 'uploadedpdf',\n 'user_permissions',\n 'username',\n 'userpreferences'\n }\n assert { field.name for field in User._meta.get_fields() } == field_names\n\n def test_merge(self, dummy_repository, book_god_of_the_labyrinth, uploaded_pdf):\n dr = DepositRecord.objects.create(\n paper=book_god_of_the_labyrinth,\n user=self.user_2,\n repository=dummy_repository,\n status='pending',\n file=uploaded_pdf,\n )\n # Let's merge\n merge_users(self.user_1, self.user_2)\n\n # Then we do our checks\n # Fields where we can merge directly\n for field in ['email', 'first_name', 'last_name',]:\n assert getattr(self.user_1, field) == getattr(self.user_2, field)\n # Date joined, should be the older one\n assert self.user_1.date_joined <= self.user_2.date_joined\n\n dr.refresh_from_db()\n\n # We do not test all fields, as they are the same, but at least one\n assert dr.user == self.user_1\n\n\n def test_repository_preferences(self, repository):\n obo = 'spam'\n hal_rep = repository.dummy_repository()\n osf_rep = repository.dummy_repository()\n assert hal_rep.pk != osf_rep.pk\n HALDepositPreferences.objects.create(user=self.user_2, repository=hal_rep)\n OSFDepositPreferences.objects.create(user=self.user_1, repository=osf_rep)\n OSFDepositPreferences.objects.create(user=self.user_2, repository=osf_rep, on_behalf_of=obo)\n self.user_1.refresh_from_db()\n self.user_2.refresh_from_db()\n merge_users(self.user_1, self.user_2)\n self.user_1.refresh_from_db()\n assert len(self.user_1.haldepositpreferences_set.all()) == 1\n assert self.user_1.osfdepositpreferences_set.get().on_behalf_of == obo\n\n\n def test_merge_one_to_one_relation_change_obj(self):\n \"\"\"\n Does not test all fields, but the process\n \"\"\"\n email = 'user@dissem.in'\n UserPreferences.objects.create(user=self.user_2, email=email)\n merge_users(self.user_1, self.user_2)\n self.user_1.refresh_from_db()\n assert self.user_1.userpreferences.email == email\n self.user_2.refresh_from_db()\n assert hasattr(self.user_2, 'userpreferences') == False\n\n def test_merge_one_to_one_relation_change_field_values(self, dummy_repository):\n \"\"\"\n Does not test all fields, but the process\n \"\"\"\n UserPreferences.objects.create(user=self.user_1, last_repository=dummy_repository)\n email = 'user@dissem.in'\n UserPreferences.objects.create(user=self.user_2, email=email)\n merge_users(self.user_1, self.user_2)\n self.user_1.refresh_from_db()\n assert self.user_1.userpreferences.email == email\n assert self.user_1.userpreferences.last_repository == dummy_repository\n", "repo_name": "dissemin/dissemin", "sub_path": "website/tests/test_utils.py", "file_name": "test_utils.py", "file_ext": "py", "file_size_in_byte": 4719, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 166, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 24, "usage_type": "name"}, {"api_name": 
"time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 29, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User._meta.get_fields", "line_number": 69, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User._meta", "line_number": 69, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 69, "usage_type": "name"}, {"api_name": "deposit.models.DepositRecord.objects.create", "line_number": 72, "usage_type": "call"}, {"api_name": "deposit.models.DepositRecord.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "deposit.models.DepositRecord", "line_number": 72, "usage_type": "name"}, {"api_name": "website.utils.merge_users", "line_number": 80, "usage_type": "call"}, {"api_name": "deposit.hal.models.HALDepositPreferences.objects.create", "line_number": 100, "usage_type": "call"}, {"api_name": "deposit.hal.models.HALDepositPreferences.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "deposit.hal.models.HALDepositPreferences", "line_number": 100, "usage_type": "name"}, {"api_name": "deposit.osf.models.OSFDepositPreferences.objects.create", "line_number": 101, "usage_type": "call"}, {"api_name": "deposit.osf.models.OSFDepositPreferences.objects", "line_number": 101, "usage_type": "attribute"}, {"api_name": "deposit.osf.models.OSFDepositPreferences", "line_number": 101, "usage_type": "name"}, {"api_name": "deposit.osf.models.OSFDepositPreferences.objects.create", "line_number": 102, "usage_type": "call"}, {"api_name": "deposit.osf.models.OSFDepositPreferences.objects", "line_number": 102, "usage_type": "attribute"}, {"api_name": "deposit.osf.models.OSFDepositPreferences", "line_number": 102, "usage_type": "name"}, {"api_name": "website.utils.merge_users", "line_number": 105, "usage_type": "call"}, {"api_name": "deposit.models.UserPreferences.objects.create", "line_number": 116, "usage_type": "call"}, {"api_name": "deposit.models.UserPreferences.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "deposit.models.UserPreferences", "line_number": 116, "usage_type": "name"}, {"api_name": "website.utils.merge_users", "line_number": 117, "usage_type": "call"}, {"api_name": "deposit.models.UserPreferences.objects.create", "line_number": 127, "usage_type": "call"}, {"api_name": "deposit.models.UserPreferences.objects", "line_number": 127, "usage_type": "attribute"}, {"api_name": "deposit.models.UserPreferences", "line_number": 127, "usage_type": "name"}, {"api_name": "deposit.models.UserPreferences.objects.create", "line_number": 129, "usage_type": "call"}, {"api_name": "deposit.models.UserPreferences.objects", "line_number": 129, "usage_type": "attribute"}, {"api_name": "deposit.models.UserPreferences", "line_number": 129, "usage_type": "name"}, {"api_name": "website.utils.merge_users", "line_number": 130, "usage_type": "call"}, {"api_name": "pytest.mark.usefixtures", "line_number": 13, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 13, "usage_type": "attribute"}]} +{"seq_id": "10415137018", "text": "from contextlib import contextmanager\nfrom typing import Any, MutableMapping\nimport peewee as pw\n\n\n@contextmanager\ndef 
bind_all(database: pw.Database):\n with database.bind_ctx(ALL_TABLES):\n yield\n\n\nclass Settings(pw.Model):\n \"\"\"\n ORM model of the Settings table\n\n - may contain info about key derivation\n - version info?\n - values are text (unicode)\n - store complex values as json\n \"\"\"\n key = pw.CharField(unique=True)\n value = pw.TextField()\n\n\nclass AsymKeys(pw.Model):\n \"\"\" ORM model for asymmetrically encrypted keys.\n \"\"\"\n key_id = pw.AutoField()\n value = pw.TextField()\n\n\nclass Files(pw.Model):\n file_id = pw.AutoField()\n path = pw.BlobField()\n encrypted_file_path = pw.TextField(null=True)\n n_chunks = pw.IntegerField(default=-1)\n is_physical_file = pw.BooleanField(default=True)\n\n def to_dict(self) -> MutableMapping[str, Any]:\n return {\n 'file_id': self.file_id,\n 'path': self.path,\n 'encrypted_file_path': self.encrypted_file_path,\n 'n_chunks': self.n_chunks,\n 'is_physical_file': self.is_physical_file\n }\n\n\nclass Chunks(pw.Model):\n fk_file_id = pw.ForeignKeyField(Files, field='file_id')\n i_chunk = pw.IntegerField()\n content = pw.BlobField()\n\n\nALL_TABLES = [Settings, Files, AsymKeys, Chunks]\n", "repo_name": "matthiashuschle/cryp-to-go", "sub_path": "cryp_to_go/db_models.py", "file_name": "db_models.py", "file_ext": "py", "file_size_in_byte": 1350, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "peewee.Database", "line_number": 7, "usage_type": "attribute"}, {"api_name": "contextlib.contextmanager", "line_number": 6, "usage_type": "name"}, {"api_name": "peewee.Model", "line_number": 12, "usage_type": "attribute"}, {"api_name": "peewee.CharField", "line_number": 21, "usage_type": "call"}, {"api_name": "peewee.TextField", "line_number": 22, "usage_type": "call"}, {"api_name": "peewee.Model", "line_number": 25, "usage_type": "attribute"}, {"api_name": "peewee.AutoField", "line_number": 28, "usage_type": "call"}, {"api_name": "peewee.TextField", "line_number": 29, "usage_type": "call"}, {"api_name": "peewee.Model", "line_number": 32, "usage_type": "attribute"}, {"api_name": "peewee.AutoField", "line_number": 33, "usage_type": "call"}, {"api_name": "peewee.BlobField", "line_number": 34, "usage_type": "call"}, {"api_name": "peewee.TextField", "line_number": 35, "usage_type": "call"}, {"api_name": "peewee.IntegerField", "line_number": 36, "usage_type": "call"}, {"api_name": "peewee.BooleanField", "line_number": 37, "usage_type": "call"}, {"api_name": "typing.MutableMapping", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 39, "usage_type": "name"}, {"api_name": "peewee.Model", "line_number": 49, "usage_type": "attribute"}, {"api_name": "peewee.ForeignKeyField", "line_number": 50, "usage_type": "call"}, {"api_name": "peewee.IntegerField", "line_number": 51, "usage_type": "call"}, {"api_name": "peewee.BlobField", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "8680135524", "text": "from typing import List\n\nfrom src.core.enums import SearchEnum\nfrom src.dto.user import UserDto\nfrom src.interface.repository.post import PostStorageInterface\nfrom src.schema.post import DeletePostRequest, GetPostsResponse, GetTagsResponse\n\n\nclass PostUseCase:\n\n def __init__(self, repository: PostStorageInterface):\n self.repo = repository\n\n async def get_tags(self, string: str) -> GetTagsResponse:\n tags = await self.repo.get_tags(string=string)\n if not tags:\n tags = []\n tags = [tag.name for tag in tags]\n return GetTagsResponse(tags=tags)\n\n 
async def get_posts(self, user: UserDto, search_type: SearchEnum,\n search_filter: List[str] | str | None, page: int) -> GetPostsResponse:\n # check search type/filter\n if isinstance(search_filter, str):\n posts, n_posts = await self.repo.get_posts_by_title(\n user_id=user.id,\n string=search_filter,\n search_type=search_type,\n offset=page,\n )\n elif isinstance(search_filter, List):\n posts, n_posts = await self.repo.get_posts_by_tags(\n user_id=user.id,\n tags=search_filter,\n search_type=search_type,\n offset=page,\n )\n else:\n posts, n_posts = await self.repo.get_posts(\n user_id=user.id,\n search_type=search_type,\n offset=page,\n )\n return GetPostsResponse(\n posts=posts,\n total=n_posts\n )\n\n async def delete_post(self, user: UserDto, data: DeletePostRequest):\n await self.repo.delete_post(\n user_id=user.id,\n post_id=data.id\n )\n", "repo_name": "iurii-umnov/it-storage", "sub_path": "API/src/usecase/blog/post.py", "file_name": "post.py", "file_ext": "py", "file_size_in_byte": 1782, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "src.interface.repository.post.PostStorageInterface", "line_number": 11, "usage_type": "name"}, {"api_name": "src.schema.post.GetTagsResponse", "line_number": 19, "usage_type": "call"}, {"api_name": "src.schema.post.GetTagsResponse", "line_number": 14, "usage_type": "name"}, {"api_name": "src.dto.user.UserDto", "line_number": 21, "usage_type": "name"}, {"api_name": "src.core.enums.SearchEnum", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 31, "usage_type": "argument"}, {"api_name": "src.schema.post.GetPostsResponse", "line_number": 44, "usage_type": "call"}, {"api_name": "src.schema.post.GetPostsResponse", "line_number": 22, "usage_type": "name"}, {"api_name": "src.dto.user.UserDto", "line_number": 49, "usage_type": "name"}, {"api_name": "src.schema.post.DeletePostRequest", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "70864981172", "text": "from typing import List\n\nfrom fastapi import HTTPException\nfrom sqlalchemy import select\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom starlette import status\n\nfrom app.database.db_models.pizzeria_tables import PositionsModel, ProductsModel\nfrom app.dto.products.payload import RelatedPositionModel, NewProductPayload\n\n\nasync def check_exists_positions(\n positions: List[RelatedPositionModel],\n db: AsyncSession,\n):\n position_ids = [position.position_id for position in positions]\n position_stmt = select(PositionsModel.id).where(PositionsModel.id.in_(position_ids))\n exists_positions = await db.scalars(position_stmt)\n exist_positions_result = exists_positions.all()\n if len(position_ids) != len(exist_positions_result):\n difference_position = set(position_ids) - set(exist_positions_result)\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=f\"Position {difference_position} not represented in db\"\n )\n\n\nasync def check_for_exists_product_by_name(\n product_name: str,\n db: AsyncSession,\n):\n query = select(ProductsModel).where(ProductsModel.name == product_name)\n if await db.scalar(query):\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"Product name already exists\",\n )\n\n\nasync def check_new_product_properties(\n new_product_payload: NewProductPayload,\n db: AsyncSession,\n):\n await check_for_exists_product_by_name(db=db, 
product_name=new_product_payload.name)\n await check_exists_positions(db=db, positions=new_product_payload.related_positions)\n", "repo_name": "Pelykh-Ilya/pizzeria_v3.0", "sub_path": "pizzeria_app/app/services/products.py", "file_name": "products.py", "file_ext": "py", "file_size_in_byte": 1651, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.List", "line_number": 13, "usage_type": "name"}, {"api_name": "app.dto.products.payload.RelatedPositionModel", "line_number": 13, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 14, "usage_type": "name"}, {"api_name": "sqlalchemy.select", "line_number": 17, "usage_type": "call"}, {"api_name": "app.database.db_models.pizzeria_tables.PositionsModel.id", "line_number": 17, "usage_type": "attribute"}, {"api_name": "app.database.db_models.pizzeria_tables.PositionsModel", "line_number": 17, "usage_type": "name"}, {"api_name": "app.database.db_models.pizzeria_tables.PositionsModel.id.in_", "line_number": 17, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 22, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_400_BAD_REQUEST", "line_number": 23, "usage_type": "attribute"}, {"api_name": "starlette.status", "line_number": 23, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 30, "usage_type": "name"}, {"api_name": "sqlalchemy.select", "line_number": 32, "usage_type": "call"}, {"api_name": "app.database.db_models.pizzeria_tables.ProductsModel", "line_number": 32, "usage_type": "argument"}, {"api_name": "app.database.db_models.pizzeria_tables.ProductsModel.name", "line_number": 32, "usage_type": "attribute"}, {"api_name": "fastapi.HTTPException", "line_number": 34, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_400_BAD_REQUEST", "line_number": 35, "usage_type": "attribute"}, {"api_name": "starlette.status", "line_number": 35, "usage_type": "name"}, {"api_name": "app.dto.products.payload.NewProductPayload", "line_number": 41, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "38115436226", "text": "#!/usr/bin/env python\nfrom conf.app_settings import APP_LOGGER\nfrom kook.models import Dump\nimport json\nimport kookoo\nimport os\nimport subprocess\nimport web\n\nurls = (\n\n '/status', 'status',\n '/attempt_call', 'attempt_call',\n '/responses', 'responses',\n )\napp = web.application(urls, globals())\n\nSID={}\nclass status:\n def POST(self):\n i = web.input()\n Dump.objects.get_or_create(type='STATUS',data=json.dumps(i))\n APP_LOGGER.debug(\"KOOKOO SENT STATUS:\"+json.dumps(i))\n return 'OK'\n\nclass responses:\n def GET(self):\n from datetime import datetime\n x=datetime.now() \n data= Dump.objects.filter(type__in=['INTERESTED','CALL BACK'],timestamp__day=x.day,timestamp__year=x.year,timestamp__month=x.month).order_by('type','id')\n x='<html><head><script type=\"text/JavaScript\">function timedRefresh(timeoutPeriod) { setTimeout(\"location.reload(true);\",timeoutPeriod);} </script></head><body onload=\"JavaScript:timedRefresh(10000);\"><table border=\"6\">' \n for e in data:\n x=x+\"<tr><td>\"+str(e.id)+\"</td><td>\"+e.type+\"</td><td>\"+e.data+\"</td></tr>\"\n \n return x+\"</table> </body></html>\"\n \n \n\n\nclass attempt_call:\n def GET(self):\n from conf.app_settings import LOCALT_PATH,AUDIO_FILE\n web.header('Content-Type', 'text/xml')\n input= web.input()\n 
APP_LOGGER.debug(\"KOOKOO CALLED ME:\"+json.dumps(input))\n# Dump.objects.get_or_create(type=\"INPUT\",data=json.dumps(input))\n if input.has_key('event'):\n if input['event']=='NewCall':\n SID[input['sid']]=input['cid']\n Dump.objects.get_or_create(type='NEW CALL',data=json.dumps(input))\n r = kookoo.Response()\n g = r.append(kookoo.CollectDtmf(maxDigits=1))\n g.append(kookoo.PlayAudio(LOCALT_PATH+'/static/'+AUDIO_FILE))\n return r\n if input['event']=='GotDTMF':\n digit=input['data']\n if digit=='1':\n Dump.objects.create(type='INTERESTED',data=SID[input['sid']])\n r = kookoo.Response()\n r.addPlayText('Thank you, we will call you back in 5 minutes')\n r.addHangup()\n return r\n if digit=='2':\n Dump.objects.create(type='CALL BACK',data=SID[input['sid']])\n r = kookoo.Response()\n r.addPlayText('Thank You')\n r.addHangup()\n return r\n if digit=='3':\n Dump.objects.create(type='NOT INTERESTED',data=SID[input['sid']])\n r = kookoo.Response()\n r.addPlayText('Thank You')\n r.addHangup()\n return r\n if digit=='4':\n Dump.objects.get_or_create(type='REPLAYED',data=SID[input['sid']])\n r = kookoo.Response()\n g = r.append(kookoo.CollectDtmf(maxDigits=1))\n g.append(kookoo.PlayAudio(LOCALT_PATH+'/static/'+AUDIO_FILE))\n return r\n else:\n return '<response>invalid call</response>'\n\ndef main():\n app.run()\n return 0\n\nif __name__ == '__main__': \n main()\n ", "repo_name": "anaved/kookoo-caller", "sub_path": "kookoocaller/kookoo_server.py", "file_name": "kookoo_server.py", "file_ext": "py", "file_size_in_byte": 3346, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "web.application", "line_number": 16, "usage_type": "call"}, {"api_name": "web.input", "line_number": 21, "usage_type": "call"}, {"api_name": "kook.models.Dump.objects.get_or_create", "line_number": 22, "usage_type": "call"}, {"api_name": "kook.models.Dump.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "kook.models.Dump", "line_number": 22, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 22, "usage_type": "call"}, {"api_name": "conf.app_settings.APP_LOGGER.debug", "line_number": 23, "usage_type": "call"}, {"api_name": "conf.app_settings.APP_LOGGER", "line_number": 23, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "name"}, {"api_name": "kook.models.Dump.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "kook.models.Dump.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "kook.models.Dump", "line_number": 30, "usage_type": "name"}, {"api_name": "web.header", "line_number": 43, "usage_type": "call"}, {"api_name": "web.input", "line_number": 44, "usage_type": "call"}, {"api_name": "conf.app_settings.APP_LOGGER.debug", "line_number": 45, "usage_type": "call"}, {"api_name": "conf.app_settings.APP_LOGGER", "line_number": 45, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 45, "usage_type": "call"}, {"api_name": "kook.models.Dump.objects.get_or_create", "line_number": 50, "usage_type": "call"}, {"api_name": "kook.models.Dump.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "kook.models.Dump", "line_number": 50, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 50, "usage_type": "call"}, {"api_name": "kookoo.Response", "line_number": 51, 
"usage_type": "call"}, {"api_name": "kookoo.CollectDtmf", "line_number": 52, "usage_type": "call"}, {"api_name": "kookoo.PlayAudio", "line_number": 53, "usage_type": "call"}, {"api_name": "conf.app_settings.LOCALT_PATH", "line_number": 53, "usage_type": "name"}, {"api_name": "conf.app_settings.AUDIO_FILE", "line_number": 53, "usage_type": "name"}, {"api_name": "kook.models.Dump.objects.create", "line_number": 58, "usage_type": "call"}, {"api_name": "kook.models.Dump.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "kook.models.Dump", "line_number": 58, "usage_type": "name"}, {"api_name": "kookoo.Response", "line_number": 59, "usage_type": "call"}, {"api_name": "kook.models.Dump.objects.create", "line_number": 64, "usage_type": "call"}, {"api_name": "kook.models.Dump.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "kook.models.Dump", "line_number": 64, "usage_type": "name"}, {"api_name": "kookoo.Response", "line_number": 65, "usage_type": "call"}, {"api_name": "kook.models.Dump.objects.create", "line_number": 70, "usage_type": "call"}, {"api_name": "kook.models.Dump.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "kook.models.Dump", "line_number": 70, "usage_type": "name"}, {"api_name": "kookoo.Response", "line_number": 71, "usage_type": "call"}, {"api_name": "kook.models.Dump.objects.get_or_create", "line_number": 76, "usage_type": "call"}, {"api_name": "kook.models.Dump.objects", "line_number": 76, "usage_type": "attribute"}, {"api_name": "kook.models.Dump", "line_number": 76, "usage_type": "name"}, {"api_name": "kookoo.Response", "line_number": 77, "usage_type": "call"}, {"api_name": "kookoo.CollectDtmf", "line_number": 78, "usage_type": "call"}, {"api_name": "kookoo.PlayAudio", "line_number": 79, "usage_type": "call"}, {"api_name": "conf.app_settings.LOCALT_PATH", "line_number": 79, "usage_type": "name"}, {"api_name": "conf.app_settings.AUDIO_FILE", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "25264246366", "text": "\"\"\"Change Booster Model property name\n\nRevision ID: c8a51a6d773b\nRevises: b205c56b4e1a\nCreate Date: 2023-04-27 02:04:22.343881\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c8a51a6d773b'\ndown_revision = 'b205c56b4e1a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('boosters', schema=None) as batch_op:\n batch_op.add_column(sa.Column('deck_id', sa.Integer(), nullable=False))\n batch_op.drop_constraint('boosters_deck_name_fkey', type_='foreignkey')\n batch_op.create_foreign_key(None, 'decks', ['deck_id'], ['id'])\n batch_op.drop_column('deck_name')\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table('boosters', schema=None) as batch_op:\n batch_op.add_column(sa.Column('deck_name', sa.INTEGER(), autoincrement=False, nullable=False))\n batch_op.drop_constraint(None, type_='foreignkey')\n batch_op.create_foreign_key('boosters_deck_name_fkey', 'decks', ['deck_name'], ['id'])\n batch_op.drop_column('deck_id')\n\n # ### end Alembic commands ###\n", "repo_name": "vladygk/Softuni-Flask", "sub_path": "migrations/versions/c8a51a6d773b_change_booster_model_property_name.py", "file_name": "c8a51a6d773b_change_booster_model_property_name.py", "file_ext": "py", "file_size_in_byte": 1265, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "alembic.op.batch_alter_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op.batch_alter_table", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 32, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "30565144137", "text": "from fastapi import APIRouter, HTTPException\nfrom datetime import date, datetime\n\nfrom models.price_data import SalesRecords\n\n\nrouter = APIRouter(prefix=\"/api\")\nob = SalesRecords()\n\n\n@router.get(\"/\")\nasync def get_basic():\n return {'hey': 1}\n\n\n@router.get(\"/total_items\")\nasync def get_total_items(start_date: str, end_date: str, department: str) -> int:\n start_date = datetime.strptime(start_date, \"%Y-%m-%d\").date()\n end_date = datetime.strptime(end_date, \"%Y-%m-%d\").date()\n count = ob.getProductsInRange(start_date, end_date, department)\n return count\n\n\n@router.get(\"/nth_most_total_item\")\nasync def get_nth_most_total_item(item_by: str, start_date: str, end_date: str, n: int) -> str:\n\n if (item_by != \"quantity\" and item_by != \"price\"):\n raise HTTPException(status_code=400, detail=\"Invalid item_by value\")\n\n start_date = datetime.strptime(start_date, \"%Y-%m-%d\").date()\n end_date = datetime.strptime(end_date, \"%Y-%m-%d\").date()\n item = ob.getNthMostItem(item_by, start_date, end_date, n)\n return item\n\n\n@router.get(\"/percentage_of_department_wise_sold_items\")\nasync def get_percentage_of_department_wise_sold_items(start_date: str, end_date: str):\n start_date = datetime.strptime(start_date, \"%Y-%m-%d\").date()\n end_date = datetime.strptime(end_date, \"%Y-%m-%d\").date()\n departments_sales_percentage = ob.getDepartmentWiseSoldPercentage(\n start_date, end_date)\n\n return departments_sales_percentage\n\n\n@router.get(\"/monthly_sales\")\nasync def get_monthly_sales(product: str, year: int):\n monthlyProductSales = ob.getMonthlyProductSales(product, year)\n return monthlyProductSales\n", "repo_name": "AshminJayson/Volopay_Price_API", "sub_path": "routers/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 1647, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "fastapi.APIRouter", "line_number": 7, "usage_type": "call"}, {"api_name": "models.price_data.SalesRecords", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 18, 
"usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 38, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "74358358454", "text": "from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col, count\nfrom pyspark.sql.functions import desc, year, sum, lit, when, min, avg, max, col, split, explode, count, lower\n\nspark = SparkSession.builder.appName(\"test\").getOrCreate()\n\n# Đọc dữ liệu từ tệp CSV\ndataQg = spark.read.format('csv').option('header', 'true').load(\n 'data/qg_noc.csv', header=True, inferSchema=True)\n\ndataOlympic = spark.read.format('csv').option('header', 'true').load(\n 'data/vdv_olympics.csv', header=True, inferSchema=True)\n\n# Tạo cột mùa dựa trên điều kiện\ndataOlympic = dataOlympic.withColumn(\n \"Season\", when(col(\"Season\") == \"Summer\", \"Summer\").otherwise(\"Winter\"))\n\n# Lọc và nhóm theo Năm và Mùa\nrussian_athletes_1990s = dataOlympic.filter(\n (col(\"Year\") >= 1990) & (col(\"Year\") < 2000) & (col(\"NOC\") == \"RUS\"))\n\nrussian_athletes_1990s_grouped = russian_athletes_1990s.groupBy(\n \"Year\", \"Season\").agg(count(\"*\").alias(\"TotalAthletes\"))\n\nrussian_athletes_1990s_grouped.show()\n# Đóng Spark Session\nspark.stop()\n", "repo_name": "Malware87/bigdatalearning", "sub_path": "C6.py", "file_name": "C6.py", "file_ext": "py", "file_size_in_byte": 1061, "program_lang": "python", "lang": "vi", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 5, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 5, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.when", "line_number": 16, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 16, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 20, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.count", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "22058745526", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport json\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom argparse import ArgumentParser\n\nfrom flask import Flask, make_response\nfrom flask_cors import CORS # pylint:disable=no-name-in-module,import-error\nfrom flask_restful import Api, Resource, reqparse\n\nimport guessit\nfrom guessit.jsonutils import GuessitEncoder\n\nimport local\nimport omdbi\n\n\nclass MisOmdb(Resource):\n def _impl(self, location):\n parser = reqparse.RequestParser()\n 
parser.add_argument('title', action='store', required=True, help='Title to search', location=location)\n parser.add_argument('season', action='store', help='Season info', location=location)\n parser.add_argument('episode', action='store', help='Episode info', location=location)\n parser.add_argument('type', action='store', help='Video type', location=location)\n parser.add_argument('year', action='store', help='Video year', location=location)\n args = parser.parse_args()\n\n return omdbi.inspect(args.title, args.year, args.type, args.season, args.episode)\n\n def get(self):\n return self._impl('args')\n\n def post(self):\n return self._impl('json')\n\n\nclass MisFile(Resource):\n def _impl(self, location):\n parser = reqparse.RequestParser()\n parser.add_argument('filename', action='store', required=True, help='Filename to parse', location=location)\n parser.add_argument('options', action='store', help='Guessit options', location=location)\n args = parser.parse_args()\n\n return guessit.guessit(args.filename, args.options)\n\n def get(self):\n return self._impl('args')\n\n def post(self):\n return self._impl('json')\n\n\ndef take(obj, key):\n if key in obj:\n return obj[key]\n return None\n\n\ndef want(guess, title=None):\n if not guess:\n return\n return omdbi.inspect(title or take(guess, 'title'),\n take(guess, 'year'),\n take(guess, 'type'),\n take(guess, 'season'),\n take(guess, 'episode'))\n\n\nclass MisInfo(Resource):\n def _impl(self, location):\n parser = reqparse.RequestParser()\n parser.add_argument('filename', action='store', required=True, help='Filename to parse', location=location)\n parser.add_argument('options', action='store', help='Guessit options', location=location)\n args = parser.parse_args()\n\n guess = guessit.guessit(args.filename, args.options)\n value = want(guess)\n if value:\n return value\n guessLocal = local.inspect(args.filename)\n value = want(guessLocal)\n if value:\n return value\n\n value = want(guess, local.modContractionsGeneric(guess['title'])) or \\\n want(guess, local.modContractionsWere(guess['title'])) or \\\n want(guess, local.modPossesiveSingular(guess['title'])) or \\\n want(guess, local.modPossesivePlural(guess['title']))\n if value:\n return value\n\n if guessLocal:\n value = want(guessLocal, local.modContractionsGeneric(guess['title'])) or \\\n want(guessLocal, local.modContractionsWere(guess['title'])) or \\\n want(guessLocal, local.modPossesiveSingular(guess['title'])) or \\\n want(guessLocal, local.modPossesivePlural(guess['title']))\n if value:\n return value\n\n guessFile = local.inspect(args.filename, True)\n value = want(guessFile)\n if value:\n return value\n\n if guessFile:\n value = want(guessFile, local.modContractionsGeneric(guess['title'])) or \\\n want(guessFile, local.modContractionsWere(guess['title'])) or \\\n want(guessFile, local.modPossesiveSingular(guess['title'])) or \\\n want(guessFile, local.modPossesivePlural(guess['title']))\n if value:\n return value\n\n if guessLocal and guessLocal['type'] == 'episode':\n value = omdbi.inspect(take(guess, 'title'),\n take(guess, 'year'),\n take(guessLocal, 'type'),\n take(guessLocal, 'season'),\n take(guessLocal, 'episode')) or \\\n omdbi.inspect(take(guessLocal, 'title'),\n take(guess, 'year'),\n take(guessLocal, 'type'),\n take(guessLocal, 'season'),\n take(guessLocal, 'episode')) or \\\n omdbi.inspect(local.modContractionsGeneric(take(guess, 'title')),\n take(guess, 'year'),\n take(guessLocal, 'type'),\n take(guessLocal, 'season'),\n take(guessLocal, 'episode')) or \\\n 
omdbi.inspect(local.modContractionsGeneric(take(guessLocal, 'title')),\n                          take(guess, 'year'),\n                          take(guessLocal, 'type'),\n                          take(guessLocal, 'season'),\n                          take(guessLocal, 'episode')) or \\\n                omdbi.inspect(local.modContractionsWere(take(guess, 'title')),\n                          take(guess, 'year'),\n                          take(guessLocal, 'type'),\n                          take(guessLocal, 'season'),\n                          take(guessLocal, 'episode')) or \\\n                omdbi.inspect(local.modContractionsWere(take(guessLocal, 'title')),\n                          take(guess, 'year'),\n                          take(guessLocal, 'type'),\n                          take(guessLocal, 'season'),\n                          take(guessLocal, 'episode')) or \\\n                omdbi.inspect(local.modPossesiveSingular(take(guess, 'title')),\n                          take(guess, 'year'),\n                          take(guessLocal, 'type'),\n                          take(guessLocal, 'season'),\n                          take(guessLocal, 'episode')) or \\\n                omdbi.inspect(local.modPossesiveSingular(take(guessLocal, 'title')),\n                          take(guess, 'year'),\n                          take(guessLocal, 'type'),\n                          take(guessLocal, 'season'),\n                          take(guessLocal, 'episode')) or \\\n                omdbi.inspect(local.modPossesivePlural(take(guess, 'title')),\n                          take(guess, 'year'),\n                          take(guessLocal, 'type'),\n                          take(guessLocal, 'season'),\n                          take(guessLocal, 'episode')) or \\\n                omdbi.inspect(local.modPossesivePlural(take(guessLocal, 'title')),\n                          take(guess, 'year'),\n                          take(guessLocal, 'type'),\n                          take(guessLocal, 'season'),\n                          take(guessLocal, 'episode'))\n\n        return value\n\n    def get(self):\n        return self._impl('args')\n\n    def post(self):\n        return self._impl('json')\n\n\nopts = ArgumentParser()\n\nopts.add_argument('-l', '--listening-address', dest='listening_address', default='0.0.0.0',\n                  help='Listening IP Address of the HTTP Server.')\nopts.add_argument('-p', '--listening-port', dest='listening_port', default=None,\n                  help='Listening TCP Port of the HTTP Server.')\n\nparsed_opts = opts.parse_args()\n\napp = Flask('MIS')\nCORS(app)\napi = Api(app)\napp.debug = os.environ.get('DEBUG', False)\n\n@api.representation('application/json')\ndef output_json(data, code, headers=None):\n    resp = make_response(json.dumps(data, cls=GuessitEncoder, ensure_ascii=False), code)\n    resp.headers.extend(headers or {})\n    return resp\n\nif not app.debug:\n    handler = RotatingFileHandler('guessit-rest.log', maxBytes=5 * 1024 * 1024, backupCount=5)\n    handler.setLevel(logging.DEBUG)\n    app.logger.addHandler(handler)\n\napi.add_resource(MisInfo, '/')\napi.add_resource(MisFile, '/file/')\napi.add_resource(MisOmdb, '/omdb/')\n\napp.run(host=parsed_opts.listening_address, port=parsed_opts.listening_port)\n", "repo_name": "edzius/media-info", "sub_path": "mis.py", "file_name": "mis.py", "file_ext": "py", "file_size_in_byte": 8524, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "flask_restful.Resource", "line_number": 21, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 23, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 23, "usage_type": "name"}, {"api_name": "omdbi.inspect", "line_number": 31, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 40, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 42, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 42, "usage_type": "name"}, {"api_name": "guessit.guessit", "line_number": 47, "usage_type": "call"}, {"api_name": "omdbi.inspect", "line_number": 65, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 72, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", 
"line_number": 74, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 74, "usage_type": "name"}, {"api_name": "guessit.guessit", "line_number": 79, "usage_type": "call"}, {"api_name": "local.inspect", "line_number": 83, "usage_type": "call"}, {"api_name": "local.modContractionsGeneric", "line_number": 88, "usage_type": "call"}, {"api_name": "local.modContractionsWere", "line_number": 89, "usage_type": "call"}, {"api_name": "local.modPossesiveSingular", "line_number": 90, "usage_type": "call"}, {"api_name": "local.modPossesivePlural", "line_number": 91, "usage_type": "call"}, {"api_name": "local.modContractionsGeneric", "line_number": 96, "usage_type": "call"}, {"api_name": "local.modContractionsWere", "line_number": 97, "usage_type": "call"}, {"api_name": "local.modPossesiveSingular", "line_number": 98, "usage_type": "call"}, {"api_name": "local.modPossesivePlural", "line_number": 99, "usage_type": "call"}, {"api_name": "local.inspect", "line_number": 103, "usage_type": "call"}, {"api_name": "local.modContractionsGeneric", "line_number": 109, "usage_type": "call"}, {"api_name": "local.modContractionsWere", "line_number": 110, "usage_type": "call"}, {"api_name": "local.modPossesiveSingular", "line_number": 111, "usage_type": "call"}, {"api_name": "local.modPossesivePlural", "line_number": 112, "usage_type": "call"}, {"api_name": "omdbi.inspect", "line_number": 117, "usage_type": "call"}, {"api_name": "omdbi.inspect", "line_number": 122, "usage_type": "call"}, {"api_name": "omdbi.inspect", "line_number": 127, "usage_type": "call"}, {"api_name": "local.modContractionsGeneric", "line_number": 127, "usage_type": "call"}, {"api_name": "omdbi.inspect", "line_number": 132, "usage_type": "call"}, {"api_name": "local.modContractionsGeneric", "line_number": 132, "usage_type": "call"}, {"api_name": "omdbi.inspect", "line_number": 137, "usage_type": "call"}, {"api_name": "local.modContractionsWere", "line_number": 137, "usage_type": "call"}, {"api_name": "omdbi.inspect", "line_number": 142, "usage_type": "call"}, {"api_name": "local.modContractionsWere", "line_number": 142, "usage_type": "call"}, {"api_name": "omdbi.inspect", "line_number": 147, "usage_type": "call"}, {"api_name": "local.modPossesiveSingular", "line_number": 147, "usage_type": "call"}, {"api_name": "omdbi.inspect", "line_number": 152, "usage_type": "call"}, {"api_name": "local.modPossesiveSingular", "line_number": 152, "usage_type": "call"}, {"api_name": "omdbi.inspect", "line_number": 157, "usage_type": "call"}, {"api_name": "local.modPossesivePlural", "line_number": 157, "usage_type": "call"}, {"api_name": "omdbi.inspect", "line_number": 162, "usage_type": "call"}, {"api_name": "local.modPossesivePlural", "line_number": 162, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 177, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 186, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 187, "usage_type": "call"}, {"api_name": "flask_restful.Api", "line_number": 188, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 189, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 189, "usage_type": "attribute"}, {"api_name": "flask.make_response", "line_number": 193, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 193, "usage_type": "call"}, {"api_name": "guessit.jsonutils.GuessitEncoder", "line_number": 193, "usage_type": "name"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 198, 
"usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 199, "usage_type": "attribute"}]} +{"seq_id": "30059728287", "text": "\"\"\"'\nclass MyTestCase(unittest.TestCase):\n def test_something(self):\n self.assertEqual(True, False)\n\n\nif __name__ == '__main__':\n unittest.main()\n\"\"\"\n\nfrom biosim.Cell import Lowland, Highland, Desert, Water\nfrom biosim.Animals import Herbivore, Carnivore\nimport pytest\n\n\n@pytest.fixture(autouse=True)\ndef reset_parameters():\n Herbivore.p['F'] = 10\n Carnivore.p['F'] = 50\n Lowland.af = 800\n\n\n@pytest.fixture\ndef list_herbivores_and_carnivores():\n return [{'species': 'Herbivore', 'weight': 35, 'age': 5},\n {'species': 'Herbivore', 'weight': 41, 'age': 8},\n {'species': 'Herbivore', 'weight': 50, 'age': 9},\n {'species': 'Carnivore', 'weight': 70, 'age': 10},\n {'species': 'Herbivore', 'weight': 10, 'age': 3},\n {'species': 'Herbivore', 'weight': 60, 'age': 3}]\n\n\n@pytest.fixture\ndef list_herbivores():\n return [{'species': 'Herbivore', 'weight': 35, 'age': 5},\n {'species': 'Herbivore', 'weight': 41, 'age': 8},\n {'species': 'Herbivore', 'weight': 50, 'age': 9},\n {'species': 'Herbivore', 'weight': 10, 'age': 3},\n {'species': 'Herbivore', 'weight': 14, 'age': 3},\n {'species': 'Herbivore', 'weight': 13, 'age': 3}]\n\n\n@pytest.fixture\ndef list_carnivores():\n return [{'species': 'Carnivore', 'weight': 35, 'age': 5},\n {'species': 'Carnivore', 'weight': 41, 'age': 8},\n {'species': 'Carnivore', 'weight': 50, 'age': 9},\n {'species': 'Carnivore', 'weight': 10, 'age': 3},\n {'species': 'Carnivore', 'weight': 14, 'age': 3},\n {'species': 'Carnivore', 'weight': 13, 'age': 3}]\n\n\n@pytest.fixture\ndef list_herbivore_long():\n return [{'species': 'Herbivore', 'weight': 65, 'age': 3},\n {'species': 'Herbivore', 'weight': 41, 'age': 3},\n {'species': 'Herbivore', 'weight': 50, 'age': 3},\n {'species': 'Herbivore', 'weight': 40, 'age': 3},\n {'species': 'Herbivore', 'weight': 41, 'age': 3},\n {'species': 'Herbivore', 'weight': 50, 'age': 9},\n {'species': 'Herbivore', 'weight': 67, 'age': 5},\n {'species': 'Herbivore', 'weight': 41, 'age': 8},\n {'species': 'Herbivore', 'weight': 50, 'age': 9}]\n\n\n@pytest.fixture\ndef list_carnivore_long():\n return [{'species': 'Carnivore', 'weight': 65, 'age': 3},\n {'species': 'Carnivore', 'weight': 41, 'age': 3},\n {'species': 'Carnivore', 'weight': 50, 'age': 3},\n {'species': 'Carnivore', 'weight': 40, 'age': 3},\n {'species': 'Carnivore', 'weight': 41, 'age': 3},\n {'species': 'Carnivore', 'weight': 50, 'age': 9},\n {'species': 'Carnivore', 'weight': 67, 'age': 5},\n {'species': 'Carnivore', 'weight': 41, 'age': 8},\n {'species': 'Carnivore', 'weight': 50, 'age': 9}]\n\n\n# tests for initial values:\ndef test_lowland_given_fodder():\n \"\"\"\n When initialising a cell it is given an amount for fodder. The fodder in Lowland is 800\n \"\"\"\n l = Lowland(population=[{'species': 'Carnivore', 'weight': 50, 'age': 9}])\n assert l.p['f_max'] == 800\n\n\ndef test_highland_given_fodder():\n \"\"\"\n When initialising a cell it is given an amount for fodder. The fodder in Highland is 300\n \"\"\"\n h = Highland(population=[{'species': 'Carnivore', 'weight': 50, 'age': 9}])\n assert h.p['f_max'] == 300\n\n\ndef test_water_unhabitable():\n \"\"\"\n An animal can not enter the water. 
It is therefore uninhabitable.\n    \"\"\"\n    w = Water(population=[])\n    assert w.habitable is False\n\n\ndef test_desert_habitable():\n    \"\"\"\n    An animal can enter the desert even though there is no food\n    \"\"\"\n    d = Desert(population=[])\n    assert d.habitable is True\n\n\n# tests for sorting_animals function\ndef test_sorting_herb(list_herbivores_and_carnivores):\n    \"\"\"\n    This is a test that checks if the herbivores get sorted in a list based on ascending\n    phi-value.\n    \"\"\"\n    l = Lowland(population=list_herbivores_and_carnivores)\n    unsorted_herbs = l.herbivores_pop\n    herbs_fitness = [k.phi for k in unsorted_herbs]\n    herbs_fitness.sort()\n    l.sorting_animals()\n    sorted_herbs_fitness = [k.phi for k in l.herbivores_pop]\n    assert herbs_fitness == sorted_herbs_fitness\n\n\ndef test_sorting_carn(list_herbivores_and_carnivores):\n    \"\"\"\n    This is a test that checks if the carnivores get sorted in a list based on descending\n    phi-value.\n    \"\"\"\n    l = Lowland(population=list_herbivores_and_carnivores)\n    unsorted_carns = l.carnivores_pop\n    carns_fitness = [k.phi for k in unsorted_carns]\n    carns_fitness.sort()\n    carns_fitness.reverse()\n    l.sorting_animals()\n    sorted_carns_fitness = [k.phi for k in l.carnivores_pop]\n    assert carns_fitness == sorted_carns_fitness\n\n\n# Tests for make_herbivores_eat function:\ndef test_eats_random(list_herbivores):\n    \"\"\"\n    The herbivores should eat in a random order. To test this, I assign all the herbs a False\n    value. I make the herbivores eat. The list is randomised, and every time they eat, the first\n    herb in the list eats and is given a True value. At the end there should be more than one\n    herb with a True value.\n    \"\"\"\n    l = Lowland(list_herbivores)\n    for herb in l.herbivores_pop:\n        herb.eaten = False\n\n    for k in range(len(l.herbivores_pop)):\n        l.make_herbivores_eat()\n        l.herbivores_pop[0].eaten = True\n\n    eaten = 0\n    for herb in l.herbivores_pop:\n        if herb.eaten:\n            eaten += 1\n\n    assert eaten > 1\n\n\ndef test_available_fodder():\n    \"\"\"\n    The available fodder should be 800 for lowland\n    \"\"\"\n    l = Lowland(population=[{'species': 'Carnivore', 'weight': 50, 'age': 9}])\n    l.make_herbivores_eat()\n    assert l.available_fodder == 800\n\n\ndef test_consumption_becomes_appetite(list_herbivores):\n    \"\"\"\n    When the herbivore has enough fodder the consumption should be the same as the appetite\n    \"\"\"\n    l = Lowland(list_herbivores)\n    l.make_herbivores_eat()\n    for herb in l.herbivores_pop:\n        assert herb.F_consumption == herb.p['F']\n\n\ndef test_update_fodder(list_herbivores):\n    \"\"\"\n    When a herbivore eats the available fodder should update. When there are six herbivores there\n    should be enough fodder for everyone. 
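The fixture sets F = 10 for herbivores, so six animals consume 60 units in total. 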
The updated fodder should then be 800 - 6 * appetite\n    \"\"\"\n    l = Lowland(list_herbivores)\n    appetite = Herbivore.p['F']\n    l.make_herbivores_eat()\n    assert l.available_fodder == 800 - len(l.herbivores_pop) * appetite\n\n\ndef test_consumption_when_little_fodder(list_herbivores):\n    \"\"\"\n    When there is too little fodder the consumption is not the same as the appetite, but rather\n    what is left of the fodder\n    \"\"\"\n    l = Lowland(list_herbivores)\n    l.p['f_max'] = 8\n    l.make_herbivores_eat()\n    l.p['f_max'] = 800\n    assert l.herbivores_pop[0].F_consumption == 8\n\n\ndef test_fodder_will_stop_at_zero(list_herbivores):\n    \"\"\"\n    When there isn't enough fodder the available fodder should stop at 0 after eating and not\n    become negative.\n    \"\"\"\n    l = Lowland(list_herbivores)\n    l.p['f_max'] = 51\n    l.make_herbivores_eat()\n    l.p['f_max'] = 800\n    assert l.available_fodder == 0\n\n\ndef test_gain_weight_after_eating_herb(list_herbivores):\n    \"\"\"\n    After the herbivore eats it should gain weight equal to beta * F (0.9 * 10 = 9 here)\n    \"\"\"\n    l = Lowland(list_herbivores)\n    weight = [k.weight for k in l.herbivores_pop]\n    l.make_herbivores_eat()\n    weight_after_eating = [k.weight for k in l.herbivores_pop]\n    weight.sort()\n    weight_after_eating.sort()\n    assert [k + 9 for k in weight] == weight_after_eating\n\n\ndef test_fitness_change_after_eating(list_herbivores):\n    \"\"\"\n    After the herbivores eat, they should gain weight and therefore have a greater fitness\n    \"\"\"\n    l = Lowland(list_herbivores)\n    init_fitness = [k.phi for k in l.herbivores_pop]\n    l.make_herbivores_eat()\n    fitness_after_eating = [k.phi for k in l.herbivores_pop]\n    init_fitness.sort()\n    fitness_after_eating.sort()\n    for k in range(len(l.herbivores_pop)):\n        assert init_fitness[k] < fitness_after_eating[k]\n\n\n\n# Tests for feed_carnivores_function\ndef test_carn_appetite(list_herbivores_and_carnivores):\n    \"\"\"\n    The carnivore's appetite is given as a parameter. It should be 50 before eating\n    \"\"\"\n    l = Lowland(list_herbivores_and_carnivores)\n    l.feed_carnivores()\n    for carn in l.carnivores_pop:\n        assert carn.p['F'] == 50\n\n\ndef test_weakest_herb_eaten_first(mocker, list_herbivores_and_carnivores):\n    \"\"\"\n    When a carnivore eats, it eats the weakest herbivore first. To check that this happens I\n    give every carnivore in the cell an appetite just above the weight of the\n    weakest herbivore. After eating we check that the herbivore is no longer in the cell\n    \"\"\"\n    mocker.patch('random.random', return_value=0.01)\n    l = Lowland(list_herbivores_and_carnivores)\n    l.sorting_animals()\n    weakest_herb = l.herbivores_pop[0]\n    for k in range(len(l.carnivores_pop)):\n        l.carnivores_pop[k].p['F'] = weakest_herb.weight + 1\n    l.feed_carnivores()\n    for k in range(len(l.carnivores_pop)):\n        l.carnivores_pop[k].p['F'] = 50\n    assert weakest_herb not in l.herbivores_pop\n\n\ndef test_strongest_carn_eats_first(mocker):\n    \"\"\"\n    When eating, the strongest carnivore always eats first. To test this I make sure the\n    herbivores have a total weight equal to one carnivore's appetite (i.e. 
50), and make the carnivores eat.\n    Later I check that the weight is the same for all the carnivores except the fittest.\n    \"\"\"\n    mocker.patch('random.random', return_value=0.01)\n    population = [{'species': 'Herbivore', 'weight': 25, 'age': 20},\n                  {'species': 'Herbivore', 'weight': 25, 'age': 20},\n                  {'species': 'Carnivore', 'weight': 50, 'age': 9},\n                  {'species': 'Carnivore', 'weight': 70, 'age': 10},\n                  {'species': 'Carnivore', 'weight': 10, 'age': 3},\n                  {'species': 'Carnivore', 'weight': 90, 'age': 3}]\n    l = Lowland(population)\n    l.sorting_animals()\n    init_weight = [carn.weight for carn in l.carnivores_pop]\n    l.feed_carnivores()\n    for k in range(1, len(l.carnivores_pop)):\n        assert l.carnivores_pop[k].weight == init_weight[k]\n\n\ndef test_eats_until_reaches_appetite(mocker):\n    \"\"\"\n    A carnivore does not stop eating until it has eaten herbs with total weight >= appetite.\n    To check this I make several carnivores, and enough herbivores for all of them to be full.\n    After they eat I check that they have all gained as much weight as they should, which is\n    appetite * beta\n    \"\"\"\n    mocker.patch('random.random', return_value=0.01)\n    population = [{'species': 'Herbivore', 'weight': 23, 'age': 5},\n                  {'species': 'Herbivore', 'weight': 23, 'age': 5},\n                  {'species': 'Herbivore', 'weight': 23, 'age': 5},\n                  {'species': 'Herbivore', 'weight': 23, 'age': 5},\n                  {'species': 'Herbivore', 'weight': 23, 'age': 5},\n                  {'species': 'Herbivore', 'weight': 23, 'age': 5},\n                  {'species': 'Carnivore', 'weight': 100, 'age': 3},\n                  {'species': 'Carnivore', 'weight': 100, 'age': 3}]\n    l = Lowland(population)\n    beta = l.carnivores_pop[0].p['beta']\n    appetite = l.carnivores_pop[0].p['F']\n    l.sorting_animals()\n    init_weight = [carn.weight for carn in l.carnivores_pop]\n    l.feed_carnivores()\n    for k in range(len(l.carnivores_pop)):\n        assert init_weight[k] + appetite * beta <= l.carnivores_pop[k].weight\n\n\ndef test_eats_until_tried_eating_all_the_herbivores(mocker):\n    \"\"\"\n    The carnivore eats until it has tried to eat all the herbivores. To test this we provide less\n    herbivore weight than the carnivores' combined appetite to see if they are all removed from the list\n    \"\"\"\n    mocker.patch('random.random', return_value=0.01)\n    population = [{'species': 'Herbivore', 'weight': 23, 'age': 5},\n                  {'species': 'Herbivore', 'weight': 23, 'age': 5},\n                  {'species': 'Herbivore', 'weight': 23, 'age': 5},\n                  {'species': 'Carnivore', 'weight': 100, 'age': 3},\n                  {'species': 'Carnivore', 'weight': 100, 'age': 3}]\n    l = Lowland(population)\n    l.feed_carnivores()\n    assert len(l.herbivores_pop) == 0\n\n\ndef test_will_not_eat_stronger_herb(mocker):\n    \"\"\"\n    If the herbivore is stronger than the carnivore, the carnivore will not be able to eat. 
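A carnivore can only kill a herbivore whose fitness is lower than its own. 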
To test\n    this the mocker is set very low and we create one strong herbivore and one weak carnivore.\n    Later we check if the herbivore is still there\n    \"\"\"\n    mocker.patch('random.random', return_value=0.001)\n    population = [{'species': 'Herbivore', 'weight': 70, 'age': 5},\n                  {'species': 'Carnivore', 'weight': 13, 'age': 5}]\n    l = Lowland(population)\n    l.feed_carnivores()\n    assert len(l.herbivores_pop) != 0\n\n\ndef test_update_fitness_after_eating_carnivores(mocker):\n    \"\"\"\n    When a carnivore eats it gains weight, and therefore needs to have greater fitness\n    \"\"\"\n    mocker.patch('random.random', return_value=0.01)\n    population = [{'species': 'Carnivore', 'weight': 70, 'age': 5},\n                  {'species': 'Carnivore', 'weight': 80, 'age': 8},\n                  {'species': 'Carnivore', 'weight': 90, 'age': 9},\n                  {'species': 'Herbivore', 'weight': 25, 'age': 3},\n                  {'species': 'Herbivore', 'weight': 25, 'age': 3},\n                  {'species': 'Herbivore', 'weight': 25, 'age': 3},\n                  {'species': 'Herbivore', 'weight': 25, 'age': 3},\n                  {'species': 'Herbivore', 'weight': 25, 'age': 3},\n                  {'species': 'Herbivore', 'weight': 25, 'age': 3}]\n    l = Lowland(population)\n    l.sorting_animals()\n    init_fitness = [carn.phi for carn in l.carnivores_pop]\n    l.feed_carnivores()\n    for k in range(len(l.carnivores_pop)):\n        assert l.carnivores_pop[k].phi > init_fitness[k]\n\n\n# Tests for newborn_animals_function for herbs\ndef test_newborn_added_to_list_herb(list_herbivore_long):\n    \"\"\"\n    When an animal gives birth the newborn must be added to the list. When there are 9 herbivores\n    the probability for birth is 1, if the weight is acceptable, and therefore 9 herbivores will be\n    added to the list.\n    \"\"\"\n    l = Lowland(list_herbivore_long)\n    length = len(l.herbivores_pop)\n    l.newborn_animals()\n    assert len(l.herbivores_pop) == length * 2\n\n\ndef test_mother_lost_weight_herb(list_herbivore_long):\n    \"\"\"\n    When an animal gives birth the mother loses weight equivalent to the weight of the\n    newborn * zeta. 
When there are 9 herbivores the probability for birth is 1, if the weight is\n    acceptable, and therefore all 9 herbivores will give birth.\n    \"\"\"\n    l = Lowland(list_herbivore_long)\n    weight = [k.weight for k in l.herbivores_pop]\n    l.newborn_animals()\n    mothers = l.herbivores_pop[0:9]\n    for k in range(len(weight)):\n        assert weight[k] > mothers[k].weight\n\n\ndef test_mother_lost_fitness_herb(list_herbivore_long):\n    \"\"\"\n    When the mother loses weight, the fitness needs to be updated.\n    \"\"\"\n    l = Lowland(list_herbivore_long)\n    fitness = [k.phi for k in l.herbivores_pop]\n    l.newborn_animals()\n    mothers = l.herbivores_pop[0:9]\n    for k in range(len(fitness)):\n        assert fitness[k] > mothers[k].phi\n\n\ndef test_criteria_for_birth_fitness_herb():\n    \"\"\"\n    A criterion for giving birth is that the weight can't be zero, and therefore the fitness can't be\n    zero.\n    \"\"\"\n    l = Lowland(population=[{'species': 'Herbivore', 'weight': 35, 'age': 5},\n                            {'species': 'Herbivore', 'weight': 0, 'age': 8},\n                            {'species': 'Herbivore', 'weight': 50, 'age': 9}])\n    for k in l.herbivores_pop:\n        if k.phi == 0:\n            assert k.will_the_animal_give_birth(n=len(l.herbivores_pop)) is False\n\n\ndef test_criteria_for_birth_weight_herb():\n    \"\"\"\n    The herbivore can't give birth if it weighs less than zeta*newborn_birth_weight\n    \"\"\"\n    l = Lowland(population=[{'species': 'Herbivore', 'weight': 35, 'age': 5},\n                            {'species': 'Herbivore', 'weight': 10, 'age': 8},\n                            {'species': 'Herbivore', 'weight': 50, 'age': 9}])\n    for k in l.herbivores_pop:\n        k.birth_probability(n=len(l.herbivores_pop))\n        if k.weight <= k.newborn_birth_weight * k.p['zeta']:\n            assert k.will_the_animal_give_birth(n=len(l.herbivores_pop)) is False\n\n\ndef test_criteria_for_birth_prob_herb():\n    \"\"\"\n    If the criteria for weight and fitness are fulfilled, p = variable.\n    variable is gamma * fitness * (N - 1)\n    \"\"\"\n    l = Lowland(population=[{'species': 'Herbivore', 'weight': 75, 'age': 5},\n                            {'species': 'Herbivore', 'weight': 60, 'age': 8},\n                            {'species': 'Herbivore', 'weight': 50, 'age': 9}])\n    for k in l.herbivores_pop:\n        if k.birth_probability(n=len(l.herbivores_pop)) != 0:\n            assert k.birth_probability(n=len(l.herbivores_pop)) == k.p['gamma'] * k.phi * (\n                    len(l.herbivores_pop) - 1)\n\n\n# Tests for newborn_animals for carns\ndef test_newborn_added_to_list_carn(list_carnivore_long):\n    \"\"\"\n    When an animal gives birth the newborn must be added to the list. When there are 9 carnivores\n    the probability for birth is 1, if the weight is acceptable, and therefore 9 carnivores will be\n    added to the list.\n    \"\"\"\n    l = Lowland(list_carnivore_long)\n    length = len(l.carnivores_pop)\n    l.newborn_animals()\n    assert len(l.carnivores_pop) == length * 2\n\n\ndef test_mother_lost_weight_carn(list_carnivore_long):\n    \"\"\"\n    When an animal gives birth the mother loses weight equivalent to the weight of the\n    newborn * zeta. 
When there are 9 carnivores the probability for birth is 1, if the weight is\n    acceptable, and therefore all 9 carnivores will give birth.\n    \"\"\"\n    l = Lowland(list_carnivore_long)\n    weight = [k.weight for k in l.carnivores_pop]\n    l.newborn_animals()\n    mothers = l.carnivores_pop[0:9]\n    for k in range(len(weight)):\n        assert weight[k] > mothers[k].weight\n\n\ndef test_mother_lost_fitness_carn(list_carnivore_long):\n    \"\"\"\n    When the mother loses weight, the fitness needs to be updated.\n    \"\"\"\n    l = Lowland(list_carnivore_long)\n    fitness = [k.phi for k in l.carnivores_pop]\n    l.newborn_animals()\n    mothers = l.carnivores_pop[0:9]\n    for k in range(len(fitness)):\n        assert fitness[k] > mothers[k].phi\n\n\ndef test_criteria_for_birth_fitness_carn():\n    \"\"\"\n    A criterion for giving birth is that the weight can't be zero, and therefore the fitness can't be\n    zero.\n    \"\"\"\n    l = Lowland(population=[{'species': 'Carnivore', 'weight': 35, 'age': 5},\n                            {'species': 'Carnivore', 'weight': 0, 'age': 8},\n                            {'species': 'Carnivore', 'weight': 50, 'age': 9}])\n    for k in l.carnivores_pop:\n        if k.phi == 0:\n            assert k.will_the_animal_give_birth(n=len(l.carnivores_pop)) is False\n\n\ndef test_criteria_for_birth_weight_carn():\n    \"\"\"\n    The carnivore can't give birth if it weighs less than zeta*newborn_birth_weight\n    \"\"\"\n    l = Lowland(population=[{'species': 'Carnivore', 'weight': 35, 'age': 5},\n                            {'species': 'Carnivore', 'weight': 10, 'age': 8},\n                            {'species': 'Carnivore', 'weight': 50, 'age': 9}])\n    for k in l.carnivores_pop:\n        k.birth_probability(n=len(l.carnivores_pop))\n        if k.weight <= k.newborn_birth_weight * k.p['zeta']:\n            assert k.will_the_animal_give_birth(n=len(l.carnivores_pop)) is False\n\n\ndef test_criteria_for_birth_prob_carn():\n    \"\"\"\n    If the criteria for weight and fitness are fulfilled, p = variable.\n    variable is gamma * fitness * (N - 1). 
If variable > 1, p = 1\n    \"\"\"\n    l = Lowland(population=[{'species': 'Carnivore', 'weight': 75, 'age': 5},\n                            {'species': 'Carnivore', 'weight': 60, 'age': 8},\n                            {'species': 'Carnivore', 'weight': 50, 'age': 9}])\n    for k in l.carnivores_pop:\n        if k.birth_probability(n=len(l.carnivores_pop)) != 0:\n            if k.p['gamma'] * k.phi * (len(l.carnivores_pop) - 1) < 1:\n                assert k.birth_probability(n=len(l.carnivores_pop)) == k.p['gamma'] * k.phi * (\n                        len(l.carnivores_pop) - 1)\n            else:\n                assert k.birth_probability(n=len(l.carnivores_pop)) == 1\n\n\n# Tests for move_animals_from_cell:\ndef test_herbs_removed_from_list(mocker, list_herbivore_long):\n    \"\"\"\n    The animal will move with a probability; when the random draw is mocked to 0.1 there will definitely be some\n    animals moving\n    \"\"\"\n    mocker.patch('random.random', return_value=0.1)\n    l = Lowland(list_herbivore_long)\n    length = len(l.herbivores_pop)\n    l.move_animals_from_cell()\n    assert len(l.herbs_move) == length\n\n\ndef test_carns_removed_from_list(mocker, list_carnivore_long):\n    \"\"\"\n    The animal will move with a probability; when the random draw is mocked to 0.1 there will definitely be some\n    animals moving\n    \"\"\"\n    mocker.patch('random.random', return_value=0.1)\n    l = Lowland(list_carnivore_long)\n    length = len(l.carnivores_pop)\n    l.move_animals_from_cell()\n    assert len(l.carns_move) == length\n\n\ndef test_total_moving_animals(mocker, list_carnivore_long, list_herbivore_long):\n    \"\"\"\n    The move_animal_from_cell function returns a list with one list for moving herbivores and one\n    list for moving carnivores\n    \"\"\"\n    mocker.patch('random.random', return_value=0.1)\n    l = Lowland(list_herbivore_long + list_carnivore_long)\n    total_moving = l.move_animals_from_cell()\n    assert len(total_moving[0]) == len(l.herbivores_pop)\n    assert len(total_moving[1]) == len(l.carnivores_pop)\n\n\n# Tests for reset_already_moved:\ndef test_reset_already_moved_herb(list_carnivore_long, list_herbivore_long):\n    \"\"\"\n    After moving, animal.already_moved should be True, so they don't move again from the new cell\n    in the same year\n    \"\"\"\n    l = Lowland(list_herbivore_long + list_carnivore_long)\n    total_moving = l.move_animals_from_cell()\n\n    for herb in total_moving[0]:\n        assert herb.already_moved is True\n\n\ndef test_reset_already_moved_carn(list_carnivore_long, list_herbivore_long):\n    \"\"\"\n    After moving, animal.already_moved should be True, so they don't move again from the new cell\n    in the same year\n    \"\"\"\n    l = Lowland(list_herbivore_long + list_carnivore_long)\n    total_moving = l.move_animals_from_cell()\n    for carn in total_moving[1]:\n        assert carn.already_moved is True\n\n\ndef test_reset_already_moved(list_carnivore_long, list_herbivore_long):\n    \"\"\"\n    Every year we must reset already_moved so that the animals can move again\n    \"\"\"\n    l = Lowland(list_herbivore_long + list_carnivore_long)\n    l.move_animals_from_cell()\n    l.reset_already_moved()\n    for animal in l.carnivores_pop:\n        assert animal.already_moved is False\n\n\n# Tests for counting_animals function\ndef test_count_animals_herb():\n    \"\"\"\n    We test if the counting_animals function gives the same results as len()\n    \"\"\"\n    l = Lowland(population=[{'species': 'Herbivore', 'weight': 35, 'age': 5},\n                            {'species': 'Herbivore', 'weight': 41, 'age': 8},\n                            {'species': 'Herbivore', 'weight': 50, 'age': 9}])\n    l.counting_animals()\n    assert l.N_herb == len(l.herbivores_pop)\n\n\ndef test_count_animals_carn():\n    \"\"\"\n    We test if the counting_animals function gives the same results as len()\n    \"\"\"\n    l = 
Lowland(population=[{'species': 'Carnivore', 'weight': 35, 'age': 5},\n                            {'species': 'Carnivore', 'weight': 41, 'age': 8},\n                            {'species': 'Carnivore', 'weight': 50, 'age': 9}])\n    l.counting_animals()\n    assert l.N_carn == len(l.carnivores_pop)\n\n\n# Tests for make_animals_age function\ndef test_aging_herb():\n    \"\"\"\n    The animals age each year\n    \"\"\"\n    l = Lowland(population=[{'species': 'Herbivore', 'weight': 35, 'age': 5},\n                            {'species': 'Herbivore', 'weight': 41, 'age': 8},\n                            {'species': 'Herbivore', 'weight': 50, 'age': 9}])\n    init_age = [ani.age for ani in l.herbivores_pop]\n    l.make_animals_age()\n    for k in range(len(l.herbivores_pop)):\n        assert l.herbivores_pop[k].age == init_age[k] + 1\n\n\ndef test_fitness_when_aging_herb():\n    \"\"\"\n    When an animal ages, we must update the fitness\n    \"\"\"\n    l = Lowland(population=[{'species': 'Herbivore', 'weight': 35, 'age': 5},\n                            {'species': 'Herbivore', 'weight': 41, 'age': 8},\n                            {'species': 'Herbivore', 'weight': 50, 'age': 9}])\n    init_fitness = [ani.phi for ani in l.herbivores_pop]\n    l.make_animals_age()\n    for k in range(len(l.herbivores_pop)):\n        assert l.herbivores_pop[k].phi < init_fitness[k]\n\n\ndef test_aging_carn():\n    \"\"\"\n    The animals age each year\n    \"\"\"\n    l = Lowland(population=[{'species': 'Carnivore', 'weight': 35, 'age': 5},\n                            {'species': 'Carnivore', 'weight': 41, 'age': 8},\n                            {'species': 'Carnivore', 'weight': 50, 'age': 9}])\n    init_age = [ani.age for ani in l.carnivores_pop]\n    l.make_animals_age()\n    for k in range(len(l.carnivores_pop)):\n        assert l.carnivores_pop[k].age == init_age[k] + 1\n\n\ndef test_fitness_when_aging_carn():\n    \"\"\"\n    When an animal ages, we must update the fitness\n    \"\"\"\n    l = Lowland(population=[{'species': 'Carnivore', 'weight': 35, 'age': 5},\n                            {'species': 'Carnivore', 'weight': 41, 'age': 8},\n                            {'species': 'Carnivore', 'weight': 50, 'age': 9}])\n    init_fitness = [ani.phi for ani in l.carnivores_pop]\n    l.make_animals_age()\n    for k in range(len(l.carnivores_pop)):\n        assert l.carnivores_pop[k].phi < init_fitness[k]\n\n\n# Tests for make_animals_lose_weight:\ndef test_yearly_weight_loss_herb():\n    \"\"\"\n    Every year the animal loses weight equivalent to current weight times eta\n    \"\"\"\n    l = Lowland(population=[{'species': 'Herbivore', 'weight': 35, 'age': 5},\n                            {'species': 'Herbivore', 'weight': 41, 'age': 8},\n                            {'species': 'Herbivore', 'weight': 50, 'age': 9}])\n\n    init_weight = [animal.weight for animal in l.herbivores_pop]\n    l.make_animals_lose_weight()\n    for k in range(len(l.herbivores_pop)):\n        assert l.herbivores_pop[k].weight == init_weight[k] - Herbivore.p['eta'] * init_weight[k]\n\n\ndef test_update_fitness_during_weight_loss_herb():\n    \"\"\"\n    Every year the animal loses weight and therefore must update the fitness. It becomes less
It becomes less\n than the initial fitness.\n \"\"\"\n l = Lowland(population=[{'species': 'Herbivore', 'weight': 35, 'age': 5},\n {'species': 'Herbivore', 'weight': 41, 'age': 8},\n {'species': 'Herbivore', 'weight': 50, 'age': 9}])\n init_fitness = [animal.phi for animal in l.herbivores_pop]\n l.make_animals_lose_weight()\n for k in range(len(init_fitness)):\n assert l.herbivores_pop[k].phi < init_fitness[k]\n\n\ndef test_yearly_weight_loss_carn():\n \"\"\"\n Every year the animal loses weight equivalent to current weight times eta\n \"\"\"\n l = Lowland(population=[{'species': 'Carnivore', 'weight': 35, 'age': 5},\n {'species': 'Carnivore', 'weight': 41, 'age': 8},\n {'species': 'Carnivore', 'weight': 50, 'age': 9}])\n\n init_weight = [animal.weight for animal in l.carnivores_pop]\n l.make_animals_lose_weight()\n for k in range(len(l.carnivores_pop)):\n assert l.carnivores_pop[k].weight == init_weight[k] - Carnivore.p['eta'] * init_weight[k]\n\n\ndef test_update_fitness_during_weight_loss_carn():\n \"\"\"\n Every year the animal loses weight and therefore must update the fitness. It becomes less\n than the initial fitness.\n \"\"\"\n l = Lowland(population=[{'species': 'Carnivore', 'weight': 35, 'age': 5},\n {'species': 'Carnivore', 'weight': 41, 'age': 8},\n {'species': 'Carnivore', 'weight': 50, 'age': 9}])\n init_fitness = [animal.phi for animal in l.carnivores_pop]\n l.make_animals_lose_weight()\n for k in range(len(init_fitness)):\n assert l.carnivores_pop[k].phi < init_fitness[k]\n\n\n# Tests for dead_animals_natural_cause:\ndef test_animal_removed_after_death_when_true_herb():\n \"\"\"\n Each year we must check if animals die. If weight = 0 the probability for dying is 0.\n Therefore the length of the list will be less than the initial length.\n \"\"\"\n l = Lowland(population=[{'species': 'Herbivore', 'weight': 5, 'age': 60},\n {'species': 'Herbivore', 'weight': 41, 'age': 8},\n {'species': 'Herbivore', 'weight': 0, 'age': 9}])\n init_length = len(l.herbivores_pop)\n l.dead_animals_natural_cause()\n assert len(l.herbivores_pop) < init_length\n\n\ndef test_animal_removed_after_death_when_true_carn():\n \"\"\"\n Each year we must check if animals die. If weight = 0 the probability for dying is 0.\n Therefore the length of the list will be less than the initial length.\n \"\"\"\n population = [{'species': 'Carnivore', 'weight': 35, 'age': 5},\n {'species': 'Carnivore', 'weight': 0, 'age': 8},\n {'species': 'Carnivore', 'weight': 50, 'age': 9}]\n l = Lowland(population)\n init_length = len(l.carnivores_pop)\n l.dead_animals_natural_cause()\n assert len(l.carnivores_pop) < init_length\n\n\ndef test_animal_removed_after_death_when_false_herb(mocker):\n \"\"\"\n If the random number is less than the probability for dying, no animal should be removed from\n the list. None of the animals in the list lays grounds for p=1\n \"\"\"\n mocker.patch('random.random', return_value=1)\n l = Lowland(population=[{'species': 'Herbivore', 'weight': 5, 'age': 60},\n {'species': 'Herbivore', 'weight': 41, 'age': 8},\n {'species': 'Herbivore', 'weight': 8, 'age': 9}])\n init_length = len(l.herbivores_pop)\n l.dead_animals_natural_cause()\n assert len(l.herbivores_pop) == init_length\n\n\ndef test_animal_removed_after_death_when_false_carn(mocker):\n \"\"\"\n If the random number is less than the probability for dying, no animal should be removed from\n the list. 
None of the animals in the list has a death probability of 1\n    \"\"\"\n    mocker.patch('random.random', return_value=1)\n    population = [{'species': 'Carnivore', 'weight': 35, 'age': 5},\n                  {'species': 'Carnivore', 'weight': 10, 'age': 8},\n                  {'species': 'Carnivore', 'weight': 50, 'age': 9}]\n    l = Lowland(population)\n    init_length = len(l.carnivores_pop)\n    l.dead_animals_natural_cause()\n    assert len(l.carnivores_pop) == init_length\n", "repo_name": "joridholmen/biosim", "sub_path": "tests/test_landscape.py", "file_name": "test_landscape.py", "file_ext": "py", "file_size_in_byte": 31016, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "biosim.Animals.Herbivore.p", "line_number": 18, "usage_type": "attribute"}, {"api_name": "biosim.Animals.Herbivore", "line_number": 18, "usage_type": "name"}, {"api_name": "biosim.Animals.Carnivore.p", "line_number": 19, "usage_type": "attribute"}, {"api_name": "biosim.Animals.Carnivore", "line_number": 19, "usage_type": "name"}, {"api_name": "biosim.Cell.Lowland.af", "line_number": 20, "usage_type": "attribute"}, {"api_name": "biosim.Cell.Lowland", "line_number": 20, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 16, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 66, "usage_type": "attribute"}, {"api_name": "biosim.Cell.Lowland", "line_number": 84, "usage_type": "call"}, {"api_name": "biosim.Cell.Highland", "line_number": 92, "usage_type": "call"}, {"api_name": "biosim.Cell.Water", "line_number": 100, "usage_type": "call"}, {"api_name": "biosim.Cell.Desert", "line_number": 108, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 118, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 132, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 150, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 170, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 179, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 190, "usage_type": "call"}, {"api_name": "biosim.Animals.Herbivore.p", "line_number": 191, "usage_type": "attribute"}, {"api_name": "biosim.Animals.Herbivore", "line_number": 191, "usage_type": "name"}, {"api_name": "biosim.Cell.Lowland", "line_number": 201, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 213, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 224, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 237, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 253, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 266, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 290, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 314, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 335, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 349, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 368, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 
383, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 395, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 407, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 420, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 432, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 446, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 462, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 474, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 486, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 499, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 511, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 525, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 544, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 556, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 568, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 580, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 592, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 602, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 614, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 625, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 637, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 650, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 663, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 676, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 690, "usage_type": "call"}, {"api_name": "biosim.Animals.Herbivore.p", "line_number": 697, "usage_type": "attribute"}, {"api_name": "biosim.Animals.Herbivore", "line_number": 697, "usage_type": "name"}, {"api_name": "biosim.Cell.Lowland", "line_number": 705, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 718, "usage_type": "call"}, {"api_name": "biosim.Animals.Carnivore.p", "line_number": 725, "usage_type": "attribute"}, {"api_name": "biosim.Animals.Carnivore", "line_number": 725, "usage_type": "name"}, {"api_name": "biosim.Cell.Lowland", "line_number": 733, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 748, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 764, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 776, "usage_type": "call"}, {"api_name": "biosim.Cell.Lowland", "line_number": 793, "usage_type": "call"}]} +{"seq_id": "25848261508", "text": "import usb.core\nimport usb.util\nimport struct, zlib, logging\nfrom const import CommandByte\n\n\nclass MiaoMiao:\n\tstandardKey = 0x35769521\n\tmax_send_msg_length = 2016\n\tmax_recv_msg_length = 1024\n\tpadding_line = 300\n\n\tdef __init__(self):\n\t\tself.crckeyset = False\n\t\tself.connected = True if self.connect() else False\n\n\tdef connect(self):\n\t\tdevice = usb.core.find(idVendor=0x4348, idProduct=0x5584)\n\t\tif device is None:\n\t\t\traise ValueError('Our device is not connected')\n\t\tdevice.set_configuration()\n\t\tself.read_addr = 0x81\n\t\tself.write_addr = 0x02\n\t\tself.device = device\n\t\tself.registerCrcKey()\n\t\treturn True\n\n\tdef disconnect(self):\n\t\t# 
try:\n\t\t\tusb.util.dispose_resources(self.device)\n\t\texcept usb.core.USBError:\n\t\t\tpass\n\t\tlogging.info("Disconnected.")\n\n\tdef crc32(self, content):\n\t\treturn zlib.crc32(content, self.crcKey if self.crckeyset else self.standardKey)\n\n\tdef packPerBytes(self, bytes, control_command, i):\n\t\tresult = struct.pack('<BBB', 2, control_command, i)\n\t\tresult += struct.pack('<H', len(bytes))\n\t\tresult += bytes\n\n\t\tcrc32 = self.crc32(bytes)\n\t\tprint("crc32: ", crc32)\n\t\tresult += struct.pack('<I', crc32)\n\t\tresult += struct.pack('<B', 3)\n\n\t\treturn result\n\n\n\tdef addBytesToList(self, bytes):\n\t\tlength = self.max_send_msg_length\n\t\tresult = [bytes[i:i+length] for i in range(0, len(bytes), length)]\n\t\treturn result\n\n\tdef send(self, allbytes, control_command, need_reply=True):\n\t\tprint("Control Command: " + str(control_command))\n\t\tbytes_list = self.addBytesToList(allbytes)\n\t\tfor i, bytes in enumerate(bytes_list):\n\t\t\ttmp = self.packPerBytes(bytes, control_command, i)\n\t\t\tself.sendMsgAllPackage(tmp)\n\t\tif need_reply:\n\t\t\treturn self.recv()\n\n\tdef sendMsgAllPackage(self, msg):\n\t\twrite_len = self.device.write(self.write_addr, msg)\n\t\tif write_len == len(msg):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef recv(self):\n\t\traw_msg = self.device.read(self.read_addr, self.max_recv_msg_length)\n\t\tparsed = self.resultParser(bytes(raw_msg))\n\t\tlogging.info("Received %d packets: " % len(parsed) + "".join([str(p) for p in parsed]))\n\t\treturn raw_msg, parsed\n\n\tdef resultParser(self, data):\n\t\tprint("Parsing Data: " + str(data))\n\t\tprint("Data Length: " + str(len(data)))\n\t\tbase = 0\n\t\tres = []\n\t\twhile base < len(data):\n\t\t\tclass Info(object):\n\t\t\t\tdef __str__(self):\n\t\t\t\t\tstring = "\\nControl command: %s(%s)\\nPayload length: %d\\nPayload(hex): %s" % (\n                        self.command, CommandByte.findCommand(self.command)\n                        , self.payload_length, bytes(self.payload)\n                    )\n\t\t\t\t\treturn string\n\t\t\tinfo = Info()\n\t\t\t_, info.command, _, info.payload_length = struct.unpack('<BBBH', data[base:base+5])\n\t\t\tinfo.payload = data[base + 5: base + 5 + info.payload_length]\n\t\t\tinfo.crc32 = data[base + 5 + info.payload_length: base + 9 + info.payload_length]\n\t\t\tbase += 10 + info.payload_length\n\t\t\tres.append(info)\n\n\t\treturn res\n\n\tdef sendPaperType(self, paperType=0):\n\t\tmsg = struct.pack('<B', paperType)\n\t\tself.send(msg, CommandByte.PRT_SET_PAPER_TYPE)\n\n\tdef querySN(self):\n\t\tmsg = struct.pack('<B', 1)\n\t\treturn self.send(msg, CommandByte.PRT_GET_SN)\n\n\tdef sendDensity(self, density):\n\t\tmsg = struct.pack('<B', density)\n\t\treturn self.send(msg, CommandByte.PRT_SET_HEAT_DENSITY)\n\n\n\tdef queryBatteryStatus(self):\n\t\tmsg = struct.pack('<B', 1)\n\t\treturn self.send(msg, CommandByte.PRT_GET_BAT_STATUS)\n\n\tdef sendImage(self, binary_img):\n\t\tbinary_img = bytes(binary_img,'utf-8')\n\t\tself.sendPaperType()\n\t\tprint("Image Length: " + str(len(binary_img)))\n\t\tmsg = struct.pack("<%ds" % len(binary_img), binary_img)\n\t\tself.send(msg, CommandByte.PRT_PRINT_DATA)\n\t\tself.sendFeedLine(self.padding_line)\n\n\tdef sendFeedLine(self, length):\n\t\tmsg = struct.pack('<H', length)\n\t\tself.send(msg, CommandByte.PRT_FEED_LINE)\n\n\tdef TestPage(self):\n\t\tmsg = struct.pack("<B", 1)\n\t\treturn self.send(msg, CommandByte.PRT_PRINT_TEST_PAGE)\n\n\tdef BatteryStatus(self):\n\t\tmsg = struct.pack("<B", 1)\n\t\treturn self.send(msg, CommandByte.PRT_GET_BAT_STATUS)\n\n\n\tdef registerCrcKey(self, key=0x6968634 ^ 
0x2e696d):\n\t\tlogging.info(\"Setting CRC32 key...\")\n\t\tmsg = struct.pack('<I', int(key ^ self.standardKey))\n\t\tret = self.send(msg, CommandByte.PRT_SET_CRC_KEY)\n\t\tself.crcKey = key\n\t\tself.crckeyset = True\n\t\tlogging.info(\"CRC32 key set\")\n\nif __name__ == \"__main__\":\n\tlogging.getLogger().setLevel(logging.INFO)\n\tmm = MiaoMiao()\n\tif mm.connected:\n\t\tmm.sendDensity(100)\n\t\tfrom image_process import TextConverter\n\n\t\timg = TextConverter.text2bmp(\"114514\")\n\t\tmm.sendImage(img)\n\t\tpass\n\n\n\n", "repo_name": "papulicmilan/miaomiaoji-usb-protocol", "sub_path": "print_test.py", "file_name": "print_test.py", "file_ext": "py", "file_size_in_byte": 4409, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "usb.core.core.find", "line_number": 18, "usage_type": "call"}, {"api_name": "usb.core.core", "line_number": 18, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 18, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 34, "usage_type": "call"}, {"api_name": "zlib.crc32", "line_number": 37, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 40, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 41, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 46, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 76, "usage_type": "call"}, {"api_name": "const.CommandByte.findCommand", "line_number": 88, "usage_type": "call"}, {"api_name": "const.CommandByte", "line_number": 88, "usage_type": "name"}, {"api_name": "struct.unpack", "line_number": 93, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 102, "usage_type": "call"}, {"api_name": "const.CommandByte.PRT_SET_PAPER_TYPE", "line_number": 103, "usage_type": "attribute"}, {"api_name": "const.CommandByte", "line_number": 103, "usage_type": "name"}, {"api_name": "struct.pack", "line_number": 106, "usage_type": "call"}, {"api_name": "const.CommandByte.PRT_GET_SN", "line_number": 107, "usage_type": "attribute"}, {"api_name": "const.CommandByte", "line_number": 107, "usage_type": "name"}, {"api_name": "struct.pack", "line_number": 110, "usage_type": "call"}, {"api_name": "const.CommandByte.PRT_SET_HEAT_DENSITY", "line_number": 111, "usage_type": "attribute"}, {"api_name": "const.CommandByte", "line_number": 111, "usage_type": "name"}, {"api_name": "struct.pack", "line_number": 115, "usage_type": "call"}, {"api_name": "const.CommandByte.PRT_GET_BAT_STATUS", "line_number": 116, "usage_type": "attribute"}, {"api_name": "const.CommandByte", "line_number": 116, "usage_type": "name"}, {"api_name": "struct.pack", "line_number": 122, "usage_type": "call"}, {"api_name": "const.CommandByte.PRT_PRINT_DATA", "line_number": 123, "usage_type": "attribute"}, {"api_name": "const.CommandByte", "line_number": 123, "usage_type": "name"}, {"api_name": "struct.pack", "line_number": 127, "usage_type": "call"}, {"api_name": "const.CommandByte.PRT_FEED_LINE", "line_number": 128, "usage_type": "attribute"}, {"api_name": "const.CommandByte", "line_number": 128, "usage_type": "name"}, {"api_name": "struct.pack", "line_number": 131, "usage_type": "call"}, {"api_name": "const.CommandByte.PRT_PRINT_TEST_PAGE", "line_number": 132, "usage_type": "attribute"}, {"api_name": "const.CommandByte", "line_number": 132, "usage_type": "name"}, {"api_name": "struct.pack", "line_number": 135, "usage_type": 
"call"}, {"api_name": "const.CommandByte.PRT_GET_BAT_STATUS", "line_number": 136, "usage_type": "attribute"}, {"api_name": "const.CommandByte", "line_number": 136, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 140, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 141, "usage_type": "call"}, {"api_name": "const.CommandByte.PRT_SET_CRC_KEY", "line_number": 142, "usage_type": "attribute"}, {"api_name": "const.CommandByte", "line_number": 142, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 145, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 148, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 148, "usage_type": "attribute"}, {"api_name": "image_process.TextConverter.text2bmp", "line_number": 154, "usage_type": "call"}, {"api_name": "image_process.TextConverter", "line_number": 154, "usage_type": "name"}]} +{"seq_id": "9870761717", "text": "#!/usr/bin/env python3\n\n\"\"\"Tests for US_SPP.py\"\"\"\n\nimport logging\nimport unittest\nfrom datetime import datetime\nfrom unittest.mock import patch\n\nfrom arrow import get\nfrom pandas import read_pickle\nfrom testfixtures import LogCapture\n\nfrom parsers import US_SPP\n\n\nclass TestUSSPP(unittest.TestCase):\n \"\"\"Patches in a fake response from the data source to allow consistent testing.\"\"\"\n\n def test_fetch_production(self):\n filename = \"parsers/test/mocks/US_SPP_Gen_Mix.pkl\"\n fake_data = read_pickle(filename)\n\n # Suppress log messages to prevent interfering with test formatting.\n with LogCapture() as log:\n with patch(\"parsers.US_SPP.get_data\") as gd:\n gd.return_value = fake_data\n data = US_SPP.fetch_production(logger=logging.getLogger(\"test\"))\n datapoint = data[-1]\n\n with self.subTest():\n self.assertIsInstance(data, list)\n\n with self.subTest():\n self.assertEqual(len(data), 23)\n\n # Unknown keys must be assigned and summed.\n with self.subTest():\n self.assertEqual(round(datapoint[\"production\"][\"unknown\"], 2), 33.1)\n\n with self.subTest():\n expected_dt = get(datetime(2018, 7, 27, 11, 45), \"UTC\").datetime\n self.assertEqual(datapoint[\"datetime\"], expected_dt)\n\n with self.subTest():\n self.assertEqual(datapoint[\"source\"], \"spp.org\")\n\n with self.subTest():\n self.assertEqual(datapoint[\"zoneKey\"], \"US-SPP\")\n\n with self.subTest():\n self.assertIsInstance(datapoint[\"storage\"], dict)\n\n def test_SPP_logging(self):\n \"\"\"Make sure that new generation types are logged properly.\"\"\"\n\n filename = \"parsers/test/mocks/US_SPP_Gen_Mix.pkl\"\n fake_data = read_pickle(filename)\n\n with LogCapture() as log:\n with patch(\"parsers.US_SPP.get_data\") as gd:\n gd.return_value = fake_data\n data = US_SPP.fetch_production(logger=logging.getLogger(\"test\"))\n log.check(\n (\n \"test\",\n \"WARNING\",\n \"\"\"New column 'Flux Capacitor' present in US-SPP data source.\"\"\",\n )\n )\n\n\nif __name__ == \"__main__\":\n unittest.main(buffer=True)\n", "repo_name": "electricitymaps/electricitymaps-contrib", "sub_path": "parsers/test/test_US_SPP.py", "file_name": "test_US_SPP.py", "file_ext": "py", "file_size_in_byte": 2317, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3126, "dataset": "github-code", "pt": "21", "api": [{"api_name": "unittest.TestCase", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 22, "usage_type": "call"}, {"api_name": "testfixtures.LogCapture", "line_number": 25, "usage_type": "call"}, {"api_name": 
"unittest.mock.patch", "line_number": 26, "usage_type": "call"}, {"api_name": "parsers.US_SPP.fetch_production", "line_number": 28, "usage_type": "call"}, {"api_name": "parsers.US_SPP", "line_number": 28, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 28, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 58, "usage_type": "call"}, {"api_name": "testfixtures.LogCapture", "line_number": 60, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 61, "usage_type": "call"}, {"api_name": "parsers.US_SPP.fetch_production", "line_number": 63, "usage_type": "call"}, {"api_name": "parsers.US_SPP", "line_number": 63, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 63, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "26990973420", "text": "from django.core.exceptions import ValidationError\nfrom django.contrib.auth.models import User\n\nfrom rest_framework import status\nfrom django.http import HttpResponseServerError\nfrom rest_framework.viewsets import ViewSet\nfrom rest_framework.response import Response\nfrom rest_framework import serializers\nfrom rest_framework import status\nfrom gamerraterapi.models import Games, Gamers, Ratings\n\nclass All_Ratings(ViewSet):\n def retrieve(self, request, pk=None):\n \n try:\n rating=Ratings.objects.get(pk=pk)\n serializer=RatingSerializer(rating, context={'request':request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)\n\n def list(self, request):\n ratings=Ratings.objects.all()\n # gamer = Gamers.objects.get(user=request.auth.user)\n\n # game=Games.objects.get(id=ratings.game)\n\n serializer=RatingSerializer(ratings, many=True, context={'request': request})\n\n\n return Response(serializer.data)\n\n\n def create(self, request):\n \n\n # Uses the token passed in the `Authorization` header\n gamer = Gamers.objects.get(user=request.auth.user)\n game = Games.objects.get(pk=request.data[\"gameId\"])\n \n rating = Ratings()\n rating.gamer = gamer\n rating.rating=request.data[\"rating\"]\n rating.game=game\n\n \n try:\n rating.save()\n serializer = RatingSerializer(rating, context={'request': request})\n return Response(serializer.data)\n\n # If anything went wrong, catch the exception and\n # send a response with a 400 status code to tell the\n # client that something was wrong with its request data\n except ValidationError as ex:\n return Response({\"reason\": ex.message}, status=status.HTTP_400_BAD_REQUEST)\n# class EventUserSerializer(serializers.ModelSerializer):\n# \"\"\"JSON serializer for event scheduler's related Django user\"\"\"\n# class Meta:\n# model = User\n# fields = ['first_name', 'last_name', 'email']\n\nclass GameSerializer(serializers.ModelSerializer):\n class Meta:\n model= Games\n fields=(\"id\", \"title\")\n\nclass RatingSerializer(serializers.ModelSerializer):\n game=GameSerializer(many=False)\n class Meta:\n model= Ratings\n fields=(\"id\", \"rating\", \"gamer\", \"game\")\n ", "repo_name": "dwilliams91/GamerRater-Server", "sub_path": "gamerraterapi/views/rating.py", "file_name": "rating.py", "file_ext": "py", "file_size_in_byte": 2419, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "rest_framework.viewsets.ViewSet", 
"line_number": 12, "usage_type": "name"}, {"api_name": "gamerraterapi.models.Ratings.objects.get", "line_number": 16, "usage_type": "call"}, {"api_name": "gamerraterapi.models.Ratings.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "gamerraterapi.models.Ratings", "line_number": 16, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 18, "usage_type": "call"}, {"api_name": "django.http.HttpResponseServerError", "line_number": 20, "usage_type": "call"}, {"api_name": "gamerraterapi.models.Ratings.objects.all", "line_number": 23, "usage_type": "call"}, {"api_name": "gamerraterapi.models.Ratings.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "gamerraterapi.models.Ratings", "line_number": 23, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 31, "usage_type": "call"}, {"api_name": "gamerraterapi.models.Gamers.objects.get", "line_number": 38, "usage_type": "call"}, {"api_name": "gamerraterapi.models.Gamers.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "gamerraterapi.models.Gamers", "line_number": 38, "usage_type": "name"}, {"api_name": "gamerraterapi.models.Games.objects.get", "line_number": 39, "usage_type": "call"}, {"api_name": "gamerraterapi.models.Games.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "gamerraterapi.models.Games", "line_number": 39, "usage_type": "name"}, {"api_name": "gamerraterapi.models.Ratings", "line_number": 41, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 50, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 55, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 56, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 56, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 56, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 63, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 63, "usage_type": "name"}, {"api_name": "gamerraterapi.models.Games", "line_number": 65, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 68, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 68, "usage_type": "name"}, {"api_name": "gamerraterapi.models.Ratings", "line_number": 71, "usage_type": "name"}]} +{"seq_id": "24262598053", "text": "import sys\r\nimport random\r\nfrom collections import deque\r\n\r\n\r\nclass Node:\r\n def __init__(self, state, parent, path_length, dir):\r\n self.parent = parent\r\n self.path_length = path_length\r\n self.state = state\r\n self.dir = dir\r\n\r\n def get_parent(self):\r\n return self.parent\r\n\r\n def get_path_length(self):\r\n return self.path_length\r\n\r\n def get_state(self):\r\n return self.state\r\n\r\n def get_dir(self):\r\n return self.dir\r\n\r\n\r\ndef print_puzzle(state):\r\n count = 1\r\n for a in state:\r\n if count % size == 0:\r\n print(a + \" \")\r\n else:\r\n print(a + \" \", end=\"\", flush=True)\r\n count += 1\r\n\r\n\r\ndef goal_test(state):\r\n return goal == state\r\n\r\n\r\ndef get_children(state):\r\n (x, y) = get_coordinate(state)\r\n move = []\r\n children = []\r\n if (x + 1) < size:\r\n move.append(get_index(x + 1, y))\r\n if (x - 1) >= 0:\r\n move.append(get_index(x - 1, y))\r\n if (y + 1) < size:\r\n 
move.append(get_index(x, y + 1))\r\n if (y - 1) >= 0:\r\n move.append(get_index(x, y - 1))\r\n for a in range(0, len(move)):\r\n children.append(swap(state, state.index(\"0\"), move[a]))\r\n return children\r\n\r\n\r\ndef get_child_dir(state):\r\n (x, y) = get_coordinate(state)\r\n move = {}\r\n children = {}\r\n if (x + 1) < size:\r\n move[get_index(x + 1, y)] = \"DOWN\"\r\n if (x - 1) >= 0:\r\n move[get_index(x - 1, y)] = \"UP\"\r\n if (y + 1) < size:\r\n move[get_index(x, y + 1)] = \"RIGHT\"\r\n if (y - 1) >= 0:\r\n move[get_index(x, y - 1)] = \"LEFT\"\r\n for key in move:\r\n children[swap(state, state.index(\"0\"), key)] = move[key]\r\n return children\r\n\r\n\r\ndef get_coordinate(state):\r\n index = state.index(\"0\")\r\n if index // size == 0:\r\n x = 0\r\n elif index // size == 1:\r\n x = 1\r\n elif index // size == 2:\r\n x = 2\r\n elif index // size == 3:\r\n x = 3\r\n else:\r\n x = 4\r\n if index % size == 0:\r\n y = 0\r\n elif index % size == 1:\r\n y = 1\r\n elif index % size == 2:\r\n y = 2\r\n elif index % size == 3:\r\n y = 3\r\n else:\r\n y = 4\r\n return x, y\r\n\r\n\r\ndef get_index(x, y):\r\n index = 0\r\n if x == 1:\r\n index += size\r\n elif x == 2:\r\n index += size * 2\r\n elif x == 3:\r\n index += size * 3\r\n elif x == 4:\r\n index += size * 4\r\n if y == 1:\r\n index += 1\r\n elif y == 2:\r\n index += 2\r\n elif y == 3:\r\n index += 3\r\n elif y == 4:\r\n index += 4\r\n return index\r\n\r\n\r\ndef swap(s, a, b):\r\n n = list(s)\r\n temp = n[a]\r\n n[a] = n[b]\r\n n[b] = temp\r\n return ''.join(n)\r\n\r\n\r\ndef bfs_winnable():\r\n children = [goal]\r\n fringe = deque()\r\n fringe.appendleft(goal)\r\n visited = set()\r\n visited.add(goal)\r\n while len(fringe) != 0:\r\n v = fringe.pop()\r\n c = get_children(v)\r\n for a in range(0, len(c)):\r\n if c[a] not in visited:\r\n fringe.appendleft(c[a])\r\n visited.add(c[a])\r\n children.append(c[a])\r\n return children\r\n\r\n\r\ndef bfs_winnable_set():\r\n children = set()\r\n children.add(goal)\r\n fringe = deque()\r\n fringe.appendleft(goal)\r\n visited = set()\r\n visited.add(goal)\r\n while len(fringe) != 0:\r\n v = fringe.pop()\r\n c = get_children(v)\r\n for a in range(0, len(c)):\r\n if c[a] not in visited:\r\n fringe.appendleft(c[a])\r\n visited.add(c[a])\r\n children.add(c[a])\r\n return children\r\n\r\n\r\ndef random_state():\r\n m = []\r\n for i in range(0, size * size):\r\n r = str(random.randint(0, 8))\r\n while r in m:\r\n r = str(random.randint(0, 8))\r\n m.append(str(r))\r\n return ''.join(m)\r\n\r\n\r\ndef random_solvable():\r\n rs = bfs_winnable()\r\n s = random.randint(0, len(rs) - 1)\r\n return rs[s]\r\n\r\n\r\ndef print_path(state):\r\n start = Node(state, None, 0, None)\r\n fringe = deque()\r\n fringe.appendleft(start)\r\n visited = set()\r\n visited.add(start.get_state())\r\n while len(fringe) != 0:\r\n v = fringe.pop()\r\n if goal_test(v.get_state()):\r\n print(v.get_path_length())\r\n parents = list()\r\n p = v\r\n while p is not None:\r\n parents.append(p)\r\n p = p.get_parent()\r\n if parents:\r\n print_puzzle(parents.pop().get_state())\r\n print()\r\n while parents:\r\n t = parents.pop()\r\n print_puzzle(t.get_state())\r\n print(t.get_dir())\r\n print()\r\n break\r\n c = get_child_dir(v.get_state())\r\n temp = []\r\n for e in c:\r\n add = Node(e, v, v.get_path_length() + 1, c[e])\r\n temp.append(add)\r\n for a in range(0, len(c)):\r\n if temp[a].get_state() not in visited:\r\n fringe.appendleft(temp[a])\r\n visited.add(temp[a].get_state())\r\n\r\n\r\ndef path(state):\r\n start = Node(state, 
None, 0, None)\r\n fringe = deque()\r\n fringe.appendleft(start)\r\n visited = set()\r\n visited.add(start.get_state())\r\n while len(fringe) != 0:\r\n v = fringe.pop()\r\n if goal_test(v.get_state()):\r\n return v.get_path_length()\r\n c = get_child_dir(v.get_state())\r\n temp = []\r\n for e in c:\r\n add = Node(e, v, v.get_path_length() + 1, c[e])\r\n temp.append(add)\r\n for a in range(0, len(c)):\r\n if temp[a].get_state() not in visited:\r\n fringe.appendleft(temp[a])\r\n visited.add(temp[a].get_state())\r\n\r\n\r\ndef random_gen():\r\n r = random.randint(100, 1000)\r\n solve = bfs_winnable_set()\r\n y = []\r\n for a in range(1, r):\r\n b = random_state()\r\n if b in solve:\r\n y.append(path(b))\r\n print(\"Longest Path Length:\" + str(max(y)))\r\n print(\"Average Path Length:\" + str(sum(y)/len(y)))\r\n print(\"Percent Solvable:\" + str(len(y)/r * 100))\r\n\r\n\r\ndef longest_solvable():\r\n fringe = deque()\r\n fringe.appendleft(goal)\r\n visited = set()\r\n last = \"\"\r\n while len(fringe) != 0:\r\n v = fringe.pop()\r\n for a in get_children(v):\r\n if a not in visited:\r\n fringe.appendleft(a)\r\n visited.add(a)\r\n last = a\r\n return last\r\n\r\n\r\ndef print_path_dfs(state):\r\n start = Node(state, None, 0, None)\r\n fringe = list()\r\n fringe.append(start)\r\n visited = set()\r\n visited.add(start.get_state())\r\n while len(fringe) != 0:\r\n v = fringe.pop()\r\n visited.add(v)\r\n if goal_test(v.get_state()):\r\n print(v.get_path_length())\r\n parents = list()\r\n p = v\r\n while p is not None:\r\n parents.append(p)\r\n p = p.get_parent()\r\n if parents:\r\n print_puzzle(parents.pop().get_state())\r\n print()\r\n while parents:\r\n t = parents.pop()\r\n print_puzzle(t.get_state())\r\n print(t.get_dir())\r\n print()\r\n break\r\n c = get_child_dir(v.get_state())\r\n temp = []\r\n for e in c:\r\n add = Node(e, v, v.get_path_length() + 1, c[e])\r\n temp.append(add)\r\n for a in range(0, len(temp)):\r\n if temp[a].get_state() not in visited:\r\n fringe.append(temp[a])\r\n visited.add(temp[a].get_state())\r\n\r\n\r\ndef moves(moves_away):\r\n start = Node(goal, None, 0, None)\r\n fringe = deque()\r\n fringe.appendleft(start)\r\n visited = set()\r\n visited.add(start.get_state())\r\n count = 0\r\n while len(fringe) != 0:\r\n v = fringe.pop()\r\n if v.get_path_length() == moves_away:\r\n count += 1\r\n c = get_child_dir(v.get_state())\r\n temp = []\r\n for e in c:\r\n add = Node(e, v, v.get_path_length() + 1, c[e])\r\n temp.append(add)\r\n for a in range(0, len(c)):\r\n if temp[a].get_state() not in visited:\r\n fringe.appendleft(temp[a])\r\n visited.add(temp[a].get_state())\r\n return count\r\n\r\n\r\ndef parity_check(state):\r\n if state == goal:\r\n return 1\r\n out_of_order = 0\r\n temp = state[:state.index(\"0\")] + state[state.index(\"0\") + 1:]\r\n for a in range(0, len(temp)):\r\n for b in range(a, len(temp)):\r\n if temp[b] < temp[a]:\r\n out_of_order += 1\r\n if size % 2 == 1:\r\n if out_of_order % 2 == 0:\r\n return 1\r\n else:\r\n return 0\r\n else:\r\n (x, y) = get_coordinate(state)\r\n if x % 2 == 0:\r\n if out_of_order % 2 == 1:\r\n return 0\r\n else:\r\n return 1\r\n else:\r\n if out_of_order % 2 == 0:\r\n return 0\r\n else:\r\n return 1\r\n\r\n\r\nsize = 3\r\ngoal = \"012345678\"\r\nprint_path(\"806547231\")\r\n", "repo_name": "realshiruizhou/8-Puzzle", "sub_path": "8Puzzle/8puzzle.py", "file_name": "8puzzle.py", "file_ext": "py", "file_size_in_byte": 9034, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", 
"api": [{"api_name": "collections.deque", "line_number": 130, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 148, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 166, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 168, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 175, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 181, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 216, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 236, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 249, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 301, "usage_type": "call"}]} +{"seq_id": "74012402293", "text": "import xml.etree.ElementTree as ET\nfrom jinja2 import Environment, FileSystemLoader\nimport os\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n '--pyreport',\n action='store_true',\n help='generate an HTML test report'\n )\n\n\ndef pytest_sessionfinish(session):\n if session.config.getoption('--pyreport'):\n try:\n with open('result.xml', 'r'):\n pass\n except FileNotFoundError:\n print('The results file does not exist.')\n return\n\n tree = ET.parse('result.xml')\n root = tree.getroot()\n\n testsuite = root.find('testsuite')\n total_time = float(testsuite.get('time'))\n\n test_cases = []\n for testcase in testsuite.iter('testcase'):\n test_case = {\n 'name': testcase.get('name'),\n 'result': 'pass',\n 'details': '',\n 'time': float(testcase.get('time'))\n }\n failure = testcase.find('failure')\n if failure is not None:\n test_case['result'] = 'fail'\n test_case['details'] = failure.text\n skipped = testcase.find('skipped')\n if skipped is not None:\n test_case['result'] = 'skip'\n test_case['details'] = skipped.text\n test_cases.append(test_case)\n\n num_tests = len(test_cases)\n num_failures = len([test_case for test_case in test_cases if test_case['result'] == 'fail'])\n num_skipped = len([test_case for test_case in test_cases if test_case['result'] == 'skip'])\n\n package_dir = os.path.dirname(os.path.abspath(__file__))\n templates_dir = os.path.join(package_dir, 'templates')\n env = Environment(loader=FileSystemLoader(templates_dir))\n template = env.get_template('template.html')\n\n html_output = template.render(test_cases=test_cases, num_tests=num_tests, num_failures=num_failures,\n num_skipped=num_skipped, total_time=total_time)\n\n with open('pyreport.html', 'w') as f:\n f.write(html_output)\n", "repo_name": "ToghrulMirzayev/pytest-pyreport", "sub_path": "src/pytest_pyreport/plugin.py", "file_name": "plugin.py", "file_ext": "py", "file_size_in_byte": 2122, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "xml.etree.ElementTree.parse", "line_number": 23, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 23, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "jinja2.Environment", "line_number": 53, "usage_type": "call"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "32319906630", "text": 
"from collections import deque\n\n\ndef out_dic(prere,output_dic,degree_dic):\n for edge in prere:\n if edge[1] not in output_dic:\n output_dic[edge[1]]=[]\n output_dic[edge[1]].append(edge[0])\n if edge[0] not in degree_dic:\n degree_dic[edge[0]]=1\n else:\n degree_dic[edge[0]]+=1\n if edge[1] not in degree_dic:\n degree_dic[edge[1]] = 0\n\n\n\ndef canFinish(numCourses, prerequisites):\n \"\"\"\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: bool\n \"\"\"\n out_edges={}\n degree_dic={}\n out_dic(prerequisites,out_edges,degree_dic)\n que=deque()\n for i in range(0,numCourses):\n if i not in degree_dic or degree_dic[i]==0:\n que.append(i)\n if len(que)==0:\n return False\n cal_num=len(que)\n while len(que)!=0:\n prere=que.popleft()\n if prere in out_edges:\n for next in out_edges[prere]:\n degree_dic[next]-=1\n if degree_dic[next]==0:\n que.append(next)\n cal_num+=1\n if cal_num!=numCourses:\n return False\n return True\n\nprint(canFinish(3,[[1,0]]))", "repo_name": "hanzmyco/ST", "sub_path": "Alibaba/Course Schedule.py", "file_name": "Course Schedule.py", "file_ext": "py", "file_size_in_byte": 1188, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "collections.deque", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "71204366452", "text": "import discord\nfrom discord.ext import commands\nimport json\n\ndef main():\n with open('cfg.json') as config:\n try:\n data = json.load(config)\n except:\n print('cfg.json failed to load. Bot cannot start.')\n return\n\n token = data['discord_token']\n prefix = data['command_prefix']\n bot = commands.Bot(command_prefix=prefix)\n\n extensions = data['extensions']\n for extension in extensions:\n try:\n bot.load_extension(f'Extensions.{extension}.{extension}')\n except Exception as e:\n print(f'{extension} extension failed to load. 
Exception: {e}')\n\n    bot.run(token)\n\nif __name__ == '__main__':\n    main()\n", "repo_name": "GordonJakeKole/KommBot", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 694, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "json.load", "line_number": 8, "usage_type": "call"}, {"api_name": "discord.ext.commands.Bot", "line_number": 15, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "41105177633", "text": "from numpy import loadtxt\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n##import data\n# load the dataset\ndataset = loadtxt('/Users/mayyaral-atari/Desktop/work/uni/msci598/assignments/testing/a4Test/pima-indians-diabetes.data.csv', delimiter=',')\n# split into input (X) and output (y) variables\nX = dataset[:,0:8]\ny = dataset[:,8]\nprint(\"parsed file\")\n# define the keras model\nmodel = Sequential()\nmodel.add(Dense(12, input_dim=8, activation='relu'))\nmodel.add(Dense(8, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\n\nprint(\"built model\")\n\n# compile the keras model\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(\"using binary_crossentropy as the loss fcn\")\n\n# fit the keras model on the dataset\nmodel.fit(X, y, epochs=150, batch_size=10)\nprint(\"model training\")\n\n# evaluate the keras model\n_, accuracy = model.evaluate(X, y)\nprint('Accuracy: %.2f' % (accuracy*100))", "repo_name": "MayyarAA/msci-nlp-w22", "sub_path": "testing/a4Test/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 944, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.loadtxt", "line_number": 7, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 13, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 15, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "37224112708", "text": "# [*.dat] : https://archive.ics.uci.edu/ml/machine-learning-databases/00224/\n# Part4. [실습10] 제조 공정 내 가스 혼합물의 개별 가스 성분 분류\nfrom matplotlib import pyplot as plt\n\nplt.rc('font', family='AppleGothic')\n# ## 01. 데이터 소개 및 분석프로세스 수립\n# : \"강의자료 → Ch11. [실습9] 가스 터빈 추진 플랜트 제조 공정의 부식 예측\" 참고\n# 02. 데이터 준비를 위한 EDA 및 전처리\n\n# ### 0. 데이터 불러오기\n\n############################################## 00.
필요한 파이썬 라이브러리 불러오기 #####################################################\nimport os\nimport time\nimport glob\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport scipy.stats\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d import proj3d\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nfrom sklearn.preprocessing import StandardScaler\n\n# 데이터 파일을 얻어오기 위한 처리\npath = '/Users/jiyoonkim/Documents/da_study/comFiles/Dataset'\nall_files = glob.glob(os.path.join(path, \"*.dat\"))\n\ndf_from_each_file = (pd.read_csv(f, sep=\"\\s+\", index_col=0, header=None) for f in all_files)\ndf_from_each_file\ndf = pd.concat(df_from_each_file, sort=True)\n\n# 데이터 모양의 변경 처리\n# 각 셀마다 feature 와 value 를 나눠준다 (예 --> 1:15596.16 ---> 15596.16)\nfor col in df.columns.values:\n df[col] = df[col].apply(lambda x: float(str(x).split(':')[1]))\n\ndf = df.rename_axis('Gas').reset_index()\ndf.sort_values(by=['Gas'], inplace=True)\ndf.reset_index(drop=True, inplace=True)\n# 1. 데이터 탐색\n# 1) Basic\n# 가스는 1의 6까지의 종류로 되어있음\ndf.Gas.nunique()\ndf.head()\ndf.shape\n# 2) 데이터 타입\n# object 컬럼 제외 -> Gas 컬럼\npd.unique(df.dtypes), len(df.select_dtypes(exclude='object').columns) - 1\n# 3) 데이터 통계값\ndf.describe()\n# 2. Feature 정의\n# 1) Target Feature\n\n# 1. Ethanol\n# 2. Ethylene\n# 3. Ammonia\n# 4. Acetaldehyde\n# 5. Acetone\n# 6. Toluene\nsns.countplot(df.Gas)\nsns.set(style=\"darkgrid\")\nplt.title('Gas Count')\nplt.show()\n\n# 분포를 그려봄\nsns.distplot(df.Gas)\nplt.xlim(1, 6)\nplt.title('Distribution of Gas')\nplt.show()\n\n# 2) 농도가 다른 컬럼 확인\nconc = df.iloc[:, 1]\n# 농도가 다른 것들을 비교하기 위해 처리\nconc_red = conc.apply(lambda x: x / 10000)\n\nfig = plt.figure(figsize=(22, 5))\nfig.add_subplot(121)\nsns.distplot(conc_red)\nplt.title('Distribution of Concentrations')\nplt.xlabel('Gas concentration Levels')\n\nfig.add_subplot(122)\nsns.boxplot(conc_red)\nplt.title('Concentration')\nplt.xlabel('Gas concentration Levels')\n\nplt.show()\n\n# #### 3) 데이터 확인\nattr = df.iloc[:, 2:].copy()\nattr.head()\n\n# #### 4) 상관도 분석\n# 상관계수 계산\ncorrelation = df.corr()\n\n# Heatmap 그리기\nf, ax = plt.subplots(figsize=(20, 10))\nplt.title('Correlations in dataset', size=20)\nsns.heatmap(correlation)\nplt.show()\n\n# 상관계수 상위 20개 (양수, 음수)\n# 상관계수 정렬\nconc_corr = correlation.iloc[:, 1].sort_values(ascending=False)\n\n# 상위 20개 (양수)\nconc_corr[1:].head(20)\n\n# 상위 20개 (음수)\nconc_corr[1:].tail(20)\n\n# 5) 상관도 기준 관계 그래프\nfig = plt.figure(figsize=(20, 50))\nfor i in range(0, 20):\n fig.add_subplot(10, 2, i + 1)\n sns.scatterplot(attr.iloc[:, conc_corr.index[i]], conc_red, hue=\"Gas\", palette=\"Set1\", data=df, legend=\"full\")\n plt.xlabel(conc_corr.index[i])\n plt.ylabel(\"Gas Concentration\")\n\nfig.tight_layout()\nplt.show()\n\n# PCA 적용\n# 1) 데이터 준비\ndf_copy = df.copy()\n\nX = df_copy.iloc[:, 1:]\n# 각 가스들을 나타내는 숫자\ny = df_copy.iloc[:, 0]\ny.head()\nX.head()\n\n# 2) 테스트 모델 생성 주성분 3개로 표현\npca = PCA(n_components=3)\nX_train = pca.fit_transform(X)\n# 3) 그래프\n# 평면 그래프 -> 3차원 그래프\nfig = plt.figure(figsize=(8, 8))\nax = fig.add_subplot(111, projection='3d')\nplt.rcParams['legend.fontsize'] = 11\n# 가스별로 그려준다\nax.plot(X_train[0:2564, 0], X_train[0:2564, 1], X_train[0:2564, 2], 'o', markersize=2.5, label='Ethanol')\nax.plot(X_train[2565:5490, 0], X_train[2565:5490, 1], X_train[2565:5490, 2], 'o', markersize=2.5, label='Ethylene')\nax.plot(X_train[5491:7131, 0], X_train[5491:7131, 1], X_train[5491:7131, 2], 'o', markersize=2.5, label='Ammonia')\nax.plot(X_train[7132:9067, 0], 
X_train[7132:9067, 1], X_train[7132:9067, 2], 'o', markersize=2.5, label='Acetaldehyde')\nax.plot(X_train[9068:12076, 0], X_train[9068:12076, 1], X_train[9068:12076, 2], 'o', markersize=2.5, label='Acetone')\nax.plot(X_train[12077:13909, 0], X_train[12077:13909, 1], X_train[12077:13909, 2], 'o', markersize=2.5, label='Toluene')\nax.set_xlabel('PC1')\nax.set_ylabel('PC2')\nax.set_zlabel('PC3')\nax.legend(loc='upper right')\n\nplt.show()\n\n# t-SNE 그래프\n# n_iter 최소 250개이상 -> 3000개\ntsne = TSNE(n_components=3, n_iter=250)\nxtrain = tsne.fit_transform(X)\n\n# 각각의 평명산 표현하는 점을 보기 위한것임\nfig = plt.figure(figsize=(8, 8))\nax = fig.add_subplot(111, projection='3d')\nplt.rcParams['legend.fontsize'] = 11\nax.plot(xtrain[0:2564, 0], xtrain[0:2564, 1], xtrain[0:2564, 2], 'o', markersize=2.5, label='Ethanol')\nax.plot(xtrain[2565:5490, 0], xtrain[2565:5490, 1], xtrain[2565:5490, 2], 'o', markersize=2.5, label='Ethylene')\nax.plot(xtrain[5491:7131, 0], xtrain[5491:7131, 1], xtrain[5491:7131, 2], 'o', markersize=2.5, label='Ammonia')\nax.plot(xtrain[7132:9067, 0], xtrain[7132:9067, 1], xtrain[7132:9067, 2], 'o', markersize=2.5, label='Acetaldehyde')\nax.plot(xtrain[9068:12076, 0], xtrain[9068:12076, 1], xtrain[9068:12076, 2], 'o', markersize=2.5, label='Acetone')\nax.plot(xtrain[12077:13909, 0], xtrain[12077:13909, 1], xtrain[12077:13909, 2], 'o', markersize=2.5, label='Toluene')\nax.set_xlabel('PC1')\nax.set_ylabel('PC2')\nax.set_zlabel('PC3')\nax.legend(loc='upper right')\nplt.show()\n\n# 4) Scailing\nX_scaled = X.copy()\nX_scaled = StandardScaler().fit(X_scaled).transform(X_scaled)\n\n# 5) PCA Step 1 - Covariance Matrix 만들기\ncov_matrix = np.cov(X_scaled.T)\n\n# 6) PCA Step 2 - Eigen Values 와 Eigen Vector 만들기\neig_val, eig_vec = np.linalg.eig(cov_matrix)\nprint('Eigenvectors \\n%s' % eig_vec)\nprint('\\nEigenvalues \\n%s' % eig_val)\n\ntot = sum(eig_val)\nvar_exp = [(i / tot) * 100 for i in sorted(eig_val, reverse=True)]\ncum_var_exp = np.cumsum(var_exp)\nprint(\"누적 분산 설명력\", cum_var_exp)\n\nplt.plot(var_exp)\nplt.show()\n\n# %로 보면됨\nplt.figure(figsize=(20, 4))\nplt.bar(range(128), var_exp)\nplt.ylabel('Explained variance ratio')\nplt.xlabel('Principal components')\nplt.xlim(0, 20)\nplt.xticks(range(-1, 20))\nplt.tight_layout()\nplt.show()\n\n# 7) Scikit-learn 으로 PCA 적용하기\npca = PCA()\nX_scaled = pca.fit_transform(X_scaled)\n\nplt.figure(figsize=(10, 4))\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.xlim(0, 27)\nplt.xticks(range(0, 27))\nplt.title('Cumulative variance of principle components')\nplt.xlabel('Number of components')\nplt.ylabel('Cumulative explained variance')\nplt.tight_layout()\nplt.show()\n\nprint(pca.explained_variance_ratio_)\n\n# -> 12개의 성분만으로 약 95%의 설명력을 가질 수 있음\n# 다양한 Classifier 를 활용한 가스 성분 분류 모델 생성\n# 모델링 준비\n# Label Encoding\nfrom sklearn.preprocessing import label_binarize\n\ny_ohe = label_binarize(y, classes=[1, 2, 3, 4, 5, 6])\nn_classes = y_ohe.shape[1]\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=4)\n\ny_train_nobinary = y_train.copy()\ny_test_nobinary = y_test.copy()\n\n# Scailing\nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\ny_train = label_binarize(y_train, classes=[1, 2, 3, 4, 5, 6])\ny_test = label_binarize(y_test, classes=[1, 2, 3, 4, 5, 6])\n\n\n# ROC Curve Function 생성 -> 여러가지로 사용할 수 있도록 함수로 표현\ndef plot_roc(y_test, y_pred, title):\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n for i in 
range(n_classes):\n fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_pred[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n # Compute micro-average ROC curve and ROC area\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_pred.ravel())\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\n # Compute macro-average ROC curve and ROC area\n all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))\n\n # Then interpolate all ROC curves at this points\n mean_tpr = np.zeros_like(all_fpr)\n for i in range(n_classes):\n mean_tpr += interp(all_fpr, fpr[i], tpr[i])\n\n # Finally average it and compute AUC\n mean_tpr /= n_classes\n lw = 2\n fpr[\"macro\"] = all_fpr\n tpr[\"macro\"] = mean_tpr\n roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n\n # Plot all ROC curves\n plt.figure()\n plt.plot(fpr[\"micro\"], tpr[\"micro\"],\n label='micro-average ROC curve (area = {0:0.3f})'\n ''.format(roc_auc[\"micro\"]),\n color='deeppink', linestyle=':', linewidth=4)\n\n plt.plot(fpr[\"macro\"], tpr[\"macro\"],\n label='macro-average ROC curve (area = {0:0.3f})'\n ''.format(roc_auc[\"macro\"]),\n color='navy', linestyle=':', linewidth=4)\n\n plt.plot([0, 1], [0, 1], 'k--', lw=lw)\n plt.xlim([-0.01, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title(title)\n plt.legend(loc=\"lower right\")\n plt.show()\n\n\n# Confusion Plot Function 생성\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=4)\n\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport itertools\n\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n\n# 기본 모델링\n# Random Forest\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_val_score\n\nclf = RandomForestClassifier()\nrandom = clf.fit(X_train, y_train)\n\ny_pred = random.predict(X_test)\nprint('ground truth :', y_test)\nprint('predicted class :', y_pred)\nprint('ground truth :', y_test)\nprint('predicted class :', y_pred)\nprint('cross validation acc :', cross_val_score(random, X_test, y_test).mean())\n\n# > Confusion Matrix\ncnf_matrix = confusion_matrix(y_test, y_pred)\nnp.set_printoptions(precision=2)\n\nplt.figure()\nclass_names = ['1', '2', '3', '4', '5', '6']\nplot_confusion_matrix(cnf_matrix, classes=class_names,\n title='Confusion matrix, without normalization')\nplt.show()\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n title='Confusion matrix, with 
normalization')\nplt.legend(['train', 'test'], loc='lower right')\nplt.show()\n\n# Ada Boost\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.model_selection import cross_val_score\n\nada = AdaBoostClassifier(n_estimators=10)\nada = ada.fit(X_train, y_train)\n\ny_pred = ada.predict(X_test)\nprint('ground truth :', y_test)\nprint('predicted class :', y_pred)\nprint('ground truth :', y_test)\nprint('predicted class :', y_pred)\n\n# > Confusion Matrix\ncnf_matrix = confusion_matrix(y_test, y_pred)\nnp.set_printoptions(precision=2)\n\nplt.figure()\nclass_names = ['1', '2', '3', '4', '5', '6']\nplot_confusion_matrix(cnf_matrix, classes=class_names,\n title='Confusion matrix, without normalization')\nplt.show()\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n title='Confusion matrix, with normalization')\nplt.legend(['train', 'test'], loc='lower right')\nplt.show()\n\n# Bagging with KNN\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import BaggingClassifier\n\nbagging = BaggingClassifier(KNeighborsClassifier(n_neighbors=14),\n max_samples=0.5, max_features=0.5)\n\nbagging = bagging.fit(X_train, y_train)\n\ny_pred = bagging.predict(X_test)\nprint('ground truth :', y_test)\nprint('predicted class :', y_pred)\nprint('ground truth :', y_test)\nprint('predicted class :', y_pred)\nprint('cross validation acc :', cross_val_score(bagging, X_test, y_test).mean())\n# > Confusion Matrix\n# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test, y_pred)\nnp.set_printoptions(precision=2)\n\n# Plot non-normalized confusion matrix\nplt.figure()\nclass_names = ['1', '2', '3', '4', '5', '6']\nplot_confusion_matrix(cnf_matrix, classes=class_names,\n title='Confusion matrix, without normalization')\nplt.show()\n# Plot normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n title='Confusion matrix, with normalization')\nplt.legend(['train', 'test'], loc='lower right')\nplt.show()\n\n# Decision Tree\nfrom sklearn.tree import DecisionTreeClassifier\n\nclf = DecisionTreeClassifier()\ntree = clf.fit(X_train, y_train)\n\ny_pred = tree.predict(X_test)\nprint('ground truth :', y_test)\nprint('predicted class :', y_pred)\nprint('ground truth :', y_test)\nprint('predicted class :', y_pred)\nprint('cross validation acc :', cross_val_score(tree, X_test, y_test).mean())\n# > Confusion Matrix\n# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test, y_pred)\nnp.set_printoptions(precision=2)\n\n# Plot non-normalized confusion matrix\nplt.figure()\nclass_names = ['1', '2', '3', '4', '5', '6']\nplot_confusion_matrix(cnf_matrix, classes=class_names,\n title='Confusion matrix, without normalization')\nplt.show()\n\n# Plot normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n title='Confusion matrix, with normalization')\nplt.legend(['train', 'test'], loc='lower right')\nplt.show()\n\n# Majority Voting Ensemble Machine\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import VotingClassifier\n\nclf1 = LogisticRegression()\nclf2 = DecisionTreeClassifier()\nclf3 = LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,\n intercept_scaling=1, loss='squared_hinge', max_iter=1000,\n multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,\n 
verbose=0)\nclf4 = KNeighborsClassifier(n_neighbors=30)\neclf = VotingClassifier(estimators=[('lr', clf1), ('dt', clf2), ('svm', clf3), ('knn', clf4)], voting='hard')\neclf = eclf.fit(X_train, y_train)\ny_pred = eclf.predict(X_test.values)\nprint('ground truth :', y_test)\nprint('predicted class :', y_pred)\nprint('ground truth :', y_test)\nprint('predicted class :', y_pred)\nprint('cross validation acc :', cross_val_score(eclf, X_test, y_test).mean())\n\n# -> Confusion Matrix\n# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test, y_pred)\nnp.set_printoptions(precision=2)\n\n# Plot non-normalized confusion matrix\nplt.figure()\nclass_names = ['1', '2', '3', '4', '5', '6']\nplot_confusion_matrix(cnf_matrix, classes=class_names,\n                      title='Confusion matrix, without normalization')\nplt.show()\n# Plot normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n                      title='Confusion matrix, with normalization')\nplt.legend(['train', 'test'], loc='lower right')\nplt.show()\n\n# Logistic Regression\n# OneVsRestClassifier 는 클래스마다 분류기를 하나씩 만들어서 학습시키는 Classifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_curve, auc, roc_auc_score\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import metrics\nfrom scipy import interp\nfrom itertools import cycle\n\nstart = time.time()\n\n# Learn to predict each class against the other\nclassifier = OneVsRestClassifier(LogisticRegression(solver='sag', n_jobs=-1))\nclassifier.fit(X_train, y_train)\ny_pred1 = classifier.predict_proba(X_test)\n\nend = time.time()\nprint(\"\\nTime taken: {:.2f} seconds\".format(end - start))\n\n# > Confusion Matrix\nconfusion_matrix = metrics.confusion_matrix(y_test.astype(int).tolist(), y_pred1.argmax(axis=1))\nconfusion_matrix\n\n# auc_roc = metrics.classification_report(np.argmax(y_test, axis=1), np.argmax(y_pred1, axis=1))\nauc_roc = metrics.classification_report(y_test.astype(int).tolist(), np.argmax(y_pred1, axis=1))\n\nprint('Logistic Regression Classification Report:\\n {}'.format(auc_roc))\n# plot_roc(y_test, y_pred1, \"ROC Logistic Regression\")\n\n# SVC\n\nfrom sklearn.svm import SVC\n\nstart = time.time()\nclassifier = OneVsRestClassifier(SVC(kernel=\"linear\", verbose=1, decision_function_shape='ovr', probability=True))\nclassifier.fit(X_train, y_train)\ny_pred2 = classifier.predict_proba(X_test)\n\nend = time.time()\nprint(\"\\nTime taken: {:.2f} seconds\".format(end - start))\n\n# > Confusion Matrix\n\n# confusion_matrix = metrics.confusion_matrix(y_test.argmax(axis=1), y_pred2.argmax(axis=1))\nconfusion_matrix = metrics.confusion_matrix(y_test.astype(int).tolist(), y_pred2.argmax(axis=1))\nconfusion_matrix\n\n# auc_roc = metrics.classification_report(np.argmax(y_test, axis=1), np.argmax(y_pred2, axis=1))\nauc_roc = metrics.classification_report(y_test.astype(int).tolist(), np.argmax(y_pred2, axis=1))\nprint('SVC Classification Report:\\n {}'.format(auc_roc))\n\n# **We will use the macro average method to evaluate the algorithm.**\n\n# Feature Selection 을 활용한 모델링\n\n# * ROC(Receiver Operating Characteristic) : 모든 임계값에서 분류 모델의 성능을 보여주는 그래프\n# * AUC(Area Under the Curve) : ROC 곡선 아래 영역을 의미함\n\n# Logistic regression with RFE\n\nfrom sklearn.feature_selection import RFE\n\nstart = time.time()\n\nclassifier = OneVsRestClassifier(LogisticRegression(solver='sag', n_jobs=-1))\nrfe = RFE(classifier, n_features_to_select=64, verbose=1, step=1)\nrfe = 
rfe.fit(X_train, y_train_nobinary)\n\nend = time.time()\nprint(\"\\nTime taken: {:.2f} seconds\".format(end - start))\n\n# RFE 알고리즘에 의해 선택된 Feature 리스트\nrfe\nfeatures = X.columns[rfe.support_]\nprint(features)\nX_train_rfe = pd.DataFrame(X_train)[features]\nX_test_rfe = pd.DataFrame(X_test)[features]\n\nclassifier = OneVsRestClassifier(LogisticRegression(solver='sag', n_jobs=-1))\nclassifier.fit(X_train_rfe, y_train)\ny_pred11 = classifier.predict_proba(X_test_rfe)\n\n# > Confusion Matrix\n\nconfusion_matrix = metrics.confusion_matrix(y_test.argmax(axis=1), y_pred11.argmax(axis=1))\nconfusion_matrix\n\nauc_roc = metrics.classification_report(np.argmax(y_test, axis=1), np.argmax(y_pred11, axis=1))\nprint('Logistic regression with Recursive Feature Elimination:\\n {}'.format(auc_roc))\n\nplot_roc(y_test, y_pred11, 'ROC for Logistic regression with Recursive Feature Elimination')\n\n# #### Logistic regression with SelectKBest (Chi Square test)\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.feature_selection import chi2, SelectKBest\n\n# MinMaxScaler 적용\nnorm = MinMaxScaler()\nX_train_norm = norm.fit_transform(X_train)\n\nselector = SelectKBest(chi2, k=64)\nselector.fit(X_train_norm, y_train)\nX_train_kbest = selector.transform(X_train)\nX_test_kbest = selector.transform(X_test)\n\nclassifier = OneVsRestClassifier(LogisticRegression(solver='sag', n_jobs=-1))\nclassifier.fit(X_train_kbest, y_train)\ny_pred12 = classifier.predict_proba(X_test_kbest)\n\n# > Confusion Matrix\n# confusion_matrix = metrics.confusion_matrix(y_test.argmax(axis=1), y_pred12.argmax(axis=1))\nconfusion_matrix = metrics.confusion_matrix(y_test.astype(int).tolist(), y_pred12.argmax(axis=1))\nconfusion_matrix\n\n# auc_roc = metrics.classification_report(np.argmax(y_test, axis=1), np.argmax(y_pred12, axis=1))\nauc_roc = metrics.classification_report(y_test.astype(int).tolist(), np.argmax(y_pred12, axis=1))\nprint('Logistic regression with chi2 test feature selection:\\n {}'.format(auc_roc))\n\nplot_roc(y_test, y_pred12, 'ROC Logistic regression with chi2 test')\n\n# #### SVC with RFE\n\nclassifier = OneVsRestClassifier(SVC(kernel=\"linear\", decision_function_shape='ovr'))\nrfe = RFE(classifier, n_features_to_select=64, verbose=1, step=1)\nrfe = rfe.fit(X_train, y_train_nobinary)\n\nfeatures = pd.DataFrame(X_train).columns[rfe.support_]\nprint(features)\nX_train_rfe = pd.DataFrame(X_train)[features]\nX_test_rfe = pd.DataFrame(X_test)[features]\n\nclassifier = OneVsRestClassifier(SVC(kernel=\"linear\", probability=True, verbose=1, decision_function_shape='ovr'))\nclassifier.fit(X_train_rfe, y_train)\ny_pred21 = classifier.predict_proba(X_test_rfe)\n\n# confusion_matrix = metrics.confusion_matrix(np.argmax(y_test, axis=1), np.argmax(y_pred21, axis=1))\nconfusion_matrix = metrics.confusion_matrix(y_test.astype(int).tolist(), y_pred12.argmax(axis=1))\n\n# confusion_matrix = metrics.confusion_matrix(np.argmax(y_test, axis=1), np.argmax(y_pred21, axis=1))\nconfusion_matrix = metrics.confusion_matrix(y_test.astype(int).tolist(), np.argmax(y_pred21, axis=1))\nconfusion_matrix\n\nauc_roc = metrics.classification_report(np.argmax(y_test, axis=1), np.argmax(y_pred21, axis=1))\nprint('SVC with Recursive Feature Elimination:\\n {}'.format(auc_roc))\n\nplot_roc(y_test, y_pred21, 'ROC for SVC with Recursive Feature Elimination')\n\n# #### SVC with SelectKBest(chi2 test)\n\nclassifier = OneVsRestClassifier(SVC(kernel=\"linear\", probability=True, verbose=1, 
decision_function_shape='ovr'))\nclassifier.fit(X_train_kbest, y_train)\ny_pred22 = classifier.predict_proba(X_test_kbest)\n\n# confusion_matrix = metrics.confusion_matrix(np.argmax(y_test, axis=1), np.argmax(y_pred22, axis=1))\n\nconfusion_matrix = metrics.confusion_matrix(y_test.astype(int).tolist(), y_pred22.argmax(axis=1))\nconfusion_matrix\n\nauc_roc = metrics.classification_report(y_test.astype(int).tolist(), np.argmax(y_pred22, axis=1))\n# auc_roc = metrics.classification_report(np.argmax(y_test, axis=1), np.argmax(y_pred22, axis=1))\nprint('SVC with chi2 test feature selection:\\n {}'.format(auc_roc))\n\nplot_roc(y_test, y_pred22, 'ROC for SVC with feature selection based on chi2 test')\n", "repo_name": "badjiyoon/da_study", "sub_path": "KimJiYoon/Part5/CH12/CH_12.py", "file_name": "CH_12.py", "file_ext": "py", "file_size_in_byte": 23420, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "matplotlib.pyplot.rc", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.unique", "line_number": 53, "usage_type": "call"}, {"api_name": "seaborn.countplot", "line_number": 65, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "seaborn.distplot", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "seaborn.distplot", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "seaborn.boxplot", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", 
"line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "seaborn.scatterplot", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 146, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "sklearn.manifold.TSNE", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 169, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.cov", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 190, "usage_type": "attribute"}, {"api_name": "numpy.cumsum", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}, {"api_name": "numpy.cumsum", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 218, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 220, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 221, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 222, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 224, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.label_binarize", "line_number": 234, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 239, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 245, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.label_binarize", "line_number": 248, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.label_binarize", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 269, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 282, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 287, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 287, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 292, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 293, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 293, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 294, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 294, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 295, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 295, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 296, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 296, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 297, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 297, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 298, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 298, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 299, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 315, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 315, "usage_type": "name"}, {"api_name": "numpy.newaxis", "line_number": 321, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 328, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 328, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 329, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 329, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 330, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 330, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 331, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 332, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 332, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 333, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 333, "usage_type": "name"}, {"api_name": "itertools.product", "line_number": 337, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.text", "line_number": 338, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 338, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 342, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 342, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 343, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 343, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 344, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 344, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 352, "usage_type": 
"call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 360, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 364, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 366, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 366, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 370, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 370, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 371, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 371, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 374, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 374, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 375, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 375, "usage_type": "name"}, {"api_name": "sklearn.ensemble.AdaBoostClassifier", "line_number": 381, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 391, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 392, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 394, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 394, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 398, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 398, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 399, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 399, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 402, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 402, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 403, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 403, "usage_type": "name"}, {"api_name": "sklearn.ensemble.BaggingClassifier", "line_number": 409, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 409, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 419, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 422, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 423, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 426, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 426, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 430, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 430, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 432, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 432, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 435, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 435, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 436, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 436, "usage_type": "name"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 441, "usage_type": "call"}, {"api_name": 
"sklearn.model_selection.cross_val_score", "line_number": 449, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 452, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 453, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 456, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 456, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 460, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 460, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 463, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 463, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 466, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 466, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 467, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 467, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 476, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 477, "usage_type": "call"}, {"api_name": "sklearn.svm.LinearSVC", "line_number": 478, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 482, "usage_type": "call"}, {"api_name": "sklearn.ensemble.VotingClassifier", "line_number": 483, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 490, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 494, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 495, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 498, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 498, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 502, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 502, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 504, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 504, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 507, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 507, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 508, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 508, "usage_type": "name"}, {"api_name": "time.time", "line_number": 520, "usage_type": "call"}, {"api_name": "sklearn.multiclass.OneVsRestClassifier", "line_number": 523, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 523, "usage_type": "call"}, {"api_name": "time.time", "line_number": 527, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 531, "usage_type": "name"}, {"api_name": "sklearn.metrics", "line_number": 531, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 532, "usage_type": "name"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 535, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 535, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 535, "usage_type": "call"}, {"api_name": "time.time", "line_number": 544, "usage_type": "call"}, {"api_name": 
"sklearn.multiclass.OneVsRestClassifier", "line_number": 545, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 545, "usage_type": "call"}, {"api_name": "time.time", "line_number": 549, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 555, "usage_type": "name"}, {"api_name": "sklearn.metrics", "line_number": 555, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 556, "usage_type": "name"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 559, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 559, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 559, "usage_type": "call"}, {"api_name": "time.time", "line_number": 573, "usage_type": "call"}, {"api_name": "sklearn.multiclass.OneVsRestClassifier", "line_number": 575, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 575, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.RFE", "line_number": 576, "usage_type": "call"}, {"api_name": "time.time", "line_number": 579, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 586, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 587, "usage_type": "call"}, {"api_name": "sklearn.multiclass.OneVsRestClassifier", "line_number": 589, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 589, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 595, "usage_type": "name"}, {"api_name": "sklearn.metrics", "line_number": 595, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 596, "usage_type": "name"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 598, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 598, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 598, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 609, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectKBest", "line_number": 612, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.chi2", "line_number": 612, "usage_type": "argument"}, {"api_name": "sklearn.multiclass.OneVsRestClassifier", "line_number": 617, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 617, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 623, "usage_type": "name"}, {"api_name": "sklearn.metrics", "line_number": 623, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 624, "usage_type": "name"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 627, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 627, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 627, "usage_type": "call"}, {"api_name": "sklearn.multiclass.OneVsRestClassifier", "line_number": 634, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 634, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.RFE", "line_number": 635, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 638, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 640, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 641, "usage_type": "call"}, {"api_name": "sklearn.multiclass.OneVsRestClassifier", "line_number": 
643, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 643, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 648, "usage_type": "name"}, {"api_name": "sklearn.metrics", "line_number": 648, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 651, "usage_type": "name"}, {"api_name": "sklearn.metrics", "line_number": 651, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 651, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 652, "usage_type": "name"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 654, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 654, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 654, "usage_type": "call"}, {"api_name": "sklearn.multiclass.OneVsRestClassifier", "line_number": 661, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 661, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 667, "usage_type": "name"}, {"api_name": "sklearn.metrics", "line_number": 667, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 668, "usage_type": "name"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 670, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 670, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 670, "usage_type": "call"}]} +{"seq_id": "35689938541", "text": "from utils import clearScreen, printMenu\nfrom gui import StudentGUI, SubjectGUI, ScoreGUI\nfrom tabulate import tabulate\n\nclass MainGUI:\n def __init__(self):\n self.__stGUI = StudentGUI()\n self.__sbjGUI = SubjectGUI()\n self.__scGUI = ScoreGUI()\n\n def mainMenuScreen(self):\n \n # Xoá trắng màn hình\n clearScreen()\n\n print('*** MENU CHƯƠNG TRÌNH QUẢN LÝ ĐIỂM THI ***')\n funcs = [\n '[1] Quản lý Học viên',\n '[2] Quản lý Môn học',\n '[3] Quản lý Điểm thi',\n '[4] IN KET QUA',\n '[0] Thoát'\n ]\n printMenu(funcs)\n\n cmd = ''\n while cmd not in ['1', '2', '3', '4','0']:\n cmd = input('Chọn chức năng: ')\n\n if cmd == '1':\n # Hiển thị màn hình QL Học viên\n self.studentMenuScreen()\n elif cmd == '2':\n # Hiển thị màn hình QL Môn học\n self.subjectMenuScreen()\n elif cmd == '3':\n # Hiển thị màn hình QL Điểm thi\n self.scoreMenuScreen()\n elif cmd == '4':\n self.printTotal()\n elif cmd == '0':\n exit()\n\n\n def studentMenuScreen(self):\n clearScreen()\n\n print('*** QUẢN LÝ HỌC VIÊN ***')\n funcs = [\n '[1] Thêm',\n '[2] Sửa',\n '[3] Xoá',\n '[4] Tìm kiếm',\n '[0] Quay lại'\n ]\n printMenu(funcs)\n\n cmd = ''\n while cmd not in ['1', '2', '3', '4', '0']:\n cmd = input('Chọn chức năng: ')\n\n if cmd == '1':\n self.__stGUI.insertStudentScreen()\n # Nhập xong thì quay lại màn hình studentMenuScreen\n self.studentMenuScreen()\n elif cmd == '2':\n self.__stGUI.updateStudentScreen()\n # Sửa xong thì quay lại màn hình studentMenuScreen\n self.studentMenuScreen()\n elif cmd == '3':\n self.__stGUI.deleteStudentScreen()\n # Xoá xong thì quay lại màn hình studentMenuScreen\n self.studentMenuScreen()\n elif cmd == '4':\n self.__stGUI.searchStudentScreen()\n # Tìm kiếm xong thì quay lại màn hình studentMenuScreen\n self.studentMenuScreen()\n elif cmd == '0':\n # Quay lại màn hình Menu chính\n self.mainMenuScreen()\n\n \n def subjectMenuScreen(self):\n clearScreen()\n\n print('*** QUẢN LÝ MON HỌC ***')\n funcs = [\n '[1] Thêm',\n '[2] Sửa',\n '[3] Xoá',\n '[4] Tìm 
kiếm',\n '[0] Quay lại'\n ]\n printMenu(funcs)\n\n cmd = ''\n while cmd not in ['1', '2', '3', '4', '0']:\n cmd = input('Chọn chức năng: ')\n\n if cmd == '1':\n self.__sbjGUI.insertSubjectScreen()\n # Nhập xong thì quay lại màn hình studentMenuScreen\n self.subjectMenuScreen()\n elif cmd == '2':\n self.__sbjGUI.updateSubjectScreen()\n # Sửa xong thì quay lại màn hình studentMenuScreen\n self.subjectMenuScreen()\n elif cmd == '3':\n self.__sbjGUI.deleteSubjectScreen()\n # Xoá xong thì quay lại màn hình studentMenuScreen\n self.subjectMenuScreen()\n elif cmd == '4':\n self.__sbjGUI.searchSubjectScreen()\n # Tìm kiếm xong thì quay lại màn hình studentMenuScreen\n self.subjectMenuScreen()\n elif cmd == '0':\n # Quay lại màn hình Menu chính\n self.mainMenuScreen()\n\n def scoreMenuScreen(self): \n clearScreen()\n\n print('*** QUAN LY DIEM THI *** ')\n funcs= [\n '[1] Nhap diem thi',\n '[2] Sua diem thi',\n '[3] Tra cuu diem thi',\n '[0] quay lai'\n ]\n printMenu(funcs)\n cmd =''\n while cmd not in ['1', '2', '3','0']:\n cmd = input('chon chuc nang: ')\n if cmd == '1':\n self.__scGUI.insertScoreScreen()\n self.scoreMenuScreen()\n elif cmd == '2':\n self.__scGUI.updateScoreScreen()\n self.scoreMenuScreen()\n elif cmd == '3':\n self.__scGUI.searchScoreScreen() \n self.scoreMenuScreen()\n elif cmd == '0':\n self.mainMenuScreen()\n\n def printTotal(self):\n clearScreen()\n\n print('*** IN KET XUAT THONG TIN HOC VIEN***')\n funcs = [\n '[9] Xuat file tat ca cac hoc vien',\n '[1] Xuat file tat ca cac hoc vien co diem tong ket A',\n '[2] Xuat file tat ca cac hoc vien co diem tong ket B',\n '[3] Xuat file tat ca cac hoc vien co diem tong ket C',\n '[4] Xuat file tat ca cac hoc vien co diem tong ket D',\n '[0] quay lai'\n ]\n printMenu(funcs)\n \n cmd = ' '\n while cmd not in ['9', '1', '2', '3', '4', '0']:\n cmd = input('chon chuc nang')\n if cmd == '9':\n self.__scGUI.exportScoreScreen()\n self.printTotal()\n if cmd == '1':\n self.__scGUI.exportScoreScreen('A')\n self.printTotal()\n if cmd == '2':\n self.__scGUI.exportScoreScreen('B')\n self.printTotal()\n if cmd == '3':\n self.__scGUI.exportScoreScreen('B')\n self.printTotal()\n if cmd == '4':\n self.__scGUI.exportScoreScreen('D')\n self.printTotal() \n if cmd == '0':\n self.mainMenuScreen() \n\n\nif __name__ == '__main__':\n mainGUI = MainGUI()\n mainGUI.mainMenuScreen()\n ", "repo_name": "HieuDiem/python_core", "sub_path": "Project02/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5749, "program_lang": "python", "lang": "vi", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "gui.StudentGUI", "line_number": 7, "usage_type": "call"}, {"api_name": "gui.SubjectGUI", "line_number": 8, "usage_type": "call"}, {"api_name": "gui.ScoreGUI", "line_number": 9, "usage_type": "call"}, {"api_name": "utils.clearScreen", "line_number": 14, "usage_type": "call"}, {"api_name": "utils.printMenu", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.clearScreen", "line_number": 46, "usage_type": "call"}, {"api_name": "utils.printMenu", "line_number": 56, "usage_type": "call"}, {"api_name": "utils.clearScreen", "line_number": 84, "usage_type": "call"}, {"api_name": "utils.printMenu", "line_number": 94, "usage_type": "call"}, {"api_name": "utils.clearScreen", "line_number": 121, "usage_type": "call"}, {"api_name": "utils.printMenu", "line_number": 130, "usage_type": "call"}, {"api_name": "utils.clearScreen", "line_number": 147, "usage_type": "call"}, {"api_name": "utils.printMenu", "line_number": 
158, "usage_type": "call"}]} +{"seq_id": "74342991092", "text": "import pygame\r\nimport sys\r\nfrom sprites.platforms import GreenPlatform as gp\r\nfrom sprites.platforms import RedPlatform as rp\r\nfrom sprites.platforms import BluePlatform as bp\r\nfrom sprites.doodle import Player\r\nfrom sprites.spring import Spring\r\nfrom sprites.fon import Score\r\nfrom random import randint as rn\r\n\r\npygame.init()\r\n\r\n# Константы/Constants\r\nWIDTH = 600\r\nHEIGHT = 600\r\nFPS = 20\r\n\r\n# Создание окна/Window creating\r\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\r\npygame.display.set_caption(\"Doodle Jump\")\r\nclock = pygame.time.Clock()\r\n\r\n\r\ndef main():\r\n # Спрайты/Sprites\r\n doode = Player()\r\n score = Score()\r\n offset = 0\r\n springs = pygame.sprite.Group()\r\n plats = pygame.sprite.Group()\r\n for i in range(10):\r\n x = rn(0, 100)\r\n if x in range(10, 90):\r\n plats.add(gp((rn(50, screen.get_width() - 50), screen.get_height() - 37 - (screen.get_height() // 10) * i)))\r\n elif x in range(0, 10):\r\n plats.add(bp((rn(50, screen.get_width() - 50), screen.get_height() - 37 - (screen.get_height() // 10) * i)))\r\n else:\r\n plats.add(rp((rn(50, screen.get_width() - 50), screen.get_height() - 37 - (screen.get_height() // 10) * i)))\r\n\r\n running = True\r\n while running:\r\n # Частота обновления экрана/Screen refresh rate\r\n clock.tick(FPS)\r\n\r\n # События/Events\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n main()\r\n with open(\"results.txt\", 'w') as f1:\r\n f1.write(str(score.steps))\r\n\r\n if doode.rect.bottom < screen.get_height() // 2:\r\n offset = doode.gravity\r\n else:\r\n offset = 0\r\n\r\n for platform in plats:\r\n if platform.rect.collidepoint(doode.rect.bottomleft) or platform.rect.collidepoint(doode.rect.bottomright) and doode.gravity < 0:\r\n if not isinstance(platform, rp):\r\n doode.gravity = 15\r\n else:\r\n platform.image = platform.image2\r\n\r\n for spring in springs:\r\n if spring.rect.collidepoint(doode.rect.bottomleft) or spring.rect.collidepoint(doode.rect.bottomright) and doode.gravity < 0:\r\n doode.gravity = 50\r\n\r\n\r\n # Рендеринг/Rendering\r\n screen.fill((255, 255, 255))\r\n for x in range(0, screen.get_width(), 10):\r\n pygame.draw.line(screen, (222, 222, 222), (x, 0), (x, screen.get_height()))\r\n pygame.draw.line(screen, (222, 222, 222), (0, x), (screen.get_width(), x))\r\n\r\n if len(plats) < 10:\r\n x = rn(0, 100)\r\n if x in range(10, 90):\r\n x = gp((rn(50, screen.get_width() - 50), -10))\r\n plats.add(x)\r\n if True:\r\n springs.add(Spring(x.rect.topleft))\r\n elif x in range(0, 10):\r\n plats.add(bp((rn(50, screen.get_width() - 50), -10)))\r\n else:\r\n plats.add(rp((rn(50, screen.get_width() - 50), -10)))\r\n\r\n\r\n plats.draw(screen)\r\n doode.draw(screen)\r\n springs.draw(screen)\r\n score.draw(screen)\r\n\r\n\r\n # Обновление спрайтов/Updating sprites\r\n plats.update(offset)\r\n doode.update(offset)\r\n springs.update(offset)\r\n score.update(offset)\r\n\r\n # Обновление экрана/Screen Refresh\r\n pygame.display.update()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()", "repo_name": "Hellcat-Brilliant/j7t", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3719, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pygame.init", "line_number": 11, "usage_type": 
"call"}, {"api_name": "pygame.display.set_mode", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sprites.doodle.Player", "line_number": 26, "usage_type": "call"}, {"api_name": "sprites.fon.Score", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.sprite.Group", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 30, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 32, "usage_type": "call"}, {"api_name": "sprites.platforms.GreenPlatform", "line_number": 34, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 34, "usage_type": "call"}, {"api_name": "sprites.platforms.BluePlatform", "line_number": 36, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "sprites.platforms.RedPlatform", "line_number": 38, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 48, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 51, "usage_type": "attribute"}, {"api_name": "sprites.platforms.RedPlatform", "line_number": 63, "usage_type": "argument"}, {"api_name": "pygame.draw.line", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 77, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 80, "usage_type": "call"}, {"api_name": "sprites.platforms.GreenPlatform", "line_number": 82, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 82, "usage_type": "call"}, {"api_name": "sprites.spring.Spring", "line_number": 85, "usage_type": "call"}, {"api_name": "sprites.platforms.BluePlatform", "line_number": 87, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 87, "usage_type": "call"}, {"api_name": "sprites.platforms.RedPlatform", "line_number": 89, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 89, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 105, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 105, "usage_type": "attribute"}]} +{"seq_id": "20979794512", "text": "import cv2\nimport numpy as np\nimport os\nimport time\n\n\nif __name__ == '__main__':\n ### Load Camera Calibration File\n caliFile = np.load(r'D:\\_Project\\E-fence\\camera_relation\\calibration_matrix\\R3V6F\\720p_R3V6F_best.npz')\n matrix = caliFile['mtx']\n distortion = caliFile['dist']\n 
videoPath = r'D:\\_Project\\E-fence\\camera_relation\\Cobot_E-Fence_video'\n outputPath = r'D:\\_Project\\E-fence\\camera_relation\\Output'\n ### iterate over the video folder\n for root, dirs, files in os.walk(videoPath):\n for f in files:\n ### Test Video\n videoFile = os.path.join(root, f)\n cap = cv2.VideoCapture(videoFile) # from video file\n ### Define the codec and create VideoWriter object\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n # fourcc = cv2.VideoWriter_fourcc(*'XVID')\n outVideo = cv2.VideoWriter(videoFile.replace('Cobot_E-Fence_video', 'Output'), fourcc, 30.0, (1280, 720))\n ### captures stream\n while cap.isOpened():\n key = cv2.waitKey(1)\n ret, frame = cap.read()\n if ret:\n img = frame.copy()\n # cv2.imwrite('original.jpg', img)\n imgH, imgW = img.shape[:2]\n\n ### Calibration Start\n t1 = time.time() \n newCameraMatrix, roi = cv2.getOptimalNewCameraMatrix(matrix, distortion, (imgW, imgH), 1, (imgW, imgH))\n ### undistort\n dst = cv2.undistort(img, matrix, distortion, None, newCameraMatrix)\n cv2.imwrite('undistort.jpg', dst)\n ### Calibration End\n\n ### Crops Images Start\n cropX, cropY, cropW, cropH = roi\n dst = dst[cropY:cropY + cropH, cropX:cropX + cropW]\n t2 = time.time() # Calibration Crop End\n cv2.imwrite('calibration.jpg', dst)\n ### Crops Images End\n\n ### Resize Start\n dst = cv2.resize(dst, (imgW, imgH), interpolation = cv2.INTER_AREA)\n t3 = time.time() # Calibration Crop&Resize End\n # print('Crop Calibration Time:' + str(t2 - t1))\n # print('Crop & Resize Calibration Time:' + str(t3 - t1))\n # cv2.imwrite('resize.jpg', dst)\n ### Resize End\n\n ### write the frame\n outVideo.write(dst)\n # cv2.imshow('frame', img)\n # cv2.waitKey(250)\n else:\n break\n ### Release everything if job is finished\n cap.release()\n outVideo.release() # finalize the output video file\n cv2.destroyAllWindows()\n\n\n", "repo_name": "peter874j/sharefile", "sub_path": "Camera_Calibration_AUO.py", "file_name": "Camera_Calibration_AUO.py", "file_ext": "py", "file_size_in_byte": 2786, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.load", "line_number": 9, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.getOptimalNewCameraMatrix", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.undistort", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 38, "usage_type": "call"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 49, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "15934390161", "text": "\n__all__ = [\n 'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d',\n 'atleast_3d', 'average', 
'clump_masked', 'clump_unmasked',\n 'column_stack', 'compress_cols', 'compress_nd', 'compress_rowcols',\n 'compress_rows', 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot',\n 'dstack', 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges',\n 'hsplit', 'hstack', 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols',\n 'mask_rows', 'masked_all', 'masked_all_like', 'median', 'mr_',\n 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack',\n 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack',\n ]\nimport itertools\nimport warnings\nfrom . import core as ma\nfrom .core import (\n MaskedArray, MAError, add, array, asarray, concatenate, filled, count,\n getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or,\n nomask, ones, sort, zeros, getdata, get_masked_subclass, dot,\n mask_rowcols\n )\nimport numpy as np\nfrom numpy import ndarray, array as nxarray\nimport numpy.core.umath as umath\nfrom numpy.core.multiarray import normalize_axis_index\nfrom numpy.core.numeric import normalize_axis_tuple\nfrom numpy.lib.function_base import _ureduce\nfrom numpy.lib.index_tricks import AxisConcatenator\ndef issequence(seq):\n return isinstance(seq, (ndarray, tuple, list))\ndef count_masked(arr, axis=None):\n m = getmaskarray(arr)\n return m.sum(axis)\ndef masked_all(shape, dtype=float):\n a = masked_array(np.empty(shape, dtype),\n mask=np.ones(shape, make_mask_descr(dtype)))\n return a\ndef masked_all_like(arr):\n a = np.empty_like(arr).view(MaskedArray)\n a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype))\n return a\nclass _fromnxfunction:\n def __init__(self, funcname):\n self.__name__ = funcname\n self.__doc__ = self.getdoc()\n def getdoc(self):\n npfunc = getattr(np, self.__name__, None)\n doc = getattr(npfunc, '__doc__', None)\n if doc:\n sig = self.__name__ + ma.get_object_signature(npfunc)\n doc = ma.doc_note(doc, \"The function is applied to both the _data \"\n \"and the _mask, if any.\")\n return '\\n\\n'.join((sig, doc))\n return\n def __call__(self, *args, **params):\n pass\nclass _fromnxfunction_single(_fromnxfunction):\n def __call__(self, x, *args, **params):\n func = getattr(np, self.__name__)\n if isinstance(x, ndarray):\n _d = func(x.__array__(), *args, **params)\n _m = func(getmaskarray(x), *args, **params)\n return masked_array(_d, mask=_m)\n else:\n _d = func(np.asarray(x), *args, **params)\n _m = func(getmaskarray(x), *args, **params)\n return masked_array(_d, mask=_m)\nclass _fromnxfunction_seq(_fromnxfunction):\n def __call__(self, x, *args, **params):\n func = getattr(np, self.__name__)\n _d = func(tuple([np.asarray(a) for a in x]), *args, **params)\n _m = func(tuple([getmaskarray(a) for a in x]), *args, **params)\n return masked_array(_d, mask=_m)\nclass _fromnxfunction_args(_fromnxfunction):\n def __call__(self, *args, **params):\n func = getattr(np, self.__name__)\n arrays = []\n args = list(args)\n while len(args) > 0 and issequence(args[0]):\n arrays.append(args.pop(0))\n res = []\n for x in arrays:\n _d = func(np.asarray(x), *args, **params)\n _m = func(getmaskarray(x), *args, **params)\n res.append(masked_array(_d, mask=_m))\n if len(arrays) == 1:\n return res[0]\n return res\nclass _fromnxfunction_allargs(_fromnxfunction):\n def __call__(self, *args, **params):\n func = getattr(np, self.__name__)\n res = []\n for x in args:\n _d = func(np.asarray(x), **params)\n _m = func(getmaskarray(x), **params)\n res.append(masked_array(_d, mask=_m))\n if len(args) == 1:\n return res[0]\n return res\natleast_1d = 
_fromnxfunction_allargs('atleast_1d')\natleast_2d = _fromnxfunction_allargs('atleast_2d')\natleast_3d = _fromnxfunction_allargs('atleast_3d')\nvstack = row_stack = _fromnxfunction_seq('vstack')\nhstack = _fromnxfunction_seq('hstack')\ncolumn_stack = _fromnxfunction_seq('column_stack')\ndstack = _fromnxfunction_seq('dstack')\nstack = _fromnxfunction_seq('stack')\nhsplit = _fromnxfunction_single('hsplit')\ndiagflat = _fromnxfunction_single('diagflat')\ndef flatten_inplace(seq):\n k = 0\n while (k != len(seq)):\n while hasattr(seq[k], '__iter__'):\n seq[k:(k + 1)] = seq[k]\n k += 1\n return seq\ndef apply_along_axis(func1d, axis, arr, *args, **kwargs):\n arr = array(arr, copy=False, subok=True)\n nd = arr.ndim\n axis = normalize_axis_index(axis, nd)\n ind = [0] * (nd - 1)\n i = np.zeros(nd, 'O')\n indlist = list(range(nd))\n indlist.remove(axis)\n i[axis] = slice(None, None)\n outshape = np.asarray(arr.shape).take(indlist)\n i.put(indlist, ind)\n res = func1d(arr[tuple(i.tolist())], *args, **kwargs)\n asscalar = np.isscalar(res)\n if not asscalar:\n try:\n len(res)\n except TypeError:\n asscalar = True\n dtypes = []\n if asscalar:\n dtypes.append(np.asarray(res).dtype)\n outarr = zeros(outshape, object)\n outarr[tuple(ind)] = res\n Ntot = np.product(outshape)\n k = 1\n while k < Ntot:\n ind[-1] += 1\n n = -1\n while (ind[n] >= outshape[n]) and (n > (1 - nd)):\n ind[n - 1] += 1\n ind[n] = 0\n n -= 1\n i.put(indlist, ind)\n res = func1d(arr[tuple(i.tolist())], *args, **kwargs)\n outarr[tuple(ind)] = res\n dtypes.append(asarray(res).dtype)\n k += 1\n else:\n res = array(res, copy=False, subok=True)\n j = i.copy()\n j[axis] = ([slice(None, None)] * res.ndim)\n j.put(indlist, ind)\n Ntot = np.product(outshape)\n holdshape = outshape\n outshape = list(arr.shape)\n outshape[axis] = res.shape\n dtypes.append(asarray(res).dtype)\n outshape = flatten_inplace(outshape)\n outarr = zeros(outshape, object)\n outarr[tuple(flatten_inplace(j.tolist()))] = res\n k = 1\n while k < Ntot:\n ind[-1] += 1\n n = -1\n while (ind[n] >= holdshape[n]) and (n > (1 - nd)):\n ind[n - 1] += 1\n ind[n] = 0\n n -= 1\n i.put(indlist, ind)\n j.put(indlist, ind)\n res = func1d(arr[tuple(i.tolist())], *args, **kwargs)\n outarr[tuple(flatten_inplace(j.tolist()))] = res\n dtypes.append(asarray(res).dtype)\n k += 1\n max_dtypes = np.dtype(np.asarray(dtypes).max())\n if not hasattr(arr, '_mask'):\n result = np.asarray(outarr, dtype=max_dtypes)\n else:\n result = asarray(outarr, dtype=max_dtypes)\n result.fill_value = ma.default_fill_value(result)\n return result\napply_along_axis.__doc__ = np.apply_along_axis.__doc__\ndef apply_over_axes(func, a, axes):\n val = asarray(a)\n N = a.ndim\n if array(axes).ndim == 0:\n axes = (axes,)\n for axis in axes:\n if axis < 0:\n axis = N + axis\n args = (val, axis)\n res = func(*args)\n if res.ndim == val.ndim:\n val = res\n else:\n res = ma.expand_dims(res, axis)\n if res.ndim == val.ndim:\n val = res\n else:\n raise ValueError(\"function is not returning \"\n \"an array of the correct shape\")\n return val\nif apply_over_axes.__doc__ is not None:\n apply_over_axes.__doc__ = np.apply_over_axes.__doc__[\n :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \"\"\"\n Examples\n --------\n >>> a = np.ma.arange(24).reshape(2,3,4)\n >>> a[:,0,1] = np.ma.masked\n >>> a[:,1,:] = np.ma.masked\n >>> a\n masked_array(\n data=[[[0, --, 2, 3],\n [--, --, --, --],\n [8, 9, 10, 11]],\n [[12, --, 14, 15],\n [--, --, --, --],\n [20, 21, 22, 23]]],\n mask=[[[False, True, False, False],\n [ True, True, True, 
True],\n [False, False, False, False]],\n [[False, True, False, False],\n [ True, True, True, True],\n [False, False, False, False]]],\n fill_value=999999)\n >>> np.ma.apply_over_axes(np.ma.sum, a, [0,2])\n masked_array(\n data=[[[46],\n [--],\n [124]]],\n mask=[[[False],\n [ True],\n [False]]],\n fill_value=999999)\n Tuple axis arguments to ufuncs are equivalent:\n >>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1))\n masked_array(\n data=[[[46],\n [--],\n [124]]],\n mask=[[[False],\n [ True],\n [False]]],\n fill_value=999999)\n \"\"\"\ndef average(a, axis=None, weights=None, returned=False):\n a = asarray(a)\n m = getmask(a)\n if weights is None:\n avg = a.mean(axis)\n scl = avg.dtype.type(a.count(axis))\n else:\n wgt = np.asanyarray(weights)\n if issubclass(a.dtype.type, (np.integer, np.bool_)):\n result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')\n else:\n result_dtype = np.result_type(a.dtype, wgt.dtype)\n if a.shape != wgt.shape:\n if axis is None:\n raise TypeError(\n \"Axis must be specified when shapes of a and weights \"\n \"differ.\")\n if wgt.ndim != 1:\n raise TypeError(\n \"1D weights expected when shapes of a and weights differ.\")\n if wgt.shape[0] != a.shape[axis]:\n raise ValueError(\n \"Length of weights not compatible with specified axis.\")\n wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)\n wgt = wgt.swapaxes(-1, axis)\n if m is not nomask:\n wgt = wgt*(~a.mask)\n scl = wgt.sum(axis=axis, dtype=result_dtype)\n avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl\n if returned:\n if scl.shape != avg.shape:\n scl = np.broadcast_to(scl, avg.shape).copy()\n return avg, scl\n else:\n return avg\ndef median(a, axis=None, out=None, overwrite_input=False, keepdims=False):\n if not hasattr(a, 'mask'):\n m = np.median(getdata(a, subok=True), axis=axis,\n out=out, overwrite_input=overwrite_input,\n keepdims=keepdims)\n if isinstance(m, np.ndarray) and 1 <= m.ndim:\n return masked_array(m, copy=False)\n else:\n return m\n r, k = _ureduce(a, func=_median, axis=axis, out=out,\n overwrite_input=overwrite_input)\n if keepdims:\n return r.reshape(k)\n else:\n return r\ndef _median(a, axis=None, out=None, overwrite_input=False):\n if np.issubdtype(a.dtype, np.inexact):\n fill_value = np.inf\n else:\n fill_value = None\n if overwrite_input:\n if axis is None:\n asorted = a.ravel()\n asorted.sort(fill_value=fill_value)\n else:\n a.sort(axis=axis, fill_value=fill_value)\n asorted = a\n else:\n asorted = sort(a, axis=axis, fill_value=fill_value)\n if axis is None:\n axis = 0\n else:\n axis = normalize_axis_index(axis, asorted.ndim)\n if asorted.shape[axis] == 0:\n indexer = [slice(None)] * asorted.ndim\n indexer[axis] = slice(0, 0)\n indexer = tuple(indexer)\n return np.ma.mean(asorted[indexer], axis=axis, out=out)\n if asorted.ndim == 1:\n counts = count(asorted)\n idx, odd = divmod(count(asorted), 2)\n mid = asorted[idx + odd - 1:idx + 1]\n if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0:\n s = mid.sum(out=out)\n if not odd:\n s = np.true_divide(s, 2., casting='safe', out=out)\n s = np.lib.utils._median_nancheck(asorted, s, axis, out)\n else:\n s = mid.mean(out=out)\n if np.ma.is_masked(s) and not np.all(asorted.mask):\n return np.ma.minimum_fill_value(asorted)\n return s\n counts = count(asorted, axis=axis, keepdims=True)\n h = counts // 2\n odd = counts % 2 == 1\n l = np.where(odd, h, h-1)\n lh = np.concatenate([l,h], axis=axis)\n low_high = np.take_along_axis(asorted, lh, axis=axis)\n def replace_masked(s):\n if np.ma.is_masked(s):\n rep = 
(~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask\n s.data[rep] = np.ma.minimum_fill_value(asorted)\n s.mask[rep] = False\n replace_masked(low_high)\n if np.issubdtype(asorted.dtype, np.inexact):\n s = np.ma.sum(low_high, axis=axis, out=out)\n np.true_divide(s.data, 2., casting='unsafe', out=s.data)\n s = np.lib.utils._median_nancheck(asorted, s, axis, out)\n else:\n s = np.ma.mean(low_high, axis=axis, out=out)\n return s\ndef compress_nd(x, axis=None):\n x = asarray(x)\n m = getmask(x)\n if axis is None:\n axis = tuple(range(x.ndim))\n else:\n axis = normalize_axis_tuple(axis, x.ndim)\n if m is nomask or not m.any():\n return x._data\n if m.all():\n return nxarray([])\n data = x._data\n for ax in axis:\n axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim)))\n data = data[(slice(None),)*ax + (~m.any(axis=axes),)]\n return data\ndef compress_rowcols(x, axis=None):\n if asarray(x).ndim != 2:\n raise NotImplementedError(\"compress_rowcols works for 2D arrays only.\")\n return compress_nd(x, axis=axis)\ndef compress_rows(a):\n a = asarray(a)\n if a.ndim != 2:\n raise NotImplementedError(\"compress_rows works for 2D arrays only.\")\n return compress_rowcols(a, 0)\ndef compress_cols(a):\n a = asarray(a)\n if a.ndim != 2:\n raise NotImplementedError(\"compress_cols works for 2D arrays only.\")\n return compress_rowcols(a, 1)\ndef mask_rows(a, axis=np._NoValue):\n if axis is not np._NoValue:\n warnings.warn(\n \"The axis argument has always been ignored, in future passing it \"\n \"will raise TypeError\", DeprecationWarning, stacklevel=2)\n return mask_rowcols(a, 0)\ndef mask_cols(a, axis=np._NoValue):\n if axis is not np._NoValue:\n warnings.warn(\n \"The axis argument has always been ignored, in future passing it \"\n \"will raise TypeError\", DeprecationWarning, stacklevel=2)\n return mask_rowcols(a, 1)\ndef ediff1d(arr, to_end=None, to_begin=None):\n arr = ma.asanyarray(arr).flat\n ed = arr[1:] - arr[:-1]\n arrays = [ed]\n if to_begin is not None:\n arrays.insert(0, to_begin)\n if to_end is not None:\n arrays.append(to_end)\n if len(arrays) != 1:\n ed = hstack(arrays)\n return ed\ndef unique(ar1, return_index=False, return_inverse=False):\n output = np.unique(ar1,\n return_index=return_index,\n return_inverse=return_inverse)\n if isinstance(output, tuple):\n output = list(output)\n output[0] = output[0].view(MaskedArray)\n output = tuple(output)\n else:\n output = output.view(MaskedArray)\n return output\ndef intersect1d(ar1, ar2, assume_unique=False):\n if assume_unique:\n aux = ma.concatenate((ar1, ar2))\n else:\n aux = ma.concatenate((unique(ar1), unique(ar2)))\n aux.sort()\n return aux[:-1][aux[1:] == aux[:-1]]\ndef setxor1d(ar1, ar2, assume_unique=False):\n if not assume_unique:\n ar1 = unique(ar1)\n ar2 = unique(ar2)\n aux = ma.concatenate((ar1, ar2))\n if aux.size == 0:\n return aux\n aux.sort()\n auxf = aux.filled()\n flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True]))\n flag2 = (flag[1:] == flag[:-1])\n return aux[flag2]\ndef in1d(ar1, ar2, assume_unique=False, invert=False):\n if not assume_unique:\n ar1, rev_idx = unique(ar1, return_inverse=True)\n ar2 = unique(ar2)\n ar = ma.concatenate((ar1, ar2))\n order = ar.argsort(kind='mergesort')\n sar = ar[order]\n if invert:\n bool_ar = (sar[1:] != sar[:-1])\n else:\n bool_ar = (sar[1:] == sar[:-1])\n flag = ma.concatenate((bool_ar, [invert]))\n indx = order.argsort(kind='mergesort')[:len(ar1)]\n if assume_unique:\n return flag[indx]\n else:\n return flag[indx][rev_idx]\ndef isin(element, test_elements, 
assume_unique=False, invert=False):\n element = ma.asarray(element)\n return in1d(element, test_elements, assume_unique=assume_unique,\n invert=invert).reshape(element.shape)\ndef union1d(ar1, ar2):\n return unique(ma.concatenate((ar1, ar2), axis=None))\ndef setdiff1d(ar1, ar2, assume_unique=False):\n if assume_unique:\n ar1 = ma.asarray(ar1).ravel()\n else:\n ar1 = unique(ar1)\n ar2 = unique(ar2)\n return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]\ndef _covhelper(x, y=None, rowvar=True, allow_masked=True):\n x = ma.array(x, ndmin=2, copy=True, dtype=float)\n xmask = ma.getmaskarray(x)\n if not allow_masked and xmask.any():\n raise ValueError(\"Cannot process masked data.\")\n if x.shape[0] == 1:\n rowvar = True\n rowvar = int(bool(rowvar))\n axis = 1 - rowvar\n if rowvar:\n tup = (slice(None), None)\n else:\n tup = (None, slice(None))\n if y is None:\n xnotmask = np.logical_not(xmask).astype(int)\n else:\n y = array(y, copy=False, ndmin=2, dtype=float)\n ymask = ma.getmaskarray(y)\n if not allow_masked and ymask.any():\n raise ValueError(\"Cannot process masked data.\")\n if xmask.any() or ymask.any():\n if y.shape == x.shape:\n common_mask = np.logical_or(xmask, ymask)\n if common_mask is not nomask:\n xmask = x._mask = y._mask = ymask = common_mask\n x._sharedmask = False\n y._sharedmask = False\n x = ma.concatenate((x, y), axis)\n xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int)\n x -= x.mean(axis=rowvar)[tup]\n return (x, xnotmask, rowvar)\ndef cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):\n if ddof is not None and ddof != int(ddof):\n raise ValueError(\"ddof must be an integer\")\n if ddof is None:\n if bias:\n ddof = 0\n else:\n ddof = 1\n (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)\n if not rowvar:\n fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof\n result = (dot(x.T, x.conj(), strict=False) / fact).squeeze()\n else:\n fact = np.dot(xnotmask, xnotmask.T) * 1. 
- ddof\n result = (dot(x, x.T.conj(), strict=False) / fact).squeeze()\n return result\ndef corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True,\n ddof=np._NoValue):\n msg = 'bias and ddof have no effect and are deprecated'\n if bias is not np._NoValue or ddof is not np._NoValue:\n warnings.warn(msg, DeprecationWarning, stacklevel=2)\n (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)\n if not rowvar:\n fact = np.dot(xnotmask.T, xnotmask) * 1.\n c = (dot(x.T, x.conj(), strict=False) / fact).squeeze()\n else:\n fact = np.dot(xnotmask, xnotmask.T) * 1.\n c = (dot(x, x.T.conj(), strict=False) / fact).squeeze()\n try:\n diag = ma.diagonal(c)\n except ValueError:\n return 1\n if xnotmask.all():\n _denom = ma.sqrt(ma.multiply.outer(diag, diag))\n else:\n _denom = diagflat(diag)\n _denom._sharedmask = False\n n = x.shape[1 - rowvar]\n if rowvar:\n for i in range(n - 1):\n for j in range(i + 1, n):\n _x = mask_cols(vstack((x[i], x[j]))).var(axis=1)\n _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))\n else:\n for i in range(n - 1):\n for j in range(i + 1, n):\n _x = mask_cols(\n vstack((x[:, i], x[:, j]))).var(axis=1)\n _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))\n return c / _denom\nclass MAxisConcatenator(AxisConcatenator):\n concatenate = staticmethod(concatenate)\n @classmethod\n def makemat(cls, arr):\n data = super(MAxisConcatenator, cls).makemat(arr.data, copy=False)\n return array(data, mask=arr.mask)\n def __getitem__(self, key):\n if isinstance(key, str):\n raise MAError(\"Unavailable for masked array.\")\n return super(MAxisConcatenator, self).__getitem__(key)\nclass mr_class(MAxisConcatenator):\n def __init__(self):\n MAxisConcatenator.__init__(self, 0)\nmr_ = mr_class()\ndef flatnotmasked_edges(a):\n m = getmask(a)\n if m is nomask or not np.any(m):\n return np.array([0, a.size - 1])\n unmasked = np.flatnonzero(~m)\n if len(unmasked) > 0:\n return unmasked[[0, -1]]\n else:\n return None\ndef notmasked_edges(a, axis=None):\n a = asarray(a)\n if axis is None or a.ndim == 1:\n return flatnotmasked_edges(a)\n m = getmaskarray(a)\n idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim))\n return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]),\n tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ]\ndef flatnotmasked_contiguous(a):\n m = getmask(a)\n if m is nomask:\n return [slice(0, a.size)]\n i = 0\n result = []\n for (k, g) in itertools.groupby(m.ravel()):\n n = len(list(g))\n if not k:\n result.append(slice(i, i + n))\n i += n\n return result\ndef notmasked_contiguous(a, axis=None):\n a = asarray(a)\n nd = a.ndim\n if nd > 2:\n raise NotImplementedError(\"Currently limited to atmost 2D array.\")\n if axis is None or nd == 1:\n return flatnotmasked_contiguous(a)\n result = []\n other = (axis + 1) % 2\n idx = [0, 0]\n idx[axis] = slice(None, None)\n for i in range(a.shape[other]):\n idx[other] = i\n result.append(flatnotmasked_contiguous(a[tuple(idx)]))\n return result\ndef _ezclump(mask):\n if mask.ndim > 1:\n mask = mask.ravel()\n idx = (mask[1:] ^ mask[:-1]).nonzero()\n idx = idx[0] + 1\n if mask[0]:\n if len(idx) == 0:\n return [slice(0, mask.size)]\n r = [slice(0, idx[0])]\n r.extend((slice(left, right)\n for left, right in zip(idx[1:-1:2], idx[2::2])))\n else:\n if len(idx) == 0:\n return []\n r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])]\n if mask[-1]:\n r.append(slice(idx[-1], mask.size))\n return r\ndef clump_unmasked(a):\n mask = getattr(a, '_mask', nomask)\n 
if mask is nomask:\n return [slice(0, a.size)]\n return _ezclump(~mask)\ndef clump_masked(a):\n mask = ma.getmask(a)\n if mask is nomask:\n return []\n return _ezclump(mask)\ndef vander(x, n=None):\n _vander = np.vander(x, n)\n m = getmask(x)\n if m is not nomask:\n _vander[m] = 0\n return _vander\nvander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)\ndef polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):\n x = asarray(x)\n y = asarray(y)\n m = getmask(x)\n if y.ndim == 1:\n m = mask_or(m, getmask(y))\n elif y.ndim == 2:\n my = getmask(mask_rows(y))\n if my is not nomask:\n m = mask_or(m, my[:, 0])\n else:\n raise TypeError(\"Expected a 1D or 2D array for y!\")\n if w is not None:\n w = asarray(w)\n if w.ndim != 1:\n raise TypeError(\"expected a 1-d array for weights\")\n if w.shape[0] != y.shape[0]:\n raise TypeError(\"expected w and y to have the same length\")\n m = mask_or(m, getmask(w))\n if m is not nomask:\n not_m = ~m\n if w is not None:\n w = w[not_m]\n return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov)\n else:\n return np.polyfit(x, y, deg, rcond, full, w, cov)\npolyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)\n", "repo_name": "Mockingbird01001/NLG-code-generator-LSTM", "sub_path": "work/data/data_model/batch_1/371.py.transformed.py.transformed.py", "file_name": "371.py.transformed.py.transformed.py", "file_ext": "py", "file_size_in_byte": 24365, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.ndarray", "line_number": 30, "usage_type": "name"}, {"api_name": "core.getmaskarray", "line_number": 32, "usage_type": "call"}, {"api_name": "core.masked_array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 36, "usage_type": "call"}, {"api_name": "core.make_mask_descr", "line_number": 36, "usage_type": "call"}, {"api_name": "core.MaskedArray", "line_number": 39, "usage_type": "argument"}, {"api_name": "numpy.empty_like", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 40, "usage_type": "call"}, {"api_name": "core.make_mask_descr", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 60, "usage_type": "argument"}, {"api_name": "core.getmaskarray", "line_number": 62, "usage_type": "call"}, {"api_name": "core.masked_array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 65, "usage_type": "call"}, {"api_name": "core.getmaskarray", "line_number": 66, "usage_type": "call"}, {"api_name": "core.masked_array", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 71, "usage_type": "call"}, {"api_name": "core.getmaskarray", "line_number": 72, "usage_type": "call"}, {"api_name": "core.masked_array", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 83, "usage_type": "call"}, {"api_name": "core.getmaskarray", "line_number": 84, "usage_type": "call"}, {"api_name": "core.masked_array", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 94, "usage_type": "call"}, {"api_name": "core.getmaskarray", "line_number": 95, "usage_type": "call"}, {"api_name": "core.masked_array", "line_number": 96, "usage_type": "call"}, {"api_name": "core.array", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.core.multiarray.normalize_axis_index", 
"line_number": 120, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.isscalar", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 137, "usage_type": "call"}, {"api_name": "core.zeros", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.product", "line_number": 140, "usage_type": "call"}, {"api_name": "core.asarray", "line_number": 152, "usage_type": "call"}, {"api_name": "core.array", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.product", "line_number": 159, "usage_type": "call"}, {"api_name": "core.asarray", "line_number": 163, "usage_type": "call"}, {"api_name": "core.zeros", "line_number": 165, "usage_type": "call"}, {"api_name": "core.asarray", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 183, "usage_type": "call"}, {"api_name": "core.asarray", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.apply_along_axis", "line_number": 188, "usage_type": "attribute"}, {"api_name": "core.asarray", "line_number": 190, "usage_type": "call"}, {"api_name": "core.array", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.apply_over_axes", "line_number": 210, "usage_type": "attribute"}, {"api_name": "numpy.apply_over_axes.__doc__.find", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.apply_over_axes", "line_number": 211, "usage_type": "attribute"}, {"api_name": "core.asarray", "line_number": 253, "usage_type": "call"}, {"api_name": "core.getmask", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.asanyarray", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.integer", "line_number": 260, "usage_type": "attribute"}, {"api_name": "numpy.bool_", "line_number": 260, "usage_type": "attribute"}, {"api_name": "numpy.result_type", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.result_type", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.broadcast_to", "line_number": 275, "usage_type": "call"}, {"api_name": "core.nomask", "line_number": 277, "usage_type": "name"}, {"api_name": "numpy.multiply", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.broadcast_to", "line_number": 283, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 289, "usage_type": "call"}, {"api_name": "core.getdata", "line_number": 289, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 292, "usage_type": "attribute"}, {"api_name": "core.masked_array", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.lib.function_base._ureduce", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.issubdtype", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.inexact", "line_number": 303, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 304, "usage_type": "attribute"}, {"api_name": "core.sort", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.core.multiarray.normalize_axis_index", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.ma.mean", "line_number": 324, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 324, "usage_type": "attribute"}, {"api_name": "core.count", "line_number": 326, "usage_type": "call"}, {"api_name": "core.count", 
"line_number": 327, "usage_type": "call"}, {"api_name": "numpy.issubdtype", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.inexact", "line_number": 329, "usage_type": "attribute"}, {"api_name": "numpy.true_divide", "line_number": 332, "usage_type": "call"}, {"api_name": "numpy.lib.utils._median_nancheck", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.lib", "line_number": 333, "usage_type": "attribute"}, {"api_name": "numpy.ma.is_masked", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 336, "usage_type": "attribute"}, {"api_name": "numpy.all", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.ma.minimum_fill_value", "line_number": 337, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 337, "usage_type": "attribute"}, {"api_name": "core.count", "line_number": 339, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 343, "usage_type": "call"}, {"api_name": "numpy.take_along_axis", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.ma.is_masked", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 346, "usage_type": "attribute"}, {"api_name": "numpy.all", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.ma.minimum_fill_value", "line_number": 348, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 348, "usage_type": "attribute"}, {"api_name": "numpy.issubdtype", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.inexact", "line_number": 351, "usage_type": "attribute"}, {"api_name": "numpy.ma.sum", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 352, "usage_type": "attribute"}, {"api_name": "numpy.true_divide", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.lib.utils._median_nancheck", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.lib", "line_number": 354, "usage_type": "attribute"}, {"api_name": "numpy.ma.mean", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 356, "usage_type": "attribute"}, {"api_name": "core.asarray", "line_number": 359, "usage_type": "call"}, {"api_name": "core.getmask", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.core.numeric.normalize_axis_tuple", "line_number": 364, "usage_type": "call"}, {"api_name": "core.nomask", "line_number": 365, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 368, "usage_type": "call"}, {"api_name": "core.asarray", "line_number": 375, "usage_type": "call"}, {"api_name": "core.asarray", "line_number": 379, "usage_type": "call"}, {"api_name": "core.asarray", "line_number": 384, "usage_type": "call"}, {"api_name": "numpy._NoValue", "line_number": 388, "usage_type": "attribute"}, {"api_name": "numpy._NoValue", "line_number": 389, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 390, "usage_type": "call"}, {"api_name": "core.mask_rowcols", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy._NoValue", "line_number": 394, "usage_type": "attribute"}, {"api_name": "numpy._NoValue", "line_number": 395, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 396, "usage_type": "call"}, {"api_name": "core.mask_rowcols", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 412, "usage_type": "call"}, {"api_name": "core.MaskedArray", "line_number": 417, "usage_type": 
"argument"}, {"api_name": "core.MaskedArray", "line_number": 420, "usage_type": "argument"}, {"api_name": "numpy.logical_not", "line_number": 485, "usage_type": "call"}, {"api_name": "core.array", "line_number": 487, "usage_type": "call"}, {"api_name": "numpy.logical_or", "line_number": 493, "usage_type": "call"}, {"api_name": "core.nomask", "line_number": 494, "usage_type": "name"}, {"api_name": "numpy.logical_not", "line_number": 499, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 499, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 512, "usage_type": "call"}, {"api_name": "core.dot", "line_number": 513, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 515, "usage_type": "call"}, {"api_name": "core.dot", "line_number": 516, "usage_type": "call"}, {"api_name": "numpy._NoValue", "line_number": 518, "usage_type": "attribute"}, {"api_name": "numpy._NoValue", "line_number": 519, "usage_type": "attribute"}, {"api_name": "numpy._NoValue", "line_number": 521, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 522, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 525, "usage_type": "call"}, {"api_name": "core.dot", "line_number": 526, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 528, "usage_type": "call"}, {"api_name": "core.dot", "line_number": 529, "usage_type": "call"}, {"api_name": "numpy.lib.index_tricks.AxisConcatenator", "line_number": 552, "usage_type": "name"}, {"api_name": "core.concatenate", "line_number": 553, "usage_type": "name"}, {"api_name": "core.array", "line_number": 557, "usage_type": "call"}, {"api_name": "core.MAError", "line_number": 560, "usage_type": "call"}, {"api_name": "core.getmask", "line_number": 567, "usage_type": "call"}, {"api_name": "core.nomask", "line_number": 568, "usage_type": "name"}, {"api_name": "numpy.any", "line_number": 568, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 569, "usage_type": "call"}, {"api_name": "numpy.flatnonzero", "line_number": 570, "usage_type": "call"}, {"api_name": "core.asarray", "line_number": 576, "usage_type": "call"}, {"api_name": "core.getmaskarray", "line_number": 579, "usage_type": "call"}, {"api_name": "core.array", "line_number": 580, "usage_type": "call"}, {"api_name": "numpy.indices", "line_number": 580, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 580, "usage_type": "call"}, {"api_name": "core.getmask", "line_number": 584, "usage_type": "call"}, {"api_name": "core.nomask", "line_number": 585, "usage_type": "name"}, {"api_name": "itertools.groupby", "line_number": 589, "usage_type": "call"}, {"api_name": "core.asarray", "line_number": 596, "usage_type": "call"}, {"api_name": "core.nomask", "line_number": 629, "usage_type": "argument"}, {"api_name": "core.nomask", "line_number": 630, "usage_type": "name"}, {"api_name": "core.nomask", "line_number": 635, "usage_type": "name"}, {"api_name": "numpy.vander", "line_number": 639, "usage_type": "call"}, {"api_name": "core.getmask", "line_number": 640, "usage_type": "call"}, {"api_name": "core.nomask", "line_number": 641, "usage_type": "name"}, {"api_name": "numpy.vander", "line_number": 644, "usage_type": "attribute"}, {"api_name": "core.asarray", "line_number": 646, "usage_type": "call"}, {"api_name": "core.asarray", "line_number": 647, "usage_type": "call"}, {"api_name": "core.getmask", "line_number": 648, "usage_type": "call"}, {"api_name": "core.mask_or", "line_number": 650, "usage_type": "call"}, {"api_name": 
"core.getmask", "line_number": 650, "usage_type": "call"}, {"api_name": "core.getmask", "line_number": 652, "usage_type": "call"}, {"api_name": "core.nomask", "line_number": 653, "usage_type": "name"}, {"api_name": "core.mask_or", "line_number": 654, "usage_type": "call"}, {"api_name": "core.asarray", "line_number": 658, "usage_type": "call"}, {"api_name": "core.mask_or", "line_number": 663, "usage_type": "call"}, {"api_name": "core.getmask", "line_number": 663, "usage_type": "call"}, {"api_name": "core.nomask", "line_number": 664, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 668, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 670, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 671, "usage_type": "attribute"}]} +{"seq_id": "21306000050", "text": "import webapp2\r\nimport jinja2\r\nfrom google.appengine.api import users\r\nfrom google.appengine.ext import ndb\r\nimport os\r\nfrom myuser import MyUser\r\nfrom ReviewModel import ReviewModel\r\n\r\nJINJA_ENVIRONMENT= jinja2.Environment(\r\n loader = jinja2.FileSystemLoader(os.path.dirname(__file__)),\r\n extensions=['jinja2.ext.autoescape'],\r\n autoescape=True\r\n)\r\n\r\nclass EVDATA(ndb.Model):\r\n #email address of this User\r\n name = ndb.StringProperty()\r\n manufacturer = ndb.StringProperty()\r\n year = ndb.StringProperty()\r\n batterysize = ndb.StringProperty()\r\n wltprange=ndb.StringProperty()\r\n cost = ndb.StringProperty()\r\n power = ndb.StringProperty()\r\n\r\n#********************class to return the list of data after searching in the search page using the lower and upper range***********************\r\nclass RET(webapp2.RequestHandler):\r\n def get(self):\r\n result=''\r\n var=None\r\n action=self.request.get('Cancel')\r\n if action==\"Cancel\":\r\n self.redirect('/')\r\n\r\n #*************further function will be performed when the submit button is clicked***********************************\r\n if self.request.get(\"button\"):\r\n ab=EVDATA.query()\r\n name1=self.request.get('name1',var)\r\n if name1:\r\n ab=ab.filter(EVDATA.name==self.request.get('name1'))\r\n\r\n manufacturer1=self.request.get('manufacturer1',var)\r\n if manufacturer1:\r\n ab=ab.filter(EVDATA.manufacturer==self.request.get('manufacturer1'))\r\n\r\n yearmin1=self.request.get('yearmin1',var)\r\n if yearmin1:\r\n ab=ab.filter(EVDATA.year>=self.request.get('yearmin1'))\r\n\r\n yearmax1=self.request.get('yearmax1',var)\r\n if yearmax1:\r\n ab=ab.filter(EVDATA.year<=self.request.get('yearmax1'))\r\n\r\n batterysizemin1=self.request.get('batterysizemin1',var)\r\n if batterysizemin1:\r\n ab=ab.filter(EVDATA.batterysize>=self.request.get('batterysizemin1'))\r\n\r\n batterysizemax1=self.request.get('batterysizemax1',var)\r\n if batterysizemax1:\r\n ab=ab.filter(EVDATA.batterysize<=self.request.get('batterysizemax1'))\r\n\r\n wltprangemin1=self.request.get('wltprangemin1',var)\r\n if wltprangemin1:\r\n ab=ab.filter(EVDATA.wltprange>=self.request.get('wltprangemin1'))\r\n\r\n wltprangemax1=self.request.get('wltprangemax1',var)\r\n if wltprangemax1:\r\n ab=ab.filter(EVDATA.wltprange<=self.request.get('wltprangemax1'))\r\n\r\n costmin1=self.request.get('costmin1',var)\r\n if costmin1:\r\n ab=ab.filter(EVDATA.cost>=self.request.get('costmin1'))\r\n\r\n costmax1=self.request.get('costmax1',var)\r\n if costmax1:\r\n ab=ab.filter(EVDATA.cost<=self.request.get('costmax1'))\r\n\r\n powermin1=self.request.get('powermin1',var)\r\n if powermin1:\r\n ab=ab.filter(EVDATA.power>=self.request.get('powermin1'))\r\n\r\n 
powermax1=self.request.get('powermax1',var)\r\n if powermax1:\r\n ab=ab.filter(EVDATA.power<=self.request.get('powermax1'))\r\n\r\n result=ab.fetch()\r\n if len(result)==0:\r\n self.response.write(\"There is no data for this search. Please try with other values\")\r\n\r\n template_values={\r\n 'result':result\r\n }\r\n action = self.request.get('button')\r\n #*******here if the user click on view then all the values will be rendered to next page******************\r\n if action == 'View':\r\n self.response.write(action)\r\n self.redirect('/EditDelete')\r\n template = JINJA_ENVIRONMENT.get_template('View.html')\r\n self.response.write(template.render(template_values))\r\n\r\n template = JINJA_ENVIRONMENT.get_template('search.html')\r\n self.response.write(template.render(template_values))\r\n", "repo_name": "ritz0505/EV-Database", "sub_path": "EVDatabase/searchClass.py", "file_name": "searchClass.py", "file_ext": "py", "file_size_in_byte": 4008, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "jinja2.Environment", "line_number": 9, "usage_type": "call"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.ndb.Model", "line_number": 15, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.ndb", "line_number": 15, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 17, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 17, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 18, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 18, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 19, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 19, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 20, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 20, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 21, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 21, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 22, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 22, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 23, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 23, "usage_type": "name"}, {"api_name": "webapp2.RequestHandler", "line_number": 26, "usage_type": "attribute"}]} +{"seq_id": "73041950453", "text": "# _*_ coding: utf-8 _*_\n\n\"\"\"\nconcur_async_insts.py by xianhu\n\"\"\"\n\nimport re\nimport sys\nimport random\nimport logging\nimport asyncio\nimport datetime\nimport aiohttp\nfrom ..utilities import get_url_legal, make_random_useragent\n\n\nclass FetcherAsync(object):\n \"\"\"\n class of FetcherAsync\n \"\"\"\n\n def __init__(self, max_repeat=3, sleep_time=0):\n \"\"\"\n constructor\n \"\"\"\n self._max_repeat = max_repeat # default: 3, maximum repeat fetching time for a url\n self._sleep_time = sleep_time # default: 0, sleeping time after a fetching for a url\n\n self._session = None # session 
object to fetch the content of a url\n return\n\n def init_session(self, loop):\n \"\"\"\n initial self._session based on loop\n \"\"\"\n if not self._session:\n self._session = aiohttp.ClientSession(loop=loop, headers={\"User-Agent\": make_random_useragent(), \"Accept-Encoding\": \"gzip\"})\n return\n\n def close_session(self):\n \"\"\"\n close the session object of this class\n \"\"\"\n if not self._session.closed:\n self._session.close()\n return\n\n async def fetch(self, url: str, keys: object, repeat: int) -> (int, object):\n \"\"\"\n fetch the content of a url, must \"try, except\" and don't change parameters and return\n :return (fetch_result, content): fetch_result can be -1(fetch failed), 0(need repeat), 1(fetch success), content can be anything\n \"\"\"\n logging.debug(\"%s start: keys=%s, repeat=%s, url=%s\", self.__class__.__name__, keys, repeat, url)\n\n await asyncio.sleep(random.randint(0, self._sleep_time))\n try:\n response = await self._session.get(url, params=None, data=None, timeout=5)\n if response.history:\n logging.debug(\"%s redirect: keys=%s, repeat=%s, url=%s\", self.__class__.__name__, keys, repeat, url)\n\n fetch_result, content = 1, (response.status, response.url, await response.text())\n await response.release()\n except Exception as excep:\n if repeat >= self._max_repeat:\n fetch_result, content = -1, None\n logging.error(\"%s error: %s, keys=%s, repeat=%s, url=%s\", self.__class__.__name__, excep, keys, repeat, url)\n else:\n fetch_result, content = 0, None\n logging.debug(\"%s repeat: %s, keys=%s, repeat=%s, url=%s\", self.__class__.__name__, excep, keys, repeat, url)\n\n logging.debug(\"%s end: fetch_result=%s, url=%s\", self.__class__.__name__, fetch_result, url)\n return fetch_result, content\n\n\nclass ParserAsync(object):\n \"\"\"\n class of ParserAsync\n \"\"\"\n\n def __init__(self, max_deep=0):\n \"\"\"\n constructor\n \"\"\"\n self._max_deep = max_deep # default: 0, if -1, spider will not stop until all urls are fetched\n return\n\n async def parse(self, priority: int, url: str, keys: object, deep: int, content: object) -> (int, list, list):\n \"\"\"\n parse the content of a url, must \"try, except\" and don't change parameters and return\n :return (parse_result, url_list, save_list): parse_result can be -1(parse failed), 1(parse success)\n :return (parse_result, url_list, save_list): url_list is [(url, keys, priority), ...], save_list is [item, ...]\n \"\"\"\n logging.debug(\"%s start: priority=%s, keys=%s, deep=%s, url=%s\", self.__class__.__name__, priority, keys, deep, url)\n\n try:\n *_, cur_html = content\n\n parse_result, url_list = 1, []\n if (self._max_deep < 0) or (deep < self._max_deep):\n a_list = re.findall(r\"<a[\\w\\W]+?href=\\\"(?P<url>[\\w\\W]{5,}?)\\\"[\\w\\W]*?>[\\w\\W]+?</a>\", cur_html, flags=re.IGNORECASE)\n url_list = [(_url, keys, priority + 1) for _url in [get_url_legal(href, url) for href in a_list]]\n else:\n logging.debug(\"%s stop parse urls: priority=%s, keys=%s, deep=%s, url=%s\", self.__class__.__name__, priority, keys, deep, url)\n\n title = re.search(r\"<title>(?P<title>[\\w\\W]+?)</title>\", cur_html, flags=re.IGNORECASE)\n save_list = [(title.group(\"title\"), datetime.datetime.now()), ] if title else []\n except Exception as excep:\n parse_result, url_list, save_list = -1, [], []\n logging.error(\"%s error: %s, priority=%s, keys=%s, deep=%s, url=%s\", self.__class__.__name__, excep, priority, keys, deep, url)\n\n logging.debug(\"%s end: parse_result=%s, len(url_list)=%s, len(save_list)=%s, url=%s\", 
self.__class__.__name__, parse_result, len(url_list), len(save_list), url)\n return parse_result, url_list, save_list\n\n\nclass SaverAsync(object):\n \"\"\"\n class of SaverAsync\n \"\"\"\n\n def __init__(self, save_pipe=sys.stdout):\n \"\"\"\n constructor\n \"\"\"\n self._save_pip = save_pipe # default: sys.stdout, also can be a file handler\n return\n\n    async def save(self, url: str, keys: object, item: object) -> bool:\n \"\"\"\n save the item of a url, must \"try, except\" and don't change parameters and return\n :return save_result: True or False\n \"\"\"\n logging.debug(\"%s start: keys=%s, url=%s\", self.__class__.__name__, keys, url)\n\n try:\n self._save_pip.write(\"\\t\".join([url, str(keys)] + [str(i) for i in item]) + \"\\n\")\n self._save_pip.flush()\n save_result = True\n except Exception as excep:\n save_result = False\n logging.error(\"%s error: %s, keys=%s, url=%s\", self.__class__.__name__, excep, keys, url)\n\n logging.debug(\"%s end: save_result=%s, url=%s\", self.__class__.__name__, save_result, url)\n return save_result\n", "repo_name": "LiuFang816/SALSTM_py_data", "sub_path": "python/xianhu_PSpider/PSpider-master/spider/concurrent/concur_async_insts.py", "file_name": "concur_async_insts.py", "file_ext": "py", "file_size_in_byte": 5766, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "21", "api": [{"api_name": "aiohttp.ClientSession", "line_number": 37, "usage_type": "call"}, {"api_name": "utilities.make_random_useragent", "line_number": 37, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 53, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 55, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 59, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 66, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 69, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 71, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 93, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 100, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 100, "usage_type": "attribute"}, {"api_name": "utilities.get_url_legal", "line_number": 101, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 103, "usage_type": "call"}, {"api_name": "re.search", "line_number": 105, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 105, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 106, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 109, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 111, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 120, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 132, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 140, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 142, "usage_type": "call"}]} +{"seq_id": "31631546216", "text": "#! /usr/bin/env python\n\"\"\"\nDate : July 2023 \nAuthor : @oponcet \nDescription :\n - Scan of tes and tid SF is implemented as a rateParam which is profiled. Ex usage : Scan by DM (option 1)\n - Scan of tid SF and tes need to be set as POI with redefineSignalPOIs to include it in the fit. 
Ex usage : Scan by DM (option 2) \n - 2D scan of tes and tid SF. Ex usage : Scan by DM (option 3)\n - Scan of tid SF, tid SF and tes of other regions are profiled POIs. Ex usage : Fit tes by DM and tid SF by pt (option 4)\n - Scan of tes, tid SF and tes of other regions are profiled POIs. Ex usage : Fit tes by DM and tid SF by pt (option 5)\n - 2D scan of tes and tid SF and tes of other regions are profiled POIs. Ex usage : Fit tes by DM and tid SF by pt (option 6) \n\"\"\"\n\nfrom distutils import filelist\nfrom distutils.command.config import config\nimport sys\nimport os\nimport yaml\nfrom argparse import ArgumentParser\n\n# Generating the datacards for mutau channel\ndef generate_datacards_mutau(era, config, extratag):\n print(' >>>>>> Generating datacards for mutau channel')\n os.system(\"./TauES_ID/harvestDatacards_TES_idSF_MCStat.py -y %s -c %s -e %s \"%(era,config,extratag)) \n\n# Generating the datacards for mumu channel\ndef generate_datacards_mumu(era, config_mumu, extratag):\n print(' >>>>>> Generating datacards for mumu channel')\n os.system(\"TauES_ID/harvestDatacards_zmm.py -y %s -c %s -e %s \"%(era,config_mumu,extratag)) # Generating the datacards with one statistical uncertainty for all processes\n\n# Merge the datacards between regions for combine fit and return the name of the combined datacard file\ndef merge_datacards_regions(setup, setup_mumu, config_mumu, era, extratag):\n # Variable of the fit (usually mvis)\n variable = \"m_vis\"\n print(\"Observable : \"+variable)\n # LABEL used for datacard file\n LABEL = setup[\"tag\"]+extratag+\"-\"+era+\"-13TeV\"\n filelist = \"\" # List of the datacard files to merge in one file combinecards.txt\n # Name of the combined datacard file\n outcombinedfile = \"combinecards%s\" %(setup[\"tag\"])\n for region in setup[\"observables\"][\"m_vis\"][\"fitRegions\"]:\n filelist += region + \"=output_\"+era+\"/ztt_mt_m_vis-\"+region+LABEL+\".txt \"\n os.system(\"combineCards.py %s >output_%s/%s.txt\" % (filelist, era,outcombinedfile))\n #print(\"filelist : %s\") %(filelist) \n # Add the CR datacard file to the list of files to merge if there is CR option\n if str(config_mumu) != 'None':\n LABEL_mumu = setup_mumu[\"tag\"]+extratag+\"-\"+era+\"-13TeV\"\n filelist += \"zmm=output_\"+era+\"/ztt_mm_m_vis-baseline\"+LABEL_mumu+\".txt \"\n outcombinedfile += \"CR\"\n os.system(\"combineCards.py %s >output_%s/%s.txt\" % (filelist, era,outcombinedfile))\n print(\">>>>>>>>> merging datacards is done \")\n return outcombinedfile\n\n\n\n# Merge the datacards between mt regions and Zmm when using Zmm CR and return the name of the CR + region datacard file\ndef merge_datacards_ZmmCR(setup, setup_mumu, era,extratag,region):\n # datacard of the region to be merged\n datacardfile_region = \"ztt_mt_m_vis-\"+region+setup[\"tag\"]+extratag+\"-\"+era+\"-13TeV.txt\"\n filelist = \"%s=output_%s/%s\" %(region,era, datacardfile_region)\n LABEL_mumu = setup_mumu[\"tag\"]+extratag+\"-\"+era+\"-13TeV\"\n filelist += \" Zmm=output_\"+era+\"/ztt_mm_m_vis-baseline\"+LABEL_mumu+\".txt \"\n print(filelist)\n # Name of the CR + region datacard file\n outCRfile = \"ztt_mt_m_vis-%s_zmmCR\" %(region)\n os.system(\"combineCards.py %s >output_%s/%s.txt\" % (filelist, era,outCRfile))\n return outCRfile\n \ndef run_combined_fit(setup, setup_mumu, option, **kwargs):\n #tes_range = kwargs.get('tes_range', \"1.000,1.000\")\n tes_range = kwargs.get('tes_range', \"%s,%s\" %(min(setup[\"TESvariations\"][\"values\"]), max(setup[\"TESvariations\"][\"values\"])) )\n tid_SF_range = 
kwargs.get('tid_SF_range', \"0.5,1.5\")\n extratag = kwargs.get('extratag', \"_DeepTau\")\n algo = kwargs.get('algo', \"--algo=grid --alignEdges=1 --saveFitResult \")\n npts_fit = kwargs.get('npts_fit', \"--points=51\")\n fit_opts = kwargs.get('fit_opts', \"--robustFit=1 --setRobustFitAlgo=Minuit2 --setRobustFitStrategy=2 --setRobustFitTolerance=0.001 %s\" %(npts_fit))\n xrtd_opts = kwargs.get('xrtd_opts', \"--X-rtd FITTER_NEW_CROSSING_ALGO --X-rtd FITTER_NE\")\n cmin_opts = kwargs.get('cmin_opts', \"--cminFallbackAlgo Minuit2,Migrad,0:0.0001 --cminPreScan\" )\n save_opts = kwargs.get('save_opts', \"--saveNLL --saveSpecifiedNuis all \" )\n era = kwargs.get('era', \"\")\n config_mumu = kwargs.get('config_mumu', \"\")\n workspace = \"\"\n\n # Create the workspace for combined fit\n if int(option) > 3:\n # merge datacards regions\n datacardfile = merge_datacards_regions(setup,setup_mumu, config_mumu, era, extratag)\n print(\"datacard file for combined fit = %s\" %(datacardfile)) \n # Create workspace \n os.system(\"text2workspace.py output_%s/%s.txt\" %(era, datacardfile))\n workspace = \"output_%s/%s.root\" %(era, datacardfile)\n \n # Variable of the fit (usually mvis)\n variable = \"m_vis\"\n ## For each region defined in scanRegions in the config file \n for r in setup[\"observables\"][\"m_vis\"][\"scanRegions\"]:\n print(\"Region : \"+r)\n\n # Bin label for output file of the fit\n BINLABELoutput = \"mt_\"+variable+\"-\"+r+setup[\"tag\"]+extratag+\"-\"+era+\"-13TeV\"\n\n # For fit by region create the datacards and the workspace here\n if int(option) <= 3 :\n # For CR Zmumu \n print(\"config_mumu = %s\" %(config_mumu))\n if str(config_mumu) != 'None':\n # merge datacards regions and CR\n datacardfile = merge_datacards_ZmmCR(setup, setup_mumu, era, extratag, r)\n print(\"datacard file for fit by region with additional CR = %s\" %(datacardfile)) \n\n else:\n datacardfile = \"ztt_mt_m_vis-\"+r+setup[\"tag\"]+extratag+\"-\"+era+\"-13TeV\"\n print(\"datacard file for fit by region = %s\" %(datacardfile)) \n # Create workspace \n os.system(\"text2workspace.py output_%s/%s.txt\" %(era, datacardfile))\n workspace = \"output_%s/%s.root\" %(era, datacardfile)\n print(\"Datacard workspace has been created\")\n\n ## FIT ##\n\n # Fit of tes_DM by DM with tid_SF as a nuisance parameter \n if option == '1':\n POI = \"tes_%s\" % (r)\n NP = \"rgx{.*tid.*}\"\n print(\">>>>>>> \"+POI+\" fit\")\n POI_OPTS = \"-P %s --setParameterRanges %s=%s:tid_SF_%s=%s --setParameters r=1,rgx{.*tes.*}=1,rgx{.*tid.*}=1 --freezeParameters r \" % (POI, POI, tes_range, r,tid_SF_range) # tes_DM\n MultiDimFit_opts = \" -m 90 %s %s %s -n .%s %s %s %s %s --trackParameters %s,rgx{.**.},rgx{.*sf_W_*.}\" %(workspace, algo, POI_OPTS, BINLABELoutput, fit_opts, xrtd_opts, cmin_opts, save_opts,NP)\n # Fit with combine\n os.system(\"combine -M MultiDimFit %s\" %(MultiDimFit_opts))\n \n # Fit of tid_SF_DM by DM with tes as a nuisance parameter\n elif option == '2':\n POI = \"tid_SF_%s\" % (r)\n NP = \"rgx{.*tid.*}\" \n print(\">>>>>>> Scan of \"+POI)\n POI_OPTS = \"-P %s --redefineSignalPOIs tes_%s --setParameterRanges %s=%s:tes_%s=%s -m 90 --setParameters r=1,rgx{.*tid.*}=1,rgx{.*tes.*}=1 --freezeParameters r --floatOtherPOIs=1\" % (POI,r, POI, tid_SF_range, r,tes_range) # tes_DM\n MultiDimFit_opts = \" %s %s %s -n .%s %s %s %s %s --trackParameters rgx{.*tid.*},rgx{.*W.*},rgx{.*dy.*} --saveInactivePOI=1 \" %(workspace, algo, POI_OPTS, BINLABELoutput, fit_opts, xrtd_opts, cmin_opts, save_opts)\n os.system(\"combine -M MultiDimFit %s \" 
%(MultiDimFit_opts))\n\n # 2D Fit of tes_DM and tid_SF_DM by DM, both are pois\n elif option == '3': \n print(\">>>>>>> Fit of tid_SF_\"+r+\" and tes_\"+r)\n POI1 = \"tid_SF_%s\" % (r)\n POI2 = \"tes_%s\" % (r)\n POI_OPTS = \"-P %s -P %s --setParameterRanges %s=%s:%s=%s --setParameters r=1,%s=1,%s=1 --freezeParameters r \" % (POI2, POI1, POI2, tes_range, POI1,tid_SF_range, POI2, POI1)\n MultiDimFit_opts = \" -m 90 %s %s %s -n .%s %s %s %s %s --trackParameters rgx{.*tid.*},rgx{.*W.*},rgx{.*dy.*}\" %(workspace, algo, POI_OPTS, BINLABELoutput, fit_opts, xrtd_opts, cmin_opts, save_opts)\n os.system(\"combine -M MultiDimFit %s \" %(MultiDimFit_opts))\n\n ### Fit with combined datacards tes_DM0,tes_DM1,tes_DM10,tes_DM11 \n ## Fit of tid_SF in its regions with tes_region and other tid_SF_regions as nuisance parameters tes_DM0,tes_DM1,tes_DM10,tes_DM11\n elif option == '4': \n print(\">>>>>>> Fit of tid_SF_\"+r)\n POI_OPTS = \"-P tid_SF_%s --redefineSignalPOIs tes_DM0_pt1,tes_DM0_pt2,tes_DM1_pt1,tes_DM1_pt2,tes_DM10_pt1,tes_DM10_pt2,tes_DM11_pt1,tes_DM11_pt2 --setParameterRanges rgx{.*tid.*}=%s:rgx{.*tes.*}=%s -m 90 --setParameters r=1,rgx{.*tes.*}=1 --freezeParameters r --floatOtherPOIs=1 \" %(r, tid_SF_range,tes_range)\n MultiDimFit_opts = \"%s %s %s -n .%s %s %s %s %s --trackParameters rgx{.*tid.*},rgx{.*W.*},rgx{.*dy.*} --saveInactivePOI=1\" %(workspace, algo, POI_OPTS, BINLABELoutput, fit_opts, xrtd_opts, cmin_opts, save_opts)\n os.system(\"combine -M MultiDimFit %s\" %(MultiDimFit_opts))\n\n ## Fit of tes in DM regions with tid_SF and other tes_DM as nuisance parameters \n elif option == '5':\n print(\">>>>>>> simultaneous fit of tid_SF in pt bins and tes_\"+r + \" in DM\")\n POI_OPTS = \"-P tes_%s --redefineSignalPOIs tes_DM0_pt1,tes_DM0_pt2,tes_DM1_pt1,tes_DM1_pt2,tes_DM10_pt1,tes_DM10_pt2,tes_DM11_pt1,tes_DM11_pt2 --setParameterRanges rgx{.*tid.*}=%s:rgx{.*tes.*}=%s -m 90 --setParameters r=1,rgx{.*tes.*}=1,rgx{.*tid.*}=1 --freezeParameters r,rgx{.*tid.*} --floatOtherPOIs=1\" %(r, tid_SF_range, tes_range)\n MultiDimFit_opts = \"%s %s %s -n .%s %s %s %s %s --trackParameters rgx{.*tid.*} --saveInactivePOI=1\" %(workspace, algo, POI_OPTS, BINLABELoutput, fit_opts, xrtd_opts, cmin_opts, save_opts)\n os.system(\"combine -M MultiDimFit %s \" %(MultiDimFit_opts))\n\n ### 2D Fit of tes_DM and tid_SF in DM and pt regions with other tid_SF and tes_DM as nuisance parameters\n elif option == '6':\n #for each decay mode\n for r in setup['tidRegions']: #[\"DM0\",\"DM1\",\"DM10\",\"DM11\"]\n for dm in setup['tesRegions']:\n print(\"Region : \"+r)\n print(\">>>>>>> simultaneous fit of tes_\" +r + \" in pt bins and tes_\"+r + \" in DM\")\n POI_OPTS = \"-P tid_SF_%s -P tes_%s --setParameterRanges rgx{.*tid.*}=%s:rgx{.*tes.*}=%s -m 90 --setParameters r=1 --freezeParameters r\" %(r,dm, tid_SF_range, tes_range)\n MultiDimFit_opts = \"-m 90 %s %s %s -n .%s %s %s %s %s \" %(workspace, algo, POI_OPTS, BINLABELoutput, fit_opts, xrtd_opts, cmin_opts, save_opts)\n os.system(\"combine -M MultiDimFit %s\" %(MultiDimFit_opts))\n\n else:\n continue\n\n os.system(\"mv higgsCombine*root output_%s\"%era)\n\n# Plot the scan using output file of combined \ndef plotScan(setup, setup_mumu, option, **kwargs):\n tid_SF_range = 
%s -e %s -s -a -c %s\"% (era, extratag, config))\n #os.system(\"./TauES_ID/plotPostFitScan_POI.py --poi tid_SF -y %s -e %s -r %s,%s -c %s\" %(era,extratag,min(tid_SF_range),max(tid_SF_range), config))\n\n elif option == '1' or option == '5' :\n print(\">>> Plot parabola\")\n os.system(\"./TauES_ID/plotParabola_POI_region.py -p tes -y %s -e %s -r %s,%s -s -a -c %s\" % (era, extratag, min(setup[\"TESvariations\"][\"values\"]), max(setup[\"TESvariations\"][\"values\"]), config))\n #os.system(\"./TauES_ID/plotPostFitScan_POI.py --poi tes -y %s -e %s -r %s,%s -c %s\" %(era,extratag,min(setup[\"TESvariations\"][\"values\"]),max(setup[\"TESvariations\"][\"values\"]), config))\n\n else:\n print(\" No output plot...\")\n\n\n### main function\ndef main(args):\n\n era = args.era\n config = args.config\n config_mumu = args.config_mumu \n option = args.option\n extratag = \"_DeepTau\"\n\n\n print(\"Using configuration file: %s\"%(args.config))\n with open(args.config, 'r') as file:\n setup = yaml.safe_load(file)\n\n if config_mumu != 'None':\n print(\"Using configuration file for mumu: %s\"%(args.config_mumu))\n with open(args.config_mumu, 'r') as file_mumu:\n setup_mumu = yaml.safe_load(file_mumu)\n else: \n setup_mumu = 0\n\n # Generating the datacards for mutau channel\n generate_datacards_mutau(era=era, config=config,extratag=extratag)\n\n # Generating the datacards for mumu channel\n if str(config_mumu) != 'None':\n generate_datacards_mumu(era=era, config_mumu=config_mumu,extratag=extratag)\n\n # Run the fit using combine with the different options \n run_combined_fit(setup,setup_mumu, era=era, config=config, config_mumu=config_mumu, option=option)\n\n # Plots \n plotScan(setup,setup_mumu, era=era, config=config, config_mumu=config_mumu, option=option)\n\n\n###\nif __name__ == '__main__':\n\n argv = sys.argv\n parser = ArgumentParser(prog=\"makeTESfit\", description=\"execute all steps to run TES fit\")\n parser.add_argument('-y', '--era', dest='era', choices=['2016', '2017', '2018', 'UL2016_preVFP','UL2016_postVFP', 'UL2017', 'UL2018','UL2018_v10'], default=['UL2018'], action='store', help=\"set era\")\n parser.add_argument('-c', '--config', dest='config', type=str, default='TauES_ID/config/defaultFitSetupTES_mutau.yml', action='store', help=\"set config file containing sample & fit setup\")\n parser.add_argument('-o', '--option', dest='option', choices=['1', '2', '3', '4', '5','6'], default='1', action='store',\n help=\"set option : Scan of tes and tid SF is profiled (-o 1) ; Scan of tid SF and tes is profiled (-o 2) ; 2D scan of tes and tid SF (-o 3) \\\n ; Scan of tid SF, tid SF and tes of other regions are profiled POIs (-o 4); Scan of tes, tid SF and tes of other regions are profiled POIs(-o 5)\\\n ; 2D scan of tes and tid SF and tes of other regions are profiled POIs (-o 6) \")\n parser.add_argument('-cmm', '--config_mumu', dest='config_mumu', type=str, default='None', action='store', help=\"set config file containing sample & fit setup\")\n\n args = parser.parse_args()\n\n main(args)\n print(\">>>\\n>>> done\\n\")\n", "repo_name": "cms-tau-pog/TauFW", "sub_path": "Fitter/TauES_ID/makecombinedfitTES_SF.py", "file_name": "makecombinedfitTES_SF.py", "file_ext": "py", "file_size_in_byte": 15033, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.system", "line_number": 24, "usage_type": "call"}, {"api_name": "distutils.command.config.config", "line_number": 24, "usage_type": "name"}, {"api_name": "os.system", 
"line_number": 29, "usage_type": "call"}, {"api_name": "distutils.filelist", "line_number": 38, "usage_type": "name"}, {"api_name": "distutils.filelist", "line_number": 42, "usage_type": "name"}, {"api_name": "os.system", "line_number": 43, "usage_type": "call"}, {"api_name": "distutils.filelist", "line_number": 43, "usage_type": "name"}, {"api_name": "distutils.filelist", "line_number": 48, "usage_type": "name"}, {"api_name": "os.system", "line_number": 50, "usage_type": "call"}, {"api_name": "distutils.filelist", "line_number": 50, "usage_type": "name"}, {"api_name": "distutils.filelist", "line_number": 60, "usage_type": "name"}, {"api_name": "distutils.filelist", "line_number": 62, "usage_type": "name"}, {"api_name": "distutils.filelist", "line_number": 63, "usage_type": "argument"}, {"api_name": "os.system", "line_number": 66, "usage_type": "call"}, {"api_name": "distutils.filelist", "line_number": 66, "usage_type": "name"}, {"api_name": "os.system", "line_number": 90, "usage_type": "call"}, {"api_name": "os.system", "line_number": 115, "usage_type": "call"}, {"api_name": "os.system", "line_number": 129, "usage_type": "call"}, {"api_name": "os.system", "line_number": 138, "usage_type": "call"}, {"api_name": "os.system", "line_number": 147, "usage_type": "call"}, {"api_name": "os.system", "line_number": 155, "usage_type": "call"}, {"api_name": "os.system", "line_number": 162, "usage_type": "call"}, {"api_name": "os.system", "line_number": 173, "usage_type": "call"}, {"api_name": "os.system", "line_number": 178, "usage_type": "call"}, {"api_name": "distutils.command.config.config", "line_number": 185, "usage_type": "name"}, {"api_name": "os.system", "line_number": 190, "usage_type": "call"}, {"api_name": "distutils.command.config.config", "line_number": 190, "usage_type": "name"}, {"api_name": "os.system", "line_number": 195, "usage_type": "call"}, {"api_name": "distutils.command.config.config", "line_number": 195, "usage_type": "name"}, {"api_name": "distutils.command.config.config", "line_number": 206, "usage_type": "name"}, {"api_name": "yaml.safe_load", "line_number": 214, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 219, "usage_type": "call"}, {"api_name": "distutils.command.config.config", "line_number": 224, "usage_type": "name"}, {"api_name": "distutils.command.config.config", "line_number": 231, "usage_type": "name"}, {"api_name": "distutils.command.config.config", "line_number": 234, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 240, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 241, "usage_type": "call"}]} +{"seq_id": "16294656476", "text": "import plotly.graph_objs as go\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport plotly.express as px\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\ndf = pd.read_csv('../data/iris.csv')\nfig = px.scatter(df, x='SepalWidth', y='SepalLength', color='Name')\n### anywhere before app.layout create your figure\napp.layout = html.Div(children=[\n html.H1(children='Hello Dash'),\n html.Div(children='''\n Dash: A web application framework for Python.\n '''),\n dcc.Graph(\n id='example-graph',\n figure = fig ### this is the figure you created earlier\n )\n])\nif __name__ == '__main__':\n app.run_server(debug=True)\n", "repo_name": "dmlunde/ATX_Flex_GA", "sub_path": "08-week/app.py", "file_name": "app.py", 
"file_ext": "py", "file_size_in_byte": 789, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "dash.Dash", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 10, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 10, "usage_type": "name"}, {"api_name": "dash_html_components.Div", "line_number": 12, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 13, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 14, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "33330456072", "text": "from django.shortcuts import get_object_or_404, render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views import View\nfrom django.urls import reverse_lazy, reverse\nfrom .models import Account\nfrom .forms import (\n AccountForm, \n RegisterForm, \n LoginForm, \n UpdateUserForm, \n UpdateAccountForm\n )\nfrom django.contrib.auth.models import User\n\n\n# viewing account\nclass AccountView(LoginRequiredMixin, View):\n model = Account\n template = 'accounts/account.html'\n\n def get(self, request, username):\n form = AccountForm(instance=request.user)\n ctx = {'form': form}\n return render(request, self.template, ctx)\n\n\n# creating new account (registration)\nclass AccountCreate(View):\n model = User\n template = 'accounts/register.html'\n success_url = reverse_lazy('success_page')\n\n def get(self, request):\n form = RegisterForm()\n ctx = {'form': form}\n return render(request, self.template, ctx)\n\n def post(self, request):\n form = RegisterForm(request.POST)\n if not form.is_valid():\n ctx = {'form': form}\n return render(request, self.template, ctx)\n\n form.save()\n # ctx= {'process':'registered'}\n request.session['process'] = 'registered to'\n return redirect(self.success_url)\n\n\n# logging into existing account\nclass AccountLogin(View):\n template = 'accounts/login.html'\n success_url = reverse_lazy('success_page')\n\n def get(self, request):\n form = LoginForm()\n ctx = {'form': form}\n return render(request, self.template, ctx)\n\n def post(self, request):\n form = LoginForm(request.POST)\n\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n\n if user is not None:\n login(request, user)\n\n else:\n messages.success(request, (\"There was an error logging in, try again\"))\n ctx = {'form': form}\n return render(request, self.template, ctx)\n\n # ctx = {'process':'logged in'}\n request.session['process'] = 'logged in'\n return redirect(self.success_url)\n\n\n# logging out from account\nclass AccountLogout(View):\n success_url = reverse_lazy('success_page')\n\n def get(self, request):\n logout(request)\n\n # ctx = {'process':'logged out'}\n request.session['process'] = 'logged out from'\n return redirect(self.success_url)\n\n\n# updating account\nclass AccountUpdate(LoginRequiredMixin, View):\n template = 'accounts/update.html'\n account_template = 'accounts/account.html'\n success_url = reverse_lazy('success_page')\n\n def get(self, request, username):\n user_form = UpdateUserForm()\n 
account_form = UpdateAccountForm() \n \n ctx = {\n 'user_form': user_form,\n 'account_form': account_form\n }\n return render(request, self.template, ctx)\n\n def post(self, request, username):\n # print(request.user, request.user.account)\n user_form = UpdateUserForm(request.POST, instance=request.user)\n account_form = UpdateAccountForm(request.POST, request.FILES, instance=request.user.account) \n\n if not (user_form.is_valid() and account_form.is_valid()):\n user_form = UpdateUserForm(instance=request.user)\n account_form = UpdateAccountForm(instance=request.user.account)\n\n return render(request, self.template, {'user_form': user_form, 'account_form': account_form})\n \n user_form.save()\n account_form.save()\n\n request.session['process'] = 'updated at'\n return redirect(self.success_url)\n\n\n# deleting account\nclass AccountDelete(LoginRequiredMixin, View):\n template = 'accounts/delete.html'\n success_url = reverse_lazy('success_page')\n\n def get(self, request, username):\n ctx = {}\n return render(request, self.template, ctx)\n\n def post(self, request, username):\n u = User.objects.get(username=username)\n u.delete()\n \n request.session['process'] = 'deleted from'\n return redirect(self.success_url) \n\n\n", "repo_name": "TamaraSavadyan/games-site-", "sub_path": "games/accounts/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4422, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 20, "usage_type": "name"}, {"api_name": "django.views.View", "line_number": 20, "usage_type": "name"}, {"api_name": "models.Account", "line_number": 21, "usage_type": "name"}, {"api_name": "forms.AccountForm", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 31, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.reverse_lazy", "line_number": 34, "usage_type": "call"}, {"api_name": "forms.RegisterForm", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 39, "usage_type": "call"}, {"api_name": "forms.RegisterForm", "line_number": 42, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 50, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 54, "usage_type": "name"}, {"api_name": "django.urls.reverse_lazy", "line_number": 56, "usage_type": "call"}, {"api_name": "forms.LoginForm", "line_number": 59, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 61, "usage_type": "call"}, {"api_name": "forms.LoginForm", "line_number": 64, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 68, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 71, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 74, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 74, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 76, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 80, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 84, "usage_type": "name"}, 
{"api_name": "django.urls.reverse_lazy", "line_number": 85, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 88, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 92, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 96, "usage_type": "name"}, {"api_name": "django.views.View", "line_number": 96, "usage_type": "name"}, {"api_name": "django.urls.reverse_lazy", "line_number": 99, "usage_type": "call"}, {"api_name": "forms.UpdateUserForm", "line_number": 102, "usage_type": "call"}, {"api_name": "forms.UpdateAccountForm", "line_number": 103, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 109, "usage_type": "call"}, {"api_name": "forms.UpdateUserForm", "line_number": 113, "usage_type": "call"}, {"api_name": "forms.UpdateAccountForm", "line_number": 114, "usage_type": "call"}, {"api_name": "forms.UpdateUserForm", "line_number": 117, "usage_type": "call"}, {"api_name": "forms.UpdateAccountForm", "line_number": 118, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 120, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 126, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 130, "usage_type": "name"}, {"api_name": "django.views.View", "line_number": 130, "usage_type": "name"}, {"api_name": "django.urls.reverse_lazy", "line_number": 132, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 136, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 139, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 139, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 139, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "25151449201", "text": "\nimport csv\nimport os.path\nimport pandas as pd\nfrom typing import List , Dict\nfrom binance import Client\napi_key = os.environ['BINANCE_API_KEY_TEST']\napi_secret = os.environ['BINANCE_API_SECRET_TEST']\nclient = Client(api_key , api_secret)\n\n# coins list contains some of the well known crypto pairs\ncoins = [\"BTC\" , \"ETH\" , \"BNB\" , \"DOG\" ,\"SOL\" , \"XRP\"]\npairs = [\"BTCUSD\" , \"BNBUSD\" , \"ETHUSD\" , \"DOGBNB\"]\nclass Binance_Account:\n\n def __init__(self , account , balance):\n self.account = account\n self.balance = balance\n # account: should your verified binance accound\n def account_info(self , account):\n account = client.get_account()\n btc_balance = client.get_asset_balance(\"BTC\")\n eth_balance = client.get_asset_balance(\"ETH\")\n acc_status = client.get_account_status()\n status_data = pd.DataFrame(acc_status)\n status_data.head()\n #safe it to csv file\n with open(\"data.csv\" , newline='') as file:\n write = csv.writer(file , acc_status=acc_status)\n write .writerow()\n write.writerows(acc_status)\n for keys , values in acc_status.items():\n print(f\"{keys} : {values} \")\n def orders_info(self , pairs:List[str]) -> Dict:\n for pair in pairs:\n orders = client.get_all_orders(pair)\n open = client.get_open_orders(pair)\n\n def transaction_history(self):\n deposits = client.get_deposit_history()\n withdraws = client.get_withdraw_history()\n\n for coin in coins:\n trans_address = client.get_asset_balance(coin)\n if trans_address is None or trans_address == 0:\n 
print(\"you have no balance in the aforementioned coin\")\n pass\n else:\n print(trans_address)\ndef main():\n binance = Binance_Account()\n binance.account_info()\n binance.transaction_history()\nif __name__ == '__main__':\n main()\n", "repo_name": "ayaanlehashi11/BinancePortfolioinfo", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1947, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.path.environ", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 7, "usage_type": "name"}, {"api_name": "os.path.environ", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 8, "usage_type": "name"}, {"api_name": "binance.Client", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 25, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 29, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 34, "usage_type": "name"}, {"api_name": "binance.account_info", "line_number": 52, "usage_type": "call"}, {"api_name": "binance.transaction_history", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "23340188810", "text": "import copy\nfrom typing import Callable, List\n\nimport gymnasium as gym\nimport numpy as np\n\n\ndef ascii_version_of_card(ranks, suits, return_string=True):\n \"\"\"Instead of a boring text version of the card we render an ASCII image of\n the card.\n\n :param cards: One or more card objects\n :param return_string: By default we return the string version\n of the card, but the dealer hide the 1st card and we\n keep it as a list so that the dealer can add a hidden card in front of the list\n \"\"\"\n # we will use this to prints the appropriate icons for each card\n suits_name = [\"s\", \"d\", \"h\", \"c\"]\n suits_symbols = [\"♠\", \"♦\", \"♥\", \"♣\"]\n\n # create an empty list of list, each sublist is a line\n lines = [[] for i in range(9)]\n\n for s, r in zip(suits, ranks):\n # \"King\" should be \"K\" and \"10\" should still be \"10\"\n if r == \"10\": # ten is the only one who's rank is 2 char long\n rank = r\n space = \"\" # if we write \"10\" on the card that line will be 1 char to long\n else:\n rank = r\n space = \" \" # no \"10\", we use a blank space to will the void\n # get the cards suit in two steps\n suit = suits_name.index(s)\n suit = suits_symbols[suit]\n\n # add the individual card on a line by line basis\n lines[0].append(\"┌─────────┐\")\n lines[1].append(\n \"│{}{} │\".format(rank, space)\n ) # use two {} one for char, one for space or char\n lines[2].append(\"│ │\")\n lines[3].append(\"│ │\")\n lines[4].append(\"│ {} │\".format(suit))\n lines[5].append(\"│ │\")\n lines[6].append(\"│ │\")\n lines[7].append(\"│ {}{}│\".format(space, rank))\n lines[8].append(\"└─────────┘\")\n\n result = []\n for index, line in enumerate(lines):\n result.append(\"\".join(lines[index]))\n\n # hidden cards do not use string\n if return_string:\n return \"\\n\".join(result)\n else:\n return result\n\n\nclass DeckEmptyError(Exception):\n pass\n\n\nRANKS = np.array([\"a\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"j\", \"q\", \"k\"])\nSUITS = np.array([\"s\", \"d\", \"c\", \"h\"])\nSUITS_UNICODE = [\"♠\", \"♦\", \"♥\", \"♣\"]\nCOLORS = np.array([\"b\", \"r\"])\nDECK_SIZE = 52\n\n\nclass Deck:\n \"\"\"An object that represents a collection of 
cards.\n\n A deck can represent a single deck or multiple decks\n \"\"\"\n\n def get_obs_space(self, fields=[\"colors\", \"suits\", \"ranks\"], hand_size=1):\n space = []\n for f in fields:\n if f == \"colors\":\n space.append(COLORS.size)\n elif f == \"suits\":\n space.append(SUITS.size)\n elif f == \"ranks\":\n space.append(RANKS.size)\n else:\n raise Exception(f\"Invalid field: {f}\")\n space = np.tile(np.array(space), hand_size)\n if len(space) == 1:\n return gym.spaces.Discrete(space[0])\n return gym.spaces.MultiDiscrete(space)\n\n def __init__(self, num_decks=1, shuffle=False):\n self.num_decks = num_decks\n self.num_cards = DECK_SIZE * num_decks\n self.idx = np.arange(self.num_cards)\n\n self.ranks = np.tile(RANKS.repeat(SUITS.size), num_decks)\n self.ranks_idx = np.tile(np.arange(RANKS.size).repeat(SUITS.size), num_decks)\n\n self.suits = np.tile(np.tile(SUITS, RANKS.size), num_decks)\n self.suits_idx = np.tile(np.tile(np.arange(SUITS.size), RANKS.size), num_decks)\n\n self.colors = np.tile(COLORS, self.num_cards // 2)\n self.colors_idx = np.tile(np.arange(COLORS.size), self.num_cards // 2)\n self.hands = {}\n # The length of the deck, which decreases as cards\n # are drawn/dealt\n self.deck_len = self.num_cards\n if shuffle:\n # For determinism, we should always shuffle from the original input\n self.idx = np.arange(self.num_cards)\n np.random.shuffle(self.idx)\n\n assert self.idx.size == self.num_cards\n assert self.ranks.size == self.num_cards\n assert self.suits.size == self.num_cards\n assert self.colors.size == self.num_cards\n assert self.ranks_idx.size == self.num_cards\n assert self.suits_idx.size == self.num_cards\n assert self.colors_idx.size == self.num_cards\n\n self.keys = [\"idx\", \"ranks\", \"suits\", \"colors\"]\n self.idx_keys = [\"ranks_idx\", \"suits_idx\", \"colors_idx\", \"idx\"]\n\n def define_hand_value(\n self, fn: Callable[[List[str]], int], fields: List[str]\n ) -> None:\n \"\"\"Pass in a function to be used to define the value of a hand\n or set of cards\"\"\"\n self.value_fn = fn\n for f in fields:\n assert f in self.keys, f\"{f} is not {self.keys}\"\n self.value_fn_args = fields\n\n def clone(self) -> \"Deck\":\n return copy.deepcopy(self)\n\n def value(self, player: str) -> int:\n \"\"\"Returns the value of a players hand by calling the function passed\n to define_hand_value\"\"\"\n assert hasattr(\n self, \"value_fn\"\n ), \"Must specify a value fn using define_hand_value first!\"\n\n args = []\n for arg in self.value_fn_args:\n idx = self.hands[player]\n args.append(getattr(self, arg)[idx])\n\n # Numpy will attempt to unpack an ndarray\n # if it is the only element in a list\n # when using a starred expression\n # i.e. 
len([np.array([1,2,3]])) == 3\n if len(args) == 1:\n return self.value_fn(args[0])\n else:\n return self.value_fn(*args)\n\n def value_idx(self, idx: List[int]) -> int:\n \"\"\"Returns the value of a selection of cards\"\"\"\n assert hasattr(\n self, \"value_fn\"\n ), \"Must specify a value fn using define_hand_value first!\"\n args = []\n for arg in self.value_fn_args:\n args.append(getattr(self, arg)[idx])\n\n return self.value_fn(*args)\n\n def __len__(self):\n return self.deck_len\n\n def __getitem__(self, item):\n return self.hands[item]\n\n def add_players(self, *players: List[str]) -> None:\n for player in players:\n self.hands[player] = []\n\n def deal(self, player: str, num_cards: int = 1) -> None:\n \"\"\"Deals a number of cards to the specified player\n from the deck\"\"\"\n new_len = self.deck_len - num_cards\n if new_len < 0:\n raise DeckEmptyError()\n\n self.hands[player] += self.idx[new_len : self.deck_len].tolist()\n self.deck_len = new_len\n\n def discard_hands(self, *players: List[str]):\n \"\"\"Discards the cards in the hand of a player. Note that\n these cards do not go back into the deck. Call reset()\n to fold the hands back into the deck\"\"\"\n for player in players:\n self.hands[player].clear()\n\n def discard_all(self):\n \"\"\"Discards the cards in all player hands. Note that\n these cards do not go back into the deck. Call reset()\n to fold the hands back into the deck\"\"\"\n for player in self.hands:\n self.hands[player].clear()\n\n def discard(self, player: str, hand_idx: int):\n \"\"\"Discards one card in the players hand at the specified idx.\n Note this idx refers to the idx of the card in the hand, rather\n than the idx of the card in the deck\"\"\"\n self.hands[player].pop(hand_idx)\n\n def reset(self, shuffle=True, rng=None):\n \"\"\"Empties the hands of all players and places cards\n back into the deck in their original position. Optionally\n shuffles the deck afterwards\"\"\"\n for hands in self.hands.values():\n hands.clear()\n self.deck_len = self.num_cards\n if shuffle:\n self.idx = np.arange(self.num_cards)\n if rng is None:\n np.random.shuffle(self.idx)\n else:\n self.rng = rng\n rng.shuffle(self.idx)\n\n def show(\n self, player: str, fields: List[str] = [\"colors\", \"suits\", \"ranks\"], pad_to=None\n ) -> List[np.ndarray]:\n \"\"\"Shows the hand of the player, returning the fields specified of the cards\n they hold. 
Optionally zero-pad to a size.\"\"\"\n reprs = []\n if pad_to is not None:\n padding = [0] * (pad_to - len(self.hands[player]))\n else:\n padding = []\n hand_idx = np.array(self.hands[player] + padding, dtype=np.int64)\n for f in fields:\n assert f in [*self.idx_keys, *self.keys], f\"{f} is not a valid key\"\n arr = getattr(self, f)\n # Special case, do not double index indices\n if f == \"idx\":\n reprs.append(np.array(hand_idx))\n else:\n # Requires indexing\n reprs.append(arr[hand_idx])\n\n return np.stack(reprs)\n\n def hand_size(self, player: str) -> int:\n return len(self.hands[player])\n\n def visualize(self, player: str) -> str:\n \"\"\"Returns a string visualization of a player's hand, for printing\n to the terminal\"\"\"\n if len(self[player]) == 0:\n return \"\\n\".join([\"\"] * 10)\n ranks, suits = self.show(player, [\"ranks\", \"suits\"])\n return ascii_version_of_card(ranks, suits)\n\n def visualize_idx(self, idx: List[int]) -> str:\n \"\"\"Returns a string visualization of the following idx,\n referring to cards in the hand or deck\"\"\"\n if len(idx) == 0:\n return \"\\n\".join([\"\"] * 10)\n suits = self.suits[idx]\n ranks = self.ranks[idx]\n return ascii_version_of_card(ranks, suits)\n", "repo_name": "proroklab/popgym", "sub_path": "popgym/core/deck.py", "file_name": "deck.py", "file_ext": "py", "file_size_in_byte": 9714, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 110, "dataset": "github-code", "pt": "21", "api": [{"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "gymnasium.spaces.Discrete", "line_number": 90, "usage_type": "call"}, {"api_name": "gymnasium.spaces", "line_number": 90, "usage_type": "attribute"}, {"api_name": "gymnasium.spaces.MultiDiscrete", "line_number": 91, "usage_type": "call"}, {"api_name": "gymnasium.spaces", "line_number": 91, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 113, "usage_type": "attribute"}, {"api_name": "typing.Callable", "line_number": 127, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 127, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 137, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 160, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 177, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 191, "usage_type": "name"}, {"api_name": 
"numpy.arange", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 221, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 227, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 236, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 247, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 228, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 228, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 260, "usage_type": "name"}]} +{"seq_id": "175980109", "text": "from torch.utils.mobile_optimizer import optimize_for_mobile\nimport torchvision\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nimport time\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import Subset\nfrom PIL import Image\nfrom utils import evaluate_model, create_classification_report\n\ntrain_loader, test_loader = torch.load('./data/trainloader.pth'), torch.load('./data/testloader.pth')\ncuda_device = torch.device(\"cuda:0\")\n\nimg = Image.open(\"paronychia.jpg\")\n\ntrain_transforms = transforms.Compose([transforms.Resize((224,224)),\n transforms.ToTensor(),\n ])\nex = train_transforms(img=img)\nex = ex.unsqueeze(0)\nprint(ex.shape)\n\n# for i,elem in enumerate(ex[0][1]):\n# print(i,\": \",elem[0])\n##########################################\n# pruned_resnet18 = torchvision.models.mobilenet_v2(pretrained=True)\n#\n# pruned_resnet18.fc = nn.Sequential(nn.Linear(512, 256),\n# nn.ReLU(),\n# nn.Dropout(0.2),\n# nn.Linear(256, 23),\n# nn.LogSoftmax(dim=1))\n# pruned_resnet18.load_state_dict(torch.load('best_mobilenet(2).pth'))\n# pruned_resnet18.eval()\n#\n# start_time = time.time()\n# _, eval_accuracy = evaluate_model(model=pruned_resnet18,\n# test_loader=test_loader,\n# device=cuda_device,\n# criterion=None)\n# elapsed_time = time.time()-start_time\n#\n#\n# print(\"MobileNet\")\n# print(\"Time Spent Testing: \",elapsed_time)\n# print(\"Test Accuracy: {:.3f}\".format(eval_accuracy))\n\n##########################################\npruned_resnet18 = torchvision.models.resnet18(pretrained=True)\n\npruned_resnet18.fc = nn.Sequential(nn.Linear(512, 256),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(256, 23),\n nn.LogSoftmax(dim=1))\npruned_resnet18.load_state_dict(torch.load('./saved_model/resnet18_dermnet.pt'))\npruned_resnet18.eval()\n\nstart_time = time.time()\n_, eval_accuracy = evaluate_model(model=pruned_resnet18,\n test_loader=test_loader,\n device=cuda_device,\n criterion=None)\nelapsed_time = time.time()-start_time\n\nprint(\"Pruned ResNet18\")\nprint(\"Time Spent Testing: \",elapsed_time)\nprint(\"Test Accuracy: {:.3f}\".format(eval_accuracy))\n\n##########################################\npruned_resnet18 = torchvision.models.resnet18(pretrained=True)\n\npruned_resnet18.fc = nn.Sequential(nn.Linear(512, 256),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(256, 23),\n nn.LogSoftmax(dim=1))\npruned_resnet18.load_state_dict(torch.load('best_resnet18_1.pth'))\npruned_resnet18.eval()\n\nstart_time = time.time()\n_, eval_accuracy = evaluate_model(model=pruned_resnet18,\n test_loader=test_loader,\n device=cuda_device,\n criterion=None)\nelapsed_time = 
time.time()-start_time\n\nprint(\"ResNet18\")\nprint(\"Time Spent Testing: \",elapsed_time)\nprint(\"Test Accuracy: {:.3f}\".format(eval_accuracy))\n\n##########################################\nimport gc\ngc.collect()\ntorch.cuda.empty_cache()\n\npruned_resnet18 = torchvision.models.resnet50(pretrained=True)\n\npruned_resnet18.fc = nn.Sequential(nn.Linear(2048, 256),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(256, 23),\n nn.LogSoftmax(dim=1))\npruned_resnet18.load_state_dict(torch.load('best_resnet50.pth'))\npruned_resnet18.eval()\n\nstart_time = time.time()\n_, eval_accuracy = evaluate_model(model=pruned_resnet18,\n test_loader=test_loader,\n device=cuda_device,\n criterion=None)\nelapsed_time = time.time()-start_time\n\nprint(\"ResNet50\")\nprint(\"Time Spent Testing: \",elapsed_time)\nprint(\"Test Accuracy: {:.3f}\".format(eval_accuracy))\n\n\n\n\n\n# model.fc = nn.Sequential(nn.Linear(2048, 512),\n# nn.ReLU(),\n# nn.Dropout(0.2),\n# nn.Linear(512, 23),\n# nn.LogSoftmax(dim=1))\n\n# res = model(ex)\n# print(res)\n# pred = torch.argmax(res)\n# print(pred)\n# class_list = ['Acne and Rosacea Photos', 'Actinic Keratosis Basal Cell Carcinoma and other Malignant Lesions', 'Atopic Dermatitis Photos', 'Bullous Disease Photos', 'Cellulitis Impetigo and other Bacterial Infections', 'Eczema Photos', 'Exanthems and Drug Eruptions', 'Hair Loss Photos Alopecia and other Hair Diseases', 'Herpes HPV and other STDs Photos', 'Light Diseases and Disorders of Pigmentation', 'Lupus and other Connective Tissue diseases', 'Melanoma Skin Cancer Nevi and Moles', 'Nail Fungus and other Nail Disease', 'Poison Ivy Photos and other Contact Dermatitis', 'Psoriasis pictures Lichen Planus and related diseases', 'Scabies Lyme Disease and other Infestations and Bites', 'Seborrheic Keratoses and other Benign Tumors', 'Systemic Disease', 'Tinea Ringworm Candidiasis and other Fungal Infections', 'Urticaria Hives', 'Vascular Tumors', 'Vasculitis Photos', 'Warts Molluscum and other Viral Infections']\n# print(class_list[10])\n\n", "repo_name": "hyrumk/CS492_model_training", "sub_path": "practice.py", "file_name": "practice.py", "file_ext": "py", "file_size_in_byte": 5539, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "torch.load", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 16, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 18, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 18, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 20, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 20, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 20, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 21, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 21, "usage_type": "name"}, {"api_name": "torchvision.models.resnet18", "line_number": 53, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 53, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": 
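This script repeats the same load-weights / `eval()` / time / report block once per checkpoint. A hedged sketch of that block factored into one helper is below; it assumes the loader yields `(images, labels)` batches. Whether the project's own `evaluate_model` disables gradients is not visible here, so the sketch does it explicitly with `torch.no_grad()`, which avoids autograd bookkeeping during evaluation.

```python
import time

import torch
import torch.nn as nn

def timed_accuracy(model: nn.Module, loader, device) -> tuple:
    """Return (elapsed seconds, top-1 accuracy) for one pass over loader."""
    model.to(device)
    model.eval()
    correct = total = 0
    start = time.perf_counter()
    with torch.no_grad():  # no gradients needed when only evaluating
        for images, labels in loader:
            images, labels = images.to(device), labels.to(device)
            preds = model(images).argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.numel()
    return time.perf_counter() - start, correct / total
```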
"torch.nn.Dropout", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 60, "usage_type": "call"}, {"api_name": "time.time", "line_number": 63, "usage_type": "call"}, {"api_name": "utils.evaluate_model", "line_number": 64, "usage_type": "call"}, {"api_name": "time.time", "line_number": 68, "usage_type": "call"}, {"api_name": "torchvision.models.resnet18", "line_number": 75, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 82, "usage_type": "call"}, {"api_name": "time.time", "line_number": 85, "usage_type": "call"}, {"api_name": "utils.evaluate_model", "line_number": 86, "usage_type": "call"}, {"api_name": "time.time", "line_number": 90, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 99, "usage_type": "attribute"}, {"api_name": "torchvision.models.resnet50", "line_number": 101, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 101, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 108, "usage_type": "call"}, {"api_name": "time.time", "line_number": 111, "usage_type": "call"}, {"api_name": "utils.evaluate_model", "line_number": 112, "usage_type": "call"}, {"api_name": "time.time", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "31748483385", "text": "from __future__ import print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport 
sys\nsys.path.insert(1,'..') # allow parent modules to be imported\nsys.path.insert(1,'../..') # allow parent modules to be imported\nsys.path.insert(1,'../../..') # allow parent modules to be imported\nimport time\nimport params\nfrom misc.utils import generate_OUinput, x_filter, get_changing_input, interpolate_input\nimport models.brian2.network_sim as net\nimport models.fp.fokker_planck_model as fp\nimport models.ln_exp.ln_exp_model as lnexp\nimport models.ln_dos.ln_dos_model as lndos\nimport models.ln_bexdos.ln_bexdos_model as lnbexdos\nimport models.spec1.spec1_model as s1\nimport models.spec2.spec2_model as s2\nimport models.spec2_red.spec2_red_model as s2_red\n\n# use the following in IPython for qt plots: %matplotlib qt\n\n\n# what will be computed\n\n# network simulation\nrun_network = True\n# full fokker planck model\nrun_fp = True\n\n# reduced models\n# ln cascade\nrun_ln_exp = True\nrun_ln_dos= True\nrun_ln_bexdos = False \n\n# spectral\nrun_spec1 = True\nrun_spec2 = True\nrun_spec2_red = True\n\n\n# use as default the parameters from file params.py\n# if not specified else below\nparams = params.get_params()\n\n# runtime options\n# run simulation of uncoupled (rec=False) or recurrently coupled simulation (rec=True)\nrec = True\n\nparams['runtime'] = 3000.\n# number of neurons\nparams['N_total'] = 4000 #50000\n# time steps for models\nparams['uni_dt'] = 0.01 # [ms]\nparams['fp_dt'] = 0.05\nparams['net_dt'] = 0.05\n\n\n\n\n\n# coupling (and delay) params in the case of recurrency, i.e. rec = True\nparams['K'] = 100\nparams['J'] = 0.05\nparams['delay_type'] = 2\nparams['taud'] = 3.\nparams['const_delay'] = 5.\n\n\n# adaptation params as scalars\nparams['a'] = 4.\nparams['b'] = 40.\n\n\n# [only for reduced models] switch between two different time integration schemes: (1) Euler, (2) Heun\nparams['uni_int_order'] = 2\n\n\n# for generating the input; for all models which do\n# not have the same resolution we have to interpolate\nparams['min_dt'] = min(params['uni_dt'], params['net_dt'],params['fp_dt'])\n\n\nln_data = 'quantities_cascade.h5'\nspec_data = 'quantities_spectral.h5'\nparams['t_ref'] = 0.0\n\n# plotting section\nplot_rates = True\nplot_input = True\n\nplot_adapt = True and (params['a'] > 0 or params['b'] > 0)\n\n# external input mean\n# for the external input mean and the standard deviation any type of input may be defined, such as constant, step, ramp\n\ninput_mean = 'steps' # similar to Fig1 of manuscript\n# input_mean = 'osc'\n# input_mean = 'const'\n# input_mean = 'OU'\n# input_mean = 'ramp'\n\n# filter input mean (necessary for spectral_2m model)\nfilter_mean = True\n\n#input_std = 'const'\n#input_std = 'step'\n#input_std = 'OU'\ninput_std = 'ramp'\nfilter_std = True\n\n# external time trace used for generating input and plotting\n# if time step is unequal to model_dt input gets interpolated for\n# the respective model\nsteps = int(params['runtime']/params['min_dt'])\nt_ext = np.linspace(0., params['runtime'], steps+1)\n\n# time trace computed with min_dt\nparams['t_ext'] = t_ext\n\n# for filter testing set seed\n# np.random.seed(3)\n\n# mu_ext variants\nif input_mean == 'const':\n mu_ext = np.ones(steps+1) * 4.0\n\n# mu = OU process, sigma = const\nelif input_mean == 'OU':\n params['ou_X0'] = 0.\n params['ou_mean'] = 6.0\n params['ou_sigma'] = .5\n params['ou_tau'] = 50.\n mu_ext = generate_OUinput(params)\n\n# oscillating input\nelif input_mean == 'osc':\n freq = 0.005 #kHz\n amp = 0.1 #mV/ms\n offset = 0.5 #mV/ms\n mu_ext = offset*np.ones(len(t_ext)) + 
amp*np.sin(2*np.pi*freq*t_ext)\n\n# input is ramped over a certain time interval from mu_start to mu_end\nelif input_mean == 'ramp':\n # define parameters for input\n ramp_start = 500.\n assert ramp_start < params['runtime']\n ramp_duration = 30.\n mu_start = 2.\n mu_end = 4.\n mu_ext = get_changing_input(params['runtime'],\n ramp_start,params['min_dt'],mu_start,\n mu_end,duration_change=ramp_duration)\n\n# step input scenario for mean input\nelif input_mean == 'steps':\n # vals for steps\n vals = [1, 1, 1, 1, 1, 1.7,\n 1.3,2.7, 2.4, 3.5,\n 3,3.4, 4.1, 3.7, 3.5,\n 2.5,3,3.5, 2, 2.5]\n\n params['vals'] = vals\n params['duration_vals'] = 150.\n\n\n def step_plateaus_up_down(params):\n steps = int(params['runtime']/params['min_dt'])\n trace = np.zeros(steps+1)\n val_idx = int(params['duration_vals']/params['min_dt'])\n assert params['runtime']%params['duration_vals']==0\n assert len(vals)*params['duration_vals'] == params['runtime']\n for i in xrange(len(params['vals'])):\n trace[i*val_idx:i*val_idx+val_idx] = params['vals'][i]\n return trace\n\n mu_ext=step_plateaus_up_down(params)\n\n\n# sigma_ext variants\nif input_std == 'const':\n sigma_ext = np.ones(steps+1) * 2.\n\n\nelif input_std == 'step':\n sigma_ext = np.ones(steps+1)* 4.0\n sigma_ext[int(steps/3):int(2*steps/3)] = 3.0\n sigma_ext[int(2*steps/3):] = 1.5\n \n\n# mu = const, sigma = OU process\nelif input_std == 'OU':\n params['ou_X0'] = 0. #only relevant if params['ou_stationary'] = False\n params['ou_mean'] = 3.0\n params['ou_sigma'] = 1.2\n params['ou_tau'] = 1.\n sigma_ext = generate_OUinput(params)\n\nelif input_std == 'ramp':\n # define parameters for input\n ramp_start = 1500.\n assert ramp_start < params['runtime']\n ramp_duration = 100.\n sigma_start = 3.5\n sigma_end = 1.5\n sigma_ext = get_changing_input(params['runtime'],ramp_start, params['min_dt'],sigma_start,\n sigma_end,duration_change=ramp_duration)\nelse:\n raise NotImplementedError\n\n\n# enforce in any case sufficiently large input\nmu_min = -1.0\nmu_ext[mu_ext < mu_min] = mu_min - (mu_ext[mu_ext < mu_min] - mu_min)\nmu_max = 5.\nmu_ext[mu_ext > mu_max] = mu_max - (mu_ext[mu_ext > mu_max] - mu_max)\nsigma_min = 0.5\nsigma_ext[sigma_ext < sigma_min] = sigma_min - (sigma_ext[sigma_ext < sigma_min] - sigma_min)\nsigma_max = 5.\nsigma_ext[sigma_ext > sigma_max] = sigma_max - (sigma_ext[sigma_ext > sigma_max] - sigma_max)\n\n# filter the input in order to have not sharp edges \n# filter params\nparams['filter_type'] = 'gauss'\n# filter width in time domain ~ 6*filter_gauss_sigma\n# -> keep that in mind for resolution issues\n\nparams['filter_gauss_sigma'] = 1. 
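The clamping just below this point mirrors out-of-range values back into the interval rather than saturating them (`lo - (x - lo)` is `2*lo - x`), which avoids flat plateaus at the bounds. A small self-contained version of that trick, with the caveat that a single reflection only stays in range while the overshoot is smaller than `hi - lo`:

```python
import numpy as np

def reflect_clip(x, lo, hi):
    """Mirror values outside [lo, hi] back inside, as done for mu/sigma here."""
    y = x.copy()
    y[y < lo] = lo - (y[y < lo] - lo)  # reflect below: 2*lo - y
    y[y > hi] = hi - (y[y > hi] - hi)  # reflect above: 2*hi - y
    return y

print(reflect_clip(np.array([-2.0, 0.0, 6.0]), -1.0, 5.0))  # [0. 0. 4.]
```

Separately, `step_plateaus_up_down` above iterates with `xrange`, which only exists in Python 2; under Python 3 it must be `range`.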
#1 for ramps, 0.1-0.5 for OU\nif filter_mean:\n mu_ext_orig = mu_ext\n mu_ext = x_filter(mu_ext_orig, params)\nif filter_std:\n sigma_ext_orig = sigma_ext\n sigma_ext = x_filter(sigma_ext_orig, params)\n\n# collect ext input for model wrappers\next_input0 = [mu_ext, sigma_ext]\n\n# saving results in global results dict\nresults = dict()\nresults['input_mean'] = mu_ext\nresults['input_sigma']= sigma_ext\nresults['model_results'] = dict()\n\n\n\nprint('\\nModels run in {} mode.\\n'.format('recurrent' if rec else 'feedforward'))\n\n# brian network sim\nif run_network:\n ext_input = interpolate_input(ext_input0,params,'net')\n results['model_results']['net'] = \\\n net.network_sim(ext_input, params, rec = rec)\n\n\n#fokker planck equation solved using the Scharfetter-Gummel-flux approximation \nif run_fp:\n ext_input = interpolate_input(ext_input0, params, 'fp')\n results['model_results']['fp'] = \\\n fp.sim_fp_sg(ext_input, params, rec=rec)\n\n#reduced models\n\n# models based on a linear-nonlinear cascade\nif run_ln_exp:\n ext_input = interpolate_input(ext_input0, params, 'reduced')\n results['model_results']['ln_exp'] = \\\n lnexp.run_ln_exp(ext_input, params, ln_data,\n rec_vars= params['rec_lne'], rec= rec)\n\nif run_ln_dos:\n ext_input = interpolate_input(ext_input0, params, 'reduced')\n results['model_results']['ln_dos'] = \\\n lndos.run_ln_dos(ext_input, params,ln_data,\n rec_vars= params['rec_lnd'],\n rec= rec)\n\n# models based on a spectral decomposition of the Fokker-Planck operator\nif run_ln_bexdos:\n ext_input = interpolate_input(ext_input0, params, 'reduced')\n results['model_results']['ln_bexdos'] = \\\n lnbexdos.run_ln_bexdos(ext_input, params,ln_data,\n rec_vars=['wm'], rec = rec)\n\n\nif run_spec1:\n ext_input = interpolate_input(ext_input0, params, 'reduced')\n results['model_results']['spec1'] = \\\n s1.run_spec1(ext_input, params, spec_data,\n rec_vars=params['rec_s1'],\n rec = rec)\n\nif run_spec2:\n ext_input = interpolate_input(ext_input0, params, 'reduced')\n results['model_results']['spec2'] = \\\n s2.run_spec2(ext_input, params, spec_data,\n rec_vars=['wm'],\n rec=rec)\n\nif run_spec2_red:\n ext_input = interpolate_input(ext_input0, params, 'reduced')\n results['model_results']['spec2_red'] = \\\n s2_red.run_spec2_red(ext_input, params, rec_vars=params['rec_sm'],\n rec=rec, filename_h5 = spec_data)\n\n\n\n# plotting section\nnr_p = plot_rates + plot_adapt + plot_input\nfig = plt.figure(); pidx = 1\n\n# plot inputs\nif plot_input:\n ax_mu = fig.add_subplot(nr_p, 1, pidx)\n plt.plot(t_ext, mu_ext_orig, color = 'k', lw=1.5) if filter_mean else 0\n line_mu_final = plt.plot(t_ext, ext_input0[0], color = 'm', lw=1.5, label='$\\mu_\\mathrm{final}$')\n plt.ylabel('$\\mu_{ext}$ [mV/ms]', fontsize=15)\n ax_sig = plt.twinx() \n plt.plot(t_ext, sigma_ext_orig, color = 'g', lw=1.5) if filter_std else 0\n line_sig_final = plt.plot(t_ext, ext_input0[1], color = 'b', lw=1.5, label='$\\sigma_\\mathrm{final}$')\n plt.ylabel('$\\sigma_{ext}$ [$\\sqrt{mV}$/ms]', fontsize=15)\n plt.legend([line_mu_final[0], line_sig_final[0]], \n [line_mu_final[0].get_label(), line_sig_final[0].get_label()])\n pidx +=1\n\n# plot rates\nif plot_rates:\n ax_rate = fig.add_subplot(nr_p, 1, pidx, sharex=ax_mu)\n for model in results['model_results']:\n color = params['color'][model]\n lw = params['lw'][model]\n time = results['model_results'][model]['t']\n rates = results['model_results'][model]['r']\n plt.plot(time, rates, label = model, color = color, lw=lw)\n plt.ylabel('r [Hz]')\n plt.legend()\n pidx 
+= 1\n\n# plot adaptation current\nif plot_adapt:\n ax_adapt = fig.add_subplot(nr_p, 1, pidx, sharex=ax_mu)\n for model in results['model_results']:\n color = params['color'][model]\n lw = params['lw'][model]\n time = results['model_results'][model]['t']\n wm = results['model_results'][model]['wm']\n wm_shape = wm.shape\n time_shape = time.shape\n plt.ylabel(' [pA]')\n plt.plot(time, wm, color = color, lw = lw)\n\n # plot also mean+std/mean-std if net was computed\n if 'net' in results:\n time = results['model_results']['net']['t']\n wm = results['model_results']['net']['wm']\n w_std = results['model_results']['net']['w_std']\n wm_plus = wm + w_std\n wm_minus = wm - w_std\n plt.fill_between(time,wm_minus, wm_plus, color = 'lightpink')\n\n\nif nr_p: plt.show()\n\n\n\n\n\n", "repo_name": "neuromethods/fokker-planck-based-spike-rate-models", "sub_path": "adex_comparison/runmodels.py", "file_name": "runmodels.py", "file_ext": "py", "file_size_in_byte": 11126, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.path.insert", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "params.get_params", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 126, "usage_type": "call"}, {"api_name": "misc.utils.generate_OUinput", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 141, "usage_type": "attribute"}, {"api_name": "misc.utils.get_changing_input", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 186, "usage_type": "call"}, {"api_name": "misc.utils.generate_OUinput", "line_number": 197, "usage_type": "call"}, {"api_name": "misc.utils.get_changing_input", "line_number": 206, "usage_type": "call"}, {"api_name": "misc.utils.x_filter", "line_number": 231, "usage_type": "call"}, {"api_name": "misc.utils.x_filter", "line_number": 234, "usage_type": "call"}, {"api_name": "misc.utils.interpolate_input", "line_number": 251, "usage_type": "call"}, {"api_name": "models.brian2.network_sim.network_sim", "line_number": 253, "usage_type": "call"}, {"api_name": "models.brian2.network_sim", "line_number": 253, "usage_type": "name"}, {"api_name": "misc.utils.interpolate_input", "line_number": 258, "usage_type": "call"}, {"api_name": "models.fp.fokker_planck_model.sim_fp_sg", "line_number": 260, "usage_type": "call"}, {"api_name": "models.fp.fokker_planck_model", "line_number": 260, "usage_type": "name"}, {"api_name": "misc.utils.interpolate_input", "line_number": 266, "usage_type": "call"}, {"api_name": "models.ln_exp.ln_exp_model.run_ln_exp", "line_number": 268, "usage_type": "call"}, {"api_name": "models.ln_exp.ln_exp_model", "line_number": 268, "usage_type": "name"}, {"api_name": "misc.utils.interpolate_input", "line_number": 
272, "usage_type": "call"}, {"api_name": "models.ln_dos.ln_dos_model.run_ln_dos", "line_number": 274, "usage_type": "call"}, {"api_name": "models.ln_dos.ln_dos_model", "line_number": 274, "usage_type": "name"}, {"api_name": "misc.utils.interpolate_input", "line_number": 280, "usage_type": "call"}, {"api_name": "models.ln_bexdos.ln_bexdos_model.run_ln_bexdos", "line_number": 282, "usage_type": "call"}, {"api_name": "models.ln_bexdos.ln_bexdos_model", "line_number": 282, "usage_type": "name"}, {"api_name": "misc.utils.interpolate_input", "line_number": 287, "usage_type": "call"}, {"api_name": "models.spec1.spec1_model.run_spec1", "line_number": 289, "usage_type": "call"}, {"api_name": "models.spec1.spec1_model", "line_number": 289, "usage_type": "name"}, {"api_name": "misc.utils.interpolate_input", "line_number": 294, "usage_type": "call"}, {"api_name": "models.spec2.spec2_model.run_spec2", "line_number": 296, "usage_type": "call"}, {"api_name": "models.spec2.spec2_model", "line_number": 296, "usage_type": "name"}, {"api_name": "misc.utils.interpolate_input", "line_number": 301, "usage_type": "call"}, {"api_name": "models.spec2_red.spec2_red_model.run_spec2_red", "line_number": 303, "usage_type": "call"}, {"api_name": "models.spec2_red.spec2_red_model", "line_number": 303, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 310, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 310, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 315, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 315, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 316, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 317, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 317, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.twinx", "line_number": 318, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 318, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 319, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 319, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 320, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 320, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 321, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 322, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 322, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 334, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 334, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 335, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 335, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 336, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 336, "usage_type": "name"}, {"api_name": "time.shape", "line_number": 348, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 349, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 349, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 350, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 350, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 359, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 359, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 362, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 362, "usage_type": "name"}]} +{"seq_id": "19450704360", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Contact',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n ('phone', models.CharField(blank=True, max_length=10, null=True)),\n ('email', models.EmailField(blank=True, max_length=254, null=True)),\n ('permission_text', models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], blank=True, max_length=3, null=True)),\n ('permission_photo', models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], blank=True, max_length=3, null=True)),\n ('permission_email', models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], blank=True, max_length=3, null=True)),\n ('permission_call', models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], blank=True, max_length=3, null=True)),\n ('permission_mail', models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], blank=True, max_length=3, null=True)),\n ('permission_facebook', models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], blank=True, max_length=3, null=True)),\n ('contact_pref', models.CharField(choices=[('Facebook', 'FACEBOOK'), ('Email', 'EMAIL'), ('Mail', 'MAIL'), ('Call', 'CALL'), ('Text', 'TEXT')], blank=True, max_length=8, null=True)),\n ('date_updated', models.DateTimeField(blank=True, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Hours',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n ('date', models.DateField(help_text='Please use the following format: YYYY-MM-DD.')),\n ('hours_num', models.PositiveSmallIntegerField()),\n ('volunteer_activity', models.CharField(choices=[('FOOD_SHELF', 'Food Shelf'), ('ZOOM_GALA', 'ZOOM Gala'), ('OTHER', 'Other')], max_length=15)),\n ],\n ),\n migrations.CreateModel(\n name='Volunteer',\n fields=[\n ('volunteer_id', models.AutoField(serialize=False, primary_key=True)),\n ('first_name', models.CharField(max_length=20)),\n ('last_name', models.CharField(max_length=20)),\n ('start_date', models.DateField(help_text='Please use the following format: YYYY-MM-DD.')),\n ('organization', models.CharField(blank=True, max_length=30, null=True)),\n ],\n ),\n migrations.AddField(\n model_name='hours',\n name='volunteer',\n field=models.ForeignKey(to='zoom_vols.Volunteer'),\n ),\n migrations.AddField(\n model_name='contact',\n name='volunteer',\n field=models.ForeignKey(to='zoom_vols.Volunteer'),\n ),\n ]\n", "repo_name": "5klynna5/zoom_c", "sub_path": "zoom_vols/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 3090, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 13, 
"usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.PositiveSmallIntegerField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.CharField", 
"line_number": 45, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "47165949384", "text": "import ftplib as FTP\nimport struct\nimport os\nfrom PIL import Image\nimport numpy as np\n\nworkingDir = \".\"\nfor subdir, dirs, files in os.walk(workingDir): \n#fileIn = file(\"test.fits\", \"rb\")\n for filename in files:\n if filename.endswith(\".nms\"): \n nmsfile = open(filename, mode=\"rb\")\n filecontent = nmsfile.read()\n#test = readBin(fileIn, n=16, raw(), size=1, endian = \"little\") \n struct.unpack_from(\"<16x\", filecontent[0:16]) \n#testd = readBin(fileIn, n=2, double(), endian = \"little\")\n struct.unpack_from(\"<2d\", filecontent[16:32]) \n##RawBuff = readBin(fileIn, n=1336, raw(), size=1, endian = \"little\")\n struct.unpack_from(\"<1336x\", filecontent[32:1368]) \n##testY = readBin(fileIn, n=4, integer(), size=2, endian = \"little\")\n pos_array = struct.unpack_from(\"<4h\", filecontent[1368:1376]) \n##widthIm = testY[1];\n widthIm = pos_array[0] \n##heightIm = testY[3];\n heightIm = pos_array[2]\n##testd2 = readBin(fileIn, n=2, double(), endian = \"little\") \n idx= 1376\n num = 2\n size = 8\n struct.unpack_from(\"<2d\", filecontent[idx:idx+num*size]) \n##RawBuff2 = readBin(fileIn, n=2076, raw(), size=1, endian = \"little\")\n idx = idx+num*size\n num = 2076\n size = 1 \n struct.unpack_from(\"<2076x\", filecontent[idx:idx+num*size]) \n##DepthIn = readBin(fileIn, n= widthIm*heightIm, integer(), size=2, signed=FALSE, endian = \"little\")\n idx = idx+num*size\n num = widthIm*heightIm\n size = 2\n dataSz = \"<\"+str(num)+\"H\"\n DepthIn = struct.unpack_from(dataSz, filecontent[idx:idx+num*size])\n##LaserIn = readBin(fileIn, n= widthIm*heightIm, integer(), size=1, signed=FALSE, endian = \"little\")\n idx = idx+num*size\n num = widthIm*heightIm\n size = 1\n dataSz = \"<\"+str(num)+\"B\"\n LaserIn = struct.unpack_from(dataSz, filecontent[idx:idx+num*size])\n a = np.array(LaserIn,dtype=np.uint8)\n array = a.reshape(widthIm, heightIm)\n##writeTIFF(array(as.double(LaserIn)/255,dim = c(512,512,3)),\"laserOut.tif\",8,\"LZW\",reduce=TRUE)\n im = Image.fromarray(array, mode=\"L\")\n im.save(filename.split('.')[0]+'.tiff')\n\n", "repo_name": "leakey-lab/Optical_Tomography_Python", "sub_path": "nmsFileConv.py", "file_name": "nmsFileConv.py", "file_ext": "py", "file_size_in_byte": 2451, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "os.walk", "line_number": 8, "usage_type": "call"}, {"api_name": "struct.unpack_from", "line_number": 15, "usage_type": "call"}, {"api_name": "struct.unpack_from", "line_number": 17, "usage_type": "call"}, {"api_name": "struct.unpack_from", "line_number": 19, "usage_type": "call"}, {"api_name": "struct.unpack_from", "line_number": 21, "usage_type": "call"}, {"api_name": 
"struct.unpack_from", "line_number": 30, "usage_type": "call"}, {"api_name": "struct.unpack_from", "line_number": 35, "usage_type": "call"}, {"api_name": "struct.unpack_from", "line_number": 41, "usage_type": "call"}, {"api_name": "struct.unpack_from", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 48, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 51, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "18341942936", "text": "import os\nfrom typing import Union\n\nimport requests\n\n\ndef download_aeom_data(\n filepath: Union[str, os.PathLike],\n url: str = \"https://aemo.com.au/aemo/data/nem/priceanddemand/PRICE_AND_DEMAND_202103_NSW1.csv\",\n) -> None:\n headers = {\n \"User-Agent\": (\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) \"\n \"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36\"\n )\n }\n\n r = requests.get(url, headers=headers)\n with open(filepath, \"wb\") as f:\n f.write(r.content)\n", "repo_name": "aws-samples/sagemaker-rl-energy-storage-system", "sub_path": "src/energy_storage_system/utils/_data.py", "file_name": "_data.py", "file_ext": "py", "file_size_in_byte": 542, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 17, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.Union", "line_number": 8, "usage_type": "name"}, {"api_name": "os.PathLike", "line_number": 8, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "21290745062", "text": "import pytest\nfrom presidio_analyzer import RecognizerRegistry, AnalyzerEngine\nfrom presidio_analyzer.nlp_engine import NlpArtifacts\nfrom spacy.tokens import Doc\n\nfrom hebsafeharbor.common.city_utils import AMBIGUOUS_CITIES_CONTEXT, AMBIGOUS_BELOW_THRESHOLD_CITIES_LIST, \\\n AMBIGOUS_ABOVE_THRESHOLD_CITIES_LIST\nfrom hebsafeharbor.common.prepositions import LOCATION_PREPOSITIONS\nfrom hebsafeharbor.identifier import HebSpacyNlpEngine\nfrom hebsafeharbor.identifier.signals.hebrew_city_recognizer import AmbiguousHebrewCityRecognizer\n\nimport logging\n\nLOGGER = logging.getLogger(__name__)\n\ndef doc_to_nlp_artifact(doc: Doc, language: str = \"he\") -> NlpArtifacts:\n lemmas = [token.lemma_ for token in doc]\n tokens_indices = [token.idx for token in doc]\n entities = list(doc.ents)\n return NlpArtifacts(\n entities=entities,\n tokens=doc,\n tokens_indices=tokens_indices,\n lemmas=lemmas,\n nlp_engine=None,\n language=language,\n )\n\n@pytest.mark.parametrize(\"words,score,entity_type\", [([\"לשכה\", \"עמוקה\", \"תאים\"], 0.2, \"CITY\"),\n ([\"הגיע\", \"לעמוקה\"], 0.2, \"CITY\"),\n ])\ndef test_ambigous_city_simple(he_vocab, score, entity_type, words):\n mock_doc = Doc(he_vocab, words=words)\n nlp_artifacts = doc_to_nlp_artifact(mock_doc)\n ambiguous_cities_set = set(\n AMBIGOUS_BELOW_THRESHOLD_CITIES_LIST).union(\n set(AMBIGOUS_ABOVE_THRESHOLD_CITIES_LIST))\n city_recognizer = AmbiguousHebrewCityRecognizer(\"AmbiguousIsraeliCityRecognizer\", \"CITY\",\n ambiguous_cities_set,\n allowed_prepositions=LOCATION_PREPOSITIONS,\n endorsing_entities=['LOC', 'GPE'],\n context=AMBIGUOUS_CITIES_CONTEXT)\n results = city_recognizer.analyze(text=mock_doc.text,\n entities=[\"CITY\"],\n nlp_artifacts=nlp_artifacts)\n\n assert results[0].score == score\n assert results[0].entity_type == 
entity_type\n\n@pytest.mark.parametrize(\"text,score,entity_type,supportive_context_word,textual_explanation,case\", [(\"רמות גלוקוז בצום\", 0.2, \"CITY\",\"\",None,\"Test with no enhancement\"),\n (\"הגיע לרמות\", 0.6000000000000001, \"CITY\",\"הגיע\", None,\"Test enhancement with context\"),\n (\"מטופל ממושב אודם\", 0.6000000000000001, \"CITY\",\"\",\"NLP-LOC-enhanced;\", \"Test enhancement with NLP artifacts\"),\n ])\ndef test_ambigous_city_enhanced(he_vocab, score, entity_type, text, supportive_context_word,textual_explanation,case):\n LOGGER.info(case)\n nlp_engine = HebSpacyNlpEngine()\n ambiguous_cities_set = set(\n AMBIGOUS_BELOW_THRESHOLD_CITIES_LIST).union(\n set(AMBIGOUS_ABOVE_THRESHOLD_CITIES_LIST))\n city_recognizer = AmbiguousHebrewCityRecognizer(\"AmbiguousIsraeliCityRecognizer\", \"CITY\",\n ambiguous_cities_set,\n allowed_prepositions=LOCATION_PREPOSITIONS,\n endorsing_entities=['LOC', 'GPE'],\n context=AMBIGUOUS_CITIES_CONTEXT)\n\n registry = RecognizerRegistry()\n registry.add_recognizer(city_recognizer)\n\n analyzer_engine = AnalyzerEngine(registry=registry, nlp_engine=nlp_engine)\n results = analyzer_engine.analyze(text, language=\"he\", return_decision_process=True)\n\n assert results[0].score == score\n assert results[0].entity_type == entity_type\n assert results[0].analysis_explanation.supportive_context_word == supportive_context_word\n assert results[0].analysis_explanation.textual_explanation == textual_explanation\n", "repo_name": "8400TheHealthNetwork/HebSafeHarbor", "sub_path": "tests/test_city_recognizer.py", "file_name": "test_city_recognizer.py", "file_ext": "py", "file_size_in_byte": 4182, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "21", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "spacy.tokens.Doc", "line_number": 16, "usage_type": "name"}, {"api_name": "presidio_analyzer.nlp_engine.NlpArtifacts", "line_number": 20, "usage_type": "call"}, {"api_name": "presidio_analyzer.nlp_engine.NlpArtifacts", "line_number": 16, "usage_type": "name"}, {"api_name": "spacy.tokens.Doc", "line_number": 33, "usage_type": "call"}, {"api_name": "hebsafeharbor.common.city_utils.AMBIGOUS_BELOW_THRESHOLD_CITIES_LIST", "line_number": 36, "usage_type": "argument"}, {"api_name": "hebsafeharbor.common.city_utils.AMBIGOUS_ABOVE_THRESHOLD_CITIES_LIST", "line_number": 37, "usage_type": "argument"}, {"api_name": "hebsafeharbor.identifier.signals.hebrew_city_recognizer.AmbiguousHebrewCityRecognizer", "line_number": 38, "usage_type": "call"}, {"api_name": "hebsafeharbor.common.prepositions.LOCATION_PREPOSITIONS", "line_number": 40, "usage_type": "name"}, {"api_name": "hebsafeharbor.common.city_utils.AMBIGUOUS_CITIES_CONTEXT", "line_number": 42, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 29, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 29, "usage_type": "attribute"}, {"api_name": "hebsafeharbor.identifier.HebSpacyNlpEngine", "line_number": 56, "usage_type": "call"}, {"api_name": "hebsafeharbor.common.city_utils.AMBIGOUS_BELOW_THRESHOLD_CITIES_LIST", "line_number": 58, "usage_type": "argument"}, {"api_name": "hebsafeharbor.common.city_utils.AMBIGOUS_ABOVE_THRESHOLD_CITIES_LIST", "line_number": 59, "usage_type": "argument"}, {"api_name": "hebsafeharbor.identifier.signals.hebrew_city_recognizer.AmbiguousHebrewCityRecognizer", "line_number": 60, "usage_type": "call"}, {"api_name": 
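These tests wire a single recognizer into a fresh `RecognizerRegistry` and hand that registry to an `AnalyzerEngine`, so only the recognizer under test runs. Below is a minimal sketch of that wiring, with a deny-list `PatternRecognizer` standing in for `AmbiguousHebrewCityRecognizer` so the example runs without the Hebrew NLP stack; it still assumes presidio's default spaCy English model is installed.

```python
from presidio_analyzer import AnalyzerEngine, PatternRecognizer, RecognizerRegistry

# Stand-in recognizer: flags a fixed list of city names as CITY entities.
city_recognizer = PatternRecognizer(supported_entity="CITY",
                                    deny_list=["Springfield", "Portland"])

registry = RecognizerRegistry()
registry.add_recognizer(city_recognizer)  # only this recognizer will run

analyzer = AnalyzerEngine(registry=registry)
results = analyzer.analyze(text="The patient arrived from Springfield.",
                           language="en")
for res in results:
    print(res.entity_type, res.start, res.end, res.score)
```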
"hebsafeharbor.common.prepositions.LOCATION_PREPOSITIONS", "line_number": 62, "usage_type": "name"}, {"api_name": "hebsafeharbor.common.city_utils.AMBIGUOUS_CITIES_CONTEXT", "line_number": 64, "usage_type": "name"}, {"api_name": "presidio_analyzer.RecognizerRegistry", "line_number": 66, "usage_type": "call"}, {"api_name": "presidio_analyzer.AnalyzerEngine", "line_number": 69, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 50, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 50, "usage_type": "attribute"}]} +{"seq_id": "70041192374", "text": "import datetime\nimport json\nimport time\nfrom collections import OrderedDict\n\nimport requests\nimport xmltodict\nfrom lxml import html\n\n\nclass Analyzer:\n REQUEST_HEADERS = {\n 'User-Agent': 'Mozilla/5.0',\n }\n\n LCBO_URL = \"http://www.lcbo.com\"\n LCBO_XML_URLS = [\"/product_en.1.xml\", \"/product_en.2.xml\"]\n\n BEER_STORE_URL = \"http://www.thebeerstore.ca\"\n BEER_STORE_SEARCH_SUFFIX = \"/beers/search/beer_type--\"\n BEER_STORE_CATEGORIES = [\"Ale\", \"Lager\", \"Malt\", \"Stout\"]\n\n def get_items(self, get_lcbo=True, get_beer_store=True, use_existing_drinks=True):\n try:\n self.items = json.load(open('drinks.json')) if use_existing_drinks else []\n self.items = list(map(lambda x: Drink(**x), self.items))\n self._dump_items(filename='drinks_1.json')\n except FileNotFoundError:\n self.items = []\n if get_beer_store:\n self._load_beer_store_items()\n if get_lcbo:\n self._load_lcbo_items()\n self._dump_items()\n self._dump_html()\n return self.items\n\n def _dump_items(self, filename='drinks.json'):\n if '_dump_items_counter' not in self.__dict__:\n self.__dump_items_counter = -1\n self.__dump_items_counter += 1\n if self.__dump_items_counter % 20 == 0:\n json.dump([e.to_json() for e in list(self.items)], open(filename, \"w+\"))\n\n def _dump_html(self, filename='index.html'):\n if '__dump_html_counter' not in self.__dict__:\n self.__dump_html_counter = -1\n self.__dump_html_counter += 1\n if self.__dump_html_counter % 100 == 0:\n open(filename, 'w+').write(self.to_html(self.items))\n\n def _get_page(self, url):\n try:\n for j in range(5):\n page = requests.get(url, headers=self.REQUEST_HEADERS)\n if page:\n return page\n time.sleep(1)\n else:\n print(\"No more Connection tries\")\n return None\n except Exception as ex:\n print(ex)\n return None\n\n def _get_lcbo_urls(self, from_file=False, save_links=True):\n if from_file:\n return json.load(open(\"lcbo_links.json\"))\n else:\n products = []\n for xml_url in self.LCBO_XML_URLS:\n xml = requests.get(self.LCBO_URL + xml_url, headers=self.REQUEST_HEADERS)\n results = xmltodict.parse(xml.text)\n products += list(map(dict, results['urlset']['url']))\n if save_links:\n json.dump(products, open(\"lcbo_links.json\", \"w+\"))\n return products\n\n def _load_lcbo_items(self):\n products = self._get_lcbo_urls(from_file=True)\n existing = [e.url for e in list(self.items)]\n for product in products:\n if product['loc'] in existing:\n continue\n print(product['loc'])\n page = self._get_page(product['loc'])\n if not page:\n continue\n\n try:\n drink = Drink.from_lcbo_page(page.text, product['loc'])\n if drink:\n self.items.append(drink)\n self._dump_items()\n self._dump_html()\n except Exception as ex:\n print(ex)\n continue\n\n def _load_beer_store_items(self):\n beers = []\n for beer in self.BEER_STORE_CATEGORIES:\n url = self.BEER_STORE_URL + self.BEER_STORE_SEARCH_SUFFIX + beer\n page = self._get_page(url)\n page = html.fromstring(page.text)\n beers += 
page.xpath('//a[@class=\"brand-link teaser\"]/@href')\n existing = [e.url for e in list(self.items)]\n for beer in beers:\n url = self.BEER_STORE_URL + beer\n if url in existing:\n continue\n print(url)\n page = self._get_page(url)\n if not page:\n continue\n try:\n\n drink = Drink.from_beer_store_page(page.text, url)\n if drink:\n self.items += drink\n self._dump_items()\n self._dump_html()\n except Exception as ex:\n print(ex)\n continue\n\n @staticmethod\n def load_json_items(filename='drinks.json'):\n items = json.load(open(filename))\n return list(map(lambda x: Drink(**x), items))\n\n @staticmethod\n def to_csv(items, filename='drinks.csv'):\n headers = list(Drink(\"\", \"\", 1, 1, \"\", 1, 1, \"\").to_json().keys())\n file = open(filename, 'w+')\n file.write(','.join(headers) + \"\\n\")\n for item in items:\n data = item.to_json()\n file.write(\",\".join(map(lambda x: str(x).replace(\",\", \".\"), data.values())) + \"\\n\")\n file.close()\n\n @staticmethod\n def to_html(items):\n dump_str = \"\"\n date = \"This list was created created on \" + str(datetime.datetime.now().strftime(\"%Y-%m-%d at %H:%M:%S\")) + \\\n \" and has \" + str(len(items)) + \" items.\"\n dump_str += \"\" \\\n \"\" \\\n \"\" \\\n \"\" \\\n \"\" \\\n \"\" \\\n \"\" + date + \"\"\n\n dump_str += \"\"\n for header in [\"Name\", \"Category\", \"Source\", \"ABV\", \"Price\", \"Quantity\", \"Single mL\", \"Total mL\",\n \"mL/$\", \"Alc mL\", \"mL(alc)/$\"]:\n dump_str += \"\"\n dump_str += \"\"\n\n for item in items:\n dump_str += \"\"\n for elem in [item.name, item.category, item.source, item.abv, item.price, item.quantity, item.single_vol,\n item.total_vol,\n item.ml_per_dollar, item.alcohol_vol, item.alc_per_dollar]:\n if isinstance(elem, float):\n elem = round(elem, 2)\n if elem is item.name:\n elem = '' + elem + \"\"\n dump_str += \"\"\n dump_str += \"\"\n\n dump_str += \"
\" + header + \"
\" + str(elem) + \"
\"\n return dump_str\n\nclass Drink:\n def __init__(self, name, category, abv, price, source, quantity, single_vol, url, **kwargs):\n self.name = str(name).encode('ascii', 'ignore').decode(\"utf-8\")\n self.category = str(category).encode('ascii', 'ignore').decode(\"utf-8\")\n self.abv = float(abv)\n self.price = round(float(price), 2)\n self.source = str(source).encode('ascii', 'ignore').decode(\"utf-8\")\n self.quantity = int(quantity)\n self.single_vol = int(single_vol)\n self.url = str(url).encode('ascii', 'ignore').decode(\"utf-8\")\n self._update()\n\n def _update(self):\n self.total_vol = self.quantity * self.single_vol\n self.ml_per_dollar = self.total_vol / self.price\n self.alcohol_vol = self.total_vol * (self.abv / 100)\n self.alc_per_dollar = self.alcohol_vol / self.price\n\n def to_json(self):\n return OrderedDict({\n \"name\": self.name,\n \"source\": self.source,\n \"url\": self.url,\n \"category\": self.category,\n \"abv\": self.abv,\n \"price\": self.price,\n \"quantity\": self.quantity,\n \"single_vol\": self.single_vol,\n \"total_vol\": int(self.total_vol),\n \"ml_per_dollar\": round(self.ml_per_dollar, 2),\n \"alcohol_vol\": int(self.alcohol_vol),\n \"alc_per_dollar\": round(self.alc_per_dollar, 2)\n })\n\n @staticmethod\n def from_lcbo_page(text, url):\n page = html.fromstring(text)\n name = page.xpath('//li[@id=\"categoryPath\"]/text()')[0].strip().encode('ascii', 'ignore')\n container = ''.join(page.xpath('//dt[@class=\"product-volume\"]/text()'))\n details = page.xpath('//div[@class=\"product-details-list\"]/dl/dd/text()')\n price = float(\n page.xpath('//div[@id=\"prodPrices\"]/strong/span/span[@class=\"price-value\"]/text()')[0].strip(\n '$').replace(\n \",\", \"\"))\n category = page.xpath('//div[@class=\"breadcrumbs\"]/nav/ul/li/a/text()')\n category = category[min(2, len(category) - 1)]\n\n try:\n abv = float(details[[\"%\" in i for i in details].index(True)].split(\"%\")[0])\n except Exception:\n print(\"No ABV Value\")\n return None\n\n if \" x\" in container:\n quantity = int(container.split(\" x\")[0])\n else:\n quantity = 1\n if \" x\" in container:\n single_vol = int(container.split(\"x \")[1].split(\" mL\")[0])\n else:\n single_vol = int(container.split(\" \")[0])\n\n return Drink(name, category, abv, price, \"LCBO\", quantity, single_vol, url)\n\n @staticmethod\n def from_beer_store_page(text, url):\n page = html.fromstring(text)\n\n name = page.xpath('//div[@class=\"only-desktop\"]/h1[@class=\"page-title\"]/text()')[0]\n abv = float(page.xpath('//div[@class=\"brand-info-inner\"]/dl/dd/text()')[-1].split(\"%\")[0])\n\n options = page.xpath('//tbody/tr/td/text()')\n sale_prices = page.xpath('//tbody/tr/td/strike/text()')\n cat = page.xpath('//p[@class=\"introduction\"]/span/text()')\n for beer_type in [\"Ale\", \"Lager\", \"Malt\", \"Stout\"]:\n if any([beer_type in e for e in cat]):\n cat = beer_type\n break\n else:\n cat = cat[0]\n\n if len(sale_prices) > 0:\n to_insert = []\n for i in range(len(options)):\n if len(options) == 1 or (\n \"ml\" in options[i] and (i == len(options) - 1 or \"$\" not in options[i + 1])):\n to_insert.append([i + 1, sale_prices.pop(0)])\n\n i = 0\n for e in to_insert:\n options.insert(e[0] + i, e[1])\n i += 1\n\n for i in range(0, len(options), 2):\n container_type = options[i]\n price = float(options[i + 1].split(\"$\")[1].replace(\",\", \"\"))\n quantity = int(container_type.split(\" \")[0])\n single_vol = int(container_type.split(\" \")[-1][0:-3])\n\n # Deposit on keg return\n if single_vol in [30000, 50000, 58600]:\n 
price -= 50\n if single_vol in [20000, 25000]:\n price -= 20\n\n yield Drink(name, cat, abv, price, \"The Beer Store\", quantity, single_vol, url)\n\nif __name__ == \"__main__\":\n analyzer = Analyzer()\n analyzer.to_csv(analyzer.load_json_items())\n", "repo_name": "kForth/AlcoholAnalyzer", "sub_path": "alcohol_analyzer.py", "file_name": "alcohol_analyzer.py", "file_ext": "py", "file_size_in_byte": 11207, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "21", "api": [{"api_name": "json.load", "line_number": 25, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 43, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 55, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 58, "usage_type": "call"}, {"api_name": "json.load", "line_number": 68, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 72, "usage_type": "call"}, {"api_name": "xmltodict.parse", "line_number": 73, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 76, "usage_type": "call"}, {"api_name": "lxml.html.fromstring", "line_number": 105, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 105, "usage_type": "name"}, {"api_name": "json.load", "line_number": 129, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 145, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 145, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 200, "usage_type": "call"}, {"api_name": "lxml.html.fromstring", "line_number": 217, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 217, "usage_type": "name"}, {"api_name": "lxml.html.fromstring", "line_number": 247, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 247, "usage_type": "name"}]} +{"seq_id": "503466179", "text": "import json\nimport time\nfrom collections import OrderedDict, defaultdict\nfrom concurrent.futures import Future, ThreadPoolExecutor, wait\nfrom copy import deepcopy\nfrom typing import (\n Any,\n Callable,\n DefaultDict,\n Dict,\n FrozenSet,\n Iterable,\n List,\n Optional,\n Set,\n Tuple,\n Union,\n cast,\n)\nfrom urllib.parse import urlparse\n\nimport zulip\nfrom typing_extensions import Literal, TypedDict\n\nfrom zulipterminal import unicode_emojis\nfrom zulipterminal.config.keys import primary_key_for_command\nfrom zulipterminal.helper import (\n Message,\n NamedEmojiData,\n StreamData,\n asynch,\n canonicalize_color,\n classify_unread_counts,\n display_error_if_present,\n index_messages,\n initial_index,\n notify,\n set_count,\n)\nfrom zulipterminal.ui_tools.utils import create_msg_box_list\n\n\nEvent = TypedDict('Event', {\n 'type': str,\n # typing:\n 'sender': Dict[str, Any], # 'email', ...\n # typing & reaction:\n 'op': str,\n # reaction:\n 'user': Dict[str, Any], # 'email', 'user_id', 'full_name'\n 'reaction_type': str,\n 'emoji_code': str,\n 'emoji_name': str,\n # reaction & update_message:\n 'message_id': int,\n # update_message:\n 'rendered_content': str,\n # update_message_flags:\n 'messages': List[int],\n 'operation': str, # NOTE: deprecated in Zulip 4.0 / ZFL 32 -> 'op'\n 'flag': str,\n 'all': bool,\n # message:\n 'message': Message,\n 'flags': List[str],\n 'subject': str,\n # subscription:\n 'property': str,\n 'user_id': int, # Present when a streams subscribers are updated.\n 'user_ids': List[int], # NOTE: replaces 'user_id' in ZFL 35\n 'stream_id': int,\n 'stream_ids': List[int], # NOTE: replaces 'stream_id' in ZFL 35 for 
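The scraper's `_get_page` above retries a request several times before giving up and returning `None`, so callers can skip an unreachable product page instead of crashing the crawl. A distilled standalone version of that helper follows; it adds a `timeout`, which the original `requests.get` calls omit, so a hung connection cannot stall the loop.

```python
import time

import requests

HEADERS = {"User-Agent": "Mozilla/5.0"}

def get_page(url, tries=5, pause=1.0):
    """GET with a few retries; return the response, or None so callers can skip."""
    for _ in range(tries):
        try:
            resp = requests.get(url, headers=HEADERS, timeout=10)
            if resp.ok:
                return resp
        except requests.RequestException:
            pass  # treat network errors like a failed attempt and retry
        time.sleep(pause)
    print("No more connection tries for", url)
    return None
```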
peer*\n 'value': bool,\n 'message_ids': List[int] # Present when subject of msg(s) is updated\n}, total=False) # Each Event will only have a subset of these\n\nEditPropagateMode = Literal['change_one', 'change_all', 'change_later']\n\nOFFLINE_THRESHOLD_SECS = 140\n\n\nclass ServerConnectionFailure(Exception):\n pass\n\n\ndef sort_streams(streams: List[StreamData]) -> None:\n \"\"\"\n Used for sorting model.pinned_streams and model.unpinned_streams.\n \"\"\"\n streams.sort(key=lambda s: s['name'].lower())\n\n\nclass Model:\n \"\"\"\n A class responsible for storing the data to be displayed.\n \"\"\"\n\n def __init__(self, controller: Any) -> None:\n self.controller = controller\n self.client = controller.client\n\n self.narrow = [] # type: List[Any]\n self._have_last_message = {} # type: Dict[str, bool]\n self.stream_id = -1\n self.recipients = frozenset() # type: FrozenSet[Any]\n self.index = initial_index\n\n self.user_id = -1\n self.user_email = \"\"\n self.user_full_name = \"\"\n self.server_url = '{uri.scheme}://{uri.netloc}/'.format(\n uri=urlparse(self.client.base_url))\n self.server_name = \"\"\n\n self._notified_user_of_notification_failure = False\n\n self.event_actions = OrderedDict([\n ('message', self._handle_message_event),\n ('update_message', self._handle_update_message_event),\n ('reaction', self._handle_reaction_event),\n ('subscription', self._handle_subscription_event),\n ('typing', self._handle_typing_event),\n ('update_message_flags', self._handle_update_message_flags_event),\n ]) # type: OrderedDict[str, Callable[[Event], None]]\n\n self.initial_data = {} # type: Dict[str, Any]\n\n # Register to the queue before initializing further so that we don't\n # lose any updates while messages are being fetched.\n self._update_initial_data()\n\n self.server_version = self.initial_data['zulip_version']\n self.server_feature_level = (\n self.initial_data.get('zulip_feature_level')\n )\n\n self.users = self.get_all_users()\n\n subscriptions = self.initial_data['subscriptions']\n stream_data = Model._stream_info_from_subscriptions(subscriptions)\n (self.stream_dict, self.muted_streams,\n self.pinned_streams, self.unpinned_streams) = stream_data\n\n # NOTE: The expected response has been upgraded from\n # [stream_name, topic] to [stream_name, topic, date_muted] in\n # feature level 1, server version 3.0.\n muted_topics = self.initial_data['muted_topics']\n assert set(map(len, muted_topics)) in (set(), {2}, {3})\n self._muted_topics = {\n (stream_name, topic): (None if self.server_feature_level is None\n else date_muted[0])\n for stream_name, topic, *date_muted in muted_topics\n } # type: Dict[Tuple[str, str], Optional[int]]\n\n groups = self.initial_data['realm_user_groups']\n self.user_group_by_id = {} # type: Dict[int, Dict[str, Any]]\n self.user_group_names = self._group_info_from_realm_user_groups(groups)\n\n self.unread_counts = classify_unread_counts(self)\n\n self._draft = None # type: Optional[Message]\n unicode_emoji_data = unicode_emojis.EMOJI_DATA\n for name, data in unicode_emoji_data.items():\n data['type'] = 'unicode_emoji'\n typed_unicode_emoji_data = cast(NamedEmojiData, unicode_emoji_data)\n custom_emoji_data = self.fetch_custom_emojis()\n zulip_extra_emoji = {\n 'zulip': {'code': 'zulip', 'type': 'zulip_extra_emoji'}\n } # type: NamedEmojiData\n all_emoji_data = {**typed_unicode_emoji_data,\n **custom_emoji_data,\n **zulip_extra_emoji}.items()\n self.active_emoji_data = OrderedDict(sorted(all_emoji_data,\n key=lambda e: e[0]))\n\n self.new_user_input = True\n 
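# Kick off the recurring presence updates; @asynch runs this loop in a background thread.\n        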
self._start_presence_updates()\n\n def get_focus_in_current_narrow(self) -> Union[int, Set[None]]:\n \"\"\"\n Returns the focus in the current narrow.\n For no existing focus this returns {}, otherwise the message ID.\n \"\"\"\n return self.index['pointer'][repr(self.narrow)]\n\n def set_focus_in_current_narrow(self, focus_message: int) -> None:\n self.index['pointer'][repr(self.narrow)] = focus_message\n\n def is_search_narrow(self) -> bool:\n \"\"\"\n Checks if the current narrow is a result of a previous search for\n a messages in a different narrow.\n \"\"\"\n return 'search' in [subnarrow[0] for subnarrow in self.narrow]\n\n def set_narrow(self, *,\n stream: Optional[str]=None,\n topic: Optional[str]=None,\n pms: bool=False,\n pm_with: Optional[str]=None,\n starred: bool=False,\n mentioned: bool=False) -> bool:\n selected_params = {k for k, v in locals().items() if k != 'self' and v}\n valid_narrows = {\n frozenset(): [],\n frozenset(['stream']): [['stream', stream]],\n frozenset(['stream', 'topic']): [['stream', stream],\n ['topic', topic]],\n frozenset(['pms']): [['is', 'private']],\n frozenset(['pm_with']): [['pm_with', pm_with]],\n frozenset(['starred']): [['is', 'starred']],\n frozenset(['mentioned']): [['is', 'mentioned']],\n } # type: Dict[FrozenSet[str], List[Any]]\n for narrow_param, narrow in valid_narrows.items():\n if narrow_param == selected_params:\n new_narrow = narrow\n break\n else:\n raise RuntimeError(\"Model.set_narrow parameters used incorrectly.\")\n\n if new_narrow != self.narrow:\n self.narrow = new_narrow\n\n if pm_with is not None and new_narrow[0][0] == 'pm_with':\n users = pm_with.split(', ')\n self.recipients = frozenset(\n [self.user_dict[user]['user_id'] for user in users]\n + [self.user_id]\n )\n else:\n self.recipients = frozenset()\n return False\n else:\n return True\n\n def set_search_narrow(self, search_query: str) -> None:\n self.unset_search_narrow()\n self.narrow.append(['search', search_query])\n\n def unset_search_narrow(self) -> None:\n # If current narrow is a result of a previous started search,\n # we pop the ['search', 'text'] term in the narrow, before\n # setting a new narrow.\n if self.is_search_narrow():\n self.narrow = [item for item in self.narrow\n if item[0] != 'search']\n\n def get_message_ids_in_current_narrow(self) -> Set[int]:\n narrow = self.narrow\n index = self.index\n if narrow == []:\n ids = index['all_msg_ids']\n elif self.is_search_narrow(): # Check searches first\n ids = index['search']\n elif narrow[0][0] == 'stream':\n stream_id = self.stream_id\n if len(narrow) == 1:\n ids = index['stream_msg_ids_by_stream_id'][stream_id]\n elif len(narrow) == 2:\n topic = narrow[1][1]\n ids = index['topic_msg_ids'][stream_id].get(topic, set())\n elif narrow[0][1] == 'private':\n ids = index['private_msg_ids']\n elif narrow[0][0] == 'pm_with':\n recipients = self.recipients\n ids = index['private_msg_ids_by_user_ids'].get(recipients, set())\n elif narrow[0][1] == 'starred':\n ids = index['starred_msg_ids']\n elif narrow[0][1] == 'mentioned':\n ids = index['mentioned_msg_ids']\n return ids.copy()\n\n def _notify_server_of_presence(self) -> Dict[str, Any]:\n response = self.client.update_presence(\n request={\n # TODO: Determine `status` from terminal tab focus.\n 'status': 'active' if self.new_user_input else 'idle',\n 'new_user_input': self.new_user_input,\n }\n )\n self.new_user_input = False\n return response\n\n @asynch\n def _start_presence_updates(self) -> None:\n \"\"\"\n Call `_notify_server_of_presence` every minute (version 
1a).\n Use 'response' to update user list (version 1b).\n \"\"\"\n # FIXME: Version 2: call endpoint with ping_only=True only when\n # needed, and rely on presence events to update\n while True:\n response = self._notify_server_of_presence()\n if response['result'] == 'success':\n self.initial_data['presences'] = response['presences']\n self.users = self.get_all_users()\n if hasattr(self.controller, 'view'):\n self.controller.view.users_view.update_user_list(\n user_list=self.users)\n time.sleep(60)\n\n @asynch\n def react_to_message(self,\n message: Message,\n reaction_to_toggle: str) -> None:\n # FIXME Only support thumbs_up for now\n assert reaction_to_toggle == 'thumbs_up'\n\n reaction_to_toggle_spec = dict(\n emoji_name='thumbs_up',\n emoji_code='1f44d',\n reaction_type='unicode_emoji',\n message_id=str(message['id']))\n existing_reactions = [\n reaction['emoji_code']\n for reaction in message['reactions']\n if (reaction['user'].get('user_id', None) == self.user_id\n or reaction['user'].get('id', None) == self.user_id)\n ]\n if reaction_to_toggle_spec['emoji_code'] in existing_reactions:\n response = self.client.remove_reaction(reaction_to_toggle_spec)\n else:\n response = self.client.add_reaction(reaction_to_toggle_spec)\n display_error_if_present(response, self.controller)\n\n def session_draft_message(self) -> Optional[Message]:\n return deepcopy(self._draft)\n\n def save_draft(self, message: Message) -> None:\n self._draft = deepcopy(message)\n self.controller.view.set_footer_text(\"Saved message as draft\", 3)\n\n @asynch\n def toggle_message_star_status(self, message: Message) -> None:\n base_request = dict(flag='starred', messages=[message['id']])\n if 'starred' in message['flags']:\n request = dict(base_request, op='remove')\n else:\n request = dict(base_request, op='add')\n response = self.client.update_message_flags(request)\n display_error_if_present(response, self.controller)\n\n @asynch\n def mark_message_ids_as_read(self, id_list: List[int]) -> None:\n if not id_list:\n return\n response = self.client.update_message_flags({\n 'messages': id_list,\n 'flag': 'read',\n 'op': 'add',\n })\n display_error_if_present(response, self.controller)\n\n @asynch\n def send_typing_status_by_user_ids(self, recipient_user_ids: List[int],\n *, status: Literal['start', 'stop']\n ) -> None:\n if recipient_user_ids:\n request = {\n 'to': recipient_user_ids,\n 'op': status\n }\n response = self.client.set_typing_status(request)\n display_error_if_present(response, self.controller)\n else:\n raise RuntimeError('Empty recipient list.')\n\n def send_private_message(self, recipients: List[str],\n content: str) -> bool:\n if recipients:\n request = {\n 'type': 'private',\n 'to': recipients,\n 'content': content,\n }\n response = self.client.send_message(request)\n display_error_if_present(response, self.controller)\n return response['result'] == 'success'\n else:\n raise RuntimeError('Empty recipients list.')\n\n def send_stream_message(self, stream: str, topic: str,\n content: str) -> bool:\n request = {\n 'type': 'stream',\n 'to': stream,\n 'subject': topic,\n 'content': content,\n }\n response = self.client.send_message(request)\n display_error_if_present(response, self.controller)\n return response['result'] == 'success'\n\n def update_private_message(self, msg_id: int, content: str) -> bool:\n request = {\n \"message_id\": msg_id,\n \"content\": content,\n }\n response = self.client.update_message(request)\n display_error_if_present(response, self.controller)\n return response['result'] == 
'success'\n\n def update_stream_message(self, topic: str, message_id: int,\n propagate_mode: EditPropagateMode,\n content: Optional[str]=None) -> bool:\n request = {\n \"message_id\": message_id,\n \"propagate_mode\": propagate_mode,\n \"topic\": topic,\n }\n if content is not None:\n request['content'] = content\n\n response = self.client.update_message(request)\n display_error_if_present(response, self.controller)\n return response['result'] == 'success'\n\n def fetch_custom_emojis(self) -> NamedEmojiData:\n response = self.client.get_realm_emoji()\n custom_emojis = {emoji['name']: {'code': emoji_code,\n 'type': 'realm_emoji'}\n for emoji_code, emoji in response['emoji'].items()\n if not emoji['deactivated']} # type: NamedEmojiData\n display_error_if_present(response, self.controller)\n return custom_emojis\n\n def get_messages(self, *,\n num_after: int, num_before: int,\n anchor: Optional[int]) -> str:\n # anchor value may be specific message (int) or next unread (None)\n first_anchor = anchor is None\n anchor_value = anchor if anchor is not None else 0\n\n request = {\n 'anchor': anchor_value,\n 'num_before': num_before,\n 'num_after': num_after,\n 'apply_markdown': True,\n 'use_first_unread_anchor': first_anchor,\n 'client_gravatar': True,\n 'narrow': json.dumps(self.narrow),\n }\n response = self.client.get_messages(message_filters=request)\n if response['result'] == 'success':\n self.index = index_messages(response['messages'], self, self.index)\n narrow_str = repr(self.narrow)\n if first_anchor and response['anchor'] != 10000000000000000:\n self.index['pointer'][narrow_str] = response['anchor']\n if 'found_newest' in response:\n self._have_last_message[narrow_str] = response['found_newest']\n else:\n # Older versions of the server does not contain the\n # 'found_newest' flag. 
Instead, we use this logic:\n query_range = num_after + num_before + 1\n self._have_last_message[narrow_str] = (\n len(response['messages']) < query_range)\n return \"\"\n display_error_if_present(response, self.controller)\n return response['msg']\n\n def fetch_message_history(self, message_id: int,\n ) -> List[Dict[str, Union[int, str]]]:\n \"\"\"\n Fetches message edit history for a message using its ID.\n \"\"\"\n response = self.client.get_message_history(message_id)\n if response['result'] == 'success':\n return response['message_history']\n display_error_if_present(response, self.controller)\n return list()\n\n def _fetch_topics_in_streams(self, stream_list: Iterable[int]) -> str:\n \"\"\"\n Fetch all topics with specified stream_id's and\n index their names (Version 1)\n \"\"\"\n # FIXME: Version 2: Fetch last 'n' recent topics for each stream.\n for stream_id in stream_list:\n response = self.client.get_stream_topics(stream_id)\n if response['result'] == 'success':\n self.index['topics'][stream_id] = [topic['name'] for\n topic in response['topics']]\n else:\n display_error_if_present(response, self.controller)\n return response['msg']\n return \"\"\n\n def topics_in_stream(self, stream_id: int) -> List[str]:\n \"\"\"\n Returns a list of topic names for stream_id from the index.\n \"\"\"\n if not self.index['topics'][stream_id]:\n self._fetch_topics_in_streams([stream_id])\n\n return list(self.index['topics'][stream_id])\n\n @staticmethod\n def exception_safe_result(future: 'Future[str]') -> str:\n try:\n return future.result()\n except zulip.ZulipError as e:\n return str(e)\n\n def is_muted_stream(self, stream_id: int) -> bool:\n return stream_id in self.muted_streams\n\n def is_muted_topic(self, stream_id: int, topic: str) -> bool:\n \"\"\"\n Returns True if topic is muted via muted_topics.\n \"\"\"\n stream_name = self.stream_dict[stream_id]['name']\n topic_to_search = (stream_name, topic)\n return topic_to_search in self._muted_topics.keys()\n\n def _update_initial_data(self) -> None:\n # Thread Processes to reduce start time.\n # NOTE: Exceptions do not work well with threads\n with ThreadPoolExecutor(max_workers=1) as executor:\n futures = {\n 'get_messages': executor.submit(self.get_messages,\n num_after=10,\n num_before=30,\n anchor=None),\n 'register': executor.submit(self._register_desired_events,\n fetch_data=True),\n } # type: Dict[str, Future[str]]\n\n # Wait for threads to complete\n wait(futures.values())\n\n results = {\n name: self.exception_safe_result(future)\n for name, future in futures.items()\n } # type: Dict[str, str]\n if not any(results.values()):\n self.user_id = self.initial_data['user_id']\n self.user_email = self.initial_data['email']\n self.user_full_name = self.initial_data['full_name']\n self.server_name = self.initial_data['realm_name']\n else:\n failures = defaultdict(list) # type: DefaultDict[str, List[str]]\n for name, result in results.items():\n if result:\n failures[result].append(name)\n failure_text = [\n \"{} ({})\".format(error, \", \".join(sorted(calls)))\n for error, calls in failures.items()\n ]\n raise ServerConnectionFailure(\", \".join(failure_text))\n\n def get_other_subscribers_in_stream(self, stream_id: Optional[int]=None,\n stream_name: Optional[str]=None,\n ) -> List[int]:\n assert stream_id is not None or stream_name is not None\n\n if stream_id:\n assert self.is_user_subscribed_to_stream(stream_id)\n\n return [sub\n for sub in self.stream_dict[stream_id]['subscribers']\n if sub != self.user_id]\n else:\n return [sub\n for _, 
stream in self.stream_dict.items()\n for sub in stream['subscribers']\n if stream['name'] == stream_name\n if sub != self.user_id]\n\n def get_all_users(self) -> List[Dict[str, Any]]:\n # Dict which stores the active/idle status of users (by email)\n presences = self.initial_data['presences']\n\n # Construct a dict of each user in the realm to look up by email\n # and a user-id to email mapping\n self.user_dict = dict() # type: Dict[str, Dict[str, Any]]\n self.user_id_email_dict = dict() # type: Dict[int, str]\n for user in self.initial_data['realm_users']:\n if self.user_id == user['user_id']:\n current_user = {\n 'full_name': user['full_name'],\n 'email': user['email'],\n 'user_id': user['user_id'],\n 'status': 'active',\n }\n continue\n email = user['email']\n if email in presences: # presences currently subset of all users\n \"\"\"\n * Aggregate our information on a user's presence across their\n * clients.\n *\n * For an explanation of the Zulip presence model this helps\n * implement, see the subsystem doc:\n https://zulip.readthedocs.io/en/latest/subsystems/presence.html\n *\n * This logic should match `status_from_timestamp` in the web\n * app's\n * `static/js/presence.js`.\n *\n * Out of the ClientPresence objects found in `presence`, we\n * consider only those with a timestamp newer than\n * OFFLINE_THRESHOLD_SECS; then of\n * those, return the one that has the greatest UserStatus, where\n * `active` > `idle` > `offline`.\n *\n * If there are several ClientPresence objects with the greatest\n * UserStatus, an arbitrary one is chosen.\n \"\"\"\n aggregate_status = 'offline'\n for client in presences[email].items():\n client_name = client[0]\n status = client[1]['status']\n timestamp = client[1]['timestamp']\n if client_name == 'aggregated':\n continue\n elif (time.time() - timestamp) < OFFLINE_THRESHOLD_SECS:\n if status == 'active':\n aggregate_status = 'active'\n if status == 'idle':\n if aggregate_status != 'active':\n aggregate_status = status\n if status == 'offline':\n if (aggregate_status != 'active'\n and aggregate_status != 'idle'):\n aggregate_status = status\n\n status = aggregate_status\n else:\n # Set status of users not in the `presence` list\n # as 'inactive'. 
They will not be displayed in the\n # user's list by default (only in the search list).\n status = 'inactive'\n self.user_dict[email] = {\n 'full_name': user['full_name'],\n 'email': email,\n 'user_id': user['user_id'],\n 'status': status,\n }\n self.user_id_email_dict[user['user_id']] = email\n\n # Add internal (cross-realm) bots to dicts\n for bot in self.initial_data['cross_realm_bots']:\n email = bot['email']\n self.user_dict[email] = {\n 'full_name': bot['full_name'],\n 'email': email,\n 'user_id': bot['user_id'],\n 'status': 'inactive',\n }\n self.user_id_email_dict[bot['user_id']] = email\n\n # Generate filtered lists for active & idle users\n active = [properties for properties in self.user_dict.values()\n if properties['status'] == 'active']\n idle = [properties for properties in self.user_dict.values()\n if properties['status'] == 'idle']\n offline = [properties for properties in self.user_dict.values()\n if properties['status'] == 'offline']\n inactive = [properties for properties in self.user_dict.values()\n if properties['status'] == 'inactive']\n\n # Construct user_list from sorted components of each list\n user_list = sorted(active, key=lambda u: u['full_name'].casefold())\n user_list += sorted(idle, key=lambda u: u['full_name'].casefold())\n user_list += sorted(offline, key=lambda u: u['full_name'].casefold())\n user_list += sorted(inactive, key=lambda u: u['full_name'].casefold())\n # Add current user to the top of the list\n user_list.insert(0, current_user)\n self.user_dict[current_user['email']] = current_user\n self.user_id_email_dict[self.user_id] = current_user['email']\n\n return user_list\n\n def user_name_from_id(self, user_id: int) -> str:\n \"\"\"\n Returns user's full name given their ID.\n \"\"\"\n user_email = self.user_id_email_dict.get(user_id)\n\n if not user_email:\n raise RuntimeError('Invalid user ID.')\n\n return self.user_dict[user_email]['full_name']\n\n @staticmethod\n def _stream_info_from_subscriptions(\n subscriptions: List[Dict[str, Any]]\n ) -> Tuple[Dict[int, Any], Set[int], List[StreamData], List[StreamData]]:\n\n def make_reduced_stream_data(stream: Dict[str, Any]) -> StreamData:\n # stream_id has been changed to id.\n return StreamData({'name': stream['name'],\n 'id': stream['stream_id'],\n 'color': stream['color'],\n 'invite_only': stream['invite_only'],\n 'description': stream['description']})\n # Canonicalize color formats, since zulip server versions may use\n # different formats\n for subscription in subscriptions:\n subscription['color'] = canonicalize_color(subscription['color'])\n\n pinned_streams = [make_reduced_stream_data(stream)\n for stream in subscriptions if stream['pin_to_top']]\n unpinned_streams = [make_reduced_stream_data(stream)\n for stream in subscriptions\n if not stream['pin_to_top']]\n sort_streams(pinned_streams)\n sort_streams(unpinned_streams)\n # Mapping of stream-id to all available stream info\n # Stream IDs for muted streams\n # Limited stream info sorted by name (used in display)\n return (\n {stream['stream_id']: stream for stream in subscriptions},\n {stream['stream_id'] for stream in subscriptions\n if stream['in_home_view'] is False},\n pinned_streams,\n unpinned_streams,\n )\n\n def _group_info_from_realm_user_groups(self,\n groups: List[Dict[str, Any]]\n ) -> List[str]:\n \"\"\"\n Stores group information in the model and returns a list of\n group_names which helps in group typeahead. 
(Eg: @*terminal*)\n \"\"\"\n for sub_group in groups:\n self.user_group_by_id[sub_group['id']] = {\n key: sub_group[key] for key in sub_group if key != 'id'}\n user_group_names = [self.user_group_by_id[group_id]['name']\n for group_id in self.user_group_by_id]\n # Sort groups for typeahead to work alphabetically (case-insensitive)\n user_group_names.sort(key=str.lower)\n return user_group_names\n\n def toggle_stream_muted_status(self, stream_id: int) -> None:\n request = [{\n 'stream_id': stream_id,\n 'property': 'is_muted',\n 'value': not self.is_muted_stream(stream_id)\n # True for muting and False for unmuting.\n }]\n response = self.client.update_subscription_settings(request)\n display_error_if_present(response, self.controller)\n\n def stream_id_from_name(self, stream_name: str) -> int:\n for stream_id, stream in self.stream_dict.items():\n if stream['name'] == stream_name:\n return stream_id\n raise RuntimeError(\"Invalid stream name.\")\n\n def is_pinned_stream(self, stream_id: int) -> bool:\n return stream_id in [stream['id'] for stream in self.pinned_streams]\n\n def toggle_stream_pinned_status(self, stream_id: int) -> bool:\n request = [{\n 'stream_id': stream_id,\n 'property': 'pin_to_top',\n 'value': not self.is_pinned_stream(stream_id)\n }]\n response = self.client.update_subscription_settings(request)\n return response['result'] == 'success'\n\n def is_user_subscribed_to_stream(self, stream_id: int) -> bool:\n return stream_id in self.stream_dict\n\n def _handle_subscription_event(self, event: Event) -> None:\n \"\"\"\n Handle changes in subscription (eg. muting/unmuting,\n pinning/unpinning streams)\n \"\"\"\n def get_stream_by_id(streams: List[StreamData], stream_id: int\n ) -> StreamData:\n for stream in streams:\n if stream['id'] == stream_id:\n return stream\n raise RuntimeError(\"Invalid stream id.\")\n\n if event['op'] == 'update':\n if hasattr(self.controller, 'view'):\n if event.get('property', None) == 'in_home_view':\n stream_id = event['stream_id']\n\n # FIXME: Does this always contain the stream_id?\n stream_button = (\n self.controller.view.stream_id_to_button[stream_id]\n )\n\n unread_count = self.unread_counts['streams'][stream_id]\n if event['value']: # Unmuting streams\n self.muted_streams.remove(stream_id)\n self.unread_counts['all_msg'] += unread_count\n stream_button.mark_unmuted(unread_count)\n else: # Muting streams\n self.muted_streams.add(stream_id)\n self.unread_counts['all_msg'] -= unread_count\n stream_button.mark_muted()\n self.controller.update_screen()\n elif event.get('property', None) == 'pin_to_top':\n stream_id = event['stream_id']\n\n # FIXME: Does this always contain the stream_id?\n stream_button = (\n self.controller.view.stream_id_to_button[stream_id]\n )\n\n if event['value']:\n stream = get_stream_by_id(self.unpinned_streams,\n stream_id)\n if stream:\n self.unpinned_streams.remove(stream)\n self.pinned_streams.append(stream)\n else:\n stream = get_stream_by_id(self.pinned_streams,\n stream_id)\n if stream:\n self.pinned_streams.remove(stream)\n self.unpinned_streams.append(stream)\n sort_streams(self.unpinned_streams)\n sort_streams(self.pinned_streams)\n self.controller.view.left_panel.update_stream_view()\n self.controller.update_screen()\n elif event['op'] in ('peer_add', 'peer_remove'):\n # NOTE: ZFL 35 commit was not atomic with API change\n # (ZFL >=35 can use new plural style)\n if 'stream_ids' not in event or 'user_ids' not in event:\n stream_ids = [event['stream_id']]\n user_ids = [event['user_id']]\n else:\n stream_ids = 
event['stream_ids']\n user_ids = event['user_ids']\n\n for stream_id in stream_ids:\n if self.is_user_subscribed_to_stream(stream_id):\n subscribers = self.stream_dict[stream_id]['subscribers']\n if event['op'] == 'peer_add':\n subscribers.extend(user_ids)\n else:\n for user_id in user_ids:\n subscribers.remove(user_id)\n\n def _handle_typing_event(self, event: Event) -> None:\n \"\"\"\n Handle typing notifications (in private messages)\n \"\"\"\n if hasattr(self.controller, 'view'):\n # If the user is in pm narrow with the person typing\n narrow = self.narrow\n if (len(narrow) == 1 and narrow[0][0] == 'pm_with'\n and event['sender']['email'] in narrow[0][1].split(',')):\n if event['op'] == 'start':\n user = self.user_dict[event['sender']['email']]\n self.controller.view.set_footer_text([\n ' ',\n ('code', user['full_name']),\n ' is typing...'\n ])\n elif event['op'] == 'stop':\n self.controller.view.set_footer_text()\n else:\n raise RuntimeError(\"Unknown typing event operation\")\n\n def get_invalid_recipient_emails(self, recipient_emails: List[str]\n ) -> List[str]:\n\n return [email for email in recipient_emails\n if email not in self.user_dict]\n\n def is_valid_stream(self, stream_name: str) -> bool:\n for stream in self.stream_dict.values():\n if stream['name'] == stream_name:\n return True\n return False\n\n def notify_user(self, message: Message) -> str:\n \"\"\"\n return value signifies if notification failed, if it should occur\n \"\"\"\n # Check if notifications are enabled by the user.\n # It is disabled by default.\n if not self.controller.notify_enabled:\n return \"\"\n if message['sender_id'] == self.user_id:\n return \"\"\n\n recipient = ''\n if message['type'] == 'private':\n target = 'you'\n if len(message['display_recipient']) > 2:\n extra_targets = [target] + [\n recip['full_name']\n for recip in message['display_recipient']\n if recip['id'] not in (self.user_id, message['sender_id'])\n ]\n target = ', '.join(extra_targets)\n recipient = ' (to {})'.format(target)\n elif message['type'] == 'stream' and (\n {'mentioned', 'wildcard_mentioned'}.intersection(\n set(message['flags'])\n )\n or self.stream_dict[message['stream_id']]['desktop_notifications']\n ):\n recipient = ' (to {} -> {})'.format(message['display_recipient'],\n message['subject'])\n\n if recipient:\n return notify((self.server_name + \":\\n\"\n + message['sender_full_name'] + recipient),\n message['content'])\n return \"\"\n\n def _handle_message_event(self, event: Event) -> None:\n \"\"\"\n Handle new messages (eg. add message to the end of the view)\n \"\"\"\n message = event['message']\n # sometimes `flags` are missing in `event` so initialize\n # an empty list of flags in that case.\n message['flags'] = event.get('flags', [])\n # We need to update the topic order in index, unconditionally.\n if message['type'] == 'stream':\n # NOTE: The subsequent helper only updates the topic index based\n # on the message event not the UI (the UI is updated in a\n # consecutive block independently). 
However, it is critical to keep\n # the topics index synchronized as it used whenever the topics list\n # view is reconstructed later.\n self._update_topic_index(message['stream_id'],\n message['subject'])\n # If the topic view is toggled for incoming message's\n # recipient stream, then we re-arrange topic buttons\n # with most recent at the top.\n if hasattr(self.controller, 'view'):\n view = self.controller.view\n if (view.left_panel.is_in_topic_view_with_stream_id(\n message['stream_id'])):\n view.topic_w.update_topics_list(\n message['stream_id'], message['subject'],\n message['sender_id'])\n self.controller.update_screen()\n\n # We can notify user regardless of whether UI is rendered or not,\n # but depend upon the UI to indicate failures.\n failed_command = self.notify_user(message)\n if (failed_command\n and hasattr(self.controller, 'view')\n and not self._notified_user_of_notification_failure):\n notice_template = (\n \"You have enabled notifications, but your notification \"\n \"command '{}' could not be found.\"\n \"\\n\\n\"\n \"The application will continue attempting to run this command \"\n \"in this session, but will not notify you again.\"\n \"\\n\\n\"\n \"Press '{}' to close this window.\"\n )\n notice = notice_template.format(failed_command,\n primary_key_for_command(\"GO_BACK\"))\n self.controller.popup_with_message(notice, width=50)\n self.controller.update_screen()\n self._notified_user_of_notification_failure = True\n\n # Index messages before calling set_count.\n self.index = index_messages([message], self, self.index)\n if 'read' not in message['flags']:\n set_count([message['id']], self.controller, 1)\n\n if (hasattr(self.controller, 'view')\n and self._have_last_message[repr(self.narrow)]):\n msg_log = self.controller.view.message_view.log\n if msg_log:\n last_message = msg_log[-1].original_widget.message\n else:\n last_message = None\n msg_w_list = create_msg_box_list(self, [message['id']],\n last_message=last_message)\n if not msg_w_list:\n return\n else:\n msg_w = msg_w_list[0]\n\n if not self.narrow:\n msg_log.append(msg_w)\n\n elif (self.narrow[0][1] == 'mentioned'\n and 'mentioned' in message['flags']):\n msg_log.append(msg_w)\n\n elif (self.narrow[0][1] == message['type']\n and len(self.narrow) == 1):\n msg_log.append(msg_w)\n\n elif (message['type'] == 'stream'\n and self.narrow[0][0] == \"stream\"):\n recipient_stream = message['display_recipient']\n narrow_stream = self.narrow[0][1]\n append_to_stream = recipient_stream == narrow_stream\n\n if (append_to_stream\n and (len(self.narrow) == 1\n or (len(self.narrow) == 2\n and self.narrow[1][1] == message['subject']))):\n msg_log.append(msg_w)\n\n elif (message['type'] == 'private' and len(self.narrow) == 1\n and self.narrow[0][0] == \"pm_with\"):\n narrow_recipients = self.recipients\n message_recipients = frozenset(\n [user['id'] for user in message['display_recipient']])\n if narrow_recipients == message_recipients:\n msg_log.append(msg_w)\n self.controller.update_screen()\n\n def _update_topic_index(self, stream_id: int, topic_name: str) -> None:\n \"\"\"\n Update topic order in index based on incoming message.\n Helper method called by _handle_message_event\n \"\"\"\n topic_list = self.topics_in_stream(stream_id)\n for topic_iterator, topic in enumerate(topic_list):\n if topic == topic_name:\n topic_list.insert(0, topic_list.pop(topic_iterator))\n break\n else:\n # No previous topics with same topic names are found\n # hence, it must be a new topic.\n topic_list.insert(0, topic_name)\n\n # Update the 
index.\n self.index['topics'][stream_id] = topic_list\n\n def _handle_update_message_event(self, event: Event) -> None:\n \"\"\"\n Handle updated (edited) messages (changed content/subject)\n \"\"\"\n # Update edited message status from single message id\n # NOTE: If all messages in topic have topic edited,\n # they are not all marked as edited, as per server optimization\n message_id = event['message_id']\n indexed_message = self.index['messages'].get(message_id, None)\n\n if indexed_message:\n self.index['edited_messages'].add(message_id)\n\n # Update the rendered content, if the message is indexed\n if 'rendered_content' in event and indexed_message:\n indexed_message['content'] = event['rendered_content']\n self.index['messages'][message_id] = indexed_message\n self._update_rendered_view(message_id)\n\n # NOTE: This is independent of messages being indexed\n # Previous assertion:\n # * 'subject' is not present in update event if\n # the event didn't have a 'subject' update.\n if 'subject' in event:\n new_subject = event['subject']\n stream_id = event['stream_id']\n\n # Update any indexed messages & re-render them\n for msg_id in event['message_ids']:\n indexed_msg = self.index['messages'].get(msg_id)\n if indexed_msg:\n indexed_msg['subject'] = new_subject\n self._update_rendered_view(msg_id)\n\n # If topic view is open, reload list else reset cache.\n if stream_id in self.index['topics']:\n if hasattr(self.controller, 'view'):\n view = self.controller.view\n if (view.left_panel.is_in_topic_view_with_stream_id(\n stream_id)):\n self._fetch_topics_in_streams([stream_id])\n view.left_panel.show_topic_view(\n view.topic_w.stream_button)\n self.controller.update_screen()\n else:\n self.index['topics'][stream_id] = []\n\n def _handle_reaction_event(self, event: Event) -> None:\n \"\"\"\n Handle change to reactions on a message\n \"\"\"\n message_id = event['message_id']\n # If the message is indexed\n if self.index['messages'][message_id] != {}:\n\n message = self.index['messages'][message_id]\n if event['op'] == 'add':\n message['reactions'].append(\n {\n 'user': event['user'],\n 'reaction_type': event['reaction_type'],\n 'emoji_code': event['emoji_code'],\n 'emoji_name': event['emoji_name'],\n }\n )\n else:\n emoji_code = event['emoji_code']\n for reaction in message['reactions']:\n # Since Who reacted is not displayed,\n # remove the first one encountered\n if reaction['emoji_code'] == emoji_code:\n message['reactions'].remove(reaction)\n\n self.index['messages'][message_id] = message\n self._update_rendered_view(message_id)\n\n def _handle_update_message_flags_event(self, event: Event) -> None:\n \"\"\"\n Handle change to message flags (eg. 
starred, read)\n \"\"\"\n if (self.server_feature_level is None\n or self.server_feature_level < 32):\n operation = event['operation']\n else:\n operation = event['op']\n\n if event['all']: # FIXME Should handle eventually\n return\n\n flag_to_change = event['flag']\n if flag_to_change not in {'starred', 'read'}:\n return\n\n if flag_to_change == 'read' and operation == 'remove':\n return\n\n indexed_message_ids = set(self.index['messages'])\n message_ids_to_mark = set(event['messages'])\n\n for message_id in message_ids_to_mark & indexed_message_ids:\n msg = self.index['messages'][message_id]\n if operation == 'add':\n if flag_to_change not in msg['flags']:\n msg['flags'].append(flag_to_change)\n elif operation == 'remove':\n if flag_to_change in msg['flags']:\n msg['flags'].remove(flag_to_change)\n else:\n raise RuntimeError(event, msg['flags'])\n\n self.index['messages'][message_id] = msg\n self._update_rendered_view(message_id)\n\n if operation == 'add' and flag_to_change == 'read':\n set_count(list(message_ids_to_mark & indexed_message_ids),\n self.controller, -1)\n\n def _update_rendered_view(self, msg_id: int) -> None:\n \"\"\"\n Helper method called by various _handle_* methods\n \"\"\"\n # Update new content in the rendered view\n view = self.controller.view\n for msg_w in view.message_view.log:\n msg_box = msg_w.original_widget\n if msg_box.message['id'] == msg_id:\n # Remove the message if it no longer belongs in the current\n # narrow.\n if (len(self.narrow) == 2\n and msg_box.message['subject'] != self.narrow[1][1]):\n view.message_view.log.remove(msg_w)\n # Change narrow if there are no messages left in the\n # current narrow.\n if not view.message_view.log:\n msg_w_list = create_msg_box_list(\n self, [msg_id],\n last_message=msg_box.last_message)\n if msg_w_list:\n self.controller.narrow_to_topic(\n msg_w_list[0].original_widget)\n self.controller.update_screen()\n return\n\n msg_w_list = create_msg_box_list(\n self, [msg_id],\n last_message=msg_box.last_message)\n if not msg_w_list:\n return\n else:\n new_msg_w = msg_w_list[0]\n msg_pos = view.message_view.log.index(msg_w)\n view.message_view.log[msg_pos] = new_msg_w\n\n # If this is not the last message in the view\n # update the next message's last_message too.\n if len(view.message_view.log) != (msg_pos + 1):\n next_msg_w = view.message_view.log[msg_pos + 1]\n msg_w_list = create_msg_box_list(\n self, [next_msg_w.original_widget.message['id']],\n last_message=new_msg_w.original_widget.message)\n view.message_view.log[msg_pos + 1] = msg_w_list[0]\n self.controller.update_screen()\n return\n\n def _register_desired_events(self, *, fetch_data: bool=False) -> str:\n fetch_types = None if not fetch_data else [\n 'realm',\n 'presence',\n 'subscription',\n 'message',\n 'update_message_flags',\n 'muted_topics',\n 'realm_user', # Enables cross_realm_bots\n 'realm_user_groups',\n # zulip_version and zulip_feature_level are always returned in\n # POST /register from Feature level 3.\n 'zulip_version',\n ]\n event_types = list(self.event_actions)\n try:\n response = self.client.register(event_types=event_types,\n fetch_event_types=fetch_types,\n client_gravatar=True,\n apply_markdown=True,\n include_subscribers=True)\n except zulip.ZulipError as e:\n return str(e)\n\n if response['result'] == 'success':\n if fetch_data:\n self.initial_data.update(response)\n self.max_message_id = response['max_message_id']\n self.queue_id = response['queue_id']\n self.last_event_id = response['last_event_id']\n return \"\"\n return 
response['msg']\n\n @asynch\n def poll_for_events(self) -> None:\n reregister_timeout = 10\n queue_id = self.queue_id\n last_event_id = self.last_event_id\n while True:\n if queue_id is None:\n while True:\n if not self._register_desired_events():\n queue_id = self.queue_id\n last_event_id = self.last_event_id\n break\n time.sleep(reregister_timeout)\n\n response = self.client.get_events(\n queue_id=queue_id,\n last_event_id=last_event_id\n )\n\n if 'error' in response['result']:\n if response[\"msg\"].startswith(\"Bad event queue id:\"):\n # Our event queue went away, probably because\n # we were asleep or the server restarted\n # abnormally. We may have missed some\n # events while the network was down or\n # something, but there's not really anything\n # we can do about it other than resuming\n # getting new ones.\n #\n # Reset queue_id to register a new event queue.\n queue_id = None\n time.sleep(1)\n continue\n\n for event in response['events']:\n last_event_id = max(last_event_id, int(event['id']))\n if event['type'] in self.event_actions:\n try:\n self.event_actions[event['type']](event)\n except Exception:\n import sys\n (self.controller.\n raise_exception_in_main_thread(sys.exc_info(),\n critical=False))\n", "repo_name": "uiejin/SeventeemMinutes", "sub_path": ".local/lib/python3.8/site-packages/zulipterminal/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 53158, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing_extensions.TypedDict", "line_number": 43, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 59, "usage_type": "name"}, {"api_name": "zulipterminal.helper.Message", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 65, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 72, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 74, "usage_type": "name"}, {"api_name": "typing_extensions.Literal", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 86, "usage_type": "name"}, {"api_name": "zulipterminal.helper.StreamData", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 98, "usage_type": "name"}, {"api_name": "zulipterminal.helper.initial_index", "line_number": 106, "usage_type": "name"}, {"api_name": "urllib.parse.urlparse", "line_number": 112, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 117, "usage_type": "call"}, {"api_name": "zulipterminal.helper.classify_unread_counts", "line_number": 159, "usage_type": "call"}, {"api_name": "zulipterminal.unicode_emojis.EMOJI_DATA", "line_number": 162, "usage_type": "attribute"}, {"api_name": "zulipterminal.unicode_emojis", "line_number": 162, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 165, "usage_type": "call"}, {"api_name": "zulipterminal.helper.NamedEmojiData", "line_number": 165, "usage_type": "argument"}, {"api_name": "collections.OrderedDict", "line_number": 173, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 179, "usage_type": "name"}, {"api_name": 
"typing.Set", "line_number": 179, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 197, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 198, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 200, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 248, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 273, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 273, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 300, "usage_type": "call"}, {"api_name": "zulipterminal.helper.asynch", "line_number": 284, "usage_type": "name"}, {"api_name": "zulipterminal.helper.Message", "line_number": 304, "usage_type": "name"}, {"api_name": "zulipterminal.helper.display_error_if_present", "line_number": 324, "usage_type": "call"}, {"api_name": "zulipterminal.helper.asynch", "line_number": 302, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 327, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 326, "usage_type": "name"}, {"api_name": "zulipterminal.helper.Message", "line_number": 326, "usage_type": "name"}, {"api_name": "zulipterminal.helper.Message", "line_number": 329, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 330, "usage_type": "call"}, {"api_name": "zulipterminal.helper.Message", "line_number": 334, "usage_type": "name"}, {"api_name": "zulipterminal.helper.display_error_if_present", "line_number": 341, "usage_type": "call"}, {"api_name": "zulipterminal.helper.asynch", "line_number": 333, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 344, "usage_type": "name"}, {"api_name": "zulipterminal.helper.display_error_if_present", "line_number": 352, "usage_type": "call"}, {"api_name": "zulipterminal.helper.asynch", "line_number": 343, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 355, "usage_type": "name"}, {"api_name": "typing_extensions.Literal", "line_number": 356, "usage_type": "name"}, {"api_name": "zulipterminal.helper.display_error_if_present", "line_number": 364, "usage_type": "call"}, {"api_name": "zulipterminal.helper.asynch", "line_number": 354, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 368, "usage_type": "name"}, {"api_name": "zulipterminal.helper.display_error_if_present", "line_number": 377, "usage_type": "call"}, {"api_name": "zulipterminal.helper.display_error_if_present", "line_number": 391, "usage_type": "call"}, {"api_name": "zulipterminal.helper.display_error_if_present", "line_number": 400, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 405, "usage_type": "name"}, {"api_name": "zulipterminal.helper.display_error_if_present", "line_number": 415, "usage_type": "call"}, {"api_name": "zulipterminal.helper.display_error_if_present", "line_number": 424, "usage_type": "call"}, {"api_name": "zulipterminal.helper.NamedEmojiData", "line_number": 418, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 429, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 441, "usage_type": "call"}, {"api_name": "zulipterminal.helper.index_messages", "line_number": 445, "usage_type": "call"}, {"api_name": "zulipterminal.helper.display_error_if_present", "line_number": 458, "usage_type": "call"}, {"api_name": "zulipterminal.helper.display_error_if_present", "line_number": 469, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 462, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 462, 
"usage_type": "name"}, {"api_name": "typing.Union", "line_number": 462, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 472, "usage_type": "name"}, {"api_name": "zulipterminal.helper.display_error_if_present", "line_number": 484, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 488, "usage_type": "name"}, {"api_name": "zulip.ZulipError", "line_number": 501, "usage_type": "attribute"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 518, "usage_type": "call"}, {"api_name": "concurrent.futures.wait", "line_number": 529, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 541, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 551, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 552, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 553, "usage_type": "name"}, {"api_name": "time.time", "line_number": 616, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 569, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 569, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 569, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 687, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 687, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 687, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 690, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 690, "usage_type": "name"}, {"api_name": "zulipterminal.helper.StreamData", "line_number": 692, "usage_type": "call"}, {"api_name": "zulipterminal.helper.StreamData", "line_number": 690, "usage_type": "name"}, {"api_name": "zulipterminal.helper.canonicalize_color", "line_number": 700, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 688, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 688, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 688, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 688, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 688, "usage_type": "name"}, {"api_name": "zulipterminal.helper.StreamData", "line_number": 688, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 721, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 721, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 721, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 722, "usage_type": "name"}, {"api_name": "zulipterminal.helper.display_error_if_present", "line_number": 744, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 772, "usage_type": "name"}, {"api_name": "zulipterminal.helper.StreamData", "line_number": 772, "usage_type": "name"}, {"api_name": "zulipterminal.helper.StreamData", "line_number": 773, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 863, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 864, "usage_type": "name"}, {"api_name": "zulipterminal.helper.Message", "line_number": 875, "usage_type": "name"}, {"api_name": "zulipterminal.helper.notify", "line_number": 907, "usage_type": "call"}, {"api_name": "zulipterminal.config.keys.primary_key_for_command", "line_number": 957, "usage_type": "call"}, {"api_name": "zulipterminal.helper.index_messages", "line_number": 963, "usage_type": "call"}, {"api_name": "zulipterminal.helper.set_count", "line_number": 965, "usage_type": "call"}, {"api_name": 
"zulipterminal.ui_tools.utils.create_msg_box_list", "line_number": 974, "usage_type": "call"}, {"api_name": "zulipterminal.helper.set_count", "line_number": 1145, "usage_type": "call"}, {"api_name": "zulipterminal.ui_tools.utils.create_msg_box_list", "line_number": 1165, "usage_type": "call"}, {"api_name": "zulipterminal.ui_tools.utils.create_msg_box_list", "line_number": 1174, "usage_type": "call"}, {"api_name": "zulipterminal.ui_tools.utils.create_msg_box_list", "line_number": 1188, "usage_type": "call"}, {"api_name": "zulip.ZulipError", "line_number": 1216, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 1240, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 1259, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 1270, "usage_type": "call"}, {"api_name": "zulipterminal.helper.asynch", "line_number": 1228, "usage_type": "name"}]} +{"seq_id": "14919547480", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, absolute_import, division\n__docformat__ = \"restructuredtext en\"\n\n# disable: accessing protected members, too many methods\n# pylint: disable=W0212,R0904\n\nfrom hamcrest import is_\nfrom hamcrest import none\nfrom hamcrest import is_in\nfrom hamcrest import is_not\nfrom hamcrest import assert_that\nfrom hamcrest import has_property\nfrom hamcrest import not_\n\nfrom zope import component\n\nfrom nti.testing.matchers import validly_provides\nfrom nti.testing.matchers import verifiably_provides\n\nimport unittest\n\nimport stripe\n\nfrom nti.dataserver.users import User\n\nfrom nti.store.payments.stripe import StripeException\n\nfrom nti.store.payments.stripe.interfaces import IStripeCustomer\nfrom nti.store.payments.stripe.interfaces import IStripePurchaseError\nfrom nti.store.payments.stripe.interfaces import IStripeOperationError\nfrom nti.store.payments.stripe.interfaces import IStripePurchaseAttempt\nfrom nti.store.payments.stripe.interfaces import IStripeAccountInfo\nfrom nti.store.payments.stripe.interfaces import IStripeConnectKey\n\n\nfrom nti.store.purchase_order import create_purchase_item\nfrom nti.store.purchase_order import create_purchase_order\n\nfrom nti.store.purchase_attempt import create_purchase_attempt\n\nfrom nti.dataserver.tests import mock_dataserver\n\nfrom nti.store.tests import SharedConfiguringTestLayer\n\n\nclass TestStripeAdapters(unittest.TestCase):\n\n layer = SharedConfiguringTestLayer\n\n processor = 'stripe'\n\n def _create_user(self, username=u'nt@nti.com', password=u'temp001'):\n ds = mock_dataserver.current_mock_ds\n usr = User.create_user(ds, username=username, password=password)\n return usr\n\n @mock_dataserver.WithMockDSTrans\n def test_stripe_customer_adapter(self):\n user = self._create_user()\n adapted = IStripeCustomer(user)\n assert_that(adapted, is_not(none()))\n assert_that(adapted, has_property('CustomerID', is_(none())))\n\n adapted.Charges.add(u'ch_id')\n assert_that('ch_id', is_in(adapted))\n\n adapted.customer_id = u'xyz'\n assert_that(adapted, has_property('CustomerID', is_(is_('xyz'))))\n\n def _create_purchase_attempt(self, item=u'xyz-book', quantity=None,\n\t\t\t\t\t\t\t\t state=None, description=u'my purchase'):\n item = create_purchase_item(item, 1)\n order = create_purchase_order(item, quantity=quantity)\n result = create_purchase_attempt(order,\n\t\t\t\t\t\t\t\t\t\t processor=self.processor,\n description=description,\n state=state)\n return result\n\n @mock_dataserver.WithMockDSTrans\n def 
test_stripe_purchase_adapter(self):\n pa = self._create_purchase_attempt()\n adapted = IStripePurchaseAttempt(pa)\n adapted.charge_id = u'charge_id'\n adapted.token_id = u'token_id'\n assert_that(adapted.purchase, is_(pa))\n assert_that(adapted.charge_id, is_('charge_id'))\n assert_that(adapted.token_id, is_('token_id'))\n\n def test_exception_adapter(self):\n e = StripeException(u'my exception')\n adapted = IStripePurchaseError(e, None)\n assert_that(adapted, is_not(none()))\n assert_that(adapted.Type, is_('PurchaseError'))\n assert_that(adapted.Message, is_('my exception'))\n\n def test_stripe_error_adapters(self):\n e = stripe.error.CardError(u'my error', u'my param', u'my code')\n adapted = IStripeOperationError(e, None)\n assert_that(adapted, is_not(none()))\n assert_that(adapted.Type, is_('CardError'))\n assert_that(adapted.Message, is_('my error'))\n assert_that(adapted.Param, is_('my param'))\n assert_that(adapted.Code, is_('my code'))\n assert_that(adapted, validly_provides(IStripeOperationError))\n assert_that(adapted, verifiably_provides(IStripeOperationError))\n\n e = u'my error message'\n adapted = IStripeOperationError(e, None)\n assert_that(adapted, is_not(none()))\n assert_that(adapted.Type, is_('OperationError'))\n assert_that(adapted.Message, is_('my error message'))\n\n e = stripe.error.InvalidRequestError(u\"++invalidtoken++\", u'token id')\n adapted = IStripeOperationError(e, None)\n assert_that(adapted, validly_provides(IStripeOperationError))\n assert_that(adapted, verifiably_provides(IStripeOperationError))\n\n def test_account_info_adapter(self):\n connect_key = component.getUtility(IStripeConnectKey, 'NTI-TEST')\n\n account_info = IStripeAccountInfo(connect_key)\n\n assert_that(account_info.StripeAccountID, is_(connect_key.StripeUserID))\n assert_that(account_info.LiveMode, is_(connect_key.LiveMode))\n assert_that(account_info, not_(has_property('PrivateKey')))\n assert_that(account_info, not_(has_property('RefreshToken')))\n assert_that(account_info, not_(has_property('PublicKey')))\n", "repo_name": "OpenNTI/nti.store", "sub_path": "src/nti/store/payments/stripe/tests/test_adapters.py", "file_name": "test_adapters.py", "file_ext": "py", "file_size_in_byte": 4959, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "unittest.TestCase", "line_number": 49, "usage_type": "attribute"}, {"api_name": "nti.store.tests.SharedConfiguringTestLayer", "line_number": 51, "usage_type": "name"}, {"api_name": "nti.dataserver.tests.mock_dataserver.current_mock_ds", "line_number": 56, "usage_type": "attribute"}, {"api_name": "nti.dataserver.tests.mock_dataserver", "line_number": 56, "usage_type": "name"}, {"api_name": "nti.dataserver.users.User.create_user", "line_number": 57, "usage_type": "call"}, {"api_name": "nti.dataserver.users.User", "line_number": 57, "usage_type": "name"}, {"api_name": "nti.store.payments.stripe.interfaces.IStripeCustomer", "line_number": 63, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 64, "usage_type": "call"}, {"api_name": "hamcrest.is_not", "line_number": 64, "usage_type": "call"}, {"api_name": "hamcrest.none", "line_number": 64, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 65, "usage_type": "call"}, {"api_name": "hamcrest.has_property", "line_number": 65, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 65, "usage_type": "call"}, {"api_name": "hamcrest.none", "line_number": 65, "usage_type": 
"call"}, {"api_name": "hamcrest.assert_that", "line_number": 68, "usage_type": "call"}, {"api_name": "hamcrest.is_in", "line_number": 68, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 71, "usage_type": "call"}, {"api_name": "hamcrest.has_property", "line_number": 71, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 71, "usage_type": "call"}, {"api_name": "nti.dataserver.tests.mock_dataserver.WithMockDSTrans", "line_number": 60, "usage_type": "attribute"}, {"api_name": "nti.dataserver.tests.mock_dataserver", "line_number": 60, "usage_type": "name"}, {"api_name": "nti.store.purchase_order.create_purchase_item", "line_number": 75, "usage_type": "call"}, {"api_name": "nti.store.purchase_order.create_purchase_order", "line_number": 76, "usage_type": "call"}, {"api_name": "nti.store.purchase_attempt.create_purchase_attempt", "line_number": 77, "usage_type": "call"}, {"api_name": "nti.store.payments.stripe.interfaces.IStripePurchaseAttempt", "line_number": 86, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 89, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 89, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 90, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 90, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 91, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 91, "usage_type": "call"}, {"api_name": "nti.dataserver.tests.mock_dataserver.WithMockDSTrans", "line_number": 83, "usage_type": "attribute"}, {"api_name": "nti.dataserver.tests.mock_dataserver", "line_number": 83, "usage_type": "name"}, {"api_name": "nti.store.payments.stripe.StripeException", "line_number": 94, "usage_type": "call"}, {"api_name": "nti.store.payments.stripe.interfaces.IStripePurchaseError", "line_number": 95, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 96, "usage_type": "call"}, {"api_name": "hamcrest.is_not", "line_number": 96, "usage_type": "call"}, {"api_name": "hamcrest.none", "line_number": 96, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 97, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 97, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 98, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 98, "usage_type": "call"}, {"api_name": "stripe.error.CardError", "line_number": 101, "usage_type": "call"}, {"api_name": "stripe.error", "line_number": 101, "usage_type": "attribute"}, {"api_name": "nti.store.payments.stripe.interfaces.IStripeOperationError", "line_number": 102, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 103, "usage_type": "call"}, {"api_name": "hamcrest.is_not", "line_number": 103, "usage_type": "call"}, {"api_name": "hamcrest.none", "line_number": 103, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 104, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 104, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 105, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 105, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 106, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 106, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 107, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 107, "usage_type": "call"}, 
{"api_name": "hamcrest.assert_that", "line_number": 108, "usage_type": "call"}, {"api_name": "nti.testing.matchers.validly_provides", "line_number": 108, "usage_type": "call"}, {"api_name": "nti.store.payments.stripe.interfaces.IStripeOperationError", "line_number": 108, "usage_type": "argument"}, {"api_name": "hamcrest.assert_that", "line_number": 109, "usage_type": "call"}, {"api_name": "nti.testing.matchers.verifiably_provides", "line_number": 109, "usage_type": "call"}, {"api_name": "nti.store.payments.stripe.interfaces.IStripeOperationError", "line_number": 109, "usage_type": "argument"}, {"api_name": "nti.store.payments.stripe.interfaces.IStripeOperationError", "line_number": 112, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 113, "usage_type": "call"}, {"api_name": "hamcrest.is_not", "line_number": 113, "usage_type": "call"}, {"api_name": "hamcrest.none", "line_number": 113, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 114, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 114, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 115, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 115, "usage_type": "call"}, {"api_name": "stripe.error.InvalidRequestError", "line_number": 117, "usage_type": "call"}, {"api_name": "stripe.error", "line_number": 117, "usage_type": "attribute"}, {"api_name": "nti.store.payments.stripe.interfaces.IStripeOperationError", "line_number": 118, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 119, "usage_type": "call"}, {"api_name": "nti.testing.matchers.validly_provides", "line_number": 119, "usage_type": "call"}, {"api_name": "nti.store.payments.stripe.interfaces.IStripeOperationError", "line_number": 119, "usage_type": "argument"}, {"api_name": "hamcrest.assert_that", "line_number": 120, "usage_type": "call"}, {"api_name": "nti.testing.matchers.verifiably_provides", "line_number": 120, "usage_type": "call"}, {"api_name": "nti.store.payments.stripe.interfaces.IStripeOperationError", "line_number": 120, "usage_type": "argument"}, {"api_name": "zope.component.getUtility", "line_number": 123, "usage_type": "call"}, {"api_name": "nti.store.payments.stripe.interfaces.IStripeConnectKey", "line_number": 123, "usage_type": "argument"}, {"api_name": "zope.component", "line_number": 123, "usage_type": "name"}, {"api_name": "nti.store.payments.stripe.interfaces.IStripeAccountInfo", "line_number": 125, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 127, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 127, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 128, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 128, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 129, "usage_type": "call"}, {"api_name": "hamcrest.not_", "line_number": 129, "usage_type": "call"}, {"api_name": "hamcrest.has_property", "line_number": 129, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 130, "usage_type": "call"}, {"api_name": "hamcrest.not_", "line_number": 130, "usage_type": "call"}, {"api_name": "hamcrest.has_property", "line_number": 130, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 131, "usage_type": "call"}, {"api_name": "hamcrest.not_", "line_number": 131, "usage_type": "call"}, {"api_name": "hamcrest.has_property", "line_number": 131, "usage_type": "call"}]} +{"seq_id": 
"191229716", "text": "from datetime import datetime\n\nfrom django.utils.encoding import force_text\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.exceptions import APIException\n\nfrom weather.models import Forecast\nfrom weather.serializers import ForecastSerializer\n\n\nclass WeatherException(APIException):\n \"\"\"\n Custom APIException to return proper message in case of error.\n \"\"\"\n status_code = status.HTTP_500_INTERNAL_SERVER_ERROR\n default_detail = 'A server error occurred.'\n\n def __init__(self, detail, status_code):\n if status_code is not None:\n self.status_code = status_code\n if detail is not None:\n self.detail = force_text(detail)\n\n\nclass ForecastDetail(APIView):\n \"\"\"\n Gets information about a forecast for a specific city given date and time.\n The allowed operation types are (summary, temperature, pressure, humidity).\n \"\"\"\n\n @staticmethod\n def check_requested_time(requested_date, requested_hour):\n \"\"\"\n Checks if the date and time requested correspond to the supported format.\n\n requested_date and requested_hour are enforced by the url to be integers\n so here only checks if they have a valid format.\n\n :param requested_date: the date present in the url of the request\n :param requested_hour: the time present in the url of the request\n :return: the requested_date and requested_hour as datetime objects\n \"\"\"\n try:\n formatted_date = datetime.strptime(str(requested_date), '%Y%m%d')\n formatted_time = datetime.strptime(str(requested_hour), '%H%M')\n except (ValueError, TypeError):\n raise WeatherException('The requested date and/or time have an '\n 'incorrect format. The correct format '\n 'should be YYYYMMDD for date and HHMM for '\n 'the time of the forecast',\n status.HTTP_400_BAD_REQUEST)\n\n return formatted_date, formatted_time\n\n @staticmethod\n def check_requested_operation(requested_operation):\n \"\"\"\n Checks if the operation requested is one of the supported operations.\n\n Supported operations: summary, temperature, pressure, humidity\n\n :param requested_operation: the name of the forecast details operation\n \"\"\"\n operation_name = requested_operation.lower()\n if operation_name not in ('summary', 'temperature', 'pressure',\n 'humidity'):\n raise WeatherException('The requested operation is not supported. 
'\n                                   'Please select one of the supported operations '\n                                   '(summary, temperature, pressure, humidity)',\n                                   status.HTTP_400_BAD_REQUEST)\n        return operation_name\n\n    def get(self, request, operation_name, forecast_date, forecast_hour, format=None):\n\n        requested_date, requested_time = self.check_requested_time(forecast_date,\n                                                                   forecast_hour)\n        requested_operation = self.check_requested_operation(operation_name)\n\n        try:\n            forecast = Forecast.objects.get(city__name='lewisham',\n                                            date=requested_date.date(),\n                                            time=requested_time.time())\n        except Forecast.DoesNotExist:\n            raise WeatherException('Unfortunately there\\'s no forecast data '\n                                   'for {} {}'.format(\n                                       requested_date.strftime('%Y-%m-%d'),\n                                       requested_time.strftime('%H:%M')),\n                                   status.HTTP_404_NOT_FOUND)\n\n        serialize_fields = ('description', 'temperature', 'pressure',\n                            'humidity')\n        serialize_context = {'temperature-units': 'C'}\n\n        # Since the names of the operations are equal to the model fields we\n        # can just filter the serializer fields by the normalized operation name\n        if requested_operation in ('temperature', 'pressure', 'humidity'):\n            serialize_fields = tuple(field for field in serialize_fields\n                                     if field == requested_operation)\n\n        serializer = ForecastSerializer(forecast,\n                                        fields=serialize_fields,\n                                        context=serialize_context)\n        return Response(serializer.data)\n", "repo_name": "xmaps/weather_forecast", "sub_path": "src/weather/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4652, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "rest_framework.exceptions.APIException", "line_number": 13, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 17, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 17, "usage_type": "name"}, {"api_name": "django.utils.encoding.force_text", "line_number": 24, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 27, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 46, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 53, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 53, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 72, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 72, "usage_type": "name"}, {"api_name": "weather.models.Forecast.objects.get", "line_number": 82, "usage_type": "call"}, {"api_name": "weather.models.Forecast.objects", "line_number": 82, "usage_type": "attribute"}, {"api_name": "weather.models.Forecast", "line_number": 82, "usage_type": "name"}, {"api_name": "weather.models.Forecast.DoesNotExist", "line_number": 85, "usage_type": "attribute"}, {"api_name": "weather.models.Forecast", "line_number": 85, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 90, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 90, "usage_type": "name"}, {"api_name": "weather.serializers.ForecastSerializer", "line_number": 102, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 
105, "usage_type": "call"}]} +{"seq_id": "70423627574", "text": "from pyro.optim import Adam, SGD\nfrom torch.distributions import constraints\nfrom util import NN\nfrom BNN import BNN\nimport os, sys\nimport pyro\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\nclass BNN_SVI(BNN):\n def __init__(self, dim, act = nn.ReLU(), num_hiddens = [50], conf = dict()):\n super(BNN_SVI, self).__init__()\n self.dim = dim\n self.act = act\n self.num_hiddens = num_hiddens\n self.num_iters = conf.get('num_iters', 4000)\n self.batch_size = conf.get('batch_size', 32)\n self.print_every = conf.get('print_every', 100)\n self.lr = conf.get('lr', 1e-2)\n self.weight_std = conf.get('weight_std', 1.0)\n self.noise_level = conf.get('noise_level', None)\n self.nn = NN(dim, self.act, self.num_hiddens, nout = 1)\n\n def model(self, X, y):\n \"\"\"\n Normal distribution for weights and bias\n \"\"\"\n num_x = X.shape[0]\n priors = dict()\n for n, p in self.nn.named_parameters():\n priors[n] = pyro.distributions.Normal(loc = torch.zeros_like(p), scale = self.weight_std * torch.ones_like(p)).to_event(1)\n\n lifted_module = pyro.random_module(\"module\", self.nn, priors)\n lifted_reg_model = lifted_module()\n with pyro.plate(\"map\", len(X), subsample_size = min(num_x, self.batch_size)) as ind:\n pred = lifted_reg_model(X[ind]).squeeze(-1)\n pyro.sample(\"obs\", pyro.distributions.Normal(pred, self.noise_level), obs = y[ind])\n\n def guide(self, X, y):\n priors = dict()\n softplus = nn.Softplus()\n for n, p in self.nn.named_parameters():\n loc = pyro.param(\"mu_\" + n, self.weight_std * torch.randn_like(p))\n scale = pyro.param(\"sigma_\" + n, torch.randn_like(p))\n priors[n] = pyro.distributions.Normal(loc = loc, scale = softplus(scale)).to_event(1)\n lifted_module = pyro.random_module(\"module\", self.nn, priors)\n return lifted_module()\n\n def train(self, X, y):\n if self.noise_level is None:\n print(\"No noise level provided, use noise_level = 0.05 * y.std()\")\n self.noise_level = 0.05 * y.std()\n num_train = X.shape[0]\n y = y.reshape(num_train)\n optim = pyro.optim.Adam({\"lr\":self.lr})\n svi = pyro.infer.SVI(self.model, self.guide, optim, loss = pyro.infer.Trace_ELBO())\n pyro.clear_param_store()\n self.rec = []\n for i in range(self.num_iters):\n loss = svi.step(X, y)\n if (i+1) % self.print_every == 0:\n self.rec.append(loss / num_train)\n print(\"[Iteration %05d/%05d] loss: %-4.3f\" % (i + 1, self.num_iters, loss / num_train))\n\n def sample(self, num_samples = 1):\n nns = [self.guide(None, None) for i in range(num_samples)]\n return nns\n\n def sample_predict(self, nns, X):\n num_x = X.shape[0]\n pred = torch.zeros(len(nns), num_x)\n for i in range(len(nns)):\n pred[i] = nns[i](X).squeeze()\n precs = torch.ones(pred.shape) / (self.noise_level**2)\n return pred\n\n def report(self):\n print(self.nn)\n", "repo_name": "Alaya-in-Matrix/BNN", "sub_path": "BNN_SVI.py", "file_name": "BNN_SVI.py", "file_ext": "py", "file_size_in_byte": 3306, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "21", "api": [{"api_name": "BNN.BNN", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "util.NN", "line_number": 23, "usage_type": "call"}, {"api_name": "pyro.distributions.Normal", "line_number": 32, "usage_type": "call"}, {"api_name": "pyro.distributions", "line_number": 32, "usage_type": "attribute"}, {"api_name": 
"torch.zeros_like", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 32, "usage_type": "call"}, {"api_name": "pyro.random_module", "line_number": 34, "usage_type": "call"}, {"api_name": "pyro.plate", "line_number": 36, "usage_type": "call"}, {"api_name": "pyro.sample", "line_number": 38, "usage_type": "call"}, {"api_name": "pyro.distributions.Normal", "line_number": 38, "usage_type": "call"}, {"api_name": "pyro.distributions", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.nn.Softplus", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "pyro.param", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.randn_like", "line_number": 44, "usage_type": "call"}, {"api_name": "pyro.param", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.randn_like", "line_number": 45, "usage_type": "call"}, {"api_name": "pyro.distributions.Normal", "line_number": 46, "usage_type": "call"}, {"api_name": "pyro.distributions", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pyro.random_module", "line_number": 47, "usage_type": "call"}, {"api_name": "pyro.optim.Adam", "line_number": 56, "usage_type": "call"}, {"api_name": "pyro.optim", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pyro.infer.SVI", "line_number": 57, "usage_type": "call"}, {"api_name": "pyro.infer", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pyro.infer.Trace_ELBO", "line_number": 57, "usage_type": "call"}, {"api_name": "pyro.clear_param_store", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "4197637885", "text": "import random\nfrom multiprocessing import Queue, Process\nimport time\nimport multiprocessing\nfrom multiprocessing import cpu_count\nfrom numpy import *\nimport pandas as pd\nimport numpy as np\nimport numpy\nfrom threading import Thread\n# import random \nfrom random import sample \n\n\n\n\ndef loadTrainDataSet():\n '''\n 加载训练数据\n '''\n train_data1 = pd.read_csv('./data/train1.csv',header=None)\n train_data2 = pd.read_csv('./data/train2.csv',header=None)\n train_data3 = pd.read_csv('./data/train3.csv',header=None)\n train_data4 = pd.read_csv('./data/train4.csv',header=None)\n train_data5 = pd.read_csv('./data/train5.csv',header=None)\n \n label1 = pd.read_csv('./data/label1.csv',header=None)\n label2 = pd.read_csv('./data/label2.csv',header=None)\n label3 = pd.read_csv('./data/label3.csv',header=None)\n label4 = pd.read_csv('./data/label4.csv',header=None)\n label5 = pd.read_csv('./data/label5.csv',header=None)\n \n train = pd.concat([train_data1, train_data2, train_data3, train_data4, train_data5], sort=False)\n label = pd.concat([label1, label2, label3, label4, label5], sort=False)\n \n train = np.array(train)\n label = np.array(label)\n \n train = np.c_[train, label]\n return np.array(train)\n\n\ndef loadTestDataSet():\n '''\n 加载测试数据\n '''\n test_data1 = pd.read_csv('./data/test1.csv',header=None)\n test_data2 = pd.read_csv('./data/test2.csv',header=None)\n test_data3 = pd.read_csv('./data/test3.csv',header=None)\n test_data4 = pd.read_csv('./data/test4.csv',header=None)\n test_data5 = pd.read_csv('./data/test5.csv',header=None)\n test_data6 = pd.read_csv('./data/test6.csv',header=None)\n test_data = pd.concat([test_data1, test_data2, test_data3, test_data4, test_data5, test_data6], sort=False)\n 
test_data = np.array(test_data)\n return test_data\n\n\n\nclass RegressionTree:\n def __init__(self, min_increase_gain = 1, min_samples_split = 100):\n '''\n min_increase_gain: the minimum reduction in error a split must achieve; splitting stops when the gain falls below this value\n min_samples_split: the minimum number of samples in each leaf\n '''\n self.min_increase_gain = min_increase_gain\n self.min_samples_split = min_samples_split\n \n def binSplitDataSet(self, dataSet, feature, value):\n ''' \n Split the dataset.\n A cache optimization is applied here: scheme 1 ran far too slowly, since every call had to traverse all the features.\n After the change, because the incoming dataSet is sorted by the split feature, the dataset can be split as soon as an element greater than the current value is found.\n '''\n '''\n Scheme 1\n dataSet00 = []\n dataSet11 = []\n for i in range(len(dataSet)):\n if dataSet[i][feature] > value:\n dataSet00.append(dataSet[i])\n else:\n dataSet11.append(dataSet[i])\n \n return np.array(dataSet00), np.array(dataSet11)\n '''\n # Scheme 2\n index = -1\n dataSet0 = []\n dataSet1 = []\n for i in range(len(dataSet)):\n if dataSet[i][feature] > value:\n index = i\n break\n \n if index == 0:\n return dataSet, np.array(dataSet0)\n elif index == -1:\n return np.array(dataSet0), dataSet\n else:\n dataSet1 = dataSet[0:index]\n dataSet0 = dataSet[index:]\n return np.array(dataSet0),np.array(dataSet1)\n \n def leafValue(self, dataSet):\n '''\n Mean of the leaf's target values, used as the final prediction\n '''\n return mean(dataSet[:,-1])\n \n \n def leafVar(self, dataSet):\n '''\n Total variance of the targets, used to pick the best split point\n '''\n return var(dataSet[:,-1]) * shape(dataSet)[0]\n\n \n def chooseBestSplit(self, dataSet):\n '''\n Pick the best split point. Before splitting, the data is sorted by feature\n to reduce cache misses and memory paging, which effectively cuts training time\n \n '''\n # If all target values in the leaf are equal, no further split is needed\n if len(set(dataSet[:,-1].T.tolist())) == 1:\n return None, self.leafValue(dataSet)\n m, n = shape(dataSet)\n # Variance of the parent node\n parentVar = self.leafVar(dataSet)\n bestVar = inf\n bestIndex = 0\n bestValue = 0\n # Randomly pick the candidate split features\n # On random seeds: with multiple processes a seed has to be set, otherwise the generated numbers are not strictly random\n # Randomly choose 1/3 of the features as split candidates\n random_features = sample(range(0, n - 1),int((n-1)/3))\n \n for featIndex in random_features:\n # Sort by this feature to reduce paging\n temp = [x[featIndex] for x in dataSet]\n index = np.argsort(temp ,axis=0)\n dataSet = dataSet[index]\n temp.sort()\n temp = list(set(temp))\n t = []\n for i in range(len(temp)):\n if i == 0:\n tag = temp[i]\n t.append(temp[i])\n else:\n if temp[i] - tag > 1:\n t.append(temp[i])\n tag = temp[i]\n temp = t\n for splitVal in temp:\n dataSet0, dataSet1 = self.binSplitDataSet(dataSet, featIndex, splitVal)\n # If either side would end up with fewer than the minimum leaf samples, skip this split\n if (shape(dataSet0)[0] < self.min_samples_split) or (shape(dataSet1)[0] < self.min_samples_split): \n continue\n # Variance after the split\n newVar = len(dataSet0)/len(dataSet)*self.leafVar(dataSet0) + len(dataSet1)/len(dataSet)*self.leafVar(dataSet1)\n # Compare the post-split variance with the best so far and keep the better one\n if newVar < bestVar: \n bestIndex = featIndex\n bestValue = splitVal\n bestVar = newVar\n # If the gain is too small, do not split\n if (parentVar - bestVar) < self.min_increase_gain: \n return None, self.leafValue(dataSet)\n \n temp = [x[bestIndex] for x in dataSet]\n # temp = dataSet[:,featIndex]\n index = np.argsort(temp ,axis=0)\n dataSet = dataSet[index]\n dataSet0, dataSet1 = self.binSplitDataSet(dataSet, bestIndex, bestValue)\n if (shape(dataSet0)[0] < self.min_samples_split) or (shape(dataSet1)[0] < self.min_samples_split):\n return None, self.leafValue(dataSet)\n \n return bestIndex, bestValue\n \n def createTree(self, dataSet, deep = 20):\n '''\n Combine the steps above to build a complete tree\n (recursive implementation)\n '''\n deep -= 1\n if deep == 0:\n return self.leafValue(dataSet)\n feat, val = self.chooseBestSplit(dataSet)\n if feat == None: \n return val\n retTree = {}\n retTree['featureIndex'] = feat\n retTree['spiltValue'] = val\n temp = [x[feat] for x in dataSet]\n index = np.argsort(temp ,axis=0)\n 
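# Put the rows back in feature order so binSplitDataSet can split the sorted array in one pass\n 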
dataSet = dataSet[index]\n lSet, rSet = self.binSplitDataSet(dataSet, feat, val)\n retTree['left'] = self.createTree(lSet, deep)\n retTree['right'] = self.createTree(rSet, deep)\n return retTree\n\n\nclass RandomForestRegressor:\n def __init__(self, n_estimators = 2, max_depth = 20, sample_ratio = 0.1, min_sample_spilt = 100, process_num = 8, min_increase_gain = 1):\n '''\n n_estimators: number of trees\n max_depth: maximum depth of each tree \n sample_ratio: fraction of the data sampled for each tree\n min_sample_spilt: minimum number of samples in a leaf\n process_num: number of worker processes\n '''\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.forests = []\n self.sample_ratio = sample_ratio\n self.min_sample_spilt = min_sample_spilt\n self.process_num = process_num\n self.min_increase_gain = min_increase_gain\n \n def predict_one_tree(self, tree, data):\n # Walk a single tree down to a leaf (stored as np.float64) for one sample and return its value\n while type(tree) != np.float64:\n if data[tree['featureIndex']] > tree['spiltValue']:\n tree = tree['left']\n else:\n tree = tree['right']\n return tree\n \n def fit(self, dataSet):\n # Build the trees in parallel across a process pool\n pool = multiprocessing.Pool(processes = self.process_num)\n start = time.time()\n result = []\n for i in range(self.n_estimators):\n result.append(pool.apply_async(self.build_tree_mulprocess, args=(i, dataSet,)))\n \n pool.close()\n pool.join()\n end = time.time()\n print('time:', (end - start))\n for r in result:\n self.forests.append(r.get())\n\n def build_tree_mulprocess(self, i, dataSet):\n \"\"\"\n Worker that builds a single tree\n \"\"\"\n print('Building tree ', i, ' ...')\n sample_dataSet = self.get_sample_dataSet(dataSet, i)\n t = RegressionTree(min_increase_gain=self.min_increase_gain, min_samples_split=self.min_sample_spilt)\n tree = t.createTree(sample_dataSet, self.max_depth)\n print('Build tree ', i, ' end')\n return tree\n\n def get_sample_dataSet(self, dataSet, i):\n '''\n Sample the dataset with replacement;\n sample_ratio is the fraction of the original samples to draw\n '''\n print('Getting data for tree ', i)\n sample_size = round(len(dataSet) * self.sample_ratio)\n sample_dataSet = []\n random_indexs = []\n while len(random_indexs) < sample_size:\n random_indexs.append(random.randint(0, len(dataSet)-1))\n \n # Sort the indices before fetching rows to avoid excessive paging\n random_indexs.sort()\n for j in random_indexs:\n sample_dataSet.append(np.array(dataSet[j]).reshape(14))\n print('Get data for tree ', i, ' end')\n return np.array(sample_dataSet)\n \n def getForest(self):\n return self.forests\n \n def predict(self, data):\n '''\n Predict with the whole forest: average the predictions of all trees\n '''\n result = []\n for j in range(len(data)):\n if j%10000 == 0:\n print('[{}] of [{}]'.format(j, len(data)))\n s = 0\n for i in range(len(self.forests)):\n temp = self.forests[i]\n while 1:\n if type(temp) == np.float64:\n s += temp\n break\n index = temp['featureIndex']\n d = data[j][index]\n if d > temp['spiltValue']:\n temp = temp['left']\n else:\n temp = temp['right']\n result.append(s/len(self.forests))\n return np.array(result)\n\n\ntrain_data = loadTrainDataSet()\ntest_data = loadTestDataSet()\n\nr = RandomForestRegressor(n_estimators = 100, max_depth = 10, sample_ratio=0.01, min_sample_spilt = 1000, process_num = 8)\nr.fit(train_data)\n\nr.getForest()\n\nresult = r.predict(test_data)\n\ndf = pd.DataFrame({'Predicted':result})\ndf.index += 1\ndf.to_csv('./data/my_sub.csv', index = True, index_label = 'id')", "repo_name": "Vilinz/RandomForest", "sub_path": "code.py", "file_name": "code.py", "file_ext": "py", "file_size_in_byte": 11058, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "21", "api": [{"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 24, "usage_type": 
"call"}, {"api_name": "pandas.read_csv", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 102, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 196, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 226, "usage_type": "call"}, {"api_name": "time.time", "line_number": 227, "usage_type": "call"}, {"api_name": "time.time", "line_number": 234, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 284, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 294, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 307, "usage_type": "call"}]} +{"seq_id": "25076667195", "text": "import pygame as pg\n\nfrom .core import Scene, Scenes\nfrom src.controls import Button, Label, ItemBox, PictureBox, ControlsContainer\nfrom src.setting import WINDOW_WIDTH, WINDOW_HEIGHT, button_color\nfrom src.gamePlay import Record\nfrom .play import Play\nfrom src import Maps\n\n\nclass GameMode:\n EASY = 0\n MEDIUM = 1\n\n\nclass Game(Scene):\n\n EzBg = pg.image.load(\"src/assets/EasyLevelBG.png\")\n MdBg = pg.image.load(\"src/assets/MediumLevelBG.png\")\n background = EzBg\n\n Backpic = PictureBox(20, 20, 100, 50, \"src/assets/Logo.png\")\n EzBt = Button(20, 250, 150, 50, \"EASY\", 10, (112, 187, 68), \"white\")\n MdBt = Button(20, 320, 150, 50, \"MEDIUM\", 10, (183, 51, 62), \"white\")\n PlayBt = Button(WINDOW_WIDTH - 120, WINDOW_HEIGHT - 60,\n 100, 50, \"Play\", 20, (112, 187, 68), \"white\")\n isEnabled = True\n LvLabel = Label(20, 180, 100, 50, \"MODE:\")\n\n 
teamTowers = []\n padding = 5\n for c in range(0, 5):\n box = ItemBox((c+1)*2*padding + c*100, 2 * padding,\n 100, \"src/assets/MediumLevelBG.png\", \"NONE\", \"\")\n teamTowers.append(box)\n container = ControlsContainer(350, 100, teamTowers, padding, \"grey\")\n\n Map_Loader = PictureBox(350, container.rect.top + 200,\n container.rect.width, 300, \"\")\n nextBt = Button(Map_Loader.rect.left + Map_Loader.rect.width/2 + 175,\n Map_Loader.rect.top + Map_Loader.rect.height + 10,\n 50, 50, \">\", 25, button_color, \"white\")\n\n preBt = Button(Map_Loader.rect.left + Map_Loader.rect.width/2 - 225,\n Map_Loader.rect.top + Map_Loader.rect.height + 10,\n 50, 50, \"<\", 25, button_color, \"white\")\n MapNameLb = Label(preBt.rect.right, nextBt.rect.top,\n nextBt.rect.left - preBt.rect.right, 50,\n text=\"\", bgcolor=\"white\")\n controls = pg.sprite.Group()\n map_index = 0\n\n panel = pg.Rect(container.rect.left - 25, container.rect.top - 10,\n container.rect.width + 50, nextBt.rect.bottom - 80)\n panel_color = EzBt.background_color\n controls.add(Backpic, LvLabel, EzBt, MdBt, PlayBt, container,\n Map_Loader, nextBt, preBt, MapNameLb)\n\n # other\n mode = GameMode.EASY\n\n @staticmethod\n def event_handler(event):\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_s:\n return Scenes.MENU\n return None\n\n @staticmethod\n def game(screen, login):\n Game.background = pg.transform.scale(\n Game.background, (WINDOW_WIDTH, WINDOW_HEIGHT))\n # Show the towers in the player's team\n for t in range(5):\n if t < len(login.team):\n Game.teamTowers[t].pictureBox.img_path = login.team[t].img_src\n Game.teamTowers[t].mainText.text = login.team[t].name\n Game.teamTowers[t].subText.text = f\"${login.team[t].in_game_price}\"\n else:\n Game.teamTowers[t].pictureBox.img_path = \"src/assets/MediumLevelBG.png\"\n Game.teamTowers[t].mainText.text = \"NONE\"\n Game.teamTowers[t].subText.text = \"\"\n # Load the current map into the map loader\n cur_map = Maps[Game.map_index]\n Game.Map_Loader.img_path = cur_map.img_path\n Game.MapNameLb.text = cur_map.name\n\n # Drawing session\n screen.blit(Game.background, (0, 0))\n pg.draw.rect(screen, color=Game.panel_color, rect=Game.panel,\n border_radius=15)\n for c in Game.controls:\n c.draw(screen)\n\n # click events\n for s in Game.teamTowers:\n if s.isClicked():\n return Scenes.INVENTORY\n if Game.nextBt.isClicked():\n if Game.map_index < len(Maps)-1:\n Game.map_index += 1\n if Game.preBt.isClicked():\n if Game.map_index > 0:\n Game.map_index -= 1\n if Game.Backpic.isClicked():\n return Scenes.MENU\n if Game.EzBt.isClicked():\n Game.background = Game.EzBg\n Game.panel_color = Game.EzBt.background_color\n Game.isEnabled = True\n Game.mode = GameMode.EASY\n if Game.MdBt.isClicked():\n Game.background = Game.MdBg\n Game.panel_color = Game.MdBt.background_color\n Game.mode = GameMode.MEDIUM\n Game.isEnabled = False\n if not Game.isEnabled:\n Game.PlayBt.background_color = \"grey\"\n Game.PlayBt.text = \"COMING SOON\"\n else:\n Game.PlayBt.background_color = button_color\n Game.PlayBt.text = \"Play\"\n\n Game.PlayBt.rect.width = Game.PlayBt.text_rect.width + 20\n Game.PlayBt.rect.left = WINDOW_WIDTH - \\\n Game.PlayBt.rect.width - 20\n\n if Game.PlayBt.isClicked() and Game.isEnabled:\n record = Record(Game.mode, cur_map, login)\n Play.reset(record)\n return Scenes.PLAY\n return None\n", "repo_name": "HPhat03/TowerDefense_pygame", "sub_path": "src/scenes/gameSet.py", "file_name": "gameSet.py", "file_ext": "py", "file_size_in_byte": 4992, "program_lang": "python", "lang": "en", 
"doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "core.Scene", "line_number": 16, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 19, "usage_type": "attribute"}, {"api_name": "src.controls.PictureBox", "line_number": 22, "usage_type": "call"}, {"api_name": "src.controls.Button", "line_number": 23, "usage_type": "call"}, {"api_name": "src.controls.Button", "line_number": 24, "usage_type": "call"}, {"api_name": "src.controls.Button", "line_number": 25, "usage_type": "call"}, {"api_name": "src.setting.WINDOW_WIDTH", "line_number": 25, "usage_type": "name"}, {"api_name": "src.setting.WINDOW_HEIGHT", "line_number": 25, "usage_type": "name"}, {"api_name": "src.controls.Label", "line_number": 28, "usage_type": "call"}, {"api_name": "src.controls.ItemBox", "line_number": 33, "usage_type": "call"}, {"api_name": "src.controls.ControlsContainer", "line_number": 36, "usage_type": "call"}, {"api_name": "src.controls.PictureBox", "line_number": 38, "usage_type": "call"}, {"api_name": "src.controls.Button", "line_number": 40, "usage_type": "call"}, {"api_name": "src.setting.button_color", "line_number": 42, "usage_type": "argument"}, {"api_name": "src.controls.Button", "line_number": 44, "usage_type": "call"}, {"api_name": "src.setting.button_color", "line_number": 46, "usage_type": "argument"}, {"api_name": "src.controls.Label", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.sprite.Group", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 65, "usage_type": "attribute"}, {"api_name": "core.Scenes.MENU", "line_number": 66, "usage_type": "attribute"}, {"api_name": "core.Scenes", "line_number": 66, "usage_type": "name"}, {"api_name": "pygame.transform.scale", "line_number": 71, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 71, "usage_type": "attribute"}, {"api_name": "src.setting.WINDOW_WIDTH", "line_number": 72, "usage_type": "name"}, {"api_name": "src.setting.WINDOW_HEIGHT", "line_number": 72, "usage_type": "name"}, {"api_name": "src.Maps", "line_number": 84, "usage_type": "name"}, {"api_name": "pygame.draw.rect", "line_number": 90, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 90, "usage_type": "attribute"}, {"api_name": "core.Scenes.INVENTORY", "line_number": 98, "usage_type": "attribute"}, {"api_name": "core.Scenes", "line_number": 98, "usage_type": "name"}, {"api_name": "src.Maps", "line_number": 100, "usage_type": "argument"}, {"api_name": "core.Scenes.MENU", "line_number": 106, "usage_type": "attribute"}, {"api_name": "core.Scenes", "line_number": 106, "usage_type": "name"}, {"api_name": "src.setting.button_color", "line_number": 121, "usage_type": "name"}, {"api_name": "src.setting.WINDOW_WIDTH", "line_number": 125, "usage_type": "name"}, {"api_name": "src.gamePlay.Record", "line_number": 129, "usage_type": "call"}, {"api_name": "play.Play.reset", "line_number": 130, "usage_type": "call"}, {"api_name": "play.Play", "line_number": 130, "usage_type": "name"}, {"api_name": "core.Scenes.PLAY", 
"line_number": 131, "usage_type": "attribute"}, {"api_name": "core.Scenes", "line_number": 131, "usage_type": "name"}]} +{"seq_id": "19952507718", "text": "import tkinter as tk\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nimport skimage.io as io\nfrom neighborhood_processing import box_smoothing, gaussian_smoothing, laplacian_sharping, order_statistic, \\\n highboost\nfrom point_processing import negative, intensity_level_slicing, contrast_stretching, power_law, histogram, \\\n global_histogram_equalization, local_histogram_equalization\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# this function shows result of histogram\ndef histogram_invoker(file_path):\n result = histogram(file_path)\n if result == -1:\n messagebox.showerror('Unsupported image', 'Please import the right format image')\n return\n\n\n# resize pic to most suitable size into canvas\ndef resize(w_box, h_box, image):\n original_width, original_height = image.size\n f1 = 1.0 * w_box / original_width\n f2 = 1.0 * h_box / original_height\n factor = min([f1, f2])\n resized_width = int(original_width * factor)\n resize_height = int(original_height * factor)\n return image.resize((resized_width, resize_height), Image.ANTIALIAS)\n\n\n# this function shows result of mask image in sharping\ndef mask_invoker(file_path):\n try:\n mask = io.imread(file_path)\n plt.imshow(mask, cmap=plt.cm.gray, vmin=0, vmax=255)\n plt.title(\"Mask\", fontsize=6)\n plt.axis('off')\n plt.show()\n except Exception:\n messagebox.showerror('Unsupported image', 'Please import the right format image')\n return\n\n\n# this function handles exception\ndef default_invoker():\n messagebox.showerror('Empty', 'Please import the image')\n return\n\n\nclass Window:\n root = None # root window\n image = None # imported image\n file_path = None # imported file path\n\n # following member fields of point processing\n negative_image = None\n bit_plane_selection = 0\n intensity_level_slicing_image = None\n stretched_image = None\n power_law_image = None\n power_law_constant = 0\n power_law_gamma = 0\n global_histogram_equalized_image = None\n local_histogram_equalized_image = None\n\n # following member fields of neighborhood processing\n box_kernel_size = 0\n box_smoothing_image = None\n \n gaussian_kernel_size = 0\n gaussian_constant_k = 0\n gaussian_sigma = 0\n gaussian_smoothing_image = None\n\n laplacian_sharping_image = None\n laplacian_sharping_mask = None\n\n median_filtering_image = None\n \n highboost_kernel_size = 0\n highboost_constant_k = 0\n highboost_sigma = 0\n highboost_image = None\n highboost_mask = None\n\n # window main components\n main_frame = None\n left_frame = None\n left_canvas = None\n left_hist_button = None\n right_frame = None\n right_canvas = None\n right_hist_button = None\n right_show_mask_button = None\n\n # IO image path\n GIF_PATH = 'current.gif'\n NEGATIVE_PATH = 'negative.gif'\n INTENSITY_LEVEL_SLICING_PATH = 'intensity_level_slicing.gif'\n CONTRAST_STRETCHING_PATH = 'contrast_stretching.gif'\n POWER_LAW_PATH = 'power.gif'\n GLOBAL_HISTOGRAM_EQUALIZED_PATH = 'global_histogram_equalized.gif'\n LOCAL_HISTOGRAM_EQUALIZED_PATH = 'local_histogram_equalized.gif'\n BOX_SMOOTHING_PATH = 'box_smoothing.gif'\n GAUSSIAN_SMOOTHING_PATH = 'gaussian_smoothing.gif'\n LAPLACIAN_SHARPING_PATH = 'laplacian_sharping.gif'\n LAPLACIAN_SHARPING_MASK_PATH = 'laplacian_sharping_mask.gif'\n HIGHBOOST_PATH = 'highboost.gif'\n HIGHBOOST_MASK_PATH = 'highboost_mask.gif'\n MEDIAN_FILTERING_PATH = 
'median_filtering.gif'\n\n def __init__(self):\n\n self.root = tk.Tk()\n self.root.title('Image Processing Toolkit')\n self.root.geometry('700x430')\n self.main_frame = tk.Frame(self.root)\n self.main_frame.pack()\n self.left_frame = tk.Frame(self.main_frame)\n self.left_frame.pack(side='left')\n self.left_canvas = tk.Canvas(self.left_frame, height=350, width=330)\n self.left_canvas.pack()\n self.left_hist_button = tk.Button(self.left_frame, text='Show Histogram',\n command=lambda: histogram_invoker(file_path=self.file_path))\n\n self.right_frame = tk.Frame(self.main_frame)\n self.right_frame.pack(side='right')\n self.right_canvas = tk.Canvas(self.right_frame, height=350, width=330)\n self.right_canvas.pack()\n self.right_show_mask_button = tk.Button(self.right_frame, text='Show mask', command=default_invoker)\n self.right_hist_button = tk.Button(self.right_frame, text='Show Histogram', command=default_invoker)\n\n def create_menu(self, root):\n menuBar = tk.Menu(root)\n fileMenu = tk.Menu(menuBar, tearoff=0)\n menuBar.add_cascade(label='File', menu=fileMenu)\n fileMenu.add_command(label='Import Image', command=self.import_image)\n fileMenu.add_command(label='Exit', command=lambda: exit(0))\n\n pointMenu = tk.Menu(menuBar, tearoff=0)\n menuBar.add_cascade(label='Point Processing', menu=pointMenu)\n pointMenu.add_command(label='Negative', command=self.do_negative)\n pointMenu.add_command(label='Intensity-Level Slicing', command=self.do_intensity_level_slicing)\n pointMenu.add_command(label='Contrast Stretching', command=self.do_contrast_stretching)\n pointMenu.add_command(label='Global Histogram Equalization', command=self.do_global_histogram_equalization)\n pointMenu.add_command(label='Local Histogram Equalization', command=self.do_local_histogram_equalization)\n pointMenu.add_command(label='Power-Law', command=self.do_power_law)\n\n neighborhoodMenu = tk.Menu(menuBar, tearoff=0)\n menuBar.add_cascade(label='Neighborhood Processing', menu=neighborhoodMenu)\n smoothingMenu = tk.Menu(neighborhoodMenu)\n sharpingMenu = tk.Menu(neighborhoodMenu)\n smoothingMenu.add_command(label='Box Smoothing', command=self.do_box_smoothing)\n smoothingMenu.add_command(label='Gaussian Smoothing', command=self.do_gaussian_smoothing)\n sharpingMenu.add_command(label='Laplacian Sharping', command=self.do_laplacian_sharping)\n sharpingMenu.add_command(label='Unsharp Mask and Highboost', command=self.do_highboost)\n neighborhoodMenu.add_cascade(label='Smoothing', menu=smoothingMenu)\n neighborhoodMenu.add_cascade(label='Sharping', menu=sharpingMenu)\n neighborhoodMenu.add_command(label='Median filtering', command=self.do_median_filtering)\n root.config(menu=menuBar)\n\n def import_image(self):\n self.file_path = filedialog.askopenfilename()\n # resize then convert to PhotoImage\n try:\n image = Image.open(self.file_path)\n image = resize(300, 400, image)\n image.save(self.GIF_PATH)\n self.image = tk.PhotoImage(file=self.GIF_PATH)\n self.left_canvas.create_image(20, 20, anchor='nw', image=self.image)\n self.left_hist_button.pack(side='bottom')\n\n except ValueError:\n messagebox.showerror('Unsupported image', 'Please import the right format image')\n return\n except AttributeError:\n messagebox.showerror('Unsupported image', 'Please import the right format image')\n return\n\n def do_negative(self):\n self.right_show_mask_button.config(command=lambda: mask_invoker(file_path=default_invoker))\n self.right_show_mask_button.pack_forget()\n negative_arr = negative(self.GIF_PATH)\n if negative_arr is None:\n 
messagebox.showerror('Unsupported image', 'Please import the right format image')\n return\n Image.fromarray(np.uint8(negative_arr)).save(self.NEGATIVE_PATH)\n self.negative_image = tk.PhotoImage(file=self.NEGATIVE_PATH)\n self.right_canvas.create_image(20, 20, anchor='nw', image=self.negative_image)\n self.right_hist_button.config(command=lambda: histogram_invoker(file_path=self.NEGATIVE_PATH))\n self.right_hist_button.pack()\n\n def do_intensity_level_slicing(self):\n confirm_window = tk.Toplevel(self.root)\n confirm_window.geometry('300x300')\n confirm_label = tk.Label(confirm_window, text=\"Please select bit plane level:\")\n confirm_label.pack()\n selection_list = tk.Listbox(confirm_window)\n for item in [1, 2, 3, 4, 5, 6, 7, 8]:\n selection_list.insert(\"end\", item)\n selection_list.pack()\n selection_list.bind('<>', self.getSelectedItem)\n confirm_button = tk.Button(confirm_window, text='Confirm', width=10, height=3,\n command=self.intensity_level_slicing_invoker)\n confirm_button.pack()\n\n # call back function for selecting intensity level\n def getSelectedItem(self, evt):\n # Note here that Tkinter passes an event object\n w = evt.widget\n index = int(w.curselection()[0])\n self.bit_plane_selection = w.get(index)\n\n def intensity_level_slicing_invoker(self):\n self.right_show_mask_button.config(command=lambda: mask_invoker(file_path=default_invoker))\n self.right_show_mask_button.pack_forget()\n intensity_level_slicing_arr = intensity_level_slicing(self.GIF_PATH, self.bit_plane_selection)\n if intensity_level_slicing_arr is None:\n messagebox.showerror('Unsupported image', 'Please import the right format image')\n return\n Image.fromarray(np.uint8(intensity_level_slicing_arr)).save(self.INTENSITY_LEVEL_SLICING_PATH)\n self.intensity_level_slicing_image = tk.PhotoImage(file=self.INTENSITY_LEVEL_SLICING_PATH)\n self.right_canvas.create_image(20, 20, anchor='nw', image=self.intensity_level_slicing_image)\n self.right_hist_button.config(command=lambda: histogram_invoker(file_path=self.INTENSITY_LEVEL_SLICING_PATH))\n self.right_hist_button.pack()\n\n def do_contrast_stretching(self):\n self.right_show_mask_button.config(command=lambda: mask_invoker(file_path=default_invoker))\n self.right_show_mask_button.pack_forget()\n stretched_arr = contrast_stretching(self.GIF_PATH)\n if stretched_arr is None:\n messagebox.showerror('Unsupported image', 'Please import the right format image')\n return\n Image.fromarray(stretched_arr).save(self.CONTRAST_STRETCHING_PATH)\n self.stretched_image = tk.PhotoImage(file=self.CONTRAST_STRETCHING_PATH)\n self.right_canvas.create_image(20, 20, anchor='nw', image=self.stretched_image)\n self.right_hist_button.config(\n command=lambda: histogram_invoker(file_path=self.CONTRAST_STRETCHING_PATH))\n self.right_hist_button.pack()\n\n def do_power_law(self):\n confirm_window = tk.Toplevel(self.root)\n confirm_window.geometry('300x150')\n constant_label = tk.Label(confirm_window, text=\"Please input constant value:\")\n constant_label.pack()\n constant_entry = tk.Entry(confirm_window, show=None)\n constant_entry.bind('', self.power_law_constant_assign)\n constant_entry.pack()\n gamma_label = tk.Label(confirm_window, text=\"Please input gamma value:\")\n gamma_label.pack()\n gamma_entry = tk.Entry(confirm_window, show=None)\n gamma_entry.bind('', self.gamma_assign)\n gamma_entry.pack()\n confirm_button = tk.Button(confirm_window, text='Confirm', width=10, height=3, command=self.power_law_invoker)\n confirm_button.pack()\n\n # call back functions for selecting 
parameters\n def power_law_constant_assign(self, evt):\n w = evt.widget\n self.power_law_constant = w.get()\n\n def gamma_assign(self, evt):\n w = evt.widget\n self.power_law_gamma = w.get()\n\n def power_law_invoker(self):\n self.right_show_mask_button.config(command=lambda: mask_invoker(file_path=default_invoker))\n self.right_show_mask_button.pack_forget()\n gamma_arr = power_law(self.GIF_PATH, self.power_law_constant, self.power_law_gamma)\n if gamma_arr is None:\n messagebox.showerror('Invalid Input', 'Please input valid integer or numeric number')\n return\n Image.fromarray(np.uint8(gamma_arr)).save(self.POWER_LAW_PATH)\n self.power_law_image = tk.PhotoImage(file=self.POWER_LAW_PATH)\n self.right_canvas.create_image(20, 20, anchor='nw', image=self.power_law_image)\n self.right_hist_button.config(\n command=lambda: histogram_invoker(file_path=self.POWER_LAW_PATH))\n self.right_hist_button.pack()\n\n def do_global_histogram_equalization(self):\n self.right_show_mask_button.config(command=lambda: mask_invoker(file_path=default_invoker))\n self.right_show_mask_button.pack_forget()\n equalized_arr = global_histogram_equalization(self.GIF_PATH)\n if equalized_arr is None:\n messagebox.showerror('Unsupported image', 'Please import the right format image')\n return\n Image.fromarray(equalized_arr).save(self.GLOBAL_HISTOGRAM_EQUALIZED_PATH)\n self.global_histogram_equalized_image = tk.PhotoImage(file=self.GLOBAL_HISTOGRAM_EQUALIZED_PATH)\n self.right_canvas.create_image(20, 20, anchor='nw', image=self.global_histogram_equalized_image)\n self.right_hist_button.config(\n command=lambda: histogram_invoker(file_path=self.GLOBAL_HISTOGRAM_EQUALIZED_PATH))\n self.right_hist_button.pack()\n\n def do_local_histogram_equalization(self):\n self.right_show_mask_button.config(command=lambda: mask_invoker(file_path=default_invoker))\n self.right_show_mask_button.pack_forget()\n equalized_arr = local_histogram_equalization(self.GIF_PATH)\n if equalized_arr is None:\n messagebox.showerror('Unsupported image', 'Please import the right format image')\n return\n Image.fromarray(equalized_arr).save(self.LOCAL_HISTOGRAM_EQUALIZED_PATH)\n self.local_histogram_equalized_image = tk.PhotoImage(file=self.LOCAL_HISTOGRAM_EQUALIZED_PATH)\n self.right_canvas.create_image(20, 20, anchor='nw', image=self.local_histogram_equalized_image)\n self.right_hist_button.config(\n command=lambda: histogram_invoker(file_path=self.GLOBAL_HISTOGRAM_EQUALIZED_PATH))\n self.right_hist_button.pack()\n\n def do_box_smoothing(self):\n confirm_window = tk.Toplevel(self.root)\n confirm_window.geometry('300x100')\n constant_label = tk.Label(confirm_window, text=\"Please input box smoothing kernel size:\")\n constant_label.pack()\n constant_entry = tk.Entry(confirm_window, show=None)\n constant_entry.bind('', self.box_kernel_size_assign)\n constant_entry.pack()\n confirm_button = tk.Button(confirm_window, text='Confirm', width=10, height=3, command=self.box_smoothing_invoker)\n confirm_button.pack()\n\n # call back function for box kernel size\n def box_kernel_size_assign(self, evt):\n w = evt.widget\n self.box_kernel_size = w.get()\n\n def box_smoothing_invoker(self):\n self.right_show_mask_button.config(command=lambda: mask_invoker(file_path=default_invoker))\n self.right_show_mask_button.pack_forget()\n box_smoothing_arr = box_smoothing(self.GIF_PATH, self.box_kernel_size)\n if box_smoothing_arr is None:\n messagebox.showerror('Invalid Input', 'Please input valid odd integer size')\n return\n 
Image.fromarray(np.uint8(box_smoothing_arr)).save(self.BOX_SMOOTHING_PATH)\n self.box_smoothing_image = tk.PhotoImage(file=self.BOX_SMOOTHING_PATH)\n self.right_canvas.create_image(20, 20, anchor='nw', image=self.box_smoothing_image)\n self.right_hist_button.config(\n command=lambda: histogram_invoker(file_path=self.BOX_SMOOTHING_PATH))\n self.right_hist_button.pack()\n\n def do_gaussian_smoothing(self):\n confirm_window = tk.Toplevel(self.root)\n confirm_window.geometry('300x200')\n kernel_label = tk.Label(confirm_window, text=\"Please input gaussian smoothing kernel size:\")\n kernel_label.pack()\n kernel_entry = tk.Entry(confirm_window, show=None)\n kernel_entry.bind('', self.gaussian_kernel_size_assign)\n kernel_entry.pack()\n constant_label = tk.Label(confirm_window, text=\"Please input gaussian smoothing constant value:\")\n constant_label.pack()\n constant_entry = tk.Entry(confirm_window, show=None)\n constant_entry.bind('', self.gaussian_constant_assign)\n constant_entry.pack()\n sigma_label = tk.Label(confirm_window, text=\"Please input gaussian smoothing sigma value:\")\n sigma_label.pack()\n sigma_entry = tk.Entry(confirm_window, show=None)\n sigma_entry.bind('', self.gaussian_sigma_assign)\n sigma_entry.pack()\n confirm_button = tk.Button(confirm_window, text='Confirm', width=10, height=3, command=self.gaussian_smoothing_invoker)\n confirm_button.pack()\n\n def gaussian_kernel_size_assign(self, evt):\n w = evt.widget\n self.gaussian_kernel_size = w.get()\n\n def gaussian_constant_assign(self, evt):\n w = evt.widget\n self.gaussian_constant_k = w.get()\n\n def gaussian_sigma_assign(self, evt):\n w = evt.widget\n self.gaussian_sigma = w.get()\n\n def gaussian_smoothing_invoker(self):\n gaussian_smoothing_arr = gaussian_smoothing(self.GIF_PATH, self.gaussian_constant_k, self.gaussian_kernel_size, self.gaussian_sigma)\n if gaussian_smoothing_arr is None:\n messagebox.showerror('Invalid Input', 'Please input valid value')\n return\n Image.fromarray(np.uint8(gaussian_smoothing_arr)).save(self.GAUSSIAN_SMOOTHING_PATH)\n self.gaussian_smoothing_image = tk.PhotoImage(file=self.GAUSSIAN_SMOOTHING_PATH)\n self.right_canvas.create_image(20, 20, anchor='nw', image=self.gaussian_smoothing_image)\n self.right_hist_button.config(\n command=lambda: histogram_invoker(file_path=self.GAUSSIAN_SMOOTHING_PATH))\n self.right_hist_button.pack()\n\n def do_laplacian_sharping(self):\n laplacian_sharping_arr, laplacian_sharping_mask = laplacian_sharping(self.GIF_PATH)\n if laplacian_sharping_arr is None:\n messagebox.showerror('Unsupported image', 'Please import the right format image')\n return\n Image.fromarray(laplacian_sharping_arr).save(self.LAPLACIAN_SHARPING_PATH)\n Image.fromarray(laplacian_sharping_mask).save(self.LAPLACIAN_SHARPING_MASK_PATH)\n self.laplacian_sharping_image = tk.PhotoImage(file=self.LAPLACIAN_SHARPING_PATH)\n self.right_canvas.create_image(20, 20, anchor='nw', image=self.laplacian_sharping_image)\n self.right_hist_button.config(\n command=lambda: histogram_invoker(file_path=self.LAPLACIAN_SHARPING_PATH))\n\n self.right_show_mask_button.config(command=lambda: mask_invoker(file_path=self.LAPLACIAN_SHARPING_MASK_PATH))\n self.right_hist_button.pack()\n self.right_show_mask_button.pack()\n \n def do_median_filtering(self):\n self.right_show_mask_button.config(command=lambda: mask_invoker(file_path=default_invoker))\n self.right_show_mask_button.pack_forget()\n median_filtering_arr = order_statistic(self.GIF_PATH, 3)\n if median_filtering_arr is None:\n messagebox.showerror('Invalid 
Input', 'Please input valid value')\n return\n Image.fromarray(np.uint8(median_filtering_arr)).save(self.MEDIAN_FILTERING_PATH)\n self.median_filtering_image = tk.PhotoImage(file=self.MEDIAN_FILTERING_PATH)\n self.right_canvas.create_image(20, 20, anchor='nw', image=self.median_filtering_image)\n self.right_hist_button.config(\n command=lambda: histogram_invoker(file_path=self.MEDIAN_FILTERING_PATH))\n self.right_hist_button.pack()\n \n def do_highboost(self):\n confirm_window = tk.Toplevel(self.root)\n confirm_window.geometry('300x200')\n kernel_label = tk.Label(confirm_window, text=\"Please input highboost kernel size:\")\n kernel_label.pack()\n kernel_entry = tk.Entry(confirm_window, show=None)\n kernel_entry.bind('', self.highboost_kernel_size_assign)\n kernel_entry.pack()\n constant_label = tk.Label(confirm_window, text=\"Please input highboost constant k value:\")\n constant_label.pack()\n constant_entry = tk.Entry(confirm_window, show=None)\n constant_entry.bind('', self.highboost_constant_assign)\n constant_entry.pack()\n sigma_label = tk.Label(confirm_window, text=\"Please input highboost sigma value:\")\n sigma_label.pack()\n sigma_entry = tk.Entry(confirm_window, show=None)\n sigma_entry.bind('', self.highboost_sigma_assign)\n sigma_entry.pack()\n confirm_button = tk.Button(confirm_window, text='Confirm', width=10, height=3, command=self.highboost__invoker)\n confirm_button.pack()\n\n def highboost_kernel_size_assign(self, evt):\n w = evt.widget\n self.highboost_kernel_size = w.get()\n\n def highboost_constant_assign(self, evt):\n w = evt.widget\n self.highboost_constant_k = w.get()\n\n def highboost_sigma_assign(self, evt):\n w = evt.widget\n self.highboost_sigma = w.get()\n\n def highboost__invoker(self):\n highboost_arr, highboost_mask = highboost(self.GIF_PATH, self.highboost_constant_k, self.highboost_kernel_size, self.highboost_sigma)\n if highboost_arr is None:\n messagebox.showerror('Invalid Input', 'Please input valid value')\n return\n Image.fromarray(highboost_arr).save(self.HIGHBOOST_PATH)\n Image.fromarray(highboost_mask).save(self.HIGHBOOST_MASK_PATH)\n self.highboost_image = tk.PhotoImage(file=self.HIGHBOOST_PATH)\n self.right_canvas.create_image(20, 20, anchor='nw', image=self.highboost_image)\n self.right_hist_button.config(\n command=lambda: histogram_invoker(file_path=self.HIGHBOOST_PATH))\n self.right_show_mask_button.config(command=lambda: mask_invoker(file_path=self.HIGHBOOST_MASK_PATH))\n self.right_hist_button.pack()\n self.right_show_mask_button.pack()\n\n", "repo_name": "Jamies13syx/ImageProcessing", "sub_path": "IPTK/window_init.py", "file_name": "window_init.py", "file_ext": "py", "file_size_in_byte": 22291, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "point_processing.histogram", "line_number": 16, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 18, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 18, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 30, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 30, "usage_type": "name"}, {"api_name": "skimage.io.imread", "line_number": 36, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", 
"line_number": 37, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 42, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 42, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 48, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 48, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 116, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 119, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 121, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 123, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 125, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 128, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 130, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 132, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 133, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 136, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 137, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 142, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 151, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 153, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 154, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 165, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 165, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 168, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 168, "usage_type": "name"}, {"api_name": "tkinter.PhotoImage", "line_number": 171, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 176, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 176, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 179, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 179, "usage_type": "name"}, {"api_name": "point_processing.negative", "line_number": 185, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 187, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 187, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 189, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 189, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 189, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 190, "usage_type": "call"}, {"api_name": "tkinter.Toplevel", "line_number": 196, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 198, "usage_type": "call"}, {"api_name": "tkinter.Listbox", "line_number": 200, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 205, "usage_type": "call"}, {"api_name": 
"point_processing.intensity_level_slicing", "line_number": 219, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 221, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 221, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 223, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 223, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 223, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 224, "usage_type": "call"}, {"api_name": "point_processing.contrast_stretching", "line_number": 232, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 234, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 234, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 236, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 236, "usage_type": "name"}, {"api_name": "tkinter.PhotoImage", "line_number": 237, "usage_type": "call"}, {"api_name": "tkinter.Toplevel", "line_number": 244, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 246, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 248, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 251, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 253, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 256, "usage_type": "call"}, {"api_name": "point_processing.power_law", "line_number": 271, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 273, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 273, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 275, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 275, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 275, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 276, "usage_type": "call"}, {"api_name": "point_processing.global_histogram_equalization", "line_number": 285, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 287, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 287, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 289, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 289, "usage_type": "name"}, {"api_name": "tkinter.PhotoImage", "line_number": 290, "usage_type": "call"}, {"api_name": "point_processing.local_histogram_equalization", "line_number": 299, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 301, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 301, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 303, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 303, "usage_type": "name"}, {"api_name": "tkinter.PhotoImage", "line_number": 304, "usage_type": "call"}, {"api_name": "tkinter.Toplevel", "line_number": 311, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 313, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 315, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 318, "usage_type": "call"}, {"api_name": "neighborhood_processing.box_smoothing", "line_number": 329, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 331, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 
331, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 333, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 333, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 333, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 334, "usage_type": "call"}, {"api_name": "tkinter.Toplevel", "line_number": 341, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 343, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 345, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 348, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 350, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 353, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 355, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 358, "usage_type": "call"}, {"api_name": "neighborhood_processing.gaussian_smoothing", "line_number": 374, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 376, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 376, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 378, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 378, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 378, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 379, "usage_type": "call"}, {"api_name": "neighborhood_processing.laplacian_sharping", "line_number": 386, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 388, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 388, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 390, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 390, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 391, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 391, "usage_type": "name"}, {"api_name": "tkinter.PhotoImage", "line_number": 392, "usage_type": "call"}, {"api_name": "neighborhood_processing.order_statistic", "line_number": 404, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 406, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 406, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 408, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 408, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 408, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 409, "usage_type": "call"}, {"api_name": "tkinter.Toplevel", "line_number": 416, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 418, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 420, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 423, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 425, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 428, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 430, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 433, "usage_type": "call"}, {"api_name": "neighborhood_processing.highboost", "line_number": 449, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 451, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 451, "usage_type": "name"}, {"api_name": 
"PIL.Image.fromarray", "line_number": 453, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 453, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 454, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 454, "usage_type": "name"}, {"api_name": "tkinter.PhotoImage", "line_number": 455, "usage_type": "call"}]} +{"seq_id": "32222504422", "text": "import pandas as pd\nimport numpy as np\nimport yaml\nimport os\nfrom typing import Text\nimport argparse\nimport tensorflow as tf\nfrom transformers import BertTokenizer\n\ndef test(config_path: Text) -> None:\n with open(\"params.yaml\") as config_file:\n config = yaml.safe_load(config_file)\n \n model = tf.keras.models.load_model('/Users/zulikahlatief/Desktop/personal/NLP/Project1: Sentimental analysis/results/sentiment_model')\n print('Summary of model: ', model.summary())\n\n \"\"\"Before making predictions we need to format our data, which requires two steps:\n - Tokenizing the data using the bert-base-cased tokenizer.\n - Transforming the data into a dictionary containing 'input_ids' and 'attention_mask' tensors.\"\"\"\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-cased')\n\n def prep_data(text):\n tokens = tokenizer.encode_plus(text, max_length=512,\n truncation=True, padding='max_length', add_special_tokens=True,\n return_token_type_ids= False, return_tensors='tf')\n\n return {'input_ids': tf.cast(tokens['input_ids'], tf.float64),\n 'attention_mask': tf.cast(tokens['attention_mask'], tf.float64)}\n\n #testing with hello world text\n probs = model.predict(prep_data('hello world'))[0]\n print('Model output class when given hello world as input text: ', np.argmax(probs))\n\n # making predictions on test set\n pd.set_option('display.max_colwidth', None)\n df = pd.read_csv('/Users/zulikahlatief/Desktop/personal/NLP/Project1: Sentimental analysis/data/test.tsv', sep='\\t')\n print('Test data set: ', df.head())\n df = df.drop_duplicates(subset=['SentenceId'], keep='first')\n\n #make predictions\n df['Sentiment'] = None\n\n for i, row in df.iterrows():\n # get token tensors\n tokens = prep_data(row['Phrase'])\n # get probabilities\n probs = model.predict(tokens)\n # find argmax for winning class\n pred = np.argmax(probs)\n # add to dataframe\n df.at[i, 'Sentiment'] = pred\n\n print('Sentiments predicted on test set: ', df.head())\n print('Sentiments predicted on test set: ', df.tail())\n\n#to run from CLI use a constructer that allows to parse config file as an argument to the data_load function\nif __name__ == '__main__':\n args_parser = argparse.ArgumentParser()\n args_parser.add_argument('--config', dest='config', required=True)\n args = args_parser.parse_args()\n\n test(config_path=args.config)", "repo_name": "Zulaikha1207/Attention-It-s-Transformers---NLP-projects", "sub_path": "Project1: Sentimental analysis/src/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 2460, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "21", "api": [{"api_name": "typing.Text", "line_number": 10, "usage_type": "name"}, {"api_name": "yaml.safe_load", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 14, "usage_type": "attribute"}, {"api_name": "transformers.BertTokenizer.from_pretrained", "line_number": 21, "usage_type": "call"}, {"api_name": "transformers.BertTokenizer", "line_number": 21, 
"usage_type": "name"}, {"api_name": "tensorflow.cast", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.float64", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.float64", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 50, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "5399457162", "text": "from torch.utils.data import Dataset\nimport numpy as np\nimport json\nimport os\nfrom torchvision import transforms as tt\nfrom torchvision.datasets import MNIST, SVHN\nfrom PIL import Image\nimport torch\nimport h5py\nimport multiprocessing\nimport sys\nfrom synbols import stratified_splits\n\nimport copy\nimport requests\nsys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))\nfrom tools.download_dataset import get_data_path_or_download\n\ndef get_dataset(splits, data_root, exp_dict):\n dataset_dict = exp_dict[\"dataset\"]\n if dataset_dict[\"backend\"] == \"synbols_hdf5\":\n full_path = get_data_path_or_download(exp_dict[\"dataset\"][\"name\"],\n data_root=data_root)\n \n # For debugging purposes, reference_dataset should be None for most usages\n reference_dataset = dataset_dict.get(\"reference_dataset\", None)\n if reference_dataset is not None:\n reference_data = SynbolsHDF5(reference_dataset,\n dataset_dict[\"task\"],\n mask=dataset_dict[\"mask\"],\n trim_size=dataset_dict.get(\n \"trim_size\", None),\n reference_mask=dataset_dict[\"reference_mask\"])\n\n data = SynbolsHDF5(full_path,\n dataset_dict[\"task\"],\n mask=dataset_dict[\"mask\"],\n trim_size=dataset_dict.get(\"trim_size\", None),\n raw_labels=dataset_dict.get(\"raw_labels\", False))\n ret = []\n for split in splits:\n transform = [tt.ToPILImage()]\n if dataset_dict[\"augmentation\"] and split == \"train\":\n transform += [tt.RandomAffine(10, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-2, 2, -2, 2), resample=Image.BICUBIC, fillcolor=0),\n # tt.RandomHorizontalFlip(),\n tt.ColorJitter(0.4, 0.4, 0.4, 0.4)]\n transform += [tt.ToTensor(),\n tt.Normalize([0.5] * dataset_dict[\"channels\"],\n [0.5] * dataset_dict[\"channels\"])]\n transform = tt.Compose(transform)\n if reference_dataset is not None and split in [\"train\", \"val\"]:\n dataset = SynbolsSplit(\n reference_data, split, transform=transform)\n else:\n dataset = SynbolsSplit(data, split, transform=transform)\n ret.append(dataset)\n exp_dict[\"num_classes\"] = len(ret[0].labelset) # FIXME: this is hacky\n return ret\n elif dataset_dict[\"backend\"] == \"mnist\":\n ret = []\n exp_dict[\"num_classes\"] = 10 # FIXME: this is hacky\n for split in splits:\n transform = []\n if dataset_dict[\"augmentation\"] and split == \"train\":\n transform += [tt.Resize(dataset_dict[\"height\"]),\n tt.RandomAffine(10, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-2, 2, -2, 2), resample=Image.BICUBIC, fillcolor=0)]\n # tt.RandomHorizontalFlip()]\n else:\n transform += [tt.Resize(dataset_dict[\"height\"])]\n transform += [tt.ToTensor(),\n tt.Normalize([0.5], [0.5])]\n transform = tt.Compose(transform)\n ret.append(MNIST(data_root,\n train=(split == \"train\"), transform=transform, download=True))\n return ret\n elif dataset_dict[\"backend\"] == 
\"svhn\":\n ret = []\n exp_dict[\"num_classes\"] = 10 # FIXME: this is hacky\n for split in splits:\n transform = []\n if dataset_dict[\"augmentation\"] and split == \"train\":\n transform += [tt.RandomAffine(10, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-2, 2, -2, 2), resample=Image.BICUBIC, fillcolor=0),\n # tt.RandomHorizontalFlip(),\n tt.ColorJitter(0.4, 0.4, 0.4, 0.4)]\n transform += [tt.ToTensor(),\n tt.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n transform = tt.Compose(transform)\n split_dict = {'train': 'train', 'test': 'test', 'val': 'test'}\n ret += [SVHN(data_root,\n split=split_dict[split], transform=transform, download=True)]\n return ret\n else:\n raise ValueError\n\n\ndef _read_json_key(args):\n string, key = args\n return json.loads(string)[key]\n\ndef get_stratified(values, fn, ratios=[0.6, 0.2, 0.2], tomap=True):\n vfield = list(map(fn, values))\n if isinstance(vfield[0], float):\n pmap = stratified_splits.percentile_partition(vfield, ratios)\n else:\n pmap = stratified_splits.unique_class_based_partition(vfield, ratios)\n if tomap:\n return stratified_splits.partition_map_to_mask(pmap)\n else:\n return pmap\n\nclass SynbolsHDF5:\n \"\"\"HDF5 Backend Class\"\"\"\n def __init__(self, path, task, ratios=[0.6, 0.2, 0.2], mask=None, trim_size=None, raw_labels=False, reference_mask=None):\n \"\"\"Constructor: loads data and parses labels.\n\n Args:\n path (str): path where the data is stored (see full_path above)\n task (str): 'char', 'font', or the field of choice \n ratios (list, optional): The train/val/test split ratios. Defaults to [0.6, 0.2, 0.2].\n mask (ndarray, optional): Mask with the data partition. Defaults to None.\n trim_size (int, optional): Trim the dataset to a smaller size, for debugging speed. Defaults to None.\n raw_labels (bool, optional): Whether to include all the attributes of the synbols for each batch. Defaults to False.\n reference_mask (ndarray, optional): If train and validation are done with two different datasets, the \n reference mask specifies the partition of the training data. 
Defaults to None.\n\n Raises:\n ValueError: if a named split mask is requested but the file has no 'split' group\n \"\"\"\n self.path = path\n self.task = task\n self.ratios = ratios\n print(\"Loading hdf5...\")\n with h5py.File(path, 'r') as data:\n self.x = data['x'][...]\n y = data['y'][...]\n print(\"Converting json strings to labels...\")\n with multiprocessing.Pool(8) as pool:\n self.y = pool.map(json.loads, y)\n print(\"Done converting.\")\n if isinstance(mask, str):\n if \"split\" in data:\n if mask in data['split'] and mask == \"random\":\n self.mask = data[\"split\"][mask][...]\n else:\n self.mask = self.parse_mask(mask, ratios=ratios)\n else:\n raise ValueError\n else:\n self.mask = mask\n\n full_labels = self.y # keep the full attribute dicts; self.y is reduced to the task label on the next line\n self.y = np.array([_y[task] for _y in self.y])\n\n if raw_labels:\n print(\"Parsing raw labels...\")\n raw_labels = copy.deepcopy(full_labels) # copy the full dicts, not the task-only labels\n self.raw_labels = []\n self.raw_labelset = {k: [] for k in raw_labels[0].keys()}\n for item in raw_labels:\n ret = {}\n for key in item.keys():\n if isinstance(item[key], str) or isinstance(item[key], int):\n self.raw_labelset[key].append(item[key]) # collect observed values so categorical fields can be indexed below\n ret[key] = item[key]\n\n self.raw_labels.append(ret)\n str2int = {}\n for k in self.raw_labelset.keys():\n v = self.raw_labelset[k]\n if len(v) > 0:\n v = list(sorted(set(v)))\n self.raw_labelset[k] = v\n str2int[k] = {k: i for i, k in enumerate(v)}\n for item in self.raw_labels:\n for k in str2int.keys():\n item[k] = str2int[k][item[k]]\n\n print(\"Done parsing raw labels.\")\n else:\n self.raw_labels = None\n\n self.trim_size = trim_size\n if trim_size is not None and len(self.x) > self.trim_size:\n self.mask = self.trim_dataset(self.mask)\n self.reference_mask = reference_mask\n if self.reference_mask is not None:\n self.mask[:, [0, 1]] = np.load(self.reference_mask)[...]\n print(\"Done reading hdf5.\")\n\n def trim_dataset(self, mask, train_size=60000, val_test_size=20000):\n labelset = np.sort(np.unique(self.y))\n counts = np.array([np.count_nonzero(self.y == y) for y in labelset])\n imxclass_train = int(np.ceil(train_size / len(labelset)))\n imxclass_val_test = int(np.ceil(val_test_size / len(labelset)))\n ind_train = np.arange(mask.shape[0])[mask[:, 0]]\n y_train = self.y[ind_train]\n ind_train = np.concatenate([np.random.permutation(ind_train[y_train == y])[\n :imxclass_train] for y in labelset], 0)\n ind_val = np.arange(mask.shape[0])[mask[:, 1]]\n y_val = self.y[ind_val]\n ind_val = np.concatenate([np.random.permutation(ind_val[y_val == y])[\n :imxclass_val_test] for y in labelset], 0)\n ind_test = np.arange(mask.shape[0])[mask[:, 2]]\n y_test = self.y[ind_test]\n ind_test = np.concatenate([np.random.permutation(ind_test[y_test == y])[\n :imxclass_val_test] for y in labelset], 0)\n current_mask = np.zeros_like(mask)\n current_mask[ind_train, 0] = True\n current_mask[ind_val, 1] = True\n current_mask[ind_test, 2] = True\n return current_mask\n\n def parse_mask(self, mask, ratios):\n args = mask.split(\"_\")[1:]\n if \"stratified\" in mask:\n mask = 1\n for arg in args:\n if arg == 'translation-x':\n def fn(x): return x['translation'][0]\n elif arg == 'translation-y':\n def fn(x): return x['translation'][1]\n else:\n def fn(x): return x[arg]\n mask *= get_stratified(self.y, fn,\n ratios=[ratios[1], ratios[0], ratios[2]])\n mask = mask[:, [1, 0, 2]]\n elif \"compositional\" in mask:\n partition_map = None\n if len(args) != 2:\n raise RuntimeError(\n \"Compositional splits must contain two fields to compose\")\n for arg in args:\n if arg == 'translation-x':\n def fn(x): return x['translation'][0]\n elif arg == 'translation-y':\n def fn(x): return x['translation'][1]\n 
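# any other field name is looked up directly in the per-sample label dict (a scalar attribute is assumed)\n 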
else:\n def fn(x): return x[arg]\n if partition_map is None:\n partition_map = get_stratified(self.y, fn, tomap=False)\n else:\n _partition_map = get_stratified(self.y, fn, tomap=False)\n partition_map = stratified_splits.compositional_split(\n _partition_map, partition_map)\n partition_map = partition_map.astype(bool)\n mask = np.zeros_like(partition_map)\n for i, split in enumerate(np.argsort(partition_map.astype(int).sum(0))[::-1]):\n mask[:, i] = partition_map[:, split]\n else:\n raise ValueError\n return mask.astype(bool)\n\n\nclass SynbolsSplit(Dataset):\n def __init__(self, dataset, split, transform=None):\n \"\"\"Given a Backend (dataset), it splits the data into train, val, and test.\n\n\n Args:\n dataset (object): backend to load, it should contain the following attributes:\n - x, y, mask, ratios, path, task, raw_labels\n split (str): train, val, or test\n transform (torchvision.transforms, optional): A composition of torchvision transforms. Defaults to None.\n \"\"\"\n self.path = dataset.path\n self.task = dataset.task\n self.mask = dataset.mask\n if dataset.raw_labels is not None:\n self.raw_labelset = dataset.raw_labelset\n self.raw_labels = dataset.raw_labels\n else:\n # always define these attributes: split_data and __getitem__ test them\n self.raw_labelset = None\n self.raw_labels = None\n self.ratios = dataset.ratios\n self.split = split\n if transform is None:\n self.transform = lambda x: x\n else:\n self.transform = transform\n self.split_data(dataset.x, dataset.y, dataset.mask, dataset.ratios)\n\n def split_data(self, x, y, mask, ratios, rng=np.random.RandomState(42)):\n if mask is None:\n if self.split == 'train':\n start = 0\n end = int(ratios[0] * len(x))\n elif self.split == 'val':\n start = int(ratios[0] * len(x))\n end = int((ratios[0] + ratios[1]) * len(x))\n elif self.split == 'test':\n start = int((ratios[0] + ratios[1]) * len(x))\n end = len(x)\n indices = rng.permutation(len(x))\n indices = indices[start:end]\n else:\n mask = mask[:, [\"train\", \"val\", \"test\"].index(self.split)]\n indices = np.arange(len(y)) # 0....nsamples\n indices = indices[mask]\n self.labelset = list(sorted(set(y)))\n self.y = np.array([self.labelset.index(_y) for _y in y])\n self.x = x[indices]\n self.y = self.y[indices]\n if self.raw_labels is not None:\n self.raw_labels = np.array(self.raw_labels)[indices]\n\n def __getitem__(self, item):\n if self.raw_labels is None:\n return self.transform(self.x[item]), self.y[item]\n else:\n return self.transform(self.x[item]), self.y[item], self.raw_labels[item]\n\n def __len__(self):\n return len(self.x)\n", "repo_name": "ServiceNow/synbols-benchmarks", "sub_path": "classification/datasets.py", "file_name": "datasets.py", "file_ext": "py", "file_size_in_byte": 13921, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "21", "api": [{"api_name": "sys.path.insert", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tools.download_dataset.get_data_path_or_download", "line_number": 22, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToPILImage", "line_number": 42, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 42, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomAffine", "line_number": 44, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 44, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 44, "usage_type": 
"attribute"}, {"api_name": "PIL.Image", "line_number": 44, "usage_type": "name"}, {"api_name": "torchvision.transforms.ColorJitter", "line_number": 46, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 46, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 47, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 47, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 48, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 48, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 50, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 50, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 65, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 65, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomAffine", "line_number": 66, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 66, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 66, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 66, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 69, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 69, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 70, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 70, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 71, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 71, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 72, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 72, "usage_type": "name"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 73, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomAffine", "line_number": 82, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 82, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 82, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 82, "usage_type": "name"}, {"api_name": "torchvision.transforms.ColorJitter", "line_number": 84, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 84, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 85, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 85, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 86, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 86, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 87, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 87, "usage_type": "name"}, {"api_name": "torchvision.datasets.SVHN", "line_number": 89, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 98, "usage_type": "call"}, {"api_name": "synbols.stratified_splits.percentile_partition", "line_number": 103, "usage_type": "call"}, {"api_name": "synbols.stratified_splits", "line_number": 103, "usage_type": "name"}, {"api_name": "synbols.stratified_splits.unique_class_based_partition", "line_number": 105, "usage_type": "call"}, {"api_name": "synbols.stratified_splits", "line_number": 
105, "usage_type": "name"}, {"api_name": "synbols.stratified_splits.partition_map_to_mask", "line_number": 107, "usage_type": "call"}, {"api_name": "synbols.stratified_splits", "line_number": 107, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 133, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 137, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 138, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 196, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 200, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 204, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 206, "usage_type": "call"}, {"api_name": "synbols.stratified_splits.compositional_split", "line_number": 242, "usage_type": "call"}, {"api_name": "synbols.stratified_splits", "line_number": 242, "usage_type": "name"}, {"api_name": "numpy.zeros_like", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 253, "usage_type": "name"}, {"api_name": "numpy.random.RandomState", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 278, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 300, "usage_type": "call"}]} +{"seq_id": "22568384771", "text": "\"\"\"Contains pre-built processors for data labeling/processing.\"\"\"\nfrom __future__ import annotations\n\nimport abc\nimport copy\nimport inspect\nimport json\nimport math\nimport os\nimport random\nimport types\nimport warnings\nfrom collections import Counter\nfrom typing import Any, Generator, Iterable, TypeVar, cast\n\nimport numpy as np\nimport numpy.typing as npt\nimport pkg_resources\n\ndefault_labeler_dir = pkg_resources.resource_filename(\"resources\", \"labelers\")\n\nProcessor = TypeVar(\"Processor\", bound=\"BaseDataProcessor\")\n\n\nclass AutoSubRegistrationMeta(abc.ABCMeta):\n \"\"\"For 
registering subclasses.\"\"\"\n\n def __new__(\n cls, clsname: str, bases: tuple[type, ...], attrs: dict[str, object]\n ) -> Any:\n \"\"\"Create AutoSubRegistration object.\"\"\"\n new_class: Any = super().__new__(cls, clsname, bases, attrs)\n new_class._register_subclass()\n return new_class\n\n\nclass BaseDataProcessor(metaclass=abc.ABCMeta):\n \"\"\"Abstract Data processing class.\"\"\"\n\n processor_type: str # override this\n __subclasses: dict[str, type[BaseDataProcessor]] = {}\n\n def __init__(self, **parameters: Any) -> None:\n \"\"\"Initialize BaseDataProcessor object.\"\"\"\n self._validate_parameters(parameters)\n self._parameters = parameters\n\n @classmethod\n def _register_subclass(cls) -> None:\n \"\"\"Register a subclass for the class factory.\"\"\"\n if not inspect.isabstract(cls):\n cls.__subclasses[cls.__name__.lower()] = cls\n\n @classmethod\n def get_class(\n cls: type[BaseDataProcessor], class_name: str\n ) -> type[BaseDataProcessor] | None:\n \"\"\"Get class of BaseDataProcessor object.\"\"\"\n return cls.__subclasses.get(class_name.lower(), None)\n\n def __eq__(self, other: object) -> bool:\n \"\"\"\n Check if two processors are equal with one another.\n\n :param self: a processor\n :param other: a processor\n :type self: BaseDataProcessor\n :type other: BaseDataProcessor\n :return: Whether or not self and other are equal\n :rtype: bool\n \"\"\"\n if (\n type(self) != type(other)\n or not isinstance(other, BaseDataProcessor)\n or self._parameters != other._parameters\n ):\n return False\n return True\n\n @abc.abstractmethod\n def _validate_parameters(self, parameters: Any) -> None:\n \"\"\"Validate class input parameters for processing.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abc.abstractmethod\n def help(cls) -> None:\n \"\"\"\n Describe alterable parameters.\n\n Input data formats for preprocessors.\n Output data formats for postprocessors.\n\n :return: None\n \"\"\"\n raise NotImplementedError()\n\n def get_parameters(self, param_list: list[str] | None = None) -> dict:\n \"\"\"\n Return a dict of parameters from the model given a list.\n\n :param param_list: list of parameters to retrieve from the model.\n :type param_list: list\n :return: dict of parameters\n \"\"\"\n if param_list is None:\n return copy.deepcopy(self._parameters)\n\n param_dict = {}\n for param in param_list:\n if param in self._parameters:\n param_dict[param] = self._parameters.get(param)\n else:\n raise ValueError(\n \"`{}` does not exist as a parameter in {}.\".format(\n param, self.__class__.__name__\n )\n )\n return param_dict\n\n def set_params(self, **kwargs: Any) -> None:\n \"\"\"Set the parameters if they exist given kwargs.\"\"\"\n # first check if any parameters are invalid\n self._validate_parameters(kwargs)\n\n for param in kwargs:\n self._parameters[param] = kwargs[param]\n\n @abc.abstractmethod\n def process(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Process data.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def load_from_disk(cls: type[Processor], dirpath: str) -> Processor:\n \"\"\"Load data processor from a given path on disk.\"\"\"\n with open(os.path.join(dirpath, cls.processor_type + \"_parameters.json\")) as fp:\n parameters = json.load(fp)\n\n return cls(**parameters)\n\n @classmethod\n def load_from_library(cls, name: str) -> BaseDataProcessor:\n \"\"\"Load data processor from within the library.\"\"\"\n return cls.load_from_disk(os.path.join(default_labeler_dir, name))\n\n def _save_processor(self, dirpath: str) -> None:\n \"\"\"Save data 
processor.\"\"\"\n with open(\n os.path.join(dirpath, self.processor_type + \"_parameters.json\"), \"w\"\n ) as fp:\n json.dump(self.get_parameters(), fp)\n\n def save_to_disk(self, dirpath: str) -> None:\n \"\"\"Save data processor to a path on disk.\"\"\"\n self._save_processor(dirpath)\n\n\nclass BaseDataPreprocessor(BaseDataProcessor):\n \"\"\"Abstract Data preprocessing class.\"\"\"\n\n processor_type = \"preprocessor\"\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, **parameters: Any) -> None:\n \"\"\"Initialize BaseDataPreprocessor object.\"\"\"\n super().__init__(**parameters)\n\n @abc.abstractmethod\n def process(\n self,\n data: np.ndarray,\n labels: np.ndarray | None = None,\n label_mapping: dict[str, int] | None = None,\n batch_size: int = 32,\n ) -> Generator[tuple[np.ndarray, np.ndarray] | np.ndarray, None, None] | tuple[\n np.ndarray, np.ndarray\n ] | np.ndarray:\n \"\"\"Preprocess data.\"\"\"\n raise NotImplementedError()\n\n\nclass BaseDataPostprocessor(BaseDataProcessor):\n \"\"\"Abstract Data postprocessing class.\"\"\"\n\n processor_type = \"postprocessor\"\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, **parameters: Any) -> None:\n \"\"\"Initialize BaseDataPostprocessor object.\"\"\"\n super().__init__(**parameters)\n\n @abc.abstractmethod\n def process(\n self,\n data: np.ndarray,\n results: dict,\n label_mapping: dict[str, int],\n ) -> dict:\n \"\"\"Postprocess data.\"\"\"\n raise NotImplementedError()\n\n\nclass DirectPassPreprocessor(BaseDataPreprocessor, metaclass=AutoSubRegistrationMeta):\n \"\"\"Subclass of BaseDataPreprocessor for preprocessing data.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize the DirectPassPreprocessor class.\"\"\"\n super().__init__()\n\n def _validate_parameters(self, parameters: dict) -> None:\n \"\"\"\n Validate params set in processor and raise error if issues exist.\n\n :param parameters: parameter dict containing the following parameters:\n N/A\n :type parameters: dict\n :return: None\n :rtype: None\n \"\"\"\n if parameters:\n raise ValueError(\"`DirectPassPreprocessor` has no parameters.\")\n\n @classmethod\n def help(cls) -> None:\n \"\"\"\n Describe alterable parameters.\n\n Input data formats for preprocessors.\n Output data formats for postprocessors.\n\n :return: None\n \"\"\"\n help_str = (\n cls.__name__ + \"\\n\\n\" + \"Parameters:\\n\"\n \" There are no parameters for this processor.\"\n \"\\nProcess Input Format:\\n\"\n \" There is no required format for data or labels parameters for \"\n \" this processor. 
Please refer to the Model for input format.\"\n )\n print(help_str)\n\n def process(\n self,\n data: np.ndarray,\n labels: np.ndarray | None = None,\n label_mapping: dict[str, int] | None = None,\n batch_size: int = 32,\n ) -> tuple[np.ndarray, np.ndarray] | np.ndarray:\n \"\"\"Preprocess data.\"\"\"\n if labels is not None:\n return data, labels\n return data\n\n\nclass CharPreprocessor(BaseDataPreprocessor, metaclass=AutoSubRegistrationMeta):\n \"\"\"Subclass of BaseDataPreprocessor for preprocessing char data.\"\"\"\n\n def __init__(\n self,\n max_length: int = 3400,\n default_label: str = \"UNKNOWN\",\n pad_label: str = \"PAD\",\n flatten_split: float = 0,\n flatten_separator: str = \" \",\n is_separate_at_max_len: bool = False,\n **kwargs: Any,\n ) -> None:\n \"\"\"\n Initialize the CharPreprocessor class.\n\n :param max_length: Maximum char length in a sample.\n :type max_length: int\n :param default_label: Key for label_mapping that is the default label\n :type default_label: string (could be int, char, etc.)\n :param pad_label: Key for label_mapping that is the pad label\n :type pad_label: string (could be int, char, etc.)\n :param flatten_split: approximate output of split between flattened and\n non-flattened characters, value between [0, 1]. When the current\n flattened split becomes more than the `flatten_split` value, any\n leftover sample or subsequent samples will be non-flattened until\n the current flattened split is below the `flatten_split` value\n :type flatten_split: float\n :param flatten_separator: separator used to put between flattened\n samples.\n :type flatten_separator: str\n :param is_separate_at_max_len: if true, separates at max_length,\n otherwise at nearest separator\n :type is_separate_at_max_len: bool\n \"\"\"\n super().__init__(\n max_length=max_length,\n pad_label=pad_label,\n default_label=default_label,\n flatten_split=flatten_split,\n flatten_separator=flatten_separator,\n is_separate_at_max_len=is_separate_at_max_len,\n **kwargs,\n )\n\n def _validate_parameters(self, parameters: dict) -> None:\n \"\"\"\n Validate params set in processor and raise error if issues exist.\n\n :param parameters: parameter dict containing the following parameters:\n max_length: Maximum char length in a sample.\n default_label: Key for label_mapping that is the default label\n pad_label: Key for label_mapping that is the pad label\n flatten_split: Approximate output of split between flattened and\n non-flattened characters, value between [0, 1]. 
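For example, flatten_split=0.5 aims for roughly half of the output characters to come from flattened samples. 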
When the current\n flattened split becomes more than the `flatten_split` value,\n any leftover sample or subsequent samples will be non-flattened\n until the current flattened split is below the `flatten_split`\n value\n flatten_separator: Separator used to put between flattened samples.\n is_separate_at_max_len: If true, separates at max_length, otherwise\n at nearest separator\n :type parameters: dict\n :return: None\n :rtype: None\n \"\"\"\n errors = []\n allowed_parameters = self.__class__.__init__.__code__.co_varnames[\n 1 : self.__class__.__init__.__code__.co_argcount\n ]\n for param in parameters:\n value = parameters[param]\n if param == \"max_length\" and (not isinstance(value, int) or value < 1):\n errors.append(\"`max_length` must be an int > 0\")\n elif param in [\"default_label\", \"pad_label\"] and not isinstance(value, str):\n errors.append(f\"`{param}` must be a string.\")\n elif param == \"flatten_split\" and (\n not isinstance(value, (int, float))\n or value < 0\n or value > 1\n or math.isnan(value)\n ):\n errors.append(\"`flatten_split` must be a float or int \" \">= 0 and <= 1\")\n elif param == \"flatten_separator\" and not isinstance(value, str):\n errors.append(\"`flatten_separator` must be a str\")\n elif param == \"is_separate_at_max_len\" and not isinstance(value, bool):\n errors.append(f\"`{param}` must be a bool\")\n elif param not in allowed_parameters:\n errors.append(f\"{param} is not an accepted parameter.\")\n\n if errors:\n raise ValueError(\"\\n\".join(errors))\n\n @classmethod\n def help(cls) -> None:\n \"\"\"\n Describe alterable parameters.\n\n Input data formats.\n Output data formats for postprocessors.\n\n :return: None\n \"\"\"\n param_docs = cast(str, inspect.getdoc(cls._validate_parameters))\n param_start_ind = param_docs.find(\"parameters:\\n\") + 12\n param_end_ind = param_docs.find(\":type parameters:\")\n\n help_str = (\n cls.__name__\n + \"\\n\\n\"\n + \"Parameters:\\n\"\n + param_docs[param_start_ind:param_end_ind]\n + \"\\nProcess Input Format:\\n\"\n \" data = List of strings ['1st string', 'second string', ...]\\n\"\n ' labels = [[(<start>, <end>, \"