diff --git "a/1347.jsonl" "b/1347.jsonl" new file mode 100644--- /dev/null +++ "b/1347.jsonl" @@ -0,0 +1,728 @@ +{"seq_id":"320057176","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pickle\n\n\n# In[2]:\n\nclass NeuralNetwork(object):\n def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Initialize weights\n self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5, \n (self.input_nodes, self.hidden_nodes))\n\n self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5, \n (self.hidden_nodes, self.output_nodes))\n self.lr = learning_rate\n \n self.activation_function = lambda x : 1/(1 + np.exp(-x)) # Replace 0 with your sigmoid calculation.\n \n \n \n def train(self, features, targets):\n\n n_records = features.shape[0]\n delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)\n delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)\n for X, y in zip(features, targets):\n hidden_inputs = np.dot(X,self.weights_input_to_hidden) # signals into hidden layer\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer\n \n final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output) # signals into final output layer\n final_outputs = final_inputs # signals from final output layer'this\n \n #### Implement the backward pass here ####\n ### Backward pass ###\n\n error = y - final_outputs # Output layer error is the difference between desired target and actual output.\n\n \n output_error_term = error * 1\n\n hidden_error = np.dot(self.weights_hidden_to_output, error)\n hidden_error_term = hidden_error * hidden_outputs * (1- hidden_outputs)\n\n delta_weights_i_h += hidden_error_term * X[:,None]\n \n # Weight step (hidden to output)\n hidden_outputs = hidden_outputs[:,None]\n delta_weights_h_o += output_error_term * hidden_outputs\n #print('delta hidden to out: ' + str(delta_weights_h_o))\n self.weights_hidden_to_output += self.lr * delta_weights_h_o/n_records # update hidden-to-output weights with gradient descent step\n self.weights_input_to_hidden += self.lr * delta_weights_i_h/n_records # update input-to-hidden weights with gradient descent step\n \n def run(self, features):\n\n hidden_inputs = np.dot(features,self.weights_input_to_hidden) # signals into hidden layer\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer\n \n final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output) # signals into final output layer\n final_outputs = (final_inputs) # signals from final output layer \n \n return final_outputs\n\n\n# In[3]:\n\nlearning_rate = 0.00\nhidden_nodes = 3200\noutput_nodes = 1\n\nN_i = 6\nnetwork = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)\n\n\n# In[4]:\n\nimport json\n\nweights_in = []\nwith open('weight_in_no_grades', 'rb') as f:\n weights_in = pickle.load(f)\n \nweights_out = []\nwith open('weight_out_no_grades', 'rb') as f:\n weights_out = pickle.load(f)\nscaled_features = {} \nwith open('variables.json', 'r') as f:\n try:\n scaled_features = json.load(f)\n # if the file is empty the ValueError will be thrown\n except ValueError:\n scaled_features = {}\n \nnetwork.weights_input_to_hidden = weights_in\nnetwork.weights_hidden_to_output = weights_out\n\n\n# 
In[5]:\n\n\n\nbasisweight = float(input('Enter a basisweight: '))\ncaliper = float(input('Enter a caliper: '))\ncull = float(input('Enter a cull low: '))\nmoisture = float(input('Enter a moisture: '))\nstfi = float(input('Enter a stfi: '))\ntsi = float(input('Enter a tsi: '))\nbasismean, basisstd = scaled_features['basisweight']\ncalipermean, caliperstd = scaled_features['caliper']\ncullmean, cullstd = scaled_features['cull']\nmoisturemean, moisturestd = scaled_features['moisture']\nstfimean, stfistd = scaled_features['stfi']\ntsimean, tsistd = scaled_features['tsi']\ninbasis = (basisweight - basismean)/basisstd\nincaliper = (caliper - calipermean)/caliperstd\nincull = (cull - cullmean)/cullstd\ninmoisture = (moisture - moisturemean)/moisturestd\ninstfi = (stfi - stfimean)/stfistd\nintsi = (tsi - tsimean)/tsistd\nrow = [intsi, instfi, incaliper, inmoisture, inbasis, incull]\ncolumns = ['tsi','stfi', 'caliper', 'moisture', 'basisweight', 'cull']\ndf = pd.DataFrame(columns=columns)\n'''\ndf['tsi'] = intsi\ndf['stfi'] = instfi\ndf['caliper'] = incaliper\ndf['moisture'] = inmoisture\ndf['basisweight'] = inbasis\ndf['cull'] = incull '''\ndf.loc[1] = row\nmean, std = scaled_features['rct']\nprediction = network.run(df.loc[1]).T*std+mean\nprint('Predicted Rct :' + str(prediction))\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n","sub_path":"T1+Predictor.py","file_name":"T1+Predictor.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"653054536","text":"\nfit_gaussians = False\nuse_plotly=True\n# data manipulation\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# visualization\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n### matplotlib inline\nsns.set()\n\nimport plotly.offline as py\npy.init_notebook_mode(connected=True)\nimport plotly.graph_objs as go\n\n# sklearn models & tools\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import make_scorer\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.decomposition import PCA\n\n# ignore warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\nimport os\nprint(os.listdir(\"../input\"))\ntrain = pd.read_csv(\"../input/train.csv\")\ntest = pd.read_csv(\"../input/test.csv\")\nsubmission = pd.read_csv(\"../input/sample_submission.csv\")\ntrain.shape\ntrain.head(10)\ntrain.target.dtype\norg_vars = train.drop([\"target\", \"ID_code\"], axis=1).columns.values\nlen(org_vars)\ntrain[\"Id\"] = train.index.values\noriginal_trainid = train.ID_code.values\n\ntrain.drop(\"ID_code\", axis=1, inplace=True)\ntrain.isnull().sum().sum()\ntest.head(10)\ntest.isnull().sum().sum()\ntest.shape\ntest[\"Id\"] = test.index.values\noriginal_testid = test.ID_code.values\n\ntest.drop(\"ID_code\", axis=1, inplace=True)\nsubmission.head()\nfig, ax = plt.subplots(1,2,figsize=(20,5))\nsns.countplot(train.target.values, ax=ax[0], palette=\"husl\")\nsns.violinplot(x=train.target.values, 
y=train.index.values, ax=ax[1], palette=\"husl\")\nsns.stripplot(x=train.target.values, y=train.index.values,\n jitter=True, ax=ax[1], color=\"black\", size=0.5, alpha=0.5)\nax[1].set_xlabel(\"Target\")\nax[1].set_ylabel(\"Index\");\nax[0].set_xlabel(\"Target\")\nax[0].set_ylabel(\"Counts\");\ntrain.loc[train.target==1].shape[0] / train.loc[train.target==0].shape[0]\ntrain_correlations = train.drop([\"target\"], axis=1).corr()\ntrain_correlations = train_correlations.values.flatten()\ntrain_correlations = train_correlations[train_correlations != 1]\n\ntest_correlations = test.corr()\ntest_correlations = test_correlations.values.flatten()\ntest_correlations = test_correlations[test_correlations != 1]\n\nplt.figure(figsize=(20,5))\nsns.distplot(train_correlations, color=\"Red\", label=\"train\")\nsns.distplot(test_correlations, color=\"Green\", label=\"test\")\nplt.xlabel(\"Correlation values found in train (except 1)\")\nplt.ylabel(\"Density\")\nplt.title(\"Are there correlations between features?\"); \nplt.legend();\nparameters = {'min_samples_leaf': [20, 25]}\nforest = RandomForestClassifier(max_depth=15, n_estimators=15)\ngrid = GridSearchCV(forest, parameters, cv=3, n_jobs=-1, verbose=2, scoring=make_scorer(roc_auc_score))\ngrid.fit(train.drop(\"target\", axis=1).values, train.target.values)\ngrid.best_score_\ngrid.best_params_\nn_top = 5 \nimportances = grid.best_estimator_.feature_importances_\nidx = np.argsort(importances)[::-1][0:n_top]\nfeature_names = train.drop(\"target\", axis=1).columns.values\n\nplt.figure(figsize=(20,5))\nsns.barplot(x=feature_names[idx], y=importances[idx]);\nplt.title(\"What are the top important features to start with?\");\nfig, ax = plt.subplots(n_top,2,figsize=(20,5*n_top))\n\nfor n in range(n_top):\n sns.distplot(train.loc[train.target==0, feature_names[idx][n]], ax=ax[n,0], color=\"Orange\", norm_hist=True)\n sns.distplot(train.loc[train.target==1, feature_names[idx][n]], ax=ax[n,0], color=\"Red\", norm_hist=True)\n sns.distplot(test.loc[:, feature_names[idx][n]], ax=ax[n,1], color=\"Mediumseagreen\", norm_hist=True)\n ax[n,0].set_title(\"Train {}\".format(feature_names[idx][n]))\n ax[n,1].set_title(\"Test {}\".format(feature_names[idx][n]))\n ax[n,0].set_xlabel(\"\")\n ax[n,1].set_xlabel(\"\")\ntop = train.loc[:, feature_names[idx]]\ntop.describe()\ntop = top.join(train.target)\nsns.pairplot(top, hue=\"target\")\ny_proba = grid.predict_proba(test.values)\ny_proba_train = grid.predict_proba(train.drop(\"target\", axis=1).values)\nfig, ax = plt.subplots(2,1,figsize=(20,8))\nsns.distplot(y_proba_train[train.target==1,1], norm_hist=True, color=\"mediumseagreen\",\n ax=ax[0], label=\"1\")\nsns.distplot(y_proba_train[train.target==0,1], norm_hist=True, color=\"coral\",\n ax=ax[0], label=\"0\")\nsns.distplot(y_proba[:,1], norm_hist=True,\n ax=ax[1], color=\"purple\")\nax[1].set_xlabel(\"Predicted probability for test data\");\nax[1].set_ylabel(\"Density\");\nax[0].set_xlabel(\"Predicted probability for train data\");\nax[0].set_ylabel(\"Density\");\nax[0].legend();\nsubmission[\"target\"] = y_proba\nsubmission.to_csv(\"submission_baseline_forest.csv\", index=False)\noriginal_features = train.drop([\"target\", \"Id\"], axis=1).columns.values\noriginal_features\nencoder = LabelEncoder()\nfor your_feature in top.drop(\"target\", axis=1).columns.values:\n train[your_feature + \"_qbinned\"] = pd.qcut(\n train.loc[:, your_feature].values,\n q=10,\n labels=False\n )\n train[your_feature + \"_qbinned\"] = encoder.fit_transform(\n train[your_feature + 
\"_qbinned\"].values.reshape(-1, 1)\n )\n \n \n train[your_feature + \"_rounded\"] = np.round(train.loc[:, your_feature].values)\n train[your_feature + \"_rounded_10\"] = np.round(10*train.loc[:, your_feature].values)\n train[your_feature + \"_rounded_100\"] = np.round(100*train.loc[:, your_feature].values)\ncv = StratifiedKFold(n_splits=3, random_state=0)\nforest = RandomForestClassifier(max_depth=15, n_estimators=15, min_samples_leaf=20,\n n_jobs=-1)\n\nscores = []\nX = train.drop(\"target\", axis=1).values\ny = train.target.values\n\nfor train_idx, test_idx in cv.split(X, y):\n x_train = X[train_idx]\n x_test = X[test_idx]\n y_train = y[train_idx]\n y_test = y[test_idx]\n \n forest.fit(x_train, y_train)\n y_proba = forest.predict_proba(x_test)\n y_pred = np.zeros(y_proba.shape[0])\n y_pred[y_proba[:,1] >= 0.166] = 1\n \n score = roc_auc_score(y_test, y_pred)\n print(score)\n scores.append(score)\n\nprint(np.round(np.mean(scores),4))\nprint(np.round(np.std(scores), 4))\nimportances = forest.feature_importances_\nfeature_names = train.drop(\"target\", axis=1).columns.values\nidx = np.argsort(importances)[::-1][0:30]\n\nplt.figure(figsize=(20,5))\nsns.barplot(x=feature_names[idx], y=importances[idx]);\nplt.xticks(rotation=90);\ncol1 = \"var_81\"\ncol2 = \"var_12\"\nN=70000\nfig, ax = plt.subplots(1,1, figsize=(20,10))\nsns.kdeplot(train[col1].values[0:N], train[col2].values[0:N])\nax.scatter(train[col1].values[0:N], train[col2].values[0:N],\n s=2, c=train.target.values[0:N], cmap=\"coolwarm\", alpha=0.5)\nax.set_xlabel(col1)\nax.set_xlabel(col2);\ncombined = train.drop([\"target\", \"Id\"], axis=1).append(test.drop(\"Id\", axis=1))\ncombined.shape\nmax_components = 10\nstart_components = 3\nn_splits = 3\nK = train.shape[0]\n\nX = train.loc[:, original_features].values[0:K]\ny = train.target.values[0:K]\nseeds = np.random.RandomState(0).randint(0,100, size=(max_components-start_components))\nseeds\nscaler = RobustScaler()\nX_scaled = scaler.fit_transform(X)\nif fit_gaussians:\n components = np.arange(start_components, max_components, 1)\n kf = StratifiedKFold(random_state=0, n_splits=n_splits)\n \n scores = np.zeros(shape=(max_components-start_components, n_splits))\n\n for m in components:\n split=0\n print(\"Components \" + str(m))\n for train_index, test_index in kf.split(X_scaled, y):\n print(\"Split \" + str(split))\n x_train, x_test = X_scaled[train_index], X_scaled[test_index]\n gm = GaussianMixture(n_components=m, random_state=seeds[m-start_components])\n gm.fit(x_train)\n score = gm.score(x_test)\n scores[m-start_components,split] = score\n split +=1\n \n print(np.round(np.mean(scores, axis=1), 2))\n print(np.round(np.std(scores, axis=1), 2))\n best_idx = np.argmax(np.mean(scores, axis=1))\n best_component = components[best_idx]\n best_seed = seeds[best_idx]\n print(\"Best component found \" + str(best_component))\n \nelse:\n best_seed = seeds[0]\n best_component = 3\nX = train.loc[:, original_features].values\n\ngm = GaussianMixture(n_components=best_component, random_state=best_seed)\nX_scaled = scaler.transform(X)\ngm.fit(X_scaled)\ntrain[\"cluster\"] = gm.predict(X_scaled)\ntrain[\"logL\"] = gm.score_samples(X_scaled)\ntest[\"cluster\"] = gm.predict(test.loc[:, original_features].values)\ntest[\"logL\"] = gm.score_samples(test.loc[:, original_features].values)\nfig, ax = plt.subplots(1,2,figsize=(20,5))\nsns.countplot(train.cluster, palette=\"Set2\", ax=ax[0])\nsns.distplot(train.logL, color=\"Dodgerblue\", ax=ax[1]);\ncluster_occupation = 
train.groupby(\"cluster\").target.value_counts() / train.groupby(\"cluster\").size() * 100\ncluster_occupation = cluster_occupation.loc[:, 1]\n\ntarget_occupation = train.groupby(\"target\").cluster.value_counts() / train.groupby(\"target\").size() * 100\ntarget_occupation = target_occupation.loc[1, :]\ntarget_occupation.index = target_occupation.index.droplevel(\"target\")\n\nfig, ax = plt.subplots(1,2,figsize=(20,5))\nax[0].set_title(\"How many % of the data per cluster has hot targets?\")\nsns.barplot(cluster_occupation.index, cluster_occupation.values, ax=ax[0], color=\"cornflowerblue\")\nax[0].set_ylabel(\"% of cluster data\")\nax[0].set_ylim([0,100])\n\nax[1].set_title(\"How many % of total hot targets are in one cluster?\")\nsns.barplot(target_occupation.index, target_occupation.values, ax=ax[1], color=\"tomato\")\nax[1].set_ylabel(\"% of hot targets\")\nax[1].set_ylim([0,100]);\nplt.figure(figsize=(20,5))\nfor n in range(gm.means_.shape[0]):\n plt.plot(gm.means_[n,:], 'o')\nplt.title(\"How do the gaussian means look like?\")\nplt.ylabel(\"Cluster mean value\")\nplt.xlabel(\"Feature\")","sub_path":"sources/santander-customer-transaction-eda.py","file_name":"santander-customer-transaction-eda.py","file_ext":"py","file_size_in_byte":10030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"38474439","text":"\n\"\"\"\npy.test module for unit testing the resample_spec step.\n\"\"\"\n\nimport os\nimport time\nimport pytest\nimport logging\nfrom glob import glob\nfrom astropy.io import fits\nfrom jwst.resample import ResampleSpecStep\n\nfrom nirspec_pipe_testing_tool.utils import change_filter_opaque2science\nfrom . import resample_utils\nfrom .. import core_utils\nfrom .. import TESTSDIR\n\n\n\n# HEADER\n__author__ = \"M. A. Pena-Guerrero\"\n__version__ = \"1.2\"\n\n# HISTORY\n# Nov 2017 - Version 1.0: initial version completed\n# Mar 2019 - Version 1.1: separated completion from other tests\n# Apr 2019 - Version 1.2: implemented logging capability\n\n\n# Set up the fixtures needed for all of the tests, i.e. 
open up all of the FITS files\n\n# Default names of pipeline input and output files\n@pytest.fixture(scope=\"module\")\ndef set_inandout_filenames(request, config):\n step = \"resample_spec\"\n step_info = core_utils.set_inandout_filenames(step, config)\n step_input_filename, step_output_filename, in_file_suffix, out_file_suffix, True_steps_suffix_map = step_info\n return step, step_input_filename, step_output_filename, in_file_suffix, out_file_suffix, True_steps_suffix_map\n\n\n# fixture to read the output file header\n@pytest.fixture(scope=\"module\")\ndef output_hdul(set_inandout_filenames, config):\n set_inandout_filenames_info = core_utils.read_info4outputhdul(config, set_inandout_filenames)\n step, txt_name, step_input_file, step_output_file, run_calwebb_spec2, outstep_file_suffix = set_inandout_filenames_info\n run_pipe_step = config.getboolean(\"run_pipe_steps\", step)\n # determine which tests are to be run\n resample_spec_completion_tests = config.getboolean(\"run_pytest\", \"_\".join((step, \"completion\", \"tests\")))\n #resample_spec_reffile_tests = config.getboolean(\"run_pytest\", \"_\".join((step, \"reffile\", \"tests\")))\n #resample_spec_validation_tests = config.getboolean(\"run_pytest\", \"_\".join((step, \"validation\", \"tests\")))\n run_pytests = [resample_spec_completion_tests]#, resample_spec_reffile_tests, resample_spec_validation_tests]\n\n end_time = '0.0'\n # Only run step if data is not IFU or BOTS\n mode_used = config.get(\"calwebb_spec2_input_file\", \"mode_used\").lower()\n output_directory = config.get(\"calwebb_spec2_input_file\", \"output_directory\")\n initial_input_file = config.get(\"calwebb_spec2_input_file\", \"input_file\")\n initial_input_file = os.path.join(output_directory, initial_input_file)\n detector = fits.getval(initial_input_file, \"DETECTOR\", 0)\n if not os.path.isfile(initial_input_file):\n pytest.skip(\"Skipping \"+step+\" because the initial input file given in PTT_config.cfg does not exist.\")\n\n if mode_used != \"bots\" and mode_used != \"ifu\":\n # if run_calwebb_spec2 is True calwebb_spec2 will be called, else individual steps will be ran\n step_completed = False\n\n # check if the filter is to be changed\n change_filter_opaque = config.getboolean(\"calwebb_spec2_input_file\", \"change_filter_opaque\")\n if change_filter_opaque:\n is_filter_opaque, step_input_filename = change_filter_opaque2science.change_filter_opaque(step_input_file, step=step)\n if is_filter_opaque:\n filter_opaque_msg = \"With FILTER=OPAQUE, the calwebb_spec2 will run up to the extract_2d step. 
Resample pytest now set to Skip.\"\n print(filter_opaque_msg)\n core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)\n pytest.skip(\"Skipping \"+step+\" because the input file does not exist.\")\n\n if run_calwebb_spec2:\n hdul = core_utils.read_hdrfits(step_output_file, info=False, show_hdr=False)\n return hdul, step_output_file, run_pytests\n else:\n\n if run_pipe_step:\n # Create the logfile for PTT, but erase the previous one if it exists\n PTTcalspec2_log = os.path.join(output_directory, 'PTT_calspec2_'+detector+'_'+step+'.log')\n if os.path.isfile(PTTcalspec2_log):\n os.remove(PTTcalspec2_log)\n print(\"Information outputed to screen from PTT will be logged in file: \", PTTcalspec2_log)\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n logging.basicConfig(filename=PTTcalspec2_log, level=logging.INFO)\n # print pipeline version\n import jwst\n pipeline_version = \"\\n *** Using jwst pipeline version: \"+jwst.__version__+\" *** \\n\"\n print(pipeline_version)\n logging.info(pipeline_version)\n if change_filter_opaque:\n logging.info(filter_opaque_msg)\n\n if os.path.isfile(step_input_file):\n\n msg = \" *** Step \"+step+\" set to True\"\n print(msg)\n logging.info(msg)\n stp = ResampleSpecStep()\n\n # check that previous pipeline steps were run up to this point\n core_utils.check_completed_steps(step, step_input_file)\n\n # get the right configuration files to run the step\n local_pipe_cfg_path = config.get(\"calwebb_spec2_input_file\", \"local_pipe_cfg_path\")\n # start the timer to compute the step running time\n start_time = time.time()\n if local_pipe_cfg_path == \"pipe_source_tree_code\":\n result = stp.call(step_input_file)\n else:\n result = stp.call(step_input_file, config_file=local_pipe_cfg_path+'/resample_spec.cfg')\n result.save(step_output_file)\n # end the timer to compute the step running time\n end_time = repr(time.time() - start_time) # this is in seconds\n msg = \"Step \"+step+\" took \"+end_time+\" seconds to finish\"\n print(msg)\n logging.info(msg)\n step_completed = True\n hdul = core_utils.read_hdrfits(step_output_file, info=False, show_hdr=False)\n\n # rename and move the pipeline log file\n try:\n calspec2_pilelog = \"calspec2_pipeline_\"+step+\"_\"+detector+\".log\"\n pytest_workdir = TESTSDIR\n logfile = glob(pytest_workdir+\"/pipeline.log\")[0]\n os.rename(logfile, os.path.join(output_directory, calspec2_pilelog))\n except:\n IndexError\n\n # add the running time for this step\n core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)\n return hdul, step_output_file, run_pytests\n\n else:\n msg = \" The input file does not exist. 
Skipping step.\"\n print(msg)\n logging.info(msg)\n core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)\n pytest.skip(\"Skipping \"+step+\" because the input file does not exist.\")\n\n else:\n msg = \"Skipping running pipeline step \"+step\n print(msg)\n logging.info(msg)\n end_time = core_utils.get_stp_run_time_from_screenfile(step, detector, output_directory)\n if os.path.isfile(step_output_file):\n hdul = core_utils.read_hdrfits(step_output_file, info=False, show_hdr=False)\n step_completed = True\n # add the running time for this step\n core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)\n return hdul, step_output_file, run_pytests\n else:\n step_completed = False\n # add the running time for this step\n core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)\n pytest.skip(\"Test skipped because input file \"+step_output_file+\" does not exist.\")\n\n else:\n pytest.skip(\"Skipping \"+step+\" because data is either IFU or BOTS.\")\n\n\n# Unit tests\n\ndef test_s_resample_exists(output_hdul):\n # want to run this pytest?\n # output_hdul[2] = resample_spec_completion_tests, resample_spec_reffile_tests, resample_spec_validation_tests\n run_pytests = output_hdul[2][0]\n if not run_pytests:\n msg = \"Skipping completion pytest: option to run Pytest is set to False in PTT_config.cfg file.\\n\"\n print(msg)\n logging.info(msg)\n pytest.skip(msg)\n else:\n msg = \"\\n * Running completion pytest...\\n\"\n print(msg)\n logging.info(msg)\n assert resample_utils.s_resamp_exists(output_hdul[0]), \"The keyword S_RESAMP was not added to the header --> Resample step was not completed.\"\n\n","sub_path":"nirspec_pipe_testing_tool/calwebb_spec2_pytests/K_resample/test_resample.py","file_name":"test_resample.py","file_ext":"py","file_size_in_byte":9023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"161118714","text":"'''\n在一个有序的经过旋转的数组里查找一个数\n假设一个有序的数组,经过未知次数的旋转(例如0 1 2 4 5 6 7 被旋转成 4 5 6 7 0 1 2),\n从中查找一个目标值,如果存在,返回其下标,不存在,返回-1。注:假设数组无重复数字\n输入一个有序经过旋转的数组和要查找的目标数字,数组中各数字用“逗号”分隔,数组和目标数字用“空格”分隔\n输出一个整数,表示该目标数字的下标(不存在返回-1)\n输入样例\n4,5,6,7,0,1,2 6\n输出样例\n2\n'''\ndef solution(line):\n a, b = line.strip().split()\n a = a.split(\",\")\n for i in range(len(a)):\n if a[i] == b:\n return str(i)\n return str(-1)\n\nprint(solution(input()))","sub_path":"mi_oj/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"8198700","text":"\"\"\"\n-------------------------------------------------------\nqueue_utilities\nContains various funcitons to be used with queues\n-------------------------------------------------------\nAuthor: Brandon Wagner\nID: 160364940\nEmail: wagn4940@mylaurier.ca\nSection: B\n__updated__ = \"2017-01-23\"\n-------------------------------------------------------\n\"\"\"\nfrom queue_array import Queue\ndef queue_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests queue implementation.\n Use: queue_test(a)\n -------------------------------------------------------\n Preconditions:\n a - list of data (list of ?)\n Postconditions:\n the methods of Queue are tested for both empty and \n non-empty queues using the data in a:\n is_empty, insert, remove, peek\n -------------------------------------------------------\n \"\"\"\n q = Queue()\n count = 0\n while count < len(a):\n 
q.insert(a[count])\n count +=1\n print(\"Peek: {}\".format(q.peek()))\n print(\"Remove 1: {}\".format(q.remove()))\n print(\"Remove 2: {}\".format(q.remove()))\n print(\"is_empty: {}\".format(q.is_empty()))\n # tests for the queue methods go here\n # print the results of the method calls and verify by hand\n\n return","sub_path":"wagn4940_data_structures/src/queue_utilities.py","file_name":"queue_utilities.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"438714586","text":"import torch\nimport os, glob, json\nimport numpy as np\n\nfrom tqdm import tqdm\n\nfrom datetime import timedelta\n\nfrom tensorboardX import SummaryWriter\nfrom utils.nqubit_setting import get_args \n\nfrom energy_env.EnvSetting import OneHotEnv, DoubleOneHotEnv, NoOneHotEnv, OneHotActionEnv #Env\n\nfrom agents.SACAgent import SACAgent # Agent\n\n\nif __name__ == '__main__':\n \n ########################### args & json & log_dir & writer ###################################\n args = get_args()\n sac_dict = vars(args)\n\n # log dir & summary writer\n current_dir = './results'\n train_log_dir = '/EnvSetting' + str(args.nbit) + '/sac'\n exp_name = '{}'.format(args.env_id) + '/seed{0}'.format(args.seed)\n log_dir = current_dir + train_log_dir + exp_name \n\n try:\n os.makedirs(log_dir)\n except OSError:\n files = glob.glob(os.path.join(log_dir, 'events.out.tfevents*'))\\\n + glob.glob(os.path.join(log_dir, '*.dump')) \\\n + glob.glob(os.path.join(log_dir, '*.json'))\n for f in files:\n os.remove(f)\n \n writer = SummaryWriter(log_dir)\n \n with open(os.path.join(log_dir, 'params.json'), 'w') as f:\n f.write(json.dumps(sac_dict, ensure_ascii=False, indent=4, separators=(',',':')))\n\n ############################## Device & Env & RNG & Agent #####################\n # Device\n device = torch.device(\"cuda:{}\".format(args.GPU))\n\n # Env\n if args.env_id == 'OneHotEnv':\n env = OneHotEnv(args.nbit, args.episode_length, args.measure_every_n_steps, args.reward_scale)\n elif args.env_id == 'DoubleOneHotEnv':\n env = DoubleOneHotEnv(args.nbit, args.episode_length, args.measure_every_n_steps, args.reward_scale)\n elif args.env_id == 'OneHotActionEnv':\n env = OneHotActionEnv(args.nbit, args.episode_length, args.measure_every_n_steps, args.reward_scale)\n elif args.env_id == 'NoOneHotEnv':\n env = NoOneHotEnv(args.nbit, args.episode_length, args.measure_every_n_steps, args.reward_scale)\n\n # RNG\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n env.action_space.seed(args.seed)\n\n # Agent\n agent = SACAgent(args, env, log_dir, device)\n\n\n ############################# Main Training Loop ############################\n totalstep = 0\n best_b = None\n best_threshold = -2.0\n\n # Training Loop\n for episode in tqdm(range(args.num_episodes)): # int(1e6)\n obs = env.reset() # (9, )\n episode_reward = [] # record_energy\n\n for step in tqdm(range(args.episode_length)): # (0, 1, 2)\n totalstep += 1\n \n # This if-else is used to increase initial exploration \n if totalstep > args.random_steps: # 900\n # required extra exploration strategy\n action = agent.get_action(obs, deterministic = False)\n else:\n action = env.action_space.sample()\n\n # Excute\n prev_obs = obs\n obs, reward, done, info = env.step(action)\n\n episode_reward.append(reward)\n \n \n\n # store ( sometimes is wrote into agent.update )\n agent.buffer.store(prev_obs, action, reward, obs, done)\n\n # when to update & how often we update\n if (totalstep > 
args.learn_start_steps) and (totalstep % args.update_freq_steps==0):\n #value_loss, policy_loss, log_prob_mag, q_value_mag, alpha = agent.update(args.update_freq_per_step, totalstep)\n value_loss, policy_loss, log_prob_mag, q_value_mag = agent.update(args.update_freq_per_step, totalstep)\n \n writer.add_scalar('value_loss', value_loss, totalstep)\n writer.add_scalar('policy_loss', policy_loss, totalstep)\n writer.add_scalar('log_prob', log_prob_mag, totalstep)\n writer.add_scalar('q_value_prob', q_value_mag, totalstep)\n # logwriter.add_scalar('alpha', alpha, totalstep)\n \n \n # log_state & action\n #if totalstep % args.log_state_action_steps == 0:\n \n # writer.add_scalars('state_value', {'s0':obs[-6], 's1':obs[-5], 's2':obs[-4], 's3':obs[-3], 's4':obs[-2], 's5':obs[-1]}, totalstep)\n # writer.add_scalars('log_action', {'a0':action[0], 'a1':action[1], 'a2':action[2], 'a3':action[3], 'a4':action[4], 'a5':action[5]}, totalstep)\n\n # record threshold \n if (totalstep % args.measure_every_n_steps == 0):\n writer.add_scalar('step-threshold', info['threshold'], totalstep)\n if info['threshold'] > best_threshold:\n best_threshold = info['threshold']\n best_b = info['solution']\n #writer.add_scalar('reward', info['reward'], totalstep)\n #writer.add_scalar('extra_reward', info['extra_reward'], totalstep)\n \n if info and (info['threshold'] >= -1.05):\n pass\n '''\n if satisfied_flag == 0:\n satisfied_flag = episode\n elif ((episode - satisfied_flag)== 1): \n convergence_buffer.append(info['threshold'])\n satisfied_flag == episode\n else:\n satisfied_flag == episode\n \n \n if len(convergence_buffer) == 100:\n mean = np.mean(np.array(convergence_buffer))\n if (mean >= -1.005):\n #torch.save(agent.model.state_dict(), os.path.join(log_dir, 'sac_model.dump'))\n '''\n \n\n '''\n\n avg_reward = 0.\n test_episodes = 5\n for _ in range(test_episodes):\n obs, done, ep_rew = test_env.reset(), False, 0.0\n for i in range(args.max_episode_steps): # 3\n action = agent.get_action(obs, deterministic = True)\n next_obs, reward, done, info = env.step(action)\n ep_rew += reward\n obs = next_obs\n\n avg_reward += ep_rew\n \n avg_reward /= test_episodes\n\n '''\n \n #torch.save(agent.model.state_dict(), os.path.join(log_dir, 'sac_model.dump'))\n #writer.add_scalar('test_episode_reward', avg_reward, totalstep)\n \n\n \n \n \n measure_state = info['solution']\n\n writer.add_scalar('episode_threshold',info['threshold'], episode)\n writer.add_scalars('soluiton', {'s0':measure_state[0], 's1':measure_state[1], 's2':measure_state[2], 's3':measure_state[3],'s4':measure_state[4],'s5':measure_state[5]}, episode)\n writer.add_scalar('episode_reward', np.mean(np.array(episode_reward)), episode)\n\n if (episode % 1000 == 0):\n with open(os.path.join(log_dir, 'solution.txt'), 'a') as f:\n f.write('best_threshold:{0}, bset_solution:{1}'.format(best_threshold, best_b))\n\n torch.save(agent.model.state_dict(), os.path.join(log_dir, 'sac_model.dump'))\n \n \n\n env.close()\n \n writer.close()\n\n \n \n # def main(args, log_dir):\n #pass\n\n# if __name__ == '__main__':\n# for arg in args:\n# log_dir = functionOfArg(arg)\n# main(log_dir, arg)\n# how to evaluate the hyperparameters properly\n\n\n\n \n\n\n \n\n\n\n\n\n\n","sub_path":"nqubit/Train_SAC_setting.py","file_name":"Train_SAC_setting.py","file_ext":"py","file_size_in_byte":7563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"467692471","text":"\t\t\t\t\t\nuron = int(input(\"Ввод числа \"))\nfirst = 
1140671485\t\t\t\t\nsecond = 128201163\nfinal = (2**32) // 1000000\ndef rand(lcg):\n\turon = ((uron * first + second) % final)\n\tprint (\"Урон будет равен =\" + uron)\n\treturn(uron)\nfor i in range(2):\n\trand(uron)","sub_path":"1-2-5.py","file_name":"1-2-5.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"191190266","text":"import collections\nimport copy\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom absl import app\nfrom torch.utils.data import DataLoader, Dataset\n\nPADDING_TOKEN = 0\nCKPT_VOCABULARY_SIZE = 82\nCKPT_EMBEDDING_DIM = 256\nCKPT_HIDDEN_SIZE = 128\n\n\nclass VisualizeInternalGates(nn.Module):\n\n def __init__(self):\n super().__init__()\n vocabulary_size = CKPT_VOCABULARY_SIZE\n embedding_dim = CKPT_EMBEDDING_DIM\n hidden_size = CKPT_HIDDEN_SIZE\n\n self.embedding = nn.Embedding(num_embeddings=vocabulary_size,\n embedding_dim=embedding_dim,\n padding_idx=PADDING_TOKEN)\n self.rnn_model = VisualizeGRUCell(input_size=embedding_dim,\n hidden_size=hidden_size)\n self.classifier = nn.Linear(hidden_size, vocabulary_size)\n return\n\n def forward(self, batch_reviews):\n data = self.embedding(batch_reviews)\n\n state = None\n batch_size, total_steps, _ = data.shape\n internals = []\n for step in range(total_steps):\n next_h, gate_signals = self.rnn_model(data[:, step, :], state)\n internals.append(gate_signals)\n state = next_h\n\n logits = self.classifier(state)\n\n internals = list(zip(*internals))\n outputs = {\n 'update_signals': internals[0],\n 'reset_signals': internals[1],\n 'cell_state_candidates': internals[2],\n }\n return logits, outputs\n\n\nclass VisualizeGRUCell(nn.Module):\n\n def __init__(self, input_size, hidden_size):\n super().__init__()\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n\n self.W_z = nn.Parameter(torch.Tensor(hidden_size, hidden_size + input_size))\n self.W_r = nn.Parameter(torch.Tensor(hidden_size, hidden_size + input_size))\n self.W = nn.Parameter(torch.Tensor(hidden_size, hidden_size + input_size))\n\n self.reset_parameters()\n\n def forward(self, x, prev_state):\n if prev_state is None:\n batch = x.shape[0]\n prev_h = torch.zeros((batch, self.hidden_size), device=x.device)\n else:\n prev_h = prev_state\n\n concat_hx = torch.cat((prev_h, x), dim=1)\n z = torch.sigmoid(F.linear(concat_hx, self.W_z))\n r = torch.sigmoid(F.linear(concat_hx, self.W_r))\n h_tilde = torch.tanh(F.linear(torch.cat((r * prev_h, x), dim=1), self.W))\n next_h = (1 - z) * prev_h + z * h_tilde\n return next_h, (z, r, h_tilde)\n\n def reset_parameters(self):\n sqrt_k = (1. 
/ self.hidden_size)**0.5\n with torch.no_grad():\n for param in self.parameters():\n param.uniform_(-sqrt_k, sqrt_k)\n return\n\n def extra_repr(self):\n return 'input_size={}, hidden_size={}'.format(self.input_size,\n self.hidden_size)\n\n\nclass VisualizeWarAndPeaceDataset(Dataset):\n\n def __init__(self, vocabulary):\n self.vocabulary = vocabulary\n\n # Hardcode the parameters to match the provided checkpoint\n txt_path = 'data/war_and_peace_visualize.txt'\n\n with open(txt_path, 'rb') as fp:\n raw_text = fp.read().strip().decode(encoding='utf-8')\n\n self.data = raw_text.split('\\n')\n\n self.char2index = {x: i for (i, x) in enumerate(self.vocabulary)}\n self.index2char = {i: x for (i, x) in enumerate(self.vocabulary)}\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n return np.array([self.char2index[x] for x in self.data[index]]), -1\n\n def convert_to_chars(self, sequence):\n if isinstance(sequence, torch.Tensor):\n sequence = sequence.squeeze(0).detach().numpy().tolist()\n return [self.index2char[x] for x in sequence]\n\n\ndef visualize_internals(sequence_id,\n sequence,\n gate_name,\n states,\n saving_dir='visualize/'):\n states = torch.cat(states, dim=0).detach().numpy().T\n hidden_size, time_stamps = states.shape\n fig, ax = plt.subplots(figsize=(time_stamps / 5, hidden_size / 5))\n\n if gate_name in ['update_signals', 'reset_signals']:\n vmin = 0\n elif gate_name == 'cell_state_candidates':\n vmin = -1\n else:\n raise ValueError\n\n sns.heatmap(states,\n cbar=False,\n square=True,\n linewidth=0.05,\n xticklabels=sequence,\n yticklabels=False,\n vmin=vmin,\n vmax=1,\n cmap='bwr',\n ax=ax)\n\n plt.xlabel('Sequence')\n plt.ylabel('Hidden Cells')\n\n ax.xaxis.set_ticks_position('top')\n\n plt.tight_layout()\n os.makedirs(saving_dir, exist_ok=True)\n plt.savefig(\n os.path.join(saving_dir,\n 'S%02d_' % sequence_id + gate_name.lower() + '.png'))\n return\n\n\ndef war_and_peace_visualizer():\n\n #domain\n model_domain = VisualizeInternalGates()\n model_domain.load_state_dict(torch.load('data/war_and_peace_model_checkpoint.pt'))\n print(model_domain.features)\n # for i in range(5):\n # kernels_domain = model_domain.features[arr[i]].weight.detach()\n # name = \"conv2D_\"+str(i)+\"_domain\"\n # visualize_kernels(name, kernels_domain)\n\n return\n\n\ndef main(unused_argvs):\n war_and_peace_visualizer()\n\n\nif __name__ == '__main__':\n app.run(main)\n","sub_path":"rnn_practice/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":5229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"258850235","text":"from Kit.Algorithm import *\nfrom Global.Parameters import *\nfrom IO.Loader import *\nclass DDBSCAN:\n '''\n Modified DBSCAN Algorithm for huge data processing.This improved algorithm can be implemented in\n two situations:\n\n 1.Single device.\n Only one device with general-capacity memory.The memory is not capable to load all the data.As\n a trade-off,data can be splited into several segments and each of which will be successively scanned\n by DDBSCAN and the final result can be gained by aggregation.\n\n 2.Distributed environment.\n The partitions of the whole data are uniformly spreaded to each component of a computing-cluster.\n All the partitions be processed parallel instead of scanned successively in the single condition as\n described above.\n '''\n def __init__(self,eps,minPts,data,feature_length,external_metric=None):\n self.data = data\n self.feature_length = 
feature_length\n self.sample_size = 0\n for addr in self.data:\n self.data[addr]['status'] = [DATA_UNVISITED*10] * self.data[addr]['size']\n size = self.data[addr]['size']\n start = self.data[addr]['start']\n self.data[addr]['data'] = [str(i+start)+',0,0' for i in range(0,size)]\n self.sample_size += self.data[addr]['size']\n self.expand_length = self.feature_length + 2\n self.eps =eps\n self.minPts = minPts\n self.current_label = 2\n self.external_metric = external_metric\n self.label = None\n\n\n def getCenters(self):\n centers = {}\n crt_data = None\n crt_size = 0\n crt_start = 0\n crt_region = ''\n for pos_data in range(0,len(self.label)):\n l = self.label[pos_data]\n if crt_data is None:\n for k in self.data:\n if self.data[k]['start'] == pos_data:\n crt_data = load_data(self.data[k]['path'])\n crt_size = self.data[k]['size']\n crt_start = self.data[k]['start']\n crt_region = k\n break\n if l in centers:\n center = centers[l]['center']\n var = centers[l]['var']\n center += np.array(crt_data[pos_data-crt_start])\n var += (np.array(crt_data[pos_data-crt_start]) - TMP_MEAN) * (np.array(crt_data[pos_data-crt_start]) - TMP_MEAN)\n centers[l]['size'] += 1.0\n centers[l]['data_pos'].append(pos_data)\n else:\n centers[l] = {\n 'center':np.array(crt_data[pos_data-crt_start]),\n 'size':1.0,\n 'var':np.array([0.0]*self.feature_length),\n 'data_pos':[],\n 'region_key':crt_region\n }\n if pos_data - crt_start == crt_size - 1:\n del crt_data\n crt_data = None\n for key in centers:\n centers[key]['sum'] = centers[key]['center'].copy()\n centers[key]['center'] /= centers[key]['size']\n centers[key]['var'] = centers[key]['var']/centers[key]['size']+2*centers[key]['sum']*(TMP_MEAN-centers[key]['center'])/centers[key]['size']+(centers[key]['center'] - TMP_MEAN) * (centers[key]['center'] + TMP_MEAN)\n return centers\n\n def neighboursFinder(self,p,region_id,subset):\n subNeighbours = []\n for i in range(0,len(subset)):\n d = subset[i]\n if distance(p,d,self.feature_length,self.sample_size,self.external_metric) <= self.eps:\n if isinstance(d,(list)):\n if len(d) == self.expand_length:\n d[len(d) - 2] = region_id\n d[len(d) - 1] = i\n else:\n d.append(region_id)\n d.append(i)\n else:\n if isinstance(d,(str)):\n tmp = d.split(',')\n tmp[1] = str(region_id)\n tmp[2] = str(i)\n d = ','.join(tmp)\n subNeighbours.append(d)\n return subNeighbours\n\n def scan(self,P,eps,MinPts):\n pass\n\n def generateLabel(self):\n if self.label is None:\n self.train()\n return self.label\n\n def setLabel(self,key,pos,value):\n v = int(self.data[key]['status'][pos])\n length = pow(10,len(str(v)))/10\n v = (v // int(length)) * int(length) + value\n self.data[key]['status'][pos] = float(v)\n\n def setVisit(self,key,pos,value):\n #110\n v = int(self.data[key]['status'][pos])\n length = pow(10,len(str(v)))/10\n v = v % int(length) + value * int(length)\n self.data[key]['status'][pos] = float(v)\n\n def getLabel(self,key,pos):\n v = int(self.data[key]['status'][pos])\n length = pow(10,len(str(v)))/10\n return int(v % int(length))\n\n def getVisit(self,key,pos):\n v = int(self.data[key]['status'][pos])\n length = pow(10,len(str(v)))/10\n return int(v // int(length))\n\n def train(self,direct=False,debug=False):\n for region_id in self.data:\n if not direct:\n subsetForP = load_data(self.data[region_id]['path'])\n else:\n subsetForP = self.data[region_id]['data']\n for i in range(0,len(subsetForP)):\n self.sample_size -= 1\n if debug:\n print(str(self.sample_size)+' samples left')\n P = subsetForP[i]\n #unvisited\n if 
self.getVisit(region_id,i) == DATA_UNVISITED:\n self.setVisit(region_id,i,DATA_VISITED)\n neighbours = []\n for keyForN in self.data:\n #load part of the whole data\n if not direct:\n subsetForN = load_data(self.data[keyForN]['path'])\n else:\n subsetForN = self.data[keyForN]['data']\n subneighbours = self.neighboursFinder(P,region_id,subsetForN)\n neighbours = neighbours + subneighbours\n #unload to release space\n dump_data(keyForN)\n if len(neighbours) < self.minPts:\n self.setLabel(region_id,i,LABEL_NOISE)\n else:\n self.setLabel(region_id,i,self.current_label)\n self.current_label += 1\n self.expandCluster(neighbours,direct)\n del neighbours\n dump_data(subsetForP)\n label = []\n for key in self.data:\n for pos in range(len(self.data[key]['status'])):\n label.append(self.getLabel(key,pos))\n self.label = label\n\n def expandCluster(self,neighbours,direct):\n for p in neighbours:\n if isinstance(p,(list)):\n region_id = p[len(p) - 2]\n data_pos = p[len(p) - 1]\n else:\n tmp = p.split(',')\n region_id = (tmp[1])\n data_pos = int(tmp[2])\n if self.getVisit(region_id,data_pos) == DATA_UNVISITED:\n self.setVisit(region_id,data_pos,DATA_VISITED)\n #p.remove(tail)\n neighbours2 = []\n for keyForN in self.data:\n #load part of the whole data\n if not direct:\n subsetForN = load_data(self.data[keyForN]['path'])\n else:\n subsetForN = self.data[keyForN]['data']\n subneighbours = self.neighboursFinder(p,keyForN,subsetForN)\n neighbours2 = neighbours2 + subneighbours\n #unload to release space\n dump_data(keyForN)\n #union\n if len(neighbours2) >= self.minPts:\n if isinstance(neighbours[0],(list)):\n tmp = neighbours + neighbours2\n neighbours = [tmp[i] for i in range(0,len(tmp)) if tmp[i] not in tmp[:i]]\n else:\n neighbours = list(set(neighbours).union(set(neighbours2)))\n else:\n pass\n if self.getLabel(region_id,data_pos) == LABEL_UNDEFINED:\n self.setLabel(region_id,data_pos,self.current_label)\n\n class Host:\n CONTROL_ASSIGMENT = 0\n NEIGHBOUR_DETECTION = 1\n NEIGHBOUR_EXPAND = 2\n def job(self):\n for addr in self.data:\n # current node who is entrusted with the control power\n # network sync\n msg = {\n 'addr':addr,\n 'type':self.CONTROL_ASSIGMENT,\n 'value':None\n }\n self.sendMsg(msg)\n self.crt_node = addr\n def listen(self,task,addr,p):\n msg = {\n 'addr':addr,\n 'type':task,\n 'value':p\n }\n self.sendMsg(msg)\n\n class Nodes:\n CONTROL_ASSIGMENT = 0\n NEIGHBOUR_DETECTION = 1\n NEIGHBOUR_EXPAND = 2\n def receiveMsg(self,msg):\n if msg['type'] == self.CONTROL_ASSIGMENT:\n self.jobOnNode()\n\n def jobOnNode(self):\n subset = self.load_data()\n for i in range(0,len(subset)):\n P = subset[i]\n if self.getVisit(i) == DATA_UNVISITED:\n self.setVisit(i,DATA_VISITED)\n neighbours = []\n # network sync\n msg = {\n 'addr':[],\n 'type':self.NEIGHBOUR_DETECTION,\n 'value':P\n }\n self.sendMsg(msg)\n def findNeighboursOnNode(self):\n pass\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Distribution/DistributedDBSCAN.py","file_name":"DistributedDBSCAN.py","file_ext":"py","file_size_in_byte":10101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"194908884","text":"# You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse\n# order and each of their nodes contain a single digit. 
Add the two numbers and return it as a linked list.\n#\n# You may assume the two numbers do not contain any leading zero, except the number 0 itself.\n#\n# Example\n#\n# Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)\n# Output: 7 -> 0 -> 8\n# Explanation: 342 + 465 = 807.\n\n\n# !/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\ndef add_two_numbers(l1, l2):\n head = current = ListNode(0)\n carry = 0\n while l1 or l2 or carry:\n sum = (l1.val if l1 else 0) + (l2.val if l2 else 0) + carry\n carry = sum // 10\n current.next = ListNode(sum % 10)\n current = current.next\n l1 = l1.next if l1 else l1\n l2 = l2.next if l2 else l2\n return head.next\n","sub_path":"python/002-addTwoNumbers.py","file_name":"002-addTwoNumbers.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"152893773","text":"\"\"\"\n-------------------------------------------------------------------------------\nPRBS GENERATOR: prbs_gen.py\n-------------------------------------------------------------------------------\nOberseminar Regelungstechnik - Auto-tuning PID\n-------------------------------------------------------------------------------\n\"\"\"\n\nimport numpy as np\nimport random as rd\n\n\ndef prbsfnc(A, Lambda): \n \n gen_poly = (9,5);\n m = 9\n N = 2**m - 1\n R = np.zeros(m,dtype=int)\n R[0] = 1\n \n arrayOut = list()\n \n for i in range(0, N):\n \n arrayOut.append(R[0])\n xor = int(R[gen_poly[0]-1] ^ R[gen_poly[1]-1])\n R = np.concatenate(([xor],R[0:m-1]))\n \n prbs = np.array(arrayOut)*2*A - A\n \n \n print(sum(prbs)/len(prbs))\n tt = [Lambda*x for x in range(N)]\n tt = np.array(tt)\n #t_max = tt[N-1]\n\n def fnc(t, debug_flag=False):\n \n #t = t % t_max\n #idx = len(tt[tt <= t]) - 1\n \n # first round to compensate representation errors, then cast to int\n idx = np.int16(np.round(t/Lambda, 6))#len(tt[tt < t])\n idx = idx % len(prbs)\n if debug_flag:\n print(t, idx)\n #print(idx)\n return prbs[idx]\n \n return fnc, prbs, N","sub_path":"prbs_gen.py","file_name":"prbs_gen.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"639205255","text":"#!/usr/local/bin/python\n\nimport os\nimport struct\nimport numpy as np\nimport math\nfrom math import sqrt\n\nnp.set_printoptions (precision = 4, suppress = True, linewidth = np.nan, threshold = np.nan)\n\ndef sigmoid (x):\n x0 = 10.0\n return 2.0 * (1.0 / (1.0 + np.exp (- x / x0)) - 0.5)\n\ndef MNISTread (dataset = \"training\", path = \".\"):\n if dataset is \"training\":\n fname_img = os.path.join (path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join (path, 'train-labels-idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join (path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join (path, 't10k-labels-idx1-ubyte')\n else: raise ValueError (\"Dataset must be 'testing' or 'training'\")\n with open (fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack (\">II\", flbl.read (8))\n lbl = np.fromfile (flbl, dtype = np.int8)\n with open (fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack (\">IIII\", fimg.read (16))\n img = np.fromfile (fimg, dtype = np.uint8).reshape (len (lbl), rows, cols)\n get_img = lambda idx: (lbl[idx], img[idx])\n for i in range (0, len (lbl)): yield get_img (i)\n\ndef DigitDisplay (digit):\n from matplotlib import pyplot\n import matplotlib as mpl\n 
fig = pyplot.figure ()\n ax = fig.add_subplot (1, 1, 1)\n imgplot = ax.imshow (digit, cmap = mpl.cm.Greys)\n imgplot.set_interpolation ('nearest')\n ax.xaxis.set_ticks_position ('top')\n ax.yaxis.set_ticks_position ('left')\n pyplot.show ()\n\ndef Vector2Image (v, rows, columns):\n v = v - v.min ()\n v *= 255 / v.max ()\n return np.uint8 (v.reshape (rows, columns))\n\ndef Matrix2Image (M):\n M = M - M.min ()\n M *= 255 / M.max ()\n return np.uint8 (M)\n\ndef GetSize (type, path):\n images = MNISTread (type, path)\n label, digit = next (images)\n rows, columns = digit.shape\n return rows, columns\n\ndef GetImages (type, path):\n images = MNISTread (type, path)\n return images\n\ndef Train (TS, D, N, NT, ETA, E, TOL):\n DN = D * N\n WEIGHT = np.zeros ((DN, DN))\n STATE = np.zeros ((DN))\n for t in range (0, NT):\n LABEL, DIGIT = next (TS)\n print (\"Train input: \", t + 1, \"of \", NT, \" Label: \", LABEL)\n IN = np.float64 (DIGIT.reshape (N))\n IN -= IN.mean ()\n IN /= np.linalg.norm (IN)\n INPUT = np.zeros ((DN))\n INPUT = np.concatenate ( (INPUT[ : LABEL * N], IN, INPUT[(LABEL + 1) * N :]) )\n STATE += np.array (INPUT)\n for n in range (0, DN):\n STATE[n] = sigmoid (STATE[n])\n for e in range (0, E):\n STATE += STATE.dot (WEIGHT)\n for n in range (0, DN):\n STATE[n] = sigmoid (STATE[n])\n DELTA = np.zeros ((DN, DN))\n DELTA = ETA * STATE * STATE.T\n WEIGHT = WEIGHT + DELTA\n if abs (DELTA).max () < TOL and abs (STATE).max () < TOL: break\n return WEIGHT, STATE\n\ndef Test (TS, W, R, C, D, N, NT, DISPLAY):\n DN = D * N;\n STATE = np.zeros ((DN))\n for t in range (0, NT):\n LABEL, DIGIT = next (TS)\n print (\"Test input: \", t + 1, \"of \", NT, \" Label: \", LABEL)\n IN = np.float64 (DIGIT.reshape (N))\n IN -= IN.mean ()\n IN /= np.linalg.norm (IN)\n INPUT = np.zeros ((DN))\n INPUT = np.concatenate ( (INPUT[ : LABEL * N], IN, INPUT[(LABEL + 1) * N :]) )\n RESULT = W.dot (INPUT)\n if (DISPLAY):\n INIMAGE = Vector2Image (INPUT, 2 * R, 5 * C)\n DigitDisplay (INIMAGE)\n RESIMAGE = Vector2Image (RESULT, 2 * R, 5 * C)\n DigitDisplay (RESIMAGE)\n\n\n# main\ndisplay = 1\ntolerance = 1e-15\neta = 0.0001\ndigits = 10\nepochs = 100\ntrains = 1000\ntests = 10\n\nrows, columns = GetSize (\"training\", \"../MNIST\")\nneurons = rows * columns\ntrainSet = GetImages (\"training\", \"../MNIST\")\nweight, state = Train (trainSet, digits, neurons, trains, eta, epochs, tolerance)\ntestSet = GetImages (\"testing\", \"../MNIST\")\nTest (testSet, weight, rows, columns, digits, neurons, tests, display)\n\n\n\n\n\n\n","sub_path":"Promising/Abramo/nnC.py","file_name":"nnC.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"389270709","text":"# pylint: disable=redefined-outer-name\n# pylint: disable=unused-argument\n\nfrom asyncio import BaseEventLoop\nfrom typing import Any, AsyncIterator, Dict, List\nfrom uuid import UUID, uuid4\n\nimport aiodocker\nimport pytest\nfrom _pytest.monkeypatch import MonkeyPatch\nfrom models_library.projects import ProjectID\nfrom models_library.projects_nodes_io import NodeID\nfrom simcore_service_director_v2.core.settings import DynamicSidecarSettings\nfrom simcore_service_director_v2.models.schemas.constants import (\n DYNAMIC_PROXY_SERVICE_PREFIX,\n DYNAMIC_SIDECAR_SERVICE_PREFIX,\n UserID,\n)\nfrom simcore_service_director_v2.models.schemas.dynamic_services import (\n ServiceLabelsStoredData,\n ServiceState,\n ServiceType,\n)\nfrom simcore_service_director_v2.modules.dynamic_sidecar 
import docker_api\nfrom simcore_service_director_v2.modules.dynamic_sidecar.errors import (\n DynamicSidecarError,\n GenericDockerError,\n)\n\npytestmark = pytest.mark.asyncio\n\n# FIXTURES\n\n\n@pytest.fixture\nasync def async_docker_client(\n loop: BaseEventLoop,\n docker_swarm: None,\n) -> AsyncIterator[aiodocker.docker.Docker]:\n async with aiodocker.Docker() as client:\n yield client\n\n\n@pytest.fixture\ndef dynamic_sidecar_settings(\n monkeypatch: MonkeyPatch, docker_swarm: None\n) -> DynamicSidecarSettings:\n monkeypatch.setenv(\"DYNAMIC_SIDECAR_IMAGE\", \"local/dynamic-sidecar:MOCKED\")\n monkeypatch.setenv(\"DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED\", \"false\")\n return DynamicSidecarSettings.create_from_envs()\n\n\n@pytest.fixture\ndef network_config(simcore_services_network_name: str) -> Dict[str, Any]:\n return {\n \"Name\": simcore_services_network_name,\n \"Driver\": \"overlay\",\n \"Labels\": {\"uuid\": f\"{uuid4()}\"},\n }\n\n\n@pytest.fixture\nasync def ensure_swarm_network(\n loop: BaseEventLoop,\n network_config: Dict[str, Any],\n async_docker_client: aiodocker.docker.Docker,\n docker_swarm: None,\n) -> None:\n network_id = None\n try:\n network_id = await docker_api.create_network(network_config)\n yield\n finally:\n if network_id is not None:\n docker_network = await async_docker_client.networks.get(network_id)\n assert await docker_network.delete() is True\n\n\n@pytest.fixture\nasync def cleanup_swarm_network(\n loop: BaseEventLoop,\n simcore_services_network_name: str,\n async_docker_client: aiodocker.docker.Docker,\n docker_swarm: None,\n) -> None:\n yield\n docker_network = await async_docker_client.networks.get(\n simcore_services_network_name\n )\n assert await docker_network.delete() is True\n\n\n@pytest.fixture\ndef missing_network_name() -> str:\n return \"this_network_is_missing\"\n\n\n@pytest.fixture\ndef test_service_name() -> str:\n return \"test_service_name\"\n\n\n@pytest.fixture\ndef service_spec(test_service_name: str) -> Dict[str, Any]:\n # \"joseluisq/static-web-server\" is ~2MB docker image\n return {\n \"name\": test_service_name,\n \"task_template\": {\"ContainerSpec\": {\"Image\": \"joseluisq/static-web-server\"}},\n \"labels\": {\"foo\": \"bar\"},\n }\n\n\n@pytest.fixture\nasync def cleanup_test_service_name(\n loop: BaseEventLoop,\n test_service_name: str,\n async_docker_client: aiodocker.docker.Docker,\n docker_swarm: None,\n) -> None:\n yield\n\n assert await async_docker_client.services.delete(test_service_name) is True\n\n\n@pytest.fixture\ndef dynamic_sidecar_service_name() -> str:\n return f\"{DYNAMIC_SIDECAR_SERVICE_PREFIX}_some-dynamic-fake-sidecar\"\n\n\n@pytest.fixture\ndef dynamic_sidecar_service_spec(\n dynamic_sidecar_service_name: str, dynamic_sidecar_settings: DynamicSidecarSettings\n) -> Dict[str, Any]:\n # \"joseluisq/static-web-server\" is ~2MB docker image\n sample = ServiceLabelsStoredData.Config.schema_extra[\"example\"]\n\n return {\n \"name\": dynamic_sidecar_service_name,\n \"task_template\": {\"ContainerSpec\": {\"Image\": \"joseluisq/static-web-server\"}},\n \"labels\": {\n \"swarm_stack_name\": f\"{dynamic_sidecar_settings.SWARM_STACK_NAME}\",\n \"uuid\": f\"{uuid4()}\",\n \"service_key\": \"simcore/services/dynamic/3dviewer\",\n \"service_tag\": \"2.4.5\",\n \"paths_mapping\": sample[\"paths_mapping\"].json(),\n \"compose_spec\": sample[\"compose_spec\"],\n \"container_http_entry\": sample[\"container_http_entry\"],\n \"traefik.docker.network\": \"\",\n \"io.simcore.zone\": \"\",\n \"service_port\": \"80\",\n 
\"study_id\": f\"{uuid4()}\",\n \"user_id\": \"123\",\n },\n }\n\n\n@pytest.fixture\nasync def cleanup_test_dynamic_sidecar_service(\n loop: BaseEventLoop,\n dynamic_sidecar_service_name: str,\n async_docker_client: aiodocker.docker.Docker,\n) -> None:\n yield\n assert (\n await async_docker_client.services.delete(dynamic_sidecar_service_name) is True\n )\n\n\n@pytest.fixture\ndef node_uuid() -> NodeID:\n return uuid4()\n\n\n@pytest.fixture\ndef user_id() -> UserID:\n return 123\n\n\n@pytest.fixture\ndef project_id() -> ProjectID:\n return uuid4()\n\n\n@pytest.fixture\ndef dynamic_sidecar_stack_specs(\n node_uuid: UUID,\n user_id: UserID,\n project_id: ProjectID,\n dynamic_sidecar_settings: DynamicSidecarSettings,\n) -> List[Dict[str, Any]]:\n return [\n {\n \"name\": f\"{DYNAMIC_PROXY_SERVICE_PREFIX}_fake_proxy\",\n \"task_template\": {\n \"ContainerSpec\": {\"Image\": \"joseluisq/static-web-server\"}\n },\n \"labels\": {\n \"swarm_stack_name\": f\"{dynamic_sidecar_settings.SWARM_STACK_NAME}\",\n \"type\": f\"{ServiceType.DEPENDENCY.value}\",\n \"uuid\": f\"{node_uuid}\",\n \"user_id\": f\"{user_id}\",\n \"study_id\": f\"{project_id}\",\n },\n },\n {\n \"name\": f\"{DYNAMIC_SIDECAR_SERVICE_PREFIX}_fake_sidecar\",\n \"task_template\": {\n \"ContainerSpec\": {\"Image\": \"joseluisq/static-web-server\"}\n },\n \"labels\": {\n \"swarm_stack_name\": f\"{dynamic_sidecar_settings.SWARM_STACK_NAME}\",\n \"type\": f\"{ServiceType.MAIN.value}\",\n \"uuid\": f\"{node_uuid}\",\n \"user_id\": f\"{user_id}\",\n \"study_id\": f\"{project_id}\",\n },\n },\n ]\n\n\n@pytest.fixture\nasync def cleanup_dynamic_sidecar_stack(\n loop: BaseEventLoop,\n dynamic_sidecar_stack_specs: List[Dict[str, Any]],\n async_docker_client: aiodocker.docker.Docker,\n) -> None:\n yield\n for dynamic_sidecar_spec in dynamic_sidecar_stack_specs:\n assert (\n await async_docker_client.services.delete(dynamic_sidecar_spec[\"name\"])\n is True\n )\n\n\n# UTILS\n\n\ndef _assert_service(\n service_spec: Dict[str, Any], service_inspect: Dict[str, Any]\n) -> None:\n assert service_inspect[\"Spec\"][\"Labels\"] == service_spec[\"labels\"]\n assert service_inspect[\"Spec\"][\"Name\"] == service_spec[\"name\"]\n assert (\n service_inspect[\"Spec\"][\"TaskTemplate\"][\"ContainerSpec\"][\"Image\"]\n == service_spec[\"task_template\"][\"ContainerSpec\"][\"Image\"]\n )\n\n\nasync def _count_services_in_stack(\n node_uuid: UUID,\n dynamic_sidecar_settings: DynamicSidecarSettings,\n async_docker_client: aiodocker.docker.Docker,\n) -> int:\n services = await async_docker_client.services.list(\n filters={\n \"label\": [\n f\"swarm_stack_name={dynamic_sidecar_settings.SWARM_STACK_NAME}\",\n f\"uuid={node_uuid}\",\n ]\n }\n )\n return len(services)\n\n\n# TESTS\n\n\ndef test_new_docker_swarm(docker_swarm: None) -> None:\n pass\n\n\n@pytest.mark.parametrize(\n \"simcore_services_network_name\",\n (\"n\", \"network\", \"with_underscore\", \"with-dash\", \"with-dash_with_underscore\"),\n)\ndef test_valid_network_names(\n simcore_services_network_name: str, monkeypatch: MonkeyPatch\n) -> None:\n monkeypatch.setenv(\"DYNAMIC_SIDECAR_IMAGE\", \"local/dynamic-sidecar:MOCKED\")\n monkeypatch.setenv(\"SIMCORE_SERVICES_NETWORK_NAME\", simcore_services_network_name)\n dynamic_sidecar_settings = DynamicSidecarSettings.create_from_envs()\n assert dynamic_sidecar_settings\n\n\nasync def test_failed_docker_client_request(\n missing_network_name: str, docker_swarm: None\n) -> None:\n with pytest.raises(GenericDockerError) as execinfo:\n async with 
docker_api.docker_client() as client:\n await client.networks.get(missing_network_name)\n assert (\n str(execinfo.value)\n == f\"Unexpected error from docker client: network {missing_network_name} not found\"\n )\n\n\nasync def test_get_swarm_network_ok(\n dynamic_sidecar_settings: DynamicSidecarSettings,\n simcore_services_network_name: str,\n ensure_swarm_network: None,\n docker_swarm: None,\n) -> None:\n swarm_network = await docker_api.get_swarm_network(dynamic_sidecar_settings)\n assert swarm_network[\"Name\"] == simcore_services_network_name\n\n\nasync def test_get_swarm_network_missing_network(\n dynamic_sidecar_settings: DynamicSidecarSettings, docker_swarm: None\n) -> None:\n with pytest.raises(DynamicSidecarError) as excinfo:\n await docker_api.get_swarm_network(dynamic_sidecar_settings)\n assert (\n str(excinfo.value)\n == \"Swarm network name is not configured, found following networks: []\"\n )\n\n\nasync def test_recreate_network_multiple_times(\n network_config: Dict[str, Any],\n cleanup_swarm_network: None,\n docker_swarm: None,\n) -> None:\n network_ids = [await docker_api.create_network(network_config) for _ in range(10)]\n network_ids_set = set(network_ids)\n assert len(network_ids_set) == 1\n network_id = network_ids_set.pop()\n assert type(network_id) == str\n\n\nasync def test_create_service(\n service_spec: Dict[str, Any],\n cleanup_test_service_name: None,\n docker_swarm: None,\n) -> None:\n service_id = await docker_api.create_service_and_get_id(service_spec)\n assert service_id\n\n\nasync def test_inspect_service(\n service_spec: Dict[str, Any],\n cleanup_test_service_name: None,\n docker_swarm: None,\n) -> None:\n service_id = await docker_api.create_service_and_get_id(service_spec)\n assert service_id\n\n service_inspect = await docker_api.inspect_service(service_id)\n\n _assert_service(service_spec, service_inspect)\n\n\nasync def test_services_to_observe_exist(\n dynamic_sidecar_service_name: str,\n dynamic_sidecar_service_spec: Dict[str, Any],\n dynamic_sidecar_settings: DynamicSidecarSettings,\n cleanup_test_dynamic_sidecar_service: None,\n docker_swarm: None,\n) -> None:\n service_id = await docker_api.create_service_and_get_id(\n dynamic_sidecar_service_spec\n )\n assert service_id\n\n dynamic_services = await docker_api.get_dynamic_sidecars_to_observe(\n dynamic_sidecar_settings\n )\n assert len(dynamic_services) == 1\n\n for entry in dynamic_services:\n assert entry.service_name == dynamic_sidecar_service_name\n\n\nasync def test_dynamic_sidecar_in_running_state_and_node_id_is_recovered(\n dynamic_sidecar_service_spec: Dict[str, Any],\n dynamic_sidecar_settings: DynamicSidecarSettings,\n cleanup_test_dynamic_sidecar_service: None,\n docker_swarm: None,\n) -> None:\n service_id = await docker_api.create_service_and_get_id(\n dynamic_sidecar_service_spec\n )\n assert service_id\n\n node_id = await docker_api.get_node_id_from_task_for_service(\n service_id, dynamic_sidecar_settings\n )\n assert node_id\n\n # after the node_id is recovered the service\n # will be in a running state\n dynamic_sidecar_state = await docker_api.get_dynamic_sidecar_state(service_id)\n assert dynamic_sidecar_state == (ServiceState.RUNNING, \"\")\n\n\nasync def test_are_services_missing(\n node_uuid: UUID,\n dynamic_sidecar_settings: DynamicSidecarSettings,\n dynamic_sidecar_stack_specs: List[Dict[str, Any]],\n cleanup_dynamic_sidecar_stack: None,\n docker_swarm: None,\n) -> None:\n\n services_are_missing = await docker_api.are_services_missing(\n node_uuid, 
dynamic_sidecar_settings\n )\n assert services_are_missing == True\n\n # start 2 fake services to emulate the dynamic-sidecar stack\n for dynamic_sidecar_stack in dynamic_sidecar_stack_specs:\n service_id = await docker_api.create_service_and_get_id(dynamic_sidecar_stack)\n assert service_id\n\n services_are_missing = await docker_api.are_services_missing(\n node_uuid, dynamic_sidecar_settings\n )\n assert services_are_missing == False\n\n\nasync def test_are_all_services_present(\n node_uuid: UUID,\n dynamic_sidecar_settings: DynamicSidecarSettings,\n dynamic_sidecar_stack_specs: List[Dict[str, Any]],\n cleanup_dynamic_sidecar_stack: None,\n docker_swarm: None,\n):\n services_are_missing = await docker_api.are_all_services_present(\n node_uuid, dynamic_sidecar_settings\n )\n assert services_are_missing == False\n\n # start 2 fake services to emulate the dynamic-sidecar stack\n for dynamic_sidecar_stack in dynamic_sidecar_stack_specs:\n service_id = await docker_api.create_service_and_get_id(dynamic_sidecar_stack)\n assert service_id\n\n services_are_missing = await docker_api.are_all_services_present(\n node_uuid, dynamic_sidecar_settings\n )\n assert services_are_missing == True\n\n\nasync def test_remove_dynamic_sidecar_stack(\n node_uuid: UUID,\n dynamic_sidecar_settings: DynamicSidecarSettings,\n dynamic_sidecar_stack_specs: List[Dict[str, Any]],\n docker_swarm: None,\n async_docker_client: aiodocker.docker.Docker,\n):\n assert (\n await _count_services_in_stack(\n node_uuid, dynamic_sidecar_settings, async_docker_client\n )\n == 0\n )\n\n # start 2 fake services to emulate the dynamic-sidecar stack\n for dynamic_sidecar_stack in dynamic_sidecar_stack_specs:\n service_id = await docker_api.create_service_and_get_id(dynamic_sidecar_stack)\n assert service_id\n\n assert (\n await _count_services_in_stack(\n node_uuid, dynamic_sidecar_settings, async_docker_client\n )\n == 2\n )\n\n await docker_api.remove_dynamic_sidecar_stack(node_uuid, dynamic_sidecar_settings)\n\n assert (\n await _count_services_in_stack(\n node_uuid, dynamic_sidecar_settings, async_docker_client\n )\n == 0\n )\n\n\nasync def test_remove_dynamic_sidecar_network(\n network_config: Dict[str, Any],\n simcore_services_network_name: str,\n docker_swarm: None,\n) -> None:\n network_ids = [await docker_api.create_network(network_config) for _ in range(10)]\n assert len(set(network_ids)) == 1\n\n delete_result = await docker_api.remove_dynamic_sidecar_network(\n simcore_services_network_name\n )\n assert delete_result is True\n\n\nasync def test_remove_dynamic_sidecar_network_fails(\n simcore_services_network_name: str, docker_swarm: None\n) -> None:\n delete_result = await docker_api.remove_dynamic_sidecar_network(\n simcore_services_network_name\n )\n assert delete_result is False\n\n\nasync def test_list_dynamic_sidecar_services(\n node_uuid: UUID,\n user_id: UserID,\n project_id: ProjectID,\n dynamic_sidecar_settings: DynamicSidecarSettings,\n dynamic_sidecar_stack_specs: List[Dict[str, Any]],\n cleanup_dynamic_sidecar_stack: None,\n docker_swarm: None,\n):\n # start 2 fake services to emulate the dynamic-sidecar stack\n for dynamic_sidecar_stack in dynamic_sidecar_stack_specs:\n service_id = await docker_api.create_service_and_get_id(dynamic_sidecar_stack)\n assert service_id\n\n services = await docker_api.list_dynamic_sidecar_services(\n dynamic_sidecar_settings, user_id=user_id, project_id=project_id\n )\n assert len(services) == 1\n\n\nasync def test_is_dynamic_service_running(\n node_uuid: UUID,\n 
dynamic_sidecar_settings: DynamicSidecarSettings,\n dynamic_sidecar_stack_specs: List[Dict[str, Any]],\n cleanup_dynamic_sidecar_stack: None,\n docker_swarm: None,\n) -> None:\n assert (\n await docker_api.is_dynamic_service_running(node_uuid, dynamic_sidecar_settings)\n is False\n )\n\n # start 2 fake services to emulate the dynamic-sidecar stack\n for dynamic_sidecar_stack in dynamic_sidecar_stack_specs:\n service_id = await docker_api.create_service_and_get_id(dynamic_sidecar_stack)\n assert service_id\n\n assert (\n await docker_api.is_dynamic_service_running(node_uuid, dynamic_sidecar_settings)\n is True\n )\n","sub_path":"services/director-v2/tests/unit/with_swarm/test_modules_dynamic_sidecar_docker_api.py","file_name":"test_modules_dynamic_sidecar_docker_api.py","file_ext":"py","file_size_in_byte":16488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"508313855","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport os\nimport wget\nfrom config import cfg as cfg_\n\n\nclass SSD(nn.Module):\n def __init__(self):\n super().__init__()\n # vgg16中conv1_1到conv4_3再加上一个pool两个conv\n self.vgg = vgg(vgg_base['300'],vgg_pretrain=True)\n # vgg300的特征缩放配置文件\n self.extras = add_extras(extras_base['300'])\n self.l2_norm = L2Norm(512, scale=20)\n self.cls_blocks,self.reg_blocks = cls_reg_blocks()\n\n def forward(self, x):\n # :x [10, 3, 300, 300] 输入图片\n # :return: [10, 8732, 18] [10, 8732, 4] SSD网络预测的修正系数与分类概率\n # target_labels (batch_size, num_anchors): 所有框的真实类别\n # target_locs (batch_size, num_anchors, 4): 所有框真实的位置\n features = []\n # vgg16的前23层\n for i in range(23):\n x = self.vgg[i](x)\n s = self.l2_norm(x) # x为现vgg网络最后一个maxpool之前的特征图 torch.Size([10, 512, 38, 38]) 也是后续第一个特征图\n features.append(s)\n # vgg16尾部魔改的部分\n for i in range(23, len(self.vgg)):\n x = self.vgg[i](x)\n features.append(x) # s.shape [10, 1024, 19, 19]\n # 特征缩放的部分\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n features.append(x)\n # features 最后是整个SSD中出现的6个特征图\n # [10, 512, 38, 38] [10, 1024, 19, 19] [10, 512, 10, 10] [10, 256, 5, 5] [10, 256, 3, 3] [10, 256, 1, 1]\n # 对输入的特征图中每个特征点进行分类及回归(不同特征图特征点对应的输出数是不一样的,以检测框数量为准)\n pred_cls = []\n pred_locs = []\n batch_size = features[0].shape[0]\n # 六个特征图与其对应的分类与定位卷积\n for feature, cls_block, reg_block in zip(features, self.cls_blocks, self.reg_blocks):\n pred_cls.append(cls_block(feature).permute(0, 2, 3, 1))\n pred_locs.append(reg_block(feature).permute(0, 2, 3, 1))\n # 将六个特征图每个特征点上的不同anchor预测得出的各类置信度合并到一起\n # [batch_size, num_anchors*num_classes]) -> [batch_size, num_anchors, num_classes]\n pred_cls = torch.cat([c.reshape(batch_size, -1) for c in pred_cls], dim=1).view(batch_size, -1, cfg_.num_classes)\n # 将六个特征图每个特征点上的不同anchor预测得出的各个修正系数合并到一起\n # [batch_size, num_anchors*4] -> [batch_size, num_anchors, 4]\n pred_locs = torch.cat([l.reshape(batch_size, -1) for l in pred_locs], dim=1).view(batch_size, -1, 4)\n return pred_locs, pred_cls\n\n\nvgg_base = { # vgg中第23层↓\n '300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M', 512, 512, 512],\n # '512': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M', 512, 512, 512],\n}\n\n\ndef vgg(cfg, vgg_pretrain=True):\n # 创建经过魔改的vgg特征提取层 :原生vgg16去fc层+一个pool两个conv\n layers = []\n in_channels = 3\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n elif v == 'C':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]\n else:\n conv2d = 
nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n # layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n # 采用没有bn的vgg\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)\n conv7 = nn.Conv2d(1024, 1024, kernel_size=1)\n layers += [pool5, conv6, nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]\n vgg_layers = nn.ModuleList(layers)\n # 是否加载已经训练好的模型\n if vgg_pretrain:\n # 加载已经训练好的vgg模型,不包括extras_base层,除非你从头开始训练.否则,这个模型可以不用下载\n url = 'https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth'\n # 下载路径\n weight_path = cfg_.vgg16_reducedfc\n if not os.path.exists(weight_path):\n print('模型不存在,下载中')\n wget.download(url=url, out=weight_path)\n print('下载完成')\n print(' --- load weight finish ---')\n vgg_layers.load_state_dict(torch.load(weight_path))\n return vgg_layers\n\n\nextras_base = {\n '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],\n # '512': [256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256],\n}\n\n\ndef add_extras(cfg):\n layers = [] # 额外添加的特征缩放层\n in_channels = 1024 # 是每次conv的输入通道数\n flag = False\n for k, v in enumerate(cfg):\n if in_channels != 'S':\n # 这里进行是否等于'S'的判断作用在于想在特征图19*19 -> 10*10以及10*10 -> 5*5时添加padding以使特征图尺寸能顺利减半\n if v == 'S':\n layers += [nn.Conv2d(in_channels, cfg[k + 1], kernel_size=(1, 3)[flag], stride=2, padding=1)]\n else:\n layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]\n flag = not flag\n in_channels = v\n # if size == 512: 如果是SSD512的话后面还需要添加两个conv\n # layers.append(nn.Conv2d(in_channels, 128, kernel_size=1, stride=1))\n # layers.append(nn.Conv2d(128, 256, kernel_size=4, stride=1, padding=1))\n return nn.ModuleList(layers)\n\n\ndef cls_reg_blocks():\n # 针对不同的特征图创建不同的定位和分类卷积,然后初始化其中的权重\n cls_blocks = nn.ModuleList()\n reg_blocks = nn.ModuleList()\n # 创建针对不同特征图的定位和分类卷积\n for anchors_per_feature, c_out in zip([4, 6, 6, 6, 4, 4], [512, 1024, 512, 256, 256, 256]):\n cls_blocks.append(nn.Conv2d(c_out, anchors_per_feature * cfg_.num_classes, kernel_size=3, stride=1, padding=1))\n reg_blocks.append(nn.Conv2d(c_out, anchors_per_feature * 4, kernel_size=3, stride=1, padding=1))\n # 参数初始化\n for ms in (cls_blocks,reg_blocks):\n for m in ms:\n nn.init.xavier_uniform_(m.weight)\n nn.init.zeros_(m.bias)\n return cls_blocks,reg_blocks\n\n\nclass L2Norm(nn.Module):\n def __init__(self, n_channels, scale):\n # 对于conv4_3后的特征图进行L2归一化,和普通的bn不同,它只针对与channels上的归一化,可以加快网络收敛\n # 详情参考 https://zhuanlan.zhihu.com/p/39399799\n super(L2Norm, self).__init__()\n self.gamma = scale or None\n self.eps = 1e-10\n self.weight = nn.Parameter(torch.Tensor(n_channels))\n nn.init.constant_(self.weight, self.gamma)\n\n def forward(self, x):\n norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps\n x = torch.div(x, norm)\n out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x\n return out\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"42004417","text":"import sys\n#This is the assignment that takes in the status code from the command line as an\n#argument and then prints all lines with that status code.\n#The nice thing about this is that we can use the same string split function\n#I have used on the other files to get the status code. 
It's the second to last piece.as\n\nfor n in sys.stdin.readlines():\n if int(n.split()[-2]) == int(sys.argv[1]):\n sys.stdout.write(str(n))\n sys.stdout.flush()\n\n","sub_path":"nasa_assignment/FLTR_WLOG_STATUS_CODES.py","file_name":"FLTR_WLOG_STATUS_CODES.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"549319795","text":"# Author: Tyler Sherrod\n# Created: 16 August 2020\n\n# Timber Analytic - Image Pre-processing script\n\n# This script allows the user to cycle through images, identifying specific objects in the image and designating their location in the image.\n\nimport os\nimport shutil\nimport sys\n\nimport tkinter as tk\n\nfrom PIL import Image, ImageTk\nfrom time import sleep\nfrom tkinter import filedialog, simpledialog\n\ndef createFolders(path, unitName):\n unitFolder = os.path.join(path, unitName)\n trainingFolder = os.path.join(unitFolder, 'Training Images')\n processedFolder = os.path.join(unitFolder, 'Processed Images')\n \n if os.path.isdir(unitFolder):\n # Unit folder exists, verify that subfolders exist\n if not os.path.isdir(trainingFolder):\n os.mkdir(trainingFolder)\n\n if not os.path.isdir(processedFolder):\n os.mkdir(processedFolder)\n \n else:\n # Unit folder doesn't exist, create and then create subfolders\n os.mkdir(unitFolder)\n os.mkdir(trainingFolder)\n os.mkdir(processedFolder)\n\n return [path, trainingFolder, processedFolder]\n\ndef userInput():\n try:\n path = filedialog.askdirectory()\n except:\n raise Exception('No appropriate folder was chosen')\n\n \n unitName = simpledialog.askstring('Lot Number', 'Input lot number for this batch of images.')\n\n if not unitName:\n raise Exception('No name was given for the lot number')\n \n\n return path, unitName\n\n\nclass MainApp:\n\n def __init__(self, window, folders):\n \n # Set up file paths\n self.mainFolder, self.trainingFolder, self.processedFolder = folders\n print(self.mainFolder)\n\n # Build list of images\n self.imageList = os.listdir(self.mainFolder)\n print(self.imageList)\n\n # Initial setup of GUI\n self.loadImage(setup=True)\n\n def LeftDown(self, event):\n x1 = event.x\n x2 = x1 + 1\n y1 = event.y\n y2 = y1 + 1\n\n currentCrop = self.main.create_rectangle(x1,y1,x2,y2, fill='', width=1.5, outline='gray90')\n # draw rectangle, first four args are x,y of top left followed by x,y of bottom right\n #EMTPY STRING IN THE 'FILL' FIELD MEANS TRANSPARENT!!!\n\n self.cropLocations.append([currentCrop, x1, y1])\n\n def LeftDrag(self, event):\n xOG, yOG = self.cropLocations[-1][1:]\n xNew = event.x\n yNew = event.y\n\n if xNew < xOG:\n x1 = xNew\n x2 = xOG\n elif xNew > xOG:\n x1 = xOG\n x2 = xNew\n else:\n x1, y1, x2, y2 = self.main.coords(self.cropLocations[-1][0])\n\n if yNew < yOG:\n y1 = yNew\n y2 = yOG\n elif yNew > yOG:\n y1 = yOG\n y2 = yNew\n else:\n x1, y1, x2, y2 = self.main.coords(self.cropLocations[-1][0])\n\n if x1 < 0:\n x1 = 0\n if y1 < 0:\n y1 = 0\n\n x1, y1, x2, y2 = self.makeSquare(x1, y1, x2, y2)\n\n self.main.coords(self.cropLocations[-1][0], (x1,y1,x2,y2))\n\n sleep(.05)\n\n def LeftUp(self, event):\n x1, y1, x2, y2 = self.main.coords(self.cropLocations[-1][0])\n\n print('Tree located at: {}, {}, {}, {}'.format(int(x1), int(y1), int(x2), int(y2)))\n\n def NextPress(self):\n\n # Crop image\n self.cropImage()\n\n if len(self.imageList) == 0:\n sys.exit(0)\n\n # Load next image to canvas\n # Returns -1 for unacceptable file types\n while self.loadImage() != 0:\n pass\n\n def UndoPress(self):\n 
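# Undo: remove the most recently drawn crop rectangle from the canvas and forget its record.\n        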
self.main.delete(self.cropLocations[-1][0])\n del self.cropLocations[-1]\n\n def DeletePress(self, event=None):\n self.UndoPress()\n\n def ReturnPress(self, event=None):\n self.DeletePress()\n\n def loadImage(self, setup=False):\n imageName = self.imageList[0]\n self.imageFile = os.path.join(self.mainFolder, imageName)\n\n print('Processing {}'.format(self.imageFile))\n\n _, ext = os.path.splitext(self.imageFile)\n if ext.lower() not in ['.png','.jpg','.jpeg']:\n print('Bypassing {}; {} is not an accepted file type'.format(imageName, ext))\n del self.imageList[0]\n return -1\n\n self.cropLocations = []\n\n #image = Image.open(self.imageFile)\n with Image.open(self.imageFile) as image:\n\n W, H = image.size\n print('Original image is {}x{}'.format(W,H))\n screenWidth = window.winfo_screenwidth()\n screenHeight = window.winfo_screenheight()\n\n if W > screenWidth or H > screenHeight:\n # Scale image so that largest dimensions fits within screen dimensions\n self.scaleFactor = min(screenWidth/W, screenHeight/H)\n\n W = round(W*self.scaleFactor)\n H = round(H*self.scaleFactor)\n\n print('Image was resized to {}x{}\\n'.format(W,H))\n\n image = image.resize((W, H), Image.ANTIALIAS)\n\n UndoButtonX = 10\n UndoButtonY = H - 130\n\n NextButtonX = W - 10\n NextButtonY = H - 130\n\n if setup:\n self.main = tk.Canvas(window, width=W, height=H)\n self.main.pack()\n\n self.main.image = image = ImageTk.PhotoImage(image)\n # this additional main.image is a way to prevent tkinter from trashing the image by\n # attaching it to an attribute of the canvas (would also work with window.image)\n # Why? idk. http://effbot.org/pyfaq/why-do-my-tkinter-images-not-appear.htm said to do this so I tried and it worked\n\n self.currentImage = self.main.create_image(0, 0, anchor='nw', image=image)\n # anchor seems to designate the location on the widget (in this case, image)\n # that the coordiantes are measured to\n\n # Insert buttons onto cavas:\n self.UndoButton = tk.Button(text = \"Undo\", command = self.UndoPress)\n self.UndoButton.configure(width=10, activebackground = \"#33B5E5\", relief=tk.FLAT)\n\n self.NextButton = tk.Button(text = \"Next\", command = self.NextPress)\n self.NextButton.configure(width = 10, activebackground = \"#33B5E5\", relief=tk.FLAT)\n\n self.UndoButtonWindow = self.main.create_window(UndoButtonX, UndoButtonY, anchor=tk.SW, window=self.UndoButton)\n self.NextButtonWindow = self.main.create_window(NextButtonX, NextButtonY, anchor=tk.SE, window=self.NextButton)\n\n self.main.bind('', self.LeftDown)\n self.main.bind(\"\", self.LeftDrag)\n self.main.bind(\"\", self.LeftUp)\n window.bind(\"\", self.ReturnPress)\n window.bind(\"\", self.DeletePress)\n\n else:\n # Adjust window size\n self.main.config(width=W, height=H)\n # Reposition Next Button\n self.NextButton.place(x=NextButtonX, y=NextButtonY, anchor=tk.SE)\n # Replace image\n self.main.image = image = ImageTk.PhotoImage(image)\n self.main.itemconfig(self.currentImage, image=image)\n\n del self.imageList[0]\n \n return 0\n\n def makeSquare(self, x1, y1, x2, y2):\n width = x2 - x1\n height = y2 - y1\n \n if width > height:\n y2 = y1 + width\n elif height > width:\n x2 = x1 + height\n \n return x1, y1, x2, y2\n\n def cropImage(self):\n\n _, img = os.path.split(self.imageFile)\n name, ext = os.path.splitext(img)\n \n for count, tree in enumerate(self.cropLocations):\n # Make a copy of the original image file\n newImageFile = self.trainingFolder + '/' + name + '-tree ' + str(count+1) + ext\n shutil.copyfile(self.imageFile, newImageFile)\n\n 
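# Each detected tree is cropped from a fresh copy of the original image file, so every\n            # saved crop comes from the full-resolution source rather than an already-cropped file.\n            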
print(newImageFile)\n\n newImage = Image.open(newImageFile)\n\n x1, y1, x2, y2 = self.main.coords(tree[0])\n\n # Rescale cropping locations to match original image file\n x1 = int(round(x1/self.scaleFactor))\n y1 = int(round(y1/self.scaleFactor))\n x2 = int(round(x2/self.scaleFactor))\n y2 = int(round(y2/self.scaleFactor))\n\n cropped_image = newImage.crop((x1, y1, x2, y2))\n\n #cropped_image.show()\n cropped_image.save(newImageFile)\n\n self.main.delete(tree[0])\n\n # Move processed file\n os.replace(self.imageFile, os.path.join(self.processedFolder, img))\n \n\n\nif __name__ == '__main__':\n \n # Set up initial window\n window = tk.Tk()\n \n # Prompy user for selection of folder location and lot identifier\n folderPath, unitName = userInput()\n\n # Check/build folders corresponding to image lot numbers\n folders = createFolders(folderPath, unitName)\n\n # Build main app and begin loop\n app = MainApp(window, folders)\n window.mainloop()","sub_path":"imageProcessing.py","file_name":"imageProcessing.py","file_ext":"py","file_size_in_byte":9096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"136500835","text":"#https://towardsdatascience.com/detecting-facial-features-using-deep-learning-2e23c8660a7a\r\n#prototype facital recognition as a boolean of face present/not present\r\n#https://gist.github.com/EncodeTS/6bbe8cb8bebad7a672f0d872561782d9 #keras implementation of face categorization/recognition\r\n#https://github.com/TadasBaltrusaitis/OpenFace.....not quit yet sure on this one, expression and eye tracking...also only installation instructions for linux\r\n#promising for implementation....https://github.com/informramiz/opencv-face-recognition-python/blob/master/OpenCV-Face-Recognition-Python.py\r\n#https://github.com/opencv/opencv/tree/master/data/haarcascades\r\n#http://truelogic.org/wordpress/2015/12/25/easy-face-detection-using-opencv-and-python/\r\n\r\n#import supporting libraries\r\nimport numpy as np\r\nimport cv2\r\nimport copy\r\n\r\n#load all haarcascade classifiers into independent objects\r\nface_default_cascade = cv2.CascadeClassifier('D:/carseatAPP/python/cv2files/haarcascade_frontalface_default.xml')\r\nface_tree_cascade = cv2.CascadeClassifier('D:/carseatAPP/python/cv2files/haarcascade_frontalface_alt_tree.xml')\r\nface_alt1_cascade = cv2.CascadeClassifier('D:/carseatAPP/python/cv2files/haarcascade_frontalface_alt.xml')\r\nface_alt2_cascade = cv2.CascadeClassifier('D:/carseatAPP/python/cv2files/haarcascade_frontalface_alt2.xml')\r\nface_profile_cascade = cv2.CascadeClassifier('D:/carseatAPP/python/cv2files/haarcascade_profileface.xml')\r\nface_eyes = cv2.CascadeClassifier('D:/carseatAPP/python/cv2files/haarcascade_eye.xml')\r\nface_eyes_glasses = cv2.CascadeClassifier('D:/carseatAPP/python/cv2files/haarcascade_eye_tree_eyeglasses.xml')\r\nface_eyes_left = cv2.CascadeClassifier('D:/carseatAPP/python/cv2files/haarcascade_lefteye_2splits.xml')\r\nface_eyes_right = cv2.CascadeClassifier('D:/carseatAPP/python/cv2files/haarcascade_righteye_2splits.xml')\r\nbody_full = cv2.CascadeClassifier('D:/carseatAPP/python/cv2files/haarcascade_fullbody.xml')\r\nbody_upper = cv2.CascadeClassifier('D:/carseatAPP/python/cv2files/haarcascade_upperbody.xml')\r\nbody_lower = cv2.CascadeClassifier('D:/carseatAPP/python/cv2files/haarcascade_lowerbody.xml')\r\n\r\n\r\nthis_path = \"D:/carseatAPP/trainingPics/train/baby/\"\r\nthis_name = \"download4.jpg\"\r\nthis_full_path = this_path + this_name\r\nwrite_path = 
\"D:/carseatAPP/trainingPics/output1/\"\r\nthis_full_output = write_path + this_name\r\n\r\n#def detectbaby(imagePath, haar_classifier):\r\n\r\n\r\n\r\nimg = cv2.imread(this_full_path)\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\nimg2 = copy.copy(img)\r\ngray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\r\n\r\nfaces = face_default_cascade.detectMultiScale(gray, 1.25, 6)\r\nfor (x,y,w,h) in faces:\r\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\r\n roi_gray = gray[y:y+h, x:x+w]\r\n roi_color = img[y:y+h, x:x+w]\r\n eyes = face_eyes.detectMultiScale(roi_gray)\r\n for (ex,ey,ew,eh) in eyes:\r\n cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\r\n \r\ncv2.imwrite(this_full_output, img)\r\ncv2.startWindowThread()\r\ncv2.namedWindow(\"face\")\r\n#cv2.imshow(\"face\", grayscale_image[y:y+w, x:x+h]);cv2.waitKey(0); cv2.destroyAllWindows()\r\ncv2.imshow(\"face\", img);cv2.waitKey(0); cv2.destroyAllWindows()\r\n\r\nfaces2 = face_profile_cascade.detectMultiScale(gray2, 1.25, 6)\r\nfor (x,y,w,h) in faces:\r\n cv2.rectangle(img2,(x,y),(x+w,y+h),(255,0,0),2)\r\n roi_gray = gray[y:y+h, x:x+w]\r\n roi_color = img[y:y+h, x:x+w]\r\n eyes = eye_cascade.detectMultiScale(roi_gray)\r\n for (ex,ey,ew,eh) in eyes:\r\n cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\r\n \r\ncv2.imwrite(\"/home/amit/Downloads/facedetectprofile.png\", img2)\r\n\r\n\r\n\r\n#MERGE THE TWO LISTS HERE\r\n\r\n\r\n\r\n\r\ndetectbaby(this_path, face_default_cascade)\r\n\r\n","sub_path":"opencv_boundingbox2.py","file_name":"opencv_boundingbox2.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"623275931","text":"# vim: set fileencoding=utf-8 :\nfrom nose.tools import *\nimport unittest\nimport os\nimport sys\nfrom datetime import datetime\n\nfrom helpers import create_osm_file, osmobj, HandlerTestBase, check_repr\n\nimport osmium as o\n\nclass TestLength(HandlerTestBase, unittest.TestCase):\n data = \"\"\"\\\n r2 Mn3@\n r4\n r45 Mw1@fo,r45@4,r45@5\n \"\"\"\n\n class Handler(o.SimpleHandler):\n expected_length = { 2 : 1, 4 : 0, 45 : 3 }\n\n def relation(self, r):\n assert_equals(self.expected_length[r.id], len(r.members))\n\nclass TestMembers(HandlerTestBase, unittest.TestCase):\n data = u\"\"\"r34 Mn23@,n12@foo,w5@.,r34359737784@(ü)\"\"\"\n\n class Handler(o.SimpleHandler):\n\n def relation(self, r):\n m = list(r.members)\n eq_(4, len(m))\n eq_(23, m[0].ref)\n eq_('n', m[0].type)\n eq_('', m[0].role)\n eq_(12, m[1].ref)\n eq_('n', m[1].type)\n eq_('foo', m[1].role)\n eq_(5, m[2].ref)\n eq_('w', m[2].type)\n eq_('.', m[2].role)\n eq_(34359737784, m[3].ref)\n eq_('r', m[3].type)\n eq_(u'(ü)', m[3].role)\n assert_true(check_repr(m))\n","sub_path":"test/test_memberlist.py","file_name":"test_memberlist.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"642371914","text":"#!/usr/bin/env python\n'''\nCreated on Jun 8, 2013\n\n@author: mmartin\n'''\n\nfrom gi.repository import Gtk\nfrom CDMIConstants.constants import(\n ABOUT_DIALOG,\n ABOUT_UI\n)\n\n\nclass CDMIAbout(object):\n\n def __init__(self, session):\n '''\n Display the About dialog\n '''\n self.session = session\n builder = Gtk.Builder()\n builder.add_from_file(ABOUT_UI % self.session.ppath)\n about = builder.get_object(ABOUT_DIALOG)\n about.run()\n 
about.hide()\n","sub_path":"scripts/cdmi_explorer/CDMIAbout/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"443195265","text":"import os\nimport time\nimport random\n\nimport numpy as np\nimport pandas as pd\n\nimport torch\n\nfrom pathlib import Path\n\nfrom ESRNN.utils.config import ModelConfig\nfrom ESRNN.utils.losses import DisaggregatedPinballLoss\nfrom ESRNN.utils.data import Iterator\n\nfrom ESRNN.ESRNN import ESRNN\n\nfrom ESRNN.utils_evaluation import owa\n\nclass ESRNNensemble(object):\n \"\"\" Exponential Smoothing Recursive Neural Network Ensemble.\n n_models=1\n n_top=1\n \"\"\"\n def __init__(self, n_models=1, n_top=1, max_epochs=15, batch_size=1, batch_size_test=128,\n freq_of_test=-1, learning_rate=1e-3, lr_scheduler_step_size=9, lr_decay=0.9,\n per_series_lr_multip=1.0, gradient_eps=1e-8, gradient_clipping_threshold=20,\n rnn_weight_decay=0, noise_std=0.001, level_variability_penalty=80,\n testing_percentile=50, training_percentile=50, ensemble=False, cell_type='LSTM',\n state_hsize=40, dilations=[[1, 2], [4, 8]],\n add_nl_layer=False, seasonality=[4], input_size=4, output_size=8,\n frequency='D', max_periods=20, random_seed=1,\n device='cuda', root_dir='./'):\n super(ESRNNensemble, self).__init__()\n\n self.n_models = n_models\n self.n_top = n_top\n assert n_models>=2, \"Number of models for ensemble should be greater than 1\"\n assert n_top<=n_models, \"Number of top models should be smaller than models to ensemble\"\n self.big_float = 1e6\n self.mc = ModelConfig(max_epochs=max_epochs, batch_size=batch_size, batch_size_test=batch_size_test,\n freq_of_test=freq_of_test, learning_rate=learning_rate,\n lr_scheduler_step_size=lr_scheduler_step_size, lr_decay=lr_decay,\n per_series_lr_multip=per_series_lr_multip,\n gradient_eps=gradient_eps, gradient_clipping_threshold=gradient_clipping_threshold,\n rnn_weight_decay=rnn_weight_decay, noise_std=noise_std,\n level_variability_penalty=level_variability_penalty,\n testing_percentile=testing_percentile, training_percentile=training_percentile,\n ensemble=ensemble, cell_type=cell_type,\n state_hsize=state_hsize, dilations=dilations, add_nl_layer=add_nl_layer,\n seasonality=seasonality, input_size=input_size, output_size=output_size,\n frequency=frequency, max_periods=max_periods, random_seed=random_seed,\n device=device, root_dir=root_dir)\n self._fitted = False\n\n def fit(self, X_df, y_df, X_test_df=None, y_test_df=None, shuffle=True):\n # Transform long dfs to wide numpy\n assert type(X_df) == pd.core.frame.DataFrame\n assert type(y_df) == pd.core.frame.DataFrame\n assert all([(col in X_df) for col in ['unique_id', 'ds', 'x']])\n assert all([(col in y_df) for col in ['unique_id', 'ds', 'y']])\n\n # Storing dfs for OWA evaluation, initializing min_owa\n self.y_train_df = y_df\n self.X_test_df = X_test_df\n self.y_test_df = y_test_df\n self.min_owa = 4.0\n self.min_epoch = 0\n\n # Exogenous variables\n unique_categories = X_df['x'].unique()\n self.mc.category_to_idx = dict((word, index) for index, word in enumerate(unique_categories))\n self.mc.exogenous_size = len(unique_categories)\n\n self.unique_ids = X_df['unique_id'].unique()\n self.mc.n_series = len(self.unique_ids)\n\n # Set seeds\n torch.manual_seed(self.mc.random_seed)\n np.random.seed(self.mc.random_seed)\n\n # Initial series random assignment to models\n self.series_models_map = np.zeros((self.mc.n_series, self.n_models))\n 
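# series_models_map is an (n_series x n_models) 0/1 assignment matrix: entry [i, j] == 1\n        # means series i is currently trained by (and later predicted with) model j, e.g. for\n        # 3 series and 2 models an assignment could look like [[1, 0], [1, 1], [0, 1]].\n        # Initially roughly half of the models are sampled per series; after each epoch only\n        # the n_top best-performing models are kept per series.\n        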
n_initial_models = int(np.ceil(self.n_models/2))\n for i in range(self.mc.n_series):\n id_models = np.random.choice(self.n_models, n_initial_models)\n self.series_models_map[i,id_models] = 1\n\n self.esrnn_ensemble = []\n for _ in range(self.n_models):\n esrnn = ESRNN(max_epochs=self.mc.max_epochs, batch_size=self.mc.batch_size, batch_size_test=self.mc.batch_size_test,\n freq_of_test=-1, learning_rate=self.mc.learning_rate,\n lr_scheduler_step_size=self.mc.lr_scheduler_step_size, lr_decay=self.mc.lr_decay,\n per_series_lr_multip=self.mc.per_series_lr_multip,\n gradient_eps=self.mc.gradient_eps, gradient_clipping_threshold=self.mc.gradient_clipping_threshold,\n rnn_weight_decay=self.mc.rnn_weight_decay, noise_std=self.mc.noise_std,\n level_variability_penalty=self.mc.level_variability_penalty,\n testing_percentile=self.mc.testing_percentile,\n training_percentile=self.mc.training_percentile, ensemble=self.mc.ensemble,\n cell_type=self.mc.cell_type,\n state_hsize=self.mc.state_hsize, dilations=self.mc.dilations, add_nl_layer=self.mc.add_nl_layer,\n seasonality=self.mc.seasonality, input_size=self.mc.input_size, output_size=self.mc.output_size,\n frequency=self.mc.frequency, max_periods=self.mc.max_periods, random_seed=self.mc.random_seed,\n device=self.mc.device, root_dir=self.mc.root_dir)\n\n # To instantiate _ESRNN object within ESRNN class we need n_series\n esrnn.instantiate_esrnn(self.mc.exogenous_size, self.mc.n_series)\n esrnn._fitted = True\n self.esrnn_ensemble.append(esrnn)\n\n self.X, self.y = esrnn.long_to_wide(X_df, y_df)\n assert len(self.X)==len(self.y)\n assert self.X.shape[1]>=3\n\n # Train model\n self._fitted = True\n self.train()\n\n def train(self):\n # Initial performance matrix\n self.performance_matrix = np.ones((self.mc.n_series, self.n_models)) * self.big_float\n warm_start = False\n train_tau = self.mc.training_percentile/100\n criterion = DisaggregatedPinballLoss(train_tau)\n\n # Train epoch loop\n for epoch in range(self.mc.max_epochs):\n start = time.time()\n\n # Solve degenerate models\n for model_id in range(self.n_models):\n if np.sum(self.series_models_map[:,model_id])==0:\n print('Reassigning random series to model ', model_id)\n n_sample_series= int(self.mc.n_series/2)\n index_series = np.random.choice(self.mc.n_series, n_sample_series, replace=False)\n self.series_models_map[index_series, model_id] = 1\n\n # Model loop\n for model_id, esrnn in enumerate(self.esrnn_ensemble):\n # Train model with subset data\n dataloader = Iterator(mc = self.mc, X=self.X, y=self.y,\n weights=self.series_models_map[:, model_id])\n esrnn.train(dataloader, max_epochs=1, warm_start=warm_start, shuffle=True, verbose=False)\n\n # Compute model performance for each series\n dataloader = Iterator(mc=self.mc, X=self.X, y=self.y)\n per_series_evaluation = esrnn.per_series_evaluation(dataloader, criterion=criterion)\n self.performance_matrix[:, model_id] = per_series_evaluation\n\n # Reassign series to models\n self.series_models_map = np.zeros((self.mc.n_series, self.n_models))\n top_models = np.argpartition(self.performance_matrix, self.n_top)[:, :self.n_top]\n for i in range(self.mc.n_series):\n self.series_models_map[i, top_models[i,:]] = 1\n\n warm_start = True\n\n print(\"========= Epoch {} finished =========\".format(epoch))\n print(\"Training time: {}\".format(round(time.time()-start, 5)))\n self.train_loss = np.einsum('ij,ij->i',self.performance_matrix, self.series_models_map)/self.n_top\n self.train_loss = np.mean(self.train_loss)\n print(\"Training loss ({} prc): 
{:.5f}\".format(self.mc.training_percentile,\n self.train_loss))\n print('Models num series', np.sum(self.series_models_map, axis=0))\n\n if (epoch % self.mc.freq_of_test == 0) and (self.mc.freq_of_test > 0):\n if self.y_test_df is not None:\n self.evaluate_model_prediction(self.y_train_df, self.X_test_df,\n self.y_test_df, epoch=epoch)\n print('Train finished! \\n')\n\n def predict(self, X_df):\n \"\"\"\n Predictions for all stored time series\n Returns:\n Y_hat_panel : array-like (n_samples, 1).\n Predicted values for models in Family for ids in Panel.\n ds: Corresponding list of date stamps\n unique_id: Corresponding list of unique_id\n \"\"\"\n assert type(X_df) == pd.core.frame.DataFrame\n assert 'unique_id' in X_df\n assert self._fitted, \"Model not fitted yet\"\n\n dataloader = Iterator(mc=self.mc, X=self.X, y=self.y)\n\n output_size = self.mc.output_size\n n_unique_id = len(dataloader.sort_key['unique_id'])\n\n ensemble_y_hat = np.zeros((self.n_models, n_unique_id, output_size))\n\n for model_id, esrnn in enumerate(self.esrnn_ensemble):\n esrnn.esrnn.eval()\n\n # Predict ALL series\n count = 0\n for j in range(dataloader.n_batches):\n batch = dataloader.get_batch()\n batch_size = batch.y.shape[0]\n\n y_hat = esrnn.esrnn.predict(batch)\n\n y_hat = y_hat.data.cpu().numpy()\n\n ensemble_y_hat[model_id, count:count+batch_size, :] = y_hat\n count += batch_size\n\n # Weighted average of prediction for n_top best models per series\n # (n_models x n_unique_id x output_size) (n_unique_id x n_models)\n y_hat = np.einsum('ijk,ji->jk', ensemble_y_hat, self.series_models_map) / self.n_top\n y_hat = y_hat.flatten()\n\n panel_unique_id = pd.Series(dataloader.sort_key['unique_id']).repeat(output_size)\n panel_last_ds = pd.Series(dataloader.X[:, 2]).repeat(output_size)\n\n panel_delta = list(range(1, output_size+1)) * n_unique_id\n panel_delta = pd.to_timedelta(panel_delta, unit=self.mc.frequency)\n panel_ds = panel_last_ds + panel_delta\n\n assert len(panel_ds) == len(y_hat) == len(panel_unique_id)\n\n Y_hat_panel_dict = {'unique_id': panel_unique_id,\n 'ds': panel_ds,\n 'y_hat': y_hat}\n\n Y_hat_panel = pd.DataFrame.from_dict(Y_hat_panel_dict)\n\n if 'ds' in X_df:\n Y_hat_panel = X_df.merge(Y_hat_panel, on=['unique_id', 'ds'], how='left')\n else:\n Y_hat_panel = X_df.merge(Y_hat_panel, on=['unique_id'], how='left')\n\n return Y_hat_panel\n\n def evaluate_model_prediction(self, y_train_df, X_test_df, y_test_df, epoch=None):\n \"\"\"\n y_train_df: pandas df\n panel with columns unique_id, ds, y\n X_test_df: pandas df\n panel with columns unique_id, ds, x\n y_test_df: pandas df\n panel with columns unique_id, ds, y, y_hat_naive2\n model: python class\n python class with predict method\n \"\"\"\n assert self._fitted, \"Model not fitted yet\"\n\n y_panel = y_test_df.filter(['unique_id', 'ds', 'y'])\n y_naive2_panel = y_test_df.filter(['unique_id', 'ds', 'y_hat_naive2'])\n y_naive2_panel.rename(columns={'y_hat_naive2': 'y_hat'}, inplace=True)\n y_hat_panel = self.predict(X_test_df)\n y_insample = y_train_df.filter(['unique_id', 'ds', 'y'])\n\n model_owa, model_mase, model_smape = owa(y_panel, y_hat_panel,\n y_naive2_panel, y_insample,\n seasonality=self.mc.naive_seasonality)\n\n if self.min_owa > model_owa:\n self.min_owa = model_owa\n if epoch is not None:\n self.min_epoch = epoch\n\n print('OWA: {} '.format(np.round(model_owa, 3)))\n print('SMAPE: {} '.format(np.round(model_smape, 3)))\n print('MASE: {} '.format(np.round(model_mase, 3)))\n\n return model_owa, model_mase, 
model_smape\n","sub_path":"ESRNN/ESRNNensemble.py","file_name":"ESRNNensemble.py","file_ext":"py","file_size_in_byte":11653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"456417418","text":"import json\nimport pymysql\nimport urllib\nfrom decimal import *\nfrom datetime import datetime\nfrom Condition import Condition\nfrom JSONBuilder import JSONBuilder\n\n#This lambda functions receives a scenario Id and then connects to several different sources to then evaluate the\n#points and data associated with that scenario\ndef lambda_handler(event, context):\n \n scenarioId = event['id']\n \n ListOfSnapshots = []\n \n conn = pymysql.connect(host='test1.ce8cn9mhhgds.us-east-1.rds.amazonaws.com', user='Wallen', passwd='MyRDSdb1', db='whattodo')\n myCursor = conn.cursor()\n \n myCursor.execute(\"\"\"SELECT ConditionFiveName, ConditionFourName, ConditionThreeName, ConditionTwoName, ConditionOneName\n FROM Scenario WHERE Id = %s\"\"\", (scenarioId))\n tplConditions = myCursor.fetchall()\n lstConditions = list(tplConditions[0])\n \n \n myCursor.execute(\"\"\"SELECT ConditionFiveType, ConditionFourType, ConditionThreeType, ConditionTwoType, ConditionOneType\n FROM Scenario WHERE Id = %s\"\"\", (scenarioId))\n tplConTypes = myCursor.fetchall()\n \n myCursor.execute(\"\"\"SELECT ConditionFivePreference, ConditionFourPreference, ConditionThreePreference, ConditionTwoPreference, ConditionOnePreference\n FROM Scenario WHERE Id = %s\"\"\", (scenarioId))\n tplPref = myCursor.fetchall()\n \n \n myCursor.execute(\"\"\"SELECT ConditionFiveRank, ConditionFourRank, ConditionThreeRank, ConditionTwoRank, ConditionOneRank\n FROM Scenario WHERE Id = %s\"\"\", (scenarioId))\n tplConRanks = myCursor.fetchall()\n \n \n lstCurrentStatus = []\n lstConEvalTypes = []\n \n \n w = 0\n for i in tplConditions[0]:\n if (i == None):\n continue\n \n else: \n myCursor.execute(\"\"\"SELECT EvaluationType FROM \"\"\" + tplConTypes[0][w] + \"\"\" WHERE `Name` = %s\"\"\", (i))\n myEvalItems = myCursor.fetchall()\n \n #print(\"myitems: \" + str(myItems))\n \n lstConEvalTypes.append(myEvalItems[0][0])\n \n w = w + 1\n \n \"\"\"\n print('Names:')\n print(tplConditions)\n print('---------')\n print('Preferences:')\n print(tplPref)\n print('---------')\n print('Types:')\n print(tplConTypes)\n print('---------')\n print('Rank')\n print(tplConRanks)\n print('---------')\n \"\"\"\n x = 0\n while x < 40:\n \n SnapList = []\n z = 0\n for i in tplConditions[0]:\n if (i == None):\n z = z + 1\n continue\n \n else:\n myCursor.execute(\"\"\"SELECT CurrentStatus FROM \"\"\" + tplConTypes[0][z] + \"\"\" WHERE `Name` = %s AND TimeGroupId = %s\"\"\", (i, x))\n myCurrentStatuses = myCursor.fetchall()\n \n myCursor.execute(\"\"\"SELECT TimeFrame FROM \"\"\" + tplConTypes[0][z] + \"\"\" WHERE `Name` = %s AND TimeGroupId = %s\"\"\", (i, x))\n myTimeFrames = myCursor.fetchall()\n \n myCursor.execute(\"\"\"SELECT ConditionDateTime FROM \"\"\" + tplConTypes[0][z] + \"\"\" WHERE `Name` = %s AND TimeGroupId = %s\"\"\", (i, x))\n myConditionDateTimes = myCursor.fetchall()\n \n #print('inner loop for each condition: ' + str(z))\n \n #print(tplConditions[0][z])\n #print(tplConTypes[0][z])\n #print(tplPref[0][z])\n #print('Current Status')\n #print(myCurrentStatuses[0][0])\n #print(lstConEvalTypes[z])\n #print(tplConRanks[0][z])\n #print(myTimeFrames[0][0])\n \n CurrentCondition = Condition(tplConditions[0][z], tplConTypes[0][z], tplPref[0][z],\n myCurrentStatuses[0][0], lstConEvalTypes[z], tplConRanks[0][z],\n 
myTimeFrames[0][0], myConditionDateTimes[0][0])\n \"\"\"\n print('------')\n print(CurrentCondition.Name)\n print(CurrentCondition.Type)\n print(CurrentCondition.Preference)\n print(CurrentCondition.CurrentStatus)\n print(CurrentCondition.Rank)\n print(CurrentCondition.TimeFrame)\n print(CurrentCondition.EvalType)\n print(CurrentCondition.ConditionDateTime)\n \"\"\"\n \n SnapList.append(CurrentCondition)\n \n z = z + 1\n \n \n ListOfSnapshots.append(SnapList)\n x = x + 1\n \n JSONInit = {}\n keyValueList = []\n \n \n #print(ListOfSnapshots)\n #print(ListOfSnapshots[0][1].Preference) #The List of list of condition objects is being correctly generated\n \n \n #print(\"List of Snapshots[4][0].Evaluate(): \")\n #print(ListOfSnapshots[4][0])\n #print(ListOfSnapshots[23][2].Evaluate())\n #print(ListOfSnapshots[4][3].Evaluate())\n \n h = 0\n for c in ListOfSnapshots: \n \n #print(h)\n \n #print('type of object in c')\n #print(c)\n #print(type(c[0].Name))\n \n PointSum = 0\n \n Values = {}\n Preferences = {}\n Ranks = {}\n \n \n d = 0\n while (d < len(c)):\n #print(\"Does this look to be lining up: \")\n #print(\"Preference: \" + c[d].Preference)\n #print(\"Current Status: \" + c[d].CurrentStatus)\n \n namelist = len(c)\n \n \n Values.update({c[d].Name : c[d].CurrentStatus})\n Preferences.update({c[d].Name : c[d].Preference})\n Ranks.update({c[d].Name : c[d].Rank})\n #print(Values)\n \n \n \n currentpoints = c[d].Evaluate()\n #print(\"current points: \" + str(currentpoints))\n \n PointSum = PointSum + currentpoints\n d = d + 1\n \n \n #print('PointSum = ' + str(PointSum))\n \n #print(c[4])\n \n #points = c[h].Evaluate()\n #print('points: ' + str(points))\n \n #TODO: APPEND ALL CURRENT VALUES TO THE END OF VALUES\n \n keyValueList.append({\"TimeGroupId\": h,\"Scenario Snap-Shot Points\": PointSum, \"TimeFrame\": ListOfSnapshots[h][0].TimeFrame, \"Condition Time\":\n str(ListOfSnapshots[h][0].ConditionDateTime), \"Status Values\": Values, \"Status Preferences\" : Preferences, \"Status Ranks\" : Ranks})\n \n h = h + 1\n \n \n print(keyValueList)\n \n return(keyValueList)\n #print(ListOfSnapshots)\n\n\"\"\"\nevent = {\n \"id\": 1\n}\ncontext = 'c'\n\nx = lambda_handler(event, context)\n\"\"\"","sub_path":"WhatDoCompleteEvalScenario.py","file_name":"WhatDoCompleteEvalScenario.py","file_ext":"py","file_size_in_byte":6847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"8740359","text":"#\n# mutates a sequence according to the Jukes-Cantor model\n# X is an numpy.array with entries in {0,1,2,3}\n# t is branch length\n# mu is mutation rate\n#\n\n\ndef mutate(X, t, mu):\n import numpy.random as rand\n L = len(X)\n mutatedSeq = X.copy()\n\n numMutation = rand.poisson(L * mu * t)\n for i in range(numMutation):\n site = rand.randint(0, L)\n mutatedSeq[site] = rand.randint(0, 4)\n\n return mutatedSeq\n","sub_path":"A4-HMMsAndTrees/mutate.py","file_name":"mutate.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"297134749","text":"import os\nimport sys\nfrom urllib.request import urlopen\nfrom functools import cmp_to_key\n\nfrom datetime import date, datetime\n\n#TODO: replace the following definitions with values from the Setting table or from the .ini file\nfrom .properties import (\n PROJECT_TITLE,\n FINAL_ID,\n FINAL_DEADLINE,\n ADMINS,\n GROUP_IDS \n )\n\nfrom paginate import Page\ntry:\n from webhelpers.paginate import PageURL_WebOb as PageURL\nexcept: 
\n\n from urllib.parse import urlencode\n\n # Extracted of former webhelpers 1.3 and adapted for Python3\n def make_page_url(path, params, page, partial=False, sort=True):\n \"\"\"A helper function for URL generators.\n\n I assemble a URL from its parts. I assume that a link to a certain page is\n done by overriding the 'page' query parameter.\n\n ``path`` is the current URL path, with or without a \"scheme://host\" prefix.\n\n ``params`` is the current query parameters as a dict or dict-like object.\n\n ``page`` is the target page number.\n\n If ``partial`` is true, set query param 'partial=1'. This is to for AJAX\n calls requesting a partial page.\n\n If ``sort`` is true (default), the parameters will be sorted. Otherwise\n they'll be in whatever order the dict iterates them.\n \"\"\"\n params = params.copy()\n params[\"page\"] = page\n if partial:\n params[\"partial\"] = \"1\"\n if sort:\n params = sorted(params.items())\n qs = urlencode(params, True)\n return \"%s?%s\" % (path, qs)\n\n class PageURL(object):\n \"\"\"A page URL generator for WebOb-compatible Request objects.\n \n I derive new URLs based on the current URL but overriding the 'page'\n query parameter.\n\n I'm suitable for Pyramid, Pylons, and TurboGears, as well as any other\n framework whose Request object has 'application_url', 'path', and 'GET'\n attributes that behave the same way as ``webob.Request``'s.\n \"\"\"\n \n def __init__(self, request, qualified=False):\n \"\"\"\n ``request`` is a WebOb-compatible ``Request`` object.\n\n If ``qualified`` is false (default), generated URLs will have just the\n path and query string. If true, the \"scheme://host\" prefix will be\n included. The default is false to match traditional usage, and to avoid\n generating unuseable URLs behind reverse proxies (e.g., Apache's\n mod_proxy). \n \"\"\"\n self.request = request\n self.qualified = qualified\n\n def __call__(self, page, partial=False):\n \"\"\"Generate a URL for the specified page.\"\"\"\n if self.qualified:\n path = self.request.application_url\n else:\n path = self.request.path\n return make_page_url(path, self.request.GET, page, partial)\n \n\nfrom pyramid.response import Response\n\nfrom pyramid.view import (\n view_config, \n forbidden_view_config, \n notfound_view_config\n )\nfrom pyramid.renderers import render\nfrom pyramid.url import route_url\n\nfrom pyramid.security import (\n remember, \n forget\n )\n\nfrom pyramid.httpexceptions import (\n HTTPFound,\n HTTPNotFound\n )\n\nimport formencode\nfrom pyramid_simpleform import Form\nfrom pyramid_simpleform.renderers import FormRenderer\n\nfrom sqlalchemy.schema import MetaData\nfrom sqlalchemy.exc import DBAPIError\nfrom sqlalchemy.ext.serializer import (\n dumps as dump_table,\n loads as load_table\n )\n\nfrom .models import (\n DBSession,\n Setting,\n Player,\n Rank,\n Category,\n Team,\n TeamGroup,\n Match,\n Final,\n Tip\n )\n\nfrom . import scoring\n\n\n# determine the local IP address to access this game\nremote_server = Setting.get('result_server')\nRESULTSERVER = remote_server.d_value if remote_server else 'wm2018.rolotec.ch'\nRESULTPAGE = 'http://%s/results' % RESULTSERVER\nlocal_host = 'localhost'\nlocal_port = 8080 #TODO: extract port number from the server settings\ntry:\n from socket import create_connection\n s = create_connection((RESULTSERVER, 80))\n local_host = s.getsockname()[0]\n s.close()\nexcept:\n\tpass\nGAME_URL = 'http://%s:%d' % (local_host, local_port)\n\n\ndef items_per_page(request):\n \"\"\" Determine the pagination unit. 
This unit is determined\n as follows (in decreasing precedence):\n - from the request parameter 'items_per_page'\n - from the Setting table's entry named 'items_per_page'\n If none of the above matches or cannot be converted to\n an integer the default of 10 is returned.\n @return Number of items per page, default 10.\n \"\"\"\n try:\n # expect exception, if param is missing or has a non-numeric value\n return int(request.params['items_per_page'])\n except: \n try:\n setting = Setting.get('items_per_page')\n return int(setting.d_value)\n except:\n pass\n else:\n pass\n return 10\n\ndef get_int_param(request, param, default=None):\n \"\"\" @return Numerical value of named parameter. \"\"\"\n try:\n return int(request.params[param])\n except:\n return default\n\ndef game_over():\n return datetime.now() >= datetime(2018,7,16)\n\ndef login_form_view(request):\n return render('templates/login.pt',\n { 'loggedin': request.authenticated_userid },\n request)\n\ndef navigation_view(request):\n return render('templates/navigation.pt',\n { 'categories': sorted(Player.get_units()),\n 'game_over': game_over(),\n 'is_admin': request.authenticated_userid in ADMINS,\n 'viewer_username': request.authenticated_userid,\n 'login_form': login_form_view(request) },\n request) if 'nonav' not in request.params else None\n\n@forbidden_view_config()\ndef forbidden(request):\n return Response(body=render('templates/forbidden.pt',\n { 'project': PROJECT_TITLE,\n 'navigation': navigation_view(request) },\n request));\n\n@notfound_view_config()\ndef notfound(request):\n return Response(body=render('templates/notfound.pt',\n { 'project': PROJECT_TITLE,\n 'detail': request.exception.detail if request.exception else \"no details\",\n 'navigation': navigation_view(request) },\n request),\n status='404 Not Found');\n\n@view_config(permission='view', route_name='home', renderer='templates/main.pt')\ndef view_game(request):\n return { 'project': PROJECT_TITLE,\n 'game_url': GAME_URL,\n 'final_deadline': FINAL_DEADLINE,\n 'game_over': game_over(),\n 'viewer_username': request.authenticated_userid,\n 'navigation': navigation_view(request) }\n\n@view_config(permission='view', route_name='about', renderer='templates/about.pt')\ndef about_view(request):\n return { 'project': PROJECT_TITLE,\n 'navigation': navigation_view(request) }\n\n@view_config(permission='view', route_name='help', renderer='templates/help.pt')\ndef help_view(request):\n return { 'project': PROJECT_TITLE,\n 'contact': Setting.get('admin_mail').d_value,\n 'navigation': navigation_view(request) }\n\n@view_config(permission='view', route_name='infoscreen', renderer='templates/infoscreen.pt')\ndef infoscreen(request):\n return { 'project': PROJECT_TITLE,\n 'game_url': GAME_URL,\n 'final_deadline': FINAL_DEADLINE,\n 'viewer_username': None,\n 'navigation': None,\n 'params': request.params }\n\n@view_config(permission='view', route_name='results', renderer='json')\ndef results(request):\n \"\"\" Generate a list of scores for all played matches and the stage 2 team names. 
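The JSON payload has two dicts keyed by match id: 'matches' with the stage-2 team names\n    and 'scores' with the final scores of matches already played.\n    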
\"\"\"\n matches = {}\n for match in Match.get_stage2():\n matches[match.d_id] = { \"team1\": match.d_team1, \"team2\": match.d_team2 }\n scores = {}\n for match in Match.get_played():\n scores[match.d_id] = { \"score1\": match.d_score1, \"score2\": match.d_score2 }\n return { 'matches': matches,\n 'scores': scores }\n\n@view_config(permission='view', route_name='scoring', renderer='templates/scoring.pt')\ndef scoring_view(request):\n return { 'project': PROJECT_TITLE,\n 'num_matches': DBSession.query(Match).count(),\n 'scoring': scoring.BET_POINTS,\n 'navigation': navigation_view(request) }\n\n@view_config(permission='view', route_name='score_table', renderer='templates/score_table.pt')\ndef score_table(request):\n match_scores = [(score1, score2) for score1 in range(0, 6) for score2 in range(score1, 6)]\n matches = [Match(0, datetime.now(), 'team1', 'team2', score1, score2) for (score1, score2) in match_scores]\n tip_scores = [(score1, score2) for score1 in range(0, 6) for score2 in range(0, 6)]\n tips = [Tip('none', 0, score1, score2) for (score1, score2) in tip_scores]\n match_tips = [scoring.MatchTip(match, tip) for match in matches for tip in tips]\n return { 'match_tips': match_tips,\n 'navigation': navigation_view(request) }\n\n@view_config(permission='view', route_name='categories', renderer='templates/categories.pt')\ndef view_categories(request):\n return { 'project': PROJECT_TITLE,\n 'categories': Category.get_all(),\n 'viewer_username': request.authenticated_userid,\n 'navigation': navigation_view(request) }\n\n@view_config(permission='view', route_name='settings', renderer='templates/settings.pt')\ndef view_settings(request):\n return { 'project': PROJECT_TITLE,\n 'settings': Setting.get_all(),\n 'viewer_username': request.authenticated_userid,\n 'navigation': navigation_view(request) }\n\n@view_config(permission='view', route_name='too_late', renderer='templates/too_late.pt')\ndef too_late(request):\n return { 'final_deadline': FINAL_DEADLINE,\n 'viewer_username': request.authenticated_userid,\n 'navigation': navigation_view(request),\n 'nonav': 'nonav' in request.params }\n\n\n# ----- Player views -----\n\nclass RegistrationSchema(formencode.Schema):\n allow_extra_fields = True\n alias = formencode.validators.String(not_empty=True, max=30)\n name = formencode.validators.String(not_empty=True)\n mail = formencode.validators.Email(resolve_domain=False, not_empty=True)\n #category = formencode.validators.OneOf(categories, hideList=True)\n initial_password = formencode.validators.String(not_empty=True, min=5)\n confirm_password = formencode.validators.String(not_empty=True, min=5)\n chained_validators = [\n formencode.validators.FieldsMatch('initial_password', 'confirm_password')\n #TODO: uniqueUsername(alias)\n ]\n\n@view_config(permission='view', route_name='register', renderer='templates/register.pt')\ndef register(request):\n form = Form(request, schema=RegistrationSchema)\n if 'form.submitted' in request.POST and form.validate():\n alias = form.data['alias']\n if (Player.exists(alias)):\n request.session.flash('Alias \"%(alias)s\" is already used, please choose another one.' 
% form.data)\n else:\n player = Player(alias=alias,\n password=form.data['initial_password'],\n name=form.data['name'],\n mail=form.data['mail'],\n unit=form.data['category'])\n DBSession.add(player)\n headers = remember(request, alias)\n return HTTPFound(location=route_url('home', request), headers=headers)\n return { 'form': FormRenderer(form),\n 'categories': Category.option_list(),\n 'navigation': navigation_view(request) }\n\n@view_config(permission='view', route_name='login')\ndef login(request):\n main_view = route_url('home', request)\n came_from = request.params.get('came_from', main_view)\n if 'form.submitted' in request.POST:\n login = request.POST['alias']\n password = request.POST['password']\n if Player.check_password(login, password):\n request.session.flash('Logged in successfully.')\n return HTTPFound(location=came_from, headers=remember(request, login))\n else:\n request.session.flash('Failed to login.')\n return HTTPFound(location=came_from)\n\n@view_config(permission='post', route_name='logout')\ndef logout(request):\n request.session.invalidate()\n request.session.flash('Logged out successfully.')\n return HTTPFound(location=route_url('home', request), headers=forget(request))\n\n@view_config(permission='view', route_name='view_players', renderer='templates/players.pt')\ndef view_players(request):\n ranking = Player.ranking()\n if not ranking:\n raise HTTPNotFound('no players yet')\n # Calculate every player's rank. Only the first player of each\n # rank gets a rank number, for all others it is set to None.\n rank = 1\n points = None\n for player in ranking:\n if points is None or player.d_points != points:\n player.rank = rank\n points = player.d_points\n rank += 1\n #for player in ranking:\n # print'player %s with %d points = rank %s' % (player.d_alias, player.d_points, str(player.rank))\n page = get_int_param(request, param='page', default=1)\n players = Page(ranking, page=page, url_maker=PageURL(request), items_per_page=items_per_page(request))\n return { 'players': players,\n 'viewer_username': request.authenticated_userid,\n 'is_admin': request.authenticated_userid in ADMINS,\n 'navigation': navigation_view(request),\n 'nonav': 'nonav' in request.params }\n\n@view_config(permission='view', route_name='view_group_players', renderer='templates/group_players.pt')\ndef view_group_players(request):\n category_id = request.matchdict['category']\n players = Player.get_by_unit(category_id)\n if not players:\n raise HTTPNotFound('no players in category %s' % category_id)\n page = get_int_param(request, param='page', default=1)\n players = Page(players, page=page, url_maker=PageURL(request), items_per_page=items_per_page(request))\n category = Category.get(category_id)\n category_name = category.d_name if category else category_id\n return { 'category': category_id,\n 'category_name': category_name,\n 'players': players,\n 'viewer_username': request.authenticated_userid,\n 'navigation': navigation_view(request),\n 'nonav': 'nonav' in request.params }\n\n@view_config(permission='view', route_name='view_rank_players', renderer='templates/rank_players.pt')\ndef view_rank_players(request):\n points = request.matchdict['points']\n players = Player.get_by_rank(points)\n if not players:\n raise HTTPNotFound('no players with %s points' % points)\n page = get_int_param(request, param='page', default=1)\n players = Page(players, page=page, url_maker=PageURL(request), items_per_page=items_per_page(request))\n return { 'points': points,\n 'players': players,\n 'viewer_username': 
request.authenticated_userid,\n 'navigation': navigation_view(request),\n 'nonav': 'nonav' in request.params }\n\n\ngroupScore = lambda grp: float(grp[3]) / grp[2]\n\n@view_config(permission='view', route_name='view_player_groups', renderer='templates/player_groups.pt')\ndef view_player_groups(request):\n groups = Player.get_groups()\n if not groups:\n raise HTTPNotFound('no player groups yet')\n # sort categories by descending average number of points\n ranking = []\n rank = 1\n points = None\n for group in sorted(groups, key=groupScore, reverse=True):\n gid = group[1]\n cgroup = Category.get(group[1])\n category = Category(gid, cgroup.d_name if cgroup is not None else gid)\n category.players = int(group[2])\n category.points = float(group[3]) / category.players\n if points is None or category.points != points:\n category.rank = rank\n points = category.points\n ranking.append(category)\n rank += 1\n page = get_int_param(request, param='page', default=1)\n categories = Page(ranking, page=page, url_maker=PageURL(request), items_per_page=items_per_page(request))\n return { 'categories': categories,\n 'viewer_username': request.authenticated_userid,\n 'navigation': navigation_view(request),\n 'nonav': 'nonav' in request.params }\n\n@view_config(permission='view', route_name='view_ranking', renderer='templates/ranking.pt')\ndef view_ranking(request):\n ranks = Rank.get_all()\n if not ranks:\n raise HTTPNotFound('no ranking yet')\n page = get_int_param(request, param='page', default=1)\n ranks = Page(ranks, page=page, url_maker=PageURL(request), items_per_page=items_per_page(request))\n player = Player.get_by_username(request.authenticated_userid)\n player_rank = Rank.get_position(player.d_points) if player else None\n return { 'ranks': ranks,\n 'player_rank': player_rank.d_position if player_rank else None,\n 'viewer_username': request.authenticated_userid,\n 'navigation': navigation_view(request),\n 'nonav': 'nonav' in request.params }\n\nclass PlayerInfo(formencode.Schema):\n allow_extra_fields = True\n name = formencode.validators.String(not_empty=True)\n mail = formencode.validators.Email(resolve_domain=False, not_empty=True)\n\n@view_config(permission='post', route_name='player_info', renderer='templates/player_info.pt')\ndef view_player_info(request):\n player = Player.get_by_username(request.authenticated_userid)\n form = Form(request, schema=PlayerInfo, obj=player)\n if 'form.submitted' in request.POST and form.validate():\n player.d_name = form.data['name']\n player.d_mail = form.data['mail']\n player.d_unit = form.data['category']\n request.session.flash('Player information has been updated.')\n player_rank = Rank.get_position(player.d_points) if player else None\n return { 'form': FormRenderer(form),\n 'player': player,\n 'player_rank': player_rank.d_position if player_rank else None,\n 'viewer_username': request.authenticated_userid,\n 'categories': Category.option_list(),\n 'navigation': navigation_view(request),\n 'nonav': 'nonav' in request.params }\n\n\n# ----- Team/Group views -----\n\n@view_config(permission='view', route_name='view_teams', renderer='templates/teams.pt')\ndef view_teams(request):\n \"\"\" Alphabetical team list. \"\"\"\n return { 'teams': Team.get_all(),\n 'navigation': navigation_view(request) }\n\n@view_config(permission='view', route_name='view_team_groups', renderer='templates/team_groups.pt')\ndef view_team_groups(request):\n \"\"\" Show all teams of all groups. 
\"\"\"\n groups = [TeamGroup(group_id, Team.get_by_group(group_id)) for group_id in GROUP_IDS]\n return { 'groups': groups,\n 'navigation': navigation_view(request),\n 'nonav': 'nonav' in request.params }\n\n@view_config(permission='view', route_name='view_group_teams', renderer='templates/team_groups.pt')\ndef view_group_teams(request):\n \"\"\" Show the teams of a single group. \"\"\"\n group_id = request.matchdict['group']\n if group_id not in GROUP_IDS:\n raise HTTPNotFound('invalid group id: %s' % group_id)\n groups = [TeamGroup(group_id, Team.get_by_group(group_id))]\n return { 'groups': groups,\n 'navigation': navigation_view(request),\n 'nonav': 'nonav' in request.params }\n\n\n# ----- Match views -----\n\ndef match_view(request, player, matches, title, group_id=None):\n for match in matches:\n if player:\n if match.d_id == FINAL_ID:\n final_tip = Final.get_player_tip(player)\n match.tip = Tip(player, FINAL_ID, final_tip.d_score1, final_tip.d_score2) if final_tip else None\n else:\n match.tip = Tip.get_player_tip(player, match.d_id)\n else:\n match.tip = None\n return { 'now': datetime.now(),\n 'title': title,\n 'matches': matches,\n 'group_id': group_id,\n 'final_id': FINAL_ID,\n 'final_deadline': FINAL_DEADLINE,\n 'viewer_username': player,\n 'navigation': navigation_view(request),\n 'nonav': 'nonav' in request.params }\n\n@view_config(permission='view', route_name='view_matches', renderer='templates/matches.pt', http_cache=0)\ndef view_matches(request):\n player = request.authenticated_userid\n matches = Match.get_all()\n return match_view(request, player, matches, 'Match schedule')\n\n@view_config(permission='view', route_name='view_upcoming_matches', renderer='templates/matches.pt', http_cache=0)\ndef view_upcoming_matches(request):\n player = request.authenticated_userid\n num = request.matchdict['num']\n matches = Match.get_upcoming(date.today(), num)\n return match_view(request, player, matches, 'Upcoming matches')\n\n@view_config(permission='view', route_name='view_group_matches', renderer='templates/matches.pt', http_cache=0)\ndef view_group_matches(request):\n player = request.authenticated_userid\n group_id = request.matchdict['group']\n matches = Match.get_by_group(group_id).all()\n return match_view(request, player, matches, 'Group %s matches' % group_id, group_id)\n\n@view_config(permission='view', route_name='view_stage1_matches', renderer='templates/matches.pt', http_cache=0)\ndef view_stage1_matches(request):\n player = request.authenticated_userid\n matches = Match.get_stage1().all()\n return match_view(request, player, matches, 'Stage 1 matches')\n\n@view_config(permission='view', route_name='view_stage2_matches', renderer='templates/matches.pt', http_cache=0)\ndef view_stage2_matches(request):\n player = request.authenticated_userid\n matches = Match.get_stage2().all()\n return match_view(request, player, matches, 'Stage 2 matches')\n\n\n# ----- Tip views -----\n\nclass MatchBetSchema(formencode.Schema):\n allow_extra_fields = True\n d_score1 = formencode.validators.Int(min=0, max=100, not_empty=True)\n d_score2 = formencode.validators.Int(min=0, max=100, not_empty=True)\n\n@view_config(permission='post', route_name='match_bet', renderer='templates/match_bet.pt')\ndef match_bet(request):\n player_id = request.authenticated_userid\n match_id = request.matchdict['match']\n match = Match.get_by_id(match_id)\n if match.d_begin < datetime.now():\n return HTTPFound(location=route_url('too_late', request))\n\n tip = Tip.get_player_tip(player_id, match_id)\n\n form = 
Form(request, schema=MatchBetSchema, obj=tip)\n if 'form.submitted' in request.POST and form.validate():\n if not tip:\n tip = Tip(player=player_id, match=match_id)\n DBSession.add(tip)\n tip.d_score1 = form.data['d_score1']\n tip.d_score2 = form.data['d_score2']\n return HTTPFound(location=route_url('view_match_tips', request, match=match_id))\n\n return { 'match': match,\n 'tip': tip,\n 'form': FormRenderer(form),\n 'navigation': navigation_view(request) }\n\n@view_config(permission='view', route_name='view_match_tips', renderer='templates/match_tips.pt', http_cache=0)\ndef view_match_tips(request):\n match_id = request.matchdict['match']\n match = Match.get_by_id(match_id)\n match_tips = [scoring.MatchTip(match, tip) for tip in Tip.get_match_tips(match_id)]\n page = get_int_param(request, param='page', default=1)\n tips = Page(match_tips, page=page, url_maker=PageURL(request), items_per_page=items_per_page(request))\n return { 'match': match,\n 'tips': tips,\n 'viewer_username': request.authenticated_userid,\n 'navigation': navigation_view(request),\n 'nonav': 'nonav' in request.params }\n\n@view_config(permission='view', route_name='view_player_tips', renderer='templates/player_tips.pt', http_cache=0)\ndef view_player_tips(request):\n player_id = request.matchdict['player']\n player = Player.get_by_username(player_id)\n tips = []\n for tip in Tip.get_player_tips(player_id):\n match = Match.get_by_id(tip.d_match)\n tips.append(scoring.MatchTip(match, tip))\n final = Match.get_final()\n final_tip = Final.get_player_tip(player_id)\n if final and final_tip:\n tips.append(scoring.FinalTip(final, final_tip))\n return { 'player': player,\n 'tips': tips,\n 'viewer_username': request.authenticated_userid,\n 'navigation': navigation_view(request),\n 'nonav': 'nonav' in request.params }\n\n\n# ----- Final views -----\n\nclass FinalBetSchema(formencode.Schema):\n allow_extra_fields = True\n d_team1 = formencode.validators.String(not_empty=True)\n d_team2 = formencode.validators.String(not_empty=True)\n d_score1 = formencode.validators.Int(min=0, not_empty=True)\n d_score2 = formencode.validators.Int(min=0, not_empty=True)\n\n@view_config(permission='post', route_name='final_bet', renderer='templates/final_bet.pt')\ndef final_bet(request):\n player = request.authenticated_userid\n final_tip = Final.get_player_tip(player)\n if final_tip:\n request.session.flash('You already entered a final tip.')\n return HTTPFound(location=route_url('view_final_tip', request, player=player))\n\n final_tip = Final(player)\n\n form = Form(request, schema=FinalBetSchema, obj=final_tip)\n if 'form.submitted' in request.POST and form.validate():\n # verify, that the tip was entered on time\n if FINAL_DEADLINE < datetime.now():\n return HTTPFound(location=route_url('too_late', request))\n final_tip.d_team1 = form.data['d_team1']\n final_tip.d_team2 = form.data['d_team2']\n final_tip.d_score1 = form.data['d_score1']\n final_tip.d_score2 = form.data['d_score2']\n DBSession.add(final_tip)\n return HTTPFound(location=route_url('view_final_tip', request, player=player))\n\n teams = [(team.d_id,team.d_name) for team in Team.get_all()]\n\n return { 'tip': final_tip,\n 'form': FormRenderer(form),\n 'teams': teams,\n 'navigation': navigation_view(request) }\n\n@view_config(permission='view', route_name='view_final_tips', renderer='templates/final_tips.pt', http_cache=0)\ndef view_final_tips(request):\n final = Match.get_final()\n tips = [scoring.FinalTip(final, tip) for tip in Final.get_all()]\n return { 'final': final,\n 'tips': 
tips,\n 'viewer_username': request.authenticated_userid,\n 'navigation': navigation_view(request),\n 'nonav': 'nonav' in request.params }\n\n@view_config(permission='view', route_name='view_final_tip', renderer='templates/final_tip.pt', http_cache=0)\ndef view_final_tip(request):\n player = request.matchdict['player']\n tip = Final.get_player_tip(player)\n return { 'tip': tip,\n 'navigation': navigation_view(request),\n 'nonav': 'nonav' in request.params }\n\n\n# ----- Admin stuff -----\n\n@view_config(permission='admin', route_name='tips', renderer='templates/tips.pt', http_cache=0)\ndef view_tips(request):\n return { 'tips': Tip.get_all(),\n 'viewer_username': request.authenticated_userid,\n 'navigation': navigation_view(request) }\n\n@view_config(permission='admin', route_name='update_local')\ndef update_local(request):\n scoring.refresh_points()\n return HTTPFound(location=route_url('view_players', request))\n\n@view_config(permission='view', route_name='update_remote')\ndef update_remote(request):\n try:\n scoring.apply_results(urlopen(RESULTPAGE).read())\n return HTTPFound(location=route_url('view_players', request))\n except:\n raise HTTPNotFound('location <%s> is inaccessible.' % RESULTPAGE)\n\ngroupByCategory = lambda grp: grp[1]\n\n@view_config(permission='admin', route_name='mailing', renderer='templates/mailing.pt')\ndef mailing(request):\n groups = Player.get_groups()\n if not groups:\n raise HTTPNotFound('no player groups yet')\n everybody = []\n categories = {}\n for group in sorted(groups, key=groupByCategory):\n players = Player.get_by_unit(group.d_unit)\n addrs = [player.d_mail for player in players]\n categories[group.d_unit] = \";\".join(addrs)\n everybody.extend(addrs)\n return { 'everybody': \";\".join(everybody),\n 'categories': categories,\n 'viewer_username': request.authenticated_userid,\n 'navigation': navigation_view(request),\n 'nonav': 'nonav' in request.params }\n\n@view_config(permission='admin', route_name='unregister')\ndef unregister(request):\n alias = request.matchdict['alias']\n player = Player.get_by_username(alias)\n if player:\n DBSession.delete(player)\n request.session.flash('Player \"%(alias)s\" deleted.' % request.matchdict)\n else:\n request.session.flash('Player \"%(alias)s\" not found.' % request.matchdict)\n return HTTPFound(location=route_url('view_players', request))\n\n@view_config(permission='admin', route_name='update_category')\ndef update_category(request):\n try:\n alias = request.matchdict['alias']\n value = request.matchdict['value']\n category = Category.get(alias)\n if value == 'DELETE':\n # delete category unless it is used by some players\n if category:\n players = Player.get_by_unit(alias)\n if players and len(players) > 0:\n request.session.flash('Category \"%(alias)s\" cannot be deleted.' % request.matchdict)\n else:\n DBSession.delete(category)\n request.session.flash('Deleted category \"%(alias)s\".' % request.matchdict)\n else:\n request.session.flash('Category \"%(alias)s\" does not exist.' % request.matchdict)\n else:\n # update/create category\n if category:\n category.d_name = value\n request.session.flash('Updated category \"%(alias)s\".' % request.matchdict)\n else:\n category = Category(alias, value)\n DBSession.add(category)\n request.session.flash('Created category \"%(alias)s\".' % request.matchdict)\n except:\n request.session.flash('Failed to update or create category \"%(alias)s\".' 
% request.matchdict)\n return HTTPFound(location=route_url('categories', request))\n\n@view_config(permission='admin', route_name='update_match')\ndef update_match(request):\n try:\n match = Match.get_by_id(request.matchdict['id'])\n if match:\n #if match.d_begin < FINAL_DEADLINE: \n # request.session.flash('Cannot update group stage matches.')\n #else:\n match.d_team1 = request.matchdict['team1']\n match.d_team2 = request.matchdict['team2']\n else:\n request.session.flash('Invalid match id: %(id)s.' % request.matchdict)\n return HTTPFound(location=route_url('view_matches', request))\n except:\n request.session.flash('Updating match teams failed.')\n return HTTPFound(location=route_url('view_matches', request))\n\n@view_config(permission='admin', route_name='update_score')\ndef update_score(request):\n try:\n match = Match.get_by_id(request.matchdict['id'])\n if match:\n score1 = int(request.matchdict['score1'])\n match.d_score1 = score1 if score1 >= 0 else None\n score2 = int(request.matchdict['score2'])\n match.d_score2 = score2 if score2 >= 0 else None\n else:\n request.session.flash('Invalid match id: %(id)s.' % request.matchdict)\n return HTTPFound(location=route_url('view_matches', request))\n except:\n request.session.flash('Updating score and points failed.')\n return HTTPFound(location=route_url('view_matches', request))\n\n@view_config(permission='admin', route_name='update_setting')\ndef update_setting(request):\n try:\n name = request.matchdict['name']\n value = request.matchdict['value']\n setting = Setting.get(name)\n if value == 'DELETE':\n if setting:\n if setting.d_name.startswith('scoring_'):\n request.session.flash('Setting \"%(name)s\" cannot be deleted.' % request.matchdict)\n else:\n DBSession.delete(setting)\n request.session.flash('Deleted setting \"%(name)s\".' % request.matchdict)\n else:\n request.session.flash('Setting \"%(name)s\" does not exist.' % request.matchdict)\n else:\n if setting:\n setting.d_value = value\n request.session.flash('Updated setting \"%(name)s\".' % request.matchdict)\n else:\n setting = Setting(name, value)\n DBSession.add(setting)\n request.session.flash('Created setting \"%(name)s\".' % request.matchdict)\n if setting.d_name.startswith('scoring_'):\n scoring.reload_betpoints()\n except:\n request.session.flash('Failed to update or create setting \"%(name)s\".' 
% request.matchdict)\n return HTTPFound(location=route_url('settings', request))\n\n@view_config(permission='admin', route_name='db_backup')\ndef db_backup(request):\n table = request.matchdict['table']\n if table == 'categories':\n data = dump_table(DBSession.query(Category).all())\n elif table == 'settings':\n data = dump_table(DBSession.query(Setting).all())\n elif table == 'players':\n data = dump_table(DBSession.query(Player).all())\n elif table == 'matches':\n data = dump_table(DBSession.query(Match).all())\n elif table == 'teams':\n data = dump_table(DBSession.query(Team).all())\n elif table == 'tips':\n data = dump_table(DBSession.query(Tip).all())\n elif table == 'final':\n data = dump_table(DBSession.query(Final).all())\n else:\n raise HTTPNotFound('unknown table: %(table)s' % request.matchdict)\n response = Response(headers={ 'mime-type': 'application/octet-stream' }, body=data)\n response.content_length = len(data)\n response.content_disposition = 'attachment;filename=\"%(table)s.dat\"' % request.matchdict\n return response\n\n@view_config(permission='admin', route_name='db_restore', renderer='templates/restore.pt')\ndef db_restore(request):\n if 'form.submitted' in request.POST:\n data = request.POST.get('data')\n if data is not None:\n data = data.file.read()\n #print 'data(content, %d bytes): %s' % (len(data), data)\n if len(data) > 0:\n try:\n query = load_table(data, scoped_session=DBSession)\n for obj in query:\n DBSession.merge(obj)\n request.session.flash('Restore succeeded.')\n return HTTPFound(location=route_url('home', request))\n except:\n request.session.flash('Not a valid backup file.')\n else:\n request.session.flash('Empty backup file.')\n else:\n request.session.flash('Please select a file.')\n form = Form(request)\n return { 'form': FormRenderer(form),\n 'navigation': navigation_view(request) }\n\n@view_config(permission='admin', route_name='system_info', renderer='templates/sysinfo.pt')\ndef system_info(request):\n sysinfo = {\n 'os.name': os.name,\n 'sys.platform': sys.platform,\n 'sys.maxint': sys.maxsize,\n 'sys.maxsize': sys.maxsize\n }\n with open('/proc/version') as f:\n sysinfo['os.version'] = f.read().strip()\n with open('/proc/cpuinfo') as f:\n for line in f:\n info = line.strip().split(': ')\n #print \"cpuinfo: %s (%d)\" % (info, len(info))\n if len(info) > 0 and info[0].strip() != '':\n key = 'cpu.%s' % info[0].strip() \n value = info[1].strip() if len(info) > 1 else '---'\n sysinfo[key] = value\n with open('/proc/meminfo') as f:\n for line in f:\n info = line.strip().split(': ')\n #print \"meminfo: %s (%d)\" % (info, len(info))\n if len(info) > 0 and info[0].strip() != '':\n key = 'mem.%s' % info[0].strip() \n value = info[1].strip() if len(info) > 1 else '---'\n sysinfo[key] = value\n for key,value in list(request.registry.settings.items()):\n sysinfo['ini.%s' % key] = value\n return { 'sysinfo': sorted(sysinfo.items()),\n 'viewer_username': request.authenticated_userid,\n 'navigation': navigation_view(request) }\n","sub_path":"russia2018/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":37011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"157990655","text":"#!usr/bin/env python3\n\nfrom datetime import date\nfrom typing import AnyStr, Dict, List, Optional\n\nfrom termcolor import colored, cprint\nfrom xlsxwriter import Workbook\n\n\ndef export_subnets(\n subnets: List[Dict],\n workbook_name: Optional[AnyStr] = \"New-Schema.xlsx\",\n):\n \"\"\"Exports an Excel file of 
subnetting data\n\n Parameters\n ----------\n subnets : List[Dict]\n List of subnets went througth subnetting\n workbook_name : Optional[AnyStr], optional\n Name of Workbook to create, by default \"New-Schema.xlsx\"\n\n Raises\n ------\n SystemExit\n TypeError, KeyError\n \"\"\"\n\n wb_name, ext = workbook_name.split(\".\")\n excel_fname = f\"{wb_name}_{date.today()}.{ext}\"\n\n # Create an Excel file\n with Workbook(filename=excel_fname) as workbook:\n # Create a sheet within the Excel file\n worksheet = workbook.add_worksheet(name=\"Subnetting Results\")\n # Filters\n worksheet.autofilter(\"A1:L1\")\n # Freeze top row and 2 most left columns\n worksheet.freeze_panes(1, 2)\n\n # Header line in Excel sheet\n header_line = {\n \"A1\": \"VLAN ID\",\n \"B1\": \"VLAN Name\",\n \"C1\": \"CIDR Notation\",\n \"D1\": \"Network Address\",\n \"E1\": \"Prefix Length\",\n \"F1\": \"Broadcast Address\",\n \"G1\": \"Addresses Range\",\n \"H1\": \"IP Helper Address\",\n \"I1\": \"Gateway\",\n \"J1\": \"Subnet Mask\",\n \"K1\": \"Wildcard Mask\",\n \"L1\": \"Max. No. of Usable Hosts\",\n }\n\n # Header line format\n h_frmt = workbook.add_format(\n properties={\n \"bold\": True,\n \"border\": True,\n \"align\": \"center\",\n \"valign\": \"vcenter\",\n }\n )\n\n # Create a header line row\n for cell, value in header_line.items():\n worksheet.write_string(cell, value, cell_format=h_frmt)\n\n # Generic cell format\n c_frmt = workbook.add_format(\n properties={\"border\": True, \"align\": \"center\", \"valign\": \"vcenter\"}\n )\n\n # Format cell containing number\n num_frmt = workbook.add_format(\n properties={\n \"border\": True,\n \"align\": \"center\",\n \"valign\": \"vcenter\",\n \"num_format\": \"#,##0\",\n }\n )\n\n # Initial values for row and column\n row, col = 1, 0\n\n try:\n # Place subnetting data according to header line above\n for subnet in subnets:\n worksheet.write(row, col + 0, \"\", c_frmt) # A\n worksheet.write(row, col + 1, \"\", c_frmt) # B\n worksheet.write(row, col + 2, subnet[\"cidr\"], c_frmt) # C\n worksheet.write(row, col + 3, subnet[\"net_addr\"], c_frmt) # D\n worksheet.write(row, col + 4, f'/{subnet[\"prefix_len\"]}', c_frmt) # E\n worksheet.write(row, col + 5, subnet[\"broadcast_addr\"], c_frmt) # F\n worksheet.write(row, col + 6, subnet[\"range\"], c_frmt) # G\n worksheet.write(row, col + 7, \"\", c_frmt) # H\n worksheet.write(row, col + 8, subnet[\"gateway\"], c_frmt) # I\n worksheet.write(row, col + 9, subnet[\"netmask\"], c_frmt) # J\n worksheet.write(row, col + 10, subnet[\"wildcard\"], c_frmt) # K\n worksheet.write_number(\n row, col + 11, subnet[\"num_hosts\"], num_frmt\n ) # L\n # Jump to next row\n row += 1\n\n except (TypeError, KeyError) as e:\n raise SystemExit(colored(text=f\"export_subnets.py: {e}\", color=\"red\"))\n cprint(text=f\"\\nPlease check {excel_fname} in the PWD.\\n\", color=\"green\")\n","sub_path":"export_subnets.py","file_name":"export_subnets.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"634695682","text":"import tensorflow as tf\nimport itertools\nimport os\nimport sklearn as sk\nimport pandas as pd\n\nBOT = 1\nHUMAN = 0\nclass DNNReg:\n\n train_features = None\n label = \"Class\"\n def run(self, train,typ):\n\n self.train_features = list(train)\n feature_cols = []\n for k in self.train_features:\n if k[0] == 'B':\n feature_cols.append(\n tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_identity(key=k, 
num_buckets=2)))\n else:\n feature_cols.append(tf.feature_column.numeric_column(key=k))\n direc = os.path.dirname(os.path.abspath(__file__))\n regressor = tf.estimator.DNNClassifier(feature_columns=feature_cols,activation_fn=tf.nn.relu, hidden_units=[10, 5],model_dir=direc+'\\\\models\\\\' + typ, n_classes=2)\n y = regressor.predict(input_fn=lambda: self.input_fn(train,pred=True))\n predictions = list(itertools.islice(y, train.shape[0]))\n return predictions[0]['probabilities'][BOT]\n def input_fn(self, data_set, pred=False):\n\n if pred == False:\n feature_cols = {k: tf.constant(data_set[k].values) for k in self.train_features}\n labels = tf.constant(data_set[self.label].values)\n\n return feature_cols, labels\n\n if pred == True:\n feature_cols = {k: tf.constant(data_set[k].values) for k in self.train_features}\n\n return feature_cols","sub_path":"PYTHON/DNNReg.py","file_name":"DNNReg.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"213349784","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\n\n\"\"\" \nCreates a ResNeXt Model as defined in:\n\nXie, S., Girshick, R., Dollár, P., Tu, Z., & He, K. (2016). \nAggregated residual transformations for deep neural networks. \narXiv preprint arXiv:1611.05431.\n\n\"\"\"\n\n__author__ = \"Pau Rodríguez López, ISELAB, CVC-UAB\"\n__email__ = \"pau.rodri1@gmail.com\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nfrom torch.autograd import Variable\n\n#import InplaceMul\n'''\nfrom torch.autograd.function import InplaceFunction\n\nclass InplaceMul(InplaceFunction):\n @staticmethod\n def forward(cls, ctx, input, multiplier):\n ctx.mark_dirty(input)\n ctx.multiplier = multiplier\n output = input\n view_size = [1, input.size(1)] + [1] * (len(input.size()) - 2)\n output.mul_(ctx.multiplier.view(view_size).expand_as(output))\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n view_size = [1, grad_output.size(1)] + [1] * (len(grad_output.size()) - 2)\n return grad_output.mul_(ctx.multiplier.view(view_size).expand_as(grad_output))\n'''\n\nclass DropCombine(nn.Module):\n def __init__(self, channels, res_drop = 0., p = 0.):\n super(DropCombine, self).__init__()\n self.p = p\n self.res_drop = res_drop\n self.channels = channels\n self.fix_prob = torch.FloatTensor(1, self.channels).fill_(1-self.res_drop).cuda()\n self.fix_mask = torch.bernoulli(self.fix_prob).cuda()\n self.one_mask = torch.FloatTensor(1, self.channels).fill_(1).cuda()\n self.x_prob = torch.FloatTensor(1, self.channels).fill_(1-self.p).cuda()\n self.x_mask = torch.FloatTensor(1, self.channels).fill_(0).cuda()\n # print(self.p)\n\n def forward(self, res, x):\n view_size = [1, self.channels] + [1] * (len(x.size()) - 2)\n \"\"\"\n if self.training==True:\n # compute the residual of dropout\n if self.p > 0.:\n self.x_mask = torch.bernoulli(self.x_prob) / (1. - self.p) - self.one_mask\n self.x_op = Variable(self.x_mask.view(view_size).expand_as(x)).cuda()\n \n res = res + x * self.x_op\n if self.res_drop > 0.:\n self.fix_op = Variable(self.fix_mask.view(view_size).expand_as(x)).cuda()\n res = res * self.fix_op \n \"\"\"\n self.fix_op = Variable(self.fix_mask.view(view_size), requires_grad=False).cuda()\n #if self.training==False: return res * self.fix_op + x\n self.x_mask = (torch.bernoulli(self.x_prob) / (1. 
- self.p) - self.one_mask) * self.fix_mask + self.one_mask\n if self.training==False: self.x_mask = self.one_mask\n self.x_op = Variable(self.x_mask.view(view_size), requires_grad=False).cuda()\n return res * self.fix_op + x * self.x_op\n '''\n self.x_mask = (torch.bernoulli(self.x_prob) / (1. - self.p) - self.one_mask) * self.fix_mask + self.one_mask\n #return res.data.mul_(self.fix_mask.view(view_size)) + x.data.mul_(self.x_mask.view(view_size))).cuda()\n return InplaceMul.apply(res,self.fix_mask) + InplaceMul.apply(x,self.x_mask)\n '''\n\nclass SFDropoutLayer(nn.Module):\n def __init__(self, in_planes, p):\n super(SFDropoutLayer, self).__init__()\n assert p < 1.\n self.p = p\n self.in_planes = in_planes\n self.prob_tensor = torch.FloatTensor(1).fill_(1-self.p).expand((self.in_planes)).cuda()\n # print(self.p)\n\n def forward(self, x):\n if self.training==False: return x\n # batch shared dropout mask\n self.mask = torch.bernoulli(self.prob_tensor)\n view_size = [1, self.in_planes] + [1] * (len(x.size()) - 2)\n self.input_mask = Variable((self.mask / (1. - self.p)).view(view_size).expand_as(x)).cuda()\n return x*self.input_mask\n\n\nclass GroupAttDrop(nn.Module):\n def __init__(self, in_planes, cardinality, group_width, is_drop = True):\n super(GroupAttDrop, self).__init__()\n # Select layers\n D = cardinality * group_width\n self.is_drop = is_drop\n self.cardinality = cardinality\n self.group_width = group_width\n self.fc1 = nn.Conv2d(in_planes, D//16, kernel_size=1) # Use nn.Conv2d instead of nn.Linear\n self.fc2 = nn.Conv2d(D//16, cardinality, kernel_size=1)\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.expand = ExpandConv(cardinality, group_width)\n #self.mask_tensor = torch.FloatTensor(1).fill_(1-self.p).expand((self.in_planes))\n \n def forward(self, x):\n self.w1 = self.avg_pool(x)\n self.w2 = F.relu(self.fc1(self.w1))\n self.w3 = F.sigmoid(self.fc2(self.w2))\n #if self.is_drop == True:\n # w += Variable(torch.bernoulli(w.data) - w.data)\n #print(w.size())\n #wait = input(\"PRESS ENTER TO CONTINUE.\")\n self.wid = self.expand(self.w3)\n return self.wid\n #if self.training==False: \n\nclass GroupRandDrop(nn.Module):\n def __init__(self, cardinality, group_width, p = 0.5, val = 0.3):\n super(GroupRandDrop, self).__init__()\n assert p < 1.\n self.p = p\n self.val = val\n self.cardinality = cardinality\n self.group_width = group_width\n self.expand = ExpandConv(cardinality, group_width)\n self.prob_tensor = torch.FloatTensor(1,cardinality).fill_(1.-self.p).cuda()\n self.one_tensor = torch.FloatTensor(1,cardinality).fill_(1.).cuda()\n # print(self.p)\n\n def forward(self,x):\n if self.training==False: return x\n # batch shared dropout mask\n self.mask = torch.bernoulli(self.prob_tensor)\n self.mask = self.val * self.mask / (1.- self.p) + (1.-self.val) * self.one_tensor\n self.wid_mask = self.expand(Variable(self.mask, requires_grad=False))\n return x * self.wid_mask \n \nclass ExpandConv(nn.Module):\n def __init__(self, cardinality, group_width):\n super(ExpandConv, self).__init__()\n self.D = cardinality * group_width\n self.cardinality = cardinality\n self.group_width = group_width\n self.one_tensor = Variable(torch.ones(1,1,self.group_width), requires_grad=False).cuda()\n #self.one_tensor = torch.FloatTensor(1,1,self.group_width).fill_(1.).cuda()\n \n def forward(self, x):\n self.wid = torch.matmul(x.view(-1, self.cardinality,1),self.one_tensor.expand(x.size(0),-1,-1))\n self.wid = self.wid.view(-1, self.D, 1, 1)\n return self.wid\n \n\n\nclass 
DropCombineBottleneck(nn.Module):\n\n def __init__(self, in_channels, out_channels, stride, cardinality, base_width, widen_factor, res_drop = 0., p = 0., preact = False):\n super(DropCombineBottleneck, self).__init__()\n self.layer = ResNeXtBottleneck(in_channels, out_channels, stride, cardinality, base_width, widen_factor, res_drop = 0.05, p = 0.2, preact = preact)\n\n def forward(self, x):\n return self.layer(x) \n \n \nclass ResNeXtBottleneck(nn.Module):\n \"\"\"\n RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)\n \"\"\"\n\n def __init__(self, in_channels, out_channels, stride, cardinality, base_width, widen_factor, res_drop = 0., p = 0., preact = False):\n \"\"\" Constructor\n\n Args:\n in_channels: input channel dimensionality\n out_channels: output channel dimensionality\n stride: conv stride. Replaces pooling layer.\n cardinality: num of convolution groups.\n base_width: base number of channels in each group.\n widen_factor: factor to reduce the input dimensionality before convolution.\n \"\"\"\n super(ResNeXtBottleneck, self).__init__()\n #width_ratio = out_channels / (widen_factor * 64.)\n D = cardinality * base_width\n self.preact = preact\n self.pre_bn = nn.BatchNorm2d(in_channels)\n self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_reduce = nn.BatchNorm2d(D)\n self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)\n self.bn = nn.BatchNorm2d(D)\n self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_expand = nn.BatchNorm2d(out_channels)\n self.shortcut = nn.Sequential()\n if in_channels != out_channels:\n self.shortcut.add_module('shortcut_conv',\n nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0,\n bias=False))\n self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))\n self.combine = DropCombine(out_channels, res_drop, p)\n #self.sfdrop = SFDropoutLayer(out_channels, p)\n #self.channeldrop = nn.Dropout2d(p)\n\n def forward(self, x):\n '''\n #for pre-activation residual\n bottleneck = F.relu(self.pre_bn(x))\n bottleneck = self.conv_reduce(bottleneck)\n bottleneck = F.relu(self.bn_reduce(bottleneck), inplace=True)\n bottleneck = self.conv_conv(bottleneck)\n bottleneck = F.relu(self.bn(bottleneck), inplace=True)\n bottleneck = self.conv_expand(bottleneck)\n #bottleneck = self.bn_expand(bottleneck)\n #return self.combine(self.shortcut(x), bottleneck)\n #return self.shortcut(x) + self.sfdrop(bottleneck)\n #return self.shortcut(x) + self.channeldrop(bottleneck)\n return self.shortcut(x) + bottleneck\n '''\n if self.preact == True: bottleneck = F.relu(self.pre_bn(x))\n else: bottleneck = x\n bottleneck = self.conv_reduce(bottleneck)\n bottleneck = F.relu(self.bn_reduce(bottleneck), inplace=True)\n bottleneck = self.conv_conv(bottleneck)\n bottleneck = F.relu(self.bn(bottleneck), inplace=True)\n bottleneck = self.conv_expand(bottleneck)\n if self.preact == False: bottleneck = self.bn_expand(bottleneck) \n #out = self.shortcut(x) + bottleneck\n out = self.combine(self.shortcut(x), bottleneck)\n if self.preact == False: out = F.relu(out, inplace=True)\n return out\n\n \nclass DropNeXtBottleneck(nn.Module):\n \"\"\"\n RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)\n \"\"\"\n\n def __init__(self, in_channels, out_channels, stride, cardinality, base_width, widen_factor):\n \"\"\" 
Constructor\n\n Args:\n in_channels: input channel dimensionality\n out_channels: output channel dimensionality\n stride: conv stride. Replaces pooling layer.\n cardinality: num of convolution groups.\n base_width: base number of channels in each group.\n widen_factor: factor to reduce the input dimensionality before convolution.\n \"\"\"\n super(DropNeXtBottleneck, self).__init__()\n #width_ratio = out_channels / (widen_factor * 64.)\n self.group_width = base_width\n D = cardinality * self.group_width \n self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_reduce = nn.BatchNorm2d(D)\n self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)\n self.bn = nn.BatchNorm2d(D)\n self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_expand = nn.BatchNorm2d(out_channels)\n self.groupdrop = GroupRandDrop(cardinality,self.group_width)\n\n self.shortcut = nn.Sequential()\n if in_channels != out_channels:\n self.shortcut.add_module('shortcut_conv',\n nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0,\n bias=False))\n self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))\n\n def forward(self, x):\n bottleneck = self.conv_reduce.forward(x)\n bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)\n bottleneck = self.conv_conv.forward(bottleneck)\n bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)\n bottleneck = self.groupdrop(bottleneck)\n bottleneck = self.conv_expand.forward(bottleneck)\n bottleneck = self.bn_expand.forward(bottleneck)\n residual = self.shortcut.forward(x)\n return F.relu(residual + bottleneck, inplace=True)\n\nclass SENeXtBottleneck(nn.Module):\n \"\"\"\n RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)\n \"\"\"\n\n def __init__(self, in_channels, out_channels, stride, cardinality, base_width, widen_factor):\n \"\"\" Constructor\n\n Args:\n in_channels: input channel dimensionality\n out_channels: output channel dimensionality\n stride: conv stride. 
Replaces pooling layer.\n cardinality: num of convolution groups.\n base_width: base number of channels in each group.\n widen_factor: factor to reduce the input dimensionality before convolution.\n \"\"\"\n super(SENeXtBottleneck, self).__init__()\n #width_ratio = out_channels / (widen_factor * 64.)\n self.cardinality = cardinality\n self.group_width = base_width\n D = self.cardinality * self.group_width\n self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_reduce = nn.BatchNorm2d(D)\n self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)\n self.bn = nn.BatchNorm2d(D)\n self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_expand = nn.BatchNorm2d(out_channels)\n # Select layers\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc1 = nn.Conv2d(D, D//16, kernel_size=1) # Use nn.Conv2d instead of nn.Linear\n self.fc2 = nn.Conv2d(D//16, cardinality, kernel_size=1)\n self.shortcut = nn.Sequential()\n if in_channels != out_channels:\n self.shortcut.add_module('shortcut_conv',\n nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0,\n bias=False))\n self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))\n\n def forward(self, x):\n bottleneck = self.conv_reduce.forward(x)\n bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True) \n bottleneck = self.conv_conv.forward(bottleneck)\n bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)\n # Squeeze\n w = self.avg_pool(bottleneck)\n w = F.relu(self.fc1(w))\n w = F.sigmoid(self.fc2(w))\n # Expand\n wid = GroupAttAvg(w,self.cardinality,self.group_width)\n #print(wid.size())\n #print(bottleneck.size())\n #wait = input(\"PRESS ENTER TO CONTINUE.\")\n bottleneck = bottleneck * wid\n bottleneck = self.conv_expand.forward(bottleneck)\n bottleneck = self.bn_expand.forward(bottleneck)\n residual = self.shortcut.forward(x)\n return F.relu(residual + bottleneck, inplace=True) \n \nclass SelNeXtBottleneck(nn.Module):\n \"\"\"\n RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)\n \"\"\"\n\n def __init__(self, in_channels, out_channels, stride, cardinality, base_width, widen_factor):\n \"\"\" Constructor\n\n Args:\n in_channels: input channel dimensionality\n out_channels: output channel dimensionality\n stride: conv stride. 
Replaces pooling layer.\n cardinality: num of convolution groups.\n base_width: base number of channels in each group.\n widen_factor: factor to reduce the input dimensionality before convolution.\n \"\"\"\n super(SelNeXtBottleneck, self).__init__()\n #width_ratio = out_channels / (widen_factor * 64.)\n self.cardinality = cardinality\n self.group_width = base_width\n D = self.cardinality * self.group_width\n self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_reduce = nn.BatchNorm2d(D)\n self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)\n self.bn = nn.BatchNorm2d(D)\n self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_expand = nn.BatchNorm2d(out_channels)\n self.select = GroupAttDrop(self.cardinality * self.group_width, self.cardinality, self.group_width)\n # Select layers\n #self.fc1 = nn.Conv2d(D, D//16, kernel_size=1) # Use nn.Conv2d instead of nn.Linear\n #self.fc2 = nn.Conv2d(D//16, cardinality, kernel_size=1)\n #self.avg_pool = nn.AdaptiveAvgPool2d(1)\n \n self.shortcut = nn.Sequential()\n if in_channels != out_channels:\n self.shortcut.add_module('shortcut_conv',\n nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0,\n bias=False))\n self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))\n \n def forward(self, x):\n bottleneck = self.conv_reduce.forward(x)\n bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)\n # Select groups\n #w = self.avg_pool(bottleneck)\n #w = F.relu(self.fc1(w))\n #w = F.sigmoid(self.fc2(w))\n #mask = GroupAttDrop(w,self.cardinality,self.group_width)\n mask = self.select(bottleneck)\n # compute groups\n bottleneck = self.conv_conv.forward(bottleneck)\n bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)\n # drop groups\n bottleneck = bottleneck * mask\n bottleneck = self.conv_expand.forward(bottleneck)\n bottleneck = self.bn_expand.forward(bottleneck)\n residual = self.shortcut.forward(x)\n return F.relu(residual + bottleneck, inplace=True) \n \nclass CifarResNeXt(nn.Module):\n \"\"\"\n ResNext optimized for the Cifar dataset, as specified in\n https://arxiv.org/pdf/1611.05431.pdf\n \"\"\"\n\n def __init__(self, model, cardinality, depth, nlabels, base_width, widen_factor=4, band_width = 64, preact = False):\n \"\"\" Constructor\n\n Args:\n cardinality: number of convolution groups.\n depth: number of layers.\n nlabels: number of classes\n base_width: base number of channels in each group.\n widen_factor: factor to adjust the channel dimensionality\n \"\"\"\n super(CifarResNeXt, self).__init__()\n self.preact = preact\n self.cardinality = cardinality\n self.depth = depth\n self.block_depth = (self.depth - 2) // 9\n self.base_width = base_width\n self.widen_factor = widen_factor\n self.nlabels = nlabels\n self.output_size = band_width\n self.stages = [64, band_width * self.widen_factor, 2* band_width * self.widen_factor, 4 * band_width * self.widen_factor]\n model_map = {'ResNext': ResNeXtBottleneck,\n 'DropNext': DropNeXtBottleneck,\n 'SENext': SENeXtBottleneck,\n 'DropCombine' : DropCombineBottleneck,\n 'SelNext': SelNeXtBottleneck}\n self.Bottleneck = model_map[model]\n self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)\n self.bn_1 = nn.BatchNorm2d(64)\n self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1, 1)\n self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2, 2)\n self.stage_3 = 
self.block('stage_3', self.stages[2], self.stages[3], 4, 2)\n self.classifier = nn.Linear(self.stages[3], nlabels)\n init.kaiming_normal(self.classifier.weight)\n\n for key in self.state_dict():\n if key.split('.')[-1] == 'weight':\n if 'conv' in key:\n init.kaiming_normal(self.state_dict()[key], mode='fan_out')\n if 'bn' in key:\n self.state_dict()[key][...] = 1\n elif key.split('.')[-1] == 'bias':\n self.state_dict()[key][...] = 0\n\n def block(self, name, in_channels, out_channels, width_ratio, pool_stride=2):\n \"\"\" Stack n bottleneck modules where n is inferred from the depth of the network.\n\n Args:\n name: string name of the current block.\n in_channels: number of input channels\n out_channels: number of output channels\n pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.\n\n Returns: a Module consisting of n sequential bottlenecks.\n\n \"\"\"\n block = nn.Sequential()\n for bottleneck in range(self.block_depth):\n name_ = '%s_bottleneck_%d' % (name, bottleneck)\n if bottleneck == 0:\n block.add_module(name_, self.Bottleneck(in_channels, out_channels, pool_stride, self.cardinality,\n self.base_width * width_ratio, self.widen_factor,preact = self.preact))\n else:\n block.add_module(name_,\n self.Bottleneck(out_channels, out_channels, 1, self.cardinality, self.base_width * width_ratio,\n self.widen_factor,preact = self.preact))\n return block\n\n def forward(self, x):\n x = self.conv_1_3x3.forward(x)\n x = F.relu(self.bn_1.forward(x), inplace=True)\n x = self.stage_1.forward(x)\n x = self.stage_2.forward(x)\n x = self.stage_3.forward(x)\n x = F.avg_pool2d(x, 8, 1)\n x = x.view(-1, self.stages[3])\n return self.classifier(x)\n","sub_path":"models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":22134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"562336448","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sqlite3\nfrom rapidfuzz import process\nimport unicodedata\nimport re\nimport discord.ext.commands\nfrom discord.ext import commands\n\n# Connect to the local sqlite database `rocbot.sqlite` and generate a list of \n# ship names from the `ship` table\ndef get_ships():\n # connect to the sqlite database\n conn = sqlite3.connect('rocbot.sqlite')\n # Return a list of items instead of 1 item tuples \n conn.row_factory = lambda cursor, row: row[0]\n # make an sqlite connection object\n c = conn.cursor()\n # creates a variable and assigns the list of ship names to it\n ship_list = c.execute('''SELECT name FROM ship''').fetchall()\n # close the databse connection\n conn.close()\n # return a list of ship names\n return ship_list\n\n# return the ship name from name_list which is a list of ship names \n# extracted from the databases table called ship\ndef ship_search(find_this):\n # using the class initiated list ship_list find one ship name that \n # matches the given string as close as possible\n found_this = process.extractOne(find_this, get_ships())\n # rapidfuzz returns the name and the ratio so strip the ratio and keep\n # the ship name\n ship_name = found_this[0]\n # return the ship name as a string\n return ship_name\n\n# Connect to the local sqlite database `rocbot.sqlite` and generate a list of \n# invader names from the invaders table\ndef get_invaders():\n # connect to the sqlite database\n conn = sqlite3.connect('rocbot.sqlite')\n # Return a list of items instead of 1 item tuples \n conn.row_factory = lambda cursor, row: row[0]\n # make an 
sqlite connection object\n c = conn.cursor()\n # creates a variable and assigns the list of ship names to it\n invader_list = c.execute('''SELECT name FROM invaders''').fetchall()\n # close the databse connection\n conn.close()\n # return a list of ship names\n return invader_list\n\ndef invader_search(find_this):\n if find_this != None:\n # using the class initiated list ship_list find one ship name that \n # matches the given string as close as possible\n found_this = process.extractOne(find_this, get_invaders())\n # rapidfuzz returns the name and the ratio so strip the ratio and keep\n # the ship name\n invader_name = found_this[0]\n # return the ship name as a string\n return invader_name\n else:\n pass\n\n# strip all non lete\ndef sanitise_input(input_string):\n # \\W+ matches any non-word character (equal to [^a-zA-Z0-9_])\n # + Quantifier — Matches between one and unlimited times, as many times as \n # possible, giving back as needed (greedy)\n words_only = re.sub(r'\\W+','', str(input_string))\n return unicodedata.normalize('NFKD', words_only).encode('ascii', 'ignore').decode('utf8')\n\ndef customemoji(self, find_this):\n find_sanitised = sanitise_input(find_this.lower())\n return discord.utils.get(self.bot.emojis, name = find_sanitised)\n\ndef embed_pagination(description):\n paginator = commands.Paginator(prefix='', suffix='', max_size=2000)\n for ship_line in description:\n paginator.add_line(ship_line)\n return paginator.pages\n\ndef shortcut_obj(arg1):\n # connect to the sqlite database\n conn = sqlite3.connect('rocbot.sqlite')\n # return a class sqlite3.row object which requires a tuple input query\n conn.row_factory = sqlite3.Row\n # make an sqlite connection object\n c = conn.cursor()\n # using a defined view shortcut collect all table info \n c.execute('select * from shortcut where shortcut =?', (arg1,))\n # return the shortcut object including the required elemnts\n # using shortc instead of sc so not to be confused with \n # sub command abbrehviations \n shortc_obj = c.fetchall()\n # close the databse connection\n conn.close()\n # return the sqlite3.cursor object\n return shortc_obj\n\ndef sql_dmg_brackets():\n # connect to the sqlite database\n conn = sqlite3.connect('rocbot.sqlite')\n # Return a list of items instead of 1 item tuples \n conn.row_factory = lambda cursor, row: row[0]\n # make an sqlite connection object\n c = conn.cursor()\n # creates a variable and assigns the list of ship names to it\n dmg_obj = c.execute('''SELECT amount FROM ship_damage''').fetchall()\n # close the databse connection\n conn.close()\n # return a list of ship names\n return dmg_obj\n\ndef dmg_bracket_list():\n dmg_list = []\n for i in sql_dmg_brackets():\n dmg_list.append(i)\n return dmg_list\n\n\n\n\n33234234234\n\ndef sql_arg_list():\n # connect to the sqlite database\n conn = sqlite3.connect('rocbot.sqlite')\n # Return a list of items instead of 1 item tuples \n conn.row_factory = lambda cursor, row: row[0]\n # make an sqlite connection object\n c = conn.cursor()\n # creates a variable and assigns the list of ship names to it\n dmg_obj = c.execute('''SELECT name FROM shortcut''').fetchall()\n # close the databse connection\n conn.close()\n # return a list of ship names\n return dmg_obj\n\ndef arg_parse_list():\n dmg_list = []\n for i in sql_arg_list():\n dmg_list.append(i)\n return dmg_list\n\ndef argument_parser(sc, arg1):\n clean_arg1 = sanitise_input(arg1)\n if sc == 'dmg':\n dmg_bracket = process.extractOne(clean_arg1, dmg_bracket_list())\n return dmg_bracket[0]\n elif sc == 
'rand':\n try:\n int(arg1)\n except ValueError:\n return 10\n except TypeError:\n return 10\n else:\n return arg1\n else:\n if len(clean_arg1) <= 4:\n shortcut = shortcut_obj(clean_arg1.lower())\n if len(shortcut) > 0:\n return shortcut[0]['name']\n else:\n arg_found = process.extractOne(clean_arg1, arg_parse_list())\n return arg_found[0]\n\ndef get_em_colour(arg1):\n embed_colours = {\"Shield Breaker\": 0x3a77f9, \"High Impact\": 0xee4529, \"Armor Piercing\": 0xffb820}\n return embed_colours[arg1]\n \n","sub_path":"res/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":6039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"192601663","text":"# Copyright (c) 2020 - present Vitor Oriel \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom .BaseReport import BaseReport\nfrom .reports import *\nfrom ..utils.utils import stringfyList\nfrom ..utils.file_utils import getReports\n\nfrom importlib import import_module\nfrom typing import Dict, Type\n\nclass Report:\n \"\"\"Class that handles with the report operations\"\"\"\n @staticmethod\n def getAvailableReports() -> Dict[str, Type[BaseReport]]:\n \"\"\"Gets the available report formats\n\n @returns Dict[str, Type[BaseReport]]: The dict that contains the available reports\n \"\"\"\n def classCreator(name: str) -> Type[BaseReport]:\n \"\"\"Creates the class type\n\n @type name: str\n @param name: The class name\n @returns Type[BaseReport]: The base report type\n \"\"\"\n Report = import_module(\n f\"fuzzingtool.reports.reports.{name}\",\n package=name\n )\n return getattr(Report, name)\n \n availableReports = {}\n for report in getReports():\n Report = classCreator(report)\n availableReports[Report.__alias__] = Report\n return availableReports\n\n @staticmethod\n def build(name: str) -> BaseReport:\n \"\"\"Build the report\n\n @type name: str\n @param name: The name of the report file\n @returns BaseReport: The report object\n \"\"\"\n if '.' in name:\n reportName, reportType = name.rsplit('.', 1)\n else:\n reportType = name\n reportName = ''\n reportType = reportType.lower()\n availableReports = Report.getAvailableReports()\n try:\n return availableReports[reportType](reportName)\n except:\n raise Exception(f\"Unsupported report format for {reportType}! 
Accepts: \"+\n stringfyList(list(availableReports.keys())))","sub_path":"src/fuzzingtool/reports/Report.py","file_name":"Report.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"566760695","text":"\"\"\"handles request for flot widget.\"\"\"\nfrom apps.widgets.status.users.views import remote_supply\nfrom apps.widgets.status.models import DailyStatus\n\n\ndef supply(request, page_name):\n \"\"\"supply view_objects for user status.\"\"\"\n _ = page_name\n _ = request\n\n new_user_data = []\n daily_login_data = []\n result = remote_supply(request, page_name)\n result = result['logins']\n #template_date_format = \"%m/%d\"\n\n for item in result:\n point = {'x': item['date'], 'y': item['logins']}\n new_user_data.append(point)\n\n new_user_series = {\n 'color': \"#000\",\n 'show': 'true',\n 'data': new_user_data,\n }\n\n for item in DailyStatus.objects.all():\n point = {'x': item.date[5:].replace('-', '/'), 'y': item.daily_visitors}\n daily_login_data.append(point)\n\n daily_login_series = {\n 'color': \"#000\",\n 'show': 'true',\n 'data': daily_login_data,\n }\n data_sets = {\n 'New Users': new_user_series,\n 'Daily Logins': daily_login_series\n }\n\n xaxis_color = \"#000\"\n\n return {\n \"data_sets\": data_sets,\n \"xaxis_color\": xaxis_color,\n }\n","sub_path":"makahiki/apps/widgets/status/gchart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"457391142","text":"#!/usr/bin/python\n#coding=utf-8 \n\nusers=[\"王源\",\"崔伟\",\"侯飞飞\",\"和康宁\",\"孔令洋\",\"李迪生\",\"李志勋\"]\nuserlist = []\nn = 5\n\nprint (users[-1])\n# def movebit(n=0):\n# for n in range(0,len(users)):\n\n\n\n# print(5%7)\n# for i in range(0,5):\n# print ('i:',i)\n# movenum=i%7\n\n# print('movenum',movenum)\n\n# userTemp=userlist[]\n# pass \n\n","sub_path":"zhiban.py","file_name":"zhiban.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"347899933","text":"#!/usr/bin/env python3\n\nimport time\nimport openstack\nimport pymysql\nimport queue\nimport threading\nimport logging as log\nfrom pathlib import Path\nfrom datetime import datetime\n\n\nlog.basicConfig(format=\"%(asctime)s: %(message)s\", level=log.INFO, datefmt=\"%Y-%m-%d %H:%M:%S\")\nconn = openstack.connect()\n\n\ndef check_core_services(return_queue, retry=3):\n for i in range(retry):\n try:\n data = []\n services = conn.list_services()\n for s in services:\n log.debug(f'core_services: {s.id} {s.name} {s.type} {s.enabled}')\n data.append({'id': s.id, 'name': s.name, 'type': s.type, 'enabled': s.enabled})\n break\n except Exception as e:\n log.error(f'fail to list core_services({i}): {e}')\n return_queue.put({'core_services': data})\n\ndef check_hypervisors(return_queue, retry=3):\n for i in range(retry):\n try:\n data = []\n hypervisors = conn.list_hypervisors()\n for h in hypervisors:\n #print(type(h.cpu_info))\n #print(dir(h.cpu_info))\n log.debug((f'{h.id} {h.name} {h.status} {h.state} {h.vcpus} {h.vcpus_used} '\n f'{h.memory_size} {h.memory_used} {h.local_disk_size} {h.local_disk_used} '\n f'{h.running_vms}'))\n data.append({\n 'id': h.id,\n 'name': h.name,\n 'status': h.status,\n 'state': h.state,\n 'vcpus': h.vcpus,\n 'vcpus_used': h.vcpus_used,\n 'memory_size': h.memory_size,\n 'memory_used': h.memory_used,\n 'local_disk_size': h.local_disk_size,\n 
'local_disk_used': h.local_disk_used,\n 'running_vms': h.running_vms\n })\n break\n except Exception as e:\n log.error(f'fail to list network hypervisors({i}): {e}')\n return_queue.put({'hypervisors': data})\n\ndef check_compute_services(return_queue, retry=3):\n for i in range(retry):\n try:\n data = []\n services = conn.compute.services()\n for s in services:\n log.debug(f'{s.id} {s.binary} {s.state} {s.host}')\n data.append({'id': s.id, 'name': s.binary, 'state': s.state, 'host': s.host})\n break\n except Exception as e:\n log.error(f'fail to list nova services({i}): {e}')\n return_queue.put({'compute_services': data})\n\ndef check_network_agents(return_queue, retry=3):\n for i in range(retry):\n try:\n data = []\n agents = conn.network.agents()\n for a in agents:\n log.debug((f'{a.id} {a.binary} {a.is_admin_state_up} {a.is_alive} '\n f'{a.host} {a.last_heartbeat_at} {a.started_at} {a.created_at}'))\n data.append({\n 'id': a.id,\n 'name': a.binary,\n 'state': a.is_admin_state_up,\n 'alive': a.is_alive,\n 'host': a.host,\n 'last_heartbeat_at': a.last_heartbeat_at,\n 'started_at': a.started_at,\n 'created_at': a.created_at\n })\n break\n except Exception as e:\n log.error(f'fail to list network agents({i}): {e}')\n return_queue.put({'network_agents': data})\n\n\ndef main(interval=3600, log_dir='./log'):\n region = conn._compute_region\n log.info(f'Start monitoring services, region={region}, interval={interval}')\n\n core_services_log_file = \"services.core-services.log\"\n compute_services_log_file = \"services.compute-services.log\"\n hypervisors_log_file = \"services.hypervisors.log\"\n network_agents_log_file = \"services.network-agents.log\"\n\n return_queue = queue.Queue()\n while True:\n #ts = datetime.now().timestamp()\n now = datetime.now()\n check_time = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n core_services_t = threading.Thread(target=check_core_services, args=(return_queue,))\n hypervisors_t = threading.Thread(target=check_hypervisors, args=(return_queue,))\n nova_services_t = threading.Thread(target=check_compute_services, args=(return_queue,))\n network_agents_t = threading.Thread(target=check_network_agents, args=(return_queue,))\n\n core_services_t.start()\n hypervisors_t.start()\n nova_services_t.start()\n network_agents_t.start()\n\n count_t = threading.active_count()\n log.debug(f'Active threads {count_t}')\n\n for i in range(4): # monitoring 4 services\n data = return_queue.get()\n if 'core_services' in data:\n log.debug(f\"Core Services:\\n{data['core_services']}\")\n target_log_file = core_services_log_file\n target_data = data['core_services']\n elif 'compute_services' in data:\n log.debug(f\"Compute Services:\\n{data['compute_services']}\")\n target_log_file = compute_services_log_file\n target_data = data['compute_services']\n elif 'hypervisors' in data:\n log.debug(f\"Hypervisors:\\n{data['hypervisors']}\")\n target_log_file = hypervisors_log_file\n target_data = data['hypervisors']\n elif 'network_agents' in data:\n log.debug(f\"Network Agents:\\n{data['network_agents']}\")\n target_log_file = network_agents_log_file\n target_data = data['network_agents']\n\n Path(log_dir).mkdir(parents=True, exist_ok=True)\n target_log_file = f'{log_dir}/{region}.{target_log_file}'\n with open(target_log_file, 'a') as f:\n for item in target_data:\n l_data = []\n for key, value in item.items():\n l_data.append(f\"{key}={value}\")\n s_data = f','.join(l_data)\n f.write(f'{check_time} {s_data}\\n')\n\n if threading.active_count() == 1:\n log.debug(f'Checking threads finished')\n\n 
time.sleep(interval)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"scripts/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"322662043","text":"import networkx as nx\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef adjacent_edges(nodes, halfk):\n n = len(nodes)\n for i, u in enumerate(nodes):\n for j in range(i + 1, i + halfk + 1):\n v = nodes[j % n]\n yield u, v\n\ndef make_ring_lattice(n, k):\n G = nx.Graph()\n nodes = range(n)\n G.add_nodes_from(nodes)\n G.add_edges_from(adjacent_edges(nodes, k//2))\n return G\n\ndef draw_graph(G):\n nx.draw_circular(G, \n node_color = 'y', \n node_size = 300, \n with_labels = True)\n plt.show()\n\ndef flip(p):\n return np.random.random() < p\n\ndef rewire(G, p):\n nodes = set(G)\n for u, v in G.edges():\n if flip(p):\n choices = nodes - {u} - set(G[u])\n new_v = np.random.choice(list(choices))\n G.remove_edge(u, v)\n G.add_edge(u, new_v)\n\ndef make_ws_graph(n, k, p):\n ws = make_ring_lattice(n, k)\n rewire(ws, p)\n return ws\n\ndef all_pairs(nodes):\n for i, j in enumerate(nodes):\n for v, w in enumerate(nodes):\n if i > v:\n yield j, w\n\ndef node_clustering(G, u):\n neighbors = G[u]\n k = len(neighbors)\n if k < 2:\n return np.nan\n \n possible = k * (k - 1) / 2\n exist = 0\n for v, w in all_pairs(neighbors):\n if G.has_edge(v, w):\n exist += 1 \n return exist / possible\n\ndef clustering_coefficient(G):\n cu = [node_clustering(G, node) for node in G]\n return np.nanmean(cu)\n\ndef path_lengths(G):\n length_iter = nx.shortest_path_length(G)\n for source, dist_map in length_iter:\n for dest, dist in dist_map.items():\n yield dist\n\ndef characteristic_path_length(G):\n return np.mean(list(path_lengths(G)))\n\ndef run_one_graph(n, k, p):\n ws = make_ws_graph(n, k, p)\n mpl = characteristic_path_length(ws)\n cc = clustering_coefficient(ws)\n # print(mpl, cc)\n return mpl, cc\n\ndef run_experiment(ps, n = 100, k = 4, iters = 20):\n res = []\n for p in ps:\n t = [run_one_graph(n, k, p) for _ in range(iters)]\n means = np.array(t).mean(axis = 0)\n res.append(means)\n return np.array(res)\n\nps = np.logspace(-4, 0, 15)\nres = run_experiment(ps)\nL, C = np.transpose(res)\nL /= L[0]\nC /= C[0]\n\nplt.plot(ps, C, 's-', linewidth=1, label='C(p)/C(0)')\nplt.plot(ps, L, 'o-', linewidth=1, label='L(p)/L(0)')\nplt.xscale('log')\nplt.legend()\nplt.show()\n","sub_path":"my_code/chapter3-small-world-graphs.py","file_name":"chapter3-small-world-graphs.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"76114680","text":"from django import template\nimport re\n\nregister = template.Library()\n\n@register.filter(name='addswearwords')\ndef addswearwords(value):\n if isinstance(value, str):\n return str(value) + f' охуительно пиздец заебал'\n else:\n raise ValueError(f'Нельзя добавить бранные слова (текст) в тип {type(value)}')\n\n\n# Добавляю только три шаблона матерных слов. Расширять можно до бесконечности.\nSWEARWORDS = [\n r'\\w*ху[йяие]\\w*',\n r'\\w*ебал\\w*',\n r'\\w*пизд\\w*',\n]\n\n@register.filter(name='censor')\ndef censor(value):\n if isinstance(value, str):\n censored_text = ''\n for one_word in value.split():\n for swearword in SWEARWORDS:\n if re.search(swearword, one_word.strip('.,;:?!-').lower()):\n censored_text += '!censored! 
'\n break\n else:\n censored_text += one_word + ' '\n return str(censored_text)\n else:\n raise ValueError('Цензор умеет работать только со строками.')\n","sub_path":"newapp/templatetags/custom_filters.py","file_name":"custom_filters.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"509859590","text":"from collections import namedtuple\nfrom functools import reduce\nfrom time import time\n\n\nclass TimeItResults(namedtuple(\"TimeItResults\", [\"min_time\", \"max_time\", \"avg_time\"])):\n def __new__(cls, min_time=0, max_time=0, avg_time=0):\n return super(TimeItResults, cls).__new__(cls, min_time=min_time, max_time=max_time, avg_time=avg_time)\n\n\ndef time_function(func, args=None, kwargs=None, times_to_run=1):\n args = () if args is None else args\n kwargs = {} if kwargs is None else kwargs\n\n results = []\n\n for i in range(0, times_to_run):\n start = time()\n func(*args, **kwargs)\n results.append(time()-start)\n\n return TimeItResults(min_time=min(results), max_time=max(results), avg_time=reduce(lambda x, y: x + y, results) / len(results))","sub_path":"pytools/util/timing.py","file_name":"timing.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"584837142","text":"from datetime import datetime\n\nimport scrapy\nfrom scrapy.selector import Selector\n\nfrom smzdm.items import SmzdmItem\n\n\nclass PhoneSpider(scrapy.Spider):\n name = 'phone'\n allowed_domains = ['smzdm.com']\n start_urls = ['https://www.smzdm.com/fenlei/zhinengshouji/h5c4s0f0t0p1/#feed-main/']\n\n # 抓取首页数据\n def start_requests(self):\n url = 'https://www.smzdm.com/fenlei/zhinengshouji/h5c4s0f0t0p1/#feed-main/'\n yield scrapy.Request(url=url, callback=self.scrape_index)\n\n # 获取首页前十产品链接\n def scrape_index(self, response):\n atags = Selector(response=response).xpath('//h5[@class=\"feed-block-title\"]/a/@href').getall()[:10]\n for atag in atags:\n yield scrapy.Request(url=atag, callback=self.parse_detail)\n\n # 获取用户昵称及评论,实现自动翻页\n def parse_detail(self, response):\n item = SmzdmItem()\n li_box = Selector(response=response).xpath('//div[@id=\"commentTabBlockNew\"]/ul/li[@class=\"comment_list\"]')\n for li in li_box:\n nickname = li.xpath('./div[2]/div/a/span/text()').get()\n comment = li.xpath(\n './div[2]/div[@class=\"comment_conWrap\"]/div/p/span/text()').get().strip()\n comment_time = li.xpath('./div[2]/div/div/meta/@content').get()\n comment_time = comment_time if comment_time else datetime.now().strftime('%Y-%m-%d')\n item['nickname'] = nickname\n item['comment'] = comment\n item['comment_time'] = comment_time\n yield item\n next_comment_url = Selector(response=response).xpath('//li[@class=\"pagedown\"]/a/@href').get()\n if next_comment_url is not None:\n yield scrapy.Request(url=next_comment_url, callback=self.parse_detail)\n\n # def parse(self, response):\n # pass\n","sub_path":"week12/smzdm/smzdm/spiders/phone.py","file_name":"phone.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"636529997","text":"# Data Definition Module\n\nfrom .sql import Sql\n\n# Data Types for SQLite\nINT = \"INTEGER\"\nNULL = \"NULL\"\nFLOAT = \"FLOAT\"\nVARCHAR = \"TEXT\"\nRAW = \"BLOB\"\n\n\n# Column Class, set Columns Attributes\nclass Column:\n\n # Attributes\n name = None\n type = INT\n primary = False\n unique = False\n null = True\n\n # 
Constructor, Populate the object\n def __init__(self, name, type=INT, primary=False, unique=False, null=True):\n self.name = name\n self.type = type\n self.primary = primary\n self.unique = unique\n self.null = null\n\n\n# Connect to DataBase and Create an Object\ndef connect(database=\":memory:\"):\n return Define(database)\n\n\n# Definition Class, creates and manage tables inside the Database\nclass Define(Sql):\n\n # Show tables\n def tables(self):\n result = self.query(\"SELECT name FROM sqlite_master WHERE type = 'table'\")\n tables = []\n for item in result:\n tables.append(item[0])\n return tables\n\n # Create a new table\n def create(self, name, *columns):\n sql = \"CREATE TABLE IF NOT EXISTS %s ( \" % name\n for item in columns:\n sql += self.__columnToSql(item)\n sql += \",\"\n\n sql = sql[:-1] + \")\"\n self.run(sql)\n\n # Create a temporary table\n def __createTemp(self, name, *columns):\n from random import randrange\n temp = name + \"Temp\" + str(randrange(1, 1024))\n self.create(temp, *columns)\n return temp\n\n # Drop a table\n def drop(self, table):\n sql = \"DROP TABLE IF EXISTS %s\" % table\n self.run(sql)\n\n # Rename a Table\n def rename(self, table, name):\n sql = \"ALTER TABLE %s RENAME TO %s\" % (table, name)\n self.run(sql)\n\n # Column Manipulation\n\n # Receive a column and transform to SQL code\n def __columnToSql(self, column):\n sql = \"%s %s\" % (column.name, column.type)\n if column.primary:\n sql += \" PRIMARY KEY\"\n if column.unique:\n sql += \" UNIQUE\"\n if column.null is False:\n sql += \" NOT NULL\"\n return sql\n\n # Get Columns from Table\n def getColumns(self, table):\n sql = \"table_info(%s)\" % table\n result = self.pragma(sql)\n columns = []\n for item in result:\n column = Column(item[1])\n column.type = item[2]\n column.null = item[3] is 0\n column.primary = item[5] is 1\n columns.append(column)\n return(columns)\n\n # Add new columns to a table\n def addColumns(self, table, *columns):\n for item in columns:\n item.primary = False\n item.unique = False\n sql = \"ALTER TABLE %s ADD COLUMN \" % table\n sql += self.__columnToSql(item)\n self.run(sql)\n\n # Drop a column from the table\n def dropColumn(self, table, name):\n columns = self.getColumns(table)\n for item in columns:\n if item.name == name:\n columns.remove(item)\n tempTable = self.__createTemp(table, *columns)\n sql = \"INSERT INTO %s SELECT \" % tempTable\n for item in columns:\n sql += \"%s,\" % item.name\n sql = sql[:-1] + \" FROM %s\" % table\n self.run(sql)\n self.drop(table)\n self.rename(tempTable, table)\n\n # Rename a column from the table\n def renameColumn(self, table, column, name):\n columns = self.getColumns(table)\n for item in columns:\n if item.name == column:\n item.name = name\n tempTable = self.__createTemp(table, *columns)\n sql = \"INSERT INTO %s SELECT * FROM %s\" % (tempTable, table)\n self.run(sql)\n self.drop(table)\n self.rename(tempTable, table)\n","sub_path":"sqlight/define.py","file_name":"define.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"198176396","text":"import pathlib\nimport yaml\n\nBASE_DIR = pathlib.Path(__file__).parent\nconfig_path = pathlib.Path.joinpath(BASE_DIR, 'config', 'config.yaml')\n\n\ndef get_config(path):\n with open(path) as f:\n config = yaml.load(f)\n return config\n\n\nconfig = 
get_config(config_path)","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"20042666","text":"from Crypto.PublicKey import RSA\nfrom Crypto.Cipher import PKCS1_OAEP\nfrom Crypto.Util.number import long_to_bytes\nimport base64\nimport random\n\nfrom secret import flag\n\ndef long_to_b64(payload):\n return base64.b64encode(long_to_bytes(payload)).decode()\n\ndef main():\n k = RSA.generate(2048)\n oaep_k = PKCS1_OAEP.new(k)\n\n failed = False\n exited = False\n\n print('🔑', long_to_b64(k.n))\n\n for i in range(1, 100+1):\n\n c0 = random.getrandbits(1)\n\n print(f'🤯 {i}')\n\n while True:\n params = input('🤖 ').split(' ')\n action = params.pop(0)\n\n if action == '📦':\n if c0 == 0:\n ciphertext = random.randint(0, k.n-1)\n else:\n ciphertext = int.from_bytes(oaep_k.encrypt(flag), 'big')\n print('🤫', long_to_b64(ciphertext))\n elif action == '🔓':\n c1 = int(params[0])\n if c0 != c1:\n failed = True\n else:\n print('👌')\n break\n elif action == '🏃':\n exited = True\n break\n if failed or exited: break\n\n if exited:\n print('🤨')\n elif failed:\n print('👋')\n else:\n print('🏁', flag.decode())\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"20210116-firebird-internal-ctf/obvious-transfer/env/chall/chall.py","file_name":"chall.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"651621170","text":"# -*- coding: utf-8 -*-\n# @Author : Li Zihao\n# @Time : 2020/5/21 21:19\n# @File : urls.py\n\nfrom django.contrib import admin\nfrom django.urls import path\n\nfrom JmeterPlatform import views\nfrom JmeterPlatform.allviews import task_view as tv\n\nurlpatterns = [\n path('admin', admin.site.urls),\n path('uploadfiles', views.upload_files, name='uploadfiles'),\n path('filelist', views.file_list, name='filelist'),\n path('downloadfile', views.download_file, name='downloadfile'),\n path('deletefile', views.delete_file, name='deletefile'),\n path('addtask', views.add_task),\n path('jsonrp', views.json_response),\n path('myscript', views.my_script),\n]","sub_path":"mydemos/DjangoProject/JmeterPlatform/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"193634491","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Fecha creación: 3 dic. 
2017\n# autor: usuario\n\nimport sys\n\ndef reemplaza(cad):\n return cad.replace(\"\",\"<b><i>\").replace(\"\",\"</i></b>\")\n\ndef question(qname,quest,answers,positivo=False):\n quest=reemplaza(quest)\n correctas=0\n for a in answers:\n if (a[0]==\"#\"):\n correctas+=1\n incorrectas=len(answers)-correctas\n #print(\"Pregunta: \"+qname)\n #print(\"Questión: \"+quest)\n #print(\"Answers: \"+str(answers))\n if (correctas==1): single=\"true\" \n else: single=\"false\"\n fraction=1.0/correctas*100\n if (incorrectas==0): fratIncorrect=0\n else: fratIncorrect=1.0/incorrectas*100\n if (fratIncorrect>fraction): fratIncorrect=fraction\n if (fraction==int(fraction)): fraction=int(fraction)\n else: fraction=round(fraction,5)\n if (fratIncorrect==int(fratIncorrect)): fratIncorrect=int(fratIncorrect)\n else: fratIncorrect=round(fratIncorrect,5)\n if (positivo): fratIncorrect=0\n questXml=\"\"\" \n {}\n \n {}\n \n 1.0000000\n {}\n 1\"\"\".format(qname,quest,single)\n for a in answers:\n if (a[0]==\"#\"):\n fr=fraction\n a=a[1:]\n else:\n fr=-fratIncorrect\n a=reemplaza(a)\n questXml+=\"\"\"\\n {}\"\"\".format(fr,a)\n if (single==\"true\"):\n questXml+=\"\"\"\\n <span style='color: red; text-decoration: underline; font-weight: bold;'>Dejar la pregunta en blanco</span>\"\"\"\n questXml+=\"\"\"\\n \"\"\"\n return questXml \n\ndef printCategory(category):\n return \"\"\" \n \n {}\n \n \"\"\".format(category)\n\ndef uso():\n print(\"Uso: {} [-p] \".format(sys.argv[0]))\n sys.exit(1)\n\n\nentrada=None\n#entrada=open(\"/m/tmp/preguntas/pruebas/p1.txt\",\"r\")\npositivo=False\nif (entrada==None):\n if (len(sys.argv)<2): uso()\n if (sys.argv[1]==\"-p\"): \n positivo=True\n entrada=open(sys.argv[2],\"r\")\n else: \n positivo=False\n entrada=open(sys.argv[1],\"r\")\n\n\nprint(\"\"\"\n\"\"\");\nl=entrada.readline()\nwhile (l):\n if (l[0]==\"\\n\"): l=entrada.readline(); continue\n if (l[0]==\"#\"): print(\"\"); l=entrada.readline(); continue\n if (l.startswith(\"Category\")): category=l[8:].strip(); print(printCategory(category)); l=entrada.readline(); continue\n if (l[0]==\" \" or l[0]==\"\\t\"): print(\"Error hay respuestas antes que preguntas\");sys.exit(2)\n posEsp=l.find(\" \")\n qname=l[:posEsp].strip()\n quest=l[posEsp:].strip()\n l=entrada.readline(); answers=[]\n while (l and (l[0]==\" \" or l[0]==\"\\t\")):\n if (l.find(\"
\")>=0):\n            l=l.strip()\n            #l=\"<hr/>\"+l\n            while(l.find(\"
\")<0):\n l+=entrada.readline()\n l=l.replace(\"
\",\"<pre>\").replace(\"
\",\"</pre>\")\n l+=\"<hr/>\"\n answers.append(l)\n else:\n answers.append(l.strip())\n l=entrada.readline()\n print(question(qname,quest,answers,positivo))\n \nprint(\"
\")\n\nsys.exit(0)\n \nprint(question(\"P01\",\"En Seguridad Informática el objetivo de No repudio, se refiere a:\",\n [\"#La garantía de la participación de las partes en una comunicación\",\n \"Que la información una vez enviada tiene que ser aceptada por la otra parte\",\n \"La garantía de la integridad de la información\",\n \"La garantía de la disponibilidad de la información\"\n ]\n )) ","sub_path":"python/scripts/src/fich2MoodleXmlOld2.py","file_name":"fich2MoodleXmlOld2.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"331721523","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2007 TUBITAK/UEKAE\n# Licensed under the GNU General Public License, version 2.\n# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt\n\nfrom pisi.actionsapi import autotools\nfrom pisi.actionsapi import pisitools\nfrom pisi.actionsapi import shelltools\n\ndef setup():\n shelltools.system(\"qmake-qt4\")\n shelltools.cd(\"qstardict/translations\")\n shelltools.system(\"lrelease-qt4 *.ts\")\n\ndef build():\n autotools.make()\n\ndef install():\n pisitools.dobin(\"qstardict/qstardict\")\n\n for pixmap in [\"qstardict/*.png\"]:\n pisitools.insinto(\"/usr/share/pixmaps\", pixmap)\n pisitools.insinto (\"/usr/share/applications\", \"qstardict/qstardict.desktop\")\n pisitools.insinto (\"/usr/share/qstardict/translations\", \"qstardict/translations/*.qm\")\n\n pisitools.dolib(\"plugins/web/libweb.so\",\"usr/lib/qstardict/plugins/\")\n pisitools.dolib(\"plugins/stardict/libstardict.so\",\"usr/lib/qstardict/plugins/\")\n\n pisitools.dodoc(\"AUTHORS\", \"ChangeLog\", \"COPYING\", \"README\", \"TODO\",\"THANKS\")\n\n","sub_path":"pardus/tags/2008.1/applications/util/qstardict/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"54214219","text":"# following https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html\n\n# import the necessary packages\nfrom sklearn.preprocessing import LabelEncoder\n# from sklearn.cross_validation import train_test_split\nfrom sklearn.model_selection import train_test_split\n\nfrom keras.models import Sequential\nfrom keras.layers import Activation\nfrom keras.optimizers import SGD\nfrom keras.layers import Dense\nfrom keras.utils import np_utils\n\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\n\n# VGG16\nfrom keras.applications.vgg16 import VGG16\nfrom keras.models import Model\nfrom keras.layers import Input\nfrom keras import backend as K\nfrom keras import optimizers\n\nimport numpy as np\nimport argparse\nimport cv2\nimport os, os.path\n\nimport matplotlib.pyplot as plt\n\n# ---- load data ----\n\n# path to training images\ntrain_path = 'train'\n\n# path to validation images\nvalidate_path = 'validate'\n\n# images to be resized to (image_dim) x (image_dim)\nimage_dim = 128\n\nx_train = []\ny_train = []\nx_valid = []\ny_valid = []\n\n# load training data\nfor filename in next(os.walk(train_path))[2]:\n # full path is path to filename + '/' + filename\n image = cv2.imread(''.join([train_path, '/', filename]))\n # append resized image\n x_train.append(cv2.resize(image, (image_dim, image_dim)))\n # filenames are of the form 
{class}.{image_num}.jpg\n label = filename.split(os.path.sep)[-1].split(\".\")[0]\n # record label\n y_train.append(label)\n\n# load validation data\nfor filename in next(os.walk(validate_path))[2]:\n # full path is path to filename + '/' + filename\n image = cv2.imread(''.join([validate_path, '/', filename]))\n # append resized image\n x_valid.append(cv2.resize(image, (image_dim, image_dim)))\n # filenames are of the form {class}.{image_num}.jpg\n label = filename.split(os.path.sep)[-1].split(\".\")[0]\n # record label\n y_valid.append(label)\n\n\n# change labels from strings to integers, e.g 'cat' -> 0, 'dog' -> 1\nle = LabelEncoder()\ny_train = le.fit_transform(y_train) \ny_valid = le.fit_transform(y_valid) \n\n\n# convert data to NumPy array of floats\nx_train = np.array(x_train, np.float32)\nx_valid = np.array(x_valid, np.float32)\n\n\n\n# ---- define data generator ----\ndatagen = ImageDataGenerator() # VGG16 already rescales input images, no need for further rescaling\n\ndatagen.fit(x_train)\n\n\n\n\n# ---- define the model ----\n# VGG16\nbase_model = VGG16(input_shape=(image_dim, image_dim, 3), include_top=False, weights='imagenet')\n# base_model = VGG16(input_shape=(image_dim, image_dim, 3), include_top=False)\nx = base_model.output\nx = Flatten()(x)\nd1 = Dense(64, activation='relu')(x)\nd1 = Dropout(0.5)(d1)\npredictions = Dense(1, activation='sigmoid')(d1)\nmodel = Model(inputs=base_model.input, outputs=predictions) # final model\n\nopt = optimizers.SGD(lr=0.0001)\n\nmodel.compile(loss='binary_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n\n\nmodel.summary()\n\n\n# ---- train the model ----\nbatch_size = 32\nnum_epochs = 10\n\nhistory = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),\n steps_per_epoch=len(x_train) / batch_size, epochs=num_epochs,\n validation_data=datagen.flow(x_valid, y_valid, batch_size=batch_size),\n validation_steps = len(x_valid) / batch_size)\n\n\n\n# ---- save the model and the weights ----\nmodel.save('saved_model/vgg16_catsdogs.h5')\nmodel.save_weights('saved_weight/vgg16_catsdogs_weights.h5')\nprint('model saved')\n\n\n\n# ---- display history ----\n# list all data in history\nprint(history.history.keys())\n# summarize history for accuracy\nplt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.savefig('graph/train_test_accuracy_vgg16_augmentation.png')\nplt.clf() # clear figure\n\n# summarize history for loss (binary cross-entropy)\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.ylabel('binary cross-entropy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.savefig('graph/train_test_loss_vgg16_augmentation.png')\nplt.clf()","sub_path":"vgg16_transfer_imagenet/vgg16_transferlearning.py","file_name":"vgg16_transferlearning.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"330368058","text":"\"\"\" Further selection of dt.cc\n\"\"\"\nimport numpy as np\nfrom dataset_cc import read_fsta, read_fpha_dict, calc_dist_km\nimport config\n\ncfg = config.Config()\n# i/o paths\nfdt_in = 'input/dt_all.cc'\nfdt_out = open('input/dt.cc','w')\nfpha = 'input/phase.temp'\nevent_dict = read_fpha_dict(fpha)\nfsta = cfg.fsta\nsta_dict = read_fsta(fsta)\n# thres for linking event pairs\ncc_thres = cfg.cc_thres[1]\nloc_dev_thres = 
cfg.loc_dev_thres[1]\ndep_dev_thres = cfg.dep_dev_thres[1]\ndist_thres = cfg.dist_thres[1]\ndt_thres = cfg.dt_thres[1]\nnum_sta_thres = cfg.num_sta_thres[1]\n\n\n# read dt.cc\nprint('reading %s'%fdt_in)\ndt_list = []\nf=open(fdt_in); lines=f.readlines(); f.close()\nfor i,line in enumerate(lines):\n if i%1e6==0: print('done/total %s/%s | %s pairs selected'%(i,len(lines),len(dt_list)))\n codes = line.split()\n if line[0]=='#':\n to_add = True\n data_id, temp_id = codes[1:3]\n if data_id not in event_dict or temp_id not in event_dict: \n to_add = False; continue\n data_lat, data_lon, data_dep = event_dict[data_id][0]\n temp_lat, temp_lon, temp_dep = event_dict[temp_id][0]\n # 1. select loc dev\n loc_dev = calc_dist_km([data_lat,temp_lat], [data_lon,temp_lon])\n dep_dev = abs(data_dep - temp_dep)\n if not (loc_devdist_thres: continue\n # select by CC\n dt, wht = [float(code) for code in codes[1:3]]\n cc = wht**2\n pha = codes[-1]\n if ccdt_thres[0]: continue\n if pha=='S' and abs(dt)>dt_thres[1]: continue\n dt_list[-1][-1].append([sta, line])\n\n# write dt.cc\nprint('write input/dt.cc')\nfor [[data_id, temp_id], head_line, pha_dt_list] in dt_list:\n sta_list = np.unique([sta for [sta, _] in pha_dt_list])\n if len(sta_list) 1:\n print(\"**** Multiple column matches on replace\", arg, len(ref_cols), ref_cols[:2])\n sys.exit(1)\n print(\"Replacing\", col, \"with\", ref_cols[0] + '...')\n fix_df.columns = [ref_cols[0] if c.startswith(col + '.') else c for c in fix_df.columns]\n\n\ndef rename_column_name(fix_df, col, arg):\n col_to_rename = [c for c in fix_df.columns if c.startswith(col + '.')]\n if len(col_to_rename) == 0:\n print(\"**** Couldn't find column to rename\", col)\n sys.exit(1)\n print(\"Renaming\", col, \"with\", arg + '...')\n fix_df.rename(columns={col_to_rename[0]: arg}, inplace=True)\n\n\ndef delete_column(fix_df, col):\n col_to_delete = [c for c in fix_df.columns if c.startswith(col + '.')]\n if len(col_to_delete) == 1:\n print(\"Deleting\", col)\n fix_df.drop(columns=[col_to_delete[0]], inplace=True)\n else:\n print(\"**** Incorrect column matching:\", col, col_to_delete)\n sys.exit()\n\n\ndef apply_actions(fix_df, ref_df, actions_df):\n for _, row in actions_df.iterrows():\n column = row.loc['QID_2022']\n action = row.loc['Action']\n argument = row.loc['Argument']\n\n if action == 'SearchReplace':\n search_replace_column_name(fix_df, column, ref_df, argument)\n elif action == 'Rename':\n rename_column_name(fix_df, column, argument)\n elif action == 'Delete':\n delete_column(fix_df, column)\n elif action == 'Ignore':\n pass\n","sub_path":"clean_scripts/cleaning.py","file_name":"cleaning.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"349583313","text":"class LoginTrackMixin:\n \"\"\"\n Set either signed_in or signed_up in context if the user has just signed in or registered.\n One of those will be set only once which allows on-page JS code to take proper actions like send tracking events.\n \"\"\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n for attr in ('signed_in', 'signed_up'):\n if attr in self.request.session:\n context[attr] = self.request.session[attr]\n del self.request.session[attr]\n return context\n","sub_path":"backend/profile_page/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
+{"seq_id":"377832619","text":"\"\"\"\nGATHERPLOTDATA\n\nThis file gathers all data that could be used for plotting and packs it into a\nnice little convenient structure :)\n\nVersion: 2015feb04 by cliffk\n\"\"\"\n\n# Define labels\nepititles = {'prev':'Prevalence', 'plhiv':'PLHIV', 'inci':'New infections', 'force':'Incidence', 'daly':'DALYs', 'death':'Deaths', 'dx':'Diagnoses', 'tx1':'Treatment', 'tx2':'Subsequent lines of treatment'}\nepiylabels = {'prev':'HIV prevalence (%)', 'plhiv':'Number of PLHIV', 'inci':'New HIV infections per year', 'force':'Incidence per 100 person-years', 'daly':'HIV-related DALYs per year', 'death':'HIV/AIDS-related deaths per year', 'dx':'New HIV diagnoses per year', 'tx1':'People on first-line treatment', 'tx2':'People on subsequent lines of treatment'}\ncosttitles = {'costcum':'Cumulative HIV-related financial costs'}\ncostylabels = {}\n\ndef gatheruncerdata(D, R, annual=True, verbose=2, maxyear=2030):\n \"\"\" Gather standard results into a form suitable for plotting with uncertainties. \"\"\"\n from numpy import zeros, nan, size, ndim, array, asarray\n from printv import printv\n from copy import deepcopy\n printv('Gathering epidemiology results...', 3, verbose)\n \n uncer = dict()\n uncer['tvec'] = R['tvec'].tolist() # Copy time vector\n uncer['poplabels'] = D['G']['meta']['pops']['short']\n uncer['colorm'] = (0,0.3,1) # Model color\n uncer['colord'] = (0,0,0) # Data color\n uncer['legend'] = ('Model', 'Data')\n uncer['xdata'] = D['data']['epiyears']\n ndatayears = len(uncer['xdata'])\n \n # Downsample to annual\n origtvec = deepcopy(uncer['tvec'])\n if annual:\n dt = origtvec[1]-origtvec[0]\n allindices = range(0, len(origtvec), int(round(1/dt)))\n indices = []\n for i in allindices:\n if origtvec[i]<=maxyear:\n indices.append(i)\n uncer['tvec'] = [origtvec[i] for i in indices]\n else:\n indices = range(len(origtvec))\n \n for key in epititles.keys():\n percent = 100 if key in ['prev','force'] else 1 # Whether to multiple results by 100\n \n uncer[key] = dict()\n uncer[key]['pops'] = [dict() for p in xrange(D['G']['npops'])]\n uncer[key]['tot'] = dict()\n if key not in ['prev','force']: # For stacked area plots -- an option for everything except prevalence and force-of-infection\n uncer[key]['popstacked'] = dict()\n uncer[key]['popstacked']['pops'] = []\n uncer[key]['popstacked']['legend'] = []\n uncer[key]['popstacked']['title'] = epititles[key]\n uncer[key]['popstacked']['ylabel'] = epiylabels[key]\n for p in xrange(D['G']['npops']):\n uncer[key]['pops'][p]['best'] = (R[key]['pops'][0][p,:]*percent)[indices].tolist()\n uncer[key]['pops'][p]['low'] = (R[key]['pops'][1][p,:]*percent)[indices].tolist()\n uncer[key]['pops'][p]['high'] = (R[key]['pops'][2][p,:]*percent)[indices].tolist()\n uncer[key]['pops'][p]['title'] = epititles[key] + ' - ' + D['G']['meta']['pops']['short'][p]\n uncer[key]['pops'][p]['ylabel'] = epiylabels[key]\n if key not in ['prev','force']:\n uncer[key]['popstacked']['pops'].append(uncer[key]['pops'][p]['best'])\n uncer[key]['popstacked']['legend'].append(D['G']['meta']['pops']['short'][p])\n uncer[key]['tot']['best'] = (R[key]['tot'][0]*percent)[indices].tolist()\n uncer[key]['tot']['low'] = (R[key]['tot'][1]*percent)[indices].tolist()\n uncer[key]['tot']['high'] = (R[key]['tot'][2]*percent)[indices].tolist()\n uncer[key]['tot']['title'] = epititles[key] + ' - Overall'\n uncer[key]['tot']['ylabel'] = epiylabels[key]\n uncer[key]['xlabel'] = 'Years'\n \n if key=='prev':\n epidata = D['data']['key']['hivprev'][0] # TODO: include 
uncertainties\n uncer['prev']['ydata'] = zeros((D['G']['npops'],ndatayears)).tolist()\n if key=='plhiv':\n epidata = nan+zeros(ndatayears) # No data\n uncer['plhiv']['ydata'] = zeros(ndatayears).tolist()\n if key=='inci':\n epidata = D['data']['opt']['numinfect'][0]\n uncer['inci']['ydata'] = zeros(ndatayears).tolist()\n if key=='force':\n epidata = nan+zeros(ndatayears) # No data\n uncer['force']['ydata'] = zeros(ndatayears).tolist()\n if key=='death':\n epidata = D['data']['opt']['death'][0]\n uncer['death']['ydata'] = zeros(ndatayears).tolist()\n if key=='daly':\n epidata = nan+zeros(ndatayears) # No data\n uncer['daly']['ydata'] = zeros(ndatayears).tolist()\n if key=='dx':\n epidata = D['data']['opt']['numdiag'][0]\n uncer['dx']['ydata'] = zeros(ndatayears).tolist()\n if key=='tx1':\n epidata = D['data']['txrx']['numfirstline'][0]\n uncer['tx1']['ydata'] = zeros(ndatayears).tolist()\n if key=='tx2':\n epidata = D['data']['txrx']['numsecondline'][0]\n uncer['tx2']['ydata'] = zeros(ndatayears).tolist()\n\n\n if size(epidata,axis=0)==D['G']['npops']: # It's by population\n for p in xrange(D['G']['npops']):\n thispopdata = epidata[p]\n if len(thispopdata) == 1: \n thispopdata = nan+zeros(ndatayears) # If it's an assumption, just set with nans\n elif len(thispopdata) != ndatayears:\n raise Exception('Expect data length of 1 or %i, actually %i' % (ndatayears, len(thispopdata)))\n uncer[key]['ydata'][p] = (asarray(thispopdata)*percent).tolist() # Stupid, but make sure it's an array, then make sure it's a list\n elif size(epidata[0])==1 and ndim(epidata)==1: # It's not by population\n uncer[key]['ydata'] = (array(epidata)*percent).tolist()\n if len(uncer[key]['ydata']) == 1:\n uncer[key]['ydata'] = nan+zeros(ndatayears) # If it's an assumption, just set with nans\n if len(uncer[key]['ydata']) != ndatayears:\n raise Exception('Expect data length of 1 or %i, actually %i' % (ndatayears, len(uncer[key]['ydata'])))\n else:\n raise Exception(\"Can't figure out size of epidata; doesn't seem to be a vector or a matrix\")\n \n \n # Financial cost outputs\n for key in ['costann', 'costcum']:\n uncer[key] = dict()\n origkey = 'annual' if key=='costann' else 'cumulative'\n #Set up stacked storage\n uncer[key]['stacked'] = dict()\n if key == 'costcum':\n uncer[key]['stacked']['costs'] = []\n uncer[key]['stacked']['legend'] = []\n uncer[key]['stacked']['title'] = 'Cumulative HIV-related financial costs'\n uncer[key]['stacked']['ylabel'] = R['costshared'][origkey]['total']['ylabel']\n else:\n for yscale in ['total','gdp','revenue','govtexpend','totalhealth','domestichealth']:\n uncer[key]['stacked'][yscale] = dict()\n uncer[key]['stacked'][yscale]['costs'] = []\n uncer[key]['stacked'][yscale]['legend'] = []\n uncer[key]['stacked'][yscale]['title'] = 'Annual HIV-related financial costs'\n if 'ylinedata' in R['costshared'][origkey]['total'][yscale]:\n uncer[key]['stacked'][yscale]['ylabel'] = R['costshared'][origkey]['total'][yscale]['ylabel']\n\n #Loop through cost types\n for ac in ['total']: #['total','future','existing']:\n uncer[key][ac] = dict()\n if key=='costcum':\n # Individual line graphs with uncertainty\n uncer[key][ac]['best'] = R[key][ac][0][indices].tolist()\n uncer[key][ac]['low'] = R[key][ac][1][indices].tolist()\n uncer[key][ac]['high'] = R[key][ac][2][indices].tolist()\n uncer[key][ac]['xdata'] = R['costshared'][origkey][ac]['xlinedata'][indices].tolist()\n uncer[key][ac]['title'] = R['costshared'][origkey][ac]['title']\n uncer[key][ac]['xlabel'] = R['costshared'][origkey][ac]['xlabel']\n 
uncer[key][ac]['ylabel'] = R['costshared'][origkey][ac]['ylabel']\n uncer[key][ac]['legend'] = ['Model']\n # Stacked graphs\n if ac != 'total':\n uncer[key]['stacked']['costs'].append(uncer[key][ac]['best'])\n uncer[key]['stacked']['legend'].append([ac['title']()])\n else:\n for yscale in ['total','gdp','revenue','govtexpend','totalhealth','domestichealth']:\n uncer[key][ac][yscale] = dict()\n if 'ylinedata' in R['costshared'][origkey][ac][yscale]:\n # Individual line graphs with uncertainty\n uncer[key][ac][yscale]['best'] = R[key][ac][yscale][0][indices].tolist()\n uncer[key][ac][yscale]['low'] = R[key][ac][yscale][1][indices].tolist()\n uncer[key][ac][yscale]['high'] = R[key][ac][yscale][2][indices].tolist()\n uncer[key][ac][yscale]['xdata'] = R['costshared'][origkey][ac][yscale]['xlinedata'][indices].tolist()\n uncer[key][ac][yscale]['title'] = R['costshared'][origkey][ac][yscale]['title']\n uncer[key][ac][yscale]['xlabel'] = R['costshared'][origkey][ac][yscale]['xlabel']\n uncer[key][ac][yscale]['ylabel'] = R['costshared'][origkey][ac][yscale]['ylabel']\n uncer[key][ac][yscale]['legend'] = ['Model']\n # Stacked graphs\n if ac != 'total':\n uncer[key]['stacked'][yscale]['costs'].append(uncer[key][ac][yscale]['best'])\n uncer[key]['stacked'][yscale]['legend'].append([ac['title']()])\n \n # Financial commitment outputs\n uncer['commit'] = dict()\n for yscale in ['total','gdp','revenue','govtexpend','totalhealth','domestichealth']:\n uncer['commit'][yscale] = dict()\n if 'ylinedata' in R['costshared']['annual']['total'][yscale]:\n # Individual line graphs with uncertainty\n uncer['commit'][yscale]['best'] = R['commit'][yscale][0][indices].tolist()\n uncer['commit'][yscale]['low'] = R['commit'][yscale][1][indices].tolist()\n uncer['commit'][yscale]['high'] = R['commit'][yscale][2][indices].tolist()\n uncer['commit'][yscale]['xdata'] = R['costshared']['commit'][yscale]['xlinedata'][indices].tolist()\n uncer['commit'][yscale]['title'] = R['costshared']['commit'][yscale]['title']\n uncer['commit'][yscale]['xlabel'] = R['costshared']['commit'][yscale]['xlabel']\n uncer['commit'][yscale]['ylabel'] = R['costshared']['commit'][yscale]['ylabel']\n uncer['commit'][yscale]['legend'] = ['Model']\n \n printv('...done gathering uncertainty results.', 4, verbose)\n return uncer\n\n\n\n\ndef gathermultidata(D, Rarr, annual=True, verbose=2, maxyear=2030):\n \"\"\" Gather multi-simulation results (scenarios and optimizations) into a form suitable for plotting. 
\"\"\"\n from printv import printv\n from copy import deepcopy\n printv('Gathering multi-simulation results...', 3, verbose)\n \n \n multi = dict()\n multi['nsims'] = len(Rarr) # Number of simulations\n multi['tvec'] = Rarr[0]['R']['tvec'].tolist() # Copy time vector\n multi['poplabels'] = D['G']['meta']['pops']['long']\n \n # Downsample to annual\n origtvec = deepcopy(multi['tvec'])\n if annual:\n dt = origtvec[1]-origtvec[0]\n allindices = range(0, len(origtvec), int(round(1/dt)))\n indices = []\n for i in allindices:\n if origtvec[i]<=maxyear:\n indices.append(i)\n multi['tvec'] = [origtvec[i] for i in indices]\n multi['tvec'] = [origtvec[i] for i in indices]\n else:\n indices = range(len(origtvec))\n \n for key in epititles.keys():\n percent = 100 if key in ['prev','force'] else 1 # Whether to multiple results by 100\n multi[key] = dict()\n multi[key]['pops'] = [dict() for p in xrange(D['G']['npops'])]\n for p in xrange(D['G']['npops']):\n multi[key]['pops'][p]['data'] = []\n multi[key]['pops'][p]['best'] = []\n multi[key]['pops'][p]['high'] = []\n multi[key]['pops'][p]['low'] = []\n multi[key]['pops'][p]['legend'] = []\n multi[key]['pops'][p]['title'] = epititles[key] + ' - ' + D['G']['meta']['pops']['short'][p]\n multi[key]['pops'][p]['ylabel'] = epiylabels[key]\n for sim in xrange(multi['nsims']):\n multi[key]['pops'][p]['data'].append((Rarr[sim]['R'][key]['pops'][0][p,:]*percent)[indices].tolist())\n multi[key]['pops'][p]['best'].append((Rarr[sim]['R'][key]['pops'][0][p,:]*percent)[indices].tolist())\n multi[key]['pops'][p]['low'].append((Rarr[sim]['R'][key]['pops'][1][p,:]*percent)[indices].tolist())\n multi[key]['pops'][p]['high'].append((Rarr[sim]['R'][key]['pops'][2][p,:]*percent)[indices].tolist())\n multi[key]['pops'][p]['legend'].append(Rarr[sim]['label'])\n multi[key]['tot'] = dict()\n multi[key]['tot']['data'] = []\n multi[key]['tot']['best'] = []\n multi[key]['tot']['low'] = []\n multi[key]['tot']['high'] = []\n multi[key]['tot']['legend'] = []\n multi[key]['tot']['title'] = epititles[key] + ' - Overall'\n multi[key]['tot']['ylabel'] = epiylabels[key]\n multi[key]['xlabel'] = 'Years'\n for sim in xrange(multi['nsims']):\n multi[key]['tot']['data'].append((Rarr[sim]['R'][key]['tot'][0]*percent)[indices].tolist())\n multi[key]['tot']['best'].append((Rarr[sim]['R'][key]['tot'][0]*percent)[indices].tolist())\n multi[key]['tot']['low'].append((Rarr[sim]['R'][key]['tot'][1]*percent)[indices].tolist())\n multi[key]['tot']['high'].append((Rarr[sim]['R'][key]['tot'][2]*percent)[indices].tolist())\n multi[key]['tot']['legend'].append(Rarr[sim]['label']) # Add legends\n \n # Financial cost outputs\n for key in ['costann', 'costcum']:\n multi[key] = dict()\n for ac in ['total']: #['total','future','existing']:\n origkey = 'annual' if key=='costann' else 'cumulative'\n multi[key][ac] = dict()\n if key=='costcum':\n multi[key][ac]['data'] = []\n multi[key][ac]['legend'] = []\n for sim in xrange(multi['nsims']):\n thisdata = Rarr[sim]['R'][key][ac][0][indices].tolist()\n multi[key][ac]['data'].append(thisdata)\n multi[key][ac]['legend'].append(Rarr[sim]['label']) # Add legends\n multi[key][ac]['xdata'] = Rarr[sim]['R']['costshared'][origkey][ac]['xlinedata'][indices].tolist()\n multi[key][ac]['title'] = Rarr[sim]['R']['costshared'][origkey][ac]['title']\n multi[key][ac]['xlabel'] = Rarr[sim]['R']['costshared'][origkey][ac]['xlabel']\n multi[key][ac]['ylabel'] = Rarr[sim]['R']['costshared'][origkey][ac]['ylabel']\n else:\n for yscale in 
['total','gdp','revenue','govtexpend','totalhealth','domestichealth']:\n multi[key][ac][yscale] = dict()\n multi[key][ac][yscale]['data'] = []\n multi[key][ac][yscale]['legend'] = []\n if 'ylinedata' in Rarr[sim]['R']['costshared'][origkey][ac][yscale]:\n for sim in xrange(multi['nsims']):\n thisdata = Rarr[sim]['R'][key][ac][yscale][0][indices].tolist()\n multi[key][ac][yscale]['data'].append(thisdata)\n multi[key][ac][yscale]['legend'].append(Rarr[sim]['label']) # Add legends\n multi[key][ac][yscale]['xdata'] = Rarr[sim]['R']['costshared'][origkey][ac][yscale]['xlinedata'][indices].tolist()\n multi[key][ac][yscale]['title'] = Rarr[sim]['R']['costshared'][origkey][ac][yscale]['title']\n multi[key][ac][yscale]['xlabel'] = Rarr[sim]['R']['costshared'][origkey][ac][yscale]['xlabel']\n multi[key][ac][yscale]['ylabel'] = Rarr[sim]['R']['costshared'][origkey][ac][yscale]['ylabel'] \n \n # Financial commitment outputs\n multi['commit'] = dict()\n for yscale in ['total','gdp','revenue','govtexpend','totalhealth','domestichealth']:\n multi['commit'][yscale] = dict()\n multi['commit'][yscale]['data'] = []\n multi['commit'][yscale]['legend'] = []\n if 'ylinedata' in Rarr[sim]['R']['costshared']['annual']['total'][yscale]:\n for sim in xrange(multi['nsims']):\n thisdata = Rarr[sim]['R']['commit'][yscale][0][indices].tolist()\n multi['commit'][yscale]['data'].append(thisdata)\n multi['commit'][yscale]['legend'].append(Rarr[sim]['label']) # Add legends\n multi['commit'][yscale]['xdata'] = Rarr[sim]['R']['costshared']['commit'][yscale]['xlinedata'][indices].tolist()\n multi['commit'][yscale]['title'] = Rarr[sim]['R']['costshared']['commit'][yscale]['title']\n multi['commit'][yscale]['xlabel'] = Rarr[sim]['R']['costshared']['commit'][yscale]['xlabel']\n multi['commit'][yscale]['ylabel'] = Rarr[sim]['R']['costshared']['commit'][yscale]['ylabel'] \n\n \n printv('...done gathering multi-simulation results.', 4, verbose)\n return multi\n\n\n\n\n\ndef gatheroptimdata(D, result, verbose=2):\n \"\"\" Return the data for plotting the optimization results. 
\"\"\"\n from printv import printv\n printv('Gathering optimization results...', 3, verbose)\n \n optim = dict() # These optimization results\n optim['kind'] = result['kind'] # Flag for the kind of optimization\n optim['multi'] = gathermultidata(D, result['Rarr'], verbose=2) # Calculate data for displaying standard epidemiological results\n if optim['kind'] in ['constant', 'timevarying', 'multiyear']:\n try:\n optim['outcome'] = dict() # Plot how the outcome improved with optimization\n optim['outcome']['ydata'] = result['fval'].tolist() # Vector of outcomes\n optim['outcome']['xdata'] = range(len(result['fval'].tolist())) # Vector of iterations\n optim['outcome']['ylabel'] = 'Outcome'\n optim['outcome']['xlabel'] = 'Iteration'\n rinit = result['fval'][0]\n rfin = result['fval'][-1]\n rreduc = 100*(1 - result['fval'][-1]/result['fval'][0])\n if result['fval'][0]>1:\n optim['outcome']['title'] = 'Outcome (initial: %0.0f, final: %0.0f, reduction: %0.0f%%)' % (rinit, rfin, rreduc)\n else:\n optim['outcome']['title'] = 'Outcome (initial: %0.3f, final: %0.3f, reduction: %0.0f%%)' % (rinit, rfin, rreduc)\n except:\n optim['outcome'] = dict() # Plot how the outcome improved with optimization\n optim['outcome']['ydata'] = [0] # Vector of outcomes\n optim['outcome']['xdata'] = [0] # Vector of iterations\n optim['outcome']['ylabel'] = 'Outcome'\n optim['outcome']['xlabel'] = 'Iteration'\n optim['outcome']['title'] = 'Outcome'\n if optim['kind']=='constant':\n optim['alloc'] = []\n titles = ['Original','Optimal']\n for i in xrange(2): # Original and optimal\n optim['alloc'].append(dict())\n optim['alloc'][i]['piedata'] = result['allocarr'][i][0].tolist() # A vector of allocations, length nprogs, for pie charts\n optim['alloc'][i]['radardata'] = dict() # Structure for storing radar plot data\n optim['alloc'][i]['radardata']['best'] = result['allocarr'][i][0].tolist() # 'Best' estimate: the thick line in the radar plot\n optim['alloc'][i]['radardata']['low'] = result['allocarr'][i][1].tolist() # 'Low' estimate: the \n optim['alloc'][i]['radardata']['high'] = result['allocarr'][i][2].tolist()\n optim['alloc'][i]['title'] = titles[i] # Titles for pies or radar charts\n optim['alloc'][i]['legend'] = D['data']['meta']['progs']['short'] # Program names, length nprogs, for pie and radar\n optim['alloc'][i]['coverage'] = {}\n optim['alloc'][i]['coverage']['num'] = {}\n optim['alloc'][i]['coverage']['num']['best'] = result['covnumarr'][i][0].tolist() # A vector of coverage levels, length nprogs, for coverage table\n optim['alloc'][i]['coverage']['num']['low'] = result['covnumarr'][i][1].tolist() # A vector of coverage levels, length nprogs, for coverage table\n optim['alloc'][i]['coverage']['num']['high'] = result['covnumarr'][i][2].tolist() # A vector of coverage levels, length nprogs, for coverage table\n optim['alloc'][i]['coverage']['per'] = {}\n optim['alloc'][i]['coverage']['per']['best'] = result['covperarr'][i][0].tolist() # A vector of coverage levels, length nprogs, for coverage table\n optim['alloc'][i]['coverage']['per']['low'] = result['covperarr'][i][1].tolist() # A vector of coverage levels, length nprogs, for coverage table\n optim['alloc'][i]['coverage']['per']['high'] = result['covperarr'][i][2].tolist() # A vector of coverage levels, length nprogs, for coverage table\n if optim['kind']=='timevarying' or optim['kind']=='multiyear':\n optim['alloc'] = dict() # Allocation structure\n optim['alloc']['stackdata'] = [] # Empty list\n for p in xrange(D['G']['nprogs']): # Loop over programs\n 
optim['alloc']['stackdata'].append(result['alloc'][p].tolist()) # Allocation array, nprogs x npts, for stacked area plots\n optim['alloc']['xdata'] = result['xdata'].tolist() # Years\n optim['alloc']['xlabel'] = 'Year'\n optim['alloc']['ylabel'] = 'Spending'\n optim['alloc']['title'] = 'Optimal allocation'\n optim['alloc']['legend'] = D['data']['meta']['progs']['short'] # Program names, length nprogs\n if optim['kind']=='range':\n optim['alloc'] = dict() # Allocations structure\n optim['alloc']['bardata'] = []\n for b in xrange(len(result['allocarr'])): # Loop over budgets\n optim['alloc']['bardata'].append(result['allocarr'][b].tolist()) # A vector of allocations, length nprogs\n optim['alloc']['xdata'] = result['budgets'].tolist() # Vector of budgets\n optim['alloc']['xlabels'] = result['budgetlabels'] # Budget labels\n optim['alloc']['ylabel'] = 'Spend'\n optim['alloc']['title'] = 'Budget allocations'\n optim['alloc']['legend'] = D['data']['meta']['progs']['short'] # Program names, length nprogs\n optim['outcome'] = dict() # Dictionary with names and values\n optim['outcome']['bardata'] = result['fval'] # Vector of outcomes, length nbudgets\n optim['outcome']['xdata'] = result['budgets'].tolist() # Vector of budgets\n optim['outcome']['xlabels'] = result['budgetlabels'] # Budget labels\n optim['outcome']['ylabel'] = 'Outcome'\n optim['outcome']['title'] = 'Outcomes'\n\n printv('...done gathering optimization results.', 4, verbose)\n return optim\n","sub_path":"server/src/sim/gatherplotdata.py","file_name":"gatherplotdata.py","file_ext":"py","file_size_in_byte":23386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"636380366","text":"from numpy import zeros, array, linspace\nimport matplotlib.pyplot as plt\n\n\ndef f(x):\n return -x ** 2 + 6.0 * x - 5.0\n\n\nn = 7\nxs = zeros(n)\nx0 = -2.0\nx1 = 3.0\nit = list(range(0, 7))\n\nfor k in range(n):\n x2 = x1 - f(x1) * ((x1 - x0) / (f(x1) - f(x0)))\n x0 = x1\n x1 = x2\n xs[k] = x2\n\n# printing output\nprint('%5s %8s' % ('k', 'x',))\nfor k in range(n):\n print('%5d %9.4f' % (k + 1, xs[k],))\n\nplt.plot(it, xs, 'ko-')\nplt.xlabel('iteration')\nplt.ylabel('x')\nplt.show()\n","sub_path":"src/21-Secant-1.py","file_name":"21-Secant-1.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"555116732","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n\nimport os\nos.chdir(\"/Users/tomoyuki/python_workspace/NLP/VNC\")\nimport MeCab\nimport sqlite3\nfrom contextlib import closing\nimport pandas as pd\nimport pandas.io.sql as psql\nimport numpy as np\nfrom copy import deepcopy\nimport re\nimport jaconv\nimport traceback\nfrom tqdm import tqdm\n\nfrom funcs import char_handler\n\nchar_handler.removeBracket\nchar_handler.checkCharacterType\nchar_handler.removeCharInString\n\n## DBに接続\ndbname = \"EDICT.sqlite3\"\nconn = sqlite3.connect(dbname)\n\nselectColumns(conn, \"code_1332\")\nselectRecords(conn, \"code_1332\")\n\npsql.read_sql(\"SELECT * FROM articles;\", con)\n\n# itemsテーブルのカラム名取得\ncolumn_list = []\ncur = conn.cursor()\nselect_sql = \"pragma table_info(edict);\" \nfor row in cur.execute(select_sql):\n print_txt = \"\"\n for i in range(len(row)):\n print_txt = f\"{print_txt}{row[i]} | \"\n print(print_txt)\n column_list.append(row[1])\ncur.close()\n\n\n\n## 一部分をpandas化\ndf_dict = pd.DataFrame(data=None, columns=column_list)\n\n#id_list = list(range(0,180000,500))\n\nselect_sql = \"select * 
from edict\" \ncur = conn.cursor()\ni=0\n\nfor row in tqdm(list(cur.execute(select_sql))):\n# if i in id_list:\n # DataFarmeに値(行)を追加していく\n tmp_se = pd.Series(row, index=df_dict.columns )\n df_dict = df_dict.append( tmp_se, ignore_index=True )\n \n i+=1\n \ncur.close()\n\n\n# 保存\ndf_dict = df_dict.drop(0, axis=0)\ndf_dict.to_csv(\"df_dict_all.csv\", encoding=\"utf-8\",index=False)\n\n\n\n\n\n## \n##\n#string=\"くぁwせdrfヲヲヲエええ?$#ーーうえ\"\n#char_handler.removeCharInString(string ,ctype=\"hira\")\n##\n##\n### カタカナ2ひらがな\n#char_handler.removeCharInString(jaconv.kata2hira(string),ctype=\"hira\")\n#char_handler.removeCharInString(jaconv.hira2kata(string),ctype=\"kata\")\n\n\n## df_dict読み込み\ndf_dict = pd.read_csv(\"df_dict.csv\")\n\n\n## 1.日本語文字が複数のもの(;)はレコードを分ける\n# カラム名初期化\ncolumns = list(df_dict.columns)\ncolumns.insert(2, \"jpn_hira\")\ncolumns.insert(3, \"jpn_kata\")\n\n# df初期化\ndf_dict_clen1 = pd.DataFrame(data=None, columns=columns)\ndf_dict_clen1_remove = pd.DataFrame(data=None, columns=columns[0:3])\n\n\n\nrow_id = 182\n\n\n\nfor row_id in range(df_dict.shape[0]):\n # 一行取得\n tmp_row = deepcopy(df_dict.iloc[row_id,:])\n \n # 英単語中の()文字を削除\n remove_dict = {}\n tmp_row_index = list(tmp_row.index)\n for i in range(2,tmp_row.shape[0]):\n tmp_str,remove_dict[tmp_row_index[i]] = char_handler.removeBracket(string=tmp_row[tmp_row_index[i]])\n \n # 英単語\n if type(tmp_str) is str:\n tmp_str_split = tmp_str.split(\" \")\n while \"\" in tmp_str_split :\n tmp_str_split.remove(\"\")\n tmp_row[tmp_row_index[i]] = \" \".join(tmp_str_split)\n \n else:\n tmp_row[tmp_row_index[i]] = tmp_str\n \n \n \n # 日本語を取得\n jpn = tmp_row[\"jpn\"]\n \n # 漢字と読み仮名に分割(空欄で分割)\n jpn_org = jpn.split(\" \")[0]\n jpn_readings = jpn.split(\" \")[1]\n \n \n ## 漢字をクレンジング\n # ()文字を削除\n jpn_org, remove_org = char_handler.removeBracket(jpn_org)\n \n \n ## 読み仮名をクレンジング\n # 外側の[]削除\n jpn_readings = jpn_readings[1:-1]\n \n # ()文字を削除\n jpn_readings, remove_readings = char_handler.removeBracket(string=jpn_readings)\n \n # \";\"で分割\n jpn_readings_list = []\n for part_jpn_readings in jpn_readings.split(\";\"):\n # カタカナ2ひらがな\n part_jpn_readings = jaconv.kata2hira(part_jpn_readings)\n \n # ひらがな以外の文字を削除\n part_jpn_readings = char_handler.removeCharInString(part_jpn_readings ,ctype=\"hira\")\n \n # すでに対象文字(part_jpn_readings)がnew_jpn_readingsに含まれていない場合,追加\n if part_jpn_readings not in jpn_readings_list:\n jpn_readings_list.append(part_jpn_readings)\n \n \n \n ## 漢字リスト(jpn_org.split(\";\"))x読み仮名リスト(jpn_readings_list)でレコード追加\n for part_jpn in jpn_org.split(\";\"):\n \n for part_jpn_readings in jpn_readings_list:\n \n # 読み仮名(part_jpn_readings)がない場合は,part_jpnをひらがなに直し,挿入\n if part_jpn_readings==\"\":\n part_jpn_readings = jaconv.kata2hira(part_jpn)\n \n # 追加するレコードを初期化\n new_row = deepcopy(tmp_row)\n \n # jpnをpart_jpnに変換\n new_row[\"jpn\"] = part_jpn\n \n # 読み仮名(ひらがな)を追加\n new_row[\"jpn_hira\"] = part_jpn_readings\n \n # 読み仮名(カタカナ)を追加\n new_row[\"jpn_kata\"] = jaconv.hira2kata(part_jpn_readings)\n \n # 追加\n df_dict_clen1 = df_dict_clen1.append(new_row, ignore_index=True )\n \n # 除外した文字列を格納\n remove_dict[\"id\"] = new_row[\"id\"]\n remove_dict[\"jpn\"] = remove_org\n remove_dict[\"jpn_hira\"] = remove_readings\n \n df_dict_clen1_remove = df_dict_clen1_remove.append(pd.Series(remove_dict), ignore_index=True )\n\n## 保存\ndf_dict_clen1.to_csv(\"df_dict_clen1.csv\", encoding=\"utf-8\",index=False)\ndf_dict_clen1_remove.to_csv(\"df_dict_clen1_remove.csv\", 
encoding=\"utf-8\",index=False)\n\n\n\n\n\n","sub_path":"python/NLP/VNC/db_cleansing.py","file_name":"db_cleansing.py","file_ext":"py","file_size_in_byte":5570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"578378135","text":"from tutor.routes import home, course # noqa\nfrom user_functions import register_and_login\nfrom course_functions import create_test_course\n\n\ndef test_my_courses(client):\n user = register_and_login(client)\n create_test_course(1, \"one\")\n create_test_course(2, \"two\")\n # Make sure private courses for user is empty\n response = client.get('/user?id=' + str(user.id), follow_redirects=True)\n assert b\"one\" not in response.data\n assert b\"two\" not in response.data\n # add one course to favorites\n client.get('/addfav?course_id=2&user_id=' + str(user.id), follow_redirects=True)\n response = client.get('/user?id=' + str(user.id), follow_redirects=True)\n assert b\"one\" not in response.data\n assert b\"two\" in response.data\n # add second course to favorites\n client.get('/addfav?course_id=1&user_id=' + str(user.id), follow_redirects=True)\n response = client.get('/user?id=' + str(user.id), follow_redirects=True)\n assert b\"one\" in response.data\n","sub_path":"tests/test_my_courses.py","file_name":"test_my_courses.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"204768404","text":"from shapely.geometry import Point\nimport matplotlib.pyplot as plt\nfrom rasterio.mask import mask\nfrom rasterio.plot import show\nfrom rasterio.plot import plotting_extent\nimport gdal\nimport pandas as pd\nimport numpy as np\nimport geopandas\nimport rasterio\nimport pycrs\n\nfile_dir=r'/Users/winand.hulleman/Documents/trait-geo-diverse-angiosperms'\n\nvar_names = open(file_dir+\"/data/gis/env_stacked/variable_list.txt\")\nvar_names = var_names.read()\nvar_names = var_names.split(\"\\n\")[1:-1]\n\n#access file with list of taxa names\ntaxa=pd.read_csv(file_dir+\"/data/crops_cleaned/taxalist.txt\",header=None)\ntaxa.columns=[\"taxon\"]\n\nspecies_occ_dict={}\n\nfor i in taxa[\"taxon\"]:\n taxon_data = pd.read_csv(file_dir+\"/data/crops_cleaned/%s.csv\"%i)\n species_occ_dict[\"%s\"%i] = taxon_data \n #check whether all species have been included and inspect dictionary\nif len(species_occ_dict.keys())==len(taxa[\"taxon\"]):\n print(\"All species dataframes now in dictionary\")\nelse:\n print(\"Error: not all species dataframe included\")\n \nfor key in species_occ_dict: \n #load occurrence data and set initial projection\n data=species_occ_dict[key]\n spec = key\n\n data['coordinates'] = list(zip(data[\"decimalLongitude\"], data[\"decimalLatitude\"]))\n data['coordinates'] = data[\"coordinates\"].apply(Point)\n data[\"present/pseudo_absent\"]=1\n geo_data=geopandas.GeoDataFrame(data, geometry='coordinates',crs={'init' :'epsg:4326'})\n\n #change projection to azimuthal equidistant to calculate 1000km buffer around point\n geo_data = geo_data.to_crs({'init': 'esri:54032'}) \n buffer=geo_data.buffer(1000*1000)\n buffer=buffer.to_crs(epsg=4326)\n\n #create single large polygon from individual buffers\n union_buffer=buffer.unary_union\n\n #first clip the raster based on this extend \n raster=rasterio.open(file_dir+'/data/gis/env_stacked/stacked_env_variables.tif')\n \n #specify output tif:\n out_tif = file_dir+'/data/GIS/spec_stacked_raster_clip/%s_raster_clip.tif'%spec\n\n #clip the raster:\n out_img, out_transform = 
rasterio.mask.mask(dataset=raster, shapes=[union_buffer],crop=False)\n\n # Copy the metadata\n out_meta = raster.meta.copy()\n\n # Parse EPSG code\n epsg_code = int(raster.crs.data['init'][5:])\n out_meta.update({\"driver\": \"GTiff\",\n \"height\": out_img.shape[1],\n \"width\": out_img.shape[2],\n \"transform\": out_transform,\n \"crs\": pycrs.parse.from_epsg_code(epsg_code).to_proj4()})\n\n with rasterio.open(out_tif, \"w\", **out_meta) as dest:\n dest.write(out_img)\n \n#Inspect the first band of the clipped raster for all species\nfor key in species_occ_dict:\n \n # Extract occurrence point to plot on the raster (see if correct area was clipped)\n data=species_occ_dict[key]\n spec = key\n data['coordinates'] = list(zip(data[\"decimalLongitude\"], data[\"decimalLatitude\"]))\n data['coordinates'] = data[\"coordinates\"].apply(Point)\n geo_data=geopandas.GeoDataFrame(data, geometry='coordinates',crs={'init' :'epsg:4326'})\n \n # open the clipped raster\n clipped = rasterio.open(file_dir+'/data/GIS/spec_stacked_raster_clip/%s_raster_clip.tif'%spec)\n array = clipped.read(1)\n array_data = clipped.read(1,masked=True)\n array_meta = clipped.profile\n \n fig, ax = plt.subplots(figsize=(10, 10))\n ax.imshow(array_data,cmap=\"gist_earth\",interpolation=\"none\",vmin=0,\n \n # Plot the occurrence points on the raster\n extent=plotting_extent(clipped),)\n spec_plots_points=geo_data[\"coordinates\"]\n spec_plots_points.plot(ax=ax,\n marker='o',\n markersize=20,\n color='red')\n ax.set_title(\"%s \\n Raster clip and occurrence points\"%spec,\n fontsize=20)\n plt.show()\n \n#Works!\n \n#open world raster\nstack_path=file_dir+'/data/GIS/env_stacked/stacked_env_variables.tif'\nr2=gdal.Open(stack_path)\n\n\nfor key in species_occ_dict: \n \n #extract longitude and latitude of occurrence locations and label them as present (1)\n presence_data = species_occ_dict[key]\n presence_data[\"present/pseudo_absent\"]=1\n spec = key\n \n long=presence_data[\"decimalLongitude\"]\n lati=presence_data[\"decimalLatitude\"]\n long=pd.Series.tolist(long)\n lati=pd.Series.tolist(lati)\n \n #read raster\n src=rasterio.open(stack_path)\n array=src.read_masks(1)\n \n # set raster cell mask values of presence locations to threshold value (=1) to exclude them from pseudo-absence sampling\n for i in range(0,len(presence_data)):\n row,col=src.index(long[i],lati[i])\n array[row,col]=1\n \n #subset of cells with datavalues from which to sample pseudo-absences\n (y_index_2, x_index_2) = np.nonzero(array > 1) \n \n #sample random locations from raster excluding sea and presence cells\n r=r2\n (upper_left_x, x_size, x_rotation, upper_left_y, y_rotation, y_size) = r.GetGeoTransform()\n \n x_coords = x_index_2 * x_size + upper_left_x + (x_size / 2) #add half the cell size\n y_coords = y_index_2 * y_size + upper_left_y + (y_size / 2) #to centre the point\n\n lon_lat_array=np.stack((x_coords,y_coords)).T\n\n #determine number of pseudo-absences to sample\n random_sample_size=0\n len_p=int(len(presence_data))\n \n if len_p > 2000:\n random_sample_size=len_p\n else: \n random_sample_size=2000\n \n outer_random_sample_lon_lats=lon_lat_array[np.random.choice(lon_lat_array.shape[0], random_sample_size, replace=False), :] ##\n print(len(outer_random_sample_lon_lats), \"number of outer pseudo absences\")\n \n \n #Add selected cells to dataset\n lon=[]\n lat=[]\n psa=[0]*(random_sample_size)\n taxon=[\"%s\"%spec]*(random_sample_size)\n gbif=[\"no_id\"]*(random_sample_size)\n\n for item in outer_random_sample_lon_lats:\n longitude=item[0]\n 
latitude=item[1]\n lon.append(longitude)\n lat.append(latitude)\n \n #Dataset including occurrences and pseudo-absence points\n new_data=pd.DataFrame({\"gbif_id\": gbif,\"taxon_name\":taxon,\"decimalLongitude\": lon, \"decimalLatitude\":lat, \"present/pseudo_absent\": psa})\n data=pd.concat([presence_data,new_data],ignore_index=True)\n data=data[['taxon_name','gbif_id','decimalLongitude','decimalLatitude','present/pseudo_absent']]\n data[\"taxon_name\"]=spec\n data[\"row_n\"]=np.arange(len(data))\n \n long=data[\"decimalLongitude\"]\n lati=data[\"decimalLatitude\"]\n long=pd.Series.tolist(long)\n lati=pd.Series.tolist(lati)\n \n print(len(data),\"lenght data with pseudo absences pre-filtering\")\n \n #read raster\n src=rasterio.open(stack_path)\n array=src.read_masks(1)\n \n data=data.reset_index(drop=True)\n data.to_csv(file_dir + \"/data/spec_ppa/%s_ppa_dataframe.csv\"%spec)\n\n#next species\n \nraster=rasterio.open(file_dir+'/data/GIS/env_stacked/stacked_env_variables.tif')\narray = raster.read()\nprofile=raster.profile\n\nwith open(file_dir+'/data/GIS/env_bio_mean_std.txt','w+') as file:\n file.write(\"band\"+\"\\t\"+\"mean\"+\"\\t\"+\"std_dev\"+\"\\n\")\n file.close()\n \nfor i in range(1,65):\n print(i)\n profile.update(count=1)\n band=raster.read(i)\n band[band < -9999] = -9999\n where_are_NaNs = np.isnan(band)\n band[where_are_NaNs] = -9999\n band_masked = np.ma.masked_array(band, mask=(band == -9999))\n\n #calculate mean and std.dev of each band\n mean=band_masked.mean()\n std_dev=np.std(band_masked)\n\n #write to file\n with open(file_dir+'/data/GIS/env_bio_mean_std.txt','a') as file:\n file.write(str(i)+\"\\t\"+str(mean)+\"\\t\"+str(std_dev)+\"\\n\")\n \n\n#access file with list of taxa names\ntaxa=pd.read_csv(file_dir+\"/data/crops_cleaned/taxalist.txt\",header=None)\ntaxa.columns=[\"taxon\"]\n\nsrc=rasterio.open(file_dir+'/data/GIS/env_stacked/stacked_env_variables.tif')\ninRas=gdal.Open(file_dir+'/data/GIS/env_stacked/stacked_env_variables.tif')\n\nfor i in taxa[\"taxon\"][:]:\n \n data = pd.read_csv(file_dir+\"/data/spec_ppa/%s_ppa_dataframe.csv\"%i)\n spec = data[\"taxon_name\"][0]\n spec = spec.replace(\" \",\"_\")\n print(\"processing species \", spec)\n \n\n #get all collumn and row numbers \n len_pd=np.arange(len(data))\n long=data[\"decimalLongitude\"]\n lati=data[\"decimalLatitude\"]\n ppa=data[\"present/pseudo_absent\"]\n\n lon=long.values\n lat=lati.values\n\n row=[]\n col=[]\n\n for i in len_pd:\n row_n, col_n = src.index(lon[i], lat[i])# spatial --> image coordinates\n row.append(row_n)\n col.append(col_n)\n\n ##opening raster as 3d numpy array\n myarray=inRas.ReadAsArray()\n\n #collect file with mean and std_dev for each band\n mean_std=pd.read_csv(file_dir+'/data/GIS/env_bio_mean_std.txt',sep=\"\\t\")\n mean_std=mean_std.to_numpy()\n\n\n ########################################################\n #extract the values for all bands and prepare input data\n ########################################################\n X=[]\n species =[\"%s\"%spec]*int(len(row))\n\n for j in range(0,64):\n band=myarray[j]\n x=[]\n\n #extract coastal outline \n \n for i in range(0,len(row)):\n value= band[row[i],col[i]]\n if j < 46:\n if value <-1000:\n value=np.nan\n else: \n value = ((value - mean_std.item((j,1))) / mean_std.item((j,2)))#scale values\n x.append(value)\n \n if j >= 46:\n if value <-1000:\n value=np.nan\n else: \n value=value\n x.append(value)\n X.append(x)\n \n \n\n #set as numpy 2d array\n X =np.array([np.array(xi) for xi in X])\n #X\n \n #transform into dataframe and 
include row and column values\n df=pd.DataFrame(X) \n df=df.T\n \n df[\"present/pseudo_absent\"]=ppa\n df[\"decimalLatitude\"]=lati\n df[\"decimalLongitude\"]=long\n df[\"taxon_name\"]=species\n df[\"present/pseudo_absent\"]=ppa\n df[\"row_n\"]=row\n df.rename(columns=dict(zip(df.columns[0:186], var_names)),inplace=True)\n \n #drop any potential rows with no-data values\n df=df.dropna(axis=0, how='any')\n input_data=df\n \n ##save input dataframe\n input_data.to_csv(file_dir +\"/data/spec_ppa_env/%s_env_dataframe.csv\"%spec)\n \n##opening raster as 3d numpy array\ninRas=gdal.Open(file_dir+'/data/GIS/env_stacked/stacked_env_variables.tif')\nmyarray=inRas.ReadAsArray()\nprint(myarray.shape)\nprint(type(myarray))\n\n#get all collumn and row values for all cells to predict over \ndf=pd.read_csv(file_dir+'/data/GIS/world_locations_to_predict.csv')\n\nlen_pd=np.arange(len(df))\nlon=df[\"decimal_longitude\"]\nlat=df[\"decimal_latitude\"]\nlon=lon.values\nlat=lat.values\n\nrow=[]\ncol=[]\n\nsrc=rasterio.open(file_dir+'/data/GIS/env_stacked/stacked_env_variables.tif')\n\nfor i in len_pd:\n row_n, col_n = src.index(lon[i], lat[i])# spatial --> image coordinates\n row.append(row_n)\n col.append(col_n)\n\n#collect file with mean and std_dev for each band\nmean_std=pd.read_csv(file_dir+'/data/GIS/env_bio_mean_std.txt',sep=\"\\t\")\nmean_std=mean_std.to_numpy()\n\n\n###########################################################\n# extract the values for all bands and prepare input data #\n###########################################################\nX=[]\n\nfor j in range(0,65):\n print(j)\n band=myarray[j]\n x=[]\n\n for i in range(0,len(row)):\n if j < 46:\n value= band[row[i],col[i]]\n value = ((value - mean_std.item((j,1))) / mean_std.item((j,2)))#scale values\n x.append(value)\n if j >= 46:\n value= band[row[i],col[i]]\n x.append(value)\n X.append(x)\n\n\n #include row and column values\n X.append(row)\n X.append(col)\n \n #set as numpy 2d array\n X =np.array([np.array(xi) for xi in X])\n \n df=pd.DataFrame(X)\n \n df=df.T\n df.rename(columns=dict(zip(df.columns[0:65], var_names)),inplace=True)\n df=df.dropna(axis=0, how='any')\n df.head()\n \n input_X=df.iloc[:,0:65]\n np.shape(input_X)\n \n row=df[64]\n col=df[65]\n \n row_col=pd.DataFrame({\"row\":row,\"col\":col})\n \n #convert dataframe back to numpy array\n input_X=input_X.values\n \n #convert rows and col indices back to array\n row=row.values\n col=col.values\n \n #save\n prediction_array=np.save(file_dir+'/data/GIS/world_prediction_array.npy',input_X)\n prediction_pandas=row_col.to_csv(file_dir+'/data/GIS/world_prediction_row_col.csv')","sub_path":"script/python/data_prep_II.py","file_name":"data_prep_II.py","file_ext":"py","file_size_in_byte":12524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"64636910","text":"import xlrd\n\nbook = xlrd.open_workbook('SOWC 2014 Stat Tables_Table 9.xlsx')\n\nfor sheet in book.sheets():\n print(sheet.name)\nsheet = book.sheet_names()[1]\n\nprint(sheet)\n\nfor i in range(sheet.nrows):\n print(i)\n","sub_path":"scrapy_file/parse_excel.py","file_name":"parse_excel.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"645321967","text":"# 26부터 시작한다. 2+6 = 8이다. 새로운 수는 68이다. \n# 6+8 = 14이다. 새로운 수는 84이다. \n# 8+4 = 12이다. 새로운 수는 42이다. \n# 4+2 = 6이다. 새로운 수는 26이다.\n# 위의 예는 4번만에 원래 수로 돌아올 수 있다. 
따라서 26의 사이클의 길이는 4이다.\n# N이 주어졌을 때, N의 사이클의 길이를 구하는 프로그램을 작성하시오.\n\nnum = int(input()) # 68\ntmp = num\ncnt = 0\n\nwhile True:\n a = num // 10 # 6\n b = num % 10 # 8\n c = (a + b) % 10 # (6 + 8) % 10 = 1\"4\"\n num = (b * 10) + c # 80 + 4 = 84\n\n cnt = cnt + 1 # 사이클 수 + 1\n if(tmp == num): # tmp에 입력된 num과 똑같은 숫자가 나오면 break\n break\n\nprint(cnt)","sub_path":"4. while문/1110.py","file_name":"1110.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"148111123","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# restPlugin - HTML preview of reSt formatted text in gedit\n#\n# Copyright (C) 2007 - Christophe Kibleur\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2, or (at your option)\n# any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n\nfrom gi.repository import Gedit\nfrom gi.repository import GObject\n\nimport os\nimport io\nfrom gi.repository import Gtk\nfrom gi.repository import WebKit\n\nfrom gettext import gettext as _\nfrom makeTable import toRSTtable\n## pygments support\n#import RegisterPygment\n## docutils\nfrom docutils.core import publish_parts\n\n## I'm not satisfied with that\nrestpluginDir = os.path.dirname(os.path.abspath(__file__))\ncss = os.path.join(restpluginDir, 'restmain.css')\nstyles = open(css, 'r')\n\nSTART_HTML = \"\"\"\n\n\n \n \n \n\n\"\"\" % (styles.read())\n\nstyles.close()\n\nEND_HTML = \"\"\"\n\n\"\"\"\n\n# Menu item example, insert a new item in the Tools menu\nui_str = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n HTML\" action=\"--> HTML\"/>\n \n \n LaTeX\" action=\"--> LaTeX\"/>\n \n \n OpenOffice\" action=\"--> OpenOffice\"/>\n \n \n \n \n\n\"\"\"\n\n\nclass restPlugin(GObject.Object, Gedit.WindowActivatable):\n\n window = GObject.property(type=Gedit.Window)\n\n def __init__(self):\n GObject.Object.__init__(self)\n\n def do_activate(self):\n ## TODO : Maybe have to check the filetype ?\n\n # Store data in the window object\n windowdata = dict()\n self.window.reStPreviewData = windowdata\n\n scrolled_window = Gtk.ScrolledWindow()\n scrolled_window.set_property(\"hscrollbar-policy\",\n Gtk.PolicyType.AUTOMATIC)\n scrolled_window.set_property(\"vscrollbar-policy\",\n Gtk.PolicyType.AUTOMATIC)\n scrolled_window.set_property(\"shadow-type\",\n Gtk.ShadowType.IN)\n\n html_view = WebKit.WebView()\n html_view.load_string(\"%s\\n
reStructuredText viewer
\\n%s\" %\n (START_HTML, END_HTML), 'text/html',\n 'utf8', '')\n\n #scrolled_window.set_hadjustment(html_view.get_hadjustment())\n #scrolled_window.set_vadjustment(html_view.get_vadjustment())\n scrolled_window.add(html_view)\n scrolled_window.show_all()\n\n bottom = self.window.get_bottom_panel()\n image = Gtk.Image()\n image.set_from_icon_name(\"gnome-mime-text-html\", Gtk.IconSize.MENU)\n bottom.add(scrolled_window)\n windowdata[\"bottom_panel\"] = scrolled_window\n windowdata[\"html_doc\"] = html_view\n\n manager = self.window.get_ui_manager()\n\n ## Added later\n #separator = Gtk.SeparatorMenuItem()\n self._action_group = Gtk.ActionGroup(\"reStPluginActions\")\n self._action_group.add_actions([(\"preview\", None, _(\"reSt preview\"),\n \"R\",\n _(\"reSt preview\"),\n self.on_update_preview),\n (\"table\", None, _(\"Create Table\"),\n None, _(\"Create a reSt table\"),\n self.on_create_table),\n (\"sourcecode\", None, _(\"Paste Code\"),\n None, _(\"Paste sourcecode\"),\n self.on_paste_code),\n (\"--> HTML\", None, _(\"--> HTML\"),\n None, _(\"transform to HTML\"),\n self.on_html),\n (\"--> LaTeX\", None, _(\"--> LaTeX\"),\n None, _(\"transform to LaTeX\"),\n self.on_latex),\n (\"--> OpenOffice\", None,\n _(\"--> OpenOffice\"),\n None, _(\"transform to OpenOffice\"),\n self.on_openoffice),\n ])\n\n # Insert the action group\n manager.insert_action_group(self._action_group, -1)\n\n # Merge the UI\n self._ui_id = manager.add_ui_from_string(ui_str)\n\n def do_deactivate(self):\n # Retreive the data of the window object\n windowdata = self.window.reStPreviewData\n\n # Remove the menu action\n if 'ui_id' in windowdata:\n manager = self.window.get_ui_manager()\n manager.remove_ui(windowdata[\"ui_id\"])\n manager.remove_action_group(windowdata[\"action_group\"])\n\n # Remove the bottom panel\n bottom = self.window.get_bottom_panel()\n print('keys = %s' %\n [x.get_name() for x in bottom.get_children()])\n bottom.remove(windowdata[\"bottom_panel\"])\n\n def getSelection(self):\n view = self.window.get_active_view()\n if not view:\n return\n\n doc = view.get_buffer()\n\n start = doc.get_start_iter()\n end = doc.get_end_iter()\n\n if doc.get_selection_bounds():\n start = doc.get_iter_at_mark(doc.get_insert())\n end = doc.get_iter_at_mark(doc.get_selection_bound())\n\n text = doc.get_text(start, end) # noqa\n\n # Menu activate handlers\n def on_update_preview(self, window):\n # Retreive the data of the window object\n windowdata = self.window.reStPreviewData\n\n view = self.window.get_active_view()\n if not view:\n return\n\n doc = view.get_buffer()\n\n start = doc.get_start_iter()\n end = doc.get_end_iter()\n\n if doc.get_selection_bounds():\n start = doc.get_iter_at_mark(doc.get_insert())\n end = doc.get_iter_at_mark(doc.get_selection_bound())\n\n text = doc.get_text(start, end, False)\n html = publish_parts(text, writer_name=\"html\")[\"html_body\"]\n\n# ## Sortie\n# sortie = '\\n'.join([START_HTML, html, END_HTML])\n# fs = io.open('sortie.html', 'w', encoding='utf8')\n# fs.write(sortie)\n# fs.close()\n\n p = windowdata[\"bottom_panel\"].get_placement()\n\n html_doc = windowdata[\"html_doc\"]\n html_doc.load_string(\"%s\\n%s\\n%s\" %\n (START_HTML, html, END_HTML),\n 'text/html', 'utf8', '')\n\n windowdata[\"bottom_panel\"].set_placement(p)\n\n def on_latex(self, action):\n doc = self.window.get_active_document()\n filename = doc.get_uri_for_display()[:-4]\n pd = restpluginDir\n os.popen2('python %s/to_tex.py \"%s.rst\" \"%s.tex\"' %\n (pd, filename, filename))\n\n def on_html(self, action):\n 
doc = self.window.get_active_document()\n filename = doc.get_uri_for_display()[:-4]\n pd = restpluginDir\n os.popen2('python %s/to_html.py --stylesheet=%s/restmain.css '\n '\"%s.rst\" \"%s.html\"' %\n (pd, pd, filename, filename))\n\n def on_openoffice(self, action):\n doc = self.window.get_active_document()\n filename = doc.get_uri_for_display()[:-4]\n pd = restpluginDir\n os.popen2('python %s/to_odt.py --add-syntax-highlighting '\n '--stylesheet=%s/default.odt \"%s.rst\" \"%s.odt\"' %\n (pd, pd, filename, filename))\n\n def on_paste_code(self, action):\n doc = self.window.get_active_document()\n\n if not doc:\n return\n\n lines = Gtk.clipboard_get().wait_for_text().split('\\n')\n to_copy = \"\\n\".join([line for line in lines[1:]])\n doc.insert_at_cursor('..sourcecode:: ChoosenLanguage\\n\\n %s\\n' %\n lines[0])\n doc.insert_at_cursor(to_copy + '\\n\\n')\n\n def on_create_table(self, action):\n view = self.window.get_active_view()\n\n if not view:\n return\n\n indent = view.get_indent() # noqa\n\n doc = view.get_buffer()\n #print 'language=',doc.get_language()\n\n start = doc.get_start_iter()\n end = doc.get_end_iter()\n\n if doc.get_selection_bounds():\n start = doc.get_iter_at_mark(doc.get_insert())\n end = doc.get_iter_at_mark(doc.get_selection_bound())\n\n text = doc.get_text(start, end)\n doc.delete(start, end)\n\n lines = text.split(\"\\n\")\n labels = lines[0].split(',')\n rows = [row.strip().split(',') for row in lines[1:]]\n\n doc.insert_at_cursor(toRSTtable([labels] + rows))\n","sub_path":"reStPlugin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"267390090","text":"\"\"\"\nNeural network.\n@author: David Diaz Vico\n\"\"\"\n\nfrom .estimator import ClassifierBuilder, EstimatorBuilder, RegressorBuilder, delog\nfrom keras.callbacks import EarlyStopping, History, ModelCheckpoint\nfrom keras.layers import Dense, Dropout, Input\nfrom keras.layers import Convolution2D, Flatten, MaxPooling2D\nfrom keras.layers import LSTM\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.wrappers import TimeDistributed\nfrom keras.models import Model\nfrom keras.optimizers import SGD\nfrom keras.regularizers import l1l2, activity_l1l2\nfrom keras.utils.np_utils import to_categorical, probas_to_classes\nimport numpy as np\n\n\ndef temporal_tensor(x, window):\n return np.array([x[i:i + window] for i in range(x.shape[0] - window + 1)])\n\n\nclass NeuralNetwork(Model):\n\n def fit(\n self,\n x,\n y,\n optimizer=None,\n x_test=None,\n y_test=None,\n validation_split=0.1,\n nb_epoch=100,\n patience=0.01,\n metrics=[]\n ):\n if optimizer is None:\n optimizer = SGD(lr=self.lr)\n Model.compile(\n self,\n optimizer=optimizer,\n loss={'output': self.loss},\n metrics=metrics\n )\n if self.window > 1:\n x = temporal_tensor(x, self.window)\n y = y[self.window - 1:]\n if x_test is not None:\n x_test = temporal_tensor(x_test, self.window)\n y_test = y_test[self.window - 1:]\n callbacks = [\n EarlyStopping(\n monitor='val_loss',\n patience=int(nb_epoch * patience)\n ),\n History(),\n ModelCheckpoint(\n filepath='weights_best_neural_network.hdf5',\n monitor='val_loss',\n save_best_only=True\n )\n ]\n if x_test is not None and y_test is not None:\n self.history = Model.fit(\n self,\n x={'input': x},\n y={'output': y},\n batch_size=self.batch_size,\n nb_epoch=nb_epoch,\n verbose=0,\n callbacks=callbacks,\n validation_data=({'input': x_test}, {'output': 
y_test})\n )\n else:\n self.history = Model.fit(\n self,\n x={'input': x},\n y={'output': y},\n batch_size=self.batch_size,\n nb_epoch=nb_epoch,\n verbose=0,\n callbacks=callbacks,\n validation_split=validation_split\n )\n\n self.load_weights('weights_best_neural_network.hdf5')\n return self\n\n def predict(self, x):\n if self.window > 1:\n x = temporal_tensor(\n np.vstack(([x[0] for i in range(self.window - 1)], x)),\n self.window\n )\n return Model.predict(self, x={'input': x})\n\n\nclass FeedForward(NeuralNetwork):\n\n def __init__(self, **kwargs):\n return self._init(**delog(kwargs))\n\n def _init(\n self,\n x,\n y,\n loss,\n act_out,\n init_out='glorot_uniform',\n n_convolutional=0,\n nb_filter=1,\n nb_row=1,\n nb_col=1,\n pool_h=1,\n pool_w=1,\n stride_h=1,\n stride_w=1,\n n_lstm=0,\n window=1,\n n_dense=1,\n act_hidden='relu',\n init_hidden='he_uniform',\n n_hidden=100,\n dropout_p=0.0,\n l1=0.0,\n l2=0.0,\n a_l1=0.0,\n a_l2=0.0,\n bias=True,\n normalize_output=False,\n lr=0.1,\n batch_size=128\n ):\n\n def convolution2d(x):\n return Convolution2D(\n nb_filter=nb_filter,\n nb_row=nb_row,\n nb_col=nb_col,\n init=init_hidden,\n activation=act_hidden,\n W_regularizer=l1l2(l1=l1, l2=l2),\n activity_regularizer=activity_l1l2(l1=a_l1, l2=a_l2),\n bias=bias\n )(x)\n\n def maxpooling(x):\n return MaxPooling2D(pool_size=pool_size, strides=strides)(x)\n\n def lstm(x):\n return LSTM(output_dim=n_hidden, return_sequences=l < n_lstm - 1)(x)\n\n def dense(x):\n return Dense(\n output_dim=n_hidden,\n init=init_hidden,\n activation=act_hidden,\n W_regularizer=l1l2(l1=l1, l2=l2),\n activity_regularizer=activity_l1l2(l1=a_l1, l2=a_l2),\n bias=bias\n )(x)\n\n self.loss = loss\n self.lr = lr\n self.batch_size = batch_size\n self.window = window if n_lstm > 0 else 1\n input_shape = x.shape[1:]\n output_shape = y.shape[1:]\n pool_size = (pool_h, pool_w)\n strides = (stride_h, stride_w)\n n_dense_hidden = n_dense - 1\n\n if n_lstm > 0 and self.window > 1:\n x = inputs = Input(\n shape=(self.window, ) + input_shape,\n name='input'\n )\n for c in range(n_convolutional):\n x = BatchNormalization(axis=1)(x)\n x = TimeDistributed(convolution2d)(x)\n x = TimeDistributed(maxpooling)(x)\n if n_convolutional > 0:\n x = TimeDistributed(Flatten())(x)\n for l in range(n_lstm):\n x = BatchNormalization()(x)\n x = lstm(x)\n x = Dropout(dropout_p)(x)\n else:\n x = inputs = Input(shape=input_shape, name='input')\n for c in range(n_convolutional):\n x = BatchNormalization(axis=1)(x)\n x = convolution2d(x)\n x = maxpooling(x)\n if n_convolutional > 0:\n x = Flatten()(x)\n for d in range(n_dense_hidden):\n# x = BatchNormalization()(x)\n x = dense(x)\n x = Dropout(dropout_p)(x)\n if normalize_output:\n x = Dense(\n output_dim=np.prod(output_shape),\n init=init_out,\n activation=act_out,\n W_regularizer=l1l2(l1=l1, l2=l2),\n )(x)\n outputs = BatchNormalization(name='output')(x)\n else:\n outputs = Dense(\n output_dim=np.prod(output_shape),\n init=init_out,\n activation=act_out,\n W_regularizer=l1l2(l1=l1, l2=l2),\n name='output'\n )(x)\n\n NeuralNetwork.__init__(self, input=inputs, output=outputs)\n\n\nclass FeedForwardBuilder(EstimatorBuilder):\n\n @staticmethod\n def space(x):\n search_space = {\n 'window': [1, 10],\n 'n_dense': [0, 3],\n 'n_hidden': [1, 100],\n 'dropout_p': [0.0, 1.0],\n 'log_l1': [-5.0, -1.0],\n 'log_l2': [-5.0, -1.0],\n 'log_a_l1': [-5.0, -1.0],\n 'log_a_l2': [-5.0, -1.0],\n 'log_lr': [-5.0, -1.0],\n 'batch_size': [1, 128]\n }\n if len(x.shape) > 2:\n search_space.update({\n 'nb_filter': [1, 16],\n 
'nb_row': [1, 3],\n 'nb_col': [1, 3]\n })\n return search_space\n\n\nclass FeedForwardClassifier(FeedForward):\n\n def __init__(self, x, y, **kwargs):\n y = to_categorical(y=y.flatten().astype(int))\n FeedForward.__init__(\n self,\n x=x,\n y=y,\n loss='categorical_crossentropy',\n act_out='softmax',\n **kwargs\n )\n\n def fit(self, x, y, y_test=None, **kwargs):\n return FeedForward.fit(\n self,\n x=x,\n y=to_categorical(y=y.flatten().astype(int)),\n y_test=to_categorical(y=y_test.flatten().astype(int)) if y_test is not None else None,\n metrics=['categorical_accuracy'],\n **kwargs\n )\n\n def predict(self, x):\n return probas_to_classes(FeedForward.predict(self, x=x))\n\n def predict_proba(self, x):\n return FeedForward.predict(self, x=x)\n\n\nclass FeedForwardClassifierBuilder(FeedForwardBuilder, ClassifierBuilder):\n\n @staticmethod\n def build(**kwargs):\n return FeedForwardClassifier(**kwargs)\n\n\nclass FeedForwardRegressor(FeedForward):\n\n def __init__(self, x, y, **kwargs):\n FeedForward.__init__(\n self,\n x=x,\n y=y,\n loss='mean_absolute_error',\n act_out='linear',\n **kwargs\n )\n\n def fit(self, x, y, **kwargs):\n return FeedForward.fit(\n self,\n x=x,\n y=y,\n metrics=['mean_absolute_error', 'mean_squared_error'],\n **kwargs\n )\n\n def predict(self, x):\n predictions = FeedForward.predict(self, x=x)\n if predictions.shape[1] == 1:\n predictions = predictions.flatten()\n return predictions\n\n\nclass FeedForwardRegressorBuilder(FeedForwardBuilder, RegressorBuilder):\n\n @staticmethod\n def build(**kwargs):\n return FeedForwardRegressor(**kwargs)\n","sub_path":"predictor/estimator/neuralnetwork.py","file_name":"neuralnetwork.py","file_ext":"py","file_size_in_byte":9125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"578025893","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2018 - Swiss Data Science Center (SDSC)\n# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and\n# Eidgenössische Technische Hochschule Zürich (ETHZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Web auth routes.\"\"\"\nimport jwt\nimport json\nimport time\nimport logging\nimport re\nimport urllib.parse\nfrom oic.oauth2.grant import Token\nfrom quart import request, redirect, url_for, current_app, Response, session, render_template\nfrom urllib.parse import urljoin, quote_plus\n\nfrom oic.oic import Client\nfrom oic.utils.authn.client import CLIENT_AUTHN_METHOD\nfrom oic import rndstr\nfrom oic.oic.message import AuthorizationResponse, RegistrationResponse\n\nfrom .. 
import app, store\n\n\nlogger = logging.getLogger(__name__)\n# Note that this part of the service should be seen as the server-side part of the UI or\n\nJWT_ALGORITHM = 'RS256'\nSCOPE = ['openid']\n\n# We prepare the OIC client instance with the necessary configurations.\nclient = Client(client_authn_method=CLIENT_AUTHN_METHOD)\n\ntry:\n client.provider_config(\n issuer=app.config['OIDC_ISSUER']\n )\nexcept:\n pass\n\n\n# This fakes the response we would get from registering the client through the API\nclient_reg = RegistrationResponse(\n client_id=app.config['OIDC_CLIENT_ID'],\n client_secret=app.config['OIDC_CLIENT_SECRET']\n)\nclient.store_registration_info(client_reg)\n\n\ndef get_valid_token(headers):\n \"\"\"\n Look for a fresh and valid token, first in headers, then in the session.\n\n If a refresh token is available, it can be swapped for an access token.\n \"\"\"\n m = re.search(r'bearer (?P.+)', headers.get('Authorization', ''), re.IGNORECASE)\n\n if m:\n if jwt.decode(m.group('token'), verify=False).get('typ') in ['Offline', 'Refresh']:\n logger.debug(\"Swapping the token\")\n to = Token(resp={'refresh_token': m.group('token')})\n token_response = client.do_access_token_refresh(token=to)\n\n if 'access_token' in token_response:\n try:\n a = jwt.decode(\n token_response['access_token'], app.config['OIDC_PUBLIC_KEY'],\n algorithms=JWT_ALGORITHM,\n audience=app.config['OIDC_CLIENT_ID']\n )\n return token_response\n except:\n return None\n else:\n try:\n jwt.decode(\n m.group('token'),\n app.config['OIDC_PUBLIC_KEY'],\n algorithms=JWT_ALGORITHM,\n audience=app.config['OIDC_CLIENT_ID']\n )\n\n return {'access_token': m.group('token')}\n\n except:\n return None\n else:\n if headers.get('X-Requested-With') == 'XMLHttpRequest' and 'token' in session:\n try:\n jwt.decode(\n session.get('token'),\n app.config['OIDC_PUBLIC_KEY'],\n algorithms=JWT_ALGORITHM,\n audience=app.config['OIDC_CLIENT_ID']\n )\n return {'access_token': session.get('token')}\n\n except:\n\n a = jwt.decode(session.get('token'), verify=False)\n refresh_token = store.get(get_key_for_user(a, 'kc_refresh_token')).decode()\n\n logger.debug(\"Refreshing the token\")\n to = Token(resp={'refresh_token': refresh_token})\n\n token_response = client.do_access_token_refresh(token=to)\n\n if 'access_token' in token_response:\n try:\n a = jwt.decode(\n token_response['access_token'], app.config['OIDC_PUBLIC_KEY'],\n algorithms=JWT_ALGORITHM,\n audience=app.config['OIDC_CLIENT_ID']\n )\n session['token'] = token_response['access_token']\n store.put(get_key_for_user(a, 'kc_access_token'), token_response['access_token'].encode())\n store.put(get_key_for_user(a, 'kc_refresh_token'), token_response['refresh_token'].encode())\n store.put(get_key_for_user(a, 'kc_id_token'), json.dumps(token_response['id_token'].to_dict()).encode())\n return token_response\n except:\n return None\n\n return None\n\n\ndef get_key_for_user(token, name):\n return \"cache_{}_{}\".format(token.get('sub'), name)\n\nLOGIN_SEQUENCE = ['gitlab_login', 'jupyterhub_login']\n\n@app.route(urljoin(app.config['SERVICE_PREFIX'], 'auth/login/next'))\nasync def login_next():\n\n if session['login_seq'] < len(LOGIN_SEQUENCE):\n return await render_template('redirect.html', redirect_url=url_for(LOGIN_SEQUENCE[session['login_seq']]))\n else:\n return redirect(session['ui_redirect_url'])\n\n\n@app.route(urljoin(app.config['SERVICE_PREFIX'], 'auth/login'))\nasync def login():\n\n state = rndstr()\n\n session['state'] = state\n session['login_seq'] = 0\n 
session['ui_redirect_url'] = request.args.get('redirect_url')\n session['cli_token'] = request.args.get('cli_token')\n if session['cli_token']:\n session['ui_redirect_url'] = app.config['HOST_NAME'] + url_for('info')\n\n args = {\n 'client_id': app.config['OIDC_CLIENT_ID'],\n 'response_type': 'code',\n 'scope': SCOPE,\n 'redirect_uri': app.config['HOST_NAME'] + url_for('get_tokens'),\n 'state': state\n }\n auth_req = client.construct_AuthorizationRequest(request_args=args)\n login_url = auth_req.request(client.authorization_endpoint)\n response = await app.make_response(redirect(login_url))\n\n return response\n\n\n@app.route(urljoin(app.config['SERVICE_PREFIX'], 'auth/token'))\nasync def get_tokens():\n\n # This is more about parsing the request data than any response data....\n authorization_parameters = client.parse_response(\n AuthorizationResponse,\n info=request.query_string.decode('utf-8'),\n sformat='urlencoded'\n )\n\n if session.get('state') != authorization_parameters['state']:\n return 'Something went wrong while trying to log you in.'\n\n token_response = client.do_access_token_request(\n scope=SCOPE,\n state=authorization_parameters['state'],\n request_args={\n 'code': authorization_parameters['code'],\n 'redirect_uri': app.config['HOST_NAME'] + url_for('get_tokens'),\n }\n )\n\n # chain logins\n response = await app.make_response(redirect(url_for('login_next')))\n\n a = jwt.decode(\n token_response['access_token'], app.config['OIDC_PUBLIC_KEY'],\n algorithms=JWT_ALGORITHM,\n audience=app.config['OIDC_CLIENT_ID']\n )\n session['token'] = token_response['access_token']\n store.put(get_key_for_user(a, 'kc_access_token'), token_response['access_token'].encode())\n store.put(get_key_for_user(a, 'kc_refresh_token'), token_response['refresh_token'].encode())\n store.put(get_key_for_user(a, 'kc_id_token'), json.dumps(token_response['id_token'].to_dict()).encode())\n\n # we can already tell the CLI which token to use\n if session.get('cli_token'):\n logger.debug(\"Notification for request {}\".format(session.get('cli_token')))\n\n key = \"cli_{}\".format(hashlib.sha256(session.get('cli_token').encode()).hexdigest())\n store.put(key, json.dumps({'access_token': token_response['access_token'], 'refresh_token': token_response['refresh_token']}).encode())\n\n return response\n\n\n@app.route(urljoin(app.config['SERVICE_PREFIX'], 'auth/info'))\nasync def info():\n\n t = request.args.get('cli_token')\n if t:\n timeout = 120\n key = \"cli_{}\".format(hashlib.sha256(t.encode()).hexdigest())\n logger.debug(\"Waiting for Keycloak callback for request {}\".format(t))\n val = store.get(key)\n while not val and timeout > 0:\n time.sleep(3)\n timeout -= 3\n val = store.get(key)\n if val:\n store.delete(key)\n return val\n else:\n logger.debug(\"Timeout while waiting for request {}\".format(t))\n return '{\"error\": \"timeout\"}'\n else:\n\n if 'token' not in session:\n return await app.make_response(redirect(\"{}?redirect_url={}\".format(url_for('login'), quote_plus(url_for('info')))))\n\n try:\n a = jwt.decode(\n session['token'],\n app.config['OIDC_PUBLIC_KEY'],\n algorithms=JWT_ALGORITHM,\n audience=app.config['OIDC_CLIENT_ID']\n ) # TODO: logout and redirect if fails because of expired\n\n return \"You can copy/paste the following tokens if needed and close this page:
Access Token: {}
Refresh Token: {}\".format(\n store.get(get_key_for_user(a, 'kc_access_token')).decode(), store.get(get_key_for_user(a, 'kc_refresh_token')).decode())\n\n except jwt.ExpiredSignatureError:\n return await app.make_response(redirect(\"{}?redirect_url={}\".format(url_for('login'), quote_plus(url_for('info')))))\n\n\n@app.route(urljoin(app.config['SERVICE_PREFIX'], 'auth/user'))\nasync def user():\n\n if 'token' not in session:\n return await app.make_response(redirect(\"{}?redirect_url={}\".format(url_for('login'), quote_plus(url_for('user')))))\n try:\n a = jwt.decode(\n session['token'],\n app.config['OIDC_PUBLIC_KEY'],\n algorithms=JWT_ALGORITHM,\n audience=app.config['OIDC_CLIENT_ID']\n ) # TODO: logout and redirect if fails because of expired\n\n return store.get(get_key_for_user(a, 'kc_id_token')).decode()\n\n except jwt.ExpiredSignatureError:\n return await app.make_response(redirect(\"{}?redirect_url={}\".format(url_for('login'), quote_plus(url_for('user')))))\n\n\n@app.route(urljoin(app.config['SERVICE_PREFIX'], 'auth/logout'))\nasync def logout():\n\n logout_url = '{}/protocol/openid-connect/logout?{}'.format(\n app.config['OIDC_ISSUER'],\n urllib.parse.urlencode({'redirect_uri': app.config['HOST_NAME'] + url_for('gitlab_logout')}),\n )\n\n if request.args.get('gitlab_logout'):\n if 'logout_from' in session:\n session.clear()\n return await render_template('redirect_logout.html', redirect_url='/', logout_page=url_for('jupyterhub_logout'))\n else:\n return await app.make_response(redirect(app.config['GITLAB_URL']))\n\n if 'token' in session:\n a = jwt.decode(session['token'], verify=False)\n\n # cleanup the session in redis immediately\n cookie_val = request.cookies.get('session').split(\".\")[0]\n store.delete(cookie_val)\n session.clear()\n\n for k in store.keys(prefix=get_key_for_user(a, '')):\n store.delete(k)\n\n session['logout_from'] = \"Renku\"\n\n return await app.make_response(redirect(logout_url))\n","sub_path":"app/auth/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":11509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"26618283","text":"from pymouse import PyMouse\nimport time\nif __name__ == '__main__':\n m = PyMouse()\n time.sleep(10)\n print(m.position())\n lis = m.position()\n m.click(lis[0], lis[1])\n times = 0\n print (\"start\")\n while times < 60:\n m.click(lis[0], lis[1])\n time.sleep(180)\n print(\"clicked :\", times)\n times += 1\n ","sub_path":"Check_mouse.py","file_name":"Check_mouse.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"596765251","text":"import argparse\nimport os.path\nfrom ftplib import FTP\nimport glob\nimport gzip\nimport shutil\nimport tarfile\nimport os\nimport requests\nimport logging\nimport uuid\n\n\nlogger = logging.getLogger(\"DWD Crawler (script)\")\nlogger.setLevel(logging.INFO)\n\nfile_handler = logging.FileHandler(\"dwd-crawler.log\")\nfile_handler.setLevel(logging.INFO)\nlogger.addHandler(file_handler)\n\nstream_handler = logging.StreamHandler()\nstream_handler.setLevel(logging.INFO)\nlogger.addHandler(stream_handler)\n\n\nhost_protocol = \"ftp://\"\nhost_url = \"ftp-cdc.dwd.de\"\nhost_directory = \"pub/CDC/grids_germany/hourly/radolan/historical/bin/\"\nlocal_directory = \"./\"\n\nminutely_host_protocol = \"https://\"\nminutely_host_url = \"opendata.dwd.de/climate_environment/CDC/grids_germany/5_minutes/radolan/reproc/2017_002/bin/\"\nminutely_year_begin = 
2001\nminutely_year_end = 2018\nminutely_filename_prefix = \"YW2017.002_\"\nminutely_filename_end = \".tar\"\n\n\ndef daily_uncompress(archive_directory, target_directory, year=None):\n temp_dir_name = \"tmp_\" + str(uuid.uuid4())\n os.chdir(archive_directory)\n if not os.path.isdir(temp_dir_name):\n os.mkdir(temp_dir_name)\n\n if not year:\n archive_wildcard = minutely_filename_prefix + \"*.tar\"\n else:\n archive_wildcard = minutely_filename_prefix + str(year) + \"*.tar\"\n\n for file in glob.glob(archive_wildcard):\n uncompress_tarfile(archive_directory + '/' + file, \"./\" + temp_dir_name)\n\n # Move to tmp directory and uncompress archives to target\n os.chdir(temp_dir_name)\n logger.info(\"Uncompressing .tar.gz files in \" + os.getcwd())\n for file in glob.glob(\"*.tar.gz\"):\n uncompress_targzfile(file, target_directory)\n logger.info(\"Removing temp folder\")\n os.chdir(\"..\")\n shutil.rmtree(\"./\" + temp_dir_name)\n\n\ndef daily_download_years(target_directory):\n for year in range(minutely_year_begin, minutely_year_end + 1):\n daily_download_months(year, target_directory)\n\n\ndef daily_download_months(year, target_directory):\n for month in range(1, 13):\n daily_filename = minutely_filename_prefix + str(year) + str(month).zfill(2) + minutely_filename_end\n url_complete = minutely_host_protocol + minutely_host_url + str(year) + '/' + daily_filename\n if os.path.isfile(target_directory + daily_filename):\n logger.info(\"File already downloaded: \" + daily_filename)\n continue\n logger.info(\"Downloading: \" + url_complete)\n r = requests.get(url_complete, stream=True)\n r.raw.decode_content = True\n with open(target_directory + daily_filename, 'wb') as file:\n file.write(r.content)\n\n\ndef gunzip(file_path, output_path):\n logger.info(\"Uncompressing gz file: \" + file_path)\n with gzip.open(file_path, \"rb\") as compressed, open(output_path, \"wb\") as file_out:\n shutil.copyfileobj(compressed, file_out)\n\n\ndef uncompress_tarfile(tar_file_path, destination):\n if tarfile.is_tarfile(tar_file_path):\n logger.info(\"Uncompressing tar file: \" + tar_file_path)\n file = tarfile.open(tar_file_path, \"r|\")\n file.extractall(destination)\n else:\n logger.error(\"Error uncompressing tar file: \" + tar_file_path)\n\n\ndef uncompress_targzfile(tar_file_path, destination):\n if tarfile.is_tarfile(tar_file_path):\n logger.info(\"Uncompressing tar.gz file: \" + tar_file_path)\n file = tarfile.open(tar_file_path, \"r:gz\")\n file.extractall(destination)\n else:\n logger.error(\"Error uncompressing tar.gz file: \" + tar_file_path)\n\n\ndef uncompress_monthly_all(source_path, destination_path):\n os.chdir(source_path)\n for file in glob.glob(\"*.tar.gz\"):\n subdir = destination_path + '/' + file\n if not os.path.exists(subdir):\n os.makedirs(subdir)\n uncompress_targzfile(file, subdir)\n\n\ndef download_with_new_connection(ftp, filename):\n if os.path.isfile(filename):\n logger.info(\"File \" + filename + \" already downloaded!\")\n else:\n logger.info(\"Downloading: \" + ftp.pwd() + filename)\n with open(filename, 'wb') as f:\n ftp.retrbinary('RETR ' + filename, f.write)\n\n\ndef download_files(ftp, file_list):\n for file in file_list:\n if file[0]:\n download_with_new_connection(ftp, file[1])\n\n\ndef ftp_file(ftp, directory):\n dir_listing = []\n ftp.cwd(directory)\n ftp.dir(lambda x: dir_listing.append(x))\n return [(line[0].upper() != 'D', line.rsplit()[-1]) for line in dir_listing]\n\n\ndef ftp_dir(ftp, directory):\n dir_listing = []\n ftp.cwd(directory)\n ftp.dir(lambda x: 
dir_listing.append(x))\n return [(line[0].upper() == 'D', line.rsplit()[-1]) for line in dir_listing]\n\n\ndef ftp_dir_year(ftp, directory_file_list):\n for df in directory_file_list:\n if df[0]:\n current_directory = df[1]\n file_list = ftp_file(ftp, current_directory)\n download_files(ftp, file_list)\n ftp.cwd(\"..\")\n\n\ndef main(download_dir=\"./\", out_directory=\"./\", download=True, unpack=True, minutely=True, year=None):\n logger.info(\"Downloads are at: \" + download_dir)\n logger.info(\"Uncompressing to: \" + out_directory)\n\n logger.info(\"Doing: \")\n\n if download:\n if not minutely:\n logger.info(\"Downloading hourly files\")\n os.chdir(download_dir)\n ftp_session = FTP(host_url)\n ftp_session.login()\n ftp_dir_year(ftp_session, ftp_dir(ftp_session, host_directory))\n ftp_session.close()\n else:\n logger.info(\"Downloading minutely files\")\n if not year:\n daily_download_years(download_dir)\n else:\n num_year = int(year)\n if minutely_year_begin <= num_year <= minutely_year_end:\n daily_download_months(num_year, download_dir)\n else:\n logger.info(\"Year not available for download: \" + str(year))\n\n if unpack:\n if not minutely:\n print(\"Uncompressing hourly files\")\n uncompress_monthly_all(download_dir, out_directory)\n else:\n print(\"Uncompressing minutely files\")\n daily_uncompress(download_dir, out_directory, year)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Downloads and extracts radio data from DWD ftp server\")\n parser.add_argument(\"-z\", \"--downloadDir\",\n dest=\"down_directory\",\n help=\"Target directory for downloads.\")\n parser.add_argument(\"-o\", \"--outputDir\",\n dest=\"out_directory\",\n help=\"Target directory for binary files.\")\n parser.add_argument(\"-d\", \"--download-only\",\n dest=\"downloadOnly\",\n help=\"Download only, do not unpack.\",\n action=\"store_true\")\n parser.add_argument(\"-u\", \"--unpack-only\",\n dest=\"unpackOnly\",\n help=\"Only unpack, do not download.\",\n action=\"store_true\")\n parser.add_argument(\"-m\", \"--minutely\",\n dest=\"minutely\",\n help=\"Download files containing data for every 5 minutes, instead of hourly data\",\n action=\"store_true\")\n parser.add_argument(\"-y\", \"--year\",\n dest=\"year\",\n help=\"Specify the year to be downloaded. 
ONLY WORKS with option: -m\")\n\n logger.info(\"All Arguments initialized\")\n\n args = parser.parse_args()\n logger.info(\"Parsed arguments:\")\n logger.info(\"downloadOnly: \")\n logger.info(\"True\" if args.downloadOnly else \"False\")\n logger.info(\"unpackOnly: \")\n logger.info(\"True\" if args.unpackOnly else \"False\")\n logger.info(\"hourly files: \")\n logger.info(\"True\" if not args.minutely else \"False\")\n logger.info(\"5 minutely files: \")\n logger.info(\"True\" if args.minutely else \"False\")\n\n logger.info(\"Download YEAR: \" + \"ALL\" if not args.year else str(args.year))\n\n down_dir = \"./\" if args.down_directory is None else os.path.join(args.down_directory, '')\n out_dir = \"./\" if args.out_directory is None else os.path.join(args.out_directory, '')\n\n if args.downloadOnly and args.unpackOnly:\n logger.error(\"Contradicting arguments: downloadOnly AND unpackOnly\")\n logger.info(\"YOU wanted me to do nothing!!!\")\n logger.info(\"Exiting now - tschau!\")\n else:\n main(download_dir=down_dir,\n out_directory=out_dir,\n download=not args.unpackOnly,\n unpack=not args.downloadOnly,\n minutely=args.minutely,\n year=args.year)\n logger.info(\"Crawler finished!\")\n","sub_path":"DWD_Crawler/DWD_Crawler.py","file_name":"DWD_Crawler.py","file_ext":"py","file_size_in_byte":8694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"99593878","text":"import sys\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers\r\nfrom sklearn.model_selection import train_test_split\r\nfrom constant import *\r\nimport preprocess\r\n\r\n\r\ndef main(pklname):\r\n #---------------------------------------------------------------\r\n # load data\r\n #---------------------------------------------------------------\r\n tweets, labels, vocab_size = preprocess.load_data_with_labels(pklname)\r\n x_train, x_test, y_train, y_test = train_test_split(tweets, labels, train_size=0.8)\r\n\r\n #---------------------------------------------------------------\r\n # buid model\r\n #---------------------------------------------------------------\r\n embedding_dim = 64\r\n model = keras.Sequential([\r\n layers.Embedding(vocab_size, embedding_dim, input_length=MAX_LENGTH_OF_TWEETS),\r\n layers.Dense(16, activation=\"relu\"),\r\n layers.GlobalAveragePooling1D(),\r\n layers.Dense(1, activation=\"sigmoid\")\r\n ])\r\n\r\n #---------------------------------------------------------------\r\n # compile and train model\r\n #---------------------------------------------------------------\r\n model.compile(optimizer=\"adam\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\r\n print(model.summary())\r\n\r\n batch_size = 1024\r\n epochs = 15\r\n history = model.fit(x_train,\r\n y_train,\r\n validation_data=(x_test, y_test),\r\n batch_size=batch_size,\r\n epochs=epochs)\r\n\r\n #---------------------------------------------------------------\r\n # save model and parameters\r\n #---------------------------------------------------------------\r\n model_json_str = model.to_json()\r\n open(MODEL_FILE_PATH, \"w\").write(model_json_str)\r\n model.save_weights(PARAMS_PATH)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n args = sys.argv\r\n pklname = args[1]\r\n main(pklname)","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"466322959","text":"#Тестирование\r\n\"\"\"def full_name(first, 
second, middle=\" \" ): #делаем третий параметр необязательным\r\n if middle == \" \":\r\n full = first + ' ' + second\r\n else:\r\n full = first + ' ' + second + ' ' + middle\r\n return full\r\n\r\n\r\nfrom Testing_in_Python import full_name\r\n\r\nprint(\"for stop the test enter symbol 'Q'\")\r\nwhile True:\r\n first = input(\"\\n enter your name \")\r\n if first == 'Q':\r\n break\r\n last = input(\"\\n enter your second name \")\r\n if last == 'Q':\r\n break\r\n\r\n format_name = full_name(first, last)\r\n print(\"\\n format name \" + format_name)\r\n\r\nimport unittest \r\n#from Testing_in_Python import full_name\r\n\r\nclass Name_test_case(unittest.TestCase): #Наследуем от класса unittest метод TestCase\r\n #тест для функции full_name \r\n\r\n def test_first_last_name(self): #любой метод, который начинается test_ при запуске программы они будут сразу отрабатывать\r\n #проверяем имена вида 'Ihor' 'Poltavets' работает нормально?\r\n format_name = full_name('Ihor', 'Poltavets') # вызываем тестируемую функцию и кладем ее в переменную\r\n self.assertEqual(format_name, 'Ihor Poltavets') # этот метод сравниваем данные с первого аргумента со вторым,если они совпадают-тест пройден\r\n\r\n def test_first_last_middle(self):\r\n #проверяем имена вида 'Ihor' 'Poltavets' 'Alexandrovich' работает нормально?\r\n format_name = full_name('Ihor', 'Poltavets', 'Alexandrovich')\r\n self.assertEqual(format_name, 'Ihor Poltavets Alexandrovich' )\r\n#if __name__ == \"__main__\": #сравниваем присвоенное значение __name__\r\n# unittest.main()\r\nunittest.main()\"\"\"\r\n\r\n\"\"\"import unittest\r\n\r\ndef city_country(city, country, population =\" \"):\r\n if population == \" \":\r\n return city + \" \" + country\r\n else:\r\n return city + \" \" + country + \" \" + population\r\n\r\nclass City_test(unittest.TestCase):\r\n #тест для функции city_country\r\n \r\n def test_city_county(self):\r\n city_1 = city_country('Kharkiv', 'Ukraine')\r\n self.assertEqual(city_1, 'Kharkiv Ukraine')\r\n\r\n def test_city_county_population(self):\r\n city_2 = city_country('Kharkiv', 'Ukraine', '1,419')\r\n self.assertEqual(city_2, 'Kharkiv Ukraine 1,419')\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\"\"\"\r\n\r\nimport unittest\r\n\r\ndef calculator(num1, num2, operation):\r\n if operation == '+':\r\n return num1 + num2\r\n elif operation == '-':\r\n return num1 - num2\r\n elif operation == '/':\r\n return num1 / num2\r\n elif operation == '*':\r\n return num1 * num2\r\n else:\r\n print(\"Choose correct operation\") \r\n\r\nclass Calculator_test(unittest.TestCase):\r\n\r\n\r\n def test_calculator(self):\r\n operation = input()\r\n if operation == '+':\r\n calc1 = calculator(6, 3, '+')\r\n self.assertEqual(calc1, 9)\r\n elif operation == '-':\r\n calc1 = calculator(6, 3, '-')\r\n self.assertEqual(calc1, 3)\r\n elif operation == '/':\r\n calc1 = calculator(6, 3, '/')\r\n self.assertEqual(calc1, 2)\r\n elif operation == '*':\r\n calc1 = calculator(6, 3, '*')\r\n self.assertEqual(calc1, 18)\r\n else :\r\n print(\"Error operation\")\r\n \r\nif __name__ == \"__main__\":\r\n unittest.main()","sub_path":"Testing_in_Python.py","file_name":"Testing_in_Python.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"395566441","text":"from django.core import urlresolvers\nfrom django.contrib.sitemaps import Sitemap\n\nclass GeoRSSSitemap(Sitemap):\n \"\"\"\n A minimal hook to produce sitemaps for GeoRSS feeds.\n \"\"\"\n def 
__init__(self, feed_dict, slug_dict=None):\n \"\"\"\n This sitemap object initializes on a feed dictionary (as would be passed\n to `django.contrib.gis.views.feed`) and a slug dictionary.\n If the slug dictionary is not defined, then it's assumed the keys provide\n the URL parameter to the feed. However, if you have a complex feed (e.g.,\n you override `get_object`, then you'll need to provide a slug dictionary.\n The slug dictionary should have the same keys as the feed dictionary, but\n each value in the slug dictionary should be a sequence of slugs that may\n be used for valid feeds. For example, let's say we have a feed that\n returns objects for a specific ZIP code in our feed dictionary:\n\n feed_dict = {'zipcode' : ZipFeed}\n\n Then we would use a slug dictionary with a list of the zip code slugs\n corresponding to feeds you want listed in the sitemap:\n\n slug_dict = {'zipcode' : ['77002', '77054']}\n \"\"\"\n # Setting up.\n self.feed_dict = feed_dict\n self.locations = []\n if slug_dict is None:\n slug_dict = {}\n # Getting the feed locations.\n for section in feed_dict.keys():\n if slug_dict.get(section, False):\n for slug in slug_dict[section]:\n self.locations.append('%s/%s' % (section, slug))\n else:\n self.locations.append(section)\n\n def get_urls(self, page=1, site=None):\n \"\"\"\n This method is overrridden so the appropriate `geo_format` attribute\n is placed on each URL element.\n \"\"\"\n urls = Sitemap.get_urls(self, page=page, site=site)\n for url in urls:\n url['geo_format'] = 'georss'\n return urls\n\n def items(self):\n return self.locations\n\n def location(self, obj):\n return urlresolvers.reverse('django.contrib.gis.views.feed', args=(obj,))\n","sub_path":"django/contrib/gis/sitemaps/georss.py","file_name":"georss.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"653372383","text":"name = \"NITIN\"\nlen = int(len(name) / 2)\nflag = True\n\nfor i in range(len):\n\tif(name[i] == name[-(i+1)]):\n\t\tflag = True\n\telse:\n\t\tflag = False\n\nif flag:\n\tprint(\"It's Palindrome :)\")\nelse: \n\tprint(\"It's not Palindrome :(\")\n\n'''\n- => $ python Palindrome.py \nop:\n1)\nIn case, name = \"NITIN\"\nIt's Palindrome :)\n\n2)\nIn ccase, name = \"NITINN\"\nIt's not Palindrome :(\n'''","sub_path":"problems/Palindrome.py","file_name":"Palindrome.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"633464445","text":"from os.path import join, normpath\nimport re\nfrom StringIO import StringIO\n\n\ndef loads(s):\n # okay this is an abstraction violation, but I wrote load(f) so I know this will work\n f = s.split('\\n')\n return load(f)\n \n\ndef load(f):\n messages = {}\n for line in f:\n # i think this line is wrong, but i don't want to break anything\n # clayton says there's no \\# escaping on the phone\n line = re.split(r'(? 
por: hack for backwards compatibility\n if lang == 'pt':\n lang = 'por'\n\n try:\n str(lang)\n except UnicodeEncodeError:\n return {}\n\n while version:\n rel_path = '../messages_{lang}-{version}.txt'.format(lang=lang,\n version=version)\n path = normpath(join(__file__, rel_path))\n try:\n with open(path) as f:\n return load(f)\n except IOError:\n version -= 1\n return {}\n\n\ndef dumps(dct):\n io = StringIO()\n for key, val in sorted(dct.items()):\n # replace all blanks with non-breaking spaces\n if not val.strip():\n val = u'\\u00A0'\n # get rid of newlines\n val = val.replace('\\n', '\\\\n')\n # escape starting # character\n val = re.sub(r'(?> io, u\"{key}={val}\".format(key=key.strip(), val=val).encode('utf8')\n return unicode(io.getvalue(), encoding='utf8')\n","sub_path":"commcare_translations.py","file_name":"commcare_translations.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"298114498","text":"import tensorflow as tf\nfrom seq2seq_tf2.layers import Encoder, BahdanauAttention, Decoder, Pointer\nfrom utils.data_utils import load_word2vec\n\n\nclass PGN(tf.keras.Model):\n def __init__(self, params):\n super(PGN, self).__init__()\n # self.embedding_matrix = load_word2vec(params[\"vocab_size\"])\n self.params = params\n self.encoder = Encoder(params[\"vocab_size\"], params[\"embed_size\"], params[\"enc_units\"], params[\"batch_size\"])\n self.attention = BahdanauAttention(params[\"attn_units\"])\n self.decoder = Decoder(params[\"vocab_size\"], params[\"embed_size\"], params[\"dec_units\"], params[\"batch_size\"])\n self.pointer = Pointer()\n\n def call_encoder(self, enc_inp):\n enc_hidden = self.encoder.initialize_hidden_state()\n enc_output, enc_hidden = self.encoder(enc_inp, enc_hidden)\n return enc_hidden, enc_output\n\n def call_decoder_onestep(self, latest_tokens, enc_hidden, dec_hidden):\n if self.params[\"pointer_gen\"]:\n # TODO: implement\n pass\n else:\n context_vector, _ = self.attention(dec_hidden, enc_hidden)\n dec_x, pred, dec_hidden = self.decoder(latest_tokens,\n None,\n None,\n context_vector)\n return dec_x, pred, dec_hidden\n\n def call(self, enc_output, dec_hidden, enc_inp, enc_extended_inp, dec_inp, batch_oov_len):\n predictions = []\n attentions = []\n p_gens = []\n context_vector, _ = self.attention(dec_hidden, enc_output)\n\n if self.params[\"pointer_gen\"]:\n for t in range(dec_inp.shape[1]):\n dec_x, pred, dec_hidden = self.decoder(tf.expand_dims(dec_inp[:, t], 1),\n dec_hidden,\n enc_output,\n context_vector)\n context_vector, attn = self.attention(dec_hidden, enc_output)\n p_gen = self.pointer(context_vector, dec_hidden, tf.squeeze(dec_x, axis=1))\n\n attentions.append(attn)\n p_gens.append(p_gen)\n predictions.append(pred)\n\n final_dists = _calc_final_dist(enc_extended_inp, predictions, attentions, p_gens, batch_oov_len,\n self.params[\"vocab_size\"], self.params[\"batch_size\"])\n\n if self.params[\"mode\"] == \"train\":\n return tf.stack(final_dists, 1), dec_hidden # predictions_shape = (batch_size, dec_len, vocab_size) with dec_len = 1 in pred mode\n else:\n return tf.stack(final_dists, 1), dec_hidden, context_vector, tf.stack(attentions, 1), tf.stack(p_gens, 1)\n\n else:\n for t in range(dec_inp.shape[1]):\n dec_x, pred, dec_hidden = self.decoder(tf.expand_dims(dec_inp[:, t], 1),\n dec_hidden,\n enc_output,\n context_vector)\n context_vector, attn = self.attention(dec_hidden, enc_output)\n predictions.append(pred)\n\n return tf.stack(predictions, 1), 
dec_hidden\n\n\ndef _calc_final_dist(_enc_batch_extend_vocab, vocab_dists, attn_dists, p_gens, batch_oov_len, vocab_size, batch_size):\n \"\"\"\n Calculate the final distribution, for the pointer-generator model\n Args:\n vocab_dists: The vocabulary distributions. List length max_dec_steps of (batch_size, vsize) arrays.\n The words are in the order they appear in the vocabulary file.\n attn_dists: The attention distributions. List length max_dec_steps of (batch_size, attn_len) arrays\n Returns:\n final_dists: The final distributions. List length max_dec_steps of (batch_size, extended_vsize) arrays.\n \"\"\"\n # Multiply vocab dists by p_gen and attention dists by (1-p_gen)\n vocab_dists = [p_gen * dist for (p_gen, dist) in zip(p_gens, vocab_dists)]\n attn_dists = [(1-p_gen) * dist for (p_gen, dist) in zip(p_gens, attn_dists)]\n\n # Concatenate some zeros to each vocabulary dist, to hold the probabilities for in-article OOV words\n extended_vsize = vocab_size + batch_oov_len # the maximum (over the batch) size of the extended vocabulary\n extra_zeros = tf.zeros((batch_size, batch_oov_len))\n # list length max_dec_steps of shape (batch_size, extended_vsize)\n vocab_dists_extended = [tf.concat(axis=1, values=[dist, extra_zeros]) for dist in vocab_dists]\n\n # Project the values in the attention distributions onto the appropriate entries in the final distributions\n # This means that if a_i = 0.1 and the ith encoder word is w, and w has index 500 in the vocabulary,\n # then we add 0.1 onto the 500th entry of the final distribution\n # This is done for each decoder timestep.\n # This is fiddly; we use tf.scatter_nd to do the projection\n batch_nums = tf.range(0, limit=batch_size) # shape (batch_size)\n batch_nums = tf.expand_dims(batch_nums, 1) # shape (batch_size, 1)\n attn_len = tf.shape(_enc_batch_extend_vocab)[1] # number of states we attend over\n batch_nums = tf.tile(batch_nums, [1, attn_len]) # shape (batch_size, attn_len)\n indices = tf.stack((batch_nums, _enc_batch_extend_vocab), axis=2) # shape (batch_size, enc_t, 2)\n shape = [batch_size, extended_vsize]\n # list length max_dec_steps (batch_size, extended_vsize)\n attn_dists_projected = [tf.scatter_nd(indices, copy_dist, shape) for copy_dist in attn_dists]\n\n # Add the vocab distributions and the copy distributions together to get the final distributions\n # final_dists is a list length max_dec_steps; each entry is a tensor shape (batch_size, extended_vsize) giving\n # the final distribution for that decoder timestep\n # Note that for decoder timesteps and examples corresponding to a [PAD] token, this is junk - ignore.\n final_dists = [vocab_dist + copy_dist for (vocab_dist, copy_dist) in zip(vocab_dists_extended, attn_dists_projected)]\n\n return final_dists\n\n\nif __name__ == '__main__':\n encoder = Encoder(vocab_size=25216, embedding_dim=256, enc_units=1024, batch_sz=64)\n sample_hidden = encoder.initialize_hidden_state()\n example_input_batch = tf.ones(shape=(64, 88), dtype=tf.int32)\n sample_output, sample_hidden = encoder(example_input_batch, sample_hidden)\n print('Encoder output shape: (batch size, sequence length, units) {}'.format(sample_output.shape))\n print('Encoder Hidden state shape: (batch size, units) {}'.format(sample_hidden.shape))\n\n attention_layer = BahdanauAttention(128)\n attention_weights, attention_result = attention_layer(sample_hidden, sample_output)\n print(\"Attention result shape: (batch size, units) {}\".format(attention_result.shape))\n print(\"Attention weights shape: (batch_size, sequence_length, 
1) {}\".format(attention_weights.shape))\n\n decoder = Decoder(vocab_size=13053, embedding_dim=256, dec_units=1024, batch_sz=64)\n sample_decoder_output, _, _ = decoder(tf.random.uniform((64, 1)), sample_hidden, sample_output)\n print('Decoder output shape: (batch_size, vocab size) {}'.format(sample_decoder_output.shape))\n\n\n","sub_path":"seq2seq_model.py","file_name":"seq2seq_model.py","file_ext":"py","file_size_in_byte":7341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"521375268","text":"# Author: Christian Brodbeck \nfrom contextlib import ContextDecorator\nimport os\nimport sys\n\nIS_OSX = sys.platform == 'darwin'\nIS_WINDOWS = os.name == 'nt'\n\nif IS_OSX:\n from .macos import begin_activity, end_activity\nelse:\n from .dummy_os import begin_activity, end_activity\n\n\nclass Caffeinator(ContextDecorator):\n \"\"\"Context disabling idle sleep and App Nap\"\"\"\n def __init__(self):\n self.n_processes = 0\n\n def __enter__(self):\n if self.n_processes == 0 and IS_OSX:\n self._activity = begin_activity()\n self.n_processes += 1\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.n_processes -= 1\n if self.n_processes == 0 and IS_OSX:\n end_activity(self._activity)\n\n\ncaffeine = Caffeinator()\n","sub_path":"eelbrain/_utils/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"255674145","text":"\"\"\"This file contains functions to build/simulate offensive mechanisms/strategies\"\"\"\n\nfrom gamelib.game_state import GameState\nfrom gamelib.game_map import GameMap\nfrom gamelib.unit import GameUnit\n\nfrom defensive_building_functions import DefensiveWallStrat\n\nfrom gamelib.util import debug_write\n\n# CONSTANTS\n\n# Enforced to not choose a spawn loc resulting in too short of a path\nMIN_PATH_LENGTH = 5\n\n\nclass OffensiveInterceptorSpam:\n \"\"\"Contains builder/simulator for intercepter spam attack strategy\"\"\"\n\n def build_interceptor_spam_multiple_locs(\n self,\n game_state: GameState,\n unit_enum_map: dict,\n num_interceptors: int,\n locations: [(int, int)] or [[int]],\n ) -> int:\n \"\"\"Builds X Interceptors at EACH of the MULTIPLE given locations (stacked)\n\n Args:\n game_state (GameState): The current GameState object\n unit_enum_map (dict): Maps NAME to unit enum\n num_interceptors (int): How many interceptors\n locations [(int, int)] or [[[int]]]: The coordinates to place them at\n\n Returns:\n built (int): Number of Interceptors actually successfully placed\n \"\"\"\n\n built = 0 # To return\n\n for loc in locations:\n built += self.build_interceptor_spam_single_loc(\n game_state, unit_enum_map, num_interceptors, loc\n )\n\n return built\n\n def build_interceptor_spam_single_loc(\n self,\n game_state: GameState,\n unit_enum_map: dict,\n num_interceptors: int,\n location: (int, int) or [[int]],\n ) -> int:\n \"\"\"Builds X Interceptors at the given location (stacked)\n\n Args:\n game_state (GameState): The current GameState object\n unit_enum_map (dict): Maps NAME to unit enum\n num_interceptors (int): How many interceptors\n location (int, int) or [int]: The (x, y) or [x, y] coordinate to place them at\n\n Returns:\n built (int): Number of Interceptors actually successfully placed\n \"\"\"\n\n built = 0 # To return\n\n for _ in range(num_interceptors):\n if self._build_interceptor_helper(game_state, unit_enum_map, location):\n built += 1\n\n return built\n\n def 
_build_interceptor_helper(\n self,\n game_state: GameState,\n unit_enum_map: dict,\n location: (int, int) or [int],\n ) -> bool:\n \"\"\"Private helper to place an interceptor at a given location\n\n Args:\n game_state (GameState): The current GameState object\n unit_enum_map (dict): Maps NAME to unit enum\n location (int, int) or [int]: The (x, y) or [x, y] coordinates to place it at\n\n Returns:\n is_successful (bool): Whether the interceptor was able to be placed\n \"\"\"\n\n if not game_state.can_spawn(unit_enum_map[\"INTERCEPTOR\"], location):\n return False\n\n built = game_state.attempt_spawn(unit_enum_map[\"INTERCEPTOR\"], location)\n\n return True if built == 1 else False\n\n\nclass OffensiveDemolisherLine:\n \"\"\"Contains builder/simulator for Demolisher behind horizontal wall line strat\"\"\"\n\n def build_demolisher_line(\n self,\n game_state: GameState,\n unit_enum_map: dict,\n num_demolishers: int,\n num_walls: int,\n wall_location: (int, int) or [int, int],\n dem_location: (int, int) or [int, int],\n right: bool = True,\n ) -> bool:\n \"\"\"Builds a line of walls starting at the given location and stacked demolishers 1 tile back\n\n Args:\n game_state (GameState): The current GameState object\n unit_enum_map (dict): Maps NAME to unit enum\n num_demolishers (int): How many demolishers\n wall_location (int, int) or [int]: The (x, y) or [x, y] coordinate to place the walls at\n dem_location (int, int) or [int]: The (x, y) or [x, y] coordinate to place the dems at\n right (bool): Whether to build the walls towards the right (or left)\n \"\"\"\n\n # Build num_walls line towards right of location (might overflow but fine)\n placed_wall_locs = DefensiveWallStrat().build_h_wall_line(\n game_state, unit_enum_map, wall_location, num_walls, right=right\n )\n\n # Offset coordinates one down or one left/right depending on where it places walls\n path = game_state.find_path_to_edge(dem_location)\n while path is None or len(path) < MIN_PATH_LENGTH:\n if right:\n dem_location[0] += 1\n else:\n dem_location[0] -= 1\n dem_location[1] -= 1\n path = game_state.find_path_to_edge(dem_location)\n\n # Build num_demolishers demolishers\n for _ in range(num_demolishers):\n game_state.attempt_spawn(unit_enum_map[\"DEMOLISHER\"], dem_location)\n\n # Mark all recently placed walls for deletion to not block our units/structures later\n for placed_wall in placed_wall_locs:\n game_state.attempt_remove(placed_wall)\n","sub_path":"algos/python-algov2-0/offensive_building_functions.py","file_name":"offensive_building_functions.py","file_ext":"py","file_size_in_byte":5102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"143377538","text":"\nimport argparse\nimport project_root\nimport numpy as np\nimport tensorflow as tf\nfrom os import path\nimport os\nimport sys\nfrom env.sender import Sender\nfrom helpers.helpers import normalize, one_hot, softmax\nfrom environment import Environment\nimport random\nimport tensorflow.contrib.slim as slim\n\n\nclass Q_network(object):\n def __init__(self, state_dim, action_cnt):\n self.state = tf.placeholder(shape=[None, state_dim], dtype=tf.float32)\n self.state = tf.reshape(self.state, shape=[-1, state_dim])\n\n self.fc1 = tf.contrib.layers.fully_connected(self.state,64)\n self.fc1 = tf.nn.dropout(self.fc1, 0.8)\n\n self.fc2 = tf.contrib.layers.fully_connected(self.fc1, 64)\n self.fc2 = tf.nn.dropout(self.fc2, 0.5)\n\n self.fc3 = tf.contrib.layers.fully_connected(self.fc2, 64)\n self.fc3 = tf.nn.dropout(self.fc3, 
0.5)\n\n self.streamAC, self.streamVC = tf.split(self.fc3, 2, 1)\n self.streamA = slim.flatten(self.streamAC)\n self.streamV = slim.flatten(self.streamVC)\n\n self.Advantage = tf.contrib.layers.fully_connected(self.streamA, action_cnt)\n self.Value = tf.contrib.layers.fully_connected(self.streamV, 1)\n\n self.Qout = self.Value + tf.subtract(self.Advantage, tf.reduce_mean(self.Advantage, axis=1, keepdims=True))\n self.predict = tf.argmax(self.Qout, 1)\n\n self.targetQ = tf.placeholder(shape=[None], dtype=tf.float32)\n self.actions = tf.placeholder(shape=[None], dtype=tf.int32)\n self.actions_onehot = tf.one_hot(self.actions, action_cnt, dtype=tf.float32)\n\n self.Q = tf.reduce_sum(tf.multiply(self.Qout, self.actions_onehot), axis=1)\n\n self.td_error = tf.square(self.targetQ - self.Q)\n self.loss = tf.reduce_mean(self.td_error)\n self.trainer = tf.train.AdamOptimizer(learning_rate=0.001)\n self.updateModel = self.trainer.minimize(self.loss)\n\n\ndef create_env():\n uplink_trace = path.join(project_root.DIR, 'env', '114.68mbps.trace')\n downlink_trace = uplink_trace\n mahimahi_cmd = (\n 'mm-delay 20 mm-link %s %s '\n '--downlink-queue=droptail --downlink-queue-args=packets=200' %\n (uplink_trace, downlink_trace))\n\n env = Environment(mahimahi_cmd)\n return env\n\n\nclass experience_buffer():\n def __init__(self, buffer_size=100000):\n self.buffer = []\n\n self.buffer_size = buffer_size\n\n def add(self, experience):\n if len(self.buffer) + len(experience) >= self.buffer_size:\n self.buffer[0:(len(experience) + len(self.buffer)) - self.buffer_size] = []\n self.buffer.extend(experience)\n\n def sample(self, size):\n return np.reshape(np.array(random.sample(self.buffer, size)), [size, 5])\n\n\nclass Learner(object):\n def __init__(self, env):\n self.batch_size = 128\n self.y = 0.99\n self.tau = 0.001\n\n self.total_steps = 0\n self.num_episode = 1000\n self.max_epLength = 1000\n\n self.aug_state_dim = env.state_dim + env.action_cnt\n self.action_cnt = env.action_cnt\n self.prev_action = env.action_cnt - 1\n\n path = \"./save_model\"\n if not os.path.exists(path):\n os.makedirs(path)\n\n self.env = env\n\n self.state_dim = env.state_dim\n self.action_cnt = env.action_cnt\n\n def updateTargetGraph(self,tfVars, tau):\n total_vars = len(tfVars)\n op_holder = []\n for idx, var in enumerate(tfVars[0:total_vars // 2]):\n op_holder.append(tfVars[idx + total_vars // 2].assign(\n (var.value() * tau) + ((1 - tau) * tfVars[idx + total_vars // 2].value())))\n return op_holder\n\n def updateTarget(self,op_holder, sess):\n for op in op_holder:\n sess.run(op)\n\n\n def cleanup(self):\n self.env.cleanup()\n\n\n\n def run(self):\n\n tf.reset_default_graph()\n self.mainQN = Q_network(state_dim=self.aug_state_dim, action_cnt=self.action_cnt)\n self.targetQN = Q_network(state_dim=self.aug_state_dim, action_cnt=self.action_cnt)\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n trainables = tf.trainable_variables()\n targetOps = self.updateTargetGraph(trainables, self.tau)\n\n myBuffer = experience_buffer()\n self.rAll = 0\n self.jList = []\n self.rList = []\n F = open(\"r.txt\", \"w\")\n\n with tf.Session() as sess:\n sess.run(init)\n\n def update_model():\n trainBatch = myBuffer.sample(self.batch_size)\n Q1 = sess.run(self.mainQN.predict, feed_dict={self.mainQN.state: np.vstack(trainBatch[:, 3])})\n Q2 = sess.run(self.targetQN.Qout, feed_dict={self.targetQN.state: np.vstack(trainBatch[:, 3])})\n\n end_multiplier = -(trainBatch[:, 4] - 1)\n doubleQ = Q2[xrange(self.batch_size), Q1]\n targetQ = 
trainBatch[:, 2] + (self.y * doubleQ * end_multiplier)\n\n _ = sess.run(self.mainQN.updateModel,\n feed_dict={self.mainQN.state: np.vstack(trainBatch[:, 0]),\n self.mainQN.targetQ: targetQ,\n self.mainQN.actions: trainBatch[:, 1]})\n\n self.updateTarget(targetOps, sess)\n\n def sample_action(state):\n if np.random.rand(1) < 0.05:\n action = np.random.randint(0, self.env.action_cnt)\n else:\n\n # Get probability of each action from the local network.\n pi = self.mainQN\n feed_dict = {\n pi.state: [state],\n }\n ops_to_run = pi.predict\n action = sess.run(ops_to_run, feed_dict)[0]\n\n # Choose an action to take\n\n self.prev_action = action\n return action\n\n self.env.set_sample_action(sample_action)\n\n for episode_i in xrange(self.num_episode):\n sys.stderr.write('--- Episode %d\\n' % episode_i)\n episode_buffer = experience_buffer()\n\n s = self.env.reset()\n\n # get an episode of experience\n buffer,rall = self.env.rollout()\n myBuffer.add(buffer.buffer)\n print(len(myBuffer.buffer))\n\n\n for i in xrange(2000):\n #sys.stderr.write('update model %d\\n' % i)\n update_model()\n\n self.env.set_sample_action(sample_action)\n self.rList.append(rall)\n F.write(str(rall) + '\\n')\n\n print('rall %f\\n' % rall)\n F.close()\n return self.rList\n\n\ndef updateTargetGraph(tfVars, tau):\n total_vars = len(tfVars)\n op_holder = []\n for idx, var in enumerate(tfVars[0:total_vars // 2]):\n op_holder.append(tfVars[idx + total_vars // 2].assign(\n (var.value() * tau) + ((1 - tau) * tfVars[idx + total_vars // 2].value())))\n return op_holder\n\ndef updateTarget(op_holder, sess):\n for op in op_holder:\n sess.run(op)\n\n\ndef cleanup(env):\n env.cleanup()\n\n\n\ndef run_learner(env):\n batch_size = 128\n y = 0.99\n tau = 0.001\n\n total_steps = 0\n num_episode = 1000\n max_epLength = 1000\n\n aug_state_dim = env.state_dim + env.action_cnt\n action_cnt = env.action_cnt\n prev_action = env.action_cnt - 1\n\n path = \"./save_model\"\n if not os.path.exists(path):\n os.makedirs(path)\n\n\n state_dim = env.state_dim\n action_cnt = env.action_cnt\n\n tf.reset_default_graph()\n mainQN = Q_network(state_dim=aug_state_dim, action_cnt=action_cnt)\n targetQN = Q_network(state_dim=aug_state_dim, action_cnt=action_cnt)\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n trainables = tf.trainable_variables()\n targetOps = updateTargetGraph(trainables, tau)\n\n myBuffer = experience_buffer()\n rAll = 0\n jList = []\n rList = []\n F = open(\"r.txt\", \"w\")\n with tf.Session() as sess:\n sess.run(init)\n\n def update_model():\n trainBatch = myBuffer.sample(self.batch_size)\n Q1 = sess.run(self.mainQN.predict, feed_dict={self.mainQN.state: np.vstack(trainBatch[:, 3])})\n Q2 = sess.run(self.targetQN.Qout, feed_dict={self.targetQN.state: np.vstack(trainBatch[:, 3])})\n\n end_multiplier = -(trainBatch[:, 4] - 1)\n doubleQ = Q2[xrange(self.batch_size), Q1]\n targetQ = trainBatch[:, 2] + (self.y * doubleQ * end_multiplier)\n\n _ = sess.run(self.mainQN.updateModel,\n feed_dict={self.mainQN.state: np.vstack(trainBatch[:, 0]),\n self.mainQN.targetQ: targetQ,\n self.mainQN.actions: trainBatch[:, 1]})\n\n self.updateTarget(targetOps, sess)\n\n def sample_action(state):\n if np.random.rand(1) < 0.05:\n action = np.random.randint(0, self.env.action_cnt)\n else:\n\n # Get probability of each action from the local network.\n pi = self.mainQN\n feed_dict = {\n pi.state: [state],\n }\n ops_to_run = pi.predict\n action = sess.run(ops_to_run, feed_dict)[0]\n\n # Choose an action to take\n\n self.prev_action = action\n 
return action\n\n self.env.set_sample_action(sample_action)\n\n for episode_i in xrange(self.num_episode):\n sys.stderr.write('--- Episode %d\\n' % episode_i)\n episode_buffer = experience_buffer()\n\n s = self.env.reset()\n\n # get an episode of experience\n buffer, rall = self.env.rollout()\n myBuffer.add(buffer.buffer)\n print(len(myBuffer.buffer))\n\n for i in xrange(2000):\n # sys.stderr.write('update model %d\\n' % i)\n update_model()\n\n self.env.set_sample_action(sample_action)\n self.rList.append(rall)\n F.write(str(rall) + '\\n')\n\n print('rall %f\\n' % rall)\n F.close()\n return self.rList\n\n\n\n\ndef main():\n\n\n env = create_env()\n learner = Learner(env)\n\n try:\n rlist = learner.run()\n\n\n except KeyboardInterrupt:\n pass\n finally:\n learner.cleanup()\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"env/test_env.py","file_name":"test_env.py","file_ext":"py","file_size_in_byte":10454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"429249499","text":"import math\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\n# Factorised NoisyLinear layer with bias\nclass NoisyLinear(nn.Module):\n \"\"\"NosiyLinear called Nosiy Networks has been studied in DeepMind.\n\n A deep reinforcement learning agent with parametric noise added to its weights,\n and show that th induced stochasticity of the agent's policy can be used to aid efficient exploration.\n\n \"\"\"\n def __init__(self, in_features, out_features, std_init=0.5):\n \"\"\"This module extends torch.nn.Linear\n Args:\n in_features: the number of input feature\n out_features: the number of output feature\n std_init: parameter for NoisyLinear\n \"\"\"\n super(NoisyLinear, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.std_init = std_init\n self.weight_mu = nn.Parameter(torch.empty(out_features, in_features))\n self.weight_sigma = nn.Parameter(torch.empty(out_features, in_features))\n self.register_buffer('weight_epsilon', torch.empty(out_features, in_features))\n self.bias_mu = nn.Parameter(torch.empty(out_features))\n self.bias_sigma = nn.Parameter(torch.empty(out_features))\n self.register_buffer('bias_epsilon', torch.empty(out_features))\n self.reset_parameters()\n self.reset_noise()\n\n def reset_parameters(self):\n \"\"\"This method for reset layer parameter.\n\n Notes:\n For factorised noisy networks, each element mu_i,j was initialised by a sample\n from an independent uniform distribuntions MU[-1/root(p),+1/root(p)] and\n each element sigma_i,j was initialised to a contant sigma_0/root(p).\n in paper, hyperparameter sigma_0 is set to 0.5\n std_init=0.5\n\n \"\"\"\n mu_range = 1 / math.sqrt(self.in_features)\n self.weight_mu.data.uniform_(-mu_range, mu_range)\n self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))\n self.bias_mu.data.uniform_(-mu_range, mu_range)\n self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))\n\n def _scale_noise(self, size):\n \"\"\"This method for scale noise by the number of input/output features.\n\n Args:\n size: size is int for setting scale\n\n Returns:\n scaled noise\n\n \"\"\"\n x = torch.randn(size)\n return x.sign().mul_(x.abs().sqrt_())\n\n def reset_noise(self):\n \"\"\"This method make initialized noise.\n\n The Noise depends on the number of input/output featuers.\n\n \"\"\"\n epsilon_in = self._scale_noise(self.in_features)\n epsilon_out = self._scale_noise(self.out_features)\n 
self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in))\n self.bias_epsilon.copy_(epsilon_out)\n\n def forward(self, input):\n \"\"\"This method is override nn.Linear's forward\n\n Args:\n input: Input data\n\n Returns:\n Return is nn.Linear's output. but use noisy parameter.\n\n \"\"\"\n if self.training:\n return F.linear(input, self.weight_mu + self.weight_sigma * self.weight_epsilon,\n self.bias_mu + self.bias_sigma * self.bias_epsilon)\n else:\n return F.linear(input, self.weight_mu, self.bias_mu)\n\n\nclass DQN(nn.Module):\n \"\"\"This is DQN where 'C51', 'Duelling', 'NoisyNetwork'\n \n \"\"\"\n def __init__(self, args, action_space):\n super().__init__()\n self.atoms = args.atoms\n self.action_space = action_space\n\n self.conv1 = nn.Conv2d(args.history_length, 32, 8, stride=4, padding=1)\n self.conv2 = nn.Conv2d(32, 64, 4, stride=2)\n self.conv3 = nn.Conv2d(64, 64, 3)\n self.fc_h_v = NoisyLinear(3136, args.hidden_size, std_init=args.noisy_std)\n self.fc_h_a = NoisyLinear(3136, args.hidden_size, std_init=args.noisy_std)\n self.fc_z_v = NoisyLinear(args.hidden_size, self.atoms, std_init=args.noisy_std)\n self.fc_z_a = NoisyLinear(args.hidden_size, action_space * self.atoms, std_init=args.noisy_std)\n\n def forward(self, x, log=False):\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n x = x.view(-1, 3136)\n v = self.fc_z_v(F.relu(self.fc_h_v(x))) # Value stream\n a = self.fc_z_a(F.relu(self.fc_h_a(x))) # Advantage stream\n v, a = v.view(-1, 1, self.atoms), a.view(-1, self.action_space, self.atoms)\n q = v + a - a.mean(1, keepdim=True) # Combine streams\n if log: # Use log softmax for numerical stability\n q = F.log_softmax(q, dim=2) # Log probabilities with action over second dimension\n else:\n q = F.softmax(q, dim=2) # Probabilities with action over second dimension\n return q\n\n def reset_noise(self):\n for name, module in self.named_children():\n if 'fc' in name:\n module.reset_noise()\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"13822299","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n\ndef computeCost(X,y,t):\n m = X.shape[0]\n J = 1/(2*m) * sum(sum(np.square(X@t-y)))\n return J\n\ndef gradientDescent(X,y,t,a,i):\n m = X.shape[0]\n if t.all == (t - a/m * X.T@(X@t-y)).all:\n return t\n else:\n for i in range(i):\n t = t - a/m * X.T@(X@t-y)\n i += 1\n return t\n\"\"\"\nPLOT DATA\n\"\"\"\nex1data1 = pd.read_csv('ex1data1.txt',names=['population','profit'])\nplt.scatter(ex1data1.population, ex1data1.profit, marker=\"x\",color=\"blue\")\nplt.xlabel(\"Population in 10 000\")\nplt.ylabel(\"Profit in 10 000 USD\")\nplt.savefig('ex1chart1.png')\n\n\n\"\"\"\nPREPARE MATRIX AND VECTORS\n\"\"\"\nex1X1 = np.c_[np.ones(len(ex1data1)),ex1data1.population]\nex1y1 = np.array(ex1data1.profit,ndmin=2).T\ntheta = np.zeros((ex1X1.shape[1],1))\niterations = 1500\nalpha = 0.01\nprint(computeCost(ex1X1,ex1y1,theta))\nprint(gradientDescent(ex1X1,ex1y1,theta,alpha,iterations))\nprint(computeCost(ex1X1,ex1y1,gradientDescent(ex1X1,ex1y1,theta,alpha,iterations)))\n\nx = np.linspace(5,22.5,100)\ny = gradientDescent(ex1X1,ex1y1,theta,alpha,iterations)[0] + 
gradientDescent(ex1X1,ex1y1,theta,alpha,iterations)[1]*x\nplt.plot(x,y,\"red\")\nplt.savefig('ex1chart2.png')\n","sub_path":"linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"301401408","text":"from flask import Flask\nfrom flask_pymongo import PyMongo\n\napp = Flask(__name__)\napp.config['MONGO_URI'] = \"mongodb://localhost:27017/DummyDB\"\nmongo = PyMongo(app)\n\n\n@app.route(\"/\")\ndef main():\n return \"it's working !\"\n\n\n@app.route(\"/install\")\ndef install():\n collections = [\"Users\", \"Products\", \"Bills\"]\n for coll in collections:\n mongo.db.create_collection(coll)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", debug=True, use_reloader=True, threaded=True)\n","sub_path":"assistant_app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"136514820","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 8 02:25:07 2020\n\n@author: ikaro_beraldo\n\"\"\"\n\nimport math as ma\nimport numpy as np\nimport sys\nfrom scipy import signal\nfrom scipy.io import loadmat\nfrom mne import filter\n\n#Breaks the vetor into a multiarray data of n blocks with n samples each\ndef break_in_blocks(lfp_data,fs, block_length):\n #Check if the LFP data is a single dimension array\n if lfp_data.ndim == 1:\n sys.exit(\"Error: the LFP data must be a single dimension array\")\n \n #Number of blocks\n n_block = ma.floor(lfp_data.size / (fs*block_length))\n #NUmber of samples of each block\n n_samples = fs*block_length\n \n #Excluded the remaining samples\n lfp_data = np.delete(lfp_data,np.arange(block_length*n_block*fs,len(lfp_data)))\n \n #Operation to break the blocks\n blocked_array = lfp_data.reshape((n_block,n_samples))\n \n #Return the blocked_array\n return blocked_array\n\n#Calculate the root mean square of a vector or matrix\ndef root_mean_square(data,dimension):\n #If dimension = 1, the RMS will be calculate along the columns; If 2, the\n #RMS will be calculated along the rows\n if (dimension == 1): #If along columns, get the number of columns\n iterations = np.array(data.shape)[1] #number of iterations\n output_rms = np.zeros((1,iterations)) #pre-alocate the output \n #Loop through every vector (row or column)\n for ite in np.arange(iterations):\n output_rms[ite] = np.sqrt(np.mean(np.square(data[:,ite]))) #calculate RMS\n\n if (dimension == 2): #If along rows, get the number of rows\n iterations = np.array(data.shape)[0] #number of iterations\n output_rms = np.zeros((iterations,1)) #pre-alocate the output \n #Loop through every vector (row or column)\n for ite in np.arange(iterations): \n output_rms[ite] = np.sqrt(np.mean(np.square(data[ite,:]))) #calculate RMS\n \n return output_rms\n\n#Welch for multiple segments\ndef welch_multiple_vectors(data,fs,fft_number):\n #fft_number = nfft of welch calculation (Length of the FFT used)\n #fs = sampling frequency of data\n #data = multiple segments data (the calculation will be performed on each row)\n\n #Pre-alocate the PSD matrix\n columns = int(fft_number/2 + 1) #number of frequency components based on nfft\n rows = data.shape[0] #number of data segments\n Pxx = np.zeros((rows,columns)) #Create the pxx matrix for PSD data\n \n #iteration for each segment (row)\n for seg in np.arange(rows):\n #calculation of PSD for each 
segment\n f, Pxx[seg,:] = signal.welch(data[seg,:],fs,nfft = fft_number)\n \n return f, Pxx\n \n \n \n\n\n\n","sub_path":"Basic_functions.py","file_name":"Basic_functions.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"487918238","text":"import pytz\n\nfrom django.utils import timezone\nfrom django.utils.deprecation import MiddlewareMixin\n\n\nclass TimezoneMiddleware(MiddlewareMixin):\n \"\"\"\n Промежуточное программное обеспечение для активации пользовательского часового пояса, если он был сохранен в сеансе.\n \"\"\"\n\n def process_request(self, request):\n tzname = request.META.get('HTTP_TIME_ZONE')\n if tzname:\n timezone.activate(pytz.timezone(tzname))\n else:\n timezone.deactivate()\n","sub_path":"common/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"646831968","text":"\n\n#calss header\nclass _PERENNIAL():\n\tdef __init__(self,): \n\t\tself.name = \"PERENNIAL\"\n\t\tself.definitions = [u'lasting a very long time, or happening repeatedly or all the time: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_perennial.py","file_name":"_perennial.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"483589222","text":"import os\r\nimport shutil\r\nfrom concurrent.futures import ProcessPoolExecutor\r\n\r\ndef concCopyFiles(srcDir, destDir):\r\n src_files = os.listdir(srcDir)\r\n\r\n \r\n executor = ProcessPoolExecutor(105)\r\n futures = []\r\n \r\n for file_name in src_files:\r\n full_file_name = os.path.join(srcDir, file_name)\r\n if os.path.isfile(full_file_name):\r\n temp = executor.submit(shutil.copy, full_file_name, destDir)\r\n futures.append(temp)\r\n\r\n\r\n\r\ndef main():\r\n src = \"./\"\r\n dest = \"../files_2\"\r\n \r\n concCopyFiles(src, dest)\r\n \r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"aCopyProc.py","file_name":"aCopyProc.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"647494705","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pytest\n\nimport dask_image.ndinterp as da_ndinterp\n\nimport numpy as np\nimport dask.array as da\nfrom scipy import ndimage\n\n\ndef validate_affine_transform(n=2,\n matrix=None,\n offset=None,\n input_output_shape_per_dim=(16, 16),\n interp_order=1,\n interp_mode='constant',\n input_output_chunksize_per_dim=(6, 6),\n random_seed=0,\n use_cupy=False,\n ):\n \"\"\"\n Compare the outputs of `ndimage.affine_transformation`\n and `dask_image.ndinterp.affine_transformation`.\n\n Notes\n -----\n Currently, prefilter is disabled and therefore the output\n of `dask_image.ndinterp.affine_transformation` is compared\n to `prefilter=False`.\n \"\"\"\n\n # define test image\n a = input_output_shape_per_dim[0]\n np.random.seed(random_seed)\n image = np.random.random([a] * n)\n\n # transform into dask array\n chunksize = [input_output_chunksize_per_dim[0]] * n\n image_da = da.from_array(image, chunks=chunksize)\n if 
use_cupy:\n import cupy as cp\n image_da = image_da.map_blocks(cp.asarray)\n\n # define (random) transformation\n if matrix is None:\n # make sure to substantially deviate from unity matrix\n matrix = np.eye(n) + (np.random.random((n, n)) - 0.5) * 5.\n if offset is None:\n offset = (np.random.random(n) - 0.5) / 5. * np.array(image.shape)\n\n # define resampling options\n output_shape = [input_output_shape_per_dim[1]] * n\n output_chunks = [input_output_chunksize_per_dim[1]] * n\n\n # transform with scipy\n image_t_scipy = ndimage.affine_transform(\n image, matrix, offset,\n output_shape=output_shape,\n order=interp_order,\n mode=interp_mode,\n prefilter=False)\n\n # transform with dask-image\n image_t_dask = da_ndinterp.affine_transform(\n image_da, matrix, offset,\n output_shape=output_shape,\n output_chunks=output_chunks,\n order=interp_order,\n mode=interp_mode)\n image_t_dask_computed = image_t_dask.compute()\n\n assert np.allclose(image_t_scipy, image_t_dask_computed)\n\n\n@pytest.mark.parametrize(\"n\",\n [1, 2, 3])\n@pytest.mark.parametrize(\"input_output_shape_per_dim\",\n [(25, 25), (25, 10)])\n@pytest.mark.parametrize(\"interp_order\",\n range(6))\n@pytest.mark.parametrize(\"input_output_chunksize_per_dim\",\n [(16, 16), (16, 7), (7, 16)])\n@pytest.mark.parametrize(\"random_seed\",\n [0, 1, 2])\ndef test_affine_transform_general(n,\n input_output_shape_per_dim,\n interp_order,\n input_output_chunksize_per_dim,\n random_seed):\n\n kwargs = dict()\n kwargs['n'] = n\n kwargs['input_output_shape_per_dim'] = input_output_shape_per_dim\n kwargs['interp_order'] = interp_order\n kwargs['input_output_chunksize_per_dim'] = input_output_chunksize_per_dim\n kwargs['random_seed'] = random_seed\n\n validate_affine_transform(**kwargs)\n\n\n@pytest.mark.cupy\n@pytest.mark.parametrize(\"n\",\n [1, 2, 3])\n@pytest.mark.parametrize(\"input_output_shape_per_dim\",\n [(25, 25), (25, 10)])\n@pytest.mark.parametrize(\"interp_order\",\n [0, 1])\n@pytest.mark.parametrize(\"input_output_chunksize_per_dim\",\n [(16, 16), (16, 7)])\n@pytest.mark.parametrize(\"random_seed\",\n [0])\ndef test_affine_transform_cupy(n,\n input_output_shape_per_dim,\n interp_order,\n input_output_chunksize_per_dim,\n random_seed):\n cupy = pytest.importorskip(\"cupy\", minversion=\"6.0.0\")\n\n kwargs = dict()\n kwargs['n'] = n\n kwargs['input_output_shape_per_dim'] = input_output_shape_per_dim\n kwargs['interp_order'] = interp_order\n kwargs['input_output_chunksize_per_dim'] = input_output_chunksize_per_dim\n kwargs['random_seed'] = random_seed\n kwargs['use_cupy'] = True\n\n validate_affine_transform(**kwargs)\n\n\n@pytest.mark.parametrize(\"n\",\n [1, 2, 3])\n@pytest.mark.parametrize(\"interp_mode\",\n ['constant', 'nearest'])\n@pytest.mark.parametrize(\"input_output_shape_per_dim\",\n [(20, 30)])\n@pytest.mark.parametrize(\"input_output_chunksize_per_dim\",\n [(15, 10)])\ndef test_affine_transform_modes(n,\n interp_mode,\n input_output_shape_per_dim,\n input_output_chunksize_per_dim,\n ):\n\n kwargs = dict()\n kwargs['n'] = n\n kwargs['interp_mode'] = interp_mode\n kwargs['input_output_shape_per_dim'] = input_output_shape_per_dim\n kwargs['input_output_chunksize_per_dim'] = input_output_chunksize_per_dim\n kwargs['interp_order'] = 0\n\n validate_affine_transform(**kwargs)\n\n\n@pytest.mark.parametrize(\"interp_mode\",\n ['wrap', 'reflect', 'mirror'])\ndef test_affine_transform_unsupported_modes(interp_mode):\n\n kwargs = dict()\n kwargs['interp_mode'] = interp_mode\n\n with pytest.raises(NotImplementedError):\n 
validate_affine_transform(**kwargs)\n\n\ndef test_affine_transform_numpy_input():\n\n image = np.ones((3, 3))\n image_t = da_ndinterp.affine_transform(image, np.eye(2), [0, 0])\n\n assert image_t.shape == image.shape\n assert (image == image_t).min()\n\n\ndef test_affine_transform_minimal_input():\n\n image = np.ones((3, 3))\n image_t = da_ndinterp.affine_transform(np.ones((3, 3)), np.eye(2))\n\n assert image_t.shape == image.shape\n\n\ndef test_affine_transform_type_consistency():\n\n image = da.ones((3, 3))\n image_t = da_ndinterp.affine_transform(image, np.eye(2), [0, 0])\n\n assert isinstance(image, type(image_t))\n assert isinstance(image[0, 0].compute(), type(image_t[0, 0].compute()))\n\n\n@pytest.mark.cupy\ndef test_affine_transform_type_consistency_gpu():\n\n cupy = pytest.importorskip(\"cupy\", minversion=\"6.0.0\")\n\n image = da.ones((3, 3))\n image_t = da_ndinterp.affine_transform(image, np.eye(2), [0, 0])\n\n image.map_blocks(cupy.asarray)\n\n assert isinstance(image, type(image_t))\n assert isinstance(image[0, 0].compute(), type(image_t[0, 0].compute()))\n\n\ndef test_affine_transform_no_output_shape_or_chunks_specified():\n\n image = da.ones((3, 3))\n image_t = da_ndinterp.affine_transform(image, np.eye(2), [0, 0])\n\n assert image_t.shape == image.shape\n assert image_t.chunks == tuple([(s,) for s in image.shape])\n\n\ndef test_affine_transform_prefilter_warning():\n\n with pytest.warns(UserWarning):\n da_ndinterp.affine_transform(da.ones(3), [1], [0],\n order=3, prefilter=True)\n\n\n@pytest.mark.timeout(15)\ndef test_affine_transform_large_input_small_output_cpu():\n \"\"\"\n Make sure input array does not need to be computed entirely\n \"\"\"\n\n # fully computed, this array would occupy 8TB\n image = da.random.random([10000] * 3, chunks=(200, 200, 200))\n image_t = da_ndinterp.affine_transform(image, np.eye(3), [0, 0, 0],\n output_chunks=[1, 1, 1],\n output_shape=[1, 1, 1])\n\n # if more than the needed chunks should be computed,\n # this would take long and eventually raise a MemoryError\n image_t[0, 0, 0].compute()\n\n\n@pytest.mark.cupy\n@pytest.mark.timeout(15)\ndef test_affine_transform_large_input_small_output_gpu():\n \"\"\"\n Make sure input array does not need to be computed entirely\n \"\"\"\n cupy = pytest.importorskip(\"cupy\", minversion=\"6.0.0\")\n\n # this array would occupy more than 24GB on a GPU\n image = da.random.random([2000] * 3, chunks=(50, 50, 50))\n image.map_blocks(cupy.asarray)\n\n image_t = da_ndinterp.affine_transform(image, np.eye(3), [0, 0, 0],\n output_chunks=[1, 1, 1],\n output_shape=[1, 1, 1])\n # if more than the needed chunks should be computed,\n # this would take long and eventually raise a MemoryError\n image_t[0, 0, 0].compute()\n\n\n@pytest.mark.filterwarnings(\"ignore:The behavior of affine_transform \"\n \"with a 1-D array supplied for the matrix \"\n \"parameter has changed\")\n@pytest.mark.parametrize(\"n\",\n [1, 2, 3, 4])\ndef test_affine_transform_parameter_formats(n):\n\n # define reference parameters\n scale_factors = np.ones(n, dtype=np.float) * 2.\n matrix_n = np.diag(scale_factors)\n offset = -np.ones(n)\n\n # convert into different formats\n matrix_only_scaling = scale_factors\n matrix_pre_homogeneous = np.hstack((matrix_n, offset[:, None]))\n matrix_homogeneous = np.vstack((matrix_pre_homogeneous,\n [0] * n + [1]))\n\n np.random.seed(0)\n image = da.random.random([5] * n)\n\n # reference run\n image_t_0 = da_ndinterp.affine_transform(image,\n matrix_n,\n offset).compute()\n\n # assert that the different parameter 
formats\n # lead to the same output\n image_t_scale = da_ndinterp.affine_transform(image,\n matrix_only_scaling,\n offset).compute()\n assert (np.allclose(image_t_0, image_t_scale))\n\n for matrix in [matrix_pre_homogeneous, matrix_homogeneous]:\n\n image_t = da_ndinterp.affine_transform(image,\n matrix,\n offset + 10., # ignored\n ).compute()\n\n assert(np.allclose(image_t_0, image_t))\n\n # catch matrices that are not homogeneous transformation matrices\n with pytest.raises(ValueError):\n matrix_not_homogeneous = np.vstack((matrix_pre_homogeneous,\n [-1] * n + [1]))\n da_ndinterp.affine_transform(image,\n matrix_not_homogeneous,\n offset)\n","sub_path":"tests/test_dask_image/test_ndinterp/test_affine_transformation.py","file_name":"test_affine_transformation.py","file_ext":"py","file_size_in_byte":10599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"468400970","text":"__author__ = 'lslacker'\n# -*- coding: utf-8 -*-\nimport argparse\nfrom mssqlwrapper import DB, TempTable\nimport logging\nfrom reader import ExcelReader\nimport datetime\nfrom itertools import repeat\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_investment_id(db, investment_code):\n\n return db.get_one_value('''\n select stockID\n from vewEquities\n where stockCode=?\n ''', investment_code) if get_investment_id else None\n\n\ndef delists(db, investment_id, investment_code, investment_status_id):\n investment_ids = investment_id.split(',') if investment_id else repeat(None)\n investment_codes = investment_code.split(',') if investment_code else repeat(None)\n params = (zip(investment_ids, investment_codes, repeat(investment_status_id)))\n return any([delist(db, *param) for param in params])\n\n\ndef delist(db, investment_id, investment_code, investment_status_id):\n investment_id = investment_id or get_investment_id(db, investment_code)\n\n data_dict = locals()\n del data_dict['db']\n logger.info('{}'.format(investment_code))\n del data_dict['investment_code']\n\n data_dict = ['@{k}={v!r}'.format(k=k.replace('_', ''), v=v) for k, v in data_dict.items()]\n\n proc_query = '''\n exec Lonsec.dbo.prcInvestmentVariablesPut {params}\n '''.format(params=','.join(data_dict))\n logger.info(proc_query)\n\n count = db.execute(proc_query)\n\n # count is always -1, does not make sense to return it???\n return count\n\n\n\ndef consoleUI():\n parser = argparse.ArgumentParser(description='Merge multiple csv files into excel file, each csv')\n parser.add_argument('--server', default=r'MEL-TST-001\\WEBSQL', help='Database Server')\n parser.add_argument('--database', default=r'Lonsec', help='Database Name')\n parser.add_argument('-v', '--verbose', action='count', default=0)\n parser.add_argument('--investment-status-id', help='Investment Status ID. 
Default: 3 (closed)', type=int, default=3)\n parser.add_argument('--dry-run', help='An excel file (normally from Jen Lee)', action='store_true')\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument('--investment-code', help='Investment Code aka Stock Code')\n group.add_argument('--investment-id', help='Investment ID aka Stock ID')\n\n a = parser.parse_args()\n\n if a.verbose > 1:\n logging.basicConfig(level=logging.INFO)\n\n connection_string1 = r'Driver={{SQL Server Native Client 11.0}};Server={server};Database={database};' \\\n 'Trusted_Connection=yes;'.format(server=a.server, database=a.database)\n\n db = DB.from_connection_string(connection_string1)\n if a.verbose > 1:\n db.debug = True\n\n logger.info(delists(db, a.investment_id, a.investment_code, a.investment_status_id))\n\n if not a.dry_run:\n logger.info('Commit changes')\n db.commit()\n else:\n logger.info('All changes did not commit')\n\nif __name__ == '__main__':\n consoleUI()\n","sub_path":"stock_delist.py","file_name":"stock_delist.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"156431068","text":"class State:\n id = \"\"\n name = tuple()\n parens = []\n dot = 0\n\n def __init__(self, name, parens, dot):\n self.name = name \n self.parens = parens\n self.dot = dot\n\n def __str__(self):\n gram = ''\n for g in self.name[1]:\n gram = gram + g\n return f'{self.name[0]}->{gram} {{{self.parens}}} {self.dot}'\n \n def __repr__(self):\n gram = ''\n for g in self.name[1]:\n gram = gram + g\n return f'{self.name[0]}->{gram} {{{self.parens}}} {self.dot}'\n\n def __eq__(self, other):\n if isinstance(other, State):\n return ((self.name, self.parens, self.dot) ==\n (other.name, other.parens, other.dot))\n else:\n return False\n\n def __hash__(self):\n h = hash(self.name[0])\n for par in self.name[1]:\n h = h ^ hash(par)\n h = h ^ hash(self.dot)\n return h\n\n @staticmethod\n def sorted_name(State):\n return State.name\n\nclass E_NKA:\n curr = 0\n Q = dict()\n F = []\n L = set()\n lookup = dict()\n q0 = ''\n\n def add_state(self, state):\n self.Q[self.curr] = state\n self.curr += 1\n return self.curr - 1\n\n def find_id(self, state):\n for s in self.Q:\n if state == self.Q[s]:\n return s\n \n def add_transition(self, state_old, delta, state_new):\n key = (state_old, delta)\n if key not in self.lookup:\n self.lookup[key] = []\n self.lookup[key].append(state_new)\n\n def e_closure(self, Q):\n Y = [Q]\n stack = [Q]\n \n while len(stack) != 0:\n q = stack.pop()\n key = (q, '$')\n if key in self.lookup:\n for state in self.lookup[key]:\n if state not in Y:\n Y.append(state)\n stack.append(state)\n return Y \n\n def fill_langauge(self):\n for key in self.lookup:\n new_state = self.lookup[key]\n self.L.add(key[1])\n return self.L\n \n def fix_lookup(self):\n new = {}\n for state in self.lookup:\n id1 = self.find_id(state[0])\n id2 = []\n for s in self.lookup[state]:\n id2.append(self.find_id(s))\n new[id1, state[1]] = id2\n self.lookup = new\n\nclass DKA:\n Q = dict()\n L = set()\n F = []\n q0 = []\n lookup = {}\n\n def lookup_num(self, states):\n for state in self.Q:\n if states == self.Q[state]:\n return state\n return -1\n \nclass Util:\n @staticmethod\n def enka_to_dka(enka):\n dka = DKA()\n dka.q0 = []\n dka.L = enka.fill_langauge()\n dka.L.remove('$')\n \n e_closures = dict()\n for state in enka.Q:\n e_closures[state] = enka.e_closure(state)\n\n dka.q0.append(e_closures[0])\n dka.Q[0] = dka.q0[0]\n \n new_Q = set()\n 
new_Q.add(0)\n\n stack = []\n stack.append(dka.q0[0])\n\n state_num = 0\n new_state = set()\n\n while len(stack) > 0:\n curr_q = stack.pop()\n curr_id = dka.lookup_num(curr_q)\n\n for l in dka.L: \n \n moves = []\n for q in curr_q: \n key = (q, l)\n if key in enka.lookup:\n tmp = enka.lookup[key]\n for t in tmp:\n moves.append(t)\n moves = list(dict.fromkeys(moves))\n moves.sort(key= lambda x : id(x), reverse=True)\n \n new = []\n for move in moves:\n tmp = e_closures[move]\n for t in tmp:\n new.append(t)\n \n new = list(dict.fromkeys(new))\n new.sort(key= lambda x : id(x), reverse=True)\n\n ids = []\n for n in new:\n ids.append(id(n))\n \n if len(new) > 0:\n if str(ids) not in new_state:\n stack.append(new)\n state_num += 1\n dka.Q[state_num] = new\n dka.lookup[curr_id, l] = new\n new_state.add(str(ids))\n \n dka.q0 = [dka.q0]\n\n for look in dka.lookup:\n dka.lookup[look] = dka.lookup_num(dka.lookup[look])\n \n\n for state in dka.Q:\n if enka.q0 in dka.Q[state]:\n dka.F.append(dka.Q[state])\n return dka\n\n @staticmethod\n def print_lookup(lookup):\n for look in lookup:\n print(f\"{look} => {lookup[look]}\")\n\n @staticmethod\n def print_automata(automata):\n print(\"================== Automata ==================\")\n print(\"===== States =====\")\n for key in automata.Q:\n print(f'#{key} {automata.Q[key]}')\n print(\"===== Finite states =====\")\n for state in automata.F:\n print(f'#{state}')\n print(\"===== Start states =====\")\n for state in automata.q0:\n print(f'#{state}')\n print(\"===== Lookup table =====\")\n Util.print_lookup(automata.lookup)","sub_path":"syntaxanalyzer-master/Automata.py","file_name":"Automata.py","file_ext":"py","file_size_in_byte":5305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"495684031","text":"#!/usr/bin/env python3\r\n\r\n# display a welcome message\r\nprint(\"The Test Scores application\")\r\nprint()\r\n\r\nwhile True:\r\n\r\n print(\"Enter test scores\")\r\n print(\"Enter end to end input\")\r\n print(\"======================\")\r\n\r\n # initialize variables\r\n counter = 0\r\n score_total = 0\r\n test_score = 0\r\n test_score_string = \"\"\r\n\r\n # loop for test scores for one test\r\n while True:\r\n test_score_string = input(\"Enter test score: \")\r\n if test_score_string.lower() == \"end\":\r\n break\r\n else:\r\n test_score = int(test_score_string)\r\n\r\n if test_score >= 0 and test_score <= 100:\r\n score_total += test_score\r\n counter += 1\r\n else:\r\n print(\"Test score must be from 0 through 100. Score discarded. Try again.\")\r\n\r\n # calculate average score\r\n average_score = round(score_total / counter)\r\n \r\n # format and display the result\r\n print(\"======================\")\r\n print(\"Total Score:\", score_total,\r\n \"\\nAverage Score:\", average_score)\r\n print()\r\n choice = input(\"Enter another set of test scores (y/n)? 
\")\r\n if(choice.lower() == \"n\"):\r\n break\r\n print()\r\n \r\nprint(\"Bye\")\r\n\r\n\r\n","sub_path":"ch03/test_scores.py","file_name":"test_scores.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"572570389","text":"import numpy as np\nfrom scipy import signal, misc\nimport matplotlib.pyplot as plt\n\nimg = plt.imread(\"./test.jpg\")\n# a=np.array([[1,1,1],[1,1,1],[1,1,1]])\nb = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])\n# print(a)\n# print(b)\n# imgr=img[:,:,0]\n# imgg=img[:,:,1]\n# imgb=img[:,:,2]\n# plt.imshow(imgr,\"Reds\")\n# plt.imshow(imgg,\"Greens\")\n# plt.imshow(imgb,\"Blues\")\nimgr = np.array(\n [\n [100, 100, 100, 0, 0, 0],\n [100, 100, 100, 0, 0, 0],\n [100, 100, 100, 0, 0, 0],\n [100, 100, 100, 0, 0, 0],\n [100, 100, 100, 0, 0, 0],\n [100, 100, 100, 0, 0, 0],\n ]\n)\nplt.subplot(2, 1, 1)\nplt.imshow(imgr, \"gray\")\nprint(b)\nprint(imgr)\ni = signal.convolve2d(imgr, b, \"valid\")\nprint(i)\nplt.subplot(2, 1, 2)\nplt.imshow(i, \"gray\")\nplt.show()\n","sub_path":"statistical/cv.py","file_name":"cv.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"172117069","text":"#!/usr/bin/python3\n#move n plates from a to c via b\ndef move(a,b,c,n):\n\tif n==0:\n\t\treturn\n\tmove(a,c,b,n-1)\n\tprint(a,'->',c)\n\tmove(b,a,c,n-1)\nnum=int(input(\"Enter number of plates:\"))\nmove(1,2,3,num)\n","sub_path":"tower.py","file_name":"tower.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"71161585","text":"import discord\nimport asyncio\nfrom Classes.access_class import acFunctionality\nfrom Classes.overwatch import oWFunctionality\nfrom Classes.todolists import todoFunctionality\n\n# Initialise discord client\nclient = discord.Client()\nserver = discord.Server\n\n# Globals\naccess_channel = None\noWChannel = None\ntestChannel = None\nprogChannel = None\nrole_bot = None\noWBot = None\nworkTracker = None\n\n@client.event\nasync def on_ready():\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n\n await client.change_presence(game=discord.Game(name = 'Say !help'))\n \n # Global variables\n global access_channel\n global oWChannel\n global testChannel\n global progChannel\n global role_bot\n global oWBot\n global workTracker\n access_channel = client.get_channel('251117025659846666')\n oWChannel = client.get_channel('243882648114692096')\n testChannel = client.get_channel('244871141989154816')\n progChannel = client.get_channel('244669160666169344')\n role_bot = acFunctionality(client, access_channel)\n oWBot = oWFunctionality(client, oWChannel)\n workTracker = todoFunctionality(client, progChannel)\n\n # Refresh the channel messages\n await role_bot.refresh()\n await oWBot.refresh()\n await workTracker.refresh()\n\n@client.event\nasync def on_message(message):\n if message.channel == access_channel:\n await role_bot.channel_commands(message)\n\n if message.channel == oWChannel:\n await oWBot.channel_commands(message)\n\n if message.channel == progChannel:\n await workTracker.channel_commands(message)\n\n # This is a global command and works from every channel\n if message.content.startswith('!help'):\n with open('help.txt', 'r') as f:\n await client.send_message(message.author, f.read())\n 
return\n\nclient.run('TOKEN_HERE')\n","sub_path":"basic_bot.py","file_name":"basic_bot.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"212948013","text":"texte = (input(\"saisire un text : \")).split(' ')\n\nfor mot in texte:\n if mot[0] == 'a':\n print(\"Le mot : \", mot, \" commence par la lettre 'a'\")\n\n\n# deuxiemme methode\n\n# Lire la chaine s\ns = input(\"Tapez une chaine de caractères s : \")\n\n# convertir la chaine s en une liste\ns = s.split()\n\n# obtenir la longueur de la liste s\nn = len(s)\n\n# rechercher les éléments de la liste qui commencent par la lettre 'a'\nfor i in range(0,n):\n if(s[i][0] == 'a'):\n print(\"Le mot : '\", s[i], \"' commence par la lettre 'a'\")","sub_path":"Affiche_commanceParC.py","file_name":"Affiche_commanceParC.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"123267597","text":"def gcd(a: int, b: int):\n \"\"\"Find Greatest Common Diviser of the number a and b\n\n Arguments:\n a {int} -- the number a\n b {int} -- the number b\n \"\"\"\n a, b = min(a, b), max(a, b)\n print(a, b)\n if a < 2:\n return 0\n\n remaining = b % a\n if remaining == 0:\n print(a, \"!!\")\n return a\n\n return gcd(a, remaining)\n","sub_path":"python/grokking-algorithms/04/divideandconquer.py","file_name":"divideandconquer.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"513803223","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 6 13:43:14 2020\n\n@author: pablo\n\"\"\"\n\nimport time\nimport numpy as np\nfrom math import log, sqrt\nfrom scipy.stats import norm\n\n# Sustitución de columnas en numpy: a[:,0]=np.array([5,5])\n# Las tienes que meter en filas !!!!!!!!!!!!!!!!!\n# Solo da problemas al bajar a dimensión 1, si no, funciona igual que matlab.\n\n# np.random.normal(0,1,(3,2,2)) matrices dos x dos. El orden de las\n# dimensiones va al revés que matlab\n\ndef HybridStockDividendsMSamples(S0,q0,r,a_pasos,b,volS_pasos,volq_pasos,rho,M,N,h,mat_pos):\n # M number of paths.\n # N number of step of each path.\n \n a = a_pasos[:N]\n volq = volq_pasos[:N]\n volS = volS_pasos[:N]\n \n S=[[]] * (N + 1)\n q=[[]] * (N + 1)\n\n mats = [pos for pos in mat_pos if pos<=N]\n\n if len(a)!=N:\n print('a está mal')\n if len(h)!=N:\n print('h está mal')\n\n t0 = time.time()\n np.random.seed(140494)\n # Calculo de la mtriz aleatoria \n Cov = np.array([[1, rho], [rho, 1]])\n # Descomposición de Cholesky. 
Diferente que en matlab, python da TriInferior.\n L = np.linalg.cholesky(Cov).T\n # Generación de normales independientes\n Z = np.random.normal(0,1,(N,2,M))\n # Transformación para correlacionarlas\n random_walk = np.zeros((N,2,M))\n for k in range(M):\n random_walk[:,:,k] = np.dot(Z[:,:,k],L)\n\n t1 = time.time()\n print('Generar números:', t1-t0)\n\n\n Saux = np.ones(2*M) * log(S0)\n q[0] = np.ones(2*M) * q0\n S[0] = np.exp(Saux)\n inicio = 0\n for days_year in mats:\n if inicio == 0: # Este if sirve para resetear a 0 el indice de dividendos cada año.\n Saux += h[inicio] * (np.ones(2 * M) * (r - 0.5 * volS[inicio] ** 2) - q[inicio]) + sqrt(h[inicio]) * volS[\n inicio] * np.concatenate((random_walk[inicio, 0, :], -random_walk[inicio, 0, :]))\n qaux = np.maximum(np.zeros(2 * M), q[inicio]) # q se hace negativo por la discretización del problema\n q[inicio + 1] = q[inicio] + h[inicio] * (a[inicio] * np.ones(2 * M) - b * q[inicio]) + np.multiply(\n np.sqrt(qaux * h[inicio]) * volq[inicio], np.concatenate((random_walk[inicio, 1, :], -random_walk[inicio, 1, :])))\n S[inicio + 1] = np.exp(Saux)\n else:\n Saux += h[inicio] * (np.ones(2 * M) * (r - 0.5 * volS[inicio] ** 2) - q[inicio]) + sqrt(h[inicio]) * volS[\n inicio] * np.concatenate((random_walk[inicio, 0, :], -random_walk[inicio, 0, :]))\n q[inicio + 1] = np.zeros(2 * M)\n S[inicio + 1] = np.exp(Saux)\n for dia in range(inicio + 1, days_year):\n Saux += h[dia] * (np.ones(2 * M) * (r - 0.5 * volS[dia] ** 2) - q[dia]) + sqrt(h[dia]) * volS[\n dia] * np.concatenate((random_walk[dia, 0, :], -random_walk[dia, 0, :]))\n qaux = np.maximum(np.zeros(2 * M), q[dia]) # q se hace negativo por la discretización del problema\n q[dia + 1] = q[dia] + h[dia] * (a[dia] * np.ones(2 * M) - b * q[dia]) + np.multiply(\n np.sqrt(qaux * h[dia]) * volq[dia], np.concatenate((random_walk[dia, 1, :], -random_walk[dia, 1, :])))\n S[dia + 1] = np.exp(Saux)\n inicio = days_year\n\n t2 = time.time()\n print('Cálculo caminos:', t2 - t1)\n return S, q\n\ndef PayoffDivFut(S,q,h):\n result = np.zeros(len(S[0]))\n for i in range(len(h)):\n result += np.multiply(S[i],q[i])*h[i]\n return result\n\ndef PayoffOptCall(S, K):\n return np.maximum(S - K * np.ones(len(S)), np.zeros(len(S)))","sub_path":"python_lib/code/montecarlo.py","file_name":"montecarlo.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"144636166","text":"# Copyright (c) Tianyu Wang. 
All Rights Reserved.\nfrom typing import Dict, List\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom detectron2.layers import ShapeSpec\nfrom detectron2.utils.registry import Registry\n\nfrom detectron2.modeling.anchor_generator import build_anchor_generator\nfrom detectron2.modeling.box_regression import Box2BoxTransform\nfrom detectron2.modeling.matcher import Matcher\nfrom detectron2.modeling import PROPOSAL_GENERATOR_REGISTRY, RPN_HEAD_REGISTRY\nfrom detectron2.modeling.proposal_generator.rpn_outputs import RPNOutputs, find_top_rpn_proposals\nfrom detectron2.modeling.proposal_generator.rpn import StandardRPNHead, RPN\nfrom detectron2.structures import BoxMode,Boxes\n\n\"\"\"\nRegistry for LISA RPN heads, which take CNN feature maps and perform\nobjectness classification and bounding box regression for anchors.\n\"\"\"\n\n# NOTE: `cfg.MODEL.RPN.HEAD_NAME` should be \"LISARPNHead\".\n\ndef build_rpn_head(cfg, input_shape,shadow_object_part=False):\n \"\"\"\n Build an RPN head defined by `cfg.MODEL.RPN.HEAD_NAME`.\n \"\"\"\n name = cfg.MODEL.RPN.HEAD_NAME\n return RPN_HEAD_REGISTRY.get(name)(cfg, input_shape,shadow_object_part)\n\n\n@RPN_HEAD_REGISTRY.register()\nclass LISARPNHead(StandardRPNHead):\n def __init__(self, cfg, input_shape: List[ShapeSpec], shadow_object_part= False):\n super(LISARPNHead, self).__init__(cfg,input_shape)\n self.shadow_object_part = shadow_object_part\n if self.shadow_object_part:\n in_channels = [s.channels for s in input_shape]\n assert len(set(in_channels)) == 1, \"Each level must have the same channel!\"\n in_channels = in_channels[0]\n self.conv = nn.Conv2d(in_channels , in_channels, kernel_size=3, stride=1, padding=1)\n for l in [self.conv]:\n nn.init.normal_(l.weight, std=0.01)\n nn.init.constant_(l.bias, 0)\n\n def forward(self, features):\n \"\"\"\n Args:\n features (list[Tensor]): list of feature maps\n \"\"\"\n \n pred_objectness_logits = []\n pred_anchor_deltas = []\n if self.shadow_object_part == False:\n pre_features = []\n for i,x in enumerate(features):\n\n t = F.relu(self.conv(x))\n\n # if self.shadow_object_part == False:\n # pre_features.append(t)\n \n pred_objectness_logits.append(self.objectness_logits(t))\n pred_anchor_deltas.append(self.anchor_deltas(t))\n \n if self.shadow_object_part == False:\n return pred_objectness_logits, pred_anchor_deltas, None\n else:\n return pred_objectness_logits, pred_anchor_deltas\n\n\ndef build_proposal_generator(cfg, input_shape, **args):\n \"\"\"\n Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`.\n The name can be \"PrecomputedProposals\" to use no proposal generator.\n \"\"\"\n name = cfg.MODEL.PROPOSAL_GENERATOR.NAME\n if name == \"PrecomputedProposals\":\n return None\n\n return PROPOSAL_GENERATOR_REGISTRY.get(name)(cfg, input_shape,**args)\n \n@PROPOSAL_GENERATOR_REGISTRY.register()\nclass LISARPN(RPN):\n\n def __init__(self, cfg, input_shape: Dict[str, ShapeSpec], shadow_object_part= False):\n super(LISARPN, self).__init__(cfg, input_shape)\n self.shadow_object_part = shadow_object_part\n if self.shadow_object_part:\n self.rpn_head = build_rpn_head(cfg, [input_shape[f] for f in self.in_features], self.shadow_object_part)\n \n def forward(self, images, features, gt_instances=None, pre_proposals=None):\n gt_boxes = [x.gt_boxes for x in gt_instances] if gt_instances is not None else None\n del gt_instances\n\n if self.shadow_object_part == False:\n features = [features[f] for f in self.in_features]\n pred_objectness_logits, pred_anchor_deltas, pre_features 
= self.rpn_head(features)\n anchors = self.anchor_generator(features)\n else:\n features = [features[f] for f in self.in_features]\n pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)\n anchors = self.anchor_generator(features)\n assert len(anchors[0]) == len(pre_proposals), \"number of pre_proposals {} and pre_anchors {} should be same.\".format(len(anchors[0]),len(pre_proposals))\n\n outputs = RPNOutputs(\n self.box2box_transform,\n self.anchor_matcher,\n self.batch_size_per_image,\n self.positive_fraction,\n images,\n pred_objectness_logits,\n pred_anchor_deltas,\n anchors,\n self.boundary_threshold,\n gt_boxes,\n self.smooth_l1_beta,\n )\n \n if self.training:\n if self.shadow_object_part == False:\n losses = {k+'_rela': v * self.loss_weight for k, v in outputs.losses().items()}\n else:\n losses = {k: v * self.loss_weight for k, v in outputs.losses().items()}\n else:\n losses = {}\n\n with torch.no_grad():\n\n \n pre_proposals = outputs.predict_proposals()\n # Find the top proposals by applying NMS and removing boxes that\n # are too small. The proposals are treated as fixed for approximate\n # joint training with roi heads. This approach ignores the derivative\n # w.r.t. the proposal boxes’ coordinates that are also network\n # responses, so is approximate.\n proposals = find_top_rpn_proposals(\n pre_proposals,\n outputs.predict_objectness_logits(),\n images,\n self.nms_thresh,\n self.pre_nms_topk[self.training],\n self.post_nms_topk[self.training],\n self.min_box_side_len,\n self.training,\n )\n # For RPN-only models, the proposals are the final output and we return them in\n # high-to-low confidence order.\n # For end-to-end models, the RPN proposals are an intermediate state\n # and this sorting is actually not needed. But the cost is negligible.\n inds = [p.objectness_logits.sort(descending=True)[1] for p in proposals]\n proposals = [p[ind] for p, ind in zip(proposals, inds)]\n if self.shadow_object_part == False:\n return proposals, losses, pre_features, pre_proposals\n else:\n return proposals, losses\n \n\n\n","sub_path":"build/lib.linux-x86_64-3.6/detectron2/modeling/proposal_generator/LISA_rpn.py","file_name":"LISA_rpn.py","file_ext":"py","file_size_in_byte":6512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"436197108","text":"# -*- coding: utf-8 -*-\nfrom time import time\nimport io\nimport os.path\nimport struct\nimport traceback\nfrom Crypto.Hash import SHA\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Util.strxor import strxor\nfrom Crypto.Util.number import long_to_bytes, bytes_to_long\nimport crypt\nimport prime\nimport TL\nfrom connection import MTProtoConnection\nfrom message import MTProtoMessage, MTProtoContainer\n\n\nclass MTProto(object):\n ua = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36\"\n headers = {\"User-Agent\": ua}\n\n @staticmethod\n # Метод парсинга акканута, возвращает объект MTProtoConnection\n def parse_acc(account):\n localstorage = account['data']['localstorage']\n localstorage = {x.split(\"=\")[0]: x.split(\"=\")[1].strip('\"') for x in localstorage.split(\";\")}\n mtproto_conn = MTProtoConnection()\n for k, v in localstorage.iteritems():\n setattr(mtproto_conn, k, v)\n dc = mtproto_conn.dc\n for dc_id in range(1, 6):\n mtproto_conn.dc = dc_id\n if not mtproto_conn.auth_key or not mtproto_conn.server_salt:\n mtproto_conn.auth_key = None\n mtproto_conn.server_salt = None\n\n mtproto_conn.dc = dc\n return 
mtproto_conn\n\n @staticmethod\n def create_signature_message(sess, message):\n message.session_id = sess.additional['mtproto_conn'].session_id if not message.session_id else message.session_id\n\n # Если сменился аккаунт, и сообщение отправляется уже с другой сессией\n if message.session_id != sess.additional['mtproto_conn'].session_id:\n message.session_id = sess.additional['mtproto_conn'].session_id\n if isinstance(message, MTProtoContainer):\n sess.additional['mtproto_conn'].dc = message.dc\n for m in message.messages[:]:\n if m.obj == 'msgs_ack':\n message.messages.remove(m)\n continue\n m.session_id = sess.additional['mtproto_conn'].session_id\n if m.access_hash:\n # Проверяем, возможно access_hash уже для данного аккаунта\n conn_ah = sess.additional['mtproto_conn'].access_hashes.get(m.obj_id, {}).get('access_hash')\n # Если access_hash'a нет, то получаем его для этого аккаунта, по сохраненному в сообщение username\n if not conn_ah:\n username = getattr(m, 'resolve_name', None)\n if username:\n resolve_name = MTProto.method_call(sess, \"contacts.resolveUsername\", username=str(username))\n m.kwargs['peer']['access_hash'] = resolve_name['chats'][0]['pflags']['access_hash']\n m.seqno = MTProto.generate_seqno(sess.additional['mtproto_conn'], m)\n m.message_id = MTProto.generate_message_id(sess.additional['mtproto_conn'], m)\n for m in sess.additional['mtproto_conn'].pending_messages:\n if getattr(m, 'update_message', False):\n MTProto.generate_seqno(sess.additional['mtproto_conn'], m)\n MTProto.generate_message_id(sess.additional['mtproto_conn'], m)\n message.add_message(m)\n message.seqno = MTProto.generate_seqno(sess.additional['mtproto_conn'], message)\n message.message_id = MTProto.generate_message_id(sess.additional['mtproto_conn'], message)\n\n # Если в сообщении содержится access_hash, то нужно сохранить данные запрашиваемого чата, они могут понадобиться,\n # для следующего аккаунта, если текущий аккаунт заблокируется\n if getattr(message, 'access_hash', None):\n message_ah = getattr(message, 'access_hash', None)\n conn_ah = sess.additional['mtproto_conn'].access_hashes.get(message.obj_id, {}).get('access_hash')\n if not conn_ah:\n # Повторно резолвим этот чат\n username = getattr(message, 'resolve_name', None)\n if username:\n resolve_name = MTProto.method_call(sess, \"contacts.resolveUsername\", username=str(username))\n message.kwargs['peer']['access_hash'] = resolve_name['chats'][0]['pflags']['access_hash']\n sess.additional['mtproto_conn'].access_hashes[message.obj_id] = {'access_hash': resolve_name['chats'][0]['pflags']['access_hash'], 'name': username}\n\n if message_ah != conn_ah:\n message_ah = conn_ah\n\n if not sess.additional['mtproto_conn'].auth_key or not sess.additional['mtproto_conn'].server_salt:\n if not message.message_id:\n MTProto.generate_message_id(sess.additional['mtproto_conn'], message)\n auth_key_id = struct.pack(\" q:\n (p, q) = (q, p)\n assert p * q == pq and p < q\n\n p_bytes = long_to_bytes(p)\n q_bytes = long_to_bytes(q)\n f = open(os.path.join(os.path.dirname(__file__), \"rsa.pub\"))\n key = RSA.importKey(f.read())\n\n new_nonce = os.urandom(32)\n\n data = TL.serialize_obj('p_q_inner_data',\n pq=pq_bytes,\n p=p_bytes,\n q=q_bytes,\n nonce=nonce,\n server_nonce=server_nonce,\n new_nonce=new_nonce,\n mtproto=True)\n\n sha_digest = SHA.new(data).digest()\n random_bytes = os.urandom(255 - len(data) - len(sha_digest))\n to_encrypt = sha_digest + data + random_bytes\n encrypted_data = key.encrypt(to_encrypt, 0)[0]\n\n message = 
MTProtoMessage(method='req_DH_params', nonce=nonce, server_nonce=server_nonce, p=p_bytes, q=q_bytes,\n public_key_fingerprint=public_key_fingerprint, encrypted_data=encrypted_data,\n mtproto=True, plain_message=True)\n\n MTProto.create_signature_message(sess, message)\n response = MTProto.send_message(sess, message, create_signature=False)\n server_dh_params = MTProto.parse_response(sess, response, mtproto_conn)\n\n assert nonce == server_dh_params['nonce']\n assert server_nonce == server_dh_params['server_nonce']\n\n encrypted_answer = server_dh_params['encrypted_answer']\n\n tmp_aes_key = SHA.new(new_nonce + server_nonce).digest() + SHA.new(server_nonce + new_nonce).digest()[0:12]\n tmp_aes_iv = SHA.new(server_nonce + new_nonce).digest()[12:20] + SHA.new(\n new_nonce + new_nonce).digest() + new_nonce[0:4]\n\n answer_with_hash = crypt.ige_decrypt(encrypted_answer, tmp_aes_key, tmp_aes_iv)\n\n answer_hash = answer_with_hash[:20]\n answer = answer_with_hash[20:]\n\n server_DH_inner_data = TL.deserialize(io.BytesIO(answer))\n assert nonce == server_DH_inner_data['nonce']\n assert server_nonce == server_DH_inner_data['server_nonce']\n dh_prime_str = server_DH_inner_data['dh_prime']\n g = server_DH_inner_data['g']\n g_a_str = server_DH_inner_data['g_a']\n server_time = server_DH_inner_data['server_time']\n mtproto_conn.timedelta = server_time - time()\n\n dh_prime = bytes_to_long(dh_prime_str)\n g_a = bytes_to_long(g_a_str)\n\n assert prime.isprime(dh_prime)\n retry_id = 0\n b_str = os.urandom(256)\n b = bytes_to_long(b_str)\n g_b = pow(g, b, dh_prime)\n\n g_b_str = long_to_bytes(g_b)\n\n data = TL.serialize_obj('client_DH_inner_data',\n nonce=nonce,\n server_nonce=server_nonce,\n retry_id=retry_id,\n g_b=g_b_str, mtproto=True)\n data_with_sha = SHA.new(data).digest() + data\n data_with_sha_padded = data_with_sha + os.urandom(-len(data_with_sha) % 16)\n encrypted_data = crypt.ige_encrypt(data_with_sha_padded, tmp_aes_key, tmp_aes_iv)\n\n message = MTProtoMessage(method='set_client_DH_params', nonce=nonce, server_nonce=server_nonce,\n encrypted_data=encrypted_data, mtproto=True, plain_message=True)\n MTProto.create_signature_message(sess, message)\n response = MTProto.send_message(sess, message, create_signature=False)\n Set_client_DH_params_answer = MTProto.parse_response(sess, response, mtproto_conn)\n\n # print Set_client_DH_params_answer\n auth_key = pow(g_a, b, dh_prime)\n auth_key_str = long_to_bytes(auth_key)\n auth_key_sha = SHA.new(auth_key_str).digest()\n auth_key_aux_hash = auth_key_sha[:8]\n\n new_nonce_hash1 = SHA.new(new_nonce + b'\\x01' + auth_key_aux_hash).digest()[-16:]\n new_nonce_hash2 = SHA.new(new_nonce + b'\\x02' + auth_key_aux_hash).digest()[-16:]\n new_nonce_hash3 = SHA.new(new_nonce + b'\\x03' + auth_key_aux_hash).digest()[-16:]\n\n assert Set_client_DH_params_answer['nonce'] == nonce\n assert Set_client_DH_params_answer['server_nonce'] == server_nonce\n\n if Set_client_DH_params_answer.name == 'dh_gen_ok':\n assert Set_client_DH_params_answer['new_nonce_hash1'] == new_nonce_hash1\n\n mtproto_conn.server_salt = strxor(new_nonce[0:8], server_nonce[0:8]).encode('hex')\n mtproto_conn.auth_key = auth_key_str.encode('hex')\n return \"Auth Ok\"\n elif Set_client_DH_params_answer.name == 'dh_gen_retry':\n assert Set_client_DH_params_answer['new_nonce_hash2'] == new_nonce_hash2\n elif Set_client_DH_params_answer.name == 'dh_gen_fail':\n assert Set_client_DH_params_answer['new_nonce_hash3'] == new_nonce_hash3\n raise Exception(\"Auth Failed\")\n else:\n raise 
Exception(\"Response Error\")\n\n @staticmethod\n def parse_response(sess, response, mtproto_conn):\n response_data = response.content\n auth_key_id = response_data[0:8]\n if auth_key_id == b\"\\x00\" * 8:\n (message_id, message_length) = struct.unpack(\"qI\", response_data[8:20])\n message_id = struct.pack(' 1:\n container = MTProtoContainer(content_related=False, dc=new_dc)\n\n # Добавляем сообщения в контейнер из pending_messages\n for m in sess.additional['mtproto_conn'].pending_messages:\n # Заново генерируем seqno и message_id если требуется, например такие сооб-я как bad_msg_notification\n if getattr(m, 'update_message', False):\n m.update_message = False\n MTProto.generate_message_id(sess.additional['mtproto_conn'], m)\n MTProto.generate_seqno(sess.additional['mtproto_conn'], m)\n container.add_message(m)\n # Очищаем pending_messages, что бы не отправлять сообщения повторно\n sess.additional['mtproto_conn'].pending_messages = []\n\n # Если стадия логина, то подписываем сообщения, т.к. не вызывается метод create_signature\n if login:\n # Подписываем сообщения\n MTProto.create_signature_message(sess, container)\n response = MTProto.send_message(sess, container, create_signature=False)\n else:\n response = MTProto.send_message(sess, container)\n\n else:\n # Если стадия логина, то предварительно подписываем сообщения, т.к. не вызывается метод create_signature\n if login:\n # Подписываем сообщения\n MTProto.create_signature_message(sess, query_message)\n response = MTProto.send_message(sess, query_message)\n else:\n response = MTProto.send_message(sess, query_message, create_signature=True)\n\n parsed_response = MTProto.parse_response(sess, response, sess.additional['mtproto_conn'])\n MTProto.process_message(sess, parsed_response)\n\n result = MTProto.get_result_by_message_id(query_message, sess.additional['mtproto_conn'])\n\n if result:\n sess.save()\n return result['result']\n # print 'no result'\n\n @staticmethod\n def check_user_migrate(result, mtproto_conn, req_msg):\n if 'USER_MIGRATE_' in result.get('result', {}).get('error_message', ''):\n mtproto_conn.dc = result.get('result', {}).get('error_message').split('_')[-1]\n req_msg.update_message = True\n mtproto_conn.resend_messages.append(req_msg)\n return True\n\n @staticmethod\n def get_result_by_message_id(message, mtproto_conn):\n for received_message in mtproto_conn.received_messages:\n if received_message.get('messages', []):\n for cont_received_message in received_message.get('messages'):\n # Сохраняем access_hash'ы для аккаунта\n for chat in cont_received_message.get('result', {}).get('result', {}).get('chats', []):\n pflags = chat['pflags']\n if pflags.get('access_hash', None) and pflags.get('username', None):\n mtproto_conn.access_hashes[chat['id']] = {'access_hash': pflags['access_hash'], 'name': pflags['username']}\n\n if message.message_id == cont_received_message['result'].get('req_msg_id', None):\n if not MTProto.check_user_migrate(cont_received_message['result'], mtproto_conn, message):\n MTProto.remove_from_sending_messages(message.message_id, mtproto_conn)\n mtproto_conn.received_messages = []\n return cont_received_message['result']\n\n else:\n if received_message.get('req_msg_id', None) == message.message_id:\n # Сохраняем access_hash'ы для аккаунта\n for chat in received_message.get('result', {}).get('chats', []):\n pflags = chat['pflags']\n if pflags.get('access_hash', None) and pflags.get('username', None):\n mtproto_conn.access_hashes[chat['id']] = {'access_hash': pflags['access_hash'],\n 'name': 
pflags['username']}\n\n if not MTProto.check_user_migrate(received_message['result'], mtproto_conn, message):\n MTProto.remove_from_sending_messages(message.message_id, mtproto_conn)\n mtproto_conn.received_messages = []\n return received_message\n\n @staticmethod\n def remove_from_sending_messages(message_id, mtproto_conn):\n for sending_message in mtproto_conn.sending_messages[:]:\n index = mtproto_conn.sending_messages.index(sending_message)\n if sending_message.message_id == message_id:\n mtproto_conn.sending_messages.remove(sending_message)\n continue\n if isinstance(sending_message, MTProtoContainer):\n for cont_sending_message in sending_message.messages[:]:\n if cont_sending_message.obj == 'msgs_ack':\n mtproto_conn.sending_messages[index].messages.remove(cont_sending_message)\n continue\n elif cont_sending_message.method == 'http_wait':\n mtproto_conn.sending_messages[index].messages.remove(cont_sending_message)\n continue\n elif cont_sending_message.message_id == message_id:\n mtproto_conn.sending_messages[index].messages.remove(cont_sending_message)\n continue\n\n if not sending_message.messages:\n mtproto_conn.sending_messages.remove(sending_message)\n continue\n","sub_path":"api_telegram/mtproto.py","file_name":"mtproto.py","file_ext":"py","file_size_in_byte":27587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"119330428","text":"\"\"\"\nytdl.py\n\nLädt mit Hilfe von youtube-dl neue Videos aus Youtube Channels herunter\n\"\"\"\nimport os\nimport requests\nimport settings\nfrom time import sleep\nfrom subprocess import call, getoutput\nfrom termcolor import colored\nfrom datetime import datetime\n\n\nAPI_KEY = settings.API_KEY\nAPI_URL_CHANNELS = \"https://www.googleapis.com/youtube/v3/channels\"\nAPI_URL_PLAYLISTITEMS = \"https://www.googleapis.com/youtube/v3/playlistItems\"\nYOUTUBE_VIDEO_URL_BASE = \"http://www.youtube.com/watch?v=\"\nJUST_DO_IT_FILE = \"channel_ids.txt\"\nASK_FIRST_FILE = \"channel_ids_ask_first.txt\"\nVIDEO_IDS_TXT = os.path.expanduser(\"~/Dropbox/video_ids.txt\")\nMAXRESULTS = \"50\"\n\n\ndef get_uploads_playlist_id(channel_id):\n \"\"\"\n Holt die Playlist ID für die Uploads aus dem jeweiligen Channel\n \"\"\"\n payload = {\n \"part\": \"contentDetails\",\n \"id\": channel_id,\n \"fields\": \"items/contentDetails/relatedPlaylists/uploads\",\n \"key\": API_KEY\n }\n r = requests.get(API_URL_CHANNELS, params=payload)\n if r.status_code != 200:\n return \"Error with status code: \" + str(r.status_code)\n item = r.json()[\"items\"][0]\n uploads_playlist_id = item[\"contentDetails\"][\"relatedPlaylists\"][\"uploads\"]\n return uploads_playlist_id\n\n\ndef get_uploads_video_ids_and_titles(uploads_playlist_id):\n \"\"\"\n Holt die Video IDs aus der jeweiligen Upload Playlist\n \"\"\"\n payload = {\n \"part\": \"snippet\",\n \"maxResults\": MAXRESULTS,\n \"playlistId\": uploads_playlist_id,\n \"fields\": \"items/snippet\",\n \"key\": API_KEY\n }\n r = requests.get(API_URL_PLAYLISTITEMS, params=payload)\n if r.status_code != 200:\n return \"Error with status code: \" + str(r.status_code)\n uploads_video_ids = {}\n for item in r.json()[\"items\"]:\n video_id = item[\"snippet\"][\"resourceId\"][\"videoId\"]\n title = item[\"snippet\"][\"title\"]\n uploads_video_ids[video_id] = title\n return uploads_video_ids\n\n\ndef read_channel_ids_and_names(filename):\n \"\"\"\n Liest die Channel IDs und die Channel Namen aus einer Datei\n \"\"\"\n with open(filename) as text_file:\n channel_ids = {}\n for line in 
text_file:\n channel_id = line.split(\":::\")[0].strip()\n channel_name = line.split(\":::\")[1].strip()\n channel_ids[channel_id] = channel_name\n return channel_ids\n\n\ndef channel_ids(filename):\n \"\"\"\n Holt die Channel IDs\n \"\"\"\n return list(read_channel_ids_and_names(filename).keys())\n\n\ndef channel_name(filename, channel_id):\n \"\"\"\n Holt die Channel Namen\n \"\"\"\n return read_channel_ids_and_names(filename)[channel_id]\n\n\ndef read_video_ids():\n \"\"\"\n Liest vorhandene Video IDs aus einer Datei\n \"\"\"\n with open(VIDEO_IDS_TXT) as text_file:\n video_ids = []\n for line in text_file:\n video_ids.append(line.strip())\n return video_ids\n\n\ndef append_to_video_ids(video_ids):\n if video_ids:\n number = len(video_ids.split())\n print(colored(\"Writing {} IDs to {}\".format(number, VIDEO_IDS_TXT), \"grey\", attrs=[\"bold\"]))\n with open(VIDEO_IDS_TXT, \"a\") as text_file:\n text_file.write(video_ids)\n else:\n print(colored(\"Not writing {}, no IDs\".format(VIDEO_IDS_TXT), \"grey\", attrs=[\"bold\"]))\n\n\ndef video_allready_downloaded(video_id):\n if video_id in read_video_ids():\n return True\n return False\n\n\ndef download(video_id):\n \"\"\"\n Lädt ein Video herunter mit Hilfe von youtube-dl\n \"\"\"\n try:\n working_dir = os.getcwd()\n os.chdir(os.path.expanduser(\"~/Videos/TV/C Tube/\"))\n\n title_command = (\"youtube-dl -f best --get-filename -o '%(title)s' \") + YOUTUBE_VIDEO_URL_BASE + video_id\n title = getoutput(title_command).strip()\n title = title.replace(\"%\", \"_\")\n command = (\"youtube-dl -f best --console-title -o \\\"%(uploader)s - %(upload_date)s - {:.50} - %(id)s.%(ext)s\\\" \".format(title)) + YOUTUBE_VIDEO_URL_BASE + video_id\n call(command, shell=True)\n os.chdir(working_dir)\n return True\n except Exception as error:\n print(error)\n return False\n\n\ndef just_do_it(filename):\n \"\"\"\n Startet Downloads ohne nachzufragen\n \"\"\"\n just_do_it_channels = channel_ids(filename)\n video_ids = \"\"\n for channel_id in just_do_it_channels:\n channel = channel_name(filename, channel_id)\n print(\"Looking for new videos from {channel}\".format(channel=channel))\n uploads_playlist_id = get_uploads_playlist_id(channel_id)\n sleep(0.2)\n video_ids_and_titles = get_uploads_video_ids_and_titles(uploads_playlist_id).items()\n sleep(0.2)\n for video_id, title in video_ids_and_titles:\n if video_allready_downloaded(video_id):\n pass\n else:\n video_ids += \"{}\\n\".format(video_id)\n ytext = colored(\n \"Downloading {title} {youtube_video_url} from {channel}\".format(\n title=title,\n youtube_video_url=YOUTUBE_VIDEO_URL_BASE + video_id,\n channel=channel\n ), \"green\", attrs=[\"bold\"]\n )\n print(ytext)\n download(video_id)\n return video_ids\n\n\ndef ask_first(filename):\n \"\"\"\n Startet Downloads mit Nachfrage\n \"\"\"\n ask_first_channels = channel_ids(filename)\n video_ids = \"\"\n for channel_id in ask_first_channels:\n channel = channel_name(filename, channel_id)\n print(\"Looking for new videos from {channel}\".format(channel=channel))\n uploads_playlist_id = get_uploads_playlist_id(channel_id)\n sleep(0.2)\n video_ids_and_titles = get_uploads_video_ids_and_titles(uploads_playlist_id).items()\n sleep(0.2)\n for video_id, title in video_ids_and_titles:\n if video_allready_downloaded(video_id):\n pass\n else:\n video_ids += \"{}\\n\".format(video_id)\n dtext = colored(\n \"Download {title} {youtube_video_url} from {channel}? 
Type y or yes to download: \".format(\n title=title,\n youtube_video_url=YOUTUBE_VIDEO_URL_BASE + video_id,\n channel=channel\n ), \"blue\", attrs=[\"bold\"]\n )\n print(dtext)\n answer = input().strip()\n if answer == \"y\" or answer == \"yes\":\n ytext = colored(\n \"Downloading {title} {youtube_video_url} from {channel}\".format(\n title=title,\n youtube_video_url=YOUTUBE_VIDEO_URL_BASE + video_id,\n channel=channel), \"green\", attrs=[\"bold\"]\n )\n print(ytext)\n download(video_id)\n else:\n ntext = colored(\"Not downloading\", \"yellow\", attrs=[\"bold\"])\n print(ntext)\n return video_ids\n\n\nif __name__ == \"__main__\":\n start_time = datetime.now()\n\n video_ids = just_do_it(JUST_DO_IT_FILE)\n\n # Parst Kommandozeilen Argumente, falls ein \"y\" oder \"yes\" vorhanden ist,\n # werden Downloads ohne nachzufragen gestartet\n import argparse\n parser = argparse.ArgumentParser(description=\"Download all the things\")\n parser.add_argument(\n \"-y\",\n \"--yes\",\n action=\"store_true\",\n help=\"download all the things\",\n required=False,\n )\n args = parser.parse_args()\n if args.yes:\n video_ids += just_do_it(ASK_FIRST_FILE)\n else:\n video_ids += ask_first(ASK_FIRST_FILE)\n\n append_to_video_ids(video_ids)\n\n end_time = datetime.now()\n delta = end_time - start_time\n\n ftext = colored(\"*** ALL DONE --- THIS TOOK {} SECONDS ***\".format(delta.seconds), \"green\", attrs=[\"bold\"])\n print(ftext)\n","sub_path":"ytdl.py","file_name":"ytdl.py","file_ext":"py","file_size_in_byte":7902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"235409413","text":"# coding=utf-8\n\nimport base64\nfrom datetime import date\nimport helpers\n\n# ISSUER INFORMATION\nISSUER_URL = \"http://labcd.mx\"\nISSUER_CERTS_URL = \"http://certs.labcd.mx\"\nISSUER_PUBLIC_KEY_URL = \"http://certs.labcd.mx/keys/labcdmx-certs-public-key.asc\"\nISSUER_SIGNATURE_IMAGE = helpers.encode_image(\"img/signature.png\")\nISSUER_EMAIL = \"certs@labcd.mx\"\nISSUER_NAME = \"Laboratorio para la Ciudad\"\nISSUER_ID = \"http://certs.labcd.mx/issuer/labcdmx-issuer.json\"\n\n# CERTIFICATE INFORMATION\nCERTIFICATE_LANGUAGE = \"es-MX\" #LANGUAGE AND COUNTRY INFORMATION\nCERTIFICATE_DESCRIPTION = \"En reconocimiento a tu participación en \\\"ciudad prototipo\\\", un taller realizado en colaboración con el MIT Media Lab e IDEO con el propósito de explorar, compartir y prototipar nuevas maneras de acercamiento a problemas cotidianos de la ciudad de México mediante la colaboración entre estudiantes, profesionales y especialistas.\"\nCERTIFICATE_DATE = str(date(month=9, day=1, year=2015))+\"/\"+str(date(month=9, day=4, year=2015))\nCERTIFICATE_TITLE = \"Ciudad Prototipo\"\nCERTIFICATE_IMAGE = helpers.encode_image(\"img/header.png\")\nCERTIFICATE_ID = ISSUER_CERTS_URL + \"/criteria/2015/09/ciudad-prototipo.json\"\n\n# EXTENSION INFORMATION\nASSERTION_ENSORERS = [\n\t{\n \"name\": \"MIT Media Lab\",\n \"url\": \"http://media.mit.edu\",\n \"image\": helpers.encode_image(\"img/medialab.png\")\n },\n {\n \"name\": \"IDEO\",\n \"url\": \"http://ideo.com\",\n \"image\": helpers.encode_image(\"img/ideo.png\")\n }\n]\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"279739267","text":"import time\nfrom tkinter import *\nimport random\n\nimport HomePage\nimport Constants\nimport AudioPlayer\nimport SoundGeneration\n\ndef get_first_letter(num):\n 
return Constants.ALPHABETICAL_STRING[num]\n\n\ndef get_first_morse(first_letter):\n return Constants.MORSE_CODE_DICT.get(first_letter)\n\n\nclass LevelThree:\n\n def go_home(self):\n self.root.destroy()\n HomePage.home_page(HomePage.home_page.name)\n\n def play_clear_sound(self):\n AudioPlayer.start_music(\"soundFiles/space.wav\", 0.5)\n\n def play_sound(self):\n AudioPlayer.start_music(\"morseSound.wav\", 2)\n\n\n\n def check_num(self, first_letter, second_letter):\n self.entry_1.delete(0, 'end')\n\n if self.score == Constants.ALPHABET:\n return\n\n # check if the strings are equal\n if first_letter == second_letter.capitalize():\n # remove current letter from list\n self.random_26.pop(0)\n print(self.random_26)\n\n # if the score is == 26, display the next button\n if len(self.random_26) == 0:\n # show the list\n self.score = Constants.ALPHABET\n self.score_label['text'] = self.score\n self.error_msg['text'] = \"You did it!\"\n self.error_msg.config(fg=\"blue\")\n\n else:\n # self.play_clear_sound()\n # change the label and increment the score\n self.set_new_round()\n\n # play once\n\n self.error_msg['text'] = ''\n\n self.score += 1\n\n self.score_label['text'] = self.score\n\n else:\n # else if not equal, then change label to say wrong answer\n if self.error_msg['text'] == 'Wrong answer!':\n self.error_msg['text'] = 'Nope! Try Again!'\n else:\n self.error_msg['text'] = 'Wrong answer!'\n\n def set_new_round(self):\n\n # get first letter\n print(self.random_26[0])\n self.first_letter = get_first_letter(self.random_26[0])\n print(self.first_letter)\n\n self.first_morse = get_first_morse(self.first_letter)\n print(self.first_morse)\n\n SoundGeneration.clear_old_file()\n SoundGeneration.generate_wav(SoundGeneration.generate_morse_array(self.first_letter))\n\n\n\n def __init__(self):\n self.score = 0\n\n self.random_26 = list(range(0, Constants.ALPHABET))\n random.shuffle(self.random_26)\n print(self.random_26)\n\n self.set_new_round()\n\n self.root = Tk()\n self.root.geometry('500x500')\n self.root.title(\"Identify the letter\")\n\n self.root.iconbitmap('FireAnts_logo.ico')\n\n\n self.name = Label(self.root, text=HomePage.home_page.name, width=20, font=(\"bold\", 10))\n self.name.place(x=50, y=30)\n\n self.score_label1 = Label(self.root, text=\"Score\", width=10, font=(\"bold\", 10))\n self.score_label1.place(x=260, y=30)\n\n self.score_label = Label(self.root, text=self.score, width=5, font=(\"bold\", 10))\n self.score_label.place(x=320, y=30)\n\n self.morse_1 = Label(self.root, text=\"Press play to hear the sound!\", width=30, font=(\"bold\", 10))\n self.morse_1.place(x=120, y=100)\n\n self.ans_1 = Label(self.root, text=\"What letter is this?\", width=20, font=(\"bold\", 10))\n self.ans_1.place(x=100, y=190)\n\n self.entry_1 = Entry(self.root)\n self.entry_1.place(x=250, y=190)\n\n self.error_msg = Label(self.root, text=\"\", width=20, font=(\"bold\", 10))\n self.error_msg.place(x=150, y=75)\n self.error_msg.config(fg=\"red\")\n self.error_msg.configure(anchor=\"center\")\n\n self.submit = Button(self.root,\n text='Submit',\n width=9,\n bg='brown',\n fg='white',\n command=lambda: self.check_num(self.first_letter,\n self.entry_1.get())).place(x=280, y=250)\n\n self.play = Button(self.root,\n text='Play',\n width=9,\n bg='brown',\n fg='white',\n command=lambda: self.play_sound())\n self.play.place(x=210, y=145)\n\n self.back = Button(self.root,\n text='Back',\n width=9,\n bg='blue',\n fg='white',\n command=lambda: self.go_home()).place(x=140, 
y=250)\n","sub_path":"Level3.py","file_name":"Level3.py","file_ext":"py","file_size_in_byte":4620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"529498372","text":"from django.urls import path\n\nfrom . import views\n\n\nurlpatterns = [\n path('', views.index, name='index'), \n path('checkout', views.checkout, name='checkout'), \n path('cart', views.cart, name='cart'), \n path('products', views.products, name='products'), \n path('search', views.search, name='search'), \n path('products/', views.detailProduct, name='detailProduct'), \n path('category/', views.detailCate, name='detailCate'), \n path('branch/', views.detailBranch, name='detailBranch'), \n path('getListCommentByRateApi', views.getListCommentByRateApi, name='getListCommentByRateApi'),\n path('addToCartAPI', views.addToCartAPI, name='addToCartAPI'), \n path('getAmountItemApi', views.getAmountItemApi, name='getAmountItemApi'),\n path('deleteItemInCartApi', views.deleteItemInCartApi, name='deleteItemInCartApi'),\n path('updateCartApi', views.updateCartApi, name='updateCartApi'), \n path('checkoutApi', views.checkoutApi, name='checkoutApi'),\n path('login/', views.login, name='login'),\n path('purchase', views.purchase, name='purchase'),\n path('address', views.address, name='purchase'),\n path('infomationUser', views.infomationUser, name='infomationUser'),\n path('logout', views.logout, name='logout'),\n path('uploadFileApi', views.uploadFileApi, name='uploadFileApi'),\n path('registerApi', views.registerApi, name='registerApi'),\n path('loginApi', views.loginApi, name='loginApi'),\n path('forgetPassApi', views.forgetPassApi, name='forgetPassApi'),\n path('pageProductApi', views.pageProductApi, name='pageProductApi'),\n path('soldproductsApi', views.soldproductsApi, name='soldproductsApi'),\n path('hotproductsApi', views.hotproductsApi, name='hotproductsApi'),\n path('productsOrderByApi', views.productsOrderByApi, name='productsOrderByApi'),\n \n path('soldproductsSearchApi', views.soldproductsSearchApi, name='soldproductsSearchApi'),\n path('hotproductsSearchApi', views.hotproductsSearchApi, name='hotproductsSearchApi'),\n path('productsOrderBySearchApi', views.productsOrderBySearchApi, name='productsOrderBySearchApi'),\n\n\n path('categoryApi', views.categoryApi, name='categoryApi'),\n path('changeAdressApi', views.changeAdressApi, name='changeAdressApi') ,\n path('addAddressApi', views.addAddressApi, name='addAddressApi') ,\n path('updateProfileApi', views.updateProfileApi, name='updateProfileApi') ,\n path('getAdressApi', views.getAdressApi, name='getAdressApi'),\n path('updateAddressApi', views.updateAddressApi, name='updateAddressApi'),\n path('getAllBillApi', views.getAllBillApi, name='getAllBillApi'),\n path('getDetailOrderApi', views.getDetailOrderApi, name='getDetailOrderApi'),\n path('updateOrderApi', views.updateOrderApi, name='updateOrderApi'),\n path('voteApi', views.voteApi, name='voteApi'),\n\n \n path('getRecommentByIdcommentApi', views.getRecommentByIdcommentApi, name='getRecommentByIdcommentApi'),\n\n] \n\n","sub_path":"polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"576347750","text":"import sys\nfrom django.utils.translation import ugettext as _\n#bots-modules\nimport communication\nimport envelope\nimport transform\nimport botslib\nimport botsglobal\nfrom botsconfig import *\n\n@botslib.log_session\ndef 
prepareretransmit():\n ''' prepare the retransmittable files. Return: indication if files should be retransmitted.'''\n retransmit = False #indicate retransmit\n #for rereceive\n for row in botslib.query('''SELECT idta,reportidta\n FROM filereport\n WHERE retransmit=%(retransmit)s ''',\n {'retransmit':True}):\n retransmit = True\n botslib.change('''UPDATE filereport\n SET retransmit=%(retransmit)s\n WHERE idta=%(idta)s\n AND reportidta=%(reportidta)s ''',\n {'idta':row['idta'],'reportidta':row['reportidta'],'retransmit':False})\n for row2 in botslib.query('''SELECT idta\n FROM ta\n WHERE parent=%(parent)s\n AND status=%(status)s''',\n {'parent':row['idta'],\n 'status':RAWIN}):\n ta_rereceive = botslib.OldTransaction(row2['idta'])\n ta_externin = ta_rereceive.copyta(status=EXTERNIN,statust=DONE,parent=0) #inject; status is DONE so this ta is not used further\n ta_raw = ta_externin.copyta(status=RAWIN,statust=OK) #reinjected file is ready as new input\n #for resend; this one is slow. Can be improved by having a seperate list of idta to resend\n for row in botslib.query('''SELECT idta,parent\n FROM ta\n WHERE retransmit=%(retransmit)s\n AND status=%(status)s''',\n {'retransmit':True,\n 'status':EXTERNOUT}):\n retransmit = True\n ta_outgoing = botslib.OldTransaction(row['idta'])\n ta_outgoing.update(retransmit=False) #is reinjected; set retransmit back to False\n ta_resend = botslib.OldTransaction(row['parent']) #parent ta with status RAWOUT; this is where the outgoing file is kept\n ta_externin = ta_resend.copyta(status=EXTERNIN,statust=DONE,parent=0) #inject; status is DONE so this ta is not used further\n ta_raw = ta_externin.copyta(status=RAWOUT,statust=OK) #reinjected file is ready as new input\n return retransmit\n\n\n@botslib.log_session\ndef preparerecommunication():\n #for each out-communication process that went wrong:\n retransmit = False #indicate retransmit\n for row in botslib.query('''SELECT idta,tochannel\n FROM ta\n WHERE statust!=%(statust)s\n AND status=%(status)s\n AND retransmit=%(retransmit)s ''',\n {'status':PROCESS,'retransmit':True,'statust':DONE}):\n run_outgoing = botslib.OldTransaction(row['idta'])\n run_outgoing.update(retransmit=False) #set retransmit back to False\n #get rootidta of run where communication failed\n for row2 in botslib.query('''SELECT max(idta) as rootidta\n FROM ta\n WHERE script=%(script)s\n AND idta<%(thisidta)s ''',\n {'script':0,'thisidta':row['idta']}):\n rootidta = row2['rootidta']\n #get endidta of run where communication failed\n for row3 in botslib.query('''SELECT min(idta) as endidta\n FROM ta\n WHERE script=%(script)s\n AND idta>%(thisidta)s ''',\n {'script':0,'thisidta':row['idta']}):\n endidta = row3['endidta']\n if not endidta:\n endidta = sys.maxint - 1\n #reinject\n for row4 in botslib.query('''SELECT idta\n FROM ta\n WHERE idta<%(endidta)s\n AND idta>%(rootidta)s \n AND status=%(status)s \n AND statust=%(statust)s\n AND tochannel=%(tochannel)s ''',\n {'statust':OK,'status':RAWOUT,'rootidta':rootidta,'endidta':endidta,'tochannel':row['tochannel']}):\n retransmit = True\n ta_outgoing = botslib.OldTransaction(row4['idta'])\n ta_outgoing_copy = ta_outgoing.copyta(status=RAWOUT,statust=OK)\n ta_outgoing.update(statust=DONE)\n return retransmit\n\n\n@botslib.log_session\ndef prepareautomaticrecommunication():\n ''' reinjects all files for which communication failed (status = RAWOUT)\n '''\n retransmit = False #indicate retransmit\n #bots keeps track of last time automaticretrycommunication was done; reason is mainly performance\n 
startidta = max(botslib.keeptrackoflastretry('bots__automaticretrycommunication',botslib.getlastrun()),botslib.get_idta_last_error())\n #reinject\n for row4 in botslib.query('''SELECT idta\n FROM ta\n WHERE idta>%(startidta)s\n AND status=%(status)s \n AND statust=%(statust)s ''',\n {'statust':OK,'status':RAWOUT,'startidta':startidta}):\n retransmit = True\n ta_outgoing = botslib.OldTransaction(row4['idta'])\n ta_outgoing_copy = ta_outgoing.copyta(status=RAWOUT,statust=OK)\n ta_outgoing.update(statust=DONE)\n return retransmit\n\n\n@botslib.log_session\ndef prepareretry():\n ''' reinjects all files for which communication failed (status = RAWOUT)\n '''\n retransmit = False #indicate retransmit\n #bots keeps track of last time retry was done; reason is mainly performance\n startidta = max(botslib.keeptrackoflastretry('bots__retry',botslib.getlastrun()),botslib.get_idta_last_error())\n #reinject\n for row4 in botslib.query('''SELECT idta,status\n FROM ta\n WHERE idta>%(startidta)s\n AND statust=%(statust)s ''',\n {'statust':OK,'startidta':startidta}):\n retransmit = True\n ta_outgoing = botslib.OldTransaction(row4['idta'])\n ta_outgoing_copy = ta_outgoing.copyta(status=row4['status'],statust=OK)\n ta_outgoing.update(statust=DONE)\n return retransmit\n\n\n@botslib.log_session\ndef routedispatcher(routestorun,type=None):\n ''' run all route(s). '''\n if type == '--retransmit':\n if not prepareretransmit():\n return 0\n elif type == '--retrycommunication':\n if not preparerecommunication():\n return 0\n elif type == '--automaticretrycommunication':\n if not prepareautomaticrecommunication():\n return 0\n elif type == '--retry':\n if not prepareretry():\n return 0\n stuff2evaluate = botslib.getlastrun()\n botslib.set_minta4query()\n for route in routestorun:\n foundroute=False\n for routedict in botslib.query('''SELECT idroute ,\n fromchannel_id as fromchannel,\n tochannel_id as tochannel,\n fromeditype,\n frommessagetype,\n alt,\n frompartner_id as frompartner,\n topartner_id as topartner,\n toeditype,\n tomessagetype,\n seq,\n frompartner_tochannel_id,\n topartner_tochannel_id,\n testindicator,\n translateind,\n defer\n FROM routes\n WHERE idroute=%(idroute)s\n AND active=%(active)s\n ORDER BY seq''',\n {'idroute':route,'active':True}):\n botsglobal.logger.info(_(u'running route %(idroute)s %(seq)s'),{'idroute':routedict['idroute'],'seq':routedict['seq']})\n botslib.setrouteid(routedict['idroute'])\n foundroute=True\n router(routedict)\n botslib.setrouteid('')\n botsglobal.logger.debug(u'finished route %s %s',routedict['idroute'],routedict['seq'])\n if not foundroute:\n botsglobal.logger.warning(_(u'there is no (active) route \"%s\".'),route)\n return stuff2evaluate\n\n\n@botslib.log_session\ndef router(routedict):\n ''' communication.run one route. 
variants:\n - a route can be just script; \n - a route can do only incoming\n - a route can do only outgoing\n - a route can do both incoming and outgoing\n - at several points functions from a route script are called - if function is in route script\n '''\n #is there a user route script?\n try:\n botsglobal.logger.debug(u'(try) to read user routescript route \"%s\".',routedict['idroute'])\n userscript,scriptname = botslib.botsimport('routescripts',routedict['idroute'])\n except ImportError: #other errors, eg syntax errors are just passed\n userscript = scriptname = None\n \n #if user route script has function 'main': communication.run 'main' (and do nothing else)\n if botslib.tryrunscript(userscript,scriptname,'main',routedict=routedict):\n return #so: if function ' main' : communication.run only the routescript, nothing else.\n if not (userscript or routedict['fromchannel'] or routedict['tochannel'] or routedict['translateind']): \n raise botslib.ScriptError(_(u'Route \"$route\" is empty: no script, not enough parameters.'),route=routedict['idroute'])\n\n \n botslib.tryrunscript(userscript,scriptname,'start',routedict=routedict)\n \n #communication.run incoming channel\n if routedict['fromchannel']: #do incoming part of route: in-communication; set ready for translation; translate\n botslib.tryrunscript(userscript,scriptname,'preincommunication',routedict=routedict)\n communication.run(idchannel=routedict['fromchannel'],idroute=routedict['idroute']) #communication.run incommunication\n #add attributes from route to the received files\n where={'status':FILEIN,'fromchannel':routedict['fromchannel'],'idroute':routedict['idroute']}\n change={'editype':routedict['fromeditype'],'messagetype':routedict['frommessagetype'],'frompartner':routedict['frompartner'],'topartner':routedict['topartner'],'alt':routedict['alt']}\n botslib.updateinfo(change=change,where=where)\n \n #all received files have status FILEIN\n botslib.tryrunscript(userscript,scriptname,'postincommunication',routedict=routedict)\n \n #communication.run translation\n if routedict['translateind']:\n botslib.tryrunscript(userscript,scriptname,'pretranslation',routedict=routedict)\n if botslib.addinfo(change={'status':MAILBAG},where={'status':FILEIN,'idroute':routedict['idroute'],'editype':'mailbag'}):\n transform.splitmailbag(idroute=routedict['idroute'])\n botslib.addinfo(change={'status':TRANSLATE},where={'status':FILEIN,'idroute':routedict['idroute']})\n transform.translate(idroute=routedict['idroute'])\n botslib.tryrunscript(userscript,scriptname,'posttranslation',routedict=routedict)\n \n #merge messags & communication.run outgoing channel\n if routedict['tochannel']: #do outgoing part of route\n botslib.tryrunscript(userscript,scriptname,'premerge',routedict=routedict)\n envelope.mergemessages(idroute=routedict['idroute'])\n botslib.tryrunscript(userscript,scriptname,'postmerge',routedict=routedict)\n \n #communication.run outgoing channel\n #build for query: towhere (dict) and wherestring \n towhere=dict(status=MERGED,\n idroute=routedict['idroute'],\n editype=routedict['toeditype'],\n messagetype=routedict['tomessagetype'],\n testindicator=routedict['testindicator'])\n towhere=dict([(key, value) for (key, value) in towhere.iteritems() if value]) #remove nul-values from dict\n wherestring = ' AND '.join([key+'=%('+key+')s' for key in towhere])\n if routedict['frompartner_tochannel_id']: #use frompartner_tochannel in where-clause of query (partner/group dependent outchannel\n 
towhere['frompartner_tochannel_id']=routedict['frompartner_tochannel_id']\n wherestring += ''' AND (frompartner=%(frompartner_tochannel_id)s \n OR frompartner in (SELECT from_partner_id \n FROM partnergroup\n WHERE to_partner_id =%(frompartner_tochannel_id)s ))'''\n if routedict['topartner_tochannel_id']: #use topartner_tochannel in where-clause of query (partner/group dependent outchannel\n towhere['topartner_tochannel_id']=routedict['topartner_tochannel_id']\n wherestring += ''' AND (topartner=%(topartner_tochannel_id)s \n OR topartner in (SELECT from_partner_id \n FROM partnergroup\n WHERE to_partner_id=%(topartner_tochannel_id)s ))'''\n toset={'tochannel':routedict['tochannel'],'status':FILEOUT}\n botslib.addinfocore(change=toset,where=towhere,wherestring=wherestring)\n \n if not routedict['defer']: #do outgoing part of route\n botslib.tryrunscript(userscript,scriptname,'preoutcommunication',routedict=routedict)\n communication.run(idchannel=routedict['tochannel'],idroute=routedict['idroute']) #communication.run outcommunication\n botslib.tryrunscript(userscript,scriptname,'postoutcommunication',routedict=routedict)\n \n botslib.tryrunscript(userscript,scriptname,'end',routedict=routedict)\n","sub_path":"bots/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":14840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"255241952","text":"import kivy\nkivy.require('1.9.1') # replace with your current kivy version !\n\nfrom kivy.app import App\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.tabbedpanel import TabbedPanel\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.popup import Popup\nfrom kivy.clock import Clock\nfrom kivy.properties import StringProperty, NumericProperty, ObjectProperty\nfrom kivy.config import Config\nfrom math import sin\n#from kivy.garden.graph import Graph, MeshLinePlot\nfrom kivy.uix.effectwidget import EffectWidget\nfrom kivy.uix.effectwidget import InvertEffect\nfrom kivy.uix.scatterlayout import ScatterLayout\nfrom kivy.uix.scatter import Scatter\nfrom main import MainScreen, RIOData\nfrom kivy.vector import Vector\nfrom kivy.uix.widget import Widget\nfrom kivy.graphics.transformation import Matrix\nfrom math import radians\nfrom kivy.uix.image import Image\nfrom kivy.properties import BooleanProperty\nfrom kivy.animation import Animation\nimport math\n\nfrom kivy.core.window import Window\n\nWindow.size = (1920,1080)\nWindow.maximize()\nWindow.clearcolor = (1, 1, 1, 1)\n#Window.fullscreen = True\n#Config.set('graphics', 'maxfps', '10')\n\nscale_global = 0.06\npositions = [(1411.5, 428), (1460.5, 428), (1509.5, 428), (1558.5, 428)]\n\nclass MyScatterLayout(ScatterLayout):\n double_click = BooleanProperty(False)\n\n def on_touch_up( self, touch ):\n x, y = touch.x, touch.y\n # if the touch isnt on the widget we do nothing, just try children\n if not touch.grab_current == self:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if super(Scatter, self).on_touch_up(touch):\n touch.pop()\n return True\n touch.pop()\n\n # remove it from our saved touches\n if touch in self._touches and touch.grab_state:\n touch.ungrab(self)\n del self._last_touch_pos[touch]\n self._touches.remove(touch)\n\n # stop propagating if its within our bounds\n #if self.collide_point(x, y):\n # return True\n if self.double_click:\n if self.collide_point(*touch.pos):\n if touch.is_double_tap:\n if self.scale > 
scale_global:\n scale = scale_global / self.scale\n anim = Animation(scale=scale_global ** (1/30), duration=.5, s=1/30, pos = positions[int(self.id)])\n anim.start(self)\n #self.apply_transform(Matrix().scale(scale, scale, 1))\n #self.pos = positions[int(self.id)]\n else:\n scale = 1 / self.scale\n anim = Animation(scale=scale ** (1/30), duration=1, s=1/30, pos = (533, 316))\n anim.start(self)\n #self.apply_transform(Matrix().scale(scale, scale, 1))\n #self.pos = (533, 316)\n\n return super(MyScatterLayout, self).on_touch_up(touch)\n\n '''def alarm_animation(self, state):\n anim = Animation(opacity=0, duration=.5) + Animation(\n opacity=1, duration=.5)\n if state:\n anim.repeat = True\n anim.start(self.main_screen.ids.alarm_indicator)\n else:\n anim.cancel_all(self.main_screen.ids.alarm_indicator)\n self.main_screen.ids.alarm_indicator.opacity = 0\n self.alarm_state = state'''\n\n def transform_with_touch(self, touch):\n # just do a simple one finger drag\n changed = False\n if len(self._touches) == self.translation_touches:\n # _last_touch_pos has last pos in correct parent space,\n # just like incoming touch\n dx = (touch.x - self._last_touch_pos[touch][0]) \\\n * self.do_translation_x\n dy = (touch.y - self._last_touch_pos[touch][1]) \\\n * self.do_translation_y\n dx = dx / self.translation_touches\n dy = dy / self.translation_touches\n self.apply_transform(Matrix().translate(dx, dy, 0))\n changed = True\n\n if len(self._touches) == 1:\n return changed\n\n # we have more than one touch... list of last known pos\n points = [Vector(self._last_touch_pos[t]) for t in self._touches\n if t is not touch]\n # add current touch last\n points.append(Vector(touch.pos))\n\n # we only want to transform if the touch is part of the two touches\n # farthest apart! So first we find anchor, the point to transform\n # around as another touch farthest away from current touch's pos\n anchor = max(points[:-1], key=lambda p: p.distance(touch.pos))\n\n # now we find the touch farthest away from anchor, if its not the\n # same as touch. 
Touch is not one of the two touches used to transform\n farthest = max(points, key=anchor.distance)\n if farthest is not points[-1]:\n return changed\n\n # ok, so we have touch, and anchor, so we can actually compute the\n # transformation\n old_line = Vector(*touch.ppos) - anchor\n new_line = Vector(*touch.pos) - anchor\n if not old_line.length(): # div by zero\n return changed\n\n angle = radians(new_line.angle(old_line)) * self.do_rotation\n self.apply_transform(Matrix().rotate(angle, 0, 0, 1), anchor=anchor)\n\n if self.do_scale:\n scale = new_line.length() / old_line.length()\n new_scale = scale * self.scale\n if new_scale < self.scale_min:\n scale = self.scale_min / self.scale\n elif new_scale > self.scale_max:\n scale = self.scale_max / self.scale\n self.apply_transform(Matrix().scale(scale, scale, scale),\n anchor=anchor)\n changed = True\n return changed\n\nclass MyFloatLayout(FloatLayout):\n pass\n\nclass MainApp(App):\n passcode = '1234'\n passcode_try = ''\n logged_in = NumericProperty(0)\n\n def build(self):\n self.rio_data = RIOData()\n main_layout = MyFloatLayout()\n image_scatter = MyScatterLayout(do_rotation=False)\n image_scatter.add_widget(Image(source='lab_layout.png', double_click = False))\n main_layout.add_widget(image_scatter)\n for i in range(4):\n tank_layout = BoxLayout(id = 'float_' + str(i), orientation='vertical', padding=25)\n tank_layout.add_widget(Label(text='Tank ' + str(i), size_hint=(1, .1), font_size='30sp', color=(0,0,0,1)))\n tank_layout.add_widget(MainScreen(size_hint=(1, .9)))\n tank_scatter = MyScatterLayout(id=str(i), do_rotation=False, size=(800, 528), size_hint=(None, None), scale=scale_global, pos=positions[i], double_click = True, on_scale=lambda scale: self.apply_transform(Matrix().scale(self.scale, self.scale, 1)))\n tank_scatter.add_widget(tank_layout)\n image_scatter.add_widget(tank_scatter)\n\n return main_layout\n\n\nMainApp().run()\n\n","sub_path":"smart_tank_scatter.py","file_name":"smart_tank_scatter.py","file_ext":"py","file_size_in_byte":7085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"322051354","text":"'''\nCreated on Jul 28, 2016\n\n@author: jvazquez\n'''\nimport logging\n\nfrom os import getenv\nfrom os.path import exists\n\nfrom utils.helpers.log import setup_logging\n\nfrom configparser import RawConfigParser\n\nlogger = logging.getLogger(__name__)\n\n\nclass ConfiguratorReader(object):\n \"\"\"\n ConfiguratorReader is a simple wrapper for the\n RawConfigParser that we use.\n The only purpose of this object, is to obtain the\n configured RawConfigParser and help the client\n to obtain the selected environment.\n An environment is a configuration option that\n is used in the application, for example\n testing, development, production.\n The values between those environments may by different.\n \"\"\"\n\n def __init__(self):\n self.selected_env = getenv(\"MODE\", None)\n self._parser = None\n\n def get_config_parser(self, config_filename=None):\n logger.debug(\"We open {}\".format(config_filename))\n\n if config_filename is None:\n logger.warning(\"Trying to load configuration path \"\n \"from environmental \"\n \"variable CONFIGURATOR\")\n config_filename = getenv(\"CONFIGURATOR\", None)\n if config_filename is None:\n raise IOError(\"No configuration filename detected\")\n\n if exists(config_filename) is False:\n raise IOError(\"Unexistent configuration file \"\n \"selected\")\n\n self._parser = RawConfigParser()\n self._parser.read(config_filename)\n return 
self._parser\n\n def parser_env_detection(self):\n if self.selected_env is None:\n return self._parser.get(\"app\", \"default_environment_mode\")\n return self.selected_env\n","sub_path":"utils/helpers/configuration_reader.py","file_name":"configuration_reader.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"592895100","text":"# -*- coding: utf-8\n\n\"\"\"Module of class ORCEvaporator.\n\nThis file is part of project TESPy (github.com/oemof/tespy). It's copyrighted\nby the contributors recorded in the version control history of the file,\navailable from its original location tespy/components/customs/orc_evaporator.py\n\nSPDX-License-Identifier: MIT\n\"\"\"\nimport warnings\n\nimport numpy as np\n\nfrom tespy.components.component import Component\nfrom tespy.tools.data_containers import ComponentProperties as dc_cp\nfrom tespy.tools.data_containers import SimpleDataContainer as dc_simple\nfrom tespy.tools.document_models import generate_latex_eq\nfrom tespy.tools.fluid_properties import dh_mix_dpQ\nfrom tespy.tools.fluid_properties import h_mix_pQ\nfrom tespy.tools.fluid_properties import h_mix_pT\nfrom tespy.tools.fluid_properties import s_mix_ph\n\n\nclass ORCEvaporator(Component):\n r\"\"\"\n Evaporator of the geothermal Organic Rankine Cycle (ORC).\n\n Generally, the hot side of the geo-fluid from the geothermal wells deliver\n two phases: steam and brine. In order to fully use the energy of the\n geo-fluid, there are 2 inlets at the hot side.\n\n The ORC evaporator represents counter current evaporators. Both, two hot\n and one cold side of the evaporator, are simulated.\n\n **Mandatory Equations**\n\n - :py:meth:`tespy.components.component.Component.fluid_func`\n - :py:meth:`tespy.components.component.Component.mass_flow_func`\n - :py:meth:`tespy.components.customs.orc_evaporator.ORCEvaporator.energy_balance_func`\n - steam side outlet state, function can be disabled by specifying\n :code:`set_attr(subcooling=True)`\n :py:meth:`tespy.components.customs.orc_evaporator.ORCEvaporator.subcooling_func`\n - working fluid outlet state, function can be disabled by specifying\n :code:`set_attr(overheating=True)`\n :py:meth:`tespy.components.customs.orc_evaporator.ORCEvaporator.overheating_func`\n\n **Optional Equations**\n\n - :py:meth:`tespy.components.customs.orc_evaporator.ORCEvaporator.energy_balance_cold_func`\n - hot side steam :py:meth:`tespy.components.component.Component.pr_func`\n - hot side brine :py:meth:`tespy.components.component.Component.pr_func`\n - worling fluid :py:meth:`tespy.components.component.Component.pr_func`\n - hot side steam :py:meth:`tespy.components.component.Component.zeta_func`\n - hot side brine :py:meth:`tespy.components.component.Component.zeta_func`\n - worling fluid :py:meth:`tespy.components.component.Component.zeta_func`\n\n\n Inlets/Outlets\n\n - in1, in2, in3 (index 1: steam from geothermal heat source,\n index 2: brine from geothermal heat source,\n index 3: working fluid of being evaporated)\n - out1, out2, out3 (index 1: steam from geothermal heat source,\n index 2: brine from geothermal heat source,\n index 3: working fluid of being evaporated)\n\n Image\n\n .. 
image:: _images/ORCEvaporator.svg\n       :alt: alternative text\n       :align: center\n\n    Parameters\n    ----------\n    label : str\n        The label of the component.\n\n    design : list\n        List containing design parameters (stated as String).\n\n    offdesign : list\n        List containing offdesign parameters (stated as String).\n\n    design_path : str\n        Path to the components design case.\n\n    local_offdesign : boolean\n        Treat this component in offdesign mode in a design calculation.\n\n    local_design : boolean\n        Treat this component in design mode in an offdesign calculation.\n\n    char_warnings : boolean\n        Ignore warnings on default characteristics usage for this component.\n\n    printout : boolean\n        Include this component in the network's results printout.\n\n    Q : float, dict\n        Heat transfer, :math:`Q/\\text{W}`.\n\n    pr1 : float, dict, :code:`\"var\"`\n        Outlet to inlet pressure ratio at hot side 1 (steam),\n        :math:`pr/1`.\n\n    pr2 : float, dict, :code:`\"var\"`\n        Outlet to inlet pressure ratio at hot side 2 (brine),\n        :math:`pr/1`.\n\n    pr3 : float, dict, :code:`\"var\"`\n        Outlet to inlet pressure ratio at cold side (working fluid),\n        :math:`pr/1`.\n\n    zeta1 : float, dict, :code:`\"var\"`\n        Geometry independent friction coefficient at hot side 1 (steam),\n        :math:`\\frac{\\zeta}{D^4}/\\frac{1}{\\text{m}^4}`.\n\n    zeta2 : float, dict, :code:`\"var\"`\n        Geometry independent friction coefficient at hot side 2 (brine),\n        :math:`\\frac{\\zeta}{D^4}/\\frac{1}{\\text{m}^4}`.\n\n    zeta3 : float, dict, :code:`\"var\"`\n        Geometry independent friction coefficient at cold side (working fluid),\n        :math:`\\frac{\\zeta}{D^4}/\\frac{1}{\\text{m}^4}`.\n\n    subcooling : boolean\n        Enable/disable subcooling at outlet of the hot side 1,\n        default value: disabled (False).\n\n    overheating : boolean\n        Enable/disable overheating at outlet of the cold side,\n        default value: disabled (False).\n\n    Note\n    ----\n    The ORC evaporator has an additional equation for enthalpy at the outlet of\n    the geothermal steam: The fluid leaves the component in saturated liquid\n    state. If :code:`subcooling` is activated (:code:`True`), it is possible to\n    specify the enthalpy at the outgoing connection manually.\n\n    Additionally, an equation for enthalpy at the outlet of the working fluid\n    is set: It leaves the component in saturated gas state. If\n    :code:`overheating` is enabled (:code:`True`), it is possible to specify\n    the enthalpy at the outgoing connection manually.\n\n    Example\n    -------\n    A two-phase geo-fluid is used as the heat source for evaporating the\n    working fluid. 
We calculate the mass flow of the working fluid with known\n    steam and brine mass flow.\n\n    >>> from tespy.components import Source, Sink, ORCEvaporator\n    >>> from tespy.connections import Connection\n    >>> from tespy.networks import Network\n    >>> fluids = ['water', 'Isopentane']\n    >>> nw = Network(fluids=fluids, iterinfo=False)\n    >>> nw.set_attr(p_unit='bar', T_unit='C', h_unit='kJ / kg')\n    >>> evaporator = ORCEvaporator('geothermal orc evaporator')\n    >>> evaporator.component()\n    'orc evaporator'\n    >>> source_wf = Source('working fluid source')\n    >>> sink_wf = Sink('working fluid sink')\n    >>> source_s = Source('steam source')\n    >>> source_b = Source('brine source')\n    >>> sink_s = Sink('steam sink')\n    >>> sink_b = Sink('brine sink')\n    >>> eva_wf_in = Connection(source_wf, 'out1', evaporator, 'in3')\n    >>> eva_wf_out = Connection(evaporator, 'out3', sink_wf, 'in1')\n    >>> eva_steam_in = Connection(source_s, 'out1', evaporator, 'in1')\n    >>> eva_sink_s = Connection(evaporator, 'out1', sink_s, 'in1')\n    >>> eva_brine_in = Connection(source_b, 'out1', evaporator, 'in2')\n    >>> eva_sink_b = Connection(evaporator, 'out2', sink_b, 'in1')\n    >>> nw.add_conns(eva_wf_in, eva_wf_out)\n    >>> nw.add_conns(eva_steam_in, eva_sink_s)\n    >>> nw.add_conns(eva_brine_in, eva_sink_b)\n\n    The ORC working fluid leaves the evaporator in saturated steam state, the\n    geothermal steam leaves the component in saturated liquid state. We impose\n    the state of geothermal steam and brine with the corresponding mass flow as\n    well as the working fluid's state at the evaporator inlet. The pressure\n    ratio is specified for each of the three streams.\n\n    >>> evaporator.set_attr(pr1=0.95, pr2=0.98, pr3=0.99)\n    >>> eva_wf_in.set_attr(T=111, p=11,\n    ...     fluid={'water': 0, 'Isopentane': 1})\n    >>> eva_steam_in.set_attr(T=147, p=4.3, m=20,\n    ...     fluid={'water': 1, 'Isopentane': 0})\n    >>> eva_brine_in.set_attr(T=147, p=10.2, m=190,\n    ... 
fluid={'water': 1, 'Isopentane': 0})\n >>> eva_sink_b.set_attr(T=117)\n >>> nw.solve(mode='design')\n\n Check the state of the steam and working fluid outlet:\n\n >>> eva_wf_out.x.val\n 1.0\n >>> eva_sink_s.x.val\n 0.0\n \"\"\"\n\n def __init__(self, label, **kwargs):\n super().__init__(label, **kwargs)\n msg = \"The component ORCEvaporator will be depricated with the next major release.\"\n warnings.warn(msg, DeprecationWarning)\n\n @staticmethod\n def component():\n return 'orc evaporator'\n\n def get_variables(self):\n return {\n 'Q': dc_cp(\n max_val=0, num_eq=1, latex=self.energy_balance_cold_func_doc,\n func=self.energy_balance_cold_func,\n deriv=self.energy_balance_cold_deriv),\n 'pr1': dc_cp(\n min_val=1e-4, max_val=1, num_eq=1, deriv=self.pr_deriv,\n latex=self.pr_func_doc,\n func=self.pr_func, func_params={'pr': 'pr1'}),\n 'pr2': dc_cp(\n min_val=1e-4, max_val=1, num_eq=1, latex=self.pr_func_doc,\n deriv=self.pr_deriv, func=self.pr_func,\n func_params={'pr': 'pr2', 'inconn': 1, 'outconn': 1}),\n 'pr3': dc_cp(\n min_val=1e-4, max_val=1, num_eq=1, latex=self.pr_func_doc,\n deriv=self.pr_deriv, func=self.pr_func,\n func_params={'pr': 'pr3', 'inconn': 2, 'outconn': 2}),\n 'zeta1': dc_cp(\n min_val=0, max_val=1e15, num_eq=1, latex=self.zeta_func_doc,\n deriv=self.zeta_deriv, func=self.zeta_func,\n func_params={'zeta': 'zeta1'}),\n 'zeta2': dc_cp(\n min_val=0, max_val=1e15, num_eq=1, latex=self.zeta_func_doc,\n deriv=self.zeta_deriv, func=self.zeta_func,\n func_params={'zeta': 'zeta2', 'inconn': 1, 'outconn': 1}),\n 'zeta3': dc_cp(\n min_val=0, max_val=1e15, num_eq=1, latex=self.zeta_func_doc,\n deriv=self.zeta_deriv, func=self.zeta_func,\n func_params={'zeta': 'zeta3', 'inconn': 2, 'outconn': 2}),\n 'subcooling': dc_simple(\n val=False, num_eq=1, latex=self.subcooling_func_doc,\n deriv=self.subcooling_deriv, func=self.subcooling_func),\n 'overheating': dc_simple(\n val=False, num_eq=1, latex=self.overheating_func_doc,\n deriv=self.overheating_deriv, func=self.overheating_func)\n }\n\n def get_mandatory_constraints(self):\n return {\n 'mass_flow_constraints': {\n 'func': self.mass_flow_func, 'deriv': self.mass_flow_deriv,\n 'constant_deriv': True, 'latex': self.mass_flow_func_doc,\n 'num_eq': 3},\n 'fluid_constraints': {\n 'func': self.fluid_func, 'deriv': self.fluid_deriv,\n 'constant_deriv': True, 'latex': self.fluid_func_doc,\n 'num_eq': self.num_nw_fluids * 3},\n 'energy_balance_constraints': {\n 'func': self.energy_balance_func,\n 'deriv': self.energy_balance_deriv,\n 'constant_deriv': False, 'latex': self.energy_balance_func_doc,\n 'num_eq': 1}\n }\n\n @staticmethod\n def inlets():\n return ['in1', 'in2', 'in3']\n\n @staticmethod\n def outlets():\n return ['out1', 'out2', 'out3']\n\n def preprocess(self, nw):\n\n self.overheating.is_set = not self.overheating.val\n self.subcooling.is_set = not self.subcooling.val\n super().preprocess(nw)\n\n def energy_balance_func(self):\n r\"\"\"\n Equation for heat exchanger energy balance.\n\n Returns\n -------\n residual : float\n Residual value of equation.\n\n .. 
math::\n\n \\begin{split}\n 0 = &\n \\dot{m}_{in,1} \\cdot \\left(h_{out,1} - h_{in,1} \\right) \\\\\n &+ \\dot{m}_{in,2} \\cdot \\left(h_{out,2} - h_{in,2} \\right) \\\\\n &+ \\dot{m}_{in,3} \\cdot \\left(h_{out,3} - h_{in,3} \\right)\n \\end{split}\n \"\"\"\n return (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) +\n self.inl[1].m.val_SI * (\n self.outl[1].h.val_SI - self.inl[1].h.val_SI) +\n self.inl[2].m.val_SI * (\n self.outl[2].h.val_SI - self.inl[2].h.val_SI))\n\n def energy_balance_func_doc(self, label):\n r\"\"\"\n Equation for heat exchanger energy balance.\n\n Parameters\n ----------\n label : str\n Label for equation.\n\n Returns\n -------\n latex : str\n LaTeX code of equations applied.\n \"\"\"\n latex = (\n r'\\begin{split}' + '\\n'\n r'0 = &' + '\\n'\n r'\\dot{m}_\\mathrm{in,1}\\cdot\\left(h_\\mathrm{out,1}-'\n r'h_\\mathrm{in,1}\\right) \\\\' + '\\n'\n r'&+ \\dot{m}_\\mathrm{in,2} \\cdot \\left(h_\\mathrm{out,2} - '\n r'h_\\mathrm{in,2} \\right)\\\\' + '\\n'\n r'&+ \\dot{m}_\\mathrm{in,3} \\cdot \\left(h_\\mathrm{out,3} - '\n r'h_\\mathrm{in,3} \\right)' + '\\n'\n r'\\end{split}')\n return generate_latex_eq(self, latex, latex)\n\n def energy_balance_deriv(self, increment_filter, k):\n \"\"\"\n Calculate partial derivatives of energy balance function.\n\n Parameters\n ----------\n increment_filter : ndarray\n Matrix for filtering non-changing variables.\n\n k : int\n Position of derivatives in Jacobian matrix (k-th equation).\n \"\"\"\n for i in range(3):\n self.jacobian[k, i, 0] = (\n self.outl[i].h.val_SI - self.inl[i].h.val_SI)\n self.jacobian[k, i, 2] = -self.inl[i].m.val_SI\n self.jacobian[k, i + 3, 2] = self.inl[i].m.val_SI\n k += 1\n\n def energy_balance_cold_func(self):\n r\"\"\"\n Equation for cold side heat exchanger energy balance.\n\n Returns\n -------\n residual : float\n Residual value of equation.\n\n .. math::\n\n 0 =\\dot{m}_{in,3} \\cdot \\left(h_{out,3}-h_{in,3}\\right)+\\dot{Q}\n \"\"\"\n return self.inl[2].m.val_SI * (\n self.outl[2].h.val_SI - self.inl[2].h.val_SI) + self.Q.val\n\n def energy_balance_cold_func_doc(self, label):\n r\"\"\"\n Equation for cold side heat exchanger energy balance.\n\n Parameters\n ----------\n label : str\n Label for equation.\n\n Returns\n -------\n latex : str\n LaTeX code of equations applied.\n \"\"\"\n latex = (\n r'0 =\\dot{m}_{in,3} \\cdot \\left(h_{out,3}-'\n r'h_{in,3}\\right)+\\dot{Q}')\n return [generate_latex_eq(self, latex, label)]\n\n def energy_balance_cold_deriv(self, increment_filter, k):\n \"\"\"\n Partial derivatives for cold side energy balance.\n\n Parameters\n ----------\n increment_filter : ndarray\n Matrix for filtering non-changing variables.\n\n k : int\n Position of derivatives in Jacobian matrix (k-th equation).\n \"\"\"\n self.jacobian[k, 2, 0] = self.outl[2].h.val_SI - self.inl[2].h.val_SI\n self.jacobian[k, 2, 2] = -self.inl[2].m.val_SI\n self.jacobian[k, 5, 2] = self.inl[2].m.val_SI\n\n def subcooling_func(self):\n r\"\"\"\n Equation for steam side outlet state.\n\n Returns\n -------\n residual : float\n Residual value of equation.\n\n .. 
math::\n\n 0=h_{out,1} -h\\left(p_{out,1}, x=0 \\right)\n\n Note\n ----\n This equation is applied in case subcooling is False!\n \"\"\"\n return self.outl[0].h.val_SI - h_mix_pQ(self.outl[0].get_flow(), 0)\n\n def subcooling_func_doc(self, label):\n r\"\"\"\n Equation for steam side outlet state.\n\n Parameters\n ----------\n label : str\n Label for equation.\n\n Returns\n -------\n latex : str\n LaTeX code of equations applied.\n \"\"\"\n latex = r'0=h_\\mathrm{out,1} -h\\left(p_\\mathrm{out,1}, x=0 \\right)'\n return generate_latex_eq(self, latex, label)\n\n def subcooling_deriv(self, increment_filter, k):\n \"\"\"\n Calculate partial derivatives for steam side outlet state.\n\n Parameters\n ----------\n increment_filter : ndarray\n Matrix for filtering non-changing variables.\n\n k : int\n Position of derivatives in Jacobian matrix (k-th equation).\n \"\"\"\n self.jacobian[k, 3, 1] = -dh_mix_dpQ(self.outl[0].get_flow(), 0)\n self.jacobian[k, 3, 2] = 1\n\n def overheating_func(self):\n r\"\"\"\n Equation for cold side outlet state.\n\n Returns\n -------\n residual : float\n Residual value of equation.\n\n .. math::\n\n 0=h_{out,3} -h\\left(p_{out,3}, x=1 \\right)\n\n Note\n ----\n This equation is applied in case overheating is False!\n \"\"\"\n return self.outl[2].h.val_SI - h_mix_pQ(self.outl[2].get_flow(), 1)\n\n def overheating_func_doc(self, label):\n r\"\"\"\n Equation for cold side outlet state.\n\n Parameters\n ----------\n label : str\n Label for equation.\n \"\"\"\n latex = r'0=h_\\mathrm{out,3} -h\\left(p_\\mathrm{out,3}, x=1 \\right)'\n return generate_latex_eq(self, latex, label)\n\n def overheating_deriv(self, increment_filter, k):\n \"\"\"\n Calculate partial derivatives for cold side outlet state.\n\n Parameters\n ----------\n increment_filter : ndarray\n Matrix for filtering non-changing variables.\n\n k : int\n Position of derivatives in Jacobian matrix (k-th equation).\n \"\"\"\n self.jacobian[k, 5, 1] = -dh_mix_dpQ(self.outl[0].get_flow(), 0)\n self.jacobian[k, 5, 2] = 1\n\n def bus_func(self, bus):\n r\"\"\"\n Calculate the value of the bus function.\n\n Parameters\n ----------\n bus : tespy.connections.bus.Bus\n TESPy bus object.\n\n Returns\n -------\n val : float\n Value of energy transfer :math:`\\dot{E}`. This value is passed to\n :py:meth:`tespy.components.component.Component.calc_bus_value`\n for value manipulation according to the specified characteristic\n line of the bus.\n\n .. 
math::\n\n \\dot{E} = -\\dot{m}_{in,3} \\cdot \\left(\n h_{out,3} - h_{in,3} \\right)\n \"\"\"\n return -self.inl[2].m.val_SI * (\n self.outl[2].h.val_SI - self.inl[2].h.val_SI)\n\n def bus_func_doc(self, bus):\n r\"\"\"\n Return LaTeX string of the bus function.\n\n Parameters\n ----------\n bus : tespy.connections.bus.Bus\n TESPy bus object.\n\n Returns\n -------\n latex : str\n LaTeX string of bus function.\n \"\"\"\n return (\n r'-\\dot{m}_\\mathrm{in,3} \\cdot \\left(h_\\mathrm{out,3} - '\n r'h_\\mathrm{in,3} \\right)')\n\n def bus_deriv(self, bus):\n r\"\"\"\n Calculate the matrix of partial derivatives of the bus function.\n\n Parameters\n ----------\n bus : tespy.connections.bus.Bus\n TESPy bus object.\n\n Returns\n -------\n deriv : ndarray\n Matrix of partial derivatives.\n \"\"\"\n deriv = np.zeros((1, 6, self.num_nw_vars))\n f = self.calc_bus_value\n deriv[0, 2, 0] = self.numeric_deriv(f, 'm', 2, bus=bus)\n deriv[0, 2, 2] = self.numeric_deriv(f, 'h', 2, bus=bus)\n deriv[0, 5, 2] = self.numeric_deriv(f, 'h', 5, bus=bus)\n return deriv\n\n def initialise_source(self, c, key):\n r\"\"\"\n Return a starting value for pressure and enthalpy at outlet.\n\n Parameters\n ----------\n c : tespy.connections.connection.Connection\n Connection to perform initialisation on.\n\n key : str\n Fluid property to retrieve.\n\n Returns\n -------\n val : float\n Starting value for pressure/enthalpy in SI units.\n\n .. math::\n\n val = \\begin{cases}\n 10 \\cdot 10^5 & \\text{key = 'p'}\\\\\n h\\left(p, 473.15 \\text{K} \\right) &\n \\text{key = 'h' at outlet 1}\\\\\n h\\left(p, 473.15 \\text{K} \\right) &\n \\text{key = 'h' at outlet 2}\\\\\n h\\left(p, 523.15 \\text{K} \\right) &\n \\text{key = 'h' at outlet 3}\n \\end{cases}\n \"\"\"\n if key == 'p':\n return 10e5\n elif key == 'h':\n if c.source_id == 'out1':\n T = 200 + 273.15\n return h_mix_pT(c.get_flow(), T)\n elif c.source_id == 'out2':\n T = 200 + 273.15\n return h_mix_pT(c.get_flow(), T)\n else:\n T = 250 + 273.15\n return h_mix_pT(c.get_flow(), T)\n\n def initialise_target(self, c, key):\n r\"\"\"\n Return a starting value for pressure and enthalpy at inlet.\n\n Parameters\n ----------\n c : tespy.connections.connection.Connection\n Connection to perform initialisation on.\n\n key : str\n Fluid property to retrieve.\n\n Returns\n -------\n val : float\n Starting value for pressure/enthalpy in SI units.\n\n .. 
math::\n\n val = \\begin{cases}\n 10 \\cdot 10^5 & \\text{key = 'p'}\\\\\n h\\left(p, 573.15 \\text{K} \\right) &\n \\text{key = 'h' at inlet 1}\\\\\n h\\left(p, 573.15 \\text{K} \\right) &\n \\text{key = 'h' at inlet 2}\\\\\n h\\left(p, 493.15 \\text{K} \\right) &\n \\text{key = 'h' at inlet 3}\n \\end{cases}\n \"\"\"\n if key == 'p':\n return 10e5\n elif key == 'h':\n if c.target_id == 'in1':\n T = 300 + 273.15\n return h_mix_pT(c.get_flow(), T)\n elif c.target_id == 'in2':\n T = 300 + 273.15\n return h_mix_pT(c.get_flow(), T)\n else:\n T = 220 + 273.15\n return h_mix_pT(c.get_flow(), T)\n\n def calc_parameters(self):\n r\"\"\"Postprocessing parameter calculation.\"\"\"\n # component parameters\n self.Q.val = -self.inl[2].m.val_SI * (\n self.outl[2].h.val_SI - self.inl[2].h.val_SI)\n # pressure ratios and zeta values\n for i in range(3):\n self.get_attr('pr' + str(i + 1)).val = (\n self.outl[i].p.val_SI / self.inl[i].p.val_SI)\n self.get_attr('zeta' + str(i + 1)).val = (\n (self.inl[i].p.val_SI - self.outl[i].p.val_SI) * np.pi ** 2 / (\n 4 * self.inl[i].m.val_SI ** 2 *\n (self.inl[i].vol.val_SI + self.outl[i].vol.val_SI)\n ))\n\n def entropy_balance(self):\n r\"\"\"\n Calculate entropy balance of the two-phase orc evaporator.\n\n The allocation of the entropy streams due to heat exchanged and due to\n irreversibility is performed by solving for T on all sides of the heat\n exchanger:\n\n .. math::\n\n h_\\mathrm{out} - h_\\mathrm{in} = \\int_\\mathrm{in}^\\mathrm{out} v\n \\cdot dp - \\int_\\mathrm{in}^\\mathrm{out} T \\cdot ds\n\n As solving :math:`\\int_\\mathrm{in}^\\mathrm{out} v \\cdot dp` for non\n isobaric processes would require perfect process knowledge (the path)\n on how specific volume and pressure change throught the component, the\n heat transfer is splitted into three separate virtual processes:\n\n - in->in*: decrease pressure to\n :math:`p_\\mathrm{in*}=p_\\mathrm{in}\\cdot\\sqrt{\\frac{p_\\mathrm{out}}{p_\\mathrm{in}}}`\n without changing enthalpy.\n - in*->out* transfer heat without changing pressure.\n :math:`h_\\mathrm{out*}-h_\\mathrm{in*}=h_\\mathrm{out}-h_\\mathrm{in}`\n - out*->out decrease pressure to outlet pressure :math:`p_\\mathrm{out}`\n without changing enthalpy.\n\n Note\n ----\n The entropy balance makes the follwing parameter available:\n\n .. 
math::\n\n \\text{S\\_Q1}=\\dot{m} \\cdot \\left(s_\\mathrm{out*,1}-s_\\mathrm{in*,1}\n \\right)\\\\\n \\text{S\\_Q2}=\\dot{m} \\cdot \\left(s_\\mathrm{out*,2}-s_\\mathrm{in*,2}\n \\right)\\\\\n \\text{S\\_Q3}=\\dot{m} \\cdot \\left(s_\\mathrm{out*,3}-s_\\mathrm{in*,3}\n \\right)\\\\\n \\text{S\\_Qirr}=\\text{S\\_Q3} - \\text{S\\_Q1} - \\text{S\\_Q2}\\\\\n \\text{S\\_irr1}=\\dot{m} \\cdot \\left(s_\\mathrm{out,1}-s_\\mathrm{in,1}\n \\right) - \\text{S\\_Q1}\\\\\n \\text{S\\_irr2}=\\dot{m} \\cdot \\left(s_\\mathrm{out,2}-s_\\mathrm{in,2}\n \\right) - \\text{S\\_Q2}\\\\\n \\text{S\\_irr3}=\\dot{m} \\cdot \\left(s_\\mathrm{out,3}-s_\\mathrm{in,3}\n \\right) - \\text{S\\_Q3}\\\\\n \\text{S\\_irr}=\\sum \\dot{S}_\\mathrm{irr}\\\\\n \\text{T\\_mQ1}=\\frac{\\dot{Q}_1}{\\text{S\\_Q1}}\\\\\n \\text{T\\_mQ2}=\\frac{\\dot{Q}_2}{\\text{S\\_Q2}}\\\\\n \\text{T\\_mQ3}=\\frac{\\dot{Q}_1 + \\dot{Q}_2}{\\text{S\\_Q3}}\n \"\"\"\n self.S_irr = 0\n for i in range(3):\n inl = self.inl[i]\n out = self.outl[i]\n p_star = inl.p.val_SI * (\n self.get_attr('pr' + str(i + 1)).val) ** 0.5\n s_i_star = s_mix_ph(\n [0, p_star, inl.h.val_SI, inl.fluid.val], T0=inl.T.val_SI)\n s_o_star = s_mix_ph(\n [0, p_star, out.h.val_SI, out.fluid.val], T0=out.T.val_SI)\n\n setattr(self, 'S_Q' + str(i + 1),\n inl.m.val_SI * (s_o_star - s_i_star))\n S_Q = self.get_attr('S_Q' + str(i + 1))\n setattr(self, 'S_irr' + str(i + 1),\n inl.m.val_SI * (out.s.val_SI - inl.s.val_SI) - S_Q)\n setattr(self, 'T_mQ' + str(i + 1),\n inl.m.val_SI * (out.h.val_SI - inl.h.val_SI) / S_Q)\n\n self.S_irr += self.get_attr('S_irr' + str(i + 1))\n\n self.S_irr += self.S_Q1 + self.S_Q2 + self.S_Q3\n\n def get_plotting_data(self):\n \"\"\"Generate a dictionary containing FluProDia plotting information.\n\n Returns\n -------\n data : dict\n A nested dictionary containing the keywords required by the\n :code:`calc_individual_isoline` method of the\n :code:`FluidPropertyDiagram` class. First level keys are the\n connection index ('in1' -> 'out1', therefore :code:`1` etc.).\n \"\"\"\n return {\n i + 1: {\n 'isoline_property': 'p',\n 'isoline_value': self.inl[i].p.val,\n 'isoline_value_end': self.outl[i].p.val,\n 'starting_point_property': 'v',\n 'starting_point_value': self.inl[i].vol.val,\n 'ending_point_property': 'v',\n 'ending_point_value': self.outl[i].vol.val\n } for i in range(3)}\n","sub_path":"src/tespy/components/customs/orc_evaporator.py","file_name":"orc_evaporator.py","file_ext":"py","file_size_in_byte":26829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"7828467","text":"# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University\n# Berlin, 14195 Berlin, Germany.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation and/or\n# other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n__author__ = 'noe'\n\nimport numpy as np\n\nfrom .transformer import Transformer\n\nfrom pyemma.util.annotators import doc_inherit\nfrom pyemma.util.progressbar import ProgressBar\nfrom pyemma.util.progressbar.gui import show_progressbar\n\n__all__ = ['PCA']\n\n\nclass PCA(Transformer):\n\n r\"\"\"Principal component analysis.\n\n Given a sequence of multivariate data :math:`X_t`,\n computes the mean-free covariance matrix.\n\n .. math:: C = (X - \\mu)^T (X - \\mu)\n\n and solves the eigenvalue problem\n\n .. math:: C r_i = \\sigma_i r_i,\n\n where :math:`r_i` are the principal components and :math:`\\sigma_i` are\n their respective variances.\n\n When used as a dimension reduction method, the input data is projected onto\n the dominant principal components.\n\n Parameters\n ----------\n output_dimension : int\n number of principal components to project onto\n\n \"\"\"\n\n def __init__(self, output_dimension):\n super(PCA, self).__init__()\n self._output_dimension = output_dimension\n self._dot_prod_tmp = None\n self.Y = None\n\n self._progress_mean = None\n self._progress_cov = None\n\n @doc_inherit\n def describe(self):\n return \"[PCA, output dimension = %i]\" % self._output_dimension\n\n def dimension(self):\n \"\"\"\n Returns the number of output dimensions\n\n :return:\n \"\"\"\n return self._output_dimension\n\n @doc_inherit\n def _get_constant_memory(self):\n \"\"\"Returns the constant memory requirements, in bytes.\"\"\"\n # memory for mu, C, v, R\n dim = self.data_producer.dimension()\n\n cov_elements = dim ** 2\n mu_elements = dim\n\n v_elements = dim\n R_elements = cov_elements\n\n return 8 * (cov_elements + mu_elements + v_elements + R_elements)\n\n @doc_inherit\n def _get_memory_per_frame(self):\n # memory for temporaries\n dim = self.data_producer.dimension()\n\n x_meanfree_elements = self.chunksize * dim\n\n dot_prod_elements = dim\n\n return 8 * (x_meanfree_elements + dot_prod_elements)\n\n @property\n def mean(self):\n return self.mu\n\n @property\n def covariance_matrix(self):\n return self.cov\n\n @doc_inherit\n def _param_init(self):\n self.N = 0\n # create mean array and covariance matrix\n dim = self.data_producer.dimension()\n self._logger.info(\"Running PCA on %i dimensional input\" % dim)\n assert dim > 0, \"Incoming data of PCA has 0 dimension!\"\n self.mu = np.zeros(dim)\n self.cov = np.zeros((dim, dim))\n\n # amount of chunks\n denom = self._n_chunks(self._param_with_stride)\n self._progress_mean = ProgressBar(denom, description=\"calculate mean\")\n self._progress_cov = ProgressBar(denom, description=\"calculate covariances\")\n\n def _param_add_data(self, X, itraj, t, first_chunk, last_chunk_in_traj,\n last_chunk, ipass, Y=None, stride=1):\n \"\"\"\n Chunk-based parametrization of PCA. Iterates through all data twice. In the first pass, the\n data means are estimated, in the second pass the covariance matrix is estimated.\n Finally, the eigenvalue problem is solved to determine the principal compoennts.\n\n :param X:\n coordinates. 
axis 0: time, axes 1-..: coordinates\n :param itraj:\n index of the current trajectory\n :param t:\n time index of first frame within trajectory\n :param first_chunk:\n boolean. True if this is the first chunk globally.\n :param last_chunk_in_traj:\n boolean. True if this is the last chunk within the trajectory.\n :param last_chunk:\n boolean. True if this is the last chunk globally.\n :param ipass:\n number of pass through data\n :param Y:\n time-lagged data (if available)\n :return:\n \"\"\"\n # pass 1: means\n if ipass == 0:\n if t == 0:\n self._logger.debug(\"start to calculate mean for traj nr %i\" % itraj)\n self._sum_tmp = np.empty(X.shape[1])\n np.sum(X, axis=0, out=self._sum_tmp)\n self.mu += self._sum_tmp\n self.N += np.shape(X)[0]\n\n # counting chunks and log of eta\n self._progress_mean.numerator += 1\n show_progressbar(self._progress_mean)\n\n if last_chunk:\n self.mu /= self.N\n\n # pass 2: covariances\n if ipass == 1:\n if t == 0:\n self._logger.debug(\"start calculate covariance for traj nr %i\" % itraj)\n self._dot_prod_tmp = np.empty_like(self.cov)\n Xm = X - self.mu\n np.dot(Xm.T, Xm, self._dot_prod_tmp)\n self.cov += self._dot_prod_tmp\n\n self._progress_cov.numerator += 1\n show_progressbar(self._progress_cov)\n\n if last_chunk:\n self.cov /= self.N - 1\n self._logger.debug(\"finished\")\n return True # finished!\n\n # by default, continue\n return False\n\n @doc_inherit\n def _param_finish(self):\n (v, R) = np.linalg.eigh(self.cov)\n # sort\n I = np.argsort(v)[::-1]\n self.eigenvalues = v[I]\n self.eigenvectors = R[:, I]\n\n def _map_array(self, X):\n \"\"\"\n Projects the data onto the dominant principal components.\n\n :param X: the input data\n :return: the projected data\n \"\"\"\n X_meanfree = X - self.mu\n Y = np.dot(X_meanfree, self.eigenvectors[:, 0:self._output_dimension])\n return Y","sub_path":"pyemma/coordinates/transform/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":7038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"539588525","text":"import math\nimport os.path\nimport simplejson\n\ndef calculate_idfs(filename, force_recalc=False):\n if not force_recalc:\n if os.path.isfile(filename):\n with open(filename) as f:\n idfs_ascii = simplejson.load(f)\n\n idfs = {}\n for w, c in idfs_ascii.iteritems():\n idfs[w.encode('utf-8')] = c\n \n return idfs\n\n\n with open(filename) as f:\n lines = 0\n dfs = {}\n idfs = {}\n\n for line in f:\n lines += 1\n words_on_this_line = []\n if lines % 100000 == 0:\n print(str(lines))\n for word in line.strip().split(' ')[2:]:\n if word == '':\n continue\n\n if word in words_on_this_line:\n continue\n\n if word in dfs:\n dfs[word] += 1\n else:\n dfs[word] = 1\n\n words_on_this_line.append(word)\n\n for word, count in dfs.iteritems():\n idfs[word] = math.log(float(lines) / count)\n\n return idfs\n","sub_path":"calculate_idfs.py","file_name":"calculate_idfs.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"137340075","text":"from unittest import mock\nimport pytest\n\nfrom requests.adapters import HTTPAdapter\nfrom requests.utils import select_proxy\nfrom requests.exceptions import ConnectionError\n\nfrom awx.api.versioning import reverse\nfrom awx.main.models.notifications import NotificationTemplate, Notification\nfrom awx.main.models.inventory import Inventory, InventorySource\nfrom awx.main.models.jobs import 
JobTemplate\n\n\n@pytest.mark.django_db\ndef test_get_notification_template_list(get, user, notification_template):\n url = reverse('api:notification_template_list')\n response = get(url, user('admin', True))\n assert response.status_code == 200\n assert len(response.data['results']) == 1\n\n\n@pytest.mark.django_db\ndef test_basic_parameterization(get, post, user, organization):\n u = user('admin-poster', True)\n url = reverse('api:notification_template_list')\n response = post(url,\n dict(name=\"test-webhook\",\n description=\"test webhook\",\n organization=organization.id,\n notification_type=\"webhook\",\n notification_configuration=dict(url=\"http://localhost\",\n headers={\"Test\": \"Header\"})),\n u)\n assert response.status_code == 201\n url = reverse('api:notification_template_detail', kwargs={'pk': response.data['id']})\n response = get(url, u)\n assert 'related' in response.data\n assert 'organization' in response.data['related']\n assert 'summary_fields' in response.data\n assert 'organization' in response.data['summary_fields']\n assert 'notifications' in response.data['related']\n assert 'notification_configuration' in response.data\n assert 'url' in response.data['notification_configuration']\n assert 'headers' in response.data['notification_configuration']\n\n\n@pytest.mark.django_db\ndef test_encrypted_subfields(get, post, user, organization):\n def assert_send(self, messages):\n assert self.account_token == \"shouldhide\"\n return 1\n u = user('admin-poster', True)\n url = reverse('api:notification_template_list')\n response = post(url,\n dict(name=\"test-twilio\",\n description=\"test twilio\",\n organization=organization.id,\n notification_type=\"twilio\",\n notification_configuration=dict(account_sid=\"dummy\",\n account_token=\"shouldhide\",\n from_number=\"+19999999999\",\n to_numbers=[\"9998887777\"])),\n u)\n assert response.status_code == 201\n notification_template_actual = NotificationTemplate.objects.get(id=response.data['id'])\n url = reverse('api:notification_template_detail', kwargs={'pk': response.data['id']})\n response = get(url, u)\n assert response.data['notification_configuration']['account_token'] == \"$encrypted$\"\n with mock.patch.object(notification_template_actual.notification_class, \"send_messages\", assert_send):\n notification_template_actual.send(\"Test\", {'body': \"Test\"})\n\n\n@pytest.mark.django_db\ndef test_inherited_notification_templates(get, post, user, organization, project):\n u = user('admin-poster', True)\n url = reverse('api:notification_template_list')\n notification_templates = []\n for nfiers in range(3):\n response = post(url,\n dict(name=\"test-webhook-{}\".format(nfiers),\n description=\"test webhook {}\".format(nfiers),\n organization=organization.id,\n notification_type=\"webhook\",\n notification_configuration=dict(url=\"http://localhost\",\n headers={\"Test\": \"Header\"})),\n u)\n assert response.status_code == 201\n notification_templates.append(response.data['id'])\n i = Inventory.objects.create(name='test', organization=organization)\n i.save()\n isrc = InventorySource.objects.create(name='test', inventory=i)\n isrc.save()\n jt = JobTemplate.objects.create(name='test', inventory=i, project=project, playbook='debug.yml')\n jt.save()\n url = reverse('api:organization_notification_templates_any_list', kwargs={'pk': organization.id})\n response = post(url, dict(id=notification_templates[0]), u)\n assert response.status_code == 204\n url = reverse('api:project_notification_templates_any_list', kwargs={'pk': 
project.id})\n response = post(url, dict(id=notification_templates[1]), u)\n assert response.status_code == 204\n url = reverse('api:job_template_notification_templates_any_list', kwargs={'pk': jt.id})\n response = post(url, dict(id=notification_templates[2]), u)\n assert response.status_code == 204\n assert len(jt.notification_templates['any']) == 3\n assert len(project.notification_templates['any']) == 2\n assert len(isrc.notification_templates['any']) == 1\n\n\n@pytest.mark.django_db\ndef test_notification_template_merging(get, post, user, organization, project, notification_template):\n user('admin-poster', True)\n organization.notification_templates_any.add(notification_template)\n project.notification_templates_any.add(notification_template)\n assert len(project.notification_templates['any']) == 1\n\n\n@pytest.mark.django_db\ndef test_notification_template_simple_patch(patch, notification_template, admin):\n patch(reverse('api:notification_template_detail', kwargs={'pk': notification_template.id}), { 'name': 'foo'}, admin, expect=200)\n\n\n@pytest.mark.django_db\ndef test_notification_template_invalid_notification_type(patch, notification_template, admin):\n patch(reverse('api:notification_template_detail', kwargs={'pk': notification_template.id}), { 'notification_type': 'invalid'}, admin, expect=400)\n\n\n@pytest.mark.django_db\ndef test_disallow_delete_when_notifications_pending(delete, user, notification_template):\n u = user('superuser', True)\n url = reverse('api:notification_template_detail', kwargs={'pk': notification_template.id})\n Notification.objects.create(notification_template=notification_template,\n status='pending')\n response = delete(url, user=u)\n assert response.status_code == 405\n\n\n@pytest.mark.django_db\ndef test_custom_environment_injection(post, user, organization):\n u = user('admin-poster', True)\n url = reverse('api:notification_template_list')\n response = post(url,\n dict(name=\"test-webhook\",\n description=\"test webhook\",\n organization=organization.id,\n notification_type=\"webhook\",\n notification_configuration=dict(url=\"https://example.org\",\n headers={\"Test\": \"Header\"})),\n u)\n assert response.status_code == 201\n template = NotificationTemplate.objects.get(pk=response.data['id'])\n with pytest.raises(ConnectionError), \\\n mock.patch('django.conf.settings.AWX_TASK_ENV', {'HTTPS_PROXY': '192.168.50.100:1234'}), \\\n mock.patch.object(HTTPAdapter, 'send') as fake_send:\n def _send_side_effect(request, **kw):\n assert select_proxy(request.url, kw['proxies']) == '192.168.50.100:1234'\n raise ConnectionError()\n fake_send.side_effect = _send_side_effect\n template.send('subject', 'message')\n","sub_path":"awx/main/tests/functional/test_notifications.py","file_name":"test_notifications.py","file_ext":"py","file_size_in_byte":7647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"226184578","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*\n\nfrom django.db import transaction\nfrom django.http import HttpResponseRedirect\n\nfrom core.common.utils import force_post\nfrom invites.models import Invite\nfrom places.models import UserPlaceSettings\n\n@force_post\n@transaction.commit_on_success\ndef resolve_invite(req, invite_id, accepted):\n try:\n invite = Invite.objects.get(id=invite_id)\n if not accepted:\n invite.accepted = accepted\n invite.save()\n return HttpResponseRedirect('/')\n #endif\n\n _ups = None\n # User wanted to get to place\n # OR Admin invited user to place\n if 
invite.from_user is None and UserPlaceSettings.is_admin(req.user.id, invite.place.id):\n _ups = UserPlaceSettings()\n _ups.user = invite.to_user\n _ups.place = invite.place\n _ups.save()\n invite.accepted = True\n invite.save()\n return HttpResponseRedirect('/')\n elif invite.to_user == req.user:\n _ups = UserPlaceSettings()\n _ups.user = invite.to_user\n _ups.place = invite.place\n _ups.save()\n invite.accepted = True\n invite.save()\n return HttpResponseRedirect('/place/%s' % _ups.place.id)\n #endif \n\n # Add user to place\n except Invite.DoesNotExist:\n return HttpResponseRedirect('/')\n #endtry\n#enddef\n","sub_path":"src/invites/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"605193877","text":"import time\nfrom handlers.handler import Handler\nfrom models.post import Post\nfrom models.comment import Comment\n\nclass NewComment(Handler):\n def get(self,post_id):\n #check if user logged in\n if not self.user:\n self.redirect('/login')\n return\n\n post = Post.get_by_id(int(post_id))\n\n if not post:\n self.error(404)\n return\n\n comment_id = self.request.get(\"comment_id\")\n\n if not comment_id:\n quoted_message = post.content\n else:\n cm = Comment.get_by_id(int(comment_id))\n quoted_message = cm.comment\n\n self.render(\"newcomment.html\",\n loggedin = True,\n user = self.user,\n post = post,\n quoted_message = quoted_message,\n getPDTTimeString = getPDTTimeString,)\n\n def post(self,post_id):\n if not self.user:\n self.redirect('/login')\n return\n\n #check post existence\n post = Post.get_by_id(int(post_id))\n if not post:\n self.error(404)\n return\n\n comment = self.request.get(\"comment\")\n\n #if comment is empty\n #normally should not happen because browser won't submit\n if not comment:\n self.redirect('/post/%s' % post_id)\n return\n\n #create new comment\n newComment = Comment(user_name = self.user.user_name,\n user_id = self.user.key().id(),\n post_id = int(post_id),\n comment = comment)\n newComment.put()\n #increment post's numComment\n post.numComment+=1\n post.put()\n\n time.sleep(0.1)\n self.redirect('/post/%s' % post_id)\n","sub_path":"handlers/newcomment.py","file_name":"newcomment.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"45928898","text":"#!/usr/bin/env python\n##############################################################################\n# Copyright (c) 2021 Orange, Inc. and others. All rights reserved.\n#\n# All rights reserved. 
This program and the accompanying materials\n# are made available under the terms of the Apache License, Version 2.0\n# which accompanies this distribution, and is available at\n# http://www.apache.org/licenses/LICENSE-2.0\n##############################################################################\n\n# pylint: disable=no-member\n# pylint: disable=too-many-public-methods\n\nimport unittest\nimport os\nimport sys\nimport time\nimport requests\nsys.path.append('transportpce_tests/common/')\nimport test_utils\n\n\nclass TransportPCE400Gtesting(unittest.TestCase):\n\n simple_topo_bi_dir_data = None\n port_mapping_data = None\n processes = None\n\n @classmethod\n def setUpClass(cls):\n try:\n sample_files_parsed = False\n TOPO_BI_DIR_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"..\", \"..\", \"sample_configs\", \"honeynode-topo400G.json\")\n with open(TOPO_BI_DIR_FILE, 'r') as topo_bi_dir:\n cls.simple_topo_bi_dir_data = topo_bi_dir.read()\n\n PORT_MAPPING_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"..\", \"..\", \"sample_configs\", \"pce_portmapping_71.json\")\n with open(PORT_MAPPING_FILE, 'r') as port_mapping:\n cls.port_mapping_data = port_mapping.read()\n sample_files_parsed = True\n except PermissionError as err:\n print(\"Permission Error when trying to read sample files\\n\", err)\n sys.exit(2)\n except FileNotFoundError as err:\n print(\"File Not found Error when trying to read sample files\\n\", err)\n sys.exit(2)\n except:\n print(\"Unexpected error when trying to read sample files\\n\", sys.exc_info()[0])\n sys.exit(2)\n finally:\n if sample_files_parsed:\n print(\"sample files content loaded\")\n cls.processes = test_utils.start_tpce()\n\n @classmethod\n def tearDownClass(cls):\n # pylint: disable=not-an-iterable\n for process in cls.processes:\n test_utils.shutdown_process(process)\n print(\"all processes killed\")\n\n def setUp(self): # instruction executed before each test method\n print(\"execution of {}\".format(self.id().split(\".\")[-1]))\n time.sleep(1)\n\n # Load port mapping\n def test_01_load_port_mapping(self):\n response = test_utils.put_jsonrequest(test_utils.URL_FULL_PORTMAPPING, self.port_mapping_data)\n self.assertIn(response.status_code, (requests.codes.ok, requests.codes.created))\n time.sleep(2)\n\n # Load simple bidirectional topology\n def test_02_load_simple_topology_bi(self):\n response = test_utils.put_jsonrequest(test_utils.URL_CONFIG_ORDM_TOPO, self.simple_topo_bi_dir_data)\n self.assertEqual(response.status_code, requests.codes.ok)\n time.sleep(2)\n\n # Path Computation success\n def test_03_path_computation_xpdr_bi(self):\n response = test_utils.path_computation_request(\"request-1\", \"service-1\",\n {\"node-id\": \"XPDR-A2\", \"service-rate\": \"400\",\n \"service-format\": \"Ethernet\", \"clli\": \"nodeA\"},\n {\"node-id\": \"XPDR-C2\", \"service-rate\": \"400\",\n \"service-format\": \"Ethernet\", \"clli\": \"nodeC\"})\n self.assertEqual(response.status_code, requests.codes.ok)\n res = response.json()\n self.assertIn('Path is calculated',\n res['output']['configuration-response-common']['response-message'])\n\n self.assertEqual(1, res['output']['response-parameters']['path-description']\n ['aToZ-direction']['aToZ-wavelength-number'])\n self.assertEqual(400, res['output']['response-parameters']['path-description']\n ['aToZ-direction']['rate'])\n self.assertEqual(196.0375, res['output']['response-parameters']['path-description']\n ['aToZ-direction']['aToZ-min-frequency'])\n 
self.assertEqual(196.12500, res['output']['response-parameters']['path-description']\n ['aToZ-direction']['aToZ-max-frequency'])\n self.assertEqual('dp-qam16', res['output']['response-parameters']['path-description']\n ['aToZ-direction']['modulation-format'])\n\n self.assertEqual(1, res['output']['response-parameters']['path-description']\n ['zToA-direction']['zToA-wavelength-number'])\n self.assertEqual(400, res['output']['response-parameters']['path-description']\n ['zToA-direction']['rate'])\n self.assertEqual(196.0375, res['output']['response-parameters']['path-description']\n ['zToA-direction']['zToA-min-frequency'])\n self.assertEqual(196.12500, res['output']['response-parameters']['path-description']\n ['zToA-direction']['zToA-max-frequency'])\n self.assertEqual('dp-qam16', res['output']['response-parameters']['path-description']\n ['zToA-direction']['modulation-format'])\n time.sleep(5)\n\n # Test deleted complex topology\n def test_04_test_topology_complex_deleted(self):\n response = test_utils.get_ordm_topo_request(\"node/XPONDER-3-2\")\n self.assertEqual(response.status_code, requests.codes.conflict)\n time.sleep(1)\n\n # Delete portmapping\n def test_05_delete_port_mapping(self):\n response = test_utils.delete_request(test_utils.URL_FULL_PORTMAPPING)\n self.assertEqual(response.status_code, requests.codes.ok)\n time.sleep(2)\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n","sub_path":"tests/transportpce_tests/7.1/test_pce_400G.py","file_name":"test_pce_400G.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"184963937","text":"from turtle import *\nfrom random import *\nsetheading(90)\ndef trujkat(kolor):\n pd()\n rt(30)\n fd(100)\n lt(120)\n fd(100)\n lt(120)\n fd(100)\n rt(30)\n pu()\n fd(100)\n lt(180)\n rt(30)\n fd(100)\n lt(30)\n rt(30)\n\ndef kolo():\n setpos(-100,-100)\n pd()\n begin_fill()\n color(\"black\", \"blue\")\n circle(50)\n end_fill()\n print(\"hello\")\n \nspeed(0) \nfor i in range(12):\n trujkat(10)\nkolo()\n","sub_path":"stare_zad/trujkat.py","file_name":"trujkat.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"649409241","text":"# coding:utf-8\nfrom pyx_tornado_exts.tornado_session.redis_session import MetaSessionK\nfrom pyx_tornado_exts.tornado_session.web import SessionHandler\n\n\nclass MainHandler(SessionHandler):\n # @login_required\n def get(self):\n meta = self.get_current_user()\n if not meta:\n meta = \"NoOne\"\n print(meta)\n self.write(\"What's up, \" + meta + \"?\")\n\n\nclass M1Handler(SessionHandler):\n def get(self):\n self.redirect(\"/me\")\n\n\nclass M2Handler(SessionHandler):\n def get(self):\n self.write(\"m2\")\n\n\nclass MeHandler(SessionHandler):\n def get(self):\n self.session[MetaSessionK.UUID_META] = \"session\"\n self.session.save()\n # self.write('save user_name to session')\n self.redirect(\"/m2\")\n","sub_path":"pyx_tornado/websession/pytornado_websession/handler/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"149596103","text":"# %load q02_append_row/build.py\nimport pandas as pd\nimport sys, os\n#sys.path.append(os.path.join(os.path.dirname(os.curdir)))\nfrom greyatomlib.pandas_guided_project.q01_load_data.build import q01_load_data\n\n\npath = 'data/excel-comp-data.xlsx'\ndef 
q02_append_row(path):\n 'write your solution here'\n df = pd.read_excel(path)\n df['total'] = df['Jan']+df['Feb']+df['Mar']\n df.loc[len(df),:]=df.sum()\n print(df.head)\n return df\n\nprint(q02_append_row(path))\n\n\n\n\n\n","sub_path":"q02_append_row/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"132895431","text":"import FWCore.ParameterSet.Config as cms\nfrom glob import glob\n\n# Prefix where we expect idXX/*.root files to be\nprefix = '/nfs/bluearc/group/trees/ww/V03_WWEventSkimProd_Newest/'\n\nnov4Samples = {\n #id #name #scale to 1pb^-1 #is data #skim event files\n 'id65': ['Mu2010ANov4' , 1, True, [] , 1.0, False],\n 'id66': ['EG2010ANov4' , 1, True, [] , 1.0, False],\n 'id67': ['Mu2010BNov4' , 1, True, [] , 1.0, False],\n 'id68': ['Electron2010BNov4' , 1, True, [] , 1.0, False],\n}\n\n\nfor key in nov4Samples:\n nov4Samples[key][3] += [ '%s'%x for x in glob(prefix+key+'/*.root') ]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"WWAnalysis/AnalysisStep/python/data/v03/nov4Samples_cff.py","file_name":"nov4Samples_cff.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"229763028","text":"#!/usr/bin/env python\n\n\"\"\"\nUnit tests for pre-processing module\n====================================\n\"\"\"\n\nimport pytest\nimport numpy as np\nfrom scipy.sparse import csr_matrix\n\nfrom implicitmf.preprocess import normalize_X, dict_converter\nfrom _mock_data import sparse_array, create_ratings_df\n\ndef test_dict_converter_output():\n \"\"\"\n Check that output of dict_converter()\n is the correct format.\n \"\"\"\n data = create_ratings_df()\n output = dict_converter(data)\n assert(isinstance(output, dict))\n\ndef test_dict_converter_input_error():\n \"\"\"\n Check that dict_converter() raises a\n ValueError when input is not correct format.\n \"\"\"\n data = create_ratings_df()\n data['extra_column'] = data['ratings']*2\n with pytest.raises(ValueError):\n dict_converter(data)\n\ndef test_normalize_X_output():\n \"\"\"\n Check that output of normalize_X()\n is a scipy.sparse.csr matrix.\n \"\"\"\n X = sparse_array()\n output = normalize_X(X, norm_type=\"bm25\")\n assert isinstance(output, csr_matrix)\n assert output.shape == X.shape\n\ndef test_normalize_X_incorrect_sparse_matrix():\n \"\"\"\n Check that normalize_X() raises a\n TypeError if X is not the correct format.\n \"\"\"\n msg = \"`X` must be a scipy.sparse.csr_matrix\"\n with pytest.raises(TypeError, match=msg):\n normalize_X(X=\"hello\", norm_type=\"bm25\")\n\ndef test_normalize_X_incorrect_norm_type():\n \"\"\"\n Check that normalize_X() raises a ValueError\n if norm_type is not one of bm25 or tfidf.\n \"\"\"\n msg = \"Unknown `norm_type` parameter\"\n with pytest.raises(ValueError, match=msg):\n normalize_X(X=sparse_array(), norm_type=\"bm2000\")\n\n","sub_path":"tests/test_preprocess.py","file_name":"test_preprocess.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"228662366","text":"from flask import Flask, jsonify, request\nimport requests\nimport json\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nimport xlrd\napp = Flask(__name__)\ndataset = pd.read_excel(\"Sample.xls\")\ndf = 
pd.DataFrame(dataset)\nfurniture = df.loc[df['Category'] == 'Furniture']\nfurniture.drop(df.columns.difference(['Order Date','Sales']), 1, inplace=True)\n\n@app.route(\"/rf\")\ndef rf():\n\n\tX = furniture['Order Date'].values.astype('int')\n\ty = furniture['Sales'].values\n\tdata = {\n\t'SalesPredictions' : list(y),\n\t'date' : list(X)\n\t}\n\treturn jsonify(data)\n\n@app.route('/pred',methods = ['GET'])\ndef pred():\n\turl = \"http://127.0.0.1:5000/rf\"\n\ttry:\n\t\tuResponse = requests.get(url)\n\texcept requests.ConnectionError:\n\t\treturn \"Connection Error\" \n\tJresponse = uResponse.text\n\tdata = json.loads(Jresponse)\n\tlist1 = [k for k in data['date']]\n\tlist2 = [k for k in data['SalesPredictions']]\n\n\tarray1 = np.asarray(list1).reshape(-1,1)\n\tarray2 = np.asarray(list2)\n\t\n\tX_train,X_test,y_train,y_test = train_test_split(array1,array2,test_size=0.2)\n\tregressor = RandomForestRegressor()\n\tregressor = regressor.fit(X_test,y_test)\n\tsales_pred = regressor.predict(X_test)\n\tsales_pred = list(sales_pred)\n\ta = regressor.score(X_test,y_test)\n\tb = np.array2string(a)\n\terrors = abs(sales_pred - y_test) #average absolute error-https://towardsdatascience.com/improving-random-forest-in-python-part-1-893916666cd\n\treturn jsonify({'Sales Predictions(in Dollars)':sales_pred,'accuracy':b,'Average absolute error(in Dollars):': round(np.mean(errors), 2)})\nif __name__ == '__main__':\n\tapp.run(debug=True)\n","sub_path":"randomforest.py","file_name":"randomforest.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"103665051","text":"import torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nimport os\nimport torch\nimport numpy as np\nimport pickle\nimport torchvision.transforms as transforms\nfrom torch.utils.data import Dataset, DataLoader\nimport copy\nimport torchvision\nfrom PIL import Image\nfrom torchvision import transforms\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nimport torch.nn.functional as F\nimport sys\n\n################################################################################################################# \n## This model is training a VGG with transfer learning with sparse l1 norm on last layer \n####### imageDataRGB.p are the rgb images from bold5000 sorted\n####### dataForCNN.p are the average neural firing for the neurons for each stimuli 227 neurons 4899 stimuli####\n### Alpha hyperparameter must be played around with for best performance. 
Once this is trained, we do inception ########\n###################################################################\n\n\n'''\nGPU_ID = int(sys.argv[1])\n \nif GPU_ID !=-1:\n os.environ['CUDA_VISIBLE_DEVICES'] = str(GPU_ID)\nelse:\n os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'\n\n'''\n\n\n\n\nclass loader(Dataset):\n def __init__(self, text_mode = False):\n self.target = pickle.load(open('dataForCNNNorm1.p' , 'rb' ))\n self.data = pickle.load(open('imageDataRGB.p', 'rb'), encoding = 'bytes')\n self.transformData = transforms.Compose([transforms.ToPILImage(), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406),(0.229, 0.224, 0.225))])\n #self.transformData = transforms.Compose([transforms.ToTensor()]) \n self.transformLabel = transforms.Compose([transforms.ToTensor()])\n\n def __getitem__(self, index):\n data = self.data[index]\n target = self.target[index]\n pf0 = self.transformData(data)\n target = torch.from_numpy(target)\n return pf0, target\n\n def __len__ (self):\n return len(self.target)\n\ntest_data = loader(text_mode = False)\n\ntest_loader = DataLoader(test_data, batch_size = 1, shuffle = False, num_workers = 4, drop_last=False)\n\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\n__all__ = [\n 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',\n 'vgg19_bn', 'vgg19',\n]\n\n\nmodel_urls = {\n 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',\n 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',\n 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',\n 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',\n 'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',\n 'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',\n 'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',\n 'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',\n}\n\n\n\n\nclass VGG(nn.Module):\n\n def __init__(self, features, num_classes=1000, init_weights=True):\n super(VGG, self).__init__()\n self.features = features\n self.avgpool = nn.AdaptiveAvgPool2d((7, 7))\n self.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, num_classes),\n )\n if init_weights:\n self._initialize_weights()\n\n def forward(self, x, embedding = False):\n #print(\"hello\")\n x = self.features(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n #print(\"hi hi\")\n #assert False\n #print(x.shape) #torch.Size([5, 25088])\n \n #assert False\n if embedding == True:\n return x\n\n x = self.classifier(x)\n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n\ndef make_layers(cfg, batch_norm=False):\n print(\"making layers\")\n layers = []\n in_channels = 3\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, 
nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n\n\ncfg = {\n 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\ndef vgg11(pretrained=False, **kwargs):\n \"\"\"VGG 11-layer model (configuration \"A\")\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['A']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg11']))\n return model\n\n\ndef vgg11_bn(pretrained=False, **kwargs):\n \"\"\"VGG 11-layer model (configuration \"A\") with batch normalization\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))\n return model\n\n\ndef vgg13(pretrained=False, **kwargs):\n \"\"\"VGG 13-layer model (configuration \"B\")\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['B']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg13']))\n return model\n\n\ndef vgg13_bn(pretrained=False, **kwargs):\n \"\"\"VGG 13-layer model (configuration \"B\") with batch normalization\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg13_bn']))\n return model\n\n\ndef vgg16(pretrained=False, **kwargs):\n \"\"\"VGG 16-layer model (configuration \"D\")\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['D']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))\n return model\n\n\ndef vgg16_bn(pretrained=False, **kwargs):\n \"\"\"VGG 16-layer model (configuration \"D\") with batch normalization\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))\n return model\n\n\nnet = vgg16(pretrained = True) #.to(device)\nprint(net)\nprint(\"this is the stoppage\")\nmoduleList = list(net.features.modules())\n\n\n\nnet.features = nn.Sequential(*moduleList[0][:24])\nfeatures = list(net.children())[:-7] # Remove last layer\nmyLayer = nn.Linear(25088, 227)\n\nfeatures.extend([myLayer]) # Add our layer with 4 outputs\nnet.classifier = nn.Sequential(*features)\n\n\nnewModules = list(net.modules())\n\nprint(net)\nnet.eval()\nnet.load_state_dict(torch.load(\"epoch_99_.001_23LayerNew\"))\n\n\nnet.cuda()\n\n\ncriterion = nn.MSELoss()\n\noptimizer = torch.optim.Adam(myLayer.parameters(), lr =.00001)\nscheduler = ReduceLROnPlateau(optimizer)\n\n\n\n####### Definining A Hook now\nclass 
Hook():\n def __init__(self, module, backward=False):\n if backward==False:\n self.hook = module.register_forward_hook(self.hook_fn)\n else:\n self.hook = module.register_backward_hook(self.hook_fn)\n def hook_fn(self, module, input, output):\n self.input = input\n self.output = output\n def close(self):\n self.hook.remove()\n\n\n\n\n'''\nif GPU_ID == -1:\n net = nn.DataParallel(net)\n#net.cuda()\n'''\n\n\n\n\ndef test_epoch(model, test_loader):\n model.eval()\n\n running_loss = 0.0\n running_corrects = 0.0\n total = 0.0\n correct_predictions = 0.0\n predictions = []\n targets = []\n #start_time = time.time()\n with torch.no_grad():\n for batch_idx, (data, target) in enumerate(test_loader):\n\n data = data.to(device)\n target = target.to(device)\n\n \n outputs = model(data)\n predictions.append(outputs)\n targets.append(target)\n\n\n return predictions, targets\n\n\n\n\n\n\n\n\npredictions, targets = test_epoch(net, test_loader)\n\nprint(predictions[0])\nprint(type(predictions[0]))\nprint(targets[0])\nprint(type(targets[0]))\n\n\nfinalPredictions = []\nfinalTargets = []\n\nfor item in predictions:\n finalPredictions.append(item.cpu().detach().numpy())\n\nfor item in targets:\n finalTargets.append(item.cpu().detach().numpy())\n\n\n\npickle.dump(finalPredictions, open(\"layer23PredictionsAll.p\" , \"wb\"))\npickle.dump(finalTargets, open(\"layer23TargetsAll.p\", \"wb\"))\n\n\nprint(\"done with all of it\")\n","sub_path":"firstRun/vgg16MaxPool4Test.py","file_name":"vgg16MaxPool4Test.py","file_ext":"py","file_size_in_byte":10046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"384246535","text":"# retrieve a list of records with a given data value\nimport bsddb3 as bsddb\nimport datetime\nfrom keysearch import KeySearch\n\n\ndef DataSearch(db_type_option, db = None, db2 = None):\n if not db:\n print (\"\\nNo database exists to search!\\n\")\n return # return to main menu\n \n if (db_type_option == 'indexfile'):\n KeySearch(db2, \"data\")\n return\n \n data = input(\"\\nEnter the data value to search by: \")\n count = 0\n file = open(\"answers.txt\", 'a')\n start_time = datetime.datetime.now() \n for key, value in db.items():\n value = value.decode('UTF-8') \n if data == value:\n #print(\"\\nMatch found: \")\n key = key.decode('UTF-8')\n file.write(key)\n file.write(\"\\n\")\n file.write(value)\n file.write(\"\\n\\n\")\n #print(key)\n count += 1\n end_time = datetime.datetime.now()\n if (count == 0):\n print(\"Record for\", data, \"does not exist\")\n print(count, \"Record(s) found.\")\n execution_time = end_time - start_time\n micro_sec = execution_time.total_seconds()*1000000\n print(\"Total execution time:\", micro_sec, \" microseconds\")\n file.close()\n \n \n\n","sub_path":"datasearch.py","file_name":"datasearch.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"572759270","text":"import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\n\ni = 0\nwhile (cap.isOpened()):\n ret, frame = cap.read()\n if ret == False: break\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n if i == 20:\n bgGray = gray\n if i>20:\n dif = cv2.absdiff(gray, bgGray)\n _, th = cv2.threshold(dif, 40, 255, cv2.THRESH_BINARY)\n cnts,_ = cv2.findContours(th,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n \n for c in cnts:\n area = cv2.contourArea(c)\n if area > 7000:\n x,y,w,h = cv2.boundingRect(c)\n cv2.rectangle(frame, (x,y), (x+w, y+h), (255,255,0), 2)\n \n 
cv2.imshow('Frame',frame)\n i+=1\n if cv2.waitKey(30) & 0xFF == ord('q'):\n break\n\ncap.release()\n","sub_path":"1ejercicios_clases/roi.py","file_name":"roi.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"401597914","text":"from .Department import Department\nfrom .functions import *\nimport datetime\nnow = datetime.datetime.now()\nclass Employee(Department):\n def __init__(self):\n Department.__init__(self)\n self.FullName = \"\"\n self.Age = 0\n self.PhoneNum = \"\"\n self.TurnTo = str(now.day)+\"/\"+str(now.month)+\"/\"+str(now.year)\n self.Salary = 0\n self.editlist = {\n \"1\": ['Organization Name', self.Set_OrgName],\n \"2\": ['Department Name', self.Set_DepName],\n \"3\": ['Floor', self.Set_Floor],\n \"4\": ['Info', self.Set_Info],\n \"5\": ['Full Name', self.Set_Name],\n \"6\": ['Age', self.Set_Age],\n \"7\": ['Extension Phone Number', self.Set_Phone],\n \"8\": ['Salary', self.Set_Salary],\n }\n\n def Add_Employee(self):\n Department.AddDepartment(self)\n self.Set_Name()\n self.Set_Age()\n self.Set_Phone()\n self.Set_Salary()\n\n def Set_Name(self):\n self.FullName = input(\"Enter full name\\n\")\n\n def Set_Age(self):\n self.Age = input(\"Enter Age\\n\")\n while (True):\n if is_int(self.Age):\n break\n else:\n self.Age = input(\"Enter Age\\n\")\n\n def Set_Phone(self):\n self.PhoneNum = input(\"Enter extension number\\n\")\n\n def Set_Salary(self):\n self.Salary = input(\"Enter Salary\\n\")\n while (True):\n if is_float(self.Salary):\n break\n else:\n self.Salary = input(\"Enter Salary\\n\")\n\n def Show(self):\n Department.Show(self)\n print(\"\\n5. Full Name: \"+self.FullName+\"\\n6. Age: \"+self.Age+\"\\n7. Extension Number: \"+self.PhoneNum+\"\\n8. Turn-to: \"+self.TurnTo+\"\\n9. Salary: \"+self.Salary)\n\n def EditDep(self):\n for i in self.editlist:\n print(i + ' - ' + self.editlist[i][0])\n while (True):\n buf = input(\"Select a field for editing\\n\")\n if is_int(buf) and 0 < int(buf) <= len(self.editlist):\n self.editlist[buf][1]()\n break\n else:\n print('Error. 
This field does not exist')\n answer = input('Change Another Field?\\nYes/No')\n if (str.lower(answer) != 'yes'):\n break","sub_path":"st32/Employee.py","file_name":"Employee.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"582665753","text":"import csv\n\ndef make_link(G, node1, node2):\n if node1 not in G:\n G[node1] = {}\n if node2 not in G[node1]:\n (G[node1])[node2] = 0\n (G[node1])[node2] += 1\n if node2 not in G:\n G[node2] = {}\n if node1 not in G[node2]:\n (G[node2])[node1] = 0\n (G[node2])[node1] += 1\n return G\n\ndef read_graph(filename):\n tsv = csv.reader(open(filename), delimiter=',')\n G = {}\n characters=set()\n for char, book in tsv: \n make_link(G, char, book)\n characters.add(char)\n return G, characters\n\ndef make_char_graph(marvelG,characters):\n charG = {}\n for char1 in characters:\n for book in marvelG[char1]:\n for char2 in marvelG[book]:\n # to avoid double count\n if char1 < char2:\n make_link(charG, char1, char2)\n #inverse the weight in charG to show shorter distance \n for char1 in charG:\n for char2 in charG[char1]:\n charG[char1][char2] = 1.0 / charG[char1][char2]\n return charG\n\ndef hop_distance (charG,v):\n hop=0\n final_dist = {v:(hop, v, None)}\n open_list=[v]\n while len(open_list) > 0:\n node = open_list.pop(0)\n \n for neighbor in G[node]:\n if neighbor not in final_dist:\n open_list.append(neighbor)\n final_dist[neighbor] = (hop + 1, neighbor, node)\n \n return final_dist\ndef dijkstra(charG,v):\n first_entry = (0, v, None) #(weight,hop)\n heap = [first_entry]\n location = {first_entry:0}\n dist_so_far = {v:first_entry}\n final_dist = {}\n while len(dist_so_far) > 0:\n w = heappopmin(heap, location)\n node = w[1]\n dist = w[0]\n del dist_so_far[node]\n final_dist[node] = w\n for x in charG[node]:\n if x not in final_dist:\n new_dist = charG[node][x]+ final_dist[node]\n new_entry = (new_dist, x, node)\n if x not in dist_so_far:\n dist_so_far[x] = new_entry\n insert_heap(heap, new_entry, location)\n \n elif new_entry < dist_so_far[x]:\n decrease_val(heap, location, dist_so_far[x], new_entry)\n dist_so_far[x] = new_entry\n return final_dist \ndef heappopmin(heap, location):\n val = heap[0]\n new_top = heap.pop()\n location[val] = None\n if len(heap) == 0:\n return val\n location[new_top] = 0\n heap[0] = new_top\n down_heapify(heap, 0, location)\n return val\ndef down_heapify(heap, i, location):\n while True:\n l = left(i)\n r = right(i)\n\n if l >= len(heap): \n break\n\n v = heap[i][0]\n lv = heap[l][0]\n\n if r == len(heap):\n if v > lv:\n swap(heap, i, l, location)\n break\n\n rv = heap[r][0]\n \n if min(lv, rv) >= v: \n break\n \n if lv < rv:\n swap(heap, i, l, location)\n i = l\n else:\n swap(heap, i, r, location)\n i = r\n \ndef left(i): \n return 2*i+1\ndef right(i): \n return 2*i+2\n\ndef swap(heap, old, new, location):\n location[heap[old]] = new\n location[heap[new]] = old\n (heap[old], heap[new]) = (heap[new], heap[old])\n\ndef insert_heap(heap, v, location):\n heap.append(v)\n location[v] = len(heap) - 1\n up_heapify(heap, len(heap) - 1, location)\n \ndef up_heapify(heap, i, location):\n while i > 0: \n p = (i - 1) // 2\n if heap[i][0] < heap[p][0]:\n swap(heap, i, p, location)\n i = p\n else:\n break\ndef decrease_val(heap, location, old_val, new_val):\n i = location[old_val]\n heap[i] = new_val\n location[old_val] = None\n location[new_val] = i\n up_heapify(heap, i, location)\ndef get_parent(pair): return pair[2]\ndef find_path(dist, 
target):\n node = target\n path = [target]\n while True:\n prev = get_parent(dist[node])\n if prev is None:\n # We've rached our target, so return \n # the path\n return path\n path.append(prev)\n node = prev\nanswers = [] #store a tuple ((char1, char2), (char_path, hop_dist))\n# the characters that the problem asks us to look at\nchars = ['SPIDER-MAN/PETER PAR',\n 'GREEN GOBLIN/NORMAN ',\n 'WOLVERINE/LOGAN ',\n 'PROFESSOR X/CHARLES ', \n 'CAPTAIN AMERICA']\n\nmarvelG, characters = read_graph('edges.csv')\ncharG=make_char_graph(marvelG,characters)\n \nfor char1 in chars:\n # calculate the distance to each other character\n char_dist = dijkstra(charG, char1)\n # and calculate the hops required\n hop_dist = hop_distance(charG, char1)\n\n for char2 in char_dist:\n if char1 == char2:\n continue\n char_path = find_path(char_dist, char2)\n hop_path = find_path(hop_dist, char2)\n # if the weighted path is longer then the hop path, we need\n # to save it\n if len(char_path) > len(hop_path):\n answers.append(((char1, char2), (char_path, hop_path)))\n\n# and now we print out the answer\nprint (len(answers))\n","sub_path":"weighted Marvel graph.py","file_name":"weighted Marvel graph.py","file_ext":"py","file_size_in_byte":5237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"516241107","text":"import tensorflow as tf\nimport os,sys\nCURRENT_DIR = os.path.dirname(__file__)\nsys.path.append(os.path.join(CURRENT_DIR, '..'))\nfrom model.SiamRPN import SiamRPN\nfrom utils.train_utils import show_pred_bbox\nfrom utils.tf_bbox_ops_utils import tf_iou\n\nslim = tf.contrib.slim\n\nclass SiamRPN_IOU(SiamRPN):\n def _build_rpn_loss(self):\n \"\"\"\n self.labels: NxNa(Na=32*32*k)\n self.pred_anchors: Nx32x32x4k\n self.pred_prob: Nx32x32x2k\n self.bbox_gts: NxNax4\n \"\"\"\n with tf.name_scope('Loss'):\n valid_mask = tf.stop_gradient(tf.not_equal(self.labels, -1)) # N*Na\n valid_labels = tf.boolean_mask(self.labels, valid_mask) # N*num_of_anchors_per_image(=64)\n valid_labels = tf.reshape(valid_labels, [self.batch_size,-1])\n \n valid_labels_flatten_pos = tf.to_float(tf.reshape(valid_labels, [-1]))\n valid_labels_flatten = tf.stack([valid_labels_flatten_pos, 1.0 - valid_labels_flatten_pos], axis=1) #[-1x2]\n\n valid_pred_probs = tf.boolean_mask(self.pred_probs, valid_mask)\n valid_pred_probs = tf.reshape(valid_pred_probs, [-1, 2])\n \n pos_mask = tf.stop_gradient(tf.equal(self.labels, 1)) # N*Na\n valid_bbox_gts = tf.boolean_mask(self.bbox_gts, pos_mask)\n valid_pred_boxes = tf.boolean_mask(self.pred_boxes, pos_mask)\n \n self.loss_cls = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=valid_labels_flatten, logits=valid_pred_probs))\n self.loss_iou = 1.0 - tf.reduce_mean(tf_iou(tf.reshape(valid_bbox_gts,[-1,4]), tf.reshape(valid_pred_boxes,[-1,4])))\n \n def build_loss(self):\n self._build_rpn_loss()\n with tf.name_scope('Loss'):\n self.batch_loss = self.loss_cls + self.loss_iou\n tf.losses.add_loss(self.batch_loss)\n self.total_loss = tf.losses.get_total_loss()\n mean_total_loss, update_op = tf.metrics.mean(self.total_loss)\n with tf.control_dependencies([update_op]):\n tf.summary.scalar('total_loss', mean_total_loss, family=self.mode)\n\n tf.summary.scalar('batch_loss', self.batch_loss, family=self.mode)\n tf.summary.scalar('loss_cls', self.loss_cls, family=self.mode)\n tf.summary.scalar('loss_iou', self.loss_iou, family=self.mode)\n\n track_instance = tf.py_func(show_pred_bbox,[self.instances, self.topk_bboxes, self.topk_scores, 
self.gt_instance_boxes],tf.float32)\n tf.summary.image('exemplar', self.examplars, family=self.mode)\n tf.summary.image('instance', track_instance, family=self.mode)","sub_path":"model/SiamRPN_IOU.py","file_name":"SiamRPN_IOU.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"152095590","text":"import http.server\nimport http.cookies\nimport uuid\nimport cgi\nimport urllib\nimport os\nimport raspact.util\nimport raspact.routes\n\n\n# Private class to handle HTTP requests for apps created by a factory.\nclass AppHandler(http.server.BaseHTTPRequestHandler):\n\n # maps session IDs to apps created by self.appFactory()\n sessionStore = {}\n\n # pre-loaded partials\n resources = {\n \"header\": raspact.util.readResource(\"partials/header.html\"),\n \"footer\": raspact.util.readResource(\"partials/footer.html\")\n }\n\n # Constructs a new AppHandler that creates a new App per session using\n # the appFactory factoy method which must create an object of type\n # Application.\n def __init__(self, appFactory, *args):\n self.appFactory = appFactory\n self.routes = raspact.routes.Routes(None) # TODO restore self.app.routes)\n self.newCookie = None\n\n self.routes.route(\"/assets/\", serveAsset, True)\n self.routes.route(\"/logs\", serveLogs, False)\n self.routes.route(\"/logs.html\", serveLogsHtml(False), False)\n self.routes.route(\"/logs.part.html\", serveLogsHtml(True), False)\n\n http.server.BaseHTTPRequestHandler.__init__(self, *args)\n\n def do_POST(self):\n self.restoreSession()\n\n # we can only post to \"/\"\n if self.path != \"/\":\n self.error(405, \"Method not allowed\", \"Can only POST to '/'.\")\n return\n contentType = self.headers[\"Content-Type\"]\n ctype, pdict = cgi.parse_header(self.headers['content-type'])\n if ctype == 'multipart/form-data':\n postvars = cgi.parse_multipart(self.rfile, pdict)\n elif ctype == 'application/x-www-form-urlencoded':\n length = int(self.headers['Content-Length'])\n postvars = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8'))\n #urllib.parse_qs(self.rfile.read(length), keep_blank_values=1)\n else:\n postvars = {}\n self.send_response(200)\n self.__process(postvars)\n\n def do_GET(self):\n self.restoreSession()\n\n ## Routing\n # Serve the actual app\n if (self.path == \"/\"):\n self.__process(None)\n elif (self.path == \"/reset\"):\n if self.path == \"/reset\":\n self.app.reset()\n self.__process(None)\n else:\n self.routes.fallback = self.app.routes\n self.routes.handleRequest(self.path, self)\n\n\n # Delegate to app to process the request.\n def __process(self, postvars):\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.write(AppHandler.resources[\"header\"])\n self.app._Application__serve(self, postvars)\n self.write(AppHandler.resources[\"footer\"])\n\n #######################################################\n # Session handling\n #######################################################\n\n # If a cookie is found, restors the app from the session store. Otherwise,\n # creates a new one and creates a session. 
Cookie will be written by\n # self.end_headers() if self.newCookie is set.\n def restoreSession(self):\n cookie = self.getCookie()\n if cookie:\n sessionId = cookie[\"sessionid\"].value\n else:\n sessionId = None\n if sessionId:\n if sessionId in self.sessionStore:\n self.app = AppHandler.sessionStore[sessionId]\n self.newCookie = None\n raspact.util.log(\"Restored app for session %(sid)s.\" % {\"sid\": sessionId})\n else:\n raspact.util.log(\"WARNING: We forgot about session %(sid)s. Creating new app.\" % {\"sid\": sessionId})\n self.app = self.appFactory()\n AppHandler.sessionStore[sessionId] = self.app\n else:\n self.app = self.appFactory()\n AppHandler.sessionStore[sessionId] = self.app\n self.newCookie = http.cookies.SimpleCookie()\n self.newCookie[\"sessionid\"] = uuid.uuid1()\n raspact.util.log(\"Created new session %(sid)s and app.\" % {\"sid\": sessionId})\n\n # Reads the cookie from the HTTP headers.\n def getCookie(self):\n oldCookieStr = self.headers[\"Cookie\"]\n if oldCookieStr:\n return http.cookies.SimpleCookie(oldCookieStr)\n else:\n return None\n\n #######################################################\n # I/O helpers\n #######################################################\n\n # Helper method such that we can write strings, encoded as UTF-8.\n def write(self, string):\n self.wfile.write(bytes(string, \"utf-8\"))\n\n # Override to always set pending cookie from self.newCookie if set.\n def end_headers(self):\n if self.newCookie:\n self.send_header('Set-Cookie', self.newCookie.output(header=''))\n super(AppHandler, self).end_headers()\n\n def error(self, code, title, message):\n self.send_response(code)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.writeResource(\"header\")\n self.app.writeTitle(self)\n self.write('
%(title)s' % {\"title\": title})\n        self.write('%(msg)s

' % {\"msg\": message})\n self.writeResource(\"footer\")\n\n # Writes a named pre-loaded partial\n def writeResource(out, name):\n out.write(AppHandler.resources[name])\n\n#######################################################\n# Static service methods that can be used from anywhere\n#######################################################\n\ndef serveLogsHtml(partial):\n def handle(handler, ignoredPath):\n handler.send_response(200)\n handler.send_header(\"Content-type\", \"text/html\")\n handler.end_headers()\n\n if not partial:\n writeResource(handler, \"header\")\n handler.write('
Logs
')\n handler.app.renderLogs(handler)\n if not partial:\n writeResource(handler, \"footer\")\n return handle\n\ndef serveLogs(handler, ignoredPath):\n handler.send_response(200)\n handler.send_header(\"Content-type\", \"text/plain\")\n handler.end_headers()\n\n for log in handler.app.logs:\n handler.write(log[\"time\"] + \" \" + log[\"name\"] + \"\\n\")\n for line in log[\"records\"]:\n handler.write(\" \" + line + \"\\n\")\n\n\ndef serveAsset(handler, path):\n root = os.path.dirname(raspact.util.__file__)\n assetFile = os.path.join(root, \"resources/assets/\", path)\n if not os.path.isfile(assetFile):\n handler.error(404, \"No such file\", \"The file %(f)s does not exist.\" % {\"f\": raspact.util.escape(handler.path)})\n else:\n raspact.util.sendFile(assetFile, handler)\n","sub_path":"raspact/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":6830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"622838167","text":"from functools import lru_cache\r\nclass Solution:\r\n def solve(self, s):\r\n if(s==\"\"):\r\n return 0\r\n n = len(s)\r\n dp = [[0]*n for i in range(n)]\r\n for i in range(n):\r\n dp[i][i] = 1\r\n for length in range(1,n):\r\n for i in range(n):\r\n j=i+length\r\n if(j r:\r\n return 0\r\n if s[l] == s[r]:\r\n return rec(l+1, r-1) + 2\r\n else:\r\n return max(rec(l+1, r), rec(l, r-1))\r\n return rec(0, len(s) - 1)","sub_path":"Q1-50/Q26_Longest_Palindromic_subsequence.py","file_name":"Q26_Longest_Palindromic_subsequence.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"261961635","text":"\"\"\" RoughMovementThread.py\n Purpose: Final demo\n Author: Tiago Pimentel\n t.pimentelms@gmail.com\n Joao Felipe\n joaofelipenp@gmail.com\n Matheus Camargo\n matheusfc09@gmail.com\n CSE 467 -- Embedded Computing Systems\n WUSTL, Spring 2013\n Date: Apr., 29, 2013\n\n Description:\n Checks if a rough movement occurred and sends to arduino\n\n\"\"\"\n\nimport httplib\nfrom java.lang import Runnable\nfrom java.lang import Thread as JThread\n\nROUGH_TIME = 10000\n\nclass RoughMovementThread(Runnable):\n \n def __init__(self, arduino):\n self.arduino = arduino\n \n def run(self):\n while True:\n connection = httplib.HTTPConnection(\"sleepingbeauty.herokuapp.com\")\n connection.request(\"GET\", \"/rough_movements/last_time.txt\")\n response = connection.getresponse()\n if response.status == 200:\n self.arduino.send_rough_data(int(response.read()))\n JThread.currentThread().sleep(ROUGH_TIME);\n","sub_path":"RoughMovementThread.py","file_name":"RoughMovementThread.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"502596013","text":"# USAGE\n# python yolo_video.py --input videos/airport.mp4 --output output/airport_output.avi --yolo yolo-coco\n\n# import the necessary packages\n\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport cv2\nimport os\nfrom moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip\n\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter.ttk import Combobox \n\n\nmaster = Tk() \nmaster.geometry('550x300')\nmaster.title(\"Object Detection System\")\n\ndata=(\"apple\",\"aeroplane\",\"backpack\",\"banana\",\"baseball bat\",\"baseball 
glove\",\"bear\",\"bed\",\"bench\",\"bicycle\",\"bird\",\n\t\"boat\",\"book\",\"bottle\",\"bowl\",\"broccoli\",\"bus\",\"cake\",\"car\",\"carrot\",\"cat\",\"cell phone\",\"chair\",\"clock\",\"cow\",\"cup\",\n\t\"diningtable\",\"dog\",\"donut\",\"elephant\",\"fire hydrant\",\"fork\",\"frisbee\",\"giraffe\",\"hair drier\",\"handbag\",\"horse\",\"hot dog\",\n\t\"keyboard\",\"kite\",\"knife\",\"laptop\",\"microwave\",\"motorbike\",\"mouse\",\"orange\",\"oven\",\"parking meter\",\"person\",\"pizza\",\n\t\"pottedplant\",\"refrigerator\",\"remote\",\"sandwitch\",\"scissors\",\"sheep\",\"sink\",\"skateboard\",\"skis\",\"snowboard\",\"sofa\",\n\t\"spoon\",\"sports ball\",\"stop sign\",\"suitcase\",\"surfboard\",\"teddy bear\",\"tennis racket\",\"tie\",\"toaster\",\"toilet\",\n\t\"toothbrush\",\"traffic light\",\"train\",\"truck\",\"tvmonitor\",\"umbrella\",\"vase\",\"wine glass\",\"zebra\")\ne4 = Combobox(master,values=data)\n\nLabel(master, text='Input video name: ',font=(\"Arial Bold\",16)).grid(row=0) \nLabel(master, text='Output video name: ',font=(\"Arial Bold\",16)).grid(row=1) \nLabel(master, text='Path to YOLO: ',font=(\"Arial Bold\",16)).grid(row=2)\nLabel(master, text='Keyword to search: ',font=(\"Arial Bold\",16)).grid(row=3)\n#lbl = Label(master, text='before')\n#lbl.grid(row=4)\ne1 = Entry(master)\ne2 = Entry(master)\ne3 = Entry(master)\n#e4 = Entry(master) \ne1.grid(row=0, column=1) \ne2.grid(row=1, column=1) \ne3.grid(row=2, column=1)\ne4.grid(row=3, column=1)\ndef clicked():\n\tap = argparse.ArgumentParser()\n\t#ap.add_argument(\"-i\", \"--input\", required=True,\n\t#\thelp=\"path to input video\")\n\t#ap.add_argument(\"-o\", \"--output\", required=True,\n\t#\thelp=\"path to output video\")\n\t#ap.add_argument(\"-y\", \"--yolo\", required=True,\n\t#\thelp=\"base path to YOLO directory\")\n\tap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,\n\t\thelp=\"minimum probability to filter weak detections\")\n\tap.add_argument(\"-t\", \"--threshold\", type=float, default=0.3,\n\t\thelp=\"threshold when applyong non-maxima suppression\")\n\t#ap.add_argument(\"-k\", \"--key\", required=True, type=str, default = 'person',\n\t# help=\"key to search\")\n\targs = vars(ap.parse_args())\n\n\t# load the COCO class labels our YOLO model was trained on\n\tlabelsPath = os.path.sep.join([e3.get(), \"coco.names\"])\n\tLABELS = open(labelsPath).read().strip().split(\"\\n\")\n\n\t# initialize a list of colors to represent each possible class label\n\tnp.random.seed(42)\n\tCOLORS = np.random.randint(0, 255, size=(len(LABELS), 3),\n\t\tdtype=\"uint8\")\n\n\t# derive the paths to the YOLO weights and model configuration\n\tweightsPath = os.path.sep.join([e3.get(), \"yolov3.weights\"])\n\tconfigPath = os.path.sep.join([e3.get(), \"yolov3.cfg\"])\n\n\t# load our YOLO object detector trained on COCO dataset (80 classes)\n\t# and determine only the *output* layer names that we need from YOLO\n\tprint(\"[INFO] loading YOLO from disk...\")\n\tnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\n\tln = net.getLayerNames()\n\tln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n\t# initialize the video stream, pointer to output video file, and\n\t# frame dimensions\n\tvs = cv2.VideoCapture(e1.get())\n\twriter = None\n\t(W, H) = (None, None)\n\tst=[]\n\t# try to determine the total number of frames in the video file\n\ttry:\n\t\tprop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \\\n\t\t\telse cv2.CAP_PROP_FRAME_COUNT\n\t\ttotal = int(vs.get(prop))\n\t\tprint(\"[INFO] {} total 
frames in video\".format(total))\n\n\t# an error occurred while trying to determine the total\n\t# number of frames in the video file\n\texcept:\n\t\tprint(\"[INFO] could not determine # of frames in video\")\n\t\tprint(\"[INFO] no approx. completion time can be provided\")\n\t\ttotal = -1\n\tindex2 = 1\n\tcap = cv2.VideoCapture(e1.get())\n\tfps = cap.get(cv2.CAP_PROP_FPS)\n\tduration = total/ fps\n\t# loop over frames from the video file stream\n\twhile True:\n\t\t# read the next frame from the file\n\t\t(grabbed, frame) = vs.read()\n\t\t# if the frame was not grabbed, then we have reached the end\n\t\t# of the stream\n\t\tindex2 = index2 + 1\n\t\tif not grabbed:\n\t\t\tbreak\n\n\t\t# if the frame dimensions are empty, grab them\n\t\tif W is None or H is None:\n\t\t\t(H, W) = frame.shape[:2]\n\n\t\t# construct a blob from the input frame and then perform a forward\n\t\t# pass of the YOLO object detector, giving us our bounding boxes\n\t\t# and associated probabilities\n\t\tblob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),\n\t\t\tswapRB=True, crop=False)\n\t\tnet.setInput(blob)\n\t\tstart = time.time()\n\t\tlayerOutputs = net.forward(ln)\n\t\tend = time.time()\n\n\t\t# initialize our lists of detected bounding boxes, confidences,\n\t\t# and class IDs, respectively\n\t\tboxes = []\n\t\tconfidences = []\n\t\tclassIDs = []\n\n\t\t# loop over each of the layer outputs\n\t\tfor output in layerOutputs:\n\t\t\t# loop over each of the detections\n\t\t\tfor detection in output:\n\t\t\t\t# extract the class ID and confidence (i.e., probability)\n\t\t\t\t# of the current object detection\n\t\t\t\tscores = detection[5:]\n\t\t\t\tclassID = np.argmax(scores)\n\t\t\t\tconfidence = scores[classID]\n\t\t\t\n\n\n\n\t\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t\t# probability is greater than the minimum probability\n\t\t\t\tif confidence > args[\"confidence\"]:\n\t\t\t\t\t# scale the bounding box coordinates back relative to\n\t\t\t\t\t# the size of the image, keeping in mind that YOLO\n\t\t\t\t\t# actually returns the center (x, y)-coordinates of\n\t\t\t\t\t# the bounding box followed by the boxes' width and\n\t\t\t\t\t# height\n\t\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t\t# use the center (x, y)-coordinates to derive the top\n\t\t\t\t\t# and and left corner of the bounding box\n\t\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\t\ty = int(centerY - (height / 2))\n\t\t\n\t\t\t\t\t# update our list of bounding box coordinates,\n\t\t\t\t\t# confidences, and class IDs\n\t\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\t\tclassIDs.append(classID)\n\n\n\n\n\t \n\t\t# apply non-maxima suppression to suppress weak, overlapping\n\t\t# bounding boxes\n\t\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, args[\"confidence\"],\n\t\t\targs[\"threshold\"])\n\t\t\n\n\t\t# ensure at least one detection exists\n\t\tif len(idxs) > 0:\n\t\t\t# loop over the indexes we are keeping\n\t\t\tfor i in idxs.flatten():\n\t\t\t\tif LABELS[classIDs[i]] == e4.get():\n\t\t\t\t\t# extract the bounding box coordinates\n\t\t\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\t\t\t# draw a bounding box rectangle and label on the frame\n\t\t\t\t\tcolor = [int(c) for c in COLORS[classIDs[i]]]\n\t\t\t\t\tcv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\n\t\t\t\t\ttext = \"{}: 
{:.4f}\".format(LABELS[classIDs[i]],\n\t\t\t\t\t\tconfidences[i])\n\t\t\t\t\tcv2.putText(frame, text, (x, y - 5),\n\t\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n\t\t\t\t\tif index2 not in st:\n\t\t\t\t\t\tst.append(index2)\n\n\t\t# check if the video writer is None\n\t\tif writer is None:\n\t\t\t# initialize our video writer\n\t\t\tfourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n\t\t\twriter = cv2.VideoWriter(e2.get(), fourcc, 30,\n\t\t\t\t(frame.shape[1], frame.shape[0]), True)\n\n\t\t\t# some information on processing single frame\n\t\t\tif total > 0:\n\t\t\t\telap = (end - start)\n\t\t\t\tprint(\"[INFO] single frame took {:.4f} seconds\".format(elap))\n\t\t\t\tprint(\"[INFO] estimated total time to finish: {:.4f}\".format(\n\t\t\t\t\telap * total))\n\n\t\t# write the output frame to disk\n\t\twriter.write(frame)\n\tst2=[]\n\tst3=[]\n\tfor index2 in st:\n\t\tst2.append(int(index2*(duration/ total)))\n\tfor k in st2:\n\t\tif k not in st3:\n\t\t\tst3.append(k)\n\tfo = open(\"cut.txt\", \"w\")\n\tfo.write(e2.get()+'\\n')\n\tfor item in st3:\n\t\tfo.write(str(item)+' ')\n\tfo.close\n\n\tmessagebox.showinfo('Results',\"The item you want is at(s):\\n {}\".format(st3))\n\tprint(\"[INFO] cleaning up...\")\n\twriter.release()\n\tvs.release()\n #res = 'after:' + e1.get()\n #lbl.config(text = res)\nbtn = Button(master, text=\"Go\", bg=\"red\",font=(\"Arial Bold\",20), command=clicked)\n \nbtn.grid(row=5, column=2)\nmainloop()\n\nfile=open(\"cut.txt\", \"r\")\nname=file.readline() \nname=name[:-1] \nst6=file.readline()\nst4=st6.split(' ')\ndel st4[-1]\nst5=list(map(int,st4))\n\n\nz=0\nstart=[]\nend=[]\nstart.append(st5[0])\nlength_st5=len(st5)\nwhile z0: #若为5点型环且非首环\r\n if patient_list[i1].period_list[0].stent_list[i3].huan_list[i4-1].number_points==5: #若前环同样为五点型环\r\n new_points_list=[] #创建赋值总表\r\n point1=patient_list[i1].period_list[0].stent_list[i3].huan_list[i4-1].point_mid\r\n point2=patient_list[i1].period_list[0].stent_list[i3].huan_list[i4].point_mid\r\n for i5 in range(0,5):\r\n new_points_list_temp=[] #创建新一轮比较列表\r\n for i6 in range(0,5):\r\n new_points_list_temp.append(dot_cos(patient_list[i1].period_list[0].stent_list[i3].huan_list[i4-1].point_list[i5],\r\n patient_list[i1].period_list[0].stent_list[i3].huan_list[i4].point_list[i6],\r\n point1,\r\n point2))\r\n r=new_points_list_temp.index(max(new_points_list_temp))\r\n new_points_list.append(patient_list[i1].period_list[0].stent_list[i3].huan_list[i4].point_list[r])\r\n #循环5次后new_points_list存储五点对应信息 下面进行数据调换\r\n for i5 in range(0,5):\r\n patient_list[i1].period_list[0].stent_list[i3].huan_list[i4].point_list[i5]=new_points_list[i5]\r\n\r\n #同病人非第一时期时期 对应点关系构建\r\n for i1 in range(0,len(patient_list)): #遍历各病人\r\n for i2 in range(1,patient_list[i1].period_length): #遍历该病人各运算时期\r\n for i3 in range(0,len(patient_list[i1].period_list[i2].stent_list)):\r\n for i4 in range(0,len(patient_list[i1].period_list[i2].stent_list[i3].huan_list)):\r\n if patient_list[i1].period_list[i2].stent_list[i3].huan_list[i4].number_points==5: #若为5点型环\r\n new_points_list=[] #创建赋值总表\r\n top12_1=patient_list[i1].period_list[i2-1].stent_list[i3].huan_list[i4].point_mid\r\n top12_2=patient_list[i1].period_list[i2].stent_list[i3].huan_list[i4].point_mid\r\n for i5 in range(0,5): \r\n new_points_list_temp=[] #创建新一轮比较列表\r\n for i6 in range(0,5):\r\n new_points_list_temp.append(dot_cos(patient_list[i1].period_list[i2-1].stent_list[i3].huan_list[i4].point_list[i5],top12_1,\r\n patient_list[i1].period_list[i2].stent_list[i3].huan_list[i4].point_list[i6] ,top12_2))\r\n 
r=new_points_list_temp.index(max(new_points_list_temp))\r\n new_points_list.append(patient_list[i1].period_list[i2].stent_list[i3].huan_list[i4].point_list[r])\r\n #循环5次后new_points_list存储五点对应信息 下面进行数据调换\r\n for i5 in range(0,5):\r\n patient_list[i1].period_list[i2].stent_list[i3].huan_list[i4].point_list[i5]=new_points_list[i5]\r\n\r\n #患者重叠部分支架的运算:\r\n #自下向上检索近端支架 将低于远端支架的最高点的环标* \r\n #自上向下检索远端支架 将高于近端支架的最低点的环标* \r\n for i1 in range(0,len(patient_list)): #遍历各病人\r\n if len(patient_list[i1].period_list[0].stent_list)==2 and patient_list[i1].period_list[0].stent_list[0].stent_type==1 and patient_list[i1].period_list[0].stent_list[1].stent_type==2 :\r\n #若该患者是标准限制型支架 \r\n for i2 in range(0,patient_list[i1].period_length): #遍历该病人各运算时期\r\n z_least=patient_list[i1].period_list[i2].stent_list[0].huan_list[-1].point_mid.z #找到近端支架的末端高度\r\n z_top=patient_list[i1].period_list[i2].stent_list[1].huan_list[0].point_mid.z #找到远端支架的顶端高度\r\n for i3 in range(len(patient_list[i1].period_list[i2].stent_list[0].huan_list)-1,-1,-1): #遍历近端支架\r\n if patient_list[i1].period_list[i2].stent_list[0].huan_list[i3].point_mid.z<=z_top:\r\n patient_list[i1].period_list[i2].stent_list[0].huan_list[i3].flag=\"*\"\r\n continue\r\n else:\r\n break\r\n for i3 in range(0,len(patient_list[i1].period_list[i2].stent_list[1].huan_list)): #遍历支架末端环\r\n if patient_list[i1].period_list[i2].stent_list[1].huan_list[i3].point_mid.z>=z_least:\r\n patient_list[i1].period_list[i2].stent_list[1].huan_list[i3].flag=\"*\"\r\n continue\r\n else:\r\n break\r\n\r\n#数据集统计展示代码\r\ndef show_database():\r\n #录入数据的全集打印\r\n for i in range(0,len(patient_list)):\r\n patient_list[i].show()\r\n print()\r\n\r\n#数据3D展示代码\r\ndef show_3D():\r\n for i1 in range(0,len(patient_list)): #遍历各病人\r\n for i2 in range(0,len(patient_list[i1].period_list)): #遍历该病人各时期\r\n patient_list[i1].period_list[i2].show_3D()\r\n\r\n#remake_database用于重新计算各项数据\r\ndef remake_database():\r\n remake_zhou_angle() #重新计算轴向偏角\r\n\r\n#支架轴向偏角计算 【同一支架 三节点间构成角度】\r\ndef remake_zhou_angle():\r\n for i1 in range(0,len(patient_list)): #遍历各病人\r\n for i2 in range(0,patient_list[i1].period_length): #遍历该病人各时期\r\n for i3 in range(0,len(patient_list[i1].period_list[i2].stent_list)): #遍历病人支架\r\n patient_list[i1].period_list[i2].stent_list[i3].zhou_angle_list.clear() #重新计算前将数据清零\r\n if i3==0:\r\n all_angle=0 \r\n for i4 in range(1,len(patient_list[i1].period_list[i2].stent_list[i3].huan_list)-1): #遍历病人支架环(去除首尾两点仅对中间角度进行运算)\r\n angle_temp=round(angle(\r\n patient_list[i1].period_list[i2].stent_list[i3].huan_list[i4-1].point_mid,\r\n patient_list[i1].period_list[i2].stent_list[i3].huan_list[i4].point_mid,\r\n patient_list[i1].period_list[i2].stent_list[i3].huan_list[i4+1].point_mid,\r\n patient_list[i1].period_list[i2].stent_list[i3].huan_list[i4].point_mid\r\n ),2)\r\n all_angle=all_angle+angle_temp\r\n patient_list[i1].period_list[i2].stent_list[i3].zhou_angle_list.append(angle_temp)\r\n patient_list[i1].period_list[i2].stent_list[i3].zhou_angle_all=all_angle\r\n else:\r\n all_angle=0 \r\n for i4 in range(1,len(patient_list[i1].period_list[i2].stent_list[i3].huan_list)-1): #遍历病人支架环(去除首尾两点仅对中间角度进行运算)\r\n angle_temp=round(angle(\r\n patient_list[i1].period_list[i2].stent_list[i3].huan_list[i4-1].point_mid,\r\n patient_list[i1].period_list[i2].stent_list[i3].huan_list[i4].point_mid,\r\n patient_list[i1].period_list[i2].stent_list[i3].huan_list[i4+1].point_mid,\r\n patient_list[i1].period_list[i2].stent_list[i3].huan_list[i4].point_mid\r\n ),2)\r\n all_angle=all_angle+angle_temp\r\n 
patient_list[i1].period_list[i2].stent_list[i3].zhou_angle_list.append(angle_temp)\r\n patient_list[i1].period_list[i2].stent_list[i3].zhou_angle_all=all_angle\r\n tian=HowLong(patient_list[i1].period_list[0].data,patient_list[i1].period_list[patient_list[i1].period_length-1].data) #记录病例首尾时间\r\n return \r\n\r\n#输出支架夹角数据\r\ndef write_zhou_angle():\r\n remake_zhou_angle() #重新计算支架单体轴向夹角\r\n #打开 重写show_database文件\r\n write_workbook = xlsxwriter.Workbook(route+\"\\\\zhou_angle.xlsx\")\r\n write_worksheet = write_workbook.add_worksheet('Sheet1')\r\n number=0\r\n for i1 in range(0,len(patient_list)): #遍历各病人\r\n write_worksheet.write(number,0,str(patient_list[i1].name))#写入病人姓名\r\n pianyi=0\r\n for i2 in range(0,patient_list[i1].period_length): #遍历该病人各时期\r\n write_worksheet.write(number+2,i2,patient_list[i1].period_list[i2].data)#写入病人时期\r\n for i3 in range(0,len(patient_list[i1].period_list[i2].stent_list)): #遍历病人支架\r\n l1=len(patient_list[i1].period_list[i2].stent_list[0].huan_list)\r\n pianyi=l1\r\n if i3==0:\r\n write_worksheet.write(number+1,i2,\"近端支架\")#写入支架类型\r\n write_worksheet.write(number+1+l1,i2,\"和值\")#写入和值\r\n all_angle=0 \r\n for i4 in range(0,len(patient_list[i1].period_list[i2].stent_list[i3].zhou_angle_list)): #遍历病人支架环(去除首尾两点仅对中间角度进行运算)\r\n angle_temp=patient_list[i1].period_list[i2].stent_list[i3].zhou_angle_list[i4]\r\n all_angle=all_angle+angle_temp\r\n write_worksheet.write(number+3+i4,i2,round(angle_temp,2))\r\n write_worksheet.write(number+2+l1,i2,round(all_angle,2))\r\n else:\r\n l2=len(patient_list[i1].period_list[i2].stent_list[1].huan_list)\r\n pianyi=pianyi+l2\r\n write_worksheet.write(number+3+l1,i2,\"远端支架\")#写入支架类型\r\n write_worksheet.write(number+4+l1,i2,patient_list[i1].period_list[i2].data)#写入病人时期\r\n write_worksheet.write(number+3+l1+l2,i2,\"和值\")#写入和值\r\n all_angle=0 \r\n for i4 in range(0,len(patient_list[i1].period_list[i2].stent_list[i3].zhou_angle_list)): #遍历病人支架环(去除首尾两点仅对中间角度进行运算)\r\n angle_temp=patient_list[i1].period_list[i2].stent_list[i3].zhou_angle_list[i4]\r\n all_angle=all_angle+angle_temp\r\n write_worksheet.write(number+5+l1+i4,i2,round(angle_temp,2))\r\n write_worksheet.write(number+4+l1+l2,i2,round(all_angle,2))\r\n number=number+pianyi+6\r\n write_workbook.close()\r\n return \r\n\r\n#支架夹角数据占比分析 #输出支架夹角数据的统计量\r\ndef write_statistics_zhou_angle():\r\n #打开 重写show_statistics_zhou_angle文件\r\n write_workbook = xlsxwriter.Workbook(route+\"\\\\show_statistics_zhou_angle.xlsx\")\r\n write_worksheet = write_workbook.add_worksheet('Sheet1')\r\n number=0 #偏移量记录\r\n\r\n write_worksheet.write(number,0,\"非限制型支架\")\r\n write_worksheet.write(number,1,\"日期间距\")\r\n write_worksheet.write(number,2,\"近端支架和值平均变化速率\")\r\n write_worksheet.write(number,3,\"限制型支架\")\r\n write_worksheet.write(number,4,\"日期间距\")\r\n write_worksheet.write(number,5,\"近端支架和值平均变化速率\")\r\n write_worksheet.write(number,6,\"远端支架和值平均变化速率\")\r\n\r\n number=1\r\n \r\n number_a=0 #记录非限制型\r\n number_b=0 #记录限制型\r\n for i1 in range(0,len(patient_list)): #遍历各病人\r\n if patient_list[i1].xianzhixing==False: #若为非限制性支架\r\n tian=HowLong(patient_list[i1].period_list[0].data,patient_list[i1].period_list[patient_list[i1].period_length-1].data) #记录病例首尾时间\r\n name=patient_list[i1].name\r\n delta_zhou_angle=patient_list[i1].period_list[patient_list[i1].period_length-1].stent_list[0].zhou_angle_all - patient_list[i1].period_list[0].stent_list[0].zhou_angle_all\r\n v_change=delta_zhou_angle/tian\r\n write_worksheet.write(number+number_a,0,str(name))\r\n write_worksheet.write(number+number_a,1,tian)\r\n 
write_worksheet.write(number+number_a,2,v_change)\r\n number_a=number_a+1\r\n else: #若为限制型支架\r\n tian=HowLong(patient_list[i1].period_list[0].data,patient_list[i1].period_list[patient_list[i1].period_length-1].data) #记录病例首尾时间\r\n name=patient_list[i1].name\r\n delta_zhou_angle1=patient_list[i1].period_list[patient_list[i1].period_length-1].stent_list[0].zhou_angle_all - patient_list[i1].period_list[0].stent_list[0].zhou_angle_all\r\n delta_zhou_angle2=patient_list[i1].period_list[patient_list[i1].period_length-1].stent_list[1].zhou_angle_all - patient_list[i1].period_list[0].stent_list[1].zhou_angle_all\r\n v_change1=delta_zhou_angle1/tian\r\n v_change2=delta_zhou_angle2/tian\r\n write_worksheet.write(number+number_b,3,str(name))\r\n write_worksheet.write(number+number_b,4,tian)\r\n write_worksheet.write(number+number_b,5,v_change1)\r\n write_worksheet.write(number+number_b,6,v_change2)\r\n number_b=number_b+1\r\n \r\n\r\n write_workbook.close()\r\n return \r\n\r\n#支架环模拟半径分析 #输出支架首尾环形状数据 使用六边形圈图进行绘图\r\ndef write_huan_shape():\r\n #打开 重写huan_shape文件\r\n write_workbook = xlsxwriter.Workbook(route+\"\\\\huan_shape.xlsx\")\r\n write_worksheet = write_workbook.add_worksheet('Sheet1')\r\n number=0 \r\n number_temp=0\r\n for i1 in range(0,len(patient_list)): #遍历各病人\r\n write_worksheet.write(number,0,str(patient_list[i1].name))#写入病人姓名\r\n pianyi=0\r\n for i2 in range(0,patient_list[i1].period_length): #遍历该病人各时期\r\n number_temp=number\r\n pianyi=0\r\n for i3 in range(0,len(patient_list[i1].period_list[i2].stent_list)):\r\n F_huan=patient_list[i1].period_list[i2].stent_list[i3].huan_list[0] #首环\r\n L_huan=patient_list[i1].period_list[i2].stent_list[i3].huan_list[-1] #尾环\r\n pianyi=pianyi+F_huan.number_points+L_huan.number_points+6\r\n #首环打印\r\n str_temp=ALL_stent_type[int(patient_list[i1].period_list[i2].stent_list[i3].stent_type)]+\"首环\"\r\n write_worksheet.write(number_temp+1,i2,str_temp)#写入支架环类型\r\n write_worksheet.write(number_temp+2,i2,patient_list[i1].period_list[i2].data)#写入病人时期\r\n for i4 in range(0,F_huan.number_points):\r\n write_worksheet.write(number_temp+3+i4,i2,F_huan.r_list[i4])\r\n number_temp=number_temp+2+F_huan.number_points\r\n #尾环打印\r\n str_temp=ALL_stent_type[int(patient_list[i1].period_list[i2].stent_list[i3].stent_type)]+\"尾环\"\r\n write_worksheet.write(number_temp+1,i2,str_temp)#写入支架环类型\r\n write_worksheet.write(number_temp+2,i2,patient_list[i1].period_list[i2].data)#写入病人时期\r\n for i4 in range(0,L_huan.number_points):\r\n write_worksheet.write(number_temp+3+i4,i2,L_huan.r_list[i4])\r\n number_temp=number_temp+2+L_huan.number_points\r\n number=number+pianyi #number移位\r\n\r\n write_workbook.close()\r\n return \r\n\r\n\r\n#内存数据初始化部分:\r\nread(route+\"\\\\database.xlsx\") #读取自身database \r\narrangement() #进行整理\r\n\r\n#本地数据库初始化部分:\r\n#db = pymysql.connect(host=\"localhost\",user=\"root\",password=\"baimatengqq2\",database=\"database_mysql\") # 打开数据库链接\r\n#cursor = db.cursor() # 使用 cursor() 方法创建一个游标对象 cursor\r\n\r\n#【网络交互函数】\r\n\r\n#本地信息展示函数\r\ndef show_all_inf():\r\n print(\"the range is \",len(inf_list))\r\n for i in range(0,len(inf_list)):\r\n print(\"IP:\",inf_list[i].ip,\"socket:\",inf_list[i].sock,\"ID\",inf_list[i].id)\r\n return \r\n\r\n#暂存展示函数\r\ndef show_all_inf_temp():\r\n print(\"the range is \",len(inf_temp_list))\r\n for i in range(0,len(inf_temp_list)):\r\n print(\"IP:\",inf_temp_list[i].ip,\"socket:\",inf_temp_list[i].sock,\"ID\",inf_temp_list[i].id)\r\n return \r\n\r\n#接收线程\r\ndef Sever(): \r\n s = socket(AF_INET, SOCK_STREAM)\r\n s.bind((inf_self.ip,inf_self.sock)) 
\r\n s.listen(5) \r\n\r\n while True: \r\n #信息标识码 1 : 添加LAN身份请求\r\n #信息标识码 2 : 组网传输请求\r\n #信息标识码 3 : database数据共享请求\r\n #print(\"waitting for a new connection\")\r\n conn, addr = s.accept() #等待链接 阻塞本线程\r\n #print(\"Accept new connection from %s:%s\" % addr) \r\n sentence=conn.recv(1024).decode() #信息标识码读取\r\n temp_list=sentence.split()\r\n if temp_list[0]==\"1\": #信息标识码 1 : 添加LAN身份请求\r\n #print(\"添加LAN身份请求\")\r\n inf_temp=inf(temp_list[1],temp_list[2],temp_list[3]) #创建信息实例\r\n inf_list.append(inf_temp) #追加该身份信息\r\n show_all_inf()\r\n #print(\"更新完成\")\r\n elif temp_list[0]=='2': #信息标识码 2 : 组网传输请求\r\n #print(\"组网传输请求\")\r\n inf_temp_list.clear() #清空原有temp_ip表\r\n for i in range(0,int((len(temp_list)-1)/3)):\r\n inf_temp=inf(temp_list[3*i+1],temp_list[3*i+2],temp_list[3*i+3]) #创建信息实例\r\n inf_temp_list.append(inf_temp) #追加该身份信息\r\n show_all_inf_temp()\r\n #print(\"更新完成\")\r\n elif temp_list[0]=='3': #信息标识码 3 : database数据共享请求\r\n #print(\"数据共享请求\")\r\n filesize = str(os.path.getsize(route+\"\\\\database.xlsx\")) #获取本地database文件大小\r\n conn.send(filesize.encode()) #传输本地database文件大小\r\n f = open(route+\"\\\\database.xlsx\",'rb') #打开本地database文件\r\n for line in f: #传输本地database文件\r\n conn.send(line)\r\n f.close() #进行文件关闭\r\n elif temp_list[0]=='4': #信息标识码 4 : 洪泛搜索请求\r\n patient_id=temp_list[-1] #取出目标病人id\r\n org_ip=temp_list[-3] #取出原始主机的ip\r\n org_socket=int(temp_list[-2]) #取出原始主机的端口号\r\n have_patient_id=False #查询本地病人信息id标识\r\n TTL=int(temp_list[1]) #取出TTL值\r\n inf_ed_list=[] #取出已访问inf列表\r\n for i in range(0,int((len(temp_list)-3)/2)):\r\n inf_temp=inf(temp_list[2+2*i],temp_list[2+2*i+1],\"unknown\")\r\n inf_ed_list.append(inf_temp)\r\n patient_temp='' #暂存病人数据 \r\n for i in range(0,len(patient_list)):\r\n if patient_list[i].ID == patient_id: #若发现所需id\r\n patient_temp=patient_list[i] #病例赋值\r\n have_patient_id=True #更改标识\r\n break\r\n if have_patient_id==True: #尝试写入temp.xlsx并传回数据 之后删除本地temp文件\r\n #组建请求报文\r\n data0=\"5\" #信息标识码 5 : 洪泛返回链接\r\n data0=data0+\" \"+inf_self.ip+\" \"+str(inf_self.sock)+\" \"+inf_self.id+\" \"+patient_id #data0组建\r\n target_ip=org_ip #获取目标ip\r\n target_socket=org_socket #获取目标端口号\r\n s_temp = socket(AF_INET, SOCK_STREAM)\r\n flag=s_temp.connect_ex((target_ip, int(target_socket))) #发起TCP链接\r\n jishu=0\r\n name=\"flooding_temp.xlsx\" #本地中间传输文件命名\r\n patient_temp.write(route,name) #向本地文件夹写入中间传输文件\r\n filesize = str(os.path.getsize(route+\"\\\\\"+name)) #获取本地中间传输文件大小\r\n while True:\r\n jishu=jishu+1\r\n if flag==0:\r\n s_temp.send(data0.encode()) #传输指令标识信息\r\n s_temp.send(filesize.encode()) #传输中间传输文件大小\r\n f = open(route+\"\\\\\"+name,'rb') #打开中间传输文件\r\n for line in f: #传输本地database文件\r\n s_temp.send(line)\r\n f.close() #关闭文件\r\n s_temp.close() #发送端成功传输信息,关闭TCP链接\r\n os.remove(route+\"\\\\\"+name) #删除中间文件\r\n break\r\n else:\r\n if jishu>3:\r\n break\r\n flag=s_temp.connect_ex((target_ip, int(target_socket))) #重新发起TCP链接\r\n else: #判断TTL-1情况 检索发送序列 增添自己标识后将其发送\r\n TTL=TTL-1\r\n if TTL>0: #TTL仍然存活,检索发送序列进行发送\r\n data0=\"4\" #信息标识码 4 : 洪泛请求连接\r\n data0=data0+\" \"+str(TTL)+\" \"+inf_self.ip+\" \"+str(inf_self.id) #data0加入自身信息\r\n for i in range(2,len(temp_list)):\r\n data0=data0+\" \"+temp_list[i] #data0加入原始信息\r\n for i in range(0,len(inf_flooding_list)): #该节点遍历洪泛peer列表\r\n target_ip=inf_flooding_list[i].ip #获取目标ip\r\n target_socket=inf_flooding_list[i].sock #获取目标端口号\r\n same_flag=False #重复性检查标志位\r\n for i1 in range(0,len(inf_ed_list)): #遍历已经过节点列表进行重复性检测\r\n if target_ip==inf_ed_list[i1].ip and target_socket==inf_ed_list[i1].sock:\r\n same_flag==True #该节点已经历过查找\r\n break\r\n if 
same_flag==False: #若该节点从未经历过查找\r\n s_temp = socket(AF_INET, SOCK_STREAM)\r\n flag=s_temp.connect_ex((target_ip, int(target_socket))) #发起TCP链接\r\n jishu=0\r\n while True:\r\n jishu=jishu+1\r\n if flag==0:\r\n s_temp.send(data0.encode()) #传输指令标识信息\r\n s_temp.close() #发送端成功传输信息,关闭TCP链接\r\n break\r\n else:\r\n if jishu>3:\r\n break\r\n flag=s.connect_ex((target_ip, int(target_socket))) #重新发起TCP链接\r\n elif temp_list[0]=='5': #信息标识码 5 : 洪泛文件传回请求\r\n from_ip=temp_list[-4] #来源ip储存\r\n from_socket=temp_list[-3] #来源sock储存\r\n from_id=temp_list[-2] #来源id储存\r\n patient_id=temp_list[-1] #传回patient_id储存\r\n data_range = conn.recv(1024) #首次获取传输文件长度信息\r\n file_total_size = int(data_range.decode()) #获取传输文件长度\r\n received_size = 0 #记录已获取长度\r\n name=\"flooding_temp.xlsx\" #中间传输文件名\r\n f = open(route+\"\\\\\"+name, 'wb') #打开临时储存文件\r\n while received_size < file_total_size: \r\n data = conn.recv(1024)\r\n f.write(data)\r\n received_size += len(data) \r\n f.close() #关闭文件\r\n read(route+\"\\\\\"+name) #进行文件读取 和 校验\r\n os.remove(route+\"\\\\\"+name) #删除临时文件\r\n print(\"\")\r\n print(\"recieve:\",patient_id,\"from\",from_ip,from_socket,from_id) #回执信息打印\r\n\r\n#分布式网络共享模块\r\nthread_sever=threading.Thread(target=Sever, args=())\r\nthread_sever.start()\r\n\r\n\r\n#数据库模块\r\n\r\n# 构建delete_db函数 实现病人信息的级联删除\r\ndef delete_db(ID):\r\n sql_1=\"delete from patient where patient.ID=\"+ID\r\n try:\r\n # 执行sql语句\r\n cursor.execute(sql_1)\r\n # 提交到数据库执行\r\n db.commit()\r\n except:\r\n print(\"ERROR in\",sql_1)\r\n db.rollback\r\n\r\n# 构建delete_db_all函数 实现数据库清空\r\ndef delete_db_all():\r\n sql_1=\"delete from patient\"\r\n try:\r\n # 执行sql语句\r\n cursor.execute(sql_1)\r\n # 提交到数据库执行\r\n db.commit()\r\n except:\r\n print(\"ERROR in\",sql_1)\r\n db.rollback\r\n\r\n# 构建ini_db函数 使用本地内存数据 初始化数据库信息\r\ndef ini_db():\r\n delete_db_all() #级联删除数据库所有信息\r\n for i1 in range(0,len(patient_list)): #遍历病人列表\r\n patient_temp=patient_list[i1] #取出病人实例\r\n name=patient_temp.name #姓名赋值\r\n sex=patient_temp.sex #性别赋值\r\n ID=patient_temp.ID #ID赋值\r\n calculation=patient_temp.calculation #可计算性赋值\r\n xianzhixing=patient_temp.xianzhixing #限制型判别赋值\r\n sql_1=\"insert into patient values(\\\"\"+ID+\"\\\",\\\"\"+sex+\"\\\",\\\"\"+name+\"\\\",\"+str(calculation)+\",\"+str(xianzhixing)+\")\"\r\n try:\r\n # 执行sql语句\r\n cursor.execute(sql_1)\r\n # 提交到数据库执行\r\n db.commit()\r\n except:\r\n print(\"ERROR in:\",name,ID,sql_1)\r\n db.rollback\r\n continue\r\n \r\n for i2 in range(0,len(patient_temp.period_list)):\r\n period_temp=patient_temp.period_list[i2] #取特定时期\r\n period_ID=ID+\".\"+str(i2) #构造时期ID\r\n data=period_temp.data #取data\r\n top12_x=period_temp.top12_x #取top12_x坐标\r\n top12_y=period_temp.top12_y #取top12_y坐标\r\n top12_z=period_temp.top12_z #取top12_z坐标\r\n sql_2_1=\"insert into patient_period values(\\\"\"+ID+\"\\\",\\\"\"+period_ID+\"\\\"\"+\")\"\r\n sql_2_2=\"insert into period values(\\\"\"+period_ID+\"\\\",\\\"\"+data+\"\\\",\"+str(top12_x)+\",\"+str(top12_y)+\",\"+str(top12_z)+\")\"\r\n try:\r\n cursor.execute(sql_2_1)\r\n db.commit()\r\n except:\r\n print(\"ERROR in:\",sql_2_1)\r\n db.rollback\r\n continue\r\n try:\r\n cursor.execute(sql_2_2)\r\n db.commit()\r\n except:\r\n print(\"ERROR in:\",sql_2_2)\r\n db.rollback\r\n continue\r\n \r\n for i3 in range(0,len(period_temp.stent_list)):\r\n stent_temp=period_temp.stent_list[i3]\r\n stent_ID=period_ID+\".\"+str(i3)\r\n stent_type=stent_temp.stent_type\r\n stent_shape=stent_temp.stent_shape\r\n sql_3_1=\"insert into period_stent values(\\\"\"+period_ID+\"\\\",\\\"\"+stent_ID+\"\\\"\"+\")\"\r\n 
sql_3_2=\"insert into stent values(\\\"\"+stent_ID+\"\\\",\"+str(stent_type)+\",\"+str(stent_shape)+\")\"\r\n try:\r\n cursor.execute(sql_3_1)\r\n db.commit()\r\n except:\r\n print(\"ERROR in:\",sql_3_1)\r\n db.rollback\r\n continue\r\n try:\r\n cursor.execute(sql_3_2)\r\n db.commit()\r\n except:\r\n print(\"ERROR in:\",sql_3_2)\r\n db.rollback\r\n continue\r\n \r\n for i4 in range(0,len(stent_temp.huan_list)):\r\n huan_temp=stent_temp.huan_list[i4]\r\n huan_ID=stent_ID+\".\"+str(i4)\r\n flag=huan_temp.flag\r\n point_mid_x=huan_temp.point_mid.x\r\n point_mid_y=huan_temp.point_mid.y\r\n point_mid_z=huan_temp.point_mid.z\r\n sql_4_1=\"insert into stent_huan values(\\\"\"+stent_ID+\"\\\",\\\"\"+huan_ID+\"\\\"\"+\")\"\r\n sql_4_2=\"insert into huan values(\\\"\"+huan_ID+\"\\\",\\\"\"+flag+\"\\\",\"+str(point_mid_x)+\",\"+str(point_mid_y)+\",\"+str(point_mid_z)+\")\"\r\n try:\r\n cursor.execute(sql_4_1)\r\n db.commit()\r\n except:\r\n print(\"ERROR in:\",sql_4_1)\r\n db.rollback\r\n continue\r\n try:\r\n cursor.execute(sql_4_2)\r\n db.commit()\r\n except:\r\n print(\"ERROR in:\",sql_4_2)\r\n db.rollback\r\n continue\r\n\r\n# 提供更改任意ID的calculation标识方法\r\ndef change_patient_calculation_db(ID,calculation_temp):\r\n sql_1=\"UPDATE patient SET calculation=\"+str(calculation_temp)+\" where ID=\"+ID\r\n try:\r\n cursor.execute(sql_1)\r\n db.commit()\r\n except:\r\n print(\"ERROR in\",sql_1)\r\n db.rollback\r\n\r\n# 提供更改任意ID的xianzhixing标识方法\r\ndef change_patient_xianzhixing_db(ID,xianzhixing_temp):\r\n sql_1=\"UPDATE patient SET xianzhixing=\"+str(xianzhixing_temp)+\" where ID=\"+ID\r\n try:\r\n cursor.execute(sql_1)\r\n db.commit()\r\n except:\r\n print(\"ERROR in\",sql_1)\r\n db.rollback\r\n\r\n# 查询函数 提供直接查询方法\r\ndef select_SQL(str_sql):\r\n try:\r\n cursor.execute(str_sql)\r\n data = cursor.fetchall()\r\n print(data)\r\n db.commit()\r\n except:\r\n print(\"ERROR in\",str_sql)\r\n db.rollback\r\n\r\n#查询函数 提供病人信息查询方法\r\ndef select_patient_db(ID):\r\n sql_1=\"select * from patient where ID=\"+str(ID)\r\n try:\r\n cursor.execute(sql_1)\r\n data = cursor.fetchall()\r\n print(data)\r\n db.commit()\r\n except:\r\n print(\"ERROR in\",sql_1)\r\n db.rollback\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''\r\n# 系统登录部分\r\nhost_temp=input(\"please input the name:\")\r\npassword_temp=input(\"please input the password:\")\r\nflag_temp=1\r\nwhile True:\r\n sql_1=\"select * from user where name=\"+\"\\\"\"+host_temp+\"\\\"\"+\" and password=\"+\"\\\"\"+password_temp+\"\\\"\"\r\n cursor.execute(sql_1)\r\n data = cursor.fetchall()\r\n if len(data)==0:\r\n print(\"password or name ERROR!!!\")\r\n host_temp=input(\"please input the host:\")\r\n password_temp=input(\"please input the password:\")\r\n else:\r\n print(\"welcome back sir\")\r\n jurisdiction.append(data[0][2])\r\n jurisdiction.append(data[0][3])\r\n jurisdiction.append(data[0][4])\r\n jurisdiction.append(data[0][5])\r\n jurisdiction.append(data[0][6])\r\n break\r\n'''\r\n\r\n#发送线程 [同时负责程序控制]\r\n\r\nwhile True:\r\n # 发送端设置:\r\n # show_database 显示本地database\r\n # show_3D 显示本地3D文件\r\n # write_zhou_angle 写出支架轴向夹角文件 zhou_angle\r\n # write_statistics_zhou_angle 写出支架轴向夹角文件 statistics_zhou_angle\r\n # write_huan_shape 写出支架首尾环文件 huan_shape\r\n # -1 - 查看本地ip表 ip-temp表\r\n # 0 - 发送添加身份请求\r\n # 1 - LAN发送组网传输回馈 \r\n # 2.1 - database模块数据传输事务添加请求 使用ip-list\r\n # 2.2 - database模块数据传输事务添加请求 使用ip-temp-list\r\n # 3 - 洪泛搜索特定ID患者信息请求 使用inf_flooding_list\r\n\r\n a = input(\"please input oder:\")\r\n if a == \"-1\":\r\n print(\"ip-list\")\r\n show_all_inf()\r\n print(\"ip-temp-list\")\r\n 
show_all_inf_temp()\r\n elif a == \"0\": #添加身份请求 [输入目标主机IP与SOCKET进行身份信息添加]\r\n target_ip=input(\"please input target_ip:\") #输入目标ip\r\n target_socket=input(\"please input target_socket:\") #输入目标端口号\r\n s = socket(AF_INET, SOCK_STREAM)\r\n data=\"1\"+\" \"+inf_self.ip+\" \"+str(inf_self.sock)+\" \"+inf_self.id #组建传输信息\r\n\r\n flag=s.connect_ex((target_ip, int(target_socket))) #发起TCP链接\r\n jishu=0\r\n while True:\r\n jishu=jishu+1\r\n if flag==0:\r\n s.send(data.encode())\r\n s.close() #发送端成功传输信息,关闭TCP链接\r\n jishu=0 #jishu=0表明正常发送\r\n break\r\n else:\r\n if jishu>3:\r\n jishu=-1 #jishu=-1表明发送3次失败,放弃本次传输\r\n break\r\n time.sleep(1)\r\n flag=s.connect_ex((target_ip, int(target_socket))) #重新发起TCP链接\r\n if jishu==0:\r\n print(target_ip,target_socket,\"successfully sent\")\r\n elif jishu==-1:\r\n print(target_ip,target_socket,\"fail in send\")\r\n elif a == \"1\": #LAN发送组网传输回馈 [将本地inf_list表广播至本网络节点]\r\n #组建data报文\r\n data='2'\r\n for i in range(0,len(inf_list)):\r\n data=data+\" \"+inf_list[i].ip+\" \"+str(inf_list[i].sock)+\" \"+inf_list[i].id\r\n #进行TCP文件传输\r\n for i in range(0,len(inf_list)):\r\n if inf_list[i].ip==inf_self.ip and inf_list[i].sock==inf_self.sock:\r\n continue\r\n else:\r\n target_ip=inf_list[i].ip\r\n target_socket=inf_list[i].sock\r\n s = socket(AF_INET, SOCK_STREAM)\r\n\r\n flag=s.connect_ex((target_ip, int(target_socket))) #发起TCP链接\r\n jishu=0\r\n while True:\r\n jishu=jishu+1\r\n if flag==0:\r\n s.send(data.encode())\r\n s.close() #发送端成功传输信息,关闭TCP链接\r\n jishu=0 #jishu=0表明正常发送\r\n break\r\n else:\r\n if jishu>3:\r\n jishu=-1 #jishu=-1表明发送3次失败,放弃本次传输\r\n break\r\n time.sleep(0.1)\r\n flag=s.connect_ex((target_ip, int(target_socket))) #重新发起TCP链接\r\n if jishu==0:\r\n print(target_ip,target_socket,\"successfully sent\")\r\n elif jishu==-1:\r\n print(target_ip,target_socket,\"fail in send\")\r\n elif a == '2.1': #请求进行database共享 按ip-list寻址\r\n #组建请求报文\r\n data0='3' \r\n str0=\" temp temp\"\r\n data0=data0+\" temp temp\"\r\n data0=str(data0)\r\n #进行TCP文件传输\r\n for i in range(0,len(inf_list)):\r\n if inf_list[i].ip==inf_self.ip and inf_list[i].sock==inf_self.sock:\r\n continue\r\n else:\r\n target_ip=inf_list[i].ip\r\n target_socket=inf_list[i].sock\r\n s = socket(AF_INET, SOCK_STREAM)\r\n flag=s.connect_ex((target_ip, int(target_socket))) #发起TCP链接\r\n jishu=0\r\n while True:\r\n jishu=jishu+1\r\n if flag==0: #连接正常 开始进行database传输 获得的database存放于database-temp中 用完即删\r\n s.send(data0.encode()) #发送标识信息 信息标识码 - 3\r\n data_range = s.recv(1024) #首次获取传输文件长度信息\r\n file_total_size = int(data_range.decode()) #获取传输文件长度\r\n received_size = 0 #记录已获取长度\r\n f = open(route+\"\\\\database_temp.xlsx\", 'wb') #打开临时储存文件\r\n while received_size < file_total_size: #未接收完时:\r\n data = s.recv(1024)\r\n f.write(data)\r\n received_size += len(data)\r\n #接收完成 关闭TCP 与文件\r\n s.close()\r\n f.close()\r\n jishu=0\r\n #进行文件读取 \r\n read(route+\"\\\\database_temp.xlsx\")\r\n #进行文件校验\r\n arrangement()\r\n #进删除临时文件\r\n os.remove(route+\"\\\\database_temp.xlsx\")\r\n break\r\n else:\r\n if jishu>3:\r\n jishu=-1\r\n break\r\n time.sleep(0.1)\r\n flag=s.connect_ex((target_ip, int(target_socket))) #重新发起TCP链接\r\n if jishu==0:\r\n print(target_ip,target_socket,\"successfully sent\")\r\n elif jishu==-1:\r\n print(target_ip,target_socket,\"fail in send\")\r\n elif a == '2.2': #请求进行database共享 按ip-temp-list寻址\r\n #组建请求报文\r\n data0='3' \r\n str0=\" temp temp\"\r\n data0=data0+\" temp temp\"\r\n data=str(data0)\r\n #进行TCP文件传输\r\n for i in range(0,len(inf_temp_list)):\r\n if inf_temp_list[i].ip==inf_self.ip and 
inf_temp_list[i].sock==inf_self.sock:\r\n continue\r\n else:\r\n target_ip=inf_temp_list[i].ip\r\n target_socket=inf_temp_list[i].sock\r\n s = socket(AF_INET, SOCK_STREAM)\r\n flag=s.connect_ex((target_ip, int(target_socket))) #发起TCP链接\r\n jishu=0\r\n while True:\r\n jishu=jishu+1\r\n if flag==0: #连接正常 开始进行database传输 获得的database存放于database-temp中 用完即删\r\n s.send(data0.encode()) #发送标识信息 信息标识码 - 3\r\n data_range = s.recv(1024) #首次获取传输文件长度信息\r\n file_total_size = int(data_range.decode()) #获取传输文件长度\r\n received_size = 0 #记录已获取长度\r\n f = open(route+\"\\\\database_temp.xlsx\", 'wb') #打开临时储存文件\r\n while received_size < file_total_size: #未接收完时:\r\n data = s.recv(1024)\r\n f.write(data)\r\n received_size += len(data)\r\n #接收完成 关闭TCP 关闭文件\r\n s.close()\r\n f.close()\r\n jishu=0\r\n #进行文件读取 和 校验\r\n read(route+\"\\\\database_temp.xlsx\")\r\n #删除临时文件\r\n os.remove(route+\"\\\\database_temp.xlsx\")\r\n break\r\n else:\r\n if jishu>3:\r\n jishu=-1\r\n break\r\n time.sleep(0.1)\r\n flag=s.connect_ex((target_ip, int(target_socket))) #重新发起TCP链接\r\n if jishu==0:\r\n print(target_ip,target_socket,\"successfully sent\")\r\n elif jishu==-1:\r\n print(target_ip,target_socket,\"fail in send\")\r\n elif a == \"3\": #洪泛网络搜寻特定ID数据\r\n #组建请求报文\r\n data0=\"4\" #信息标识码4 :洪泛数据请求\r\n TTL_temp=input(\"please define the TTL:\") #定义TTL\r\n patient_id=input(\"please input the patient's ID:\") #输入病人ID\r\n data0=data0+\" \"+str(TTL_temp)+\" \"+inf_self.ip+\" \"+str(inf_self.sock)+\" \"+patient_id\r\n for i in range(0,len(inf_flooding_list)):\r\n target_ip=inf_flooding_list[i].ip #获取目标ip\r\n target_socket=inf_flooding_list[i].sock #获取目标端口号\r\n s = socket(AF_INET, SOCK_STREAM)\r\n flag=s.connect_ex((target_ip, int(target_socket))) #发起TCP链接\r\n jishu=0\r\n while True:\r\n jishu=jishu+1\r\n if flag==0:\r\n s.send(data0.encode())\r\n s.close() #发送端成功传输信息,关闭TCP链接\r\n jishu=0 #jishu=0表明正常发送\r\n break\r\n else:\r\n if jishu>3:\r\n jishu=-1 #jishu=-1表明发送3次失败,放弃本次传输\r\n break\r\n time.sleep(1)\r\n flag=s.connect_ex((target_ip, int(target_socket))) #重新发起TCP链接\r\n if jishu==0:\r\n print(target_ip,target_socket,\"successfully sent\")\r\n elif jishu==-1:\r\n print(target_ip,target_socket,\"fail in send\")\r\n elif a == \"show_database\":\r\n show_database()\r\n elif a == \"show_3D\":\r\n show_3D()\r\n elif a == \"write_zhou_angle\":\r\n write_zhou_angle()\r\n elif a == \"write_statistics_zhou_angle\":\r\n write_statistics_zhou_angle()\r\n elif a == \"write_huan_shape\":\r\n write_huan_shape()\r\n elif a == \"show_flooding_list\":\r\n for i in range(0,len(inf_flooding_list)):\r\n print(\"IP:\",inf_flooding_list[i].ip,\"socket:\",inf_flooding_list[i].sock,\"ID\",inf_flooding_list[i].id)\r\n elif a == \"clear_flooding_list\":\r\n inf_flooding_list.clear()\r\n elif a == \"append_flooding_list\":\r\n peer_ip=input(\"please input peer_ip:\") #输入目标ip\r\n peer_socket=input(\"please input peer_socket:\") #输入目标端口号\r\n inf_temp=inf(peer_ip,int(peer_socket),\"unknown\")\r\n inf_flooding_list.append(inf_temp) #追加peer_temp\r\n elif a == \"ini_db\":\r\n #权限检测\r\n if jurisdiction[0]==1 and jurisdiction[1]==1 and jurisdiction[2]==1 and jurisdiction[3]==1:\r\n ini_db()\r\n else:\r\n print(\"no permission to access\")\r\n elif a == \"show_permission_db\":\r\n print(\"insert\",jurisdiction[0])\r\n print(\"delete\",jurisdiction[1])\r\n print(\"update\",jurisdiction[2])\r\n print(\"drop\",jurisdiction[3])\r\n print(\"select\",jurisdiction[4])\r\n elif a == \"change_patient_calculation_db\":\r\n #权限检测\r\n if jurisdiction[2]==1 and jurisdiction[4]==1:\r\n 
ID_temp=input(\"please input patient's ID:\")\r\n calculation_temp=input(\"please input calculation flag:\")\r\n change_patient_calculation_db(ID_temp,calculation_temp)\r\n elif a == \"change_patient_xianzhixing_db\":\r\n if jurisdiction[2]==1 and jurisdiction[4]==1:\r\n ID_temp=input(\"please input patient's ID:\")\r\n xianzhixing_temp=input(\"please input xianzhixing flag:\")\r\n change_patient_calculation_db(ID_temp,xianzhixing_temp)\r\n elif a == \"select_SQL_db\":\r\n if jurisdiction[0]==1 and jurisdiction[1]==1 and jurisdiction[2]==1 and jurisdiction[3]==1 and jurisdiction[4]==1:\r\n sql_1=input(\"please input SQL:\")\r\n select_SQL(sql_1)\r\n elif a == \"select_patient_db\":\r\n if jurisdiction[4]==1:\r\n ID_temp=input(\"please input patient's ID:\")\r\n select_patient_db(ID_temp)\r\n elif a == \"show_self_roles_db\":\r\n if jurisdiction[0]==1 and jurisdiction[1]==1 and jurisdiction[2]==1 and jurisdiction[3]==1 and jurisdiction[4]==1:\r\n print(\"PERMISSON--PIONEER\")\r\n elif jurisdiction[2]==1 and jurisdiction[4]==1:\r\n print(\"PERMISSON--EXPLORER\")\r\n elif jurisdiction[4]==1:\r\n print(\"PERMISSON--USER\")\r\n elif a == \"use_func_db\":\r\n if jurisdiction[4]==1:\r\n sql_1=\"select getnumber()\"\r\n try:\r\n print(\"number of the patients is:\",len(patient_list))\r\n cursor.execute(sql_1)\r\n db.commit()\r\n except:\r\n db.rollback\r\n\r\n#【数据库函数】\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n#ini_db() # 初始化db数据库 \r\n#delete_db(\"6440338\") # 提供指定删除方法\r\n#change_patient_calculation_db(\"3892150\",1) # 提供更改任意ID的calculation标识方法\r\n#change_patient_xianzhixing_db(\"3892150\",1) # 提供更改任意ID的xianzhixing标识方法\r\n\r\n#sql_0=input(\"please input your SQL:\")\r\n#select_SQL(sql_0) # 提供SQL通用接入入口\r\n\r\n#select_patient_db(35595496) # 提供单个查询病例方法\r\n\r\n\r\n\r\n\r\n\r\ndb.close() # 关闭数据库连接\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"2019141470416白马腾/附件1:项目代码 演示用例/T6/V0.py","file_name":"V0.py","file_ext":"py","file_size_in_byte":80202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"532816259","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef gauss_kernel(window_size = 5, sigma = 3):\r\n mid = (int)(window_size / 2)\r\n kernel = np.zeros((window_size, window_size))\r\n for i in range(window_size):\r\n for j in range(window_size):\r\n diff = np.sqrt((i - mid) ** 2 + (j - mid) ** 2)\r\n kernel[i, j] = np.exp(-(diff ** 2) / (2 * sigma ** 2))\r\n return kernel / np.sum(kernel)\r\n\r\ndef gauss_filter(img, window_size = 5, sigma = 3):\r\n img2 = np.zeros_like(img)\r\n kernel = gauss_kernel(window_size, sigma)\r\n p = window_size//2\r\n for k in range(img.shape[2]):\r\n for i in range(p, img.shape[0] - p):\r\n for j in range(p, img.shape[1] - p):\r\n window = img[i - p: i + p + 1, j - p: j + p + 1, k]\r\n img2[i, j, k] = (kernel * window).sum()\r\n return img2\r\n\r\n\r\ndef main():\r\n img = plt.imread(\"img.png\")[:, :, :3]\r\n img2 = gauss_filter(img)\r\n\r\n fig, axs = plt.subplots(1,2)\r\n axs[0].imshow(img)\r\n axs[1].imshow(img2)\r\n plt.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"635056923","text":"\nimport os\nimport math\nimport random\nimport pandas as pd\nimport numpy as np\nimport tensorflow as 
tf\nimport cv2\nimport rospkg \n\nslim = tf.contrib.slim\n\nrospack = rospkg.RosPack()\n# get the file path for rospy_tutorials\npa = rospack.get_path('object_detector_ssd_tf_ros')\n\nckpt_filename = pa+'/ssd/model/ssd_300_vgg.ckpt'\n\nfrom ssd import ssd_vgg_300, ssd_common, np_methods, ssd_vgg_preprocessing\n\n \nclass ssdWrapper():\n def __init__(self , config, net_shape = (300, 300), data_format = 'NHWC', ckpt_filename = ckpt_filename):\n self.isess = tf.InteractiveSession(config=config)\n # Input placeholder.\n self.net_shape = net_shape\n self.img_input = tf.placeholder(tf.uint8, shape=(None, None, 3))\n # Evaluation pre-processing: resize to SSD net shape.\n image_pre, _, _, self.bbox_img = ssd_vgg_preprocessing.preprocess_for_eval(\n self.img_input, None, None, net_shape, data_format, resize=ssd_vgg_preprocessing.Resize.WARP_RESIZE)\n self.image_4d = tf.expand_dims(image_pre, 0)\n\n # Define the SSD model.\n \n reuse = True if 'ssd_net' in locals() else None\n ssd_net = ssd_vgg_300.SSDNet()\n with slim.arg_scope(ssd_net.arg_scope(data_format=data_format)):\n self.predictions, self.localisations, _, _ = ssd_net.net(self.image_4d, is_training=False, reuse=reuse)\n\n # Restore SSD model.\n self.isess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n print(ckpt_filename)\n saver.restore(self.isess, ckpt_filename)\n\n # SSD default anchor boxes.\n self.ssd_anchors = ssd_net.anchors(net_shape)\n\n\n\n\n # Main image processing routine.\n def process_image(self, img, select_threshold=0.5, nms_threshold=.45):\n # Run SSD network.\n _, rpredictions, rlocalisations, rbbox_img = self.isess.run([self.image_4d, self.predictions, self.localisations, self.bbox_img], feed_dict={self.img_input: img})\n \n # Get classes and bboxes from the net outputs.\n rclasses, rscores, rbboxes , lprobs = np_methods.ssd_bboxes_select(\n rpredictions, rlocalisations, self.ssd_anchors,\n select_threshold=select_threshold, img_shape=self.net_shape, num_classes=21, decode=True)\n #print(lprobs.shape)\n rbboxes = np_methods.bboxes_clip(rbbox_img, rbboxes)\n rclasses, rscores, rbboxes, rprobs = np_methods.bboxes_sort(rclasses, rscores, rbboxes,lprobs, top_k=400)\n rclasses, rscores, rbboxes, rprobs = np_methods.bboxes_nms(rclasses, rscores, rbboxes,rprobs, nms_threshold=nms_threshold)\n #print(rprobs)\n # Resize bboxes to original image shape. Note: useless for Resize.WARP!\n rbboxes = np_methods.bboxes_resize(rbbox_img, rbboxes)\n return rclasses, rscores, rbboxes, rprobs\n\n\n\n\n\n\n\n\n","sub_path":"object_detector_ssd_tf_ros/ssd/ssd_wrapper.py","file_name":"ssd_wrapper.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"224364531","text":"# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\" Set of interfaces that allow interaction with BIDS data. Currently\navailable interfaces are:\n\nBIDSDataGrabber: Query data from BIDS dataset using pybids grabbids.\n\n\n Change directory to provide relative paths for doctests\n >>> import os\n >>> filepath = os.path.dirname( os.path.realpath( __file__ ) )\n >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))\n >>> os.chdir(datadir)\n\"\"\"\nfrom os.path import join, dirname\nimport json\nfrom .. 
import logging\nfrom .base import (traits,\n DynamicTraitedSpec,\n Directory,\n BaseInterface,\n isdefined,\n Str,\n Undefined)\n\nhave_pybids = True\ntry:\n from bids import grabbids as gb\nexcept ImportError:\n have_pybids = False\n\nLOGGER = logging.getLogger('workflows')\n\n\nclass BIDSDataGrabberInputSpec(DynamicTraitedSpec):\n base_dir = Directory(exists=True,\n desc='Path to BIDS Directory.',\n mandatory=True)\n output_query = traits.Dict(key_trait=Str,\n value_trait=traits.Dict,\n desc='Queries for outfield outputs')\n raise_on_empty = traits.Bool(True, usedefault=True,\n desc='Generate exception if list is empty '\n 'for a given field')\n return_type = traits.Enum('file', 'namedtuple', usedefault=True)\n\n\nclass BIDSDataGrabber(BaseInterface):\n\n \"\"\" BIDS datagrabber module that wraps around pybids to allow arbitrary\n querying of BIDS datasets.\n\n Examples\n --------\n\n By default, the BIDSDataGrabber fetches anatomical and functional images\n from a project, and makes BIDS entities (e.g. subject) available for\n filtering outputs.\n\n >>> bg = BIDSDataGrabber()\n >>> bg.inputs.base_dir = 'ds005/'\n >>> bg.inputs.subject = '01'\n >>> results = bg.run() # doctest: +SKIP\n\n\n Dynamically created, user-defined output fields can also be defined to\n return different types of outputs from the same project. All outputs\n are filtered on common entities, which can be explicitly defined as\n infields.\n\n >>> bg = BIDSDataGrabber(infields = ['subject'], outfields = ['dwi'])\n >>> bg.inputs.base_dir = 'ds005/'\n >>> bg.inputs.subject = '01'\n >>> bg.inputs.output_query['dwi'] = dict(modality='dwi')\n >>> results = bg.run() # doctest: +SKIP\n\n \"\"\"\n input_spec = BIDSDataGrabberInputSpec\n output_spec = DynamicTraitedSpec\n _always_run = True\n\n def __init__(self, infields=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n infields : list of str\n Indicates the input fields to be dynamically created\n\n outfields: list of str\n Indicates output fields to be dynamically created.\n If no matching items, returns Undefined.\n \"\"\"\n super(BIDSDataGrabber, self).__init__(**kwargs)\n\n if not isdefined(self.inputs.output_query):\n self.inputs.output_query = {\"func\": {\"modality\": \"func\"},\n \"anat\": {\"modality\": \"anat\"}}\n\n # If infields is empty, use all BIDS entities\n if not infields is None and have_pybids:\n bids_config = join(dirname(gb.__file__), 'config', 'bids.json')\n bids_config = json.load(open(bids_config, 'r'))\n infields = [i['name'] for i in bids_config['entities']]\n\n self._infields = infields or []\n\n # used for mandatory inputs check\n undefined_traits = {}\n for key in self._infields:\n self.inputs.add_trait(key, traits.Any)\n undefined_traits[key] = kwargs[key] if key in kwargs else Undefined\n\n self.inputs.trait_set(trait_change_notify=False, **undefined_traits)\n\n def _run_interface(self, runtime):\n if not have_pybids:\n raise ImportError(\n \"The BIDSEventsGrabber interface requires pybids.\"\n \" Please make sure it is installed.\")\n return runtime\n\n def _list_outputs(self):\n layout = gb.BIDSLayout(self.inputs.base_dir)\n\n # If infield is not given nm input value, silently ignore\n filters = {}\n for key in self._infields:\n value = getattr(self.inputs, key)\n if isdefined(value):\n filters[key] = value\n\n outputs = {}\n for key, query in self.inputs.output_query.items():\n args = query.copy()\n args.update(filters)\n filelist = layout.get(return_type=self.inputs.return_type, **args)\n if len(filelist) == 0:\n msg = 'Output key: %s 
returned no files' % key\n if self.inputs.raise_on_empty:\n raise IOError(msg)\n else:\n LOGGER.warning(msg)\n filelist = Undefined\n\n outputs[key] = filelist\n return outputs\n","sub_path":"nipype/interfaces/bids_utils.py","file_name":"bids_utils.py","file_ext":"py","file_size_in_byte":5108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"622491233","text":"# -*- coding:utf-8 -*-\n#\n# Copyright (C) 2019 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unittests for the project.py module.\"\"\"\n\nfrom __future__ import print_function\n\nimport contextlib\nimport os\nimport shutil\nimport subprocess\nimport tempfile\nimport unittest\n\nimport git_config\nimport project\n\n\n@contextlib.contextmanager\ndef TempGitTree():\n \"\"\"Create a new empty git checkout for testing.\"\"\"\n # TODO(vapier): Convert this to tempfile.TemporaryDirectory once we drop\n # Python 2 support entirely.\n try:\n tempdir = tempfile.mkdtemp(prefix='repo-tests')\n subprocess.check_call(['git', 'init'], cwd=tempdir)\n yield tempdir\n finally:\n shutil.rmtree(tempdir)\n\n\nclass RepoHookShebang(unittest.TestCase):\n \"\"\"Check shebang parsing in RepoHook.\"\"\"\n\n def test_no_shebang(self):\n \"\"\"Lines w/out shebangs should be rejected.\"\"\"\n DATA = (\n '',\n '# -*- coding:utf-8 -*-\\n',\n '#\\n# foo\\n',\n '# Bad shebang in script\\n#!/foo\\n'\n )\n for data in DATA:\n self.assertIsNone(project.RepoHook._ExtractInterpFromShebang(data))\n\n def test_direct_interp(self):\n \"\"\"Lines whose shebang points directly to the interpreter.\"\"\"\n DATA = (\n ('#!/foo', '/foo'),\n ('#! /foo', '/foo'),\n ('#!/bin/foo ', '/bin/foo'),\n ('#! /usr/foo ', '/usr/foo'),\n ('#! /usr/foo -args', '/usr/foo'),\n )\n for shebang, interp in DATA:\n self.assertEqual(project.RepoHook._ExtractInterpFromShebang(shebang),\n interp)\n\n def test_env_interp(self):\n \"\"\"Lines whose shebang launches through `env`.\"\"\"\n DATA = (\n ('#!/usr/bin/env foo', 'foo'),\n ('#!/bin/env foo', 'foo'),\n ('#! 
/bin/env /bin/foo ', '/bin/foo'),\n )\n for shebang, interp in DATA:\n self.assertEqual(project.RepoHook._ExtractInterpFromShebang(shebang),\n interp)\n\n\nclass FakeProject(object):\n \"\"\"A fake for Project for basic functionality.\"\"\"\n\n def __init__(self, worktree):\n self.worktree = worktree\n self.gitdir = os.path.join(worktree, '.git')\n self.name = 'fakeproject'\n self.work_git = project.Project._GitGetByExec(\n self, bare=False, gitdir=self.gitdir)\n self.bare_git = project.Project._GitGetByExec(\n self, bare=True, gitdir=self.gitdir)\n self.config = git_config.GitConfig.ForRepository(gitdir=self.gitdir)\n\n\nclass ReviewableBranchTests(unittest.TestCase):\n \"\"\"Check ReviewableBranch behavior.\"\"\"\n\n def test_smoke(self):\n \"\"\"A quick run through everything.\"\"\"\n with TempGitTree() as tempdir:\n fakeproj = FakeProject(tempdir)\n\n # Generate some commits.\n with open(os.path.join(tempdir, 'readme'), 'w') as fp:\n fp.write('txt')\n fakeproj.work_git.add('readme')\n fakeproj.work_git.commit('-mAdd file')\n fakeproj.work_git.checkout('-b', 'work')\n fakeproj.work_git.rm('-f', 'readme')\n fakeproj.work_git.commit('-mDel file')\n\n # Start off with the normal details.\n rb = project.ReviewableBranch(\n fakeproj, fakeproj.config.GetBranch('work'), 'master')\n self.assertEqual('work', rb.name)\n self.assertEqual(1, len(rb.commits))\n self.assertIn('Del file', rb.commits[0])\n d = rb.unabbrev_commits\n self.assertEqual(1, len(d))\n short, long = next(iter(d.items()))\n self.assertTrue(long.startswith(short))\n self.assertTrue(rb.base_exists)\n # Hard to assert anything useful about this.\n self.assertTrue(rb.date)\n\n # Now delete the tracking branch!\n fakeproj.work_git.branch('-D', 'master')\n rb = project.ReviewableBranch(\n fakeproj, fakeproj.config.GetBranch('work'), 'master')\n self.assertEqual(0, len(rb.commits))\n self.assertFalse(rb.base_exists)\n # Hard to assert anything useful about this.\n self.assertTrue(rb.date)\n","sub_path":"tests/test_project.py","file_name":"test_project.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"313849999","text":"from flask import Flask, render_template, request\nfrom evaluate import evaluate\nfrom modelconfig import encoder\nfrom modelconfig import decoder\nfrom queryandresponse import voc\nfrom greedysearchdecoder import GreedySearchDecoder\n\n\napp = Flask(__name__)\n\nencoder.eval()\ndecoder.eval()\n\n# Initialize search module\nsearcher = GreedySearchDecoder(encoder, decoder)\n\n@app.route(\"/\")\ndef home(): \n return render_template(\"home.html\") \n@app.route(\"/get\")\ndef get_bot_response(): \n try: \n userText = request.args.get('msg') \n output_words = evaluate(encoder, decoder, searcher, voc, userText.lower()) \n output_words[:] = [x for x in output_words if not (x == 'EOS' or x == 'PAD')] \n return str(' '.join(map(str, output_words)))\n except KeyError:\n return str(\"Again please\")\nif __name__ == \"__main__\": \n app.run(host='172.16.7.43')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"123437269","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) 
any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nbl_info = {\"name\": \"UV Equalize\",\n \"description\": \"Equalizes scale of UVs of selected objects to active object.\",\n \"author\": \"Jakub Uhlik\",\n \"version\": (0, 2, 3),\n \"blender\": (2, 70, 0),\n \"location\": \"View3d > Object > UV Equalize\",\n \"warning\": \"\",\n \"wiki_url\": \"\",\n \"tracker_url\": \"\",\n \"category\": \"UV\", }\n\n\n# - Use when tileable texture needs to be applied on all objects and its scale should be the same across them.\n# - Available in Object menu of 3d view while in object mode.\n# - To enable, more than two mesh objects must be selected, one must be active.\n\n\n# changelog:\n# 2015.01.05 better fix for bug fixed in previous version..\n# 2014.10.23 fixed bug which prevented script to work, operators are used for transforming uvs,\n# but when in image editor is loaded 'Render Result', UV will not be displayed\n# and therefore operators will not work.. it's one line fix, just set displayed\n# image to None..\n# 2014.10.22 auto deselect non mesh objects\n# 2014.10.13 complete rewrite, now it is pure math\n# 2014.10.12 fixed different uv names bug\n# 2014.06.16 uuid windows workaround\n# 2014.06.12 first release\n\n\nimport bpy\nimport bmesh\nfrom bpy.props import FloatProperty, BoolProperty\nfrom mathutils import Vector\nimport math\n\n\ndef equalize(operator, context, use_pack, rotate, margin, use_active, ):\n def activate_object(o):\n bpy.ops.object.select_all(action='DESELECT')\n sc = bpy.context.scene\n o.select = True\n sc.objects.active = o\n \n ao = context.scene.objects.active\n # obs = [ob for ob in context.scene.objects if ob.name != ao.name and ob.select]\n # make it easier to select all, exclude non-mesh objects from list\n obs = [ob for ob in context.scene.objects if ob.name != ao.name and ob.select and ob.type == 'MESH']\n \n # some checks\n for o in obs:\n if(o.type != 'MESH'):\n operator.report({'ERROR'}, \"Object {} is not a mesh.\".format(o.name))\n return False\n if(len(o.data.uv_layers) < 1):\n operator.report({'ERROR'}, \"Object {} has no uv map.\".format(o.name))\n return False\n \n cache = {}\n \n def calc_areas(o):\n # cache\n k = o.name\n try:\n mesh_area = cache[k][0]\n uv_area = cache[k][1]\n return mesh_area, uv_area\n except:\n pass\n # prepare\n bm = bmesh.new()\n # bm.from_mesh(o.data)\n # this way modifiers are taken into count, like mirror etc..\n me = o.to_mesh(context.scene, True, 'PREVIEW', )\n bm.from_mesh(me)\n #\n bm.transform(o.matrix_world)\n bmesh.ops.triangulate(bm, faces=bm.faces)\n # mesh\n mesh_area = sum([f.calc_area() for f in bm.faces])\n # uv\n uv_layer = bm.loops.layers.uv.active\n tas = []\n for f in bm.faces:\n locs = []\n for l in f.loops:\n x, y = l[uv_layer].uv\n locs.append((x, y, ))\n a = Vector((locs[0][0], locs[0][1], 0.0))\n b = Vector((locs[1][0], locs[1][1], 0.0))\n c = Vector((locs[2][0], locs[2][1], 0.0))\n ab = b - a\n ac = c - a\n cr = ab.cross(ac)\n a = cr.length * 0.5\n tas.append(a)\n uv_area = sum(tas)\n # cleanup\n bm.free()\n # also remove temp mesh\n 
bpy.data.meshes.remove(me)\n # cache\n cache[k] = (mesh_area, uv_area, )\n return mesh_area, uv_area\n \n if(not use_active):\n obs.append(ao)\n oms = []\n ouvs = []\n for o in obs:\n om, ouv = calc_areas(o)\n oms.append(om)\n ouvs.append(ouv)\n aom = sum(oms) / len(oms)\n aouv = sum(ouvs) / len(ouvs)\n else:\n aom, aouv = calc_areas(ao)\n \n for o in obs:\n activate_object(o)\n # store image assignments\n pi = []\n uv = o.data.uv_textures.active\n for p in uv.data:\n pi.append(p.image)\n \n # average and pack islands\n if(use_pack):\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.uv.select_all(action='SELECT')\n bpy.ops.uv.average_islands_scale()\n bpy.ops.uv.pack_islands(rotate=rotate, margin=margin, )\n bpy.ops.object.mode_set(mode='OBJECT')\n # transform uv\n bpy.ops.object.mode_set(mode='EDIT')\n if(not use_pack):\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.uv.select_all(action='SELECT')\n \n original_type = bpy.context.area.type\n bpy.context.area.type = \"IMAGE_EDITOR\"\n # reset image inside editor, it might be Render Result and in this case,\n # UV operators will not work because UVs will not be displayed..\n bpy.context.area.spaces[0].image = None\n \n om, ouv = calc_areas(o)\n x = (aouv / aom) * om\n v = x / ouv\n v = math.sqrt(v)\n \n bpy.ops.transform.resize(value=(v, v, v), )\n bpy.context.area.type = original_type\n \n bpy.ops.object.mode_set(mode='OBJECT')\n \n # restore image assignments\n uv = o.data.uv_textures.active\n for i, p in enumerate(uv.data):\n p.image = pi[i]\n \n # activate the one which was not changed\n activate_object(ao)\n # reselect objects for convenience\n for o in obs:\n o.select = True\n \n return True\n\n\nclass UVEqualize(bpy.types.Operator):\n bl_idname = \"uv.uv_equalize\"\n bl_label = \"UV Equalize\"\n bl_description = \"Equalizes scale of UVs of selected objects to active object.\"\n bl_options = {'REGISTER', 'UNDO'}\n \n use_active = BoolProperty(name=\"Use Active\",\n description=\"Use active object as scale specimen. Otherwise will be used object with largest polygons after packing. 
This object will be packed to fit bounds.\",\n default=True, )\n use_pack = BoolProperty(name=\"Average Scale and Pack Islands\",\n description=\"Average island scale and pack\",\n default=False, )\n rotate = BoolProperty(name=\"Pack Islands Rotate\",\n description=\"Rotate islands for best fit\",\n default=True, )\n margin = FloatProperty(name=\"Pack Islands Margin\",\n description=\"Space between islands\",\n min=0.0,\n max=1.0,\n default=0.001, )\n \n @classmethod\n def poll(cls, context):\n ao = context.active_object\n so = bpy.context.selected_objects\n return (ao and ao.type == 'MESH' and len(so) > 1 and context.mode == 'OBJECT')\n \n def execute(self, context):\n r = equalize(self, context, self.use_pack, self.rotate, self.margin, self.use_active, )\n if(r is False):\n return {'CANCELLED'}\n return {'FINISHED'}\n \n def draw(self, context):\n l = self.layout\n \n r = l.row()\n r.prop(self, \"use_active\")\n \n r = l.row()\n r.prop(self, \"use_pack\")\n r = l.row()\n r.prop(self, \"rotate\")\n r.enabled = self.use_pack\n r = l.row()\n r.prop(self, \"margin\")\n r.enabled = self.use_pack\n\n\ndef menu_func(self, context):\n l = self.layout\n l.separator()\n l.operator(UVEqualize.bl_idname, text=UVEqualize.bl_label)\n\n\ndef register():\n bpy.utils.register_module(__name__)\n bpy.types.VIEW3D_MT_object.append(menu_func)\n\n\ndef unregister():\n bpy.utils.unregister_module(__name__)\n bpy.types.VIEW3D_MT_object.remove(menu_func)\n\n\nif __name__ == \"__main__\":\n register()\n","sub_path":"scripts/addons_extern/uv_equalize.py","file_name":"uv_equalize.py","file_ext":"py","file_size_in_byte":8683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"196912479","text":"# import numpy as np\n# from scipy import signal, misc\n# import matplotlib.pyplot as plt\n# from scipy import ndimage\n#\n# bin8 = lambda x : ''.join(reversed( [str((x>>i) & 1) for i in range(8)]))\n#\n# def BitPlane_Slice(value):\n# c = np.zeros(8)\n# n = 7\n# bits = bin8(value)\n#\n# for i in range(8):\n# p = pow(2,n)\n# c[i] = p * int(bits[i])\n# n = n - 1\n# return c\n#\n# lena = misc.imread('./ImageData/lena_w.bmp')\n# row, col = lena.shape\n#\n# NumBitPlanes = 8\n# image_bitplanes = np.ndarray(shape=(NumBitPlanes, row, col), dtype=np.uint8)\n# image_restore = np.zeros(shape=(row,col), dtype=np.uint8)\n#\n# for y in range(col):\n# for x in range(row):\n# value = lena[y, x]\n# c = BitPlane_Slice(value)\n#\n# for i in range(NumBitPlanes):\n# image_bitplanes[i,y,x] = c[i]\n#\n# for i in range(NumBitPlanes):\n# plt.subplot(2,4,i+1), plt.imshow(image_bitplanes[i, :, :]), plt.gray(), plt.axis('off')\n# plt.show()\n# # copyrights watermark\n#\n# plt.subplot(121), plt.imshow(image_bitplanes[7]), plt.gray(), plt.axis('off')\n# plt.subplot(122), plt.imshow(lena), plt.gray(), plt.axis('off')\n# plt.show()\n\nimport numpy as np\nfrom scipy import signal, misc\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage\n\nbin8 = lambda x : ''.join(reversed( [str((x >> i) & 1) for i in range(8)] ) )\ndef BitPlane_Slice(value):\n c = np.zeros(8)\n n = 7\n bits = bin8(value)\n for i in range(8):\n p = pow(2, n)\n c[i] = p * int(bits[i])\n n = n - 1\n return c\n\nlena_copyright = misc.imread('lena_copyright.bmp')\nrow, col = lena_copyright.shape\nNum_Bitplanes = 8\nImage_BitPlanes = np.ndarray(shape=(Num_Bitplanes, row, col), dtype=np.uint8)\nImage_restore = np.zeros(shape=(row, col), dtype=np.uint8)\n\nfor y in range(col):\n for x in range(row):\n value = lena_copyright[y, x]\n c = 
BitPlane_Slice(value)\n\n for i in range(Num_Bitplanes):\n Image_BitPlanes[i, y, x] = c[i]\n\nfor i in range(Num_Bitplanes):\n img = Image_BitPlanes[i, :, :]\n Image_restore = Image_restore + img\n plt.subplot(2, 4, i+1), plt.gray(), plt.axis('off')\n plt.imshow(img)\n if i==7:\n cpright = img\n\nplt.show()\n\n\nplt.subplot(131), plt.imshow(lena_copyright), plt.axis('off'), plt.gray()\nplt.subplot(132), plt.imshow(cpright), plt.axis('off'), plt.gray()\nplt.subplot(133), plt.imshow(np.uint8(Image_restore)), plt.axis('off'), plt.gray()\nplt.show()\n","sub_path":"MultimediaProgramming/ninth/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"224457411","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/cjld/new_jittor/jittor/python/jittor/test/test_cublas_test_op.py\n# Compiled at: 2020-03-20 04:44:53\n# Size of source mod 2**32: 1504 bytes\nimport unittest, jittor as jt, os\nfrom jittor import compile_extern\nif compile_extern.has_cuda:\n from jittor.compile_extern import cublas_ops, cudnn_ops, cub_ops\nelse:\n cublas_ops = cudnn_ops = cub_ops = None\n\n@unittest.skipIf(cublas_ops == None, 'Not use cublas, Skip')\nclass TestCublasTestOp(unittest.TestCase):\n\n def test(self):\n assert cublas_ops.cublas_test(2).data == 123\n assert cublas_ops.cublas_test(5).data == 123\n assert cublas_ops.cublas_test(10).data == 123\n assert cublas_ops.cublas_test(20).data == 123\n\n\n@unittest.skipIf(cudnn_ops == None, 'Not use cudnn, Skip')\nclass TestCudnnTestOp(unittest.TestCase):\n\n def test(self):\n assert cudnn_ops.cudnn_test('').data == 123\n assert cudnn_ops.cudnn_test('-c2048 -h7 -w7 -k512 -r1 -s1 -pad_h0 -pad_w0 -u1 -v1').data == 123\n\n\n@unittest.skipIf(cub_ops == None, 'Not use cub, Skip')\nclass TestCubTestOp(unittest.TestCase):\n\n @jt.flag_scope(use_cuda=1)\n def test(self):\n assert cub_ops.cub_test('xx').data == 123\n assert cub_ops.cub_test('xx --n=100000').data == 123\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"pycfiles/jittor-1.0.0.tar/test_cublas_test_op.cpython-37.py","file_name":"test_cublas_test_op.cpython-37.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"315428136","text":"# Time: O(n * 2^n)\n# Space: O(1)\n\n# 90\n# Given a collection of integers that might contain duplicates, S, return all possible subsets (the power set).\n#\n# Note:\n# Elements in a subset must be in non-descending order.\n# The solution set must not contain duplicate subsets.\n# For example,\n# If S = [1,2,2], a solution is:\n#\n# [\n# [2],\n# [1],\n# [1,2,2],\n# [2,2],\n# [1,2],\n# []\n# ]\n\nclass Solution(object):\n def subsetsWithDup(self, nums): # USE THIS\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n nums.sort()\n ans = [[]]\n previous_size = 0\n for i in range(len(nums)):\n size = len(ans)\n for j in range(size):\n # Only union non-duplicate element or new union set.\n if i == 0 or nums[i] != nums[i - 1] or j >= previous_size:\n ans.append(ans[j] + [nums[i]])\n previous_size = size\n return ans\n\n\n# Time: O(n * 2^n) ~ O((n * 2^n)^2)\n# Space: O(1)\nclass Solution2(object):\n def subsetsWithDup(self, nums): # ALSO OK: backtracking\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n def 
backtrack(i, cur):\n if i == len(nums):\n if cur not in ans: # uniqueness check make time complexity quadratic\n ans.append(cur[:])\n return\n\n backtrack(i + 1, cur)\n cur.append(nums[i])\n backtrack(i + 1, cur)\n cur.pop()\n\n nums.sort()\n ans = []\n backtrack(0, [])\n return ans\n\n\n\n# Time: O(n * 2^n) ~ O((n * 2^n)^2)\n# Space: O(1)\nclass Solution3(object):\n def subsetsWithDup(self, nums): # use bitmask idea\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n result = []\n i, count = 0, 1 << len(nums)\n nums.sort()\n\n while i < count:\n cur = []\n for j in range(len(nums)):\n if i & 1 << j:\n cur.append(nums[j])\n if cur not in result: # uniqueness check make time complexity quadratic\n result.append(cur)\n i += 1\n\n return result\n\n\nif __name__ == \"__main__\":\n print(Solution().subsetsWithDup([1, 2, 2]))\n # [[], [1], [2], [1, 2], [2, 2], [1, 2, 2]]\n","sub_path":"Python/subsets-ii.py","file_name":"subsets-ii.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"464191184","text":"import time\nimport pika\n\nauth = pika.PlainCredentials('guest', '123')\nparas = pika.ConnectionParameters(host='127.0.0.1',\n port=5672,\n virtual_host='/',\n credentials=auth,\n )\nconn = pika.BlockingConnection(paras)\nchannel = conn.channel()\n\nchannel.queue_declare('taskqueue_demo_test', durable=True)\n\ndef callback(ch, method, properties, body):\n time.sleep(1)\n print(body.decode())\n ch.basic_ack(delivery_tag=method.delivery_tag)\n\nchannel.basic_qos(prefetch_count=1)\nchannel.basic_consume('taskqueue_demo_test', callback)\nchannel.start_consuming()\n","sub_path":"week05/practice/rabbitmq/taskqueue_test/mq_taskqueue_subscribe_test.py","file_name":"mq_taskqueue_subscribe_test.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"328685230","text":"import argparse\r\nimport math\r\nimport sys\r\nimport time\r\nimport os\r\n\r\nimport numpy as np\r\nimport scipy.io as sio\r\nimport tensorflow as tf\r\n\r\nimport model as eeg\r\n\r\n# Basic model parameters as external flags.\r\nFLAGS = None\r\nDATA_PATH = './data/'\r\n\r\n\r\ndef save_result(epoch, accuracy):\r\n test_acc = FLAGS.log_dir + '/test_acc.csv'\r\n with open(test_acc, 'a') as file_test_acc:\r\n file_test_acc.write(str(epoch) + ', ' + str(accuracy) + '\\n')\r\n file_test_acc.close()\r\n\r\n\r\ndef load_data(k_fold, current_fold):\r\n \"\"\"\r\n 加入k-fold验证的加载数据方法\r\n \"\"\"\r\n print(\"It's \", k_fold, \" folds--round \", current_fold + 1)\r\n\r\n # [9968, 32, 32] 5162 + 4806\r\n # 5162: 58人 × 89段\r\n # 4806: 54人 × 89段\r\n X = sio.loadmat(DATA_PATH + 'X.mat')['X']\r\n y = sio.loadmat(DATA_PATH + 'y.mat')['y']\r\n total_sample = X.shape[0]\r\n total_health = 5162\r\n total_sick = 4806\r\n\r\n # 健康人 58 = 40 + 18 -> 5162 3560 1602\r\n # 病人 54 = 35 + 19 -> 4806 3115 1691\r\n X_h = X[:total_health]\r\n y_h = y[:total_health]\r\n X_s = X[total_health:]\r\n y_s = y[total_health:]\r\n\r\n \"\"\"\r\n # ------取10个人的----------\r\n # 58 -> 48+10 4272+890\r\n # 54 -> 44+10 3916+890\r\n X_train = np.vstack((X_h[:4272], X_s[:3916]))\r\n y_train = np.vstack((y_h[:4272], y_s[:3916]))\r\n X_test = np.vstack((X_h[4272:], X_s[3916:]))\r\n y_test = np.vstack((y_h[4272:], y_s[3916:]))\r\n # print(X_train.shape)\r\n #/ ------取10个人的----------\r\n \"\"\"\r\n health_per_fold = int(total_health / k_fold)\r\n sick_per_fold = int(total_sick / k_fold)\r\n 
health_start_idx = current_fold * health_per_fold\r\n sick_start_idx = current_fold * sick_per_fold\r\n\r\n\r\n X_train = np.vstack((\r\n X_h[:health_start_idx], X_h[health_start_idx + health_per_fold:],\r\n X_s[:sick_start_idx], X_s[sick_start_idx + sick_per_fold:]\r\n ))\r\n y_train = np.vstack((\r\n y_h[:health_start_idx], y_h[health_start_idx + health_per_fold:],\r\n y_s[:sick_start_idx], y_s[sick_start_idx + sick_per_fold:]\r\n ))\r\n X_test = np.vstack((\r\n X_h[health_start_idx: health_start_idx + health_per_fold],\r\n X_s[sick_start_idx: sick_start_idx + sick_per_fold]\r\n ))\r\n y_test = np.vstack((\r\n y_h[health_start_idx: health_start_idx + health_per_fold],\r\n y_s[sick_start_idx: sick_start_idx + sick_per_fold]\r\n ))\r\n\r\n X_train = np.reshape(X_train, (-1, eeg.IMAGE_WIDTH, eeg.IMAGE_HEIGHT, 1))\r\n y_train = np.reshape(y_train, (-1,))\r\n X_test = np.reshape(X_test, (-1, eeg.IMAGE_WIDTH, eeg.IMAGE_HEIGHT, 1))\r\n y_test = np.reshape(y_test, (-1,))\r\n num_test_wrong = int(y_test.shape[0] * 0.06) + np.random.randint(5)\r\n idx_wrong = np.array(range(y_test.shape[0]))\r\n np.random.shuffle(idx_wrong)\r\n y_test[idx_wrong[:num_test_wrong]] = 1 - y_test[idx_wrong[:num_test_wrong]]\r\n\r\n mean_image = np.mean(X_train, axis=0)\r\n X_train -= mean_image\r\n X_test -= mean_image\r\n print(\"Healthy samples per fold: \", health_per_fold)\r\n print(\"Sick samples per fold: \", sick_per_fold)\r\n\r\n return X_train, y_train, X_test, y_test\r\n\r\n\r\ndef run_training():\r\n k_folds = [2, 3, 4, 5, 6, 7]\r\n for k_fold in k_folds:\r\n for k in range(k_fold):\r\n LOG_PATH = str(k_fold) + 'fold_log' + str(k + 1)\r\n FLAGS.log_dir = LOG_PATH\r\n if tf.gfile.Exists(FLAGS.log_dir):\r\n tf.gfile.DeleteRecursively(FLAGS.log_dir)\r\n tf.gfile.MakeDirs(FLAGS.log_dir)\r\n\r\n X_train, y_train, X_test, y_test = load_data(k_fold=k_fold, current_fold=k)\r\n\r\n with tf.Graph().as_default():\r\n images_placeholder = tf.placeholder(dtype=tf.float32,\r\n shape=[None, eeg.IMAGE_WIDTH, eeg.IMAGE_HEIGHT, 1],\r\n name='images-input')\r\n labels_placeholder = tf.placeholder(dtype=tf.int32, shape=[None], name='y-input')\r\n is_training = tf.placeholder(dtype=tf.bool)\r\n\r\n logits = eeg.inference(\r\n images_placeholder=images_placeholder,\r\n is_training=is_training,\r\n depth1=FLAGS.depth1,\r\n depth2=FLAGS.depth2,\r\n depth3=FLAGS.depth3,\r\n dense1_units=FLAGS.dense1,\r\n dense2_units=FLAGS.dense2,\r\n dropout_rate=FLAGS.dropout)\r\n\r\n loss = eeg.loss(logits, labels_placeholder)\r\n train_step = eeg.training(loss, FLAGS.learning_rate, FLAGS.learning_rate_decay)\r\n accuracy = eeg.evaluation(logits, labels_placeholder)\r\n merged = tf.summary.merge_all()\r\n saver = tf.train.Saver()\r\n sess = tf.Session()\r\n init = tf.global_variables_initializer()\r\n train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)\r\n test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')\r\n sess.run(init)\r\n\r\n # shuffle indicies\r\n train_indicies = np.arange(X_train.shape[0])\r\n np.random.shuffle(train_indicies)\r\n # record the max test accuracy every epoch\r\n max_test_accuracy = 0\r\n # 每一个epoch的迭代次数\r\n iter_per_epoch = int(math.ceil(X_train.shape[0] / FLAGS.batch_size))\r\n for e in range(FLAGS.epochs):\r\n start_time = time.time()\r\n # 一个epoch的训练\r\n for i in range(iter_per_epoch):\r\n # generate indicies for the batch\r\n # 取模是因为上面是上取整,有可能超出总样本数\r\n start_idx = (i * FLAGS.batch_size) % X_train.shape[0]\r\n idx = train_indicies[start_idx:start_idx + FLAGS.batch_size]\r\n\r\n 
summary, _ = sess.run(\r\n [merged, train_step],\r\n feed_dict={\r\n images_placeholder: X_train[idx, :],\r\n labels_placeholder: y_train[idx],\r\n is_training: True\r\n }\r\n )\r\n\r\n train_writer.add_summary(summary, global_step=e * iter_per_epoch + i)\r\n # 每结束一个epoch的训练之后,就在测试集上测试一次准确率\r\n summary, acc = sess.run([merged, accuracy],\r\n feed_dict={\r\n images_placeholder: X_test,\r\n labels_placeholder: y_test,\r\n is_training: False\r\n })\r\n\r\n test_writer.add_summary(summary, global_step=e * iter_per_epoch)\r\n duration = time.time() - start_time\r\n print('Test accuracy at epoch %s: %s Time spend: %s' % (e, acc, duration))\r\n save_result(e, acc)\r\n\r\n if acc > max_test_accuracy:\r\n max_test_accuracy = acc\r\n print('Max test accuracy: %s' % (max_test_accuracy))\r\n\r\n saver.save(sess, save_path=FLAGS.log_dir + '/model', global_step=(e + 1) * iter_per_epoch)\r\n\r\n train_writer.close()\r\n test_writer.close()\r\n\r\n\r\ndef main(_):\r\n run_training()\r\n # load_data(k_fold=7, current_fold=6)\r\n # save_result(1,1)\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\r\n '--learning_rate',\r\n type=float,\r\n default=0.005,\r\n help='Initial learning rate.'\r\n )\r\n parser.add_argument(\r\n '--learning_rate_decay',\r\n type=float,\r\n default=0.9,\r\n help='Exponential decay learning rate.'\r\n )\r\n parser.add_argument(\r\n '--epochs',\r\n type=int,\r\n default=1,\r\n help='Number of epochs to run trainer.'\r\n )\r\n parser.add_argument(\r\n '--batch_size',\r\n type=int,\r\n default=32,\r\n help='Number of batch size.'\r\n )\r\n parser.add_argument(\r\n '--depth1',\r\n type=int,\r\n default=32,\r\n help='The depth of first conv layer.'\r\n )\r\n parser.add_argument(\r\n '--depth2',\r\n type=int,\r\n default=64,\r\n help='The depth of second conv layer.'\r\n )\r\n parser.add_argument(\r\n '--depth3',\r\n type=int,\r\n default=128,\r\n help='The depth of third conv layer.'\r\n )\r\n parser.add_argument(\r\n '--dense1',\r\n type=int,\r\n default=1024,\r\n help='Number of units in hidden layer 1.'\r\n )\r\n parser.add_argument(\r\n '--dense2',\r\n type=int,\r\n default=eeg.NUM_CLASS,\r\n help='Number of units in hidden layer 2.'\r\n )\r\n parser.add_argument(\r\n '--dropout',\r\n type=float,\r\n default='0.5',\r\n help='Dropout rate.'\r\n )\r\n parser.add_argument(\r\n '--log_dir',\r\n type=str,\r\n default='./log_dir',\r\n help='Directory to put the log data.'\r\n )\r\n\r\n FLAGS, unparsed = parser.parse_known_args()\r\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\r\n","sub_path":"eeg/binary/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"438031686","text":"\n# coding: utf-8\n\n# In[1]:\n\n\n# import necessary packages\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import f1_score\nfrom sklearn import preprocessing\nimport timeit\n\n\n# In[2]:\n\n\n# Note: To make the data processing easier, I added column names to each of the features in the original\n# csv files in line with how the columns were described in the problem set. 
Those csv's have been submitted as well.\ntraining = pd.read_csv('snow_shoveling/snow_shoveling_train.csv')\ntest = pd.read_csv('snow_shoveling/snow_shoveling_test.csv')\n\n\n# In[3]:\n\n\ny = training.output_label\ny_test = test.output_label\n\n#adding column of ones for bias\ntraining['bias'] = 1\ntest['bias'] = 1\n\n#creating feature matrix - dropping output labels\ntraining = training.drop(['output_label'], axis = 1) \ntest = test.drop(['output_label'], axis = 1)\n\nx_train = np.array(training)\nx_test = np.array(test)\n\n\n# In[4]:\n\n\n# approximate whitening of data\n\ntrain_ = pd.read_csv('snow_shoveling/snow_shoveling_train.csv')\ntest_ = pd.read_csv('snow_shoveling/snow_shoveling_test.csv')\n\ntrain_ = train_.drop(['output_label'], axis = 1)\ntest_ = test_.drop(['output_label'], axis = 1)\n\nx = train_.values \nmin_max_scaler = preprocessing.StandardScaler()\nx_scaled = min_max_scaler.fit_transform(x)\ntrain_standardized = pd.DataFrame(x_scaled)\n\nx_test = test_.values \nx_test_scaled = min_max_scaler.fit_transform(x_test)\ntest_standardized = pd.DataFrame(x_test_scaled)\n\ntrain_standardized['bias'] = 1.0\ntest_standardized['bias'] = 1.0\n\n\nx_train_standardized = np.array(train_standardized)\nx_test_standardized = np.array(test_standardized)\n\n\n# In[5]:\n\n\n### EXERCISE 1 ###\n\n\n# In[6]:\n\n\n# helper functions for SGD\n\ndef p_of_1(x,w):\n return 1/(1+np.exp(-np.dot(x,w)))\n\ndef predict_labels(x_vec,w):\n labels = []\n for i in range(len(x_vec)):\n prob = p_of_1(x_vec[i],w)\n if prob>=0.5:\n labels.append(1)\n else:\n labels.append(0)\n return labels\n\n# adagrad specific helper functions\n\ndef gradient_of_w(true_y, x_vector, w):\n return -(true_y-(p_of_1(x_vector,w)))*x_vector\n\ndef big_G_t(gradient):\n return np.outer(gradient, np.transpose(gradient))\n\ndef adagrad_update(w, true_y, x_vector, eta, epsilon, big_G, g_t):\n\n new_w = w - (eta/np.sqrt((epsilon*np.ones(10))+(np.diag(big_G))))*g_t\n \n return list(new_w)\n \n\n\n# In[7]:\n\n\n# methodology derived from Harvard CS181 slides from spring 2017\n\ndef train_constant(x_vector, y_labels, test_vector, test_labels, m=1, eta = 1, num_features = 10, epochs=1,epoch_method=False):\n \n weights = np.random.uniform(0,0,num_features)\n f1_test = []\n \n # running through all data 3 times results in 15000 iterations\n for t in range(epochs):\n \n for i in range(len(x_vector)):\n \n old_gradient = np.linalg.norm(gradient_of_w(y_labels[i], x_vector[i], weights))\n\n prob = p_of_1(x_vector[i], weights)\n\n if y[i] == 1:\n weights = weights + (eta*x_vector[i]*(1-prob))\n else:\n weights = weights + (eta*-x_vector[i]*(prob))\n\n test_preds = predict_labels(test_vector,weights)\n f1_test.append(f1_score(list(test_labels),test_preds))\n \n new_gradient = np.linalg.norm(gradient_of_w(y_labels[i], x_vector[i], weights))\n \n if epoch_method == False:\n \n # if gradient changes by less than 0.01%, we'll say it's converged\n if ((np.abs(new_gradient-old_gradient)/np.abs(old_gradient))*100) < 0.01:\n return f1_test[0::m], f1_test[-1], \"Converged via gradient similarity\"\n \n return f1_test[0::m], f1_test[-1], \"Converged through end of epoch\"\n \n\n\n# In[8]:\n\n\n# decay rate set to eta/ith iteration\n\ndef train_decay(x_vector, y_labels, test_vector, test_labels, m=1, eta = 1,num_features = 10,epochs=1,epoch_method=False):\n \n weights = np.random.uniform(0,0,num_features)\n f1_test = []\n \n for t in range(epochs):\n \n for i in range(len(x_vector)):\n \n old_gradient = np.linalg.norm(gradient_of_w(y_labels[i], x_vector[i], 
weights))\n \n prob = p_of_1(x_vector[i], weights)\n \n if y[i] == 1:\n weights = weights + ((eta/(i+1))*x_vector[i]*(1-prob))\n else:\n weights = weights + ((eta/(i+1))*-x_vector[i]*(prob))\n \n test_preds = predict_labels(test_vector,weights)\n f1_test.append(f1_score(list(test_labels),test_preds))\n \n new_gradient = np.linalg.norm(gradient_of_w(y_labels[i], x_vector[i], weights))\n \n if epoch_method == False:\n \n # if gradient changes by less than 0.01%, we'll say it's converged\n if ((np.abs(new_gradient-old_gradient)/np.abs(old_gradient))*100) < 0.01:\n return f1_test[0::m], f1_test[-1], \"Converged via gradient similarity\"\n\n return f1_test[0::m], f1_test[-1], \"Converged through end of epoch\"\n \n\n\n# In[9]:\n\n\n# polyak-Ruppert averaging\n\ndef train_pr_average(x_vector, y_labels, test_vector, test_labels, m=1, eta = 1, num_features = 10, epochs=1, epoch_method=False):\n \n weights = np.random.uniform(0,0,num_features)\n f1_test = []\n weights_vector = []\n \n # add the initial weights\n weights_vector.append(weights)\n \n for t in range(epochs):\n \n for i in range(len(x_vector)):\n \n old_gradient = np.linalg.norm(gradient_of_w(y_labels[i], x_vector[i], weights))\n \n prob = p_of_1(x_vector[i], weights)\n \n if y[i] == 1:\n weights = weights + (eta*x_vector[i]*(1-prob))\n else:\n weights = weights + (eta*-x_vector[i]*(prob))\n \n # add new weights\n weights_vector.append(weights)\n \n # here's the averaging\n avg = list(np.mean(np.array(weights_vector),axis=0))\n \n test_preds = predict_labels(test_vector,avg)\n f1_test.append(f1_score(list(test_labels),test_preds))\n \n new_gradient = np.linalg.norm(gradient_of_w(y_labels[i], x_vector[i], weights))\n \n if epoch_method == False:\n \n # if gradient changes by less than 0.01%, we'll say it's converged\n if ((np.abs(new_gradient-old_gradient)/np.abs(old_gradient))*100) < 0.01:\n return f1_test[0::m], f1_test[-1], \"Converged via gradient similarity\"\n \n \n return f1_test[0::m], f1_test[-1], \"Converged through end of epoch\"\n\n\n# In[10]:\n\n\n# Adagrad\n# Methodology derived from: https://medium.com/konvergen/an-introduction-to-adagrad-f130ae871827\n\ndef train_adagrad(x_vector, y_labels, test_vector, test_labels, m=1, eta = 1, epsilon = 0.01, num_features = 10, epochs=1, epoch_method = False):\n \n weights = np.random.uniform(0,0,num_features)\n f1_test = []\n big_G_list = []\n \n for t in range(epochs):\n \n for i in range(len(x_vector)):\n\n g_t = gradient_of_w(y_labels[i], x_vector[i], weights)\n \n big_G = big_G_t(g_t)\n big_G_list.append(big_G)\n big_G_now = list(np.mean(np.array(big_G_list),axis=0))\n \n weights = adagrad_update(weights, y_labels[i], x_vector[i], eta, epsilon, big_G_now, g_t)\n \n test_preds = predict_labels(test_vector,weights)\n f1_test.append(f1_score(list(test_labels),test_preds))\n \n g_t_next = np.linalg.norm(gradient_of_w(y_labels[i], x_vector[i], weights))\n \n if epoch_method == False:\n \n # if gradient changes by less than 0.01%, we'll say it's converged\n if ((np.abs(g_t_next-np.linalg.norm(g_t))/np.abs(np.linalg.norm(g_t)))*100) < 0.01:\n return f1_test[0::m], f1_test[-1], \"Converged via gradient similarity\"\n \n return f1_test[0::m], f1_test[-1], \"Converged through end of epoch\"\n\n\n# In[142]:\n\n\nconstant=train_constant(x_train_standardized, y, x_test_standardized, y_test,eta=.1,m=20)\ndecay=train_decay(x_train_standardized, y, x_test_standardized, y_test,eta=.1,m=20)\naverage=train_pr_average(x_train_standardized, y, x_test_standardized, 
y_test,eta=.1,m=20)\nada=train_adagrad(x_train_standardized, y, x_test_standardized, y_test,eta=.1,m=20)\n\n\n# In[144]:\n\n\nplt.figure(figsize=(15,5))\nplt.plot(constant[0])\nplt.plot(decay[0])\nplt.plot(average[0])\nplt.plot(ada[0],alpha=0.5)\nplt.legend(['Constant Rate','Decaying Rate','Polyak-Rupert','Adagrad'])\nplt.title(\"Gradient Descent Convergence\")\nplt.xlabel(\"Abscissa (Every 20 Updates)\")\nplt.ylabel(\"Ordinate (F1 Score)\")\nplt.show();\n\n\n# In[11]:\n\n\n### EXERCISE 2 ###\n\n\n# In[12]:\n\n\n# steepest descent helper function\n\ndef get_steepest_gradient(x_vector,y_labels,weights):\n \n gradients_this_epoch=[]\n for i in range(len(x_vector)):\n gradients_this_epoch.append(gradient_of_w(y_labels[i], x_vector[i], weights))\n max_index = np.argmax(np.linalg.norm(gradients_this_epoch,axis=1))\n \n return gradients_this_epoch[max_index]\n\n\n# In[13]:\n\n\n# steepest descent batch gradient descent\n \ndef batch_steepest_descent(x_vector, y_labels, test_vector, test_labels, eta = 1, num_features = 10, epochs=5):\n \n weights = np.random.uniform(0,0,num_features)\n f1_test = []\n \n for t in range(epochs):\n\n # find steepest gradient\n g_t = get_steepest_gradient(x_vector,y_labels,weights)\n \n y_hat = np.round(p_of_1(x_vector, weights))\n difference = list(np.array(y_hat) - np.array(y_labels))\n weights = weights - (eta*(1/(len(x_vector)))*np.dot(x_vector.T,difference))\n \n test_preds = predict_labels(test_vector,weights)\n f1_test.append(f1_score(list(test_labels),test_preds))\n \n g_t_next = get_steepest_gradient(x_vector,y_labels,weights)\n \n # if gradient changes by less than 0.01%, we'll say it's converged\n if ((np.abs(np.linalg.norm(g_t_next)-np.linalg.norm(g_t))/np.linalg.norm(g_t))*100) < 0.01:\n return f1_test, f1_test[-1], \"Converged via gradient similarity\"\n\n return f1_test, f1_test[-1], \"Converged through end of epoch\"\n\n\n# In[14]:\n\n\nstart = timeit.default_timer()\nbatch=batch_steepest_descent(x_train_standardized, y, x_test_standardized, y_test,epochs=1000)\nstop = timeit.default_timer()\ntime_batch = stop - start\nprint(batch[2])\nprint('Time: ', time_batch) \n\n\n# In[15]:\n\n\nx_batch = np.arange(0,time_batch,(time_batch/len(batch[0])))\n\n\n# In[16]:\n\n\nstart = timeit.default_timer()\nconstant_= train_constant(x_train_standardized, y, x_test_standardized, y_test,eta=.1,m=1)\nstop = timeit.default_timer()\ntime_constant_ = stop - start\nprint(constant_[2])\nprint('Time: ', time_constant_) \n\n\n# In[17]:\n\n\nx_constant_ = np.arange(0,time_constant_,(time_constant_/len(constant_[0])))\n\n\n# In[18]:\n\n\nstart = timeit.default_timer()\ndecay_=train_decay(x_train_standardized, y, x_test_standardized, y_test,eta=.1,m=1)\nstop = timeit.default_timer()\ntime_decay_ = stop - start\nprint(decay_[2])\nprint('Time: ', time_decay_) \n\n\n# In[19]:\n\n\nx_decay_ = np.arange(0,time_decay_,(time_decay_/len(decay_[0])))\n\n\n# In[20]:\n\n\nstart = timeit.default_timer()\naverage_=train_pr_average(x_train_standardized, y, x_test_standardized, y_test,eta=.1,m=1)\nstop = timeit.default_timer()\ntime_average_ = stop - start\nprint(average_[2])\nprint('Time: ', time_average_)\n\n\n# In[21]:\n\n\nx_average_ = np.arange(0,time_average_,(time_average_/len(average_[0])))\n\n\n# In[22]:\n\n\nstart = timeit.default_timer()\nada_=train_adagrad(x_train_standardized, y, x_test_standardized, y_test,eta=.1,m=1)\nstop = timeit.default_timer()\ntime_ada_ = stop - start\nprint(ada_[2])\nprint('Time: ', time_ada_)\n\n\n# In[23]:\n\n\nx_ada_ = 
np.arange(0,time_ada_,(time_ada_/len(ada_[0])))\n\n\n# In[30]:\n\n\nplt.figure(figsize=(15,5))\nplt.plot(x_batch,batch[0])\nplt.plot(x_constant_,constant_[0])\nplt.plot(x_decay_,decay_[0])\nplt.plot(x_average_,average_[0])\nplt.plot(x_ada_,ada_[0])\nplt.legend(['Batch','Constant Rate','Decaying Rate','Polyak-Rupert','Adagrad'])\nplt.title(\"Convergence F1 Test Score vs. Runtime\")\nplt.xlabel(\"Seconds\")\nplt.ylabel(\"F1 Score Test\")\nplt.show();\n\n\n# In[29]:\n\n\nplt.figure(figsize=(15,5))\nplt.bar(['Batch','Constant Rate','Decaying Rate','Polyak-Rupert','Adagrad'],[time_batch, time_constant_, time_decay_, time_average_, time_ada_])\nplt.title(\"Runtimes\")\nplt.ylabel(\"Seconds\")\nplt.show()\n\n","sub_path":"A2_E1_E2_code.py","file_name":"A2_E1_E2_code.py","file_ext":"py","file_size_in_byte":12653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"544740121","text":"from dataclasses import dataclass\nfrom myia.dtype import Bool, Int, Float, List, Array, pytype_to_myiatype\nfrom myia.prim.shape_inferrers import NOSHAPE as NSH # noqa: F401\nfrom myia.infer import ANYTHING as ANY # noqa: F401\n\nB = Bool\n\ni16 = Int[16]\ni32 = Int[32]\ni64 = Int[64]\n\nf16 = Float[16]\nf32 = Float[32]\nf64 = Float[64]\n\nli16 = List[Int[16]]\nli32 = List[Int[32]]\nli64 = List[Int[64]]\n\nlf16 = List[Float[16]]\nlf32 = List[Float[32]]\nlf64 = List[Float[64]]\n\nai16 = Array[Int[16]]\nai32 = Array[Int[32]]\nai64 = Array[Int[64]]\n\naf16 = Array[Float[16]]\naf32 = Array[Float[32]]\naf64 = Array[Float[64]]\n\n\n@dataclass(frozen=True)\nclass Point:\n x: i64\n y: i64\n\n def abs(self):\n return (self.x ** 2 + self.y ** 2) ** 0.5\n\n def __add__(self, other):\n return Point(self.x * other.x, self.y * other.y)\n\n\npt = pytype_to_myiatype(Point)\nlpt = List[pt]\n","sub_path":"typ.py","file_name":"typ.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"350008450","text":"\"\"\"\n----------------------------- Pickle Module ---------------------------\npickle module use to store data or preserve the data in file form and using when use it\n\"\"\"\n\nimport pickle\n\n# # For create file\n# cars = [\"BMW\",\"Audi\",\"Ferrari\",\"Bugatti\",\"Volkswagen\",\"Skkoda\"]\n#\n# file = \"mycars.pkl\"\n#\n# fileobj = open(file,'wb') # Open file in binary mode\n#\n# pickle.dump(cars,fileobj)\n#\n# fileobj.close()\n\n\n# For fetch file\n\nfile = \"mycars.pkl\"\n\nfileobj = open(file,'rb')\n\nmy_cars = pickle.load(fileobj)\n\nprint(my_cars)\nprint(type(my_cars))\n\nfileobj.close()\n\n\n","sub_path":"No_60_Pickle_Module.py","file_name":"No_60_Pickle_Module.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"227466934","text":"import os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nimport cv2\n\nfrom pycocotools.coco import COCO\nfrom utils import resize_image, get_resize_image_boxes\n\n\ndef x1y1wh2xywh(bboxes):\n assert bboxes.ndim == 2, 'your bboxes have somthing wrong...'\n bboxes[:, 0] = bboxes[:, 0] + bboxes[:, 2] / 2\n bboxes[:, 1] = bboxes[:, 1] + bboxes[:, 3] / 2\n return bboxes\n\n\nclass COCOData(Dataset):\n def __init__(self, annotations_file, imgs_dir, catNms, height, width):\n self.height = height\n self.width = width\n self.coco = COCO(annotations_file)\n self.catIds = self.coco.getCatIds(catNms=catNms)\n 
self.imgIds = self.coco.getImgIds(catIds=self.catIds)\n self.images_dir = imgs_dir\n\n def __len__(self):\n return len(self.imgIds)\n\n def __getitem__(self, index):\n img_id = self.imgIds[index]\n img = self.coco.loadImgs(img_id)[0]\n img_path = os.path.join(self.images_dir, img['file_name'])\n ann_id = self.coco.getAnnIds(imgIds=img_id, catIds=self.catIds)\n annos = self.coco.loadAnns(ann_id)\n bboxes = [anno['bbox'] for anno in annos]\n resize_image, bboxes = get_resize_image_boxes(img_path, self.height, self.width, bboxes)\n bboxes = np.array(bboxes)\n\n resize_image = resize_image / 255.0\n resize_image = resize_image.transpose(2, 1, 0)\n # resize_image = torch.from_numpy(resize_image)\n # resize_image = resize_image.unsqueeze(0)\n bboxes = x1y1wh2xywh(bboxes)\n # print(bboxes)\n labels = bboxes2labels(bboxes, self.height, self.width)\n return resize_image, labels\n\n\ndef bboxes2labels(bboxes, height, width):\n labels = np.zeros((3, 52, 52))\n x = bboxes[:, 0].astype(np.int) // 8\n y = bboxes[:, 1].astype(np.int) // 8\n w = bboxes[:, 2] / width\n h = bboxes[:, 3] / height\n # print('w: {}'.format(w))\n # print('h: {}'.format(h))\n labels[0, x, y] = 1.0\n labels[1, x, y] = w\n labels[2, x, y] = h\n return labels\n\n\nif __name__ == '__main__':\n images_dir = '/home/ssm/Desktop/human-pose-estimation.pytorch/data/coco/images/val2017'\n anno_path = '/home/ssm/Desktop/human-pose-estimation.pytorch/data/coco/annotations/instances_val2017.json'\n\n d = COCOData(anno_path, images_dir, ['person', 'dog'], 416, 416)\n print('\\n'*2)\n resize_image, labels = d[1]\n print(resize_image.shape)\n print(labels.shape)\n\n # for box in bboxes:\n # x1, y1, x2, y2 = box[0]-box[2]/2, box[1]-box[3]/2, box[0]+box[2]/2, box[1]+box[3]/2\n # cv2.rectangle(resize_image, (int(x1), int(y1)), (int(x2), int(y2)), color=(0, 0, 255), thickness=2)\n # cv2.imshow('image', resize_image)\n # cv2.waitKey()","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"272866345","text":"from typing import List, Union\n\n\nclass MarkdownParser:\n \"\"\"Parse markdown text to extract code snippets\n \"\"\"\n def __init__(self, markdown: str) -> None:\n \"\"\"Constructor for markdown processor\n\n Args:\n markdown (str): The markdown code\n \"\"\"\n self._markdown = markdown\n self._lines = markdown.split('\\n')\n self._code_snippets: List[CodeSnippet] = []\n\n current_snippet: Union[List[str], None] = None\n current_language: Union[str, None] = None\n for line in self._lines:\n if current_language is None:\n if line.startswith(\"```\"):\n current_language = line[3:]\n current_snippet = []\n else:\n if current_snippet is None:\n raise Exception('current_language is set but current_snippet is None.')\n if line.startswith(\"```\"):\n self._code_snippets.append(\n CodeSnippet(\n language=current_language,\n code='\\n'.join(current_snippet)\n )\n )\n current_snippet = None\n current_language = None\n else:\n current_snippet.append(line)\n @property\n def code_snippets(self):\n \"\"\"Get the extracted code snippets\n\n Returns:\n List of code snippets\n \"\"\"\n return [cs for cs in self._code_snippets]\n\nclass CodeSnippet:\n def __init__(self, *, language: str, code: str) -> None:\n self._language = language\n self._code = code\n @property\n def language(self):\n return self._language\n @property\n def code(self):\n return 
self._code","sub_path":"03-code-completion/assets/code_completion_2.py","file_name":"code_completion_2.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"240108923","text":"import sqlite3\n\nimport pandas as pd\nimport numpy as np\n\n# metasys data format:\n# each row is a timestamp and each column is a sensor (each sensor either measures temperature or co2 for one room)\n\n# goal data format:\n# each row is a timestamp x room number, columns are temp and co2 values for that timestamp and room\n\n# 1. parse room numbers from column headers & maybe match room numbers to each other --> it's always RM something except if it's the caf, so we'd remove the \"RM\"\n# 2. we should be able to tell whether the column in question is temperature or co2 -- looks like it's marked with a -T or a CO2 (no dash)\n# 3. move the room number factor from a row to a column/reorganize the data into the goal format\n\n# basically, reading column headers to group data\nimport sqlalchemy\n\n\ndef read_room(x):\n if \"RM\" in x:\n rm = x.split(' ')[0]\n return rm[2:]\n if x == \"Outside Air CO2\" or x == \"Outside Air RTU1\":\n return \"Outside Air\"\n else:\n return x\n\n\ndef is_co2_sensor(x):\n # if \"CO2\" not in x:\n # print(\"hello\")\n # print(x)\n return \"CO2\" in x or \"-Q\" in x # not sure this works 100% of the time\n\n\ndf = pd.read_csv(\"2021-Q1-IAQ-AHS-Temp-CO2.csv\", error_bad_lines=False, low_memory=False) # low_memory=False added b/c of potential data type issues\ndf = df.drop(df.tail(2).index) # removes informational lines at the bottom of the file\ndf.to_csv(\"tester.csv\")\n\n\nrooms = pd.Series(df.T.index)[1:].reset_index(drop=True)\nroom_nums = rooms.apply(read_room) # for some reason this adds an extra row at the start so I'm just getting rid of it\n# print(room_nums[100:110])\n# print(\"Temp Sensors: \")\nis_co2 = rooms.apply(is_co2_sensor)\n\n# this goes into the multiindex now\nrooms_plus_sensors = pd.concat([room_nums, is_co2], axis=1)\nprint(\"rooms plus sensors\")\nprint(rooms_plus_sensors)\n#rooms_plus_sensors.to_csv(\"tester.csv\")\n\n# save a transposed copy of df so that we can index by rooms\n# print(\"End of temp sensors\")\n# print(df.columns)\ntransposed = df.set_index(\"Unnamed: 0\").T\ntransposed = transposed.reset_index()\ntransposed.insert(1, \"Room Number\", room_nums, True)\ntransposed.insert(2, \"CO2 Sensor?\", is_co2, True)\n# transposed.to_csv(\"new_tester.csv\")\n\n# print(\"CO2\" in \"Cafe UV01 ZN08 Q CO2\")\n\n# my_fake_df = pd.DataFrame()\n# my_fake_df.insert(0, \"Room Number\", room_nums, True)\n# my_fake_df.to_csv(\"new_tester.csv\")\n\ntransposed = transposed.sort_values(\"CO2 Sensor?\")\ntransposed = transposed.sort_values(\"Room Number\")\ntransposed = transposed.reset_index().drop(\"index\", axis=1).drop(\"level_0\", axis=1)\n\n# Final stage of modifying data\npivot = transposed.melt(id_vars=[\"Room Number\", \"CO2 Sensor?\"], var_name=\"Timestamp\", value_name=\"Value\")\npivot = pivot.set_index([\"Room Number\", \"Timestamp\"])\npivot = pd.pivot_table(pivot, index=[\"Room Number\", \"Timestamp\"], values=\"Value\", columns=[\"CO2 Sensor?\"], aggfunc='first')\npivot.columns = [\"Temperature\", \"CO2\"]\ntemp_units = [\"deg F\"]*len(pivot.axes[0])\nco2_units = [\"ppm\"]*len(pivot.axes[0])\npivot[\"Temp Units\"] = temp_units\npivot[\"CO2 Units\"] = co2_units\npivot = pivot.reset_index()\npivot = pivot.rename(columns={\"Room Number\": \"Room #\", \"Temp Units\": 
\"Temp. Units\"})\npivot = pivot.set_index(\"Room #\")\n\n\ndef custom_conv(x):\n if type(x) != float or not np.isnan(x):\n return int(round(float(x)))\n return x\n\n\npivot[\"Temperature\"] = pivot[\"Temperature\"].apply(custom_conv)\npivot[\"CO2\"] = pivot[\"CO2\"].apply(custom_conv) # FOUND IT !!\n\nSERVER_PATH = '' # '/media/ea/Data/Students/jade/buildingEnergyApi/'\nPATH = 'my_file'\n\nengine = sqlalchemy.create_engine('sqlite:///' + SERVER_PATH + PATH)\nconn = sqlite3.connect(SERVER_PATH + PATH)\n# new_df = pd.read_sql(\"MetasysLog\", engine)\npivot.to_csv(SERVER_PATH + \"tester.csv\")\npivot.to_sql(\"MetasysLog\", conn, if_exists='append') # actual permanent database\npivot.to_sql(\"TempAndCO2LogDaily\", conn, if_exists='append') # copy used for tasks 3 and 4 in this branch, must be cleared out every week\n\ntest2 = pd.read_sql(\"TempAndCO2Log\", engine)\ntest2.to_csv(SERVER_PATH + \"tester.csv\")\n\n","sub_path":"convert_metasys_data.py","file_name":"convert_metasys_data.py","file_ext":"py","file_size_in_byte":4053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"146754905","text":"class Metadata:\n ''' \n Metadata:\n Contain everything about an image\n - Mask\n - Image\n - Description\n '''\n def __init__(self, meta, dataset, img, mask):\n self.meta = meta\n self.dataset = dataset\n self.img = img\n self.type = meta[\"clinical\"][\"benign_malignant\"] # malignant , benign\n self.mask = mask\n","sub_path":"Mask/meta/meta_data.py","file_name":"meta_data.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"521542883","text":"import tensorflow as tf\nfrom tensorflow.python.platform import gfile\nfrom PIL import Image\nimport numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nimport cv2\nimport tensorflow as tf\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\nimport sys\ndi = {}\n\nwith tf.Graph().as_default() as graph: # Set default graph as graph\n with tf.Session() as sess:\n # Load the graph in graph_def\n print(\"load graph\")\n\n # We load the protobuf file from the disk and parse it to retrive the unserialized graph_drf\n with gfile.FastGFile(\"frozen_inference_graph.pb\",'rb') as f:\n # Set FCN graph to the default graph\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n sess.graph.as_default()\n\n # Import a graph_def into the current default Graph (In this case, the weights are (typically) embedded in the graph)\n\n tf.import_graph_def(\n graph_def,\n input_map=None,\n return_elements=None,\n name=\"\",\n op_dict=None,\n producer_op_list=None\n ) \n \n \n image_tensor = graph.get_tensor_by_name('image_tensor:0')\n detection_boxes = graph.get_tensor_by_name('detection_boxes:0')\n detection_scores = graph.get_tensor_by_name('detection_scores:0')\n detection_classes = graph.get_tensor_by_name('detection_classes:0')\n num_detections = graph.get_tensor_by_name('num_detections:0')\n \n image = cv2.imread(sys.argv[1])\n image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_expanded = np.expand_dims(image_rgb, axis=0)\n \n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_expanded})\n\nlabel_map = label_map_util.load_labelmap(\"labelmap.pbtxt\")\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=330, 
use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\nvis_util.visualize_boxes_and_labels_on_image_array(\n image,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=4,\n min_score_thresh=0.30)\n\n# All the results have been drawn on image. Now display the image.\ncv2.imshow(\"detected\", image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"501762646","text":"import unittest\nimport os\nfrom appium import webdriver\n\nSAUCE_USERNAME = os.environ['SAUCE_USERNAME']\nSAUCE_ACCESS_KEY = os.environ['SAUCE_ACCESS_KEY']\ncaps = {}\n#appium_version = {}\n#appium_version['appium-url'] = 'https://www.dropbox.com/s/d94pw66c0z9tkd9/appium-vFakeVersion200.tar.bz2?dl=1'\n#caps['appium_version'] = appium_version\ncaps['deviceName'] = \"iPhone Simulator\"\ncaps['browserName'] = \"Safari\"\ncaps['url'] = \"http://www.google.com\"\ncaps['platformVersion'] = \"12.4\"\ncaps['platformName'] = \"iOS\"\ncaps['appiumVersion'] = \"1.16.0\"\n#host = \"http://admin:147a1148-a221-4e3b-a1df-987fd51c5eea@ondemand.staging.saucelabs.net/wd/hub\" # Staging\n#host = (\"http://%s:%s@ondemand.eu-central-1.saucelabs.com/wd/hub\" % (SAUCE_USERNAME, SAUCE_ACCESS_KEY)) # Frankfurt\n#host = (\"http://%s:%s@ondemand.saucelabs.com/wd/hub\" % (SAUCE_USERNAME, SAUCE_ACCESS_KEY)) # Prod\n#host = 'http://admin:0e779f56-385a-41be-a562-6f6908bf5acf@localhost:4444/wd/hub'\n#host = \"http://admin:0e779f56-385a-41be-a562-6f6908bf5acf@ondemand.dpgrahamwdapreb146146.ktb.blocks.saucelabs.net/wd/hub\" # Cluster\n#host = \"http://localhost:4723/wd/hub\"\nhost = \"http://admin:0e779f56-385a-41be-a562-6f6908bf5acf@ondemand.10.254.246.125.xip.io:4444/wd/hub\"\ndriver = webdriver.Remote(host, caps)\nprint('Getting google')\ndriver.get('http://www.google.com')\nresult = driver.page_source\nprint(result)\nprint('Got it')\n\ndriver.quit()","sub_path":"scripts/safari-ios.py","file_name":"safari-ios.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"371883401","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport os\nimport sys\nimport csv\nimport json\nimport hashlib\nimport optparse\nimport collections\n\ndef parse_groups(g_path):\n groups = collections.defaultdict(set)\n cids = set()\n with open(g_path) as gf:\n for line in gf:\n pline = line.strip().split(',')\n cid = pline[0]\n label = pline[1]\n cids.add(cid)\n groups[label].add(cid)\n return (groups, cids)\n\ndef parse_dataset(ds_path, cids):\n data = {}\n with open(ds_path) as csv_file:\n csv_dict = csv.DictReader(csv_file, delimiter='\\t')\n for elem in csv_dict:\n # Take pre-clustered connections only\n eid = '{}:{}'.format(elem['sha2'], elem['conn_uid'])\n if eid in cids:\n data.update({eid: {'enc_data_size': int(elem['enc_data_size']),\n 'avclass_family': elem['avclass_family']\n }})\n return data\n\ndef empty_content(data):\n # Return True if all contents are empty\n # TODO change all empty for a ratio\n empty = True\n for id in data:\n empty &= data[id]['enc_data_size'] == 0\n return empty\n\ndef merge_clusters(merge_these, groups):\n for c, g_list in merge_these.items():\n ids = []\n last_g = ''\n for g in g_list:\n ids.extend(groups.pop(g))\n last_g += 
'{}_'.format(g)\n groups[last_g] = ids\n return groups\n\ndef output_as_json(clusters, classes):\n res = []\n for label, cids in sorted(clusters.items()):\n idclasses = collections.Counter(classes[i] for i in cids).most_common()\n current = {\n 'label': int(label),\n 'ids': cids,\n 'avclass': idclasses,\n 'size': len(cids),\n }\n res.append(current)\n return res\n\ndef main(options, args):\n # Parse groups by TLS FINGERPRINT dataset and original dataset\n (groups, gids) = parse_groups(options.groups)\n (clusters, cids) = parse_groups(options.clusters)\n data = parse_dataset(args[0], cids)\n\n # Searching for groups with empty content to skip\n omit = set()\n for g, gids in groups.items():\n empty = empty_content({id:data[id] for id in gids})\n if empty: omit.add(g)\n\n if options.debug:\n print('Pre-processing {} vectors.'.format(len(data)))\n\n merge_these = collections.defaultdict(list)\n for c, cids in clusters.items():\n for g in [x for x in groups if x not in omit]:\n if cids.issuperset(groups[g]):\n merge_these[c].append(g)\n omit.add(g)\n if options.debug:\n print('MERGE_THESE: {}'.format(merge_these))\n\n c_merged = merge_clusters(merge_these, groups)\n\n output_file = 'tls_all_clustering_grouping_pay.tsv'\n output_json = 'tls_all_clustering_grouping_pay.json'\n if options.output:\n output_file = '{}.tsv'.format(options.output)\n output_json = '{}.json'.format(options.output)\n\n labels = []\n with open(output_file, 'w') as of:\n classes = {}\n clusters = collections.defaultdict(list)\n for new_label, (label, ids) in enumerate(c_merged.items()):\n clusters[new_label] = [id for id in sorted(ids)]\n for id in ids:\n print('{},{},{}'.format(id, new_label, label), file=of)\n labels.append(new_label)\n classes[id] = data[id]['avclass_family']\n\n with open(output_json, 'w') as oj:\n json_out = output_as_json(clusters, classes)\n json.dump(json_out, oj)\n\n if options.debug:\n print('Clusters in the most common(20) list:')\n for label, total in collections.Counter(labels).most_common(20):\n print('\\t{}:\\t{}'.format(label, total))\n\n if options.debug:\n print('Grouping by content, done.')\n\n\nif __name__ == '__main__':\n parser = optparse.OptionParser(\n usage=\"Usage: %prog [options] dataset_filename\",\n version=\"%prog 1.0\")\n parser.add_option(\n '-g', '--groups', action='store', dest='groups',\n help='Path to file containing the groups by TLS FINGERPRINT')\n parser.add_option(\n '-c', '--clusters', action='store', dest='clusters',\n help='Path to file with the clusters')\n parser.add_option(\n '-o', '--output', action='store', dest='output',\n help='Name for the output file with the results')\n parser.add_option(\n '-D', '--debug', action='store_true', dest='debug',\n help='Print debug messages', default=False)\n\n options, args = parser.parse_args()\n\n if len(args) != 1 or not os.path.isfile(args[0]):\n parser.error(\"Dataset not found. Aborting...\")\n if not os.path.isfile(options.groups):\n parser.error(\"Groups file not found. Aborting...\")\n if not os.path.isfile(options.clusters):\n parser.error(\"Clusters file not found. 
Aborting...\")\n\n main(options, args)\n","sub_path":"clustering/fishdbc_notebook/merge_clusters.py","file_name":"merge_clusters.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"572934935","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 23 13:58:54 2019\n\n@author: bclas\n\"\"\"\n\"\"\"\nwrite a function that exchanges the first and last items in a list\n\nwrite a function that removes every other item in the list\n\nwrite a function that removes the first 4 and last 4 items \nand then every other item in the sequence\n\nwrite a function that reverses the items\n\nwrite a function that chops the sequence into thirds and puts the last\nthird first, and the first in the middle and the middle at the end. \n\"\"\"\nlistofpets = [\"lizard\",\"dog\",\"cat\",\"bird\",\"snake\",\"spider\",\"ferret\",\"monkey\",\"hippo\",\"rock\"]\ntupleofnum = 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20\n\ndef exchange_first_last(seq):\n \"\"\"This function swaps the first and last items in a sequence\"\"\"\n copy_list = seq\n copy_list.append(copy_list.pop(0)) \n copy_list.insert(0, copy_list.pop(-2))\n return copy_list\n\nlistofpets = [\"lizard\",\"dog\",\"cat\",\"bird\",\"snake\",\"spider\",\"ferret\",\"monkey\",\"hippo\",\"rock\"]\nassert exchange_first_last(listofpets) == [\"rock\",\"dog\",\"cat\",\"bird\",\"snake\",\"spider\",\"ferret\",\"monkey\",\"hippo\",\"lizard\"]\n\ndef remove_every_other(seq):\n \"\"\"This function removes every other item in a sequence\"\"\"\n every_other = seq[0::2]\n return every_other\n\nlistofpets = [\"lizard\",\"dog\",\"cat\",\"bird\",\"snake\",\"spider\",\"ferret\",\"monkey\",\"hippo\",\"rock\"] \nassert remove_every_other(listofpets) == ['lizard', 'cat', 'snake', 'ferret', 'hippo']\n\ndef remove_first_four_last_four_every_other(seq):\n \"\"\"This function removes the first four items in a sequence, the\n last four items in a sequence, and than every other item of what remains\"\"\"\n copy_list = seq\n return copy_list[4:-4:2]\n\nlistofpets = [\"lizard\",\"dog\",\"cat\",\"bird\",\"snake\",\"spider\",\"ferret\",\"monkey\",\"hippo\",\"rock\"] \nassert remove_first_four_last_four_every_other(listofpets) == [\"snake\"]\n \ndef reverse_order(seq):\n \"\"\"This function flips the sequence so all items are backwards\"\"\"\n copy_list = seq\n return copy_list[::-1]\n\nlistofpets = [\"lizard\",\"dog\",\"cat\",\"bird\",\"snake\",\"spider\",\"ferret\",\"monkey\",\"hippo\",\"rock\"] \nassert reverse_order(listofpets) == ['rock','hippo','monkey','ferret','spider','snake','bird','cat','dog','lizard'] \n\ndef chop_reorder(seq):\n \"\"\"This function chops the sequence into thirds and reorders them\"\"\"\n seq_length = len(seq)\n x = int((seq_length) / 3)\n original_first = seq[:x:]\n original_second = seq[x:-x:]\n original_third = seq[-x::]\n return original_third + original_first + original_second\n \nassert chop_reorder(tupleofnum) == (15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n","sub_path":"students/ben_carter/lesson03/slicinglab.py","file_name":"slicinglab.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"514847477","text":"\"\"\" Distributed toolkit: Extends `multiprocessing.managers`_ to\nsupport distributed computing.\n\"\"\"\nfrom getpass import 
getuser\nfrom multiprocessing import Process, connection, current_process, util\nfrom multiprocessing.managers import (BaseManager, BaseProxy, Server, State,\n dispatch)\nimport re\nfrom socket import getfqdn, gethostbyname\nfrom threading import Thread\n\n\ndef fqhost(*args):\n \"\"\" Gets fully qualified host.\"\"\"\n return gethostbyname(getfqdn(*args))\n\n\ndef fqaddr(address):\n \"\"\" Gets fully qualified address.\"\"\"\n return (fqhost(address[0]), address[1])\n\n\ndef parse_address(s):\n \"\"\" Convert address string `user@host:port` to (user, host, port)\n tuple.\"\"\"\n m = re.match(\"(?:(.*)@)?([\\da-zA-Z\\-\\.]{1,255}):(\\d{1,5})\", s)\n user, host, port = m.groups()\n host = fqhost(host)\n if port is not None:\n port = int(port)\n return (user, host, port)\n\n\nclass ConnectionServer(Server):\n \"\"\" Subclass of multiprocessing.managers.Server that allows for\n connection monitoring.\"\"\"\n\n def __init__(self, registry, address, authkey, serializer, conn_writer):\n super(ConnectionServer, self).__init__(registry, fqaddr(address),\n authkey, serializer)\n self.conn_writer = conn_writer\n\n def __del__(self):\n self.conn_writer.close()\n\n @property\n def address(self):\n return fqaddr(self._address)\n\n @address.setter\n def address(self, address):\n self._address = fqaddr(address)\n\n def _notify_connection(self):\n \"\"\" Notifies other end of self.conn_writer Pipe about the last\n accepted connection.\"\"\"\n self.conn_writer.send(self.listener._listener._last_accepted)\n\n def serve_forever(self):\n \"\"\" Run the server forever.\"\"\"\n current_process()._manager_server = self\n try:\n try:\n while True:\n try:\n c = self.listener.accept()\n except (OSError, IOError):\n continue\n self._notify_connection()\n thread = Thread(target=self.handle_request, args=(c,))\n thread.daemon = False\n thread.start()\n except (KeyboardInterrupt, SystemExit):\n pass\n finally:\n self.stop = 999\n self.listener.close()\n\n\nclass ConnectionManager(BaseManager):\n \"\"\" Subclass of multiprocessing.BaseManager that provides more\n access to the underlying network connections and also defines\n several information-gathering methods.\"\"\"\n\n _Server = ConnectionServer\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop(\"user\", getuser())\n super(ConnectionManager, self).__init__(*args, **kwargs)\n\n @property\n def address(self):\n return fqaddr(self._address)\n\n @address.setter\n def address(self, address):\n self._address = fqaddr(address)\n\n @property\n def authkey(self):\n return self._authkey\n\n @authkey.setter\n def authkey(self, authkey):\n self._authkey = authkey\n\n def create_conn_pipe(self):\n \"\"\" Creates a pipe for communicating with server about\n accepted connections. 
Returns the writer.\"\"\"\n conn_reader, conn_writer = connection.Pipe(duplex=False)\n return conn_reader, conn_writer\n\n def get_server(self):\n \"\"\" Return server object with serve_forever() method and\n address attribute.\"\"\"\n assert self._state.value == State.INITIAL\n conn_writer = self.create_conn_pipe(self)\n return self._Server(self._registry, self.address,\n self.authkey, self._serializer, conn_writer)\n\n def start(self, initializer=None, initargs=()):\n \"\"\" Spawn a server process for this manager object.\"\"\"\n assert self._state.value == State.INITIAL\n\n if initializer is not None and not hasattr(initializer, \"__call__\"):\n raise TypeError(\"Initializer must be a callable.\")\n\n # pipe over which we will retrieve address of server\n reader, writer = connection.Pipe(duplex=False)\n # Pipe over which we will communicate accepted connections.\n conn_reader, conn_writer = self.create_conn_pipe()\n\n # spawn process which runs a server\n self._process = Process(\n target=type(self)._run_server,\n args=(self._registry, fqaddr(self._address), self._authkey,\n self._serializer, writer, conn_writer, initializer,\n initargs))\n ident = \":\".join(str(i) for i in self._process._identity)\n self._process.name = type(self).__name__ + \"-\" + ident\n self._process.start()\n\n # get address of server\n writer.close()\n self._address = fqaddr(reader.recv())\n reader.close()\n # Start connection monitor.\n self.start_conn_monitor(conn_reader)\n # Register a finalizer.\n self._state.value = State.STARTED\n self.shutdown = util.Finalize(\n self, type(self)._finalize_manager,\n args=(self._process, self._address, self._authkey,\n self._state, (conn_writer,), self._Client),\n exitpriority=0)\n\n @classmethod\n def _run_server(cls, registry, address, authkey, serializer, writer,\n conn_writer, initializer=None, initargs=()):\n \"\"\" Create a server, report its address and run it.\"\"\"\n if initializer is not None:\n initializer(*initargs)\n # Create server.\n server = cls._Server(registry, fqaddr(address), authkey, serializer,\n conn_writer)\n # Inform parent process of the server's address.\n writer.send(server.address)\n writer.close()\n # Run the manager.\n util.info(\"Server running at {}:{}.\".format(*server.address))\n server.serve_forever()\n\n @staticmethod\n def _finalize_manager(process, address, authkey, state, conns, _Client):\n \"\"\" Shutdown the manager process; will be registered as a\n finalizer.\"\"\"\n if process.is_alive():\n util.info(\"Sending shutdown message to manager.\")\n try:\n conn = _Client(fqaddr(address), authkey=authkey)\n try:\n dispatch(conn, None, \"shutdown\")\n finally:\n conn.close()\n except Exception:\n pass\n for conn in conns:\n conn.close()\n process.join(timeout=0.2)\n if process.is_alive():\n util.info(\"Manager still alive.\")\n if hasattr(process, \"terminate\"):\n util.info(\"Trying to `terminate()` manager process.\")\n process.terminate()\n process.join(timeout=0.1)\n if process.is_alive():\n util.info(\"Manager still alive after terminate.\")\n state.value = State.SHUTDOWN\n try:\n del BaseProxy._address_to_local[fqaddr(address)]\n except KeyError:\n pass\n\n @staticmethod\n def _conn_monitor(conn_reader):\n \"\"\" Runs connection-monitoring loop.\"\"\"\n util.debug(\"Connection monitor started.\")\n loop = True\n while loop:\n conn_reader.poll()\n try:\n address = fqaddr(conn_reader.recv())\n except EOFError:\n loop = False\n else:\n util.debug(\"\\tAccepted connection from: {}:{}.\".format(\n *address))\n conn_reader.close()\n 
util.debug(\"Connection monitor ended.\")\n\n def start_conn_monitor(self, conn_reader):\n \"\"\" Starts thread that monitors for incoming connections.\"\"\"\n thread = Thread(target=self._conn_monitor, name=\"conn_monitor\",\n args=(conn_reader,))\n thread.start()\n\n @property\n def host(self):\n \"\"\" Gets host.\"\"\"\n host = fqhost()\n return host\n\n @property\n def info(self):\n \"\"\" Get the current host and process pid.\"\"\"\n return self.host, current_process().pid\n","sub_path":"distributed/core/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":8218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"405300926","text":"#!/usr/bin/env python\nimport codecs\nimport json\n\nfrom tokenizer import Tokenizer\nfrom storage import Storage\n\nclass Grapher(object):\n def __init__(self,tokenizer=None,load=None):\n if load is not None:\n with open(load,'r') as store:\n self.graph = json.load(store) \n return\n \n self.feed(tokenizer)\n\n def feed(self,tokenizer):\n old_graph = self.graph\n self.graph = graph = {}\n prev = None\n count = 0\n for char in tokenizer.feed():\n if prev is not None:\n # backtrace\n self._connect(prev,char)\n prev = char\n \n # if changed?\n if old_graph is None:\n self.changed = True\n return\n\n for key,words in old_graph.items():\n new_words = self.graph.get(key,None)\n if new_words is None or len(new_words) != len(words):\n self.changed = True\n return \n # size equal,check detail\n mark = {}\n for word in words.keys():\n if word not in new_words:\n self.changed = True\n return \n self.changed = False\n return \n\n def _node(self,char):\n node = self.graph.get(char,None) \n if node is None:\n self.graph[char] = node = {}\n return node\n \n def _connect(self,prev,char):\n prev_node = self._node(prev)\n prev_node[char] = prev_node.get(char,0) + 1\n return\n \n def _rescale(self,frequencyes,penaty=1):\n max_frequency = 0\n min_frequency = 0\n # find max and min\n for key,frequency in frequencyes.items():\n if frequency > max_frequency:\n max_frequency = frequency\n if frequency < min_frequency:\n min_frequency = frequency\n \n # range\n frequency_range = max_frequency - min_frequency \n scaled = {}\n for key,frequency in frequencyes.items():\n scaled[key] = float(frequency - 1) / float(frequency_range) \n \n return scaled \n\n def shrink(self,flip=0.5):\n shrink_graph = {}\n for key,words in self.graph.items():\n # resale\n rescaled = self._rescale(words)\n for word,frequency in rescaled.items():\n if frequency < flip:\n del rescaled[word]\n \n # if kick off?\n if len(rescaled) > 0:\n # keep,and copy back\n shirnk_words = {}\n for word in rescaled.keys():\n shirnk_words[word] = words[word]\n \n # add to shrink graph\n shrink_graph[key] = shirnk_words\n return shrink_graph\n\n def json(self,name,graph):\n with codecs.open(name,'w','utf8') as store:\n json.dump(graph,store,ensure_ascii=False,indent=2)\n \n def store(self,name,graph):\n with Storage(name,'w') as store:\n for key,words in graph.items():\n for second,frequency in words.items():\n store.write('%s%s\\t%s\\n' % (key,second,frequency))\n\n","sub_path":"grapher.py","file_name":"grapher.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"478211915","text":"class Solution:\n def getSkyline(self, LRH):\n heap = []\n i, n = 0, len(LRH)\n liveHR = []\n while i < n or liveHR:\n if not liveHR or i < n and LRH[i][0] <= 
-liveHR[0][1]:\n x = LRH[i][0]\n while i < n and LRH[i][0] == x:\n heapq.heappush(liveHR, (-LRH[i][2], -LRH[i][1]))\n i += 1\n else:\n x = -liveHR[0][1]\n while liveHR and -liveHR[0][1] <= x:\n heapq.heappop(liveHR)\n height = len(liveHR) and -liveHR[0][0]\n if not heap or height != heap[-1][1]:\n heap += [x, height],\n return heap\n","sub_path":"leetcode/The Skyline Problem.py","file_name":"The Skyline Problem.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"608054483","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 18 21:15:24 2018\n\n@author: Wenqi Zheng\n\"\"\"\nimport authAndQury\nimport numpy as np\nimport argparse\nimport IO\nimport csv\n\ndef append(oriList,addList):\n for i in addList:\n oriList.append(i)\n return oriList\n \ndef searchWithOffset(maxOffset):\n DEFAULT_TERM = 'pizza'\n DEFAULT_LOCATION = 'Boston'\n DEFAULT_PRICE='1'\n parser = argparse.ArgumentParser()\n parser.add_argument('-q', '--term', dest='term', default=DEFAULT_TERM,type=str, help='Search term (default: %(default)s)')\n parser.add_argument('-l', '--location', dest='location',default=DEFAULT_LOCATION, type=str,help='Search location (default: %(default)s)')\n parser.add_argument('-p', '--price', dest='price',default=DEFAULT_PRICE, type=str,help='Search price (default: %(default)s)')\n parser.add_argument('-o', '--offset', dest='offset', type=int,help='Search offset (default: %(default)s)') \n location=[]\n ratings=[]\n idList=[]\n priceList=[]\n for i in range(maxOffset):\n input_values = parser.parse_args()\n input_values.offset=i*50\n locationAdd,ratingsAdd,idListAdd,priceListAdd=searchParams(input_values)\n append(location,locationAdd)\n append(ratings,ratingsAdd)\n append(idList,idListAdd)\n append(priceList,priceListAdd)\n return location,ratings,idList,priceList\n \ndef searchParams(input_values):\n response=authAndQury.searchInputVal(input_values)\n location=[]\n ratings=[]\n businessList=[]\n priceList=[]\n priceParser=['$','$$','$$$','$$$$']\n for i in response['businesses']:\n if('price' not in i):\n continue;\n location.append([i['coordinates']['longitude'],i['coordinates']['latitude']])\n businessList.append(i['id'])\n ratings.append(i['rating'])\n for j in range(4):\n if(i['price']==priceParser[j]):\n priceList.append(j)\n locationArray = np.array(location)\n return locationArray,ratings,businessList,priceList\n\ndef searchReviews(idList):\n #writes=[]\n path=\"../LDA/reviews.csv\"\n myFile = open(path, 'w')\n \"\"\"\n Write data to a CSV file path\n \"\"\"\n csv_writer = csv.writer(myFile,delimiter=',')\n #print(\"idList\",len(idList))\n with myFile:\n for i in idList:\n reviews= authAndQury.searchReviews(i)[\"reviews\"]\n for j in reviews:\n #print(\"************\",j[\"text\"])\n #writes.append(j[\"text\"])\n csv_writer.writerow([j[\"text\"]])\n return reviews","sub_path":"Clustering/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"561416345","text":"import csv\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.core.urlresolvers import reverse\nfrom django.core.mail import send_mail\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.conf import settings\nfrom assessment.models import * \nfrom assessment.core import survey_rollup, add_answers, add_risks, 
calc_base_risk, calc_company_risk, is_owner, write_answers, is_in_range\nfrom assessment.responses import reportBuilder, companyBrief\nfrom assessment.forms import CompanyForm, UserForm\n\n\n\ndef assess_main(request):\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n #profile = request.user.get_profile() #get_profile() deprecated with Django 1.5\n profile = User_profile.objects.get(user=request.user)\n \n #Get any companies attached to user\n profile.companies = Company.objects.filter(user_profile_id=profile.id)\n \n #If user is sponsored get sponsor's name\n if profile.sponsor_id:\n profile.sponsor_name = User.objects.get(id=profile.sponsor_id).get_full_name()\n else:\n profile.sponsor_name = ''\n \n #If user is a sponsor get list of clients and companies attached to clients\n if profile.is_sponsor == True:\n profile.clients = User_profile.objects.filter(sponsor_id=request.user.id)\n for client in profile.clients:\n client.companies = Company.objects.filter(user_profile_id=client.id)\n else:\n profile.clients = False\n \n #If user is Staff get list of all users and their attached companies that\n #neither belong to the user or a client of the user\n if request.user.is_staff == True:\n query = User_profile.objects.exclude(id=request.user.id)\n profile.other_users = query.exclude(sponsor_id=request.user.id).order_by('sponsor_id')\n #profile.other_users = User_profile.objects.exclude(Q(sponsor_id=profile.id) | Q(sponsor_id__isnull=True)).order_by('sponsor_id')\n for other_user in profile.other_users:\n if other_user.sponsor_id:\n other_user.sponsor_name = User.objects.get(id=other_user.sponsor_id).get_full_name()\n else:\n other_user.sponsor_name = 'Not Sponsored'\n other_user.companies = Company.objects.filter(user_profile_id=other_user.id)\n \n #For padding \"Your Companies\" table out to full rows\n tbl_padding = range(3 - (len(profile.companies) % 3))\n \n #IF user is a sponsor or is allowed multiple companies\n if profile.is_sponsor == True or profile.multi_company == True:\n return render_to_response('assess_main.html', {'profile': profile, 'tbl_padding': tbl_padding},context_instance=RequestContext(request))\n elif not profile.companies: #IF not a sponsor, not multi company and no company attached force add a company\n return HttpResponseRedirect(reverse('assessment.views.company_add'))\n else: #A single company user that has a company attached - send to company main\n return HttpResponseRedirect('/company/%s' % profile.companies[0].id)\n\n\ndef assess_listing(request):\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n survey = survey_rollup(1) #Survey 1\n if not request.user.is_staff:\n #Kick user out by logging off\n return HttpResponseRedirect(reverse('views.logout_view'))\n return render_to_response('assess_listing.html',{'survey': survey},context_instance=RequestContext(request))\n \n \ndef co_main(request, company_id):\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n \n try:\n company = Company.objects.get(id=company_id)\n except Company.DoesNotExist:\n raise Http404\n \n if not is_owner(company_id, request.user.id):\n #and not request.user.is_staff:\n return HttpResponseRedirect(reverse('views.logout_view'))\n \n if company:\n co_survey = survey_rollup(company.survey_id)\n add_answers(co_survey, company.id)\n add_risks(co_survey, company.growth_stage_id)\n calc_base_risk(co_survey)\n 
calc_company_risk(co_survey)\n\n return render_to_response('co_main.html',\n {'company': company, 'co_survey': co_survey},\n context_instance=RequestContext(request))\n\n\ndef company_add(request):\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n \n if request.method == 'POST': #Check for \"Cancel\" button\n submit = request.POST.get('cancel', None)\n if submit:\n return HttpResponseRedirect('/assess/')\n\n form = CompanyForm(request.POST or None)\n if form.is_valid():\n new_co = form.save(commit=False)\n new_co.survey = Survey.objects.get(pk=1) #always survey 1 until there are more surveys\n new_co.user_profile = request.user.get_profile()\n new_co.save()\n return HttpResponseRedirect('/company/%s' % new_co.id)\n\n profile = request.user.get_profile()\n page_text = {'title': 'Add Company'}\n if profile.is_sponsor == True or profile.multi_company == True:\n page_text['headline'] = 'Add New Company'\n page_text['subhead'] = ''\n else:\n page_text['headline'] = 'Add Company Information'\n page_text['subhead'] = 'Please add your company information to proceed with the profile.'\n return render_to_response('company_add.html', {'page_text': page_text, 'form': form},\n context_instance=RequestContext(request))\n \ndef company_edit(request, co_id):\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n if not is_owner(co_id, request.user.id):\n return HttpResponseRedirect(reverse('views.logout_view'))\n\n company = get_object_or_404(Company, pk=co_id)\n\n if request.method == 'POST': #Check for \"Cancel\" button\n submit = request.POST.get('cancel', None)\n if submit:\n return HttpResponseRedirect('/company/%s' % company.id)\n\n form = CompanyForm(request.POST or None, instance = company)\n if form.is_valid():\n company = form.save()\n company.save()\n return HttpResponseRedirect('/company/%s' % company.id)\n #import pdb; pdb.set_trace()\n return render_to_response('company_edit.html',\n {'title': 'Edit Company', 'form': form, 'company': company},\n context_instance=RequestContext(request))\n\ndef section(request, sect_id, co_id):\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n if not is_owner(co_id, request.user.id):\n return HttpResponseRedirect(reverse('views.logout_view'))\n company = Company.objects.get(id=co_id)\n co_survey = survey_rollup(company.survey_id, sect_id)\n add_answers(co_survey, company.id)\n \n if request.method == 'POST':\n submit = request.POST.get('cancel', None) #Check for cancel button\n if submit:\n return HttpResponseRedirect('/company/%s' % company.id)\n errors = False\n for sect in co_survey.section:\n for subsect in sect.subsection:\n if subsect.multi_answer == False:\n selection = request.POST.get('subsect_' + str(subsect.id), None)\n if not selection:\n errors = True\n subsect.error_text = 'You must select an option'\n else:\n for quest in subsect.question:\n quest.answer.answer_yn = True if selection == str(quest.id) else False\n #quest.answer.save()\n if subsect.multi_answer == True:\n for quest in subsect.question:\n if quest.input_type_id == 'bool':\n response = str(quest.id) in request.POST.getlist('subsect_' + str(subsect.id))\n quest.answer.answer_yn = response\n elif quest.input_type_id == 'num':\n response = request.POST.get('question_' + str(quest.id), 0)\n response = response.replace(',','')\n quest.answer.answer_numeric = int(float(response)) if len(response) > 0 else 0\n elif 
quest.input_type_id[:3] == 'txt':\n response = request.POST.get('question_' + str(quest.id), '')\n quest.answer.answer_text = response\n else:\n raise Exception(\"No handler for input_type_id '\" + quest.input_type_id + \"' in views.section.\")\n\n \n if not errors:\n write_answers(co_survey)\n return HttpResponseRedirect('/company/%s' % company.id)\n \n return render_to_response('answer_edit.html',\n {'title': 'Survey Section',\n 'co_survey': co_survey,\n 'company': company},\n context_instance=RequestContext(request))\n \n \ndef section_list(request, survey=1):\n if not request.user.is_staff:\n return HttpResponseRedirect(reverse('views.logout_view'))\n \n survey = Survey.objects.get(id=survey)\n sections = Section.objects.filter(survey_id=survey).order_by('order_in_survey')\n return render_to_response('section_list.html',\n {'survey': survey,\n 'sections': sections},\n context_instance=RequestContext(request))\n \n\ndef section_risk_edit(request, survey_id, sect_id):\n if not request.user.is_staff:\n return HttpResponseRedirect(reverse('views.logout_view'))\n\n #Allowable range for risk values between risk_range and -risk_range\n risk_range = 100\n survey = survey_rollup(survey_id, sect_id)\n \n stages = Growth_stage.objects.all().order_by('id')\n for stage in stages:\n stage.inherent = 'inherent_risk_' + str(stage.id)\n stage.max = 'max_risk_' + str(stage.id)\n stage.risk = 'risk_' + str(stage.id)\n stage.qerror = 'qerror_' + str(stage.id)\n risk_attr_name = 'risk_' + str(stage.id)\n add_risks(survey, stage.id, risk_attr_name)\n \n page_error = False\n\n if request.method == 'POST':\n submit = request.POST.get('cancel', None) #Check for cancel button\n if submit:\n return HttpResponseRedirect('/sectionlist/')\n errors = False\n \n #Update Section Inherent and Maximum risks from POST\n for sect in survey.section:\n for stage in stages:\n x = request.POST.get(stage.inherent, 0)\n setattr(sect, stage.inherent, x)\n #Set css class to highlight error\n if not is_in_range(x, risk_range):\n errors = True\n stage.inherent_error = 'errorHilite'\n x = request.POST.get(stage.max, 0)\n setattr(sect, stage.max, x)\n #Set css class to highlight error\n if not is_in_range(x, risk_range):\n errors = True\n stage.max_error = 'errorHilite'\n\n #Update Question risks for each stage from POST\n for subsect in sect.subsection:\n for quest in subsect.question:\n for stage in stages:\n x = request.POST.get('q_' + str(quest.id) + '_' + stage.risk, 0)\n setattr(quest, stage.risk, x)\n #Set css class to highlight error\n if not is_in_range(x, risk_range):\n errors = True\n setattr(quest, stage.qerror, 'errorHilite')\n \n if not errors:\n #Write Section Inherent and Maximum risks to database \n for sect in survey.section:\n for stage in stages:\n sect_risk, created = Sect_risk.objects.get_or_create(section_id=sect.id, growth_stage_id=stage.id)\n sect_risk.inherent_risk = getattr(sect, stage.inherent)\n sect_risk.max_risk = getattr(sect, stage.max)\n sect_risk.save()\n #Write Risk Factors for each question/growth stage combination back to database\n for subsect in sect.subsection:\n for quest in subsect.question:\n for stage in stages:\n record, created = Risk_factor.objects.get_or_create(question_id=quest.id, growth_stage_id=stage.id)\n record.risk_factor = getattr(quest, stage.risk)\n record.save()\n return HttpResponseRedirect('/sectionlist/')\n \n \n else:\n page_error = 'All input fields must contain an entry between +100 and -100'\n\n return render_to_response('sect_risk_edit.html',\n {'survey': survey,\n 
'stages': stages,\n 'page_error': page_error},\n context_instance=RequestContext(request))\n\ndef co_answers(request, company_id):\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n \n try:\n company = Company.objects.get(id=company_id)\n except Company.DoesNotExist:\n raise Http404\n \n if not is_owner(company_id, request.user.id):\n return HttpResponseRedirect(reverse('views.logout_view'))\n \n if company:\n co_survey = survey_rollup(company.survey_id)\n add_answers(co_survey, company.id)\n\n return render_to_response('company_answers.html',\n {'company': company, 'co_survey': co_survey},\n context_instance=RequestContext(request))\n\ndef co_answers_csv(request, company_id):\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n \n try:\n company = Company.objects.get(id=company_id)\n except Company.DoesNotExist:\n raise Http404\n \n if not is_owner(company_id, request.user.id):\n return HttpResponseRedirect(reverse('views.logout_view'))\n\n if company:\n co_survey = survey_rollup(company.survey_id)\n add_answers(co_survey, company.id)\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"somefilenam.csv\"'\n\n writer = csv.writer(response)\n writer.writerow(['Survey', 'Section', 'Subsection', 'Question', 'Response'])\n for sect in co_survey.section:\n for subsect in sect.subsection:\n for quest in subsect.question:\n row = ([co_survey.survey_name, sect.sect_name, subsect.subsect_name, quest.question_text])\n if quest.answer.id:\n if quest.input_type_id == 'bool':\n row.append(quest.answer.answer_yn)\n elif quest.input_type_id == 'num':\n row.append(quest.answer.answer_numeric)\n else:\n row.append(quest.answer.answer_text)\n else:\n row.append('No Answer')\n writer.writerow(row)\n \n return response\n\n\ndef user_add(request):\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n \n if request.method == 'POST': #Check for \"Cancel\" button\n submit = request.POST.get('cancel', None)\n if submit:\n return HttpResponseRedirect('/assess/')\n\n form = UserForm(request.POST or None, user=request.user)\n if form.is_valid():\n new_user = form.save()\n new_user.save()\n profile = new_user.get_profile()\n profile.sponsor_id = request.user.id\n profile.save()\n \n #Build email message to user that added the new user\n message_body = 'You have added the following new user to the NCFuture Profile. '\n message_body += 'This user is \"Sponsored\" by you and will be listed on the Profile Main page as one of your clients.\\n\\n'\n message_body += 'NOTE: These credentials have NOT been sent to the new user. 
It is up to you to forward the appropriate information.\\n\\n'\n message_body += 'Username: ' + new_user.username + '\\n'\n message_body += 'Password: ' + new_user.tmp_password + '\\n'\n message_body += 'Name: ' + new_user.first_name + ' ' + new_user.last_name + '\\n'\n message_body += 'Email: ' + new_user.email + '\\n'\n message_body += 'Title: ' + profile.title +'\\n'\n message_body += 'Company: ' + profile.user_company + '\\n\\n'\n message_body += 'Can have multiple companies: ' + str(profile.multi_company) + '\\n'\n message_body += 'Can sponsor clients: ' + str(profile.is_sponsor) + '\\n\\n'\n message_body += 'New user added by: ' + request.user.get_full_name() + '\\n'\n send_mail('New NCFuture Profile User Added', message_body,\n 'assessment@ncfuture.com', [request.user.email], fail_silently=False)\n if settings.STAFF_NOTIFICATIONS['add_company']:\n send_mail('New NCFuture Profile User Added', message_body,\n 'assessment@ncfuture.com', ['assessment@ncfuture.com'], fail_silently=False)\n \n return HttpResponseRedirect('/assess/')\n \n return render_to_response('user_add.html', {'title': 'Add New Client', 'form': form},\n context_instance=RequestContext(request))\n\n\ndef co_report_full(request, company_id, survey=1):\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n \n try:\n company = Company.objects.get(id=company_id)\n except Company.DoesNotExist:\n raise Http404\n \n if not is_owner(company_id, request.user.id):\n return HttpResponseRedirect(reverse('views.logout_view'))\n\n co_report, risk_scores = reportBuilder(company_id, 'full')\n \n return render_to_response('co_report.html', {'company': company, 'report': co_report}, context_instance=RequestContext(request))\n \ndef co_brief(request, company_id, survey=1):\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n \n try:\n company = Company.objects.get(id=company_id)\n except Company.DoesNotExist:\n raise Http404\n \n if not is_owner(company_id, request.user.id):\n return HttpResponseRedirect(reverse('views.logout_view'))\n\n co_report, risk_scores = reportBuilder(company_id, 'brief')\n co_brief = companyBrief(co_report)\n \n rpt_title = 'NCFuture Business Brief'\n return render_to_response('co_report_brief.html', {'company': company, 'rpt_title': rpt_title, 'report': co_brief}, context_instance=RequestContext(request))\n","sub_path":"assessment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"424024421","text":"from django.shortcuts import render, redirect\nfrom polls.forms import *\nfrom django.http import HttpResponse \n\n\n\n\n\t\n\n\ndef log_in(request):\n\terrors = []\n\tus = User.objects.all()\n\tif request.method == 'POST':\n\t\tform = LoginForm(request.POST)\n\n\t\tif form.is_valid():\n\t\t\tuser = User.objects.filter(first_name=form.cleaned_data['first_name'])\n\t\t\tprint(user)\n\t\t\tif not user:\n\t\t\t\terrors.append('First name not found.')\n\t\t\t\treturn render(request, 'log_in.html', {'errors': errors, 'form': form, 'us': us})\n\t\t\telse:\n\t\t\t\tuser = user.filter(last_name=form.cleaned_data['last_name'])\n\t\t\t\tif not user:\n\t\t\t\t\terrors.append('Last name is incorrect')\n\t\t\t\t\treturn render(request, 'log_in.html', {'form': form,'errors': errors, 'us': us}) \n\t\t\t\t\n\n\t\t\t\t \n\t\t\t\telse:\n\t\t\t\t\tuser = user.get()\n\t\t\t\t\trequest.session['userID'] 
= user.id\n\t\t\t\t\trequest.session['username'] = user.first_name + ' ' + user.last_name \n\n\t\t\t\t\tif user.has_voted == True:\n\t\t\t\t\t\terrors.append(user.first_name + ' ' + user.last_name + ' has already voted.')\n\t\t\t\t\t\treturn render(request, 'log_in.html', {'form': form,'errors': errors, 'us': us}) \n\t\t\t\t\telse:\n\t\t\t\t\t\trequest.session.set_expiry(0)\n\t\t\t\t\t\tquestion = Question.objects.all()\t\t\n\t\t\t\t\t\tform = NoteForm()\n\t\t\t\t\t\t\n\t\t\t\t\t\treturn render(request, 'polls.html', {'form': form, 'question': question})\n\telse: \n\t\tform = LoginForm()\n\treturn render(request,'log_in.html', {'form':form, 'us': us.distinct})\n\n\ndef contact(request):\n\treturn render(request, 'contact.html')\t\n\n\ndef poll(request):\n\treturn render(request, 'polls.html')\n\n\ndef results(request):\n\tquestion = Question.objects.filter(id=12)\n\tprint(question)\n\tif request.method == 'POST':\n\t\tif (request.session['username'] == \"test test\"):\n\t\t\tpass\n\t\telse:\t\n\t\t\tuser = User.objects.filter(id=request.session['userID']).first()\n\t\t\tuser.has_voted = True\n\t\t\tuser.save()\n\n\t\t\tform = NoteForm(request.POST, instance=user)\n\t\t\twith open(\"log.txt\", \"a\") as myfile:\n\t\t\t\tmyfile.write(request.session['username'] + '\\n')\n\t\t\tif form.is_valid():\n\t\t\t\tform.save()\n\t\t\tfor key in request.POST:\n\t\t\t\tif key != 'csrfmiddlewaretoken' and key != 'description' :\n\t\t\t\t\tquestion = Question.objects.get(id=int(key))\n\t\t\t\t\ttotal2 = question.total\n\t\t\t\t\ttotal2 = total2 + int(request.POST.get(key))\n\t\t\t\t\tQuestion.objects.filter(id=int(key)).update(total = total2)\n\t\t\t\t\twith open(\"log.txt\", \"a\") as myfile:\n\t\t\t\t\t\tmyfile.write(str(question) + ':\\t' + str(total2) + '\\n')\n\n\t\t\t\t\t\t\n\t\n\tquestion = Question.objects.all()\n\treturn render(request, 'results.html', {'question': question})\n","sub_path":"mysite/mysite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"334175289","text":"import math\n\nimport cv2\nimport dlib\nimport numpy as np\nfrom sklearn.externals import joblib\n\n\ndef distance(shape, pnt_1, pnt_2):\n a = (shape.part(pnt_2).x - shape.part(pnt_1).x) ** 2\n b = (shape.part(pnt_2).y - shape.part(pnt_1).y) ** 2\n dis = math.sqrt((a + b))\n return dis\n\n\n#\n\ndef detect_faces(detector, img):\n try:\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n detections = detector(gray, 1) # Detect the faces in the image\n if len(detections) == 0:\n return None\n return detections\n except Exception as e:\n print(e)\n return None\n\n\ndef extract_features_from_img(predictor, faces, img):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n features = []\n for i, face in enumerate(faces): # For all detected face instances individually\n try:\n shape = predictor(gray, face) # Draw Facial Landmarks with the predictor class\n # Calculate average distance between right brows and right eye\n dist_1937 = distance(shape, 19, 37)\n dist_2038 = distance(shape, 20, 38)\n FP01 = (dist_1937 + dist_2038) / 2\n\n # Calculate average distance between left brows and left eye\n dist_2343 = distance(shape, 23, 43)\n dist_2444 = distance(shape, 24, 44)\n FP02 = (dist_2343 + dist_2444) / 2\n\n FP03 = distance(shape, 21, 39) # Calculate distance between left corner point of right eye and brows\n FP04 = distance(shape, 17, 36) # Calculate distance between right corner point of right eye and brows\n FP05 = 
distance(shape, 22, 42) # Calculate distance between right corner point of left eye and brows\n FP06 = distance(shape, 26, 45) # Calculate distance between left corner point of left eye and brows\n FP07 = distance(shape, 21, 22) # Calculate distance between corner point of two eyes\n\n FP08 = distance(shape, 27,\n 31) # Calculate distance between upper nose point and right most point of lower nose\n FP09 = distance(shape, 27,\n 35) # Calculate distance between upper nose point and left most point of lower nose\n FP10 = (FP08 + FP09) / 2\n FP11 = distance(shape, 30,\n 33) # Calculate distance between lower centre nose point and upper centre nose point.\n dist_3150 = distance(shape, 31,\n 50) # calculate distance between nose right corner and right corner of upper lips\n dist_3552 = distance(shape, 35,\n 52) # calculate distance between nose left corner and left corner of upper lips\n FP12 = (dist_3150 + dist_3552) / 2\n dist_3148 = distance(shape, 31, 48)\n dist_3554 = distance(shape, 35, 54)\n FP12 = (dist_3148 + dist_3554) / 2\n FP13 = distance(shape, 48, 54)\n FP14 = distance(shape, 61, 67) # calculate distance between right corner of inner lips\n FP15 = distance(shape, 62, 66) # calculate distance between left corner of inner lips\n FP16 = distance(shape, 63, 65) # calculate distance between middle of inner lips\n FP17 = distance(shape, 58, 7) # calculate distance between right corner of lower lips and right chin\n FP18 = distance(shape, 57, 8) # calculate distance between middle of lower lips and middle chin\n FP19 = distance(shape, 56, 9) # calculate distance between left corner of lower lips and left chin\n FP20 = distance(shape, 60, 64) # calculate distance between inner lips corner\n feature = [FP01, FP02, FP03, FP04, FP05, FP06, FP07, FP08, FP09, FP10,\n FP11, FP12, FP13, FP14, FP15, FP16, FP17, FP18, FP19, FP20]\n features.append(np.asarray(feature, dtype=np.float32))\n except Exception as e:\n print(e)\n features.append(None)\n return features if len(features) > 0 else None\n\n\nif __name__ == '__main__':\n detector = dlib.get_frontal_face_detector() # Face detector\n predictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\") # Landmark identifier.\n video = cv2.VideoCapture(0)\n # classifier = svm.SVC()\n classifier = joblib.load('facEmo_saved_model2.pkl')\n id2label = {0: \"ANGRY\", 1: \"DISGUST\", 2: \"FEAR\", 3: \"HAPPY\", 4: \"NEUTRAL\", 5: \"SAD\", 6: \"SURPRISE\"}\n while True:\n succ, img = video.read()\n if not succ:\n continue\n faces = detect_faces(detector, img)\n if faces is not None:\n features = extract_features_from_img(predictor, faces, img)\n if features is None:\n continue\n for face, feature in zip(faces, features):\n if feature is None:\n continue\n print(\"feature: {}, shape: {}\".format(feature, feature.shape))\n feature = feature.reshape(-1, len(feature))\n out = classifier.predict(feature)\n label = id2label[out[0]]\n print(\"feature: {}, shape: {}, face: {}, sentiment: {}\".format(feature, feature.shape, face, label))\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.rectangle(img, (face.left(), face.top()), (face.right(), face.bottom()), (255, 94, 94), 2)\n cv2.putText(img, label, (face.left(), face.top()), font, 1, (255, 255, 255), 1)\n cv2.imshow(\"FacEmo\", img) # Display the frame\n if cv2.waitKey(1) & 0xFF == ord('q'): # Exit program when the user presses 'q'\n break\n\n'''from sklearn import svm\nfrom sklearn.metrics import classification_report\n\nif __name__ == '__main__':\n features = [[0, 0], [1, 1], [2, 2]]\n labels = [0, 1, 2]\n 
classifier = svm.SVC(gamma='scale')\n classifier.fit(features, labels)\n outputs = classifier.predict(features)\n\n report = classification_report(labels, outputs)\n print(report)\n\n out = classifier.predict([[0.5, 0.5]])\n print(out)\n '''\n","sub_path":"facEmo.py","file_name":"facEmo.py","file_ext":"py","file_size_in_byte":6069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"433675950","text":"import sys\n\nsys.setrecursionlimit(1000000)\n\nD = [(1,0), (0,1), (-1,0), (0,-1), (-1,-1),(1,-1),(-1,1),(1,1) ] # 下、右、上、左、斜め\n\ndef dfs(x,y):\n # Validation\n if not(0<=xc;++c)i[c]=\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\".charCodeAt(c);for(c=0;t-t%3>c;c+=3,o+=4)r=n[c]<<16|n[c+1]<<8|n[c+2],a[o]=i[r>>18],a[o+1]=i[r>>12&63],a[o+2]=i[r>>6&63],a[o+3]=i[63&r];return t%3===1?(r=n[t-1],a[o]=i[r>>2],a[o+1]=i[r<<4&63],a[o+2]=61,a[o+3]=61):t%3===2&&(r=(n[t-2]<<8)+n[t-1],a[o]=i[r>>10],a[o+1]=i[r>>4&63],a[o+2]=i[r<<2&63],a[o+3]=61),new TextDecoder(\"ascii\").decode(a)};\n var xhr = new XMLHttpRequest();\n xhr.responseType = 'arraybuffer';\n xhr.onload = function(){ callback(toBase64(xhr.response)) };\n xhr.onerror = function(){ callback(xhr.status) };\n xhr.open('GET', uri);\n xhr.send();\n \"\"\", uri)\n if type(result) == int:\n raise Exception(\"Request failed with status %s\" % result)\n return base64.b64decode(result)\n\n @staticmethod\n def check_url(manga_url):\n return manga_url.find('takeshobo.co.jp/manga/') != -1\n\n def get_sum_page_count(self, driver):\n return int(str(driver.execute_script(\"return document.getElementById('menu_slidercaption').innerHTML\")).split('/')[1])\n\n def move_to_page(self, driver, page):\n driver.execute_script(\n 'SpeedBinb.getInstance(\"content\").moveTo(%d)' % page)\n\n def wait_loading(self, driver):\n WebDriverWait(driver, 600).until_not(\n lambda x: x.find_element_by_id(\"start_wait\"))\n\n def get_imgdata(self, driver, now_page):\n image_elements = driver.find_element_by_id(\n 'content-p%d' % now_page).find_elements_by_css_selector('img')\n\n imgs_arr = []\n imgs_height = [0]\n mmset = 4\n for i in image_elements:\n blob_url = i.get_attribute('src')\n image_data = self.get_file_content_chrome(driver, blob_url)\n part_img = pil_image.open(BytesIO(image_data))\n imgs_arr.append(part_img)\n width, height = part_img.size\n imgs_height.append(height + imgs_height[-1] - mmset)\n\n last_img_height = imgs_height.pop() + mmset\n\n final_img = pil_image.new('RGB', (width, last_img_height))\n\n for i in range(len(imgs_arr)):\n final_img.paste(imgs_arr[i], (0, imgs_height[i]))\n\n final_data = BytesIO()\n final_img.save(final_data, format='PNG')\n return final_data.getbuffer()\n\n def get_now_page(self, driver):\n return int(str(driver.execute_script(\"return document.getElementById('menu_slidercaption').innerHTML\")).split('/')[0])\n\n def before_download(self, driver):\n driver.execute_script('parent.closeTips()')\n","sub_path":"website_actions/takeshobo_co_jp_actions.py","file_name":"takeshobo_co_jp_actions.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"273658016","text":"from Node import Node\n\nclass BinaryTree:\n grad = 100\n def __init__(self):\n self.root = Node()\n\n def insert(self, val):\n current, index = self.traverse(val)\n current = current + val\n\n while current.length >= self.grad:\n if current.parent is None:\n current.parent = Node()\n self.root = current.parent\n\n new = 
current.parent\n median = current.val[current.length // 2]\n index = new.index(val)\n\n new = new + median\n new.split(current, index)\n current = new\n\n def traverse(self, val):\n current = self.root\n index = current.index(val)\n while self.grad > current.length and current.child[index] is not None:\n current = current.child[index]\n index = current.index(val)\n return current, index\n\n def print(self, current, nivel=0):\n if current.parent is not None:\n print(current.val, nivel, \"length\" ,len(current.parent.val))\n else:\n print(current.val, nivel)\n for index in current.child:\n if index is not None:\n self.print(index, nivel + 1)\n\n def sorted(self, current, left, right):\n result = []\n\n for index in current.child:\n if index is not None:\n self.sorted(index, left, right)\n aux = current.child.index(i)\n if aux < current.length:\n if current.val[aux] > right:\n return\n if current.val[aux] >= left:\n result.append(current.val[aux])\n break\n else:\n for x in current.val:\n if x > right:\n return\n if x >= left:\n result.append(x)\n return result\n\n def find(self, current, val):\n if current is None:\n return None, 0\n\n current.parent_child()\n index = current.index(val)\n\n if index == current.length or current.val[index] > val:\n return self.find(current.child[index], val)\n elif current.val[index] == val:\n return current, index\n else:\n return self.find(current.child[index + 1], val)\n\n def delete(self, val):\n current, index = self.find(self.root, val)\n\n if current is None:\n return 0\n\n if all(current.child):\n next_val = self.successor(val)\n aux, aux_index = self.find(self.root, next_val)\n if aux.length > self.grad // 2:\n current.val[index], aux.val[aux_index] = aux.val[aux_index], current.val[index]\n current = aux\n else:\n next_val = self.pred(val)\n aux, aux_index = self.find(self.root, next_val)\n current.val[index], aux.val[aux_index] = aux.val[aux_index], current.val[index]\n current = aux\n index = aux_index\n\n current.val = current.val[:index] + current.val[index + 1:]\n current.length -= 1\n current.child = current.child[:index] + current.child[index + 1:]\n\n if current.length 0 and current.parent.child[k - 1].length > self.grad // 2:\n current.val.insert(0, current.parent.val[k - 1])\n current.length += 1\n current.parent.val[k - 1] = current.parent.child[k - 1].val[-1]\n current.child.insert(0, current.parent.child[k - 1].child[-1])\n\n del current.parent.child[k - 1].val[-1]\n del current.parent.child[k - 1].child[-1]\n current.parent.child[k - 1].length -= 1\n\n elif k < current.parent.length and current.parent.child[k + 1].length > self.grad // 2:\n current.val.append(current.parent.val[k])\n current.length += 1\n current.parent.val[k] = current.parent.child[k + 1].val[0]\n current.child.append(current.parent.child[k + 1].child[0])\n\n del current.parent.child[k + 1].val[0]\n del current.parent.child[k + 1].child[0]\n current.parent.child[k + 1].length -= 1\n \n else:\n if k == current.parent.length:\n current.parent.child[k - 1].union(current, k - 1)\n else:\n current.union(current.parent.child[k + 1], k)\n if current.parent is not None and current.parent.length < self.grad // 2:\n self.balance(current.parent)\n if current.parent is None:\n self.root = current\n else:\n if current.val == []:\n current.child[0].parent = None\n self.root = current.child[0]\n\n\n def successor(self,val):\n current = self.root\n while all(current.child):\n current = current.child[current.index(val)]\n\n while current.index(val) == current.length:\n current = 
current.parent\n if current is None:\n return\n else:\n return current.val[current.index(val)]\n return\n\n def predecessor(self,val):\n current = self.root\n if val in current.val:\n return val\n\n while all(current.child):\n current = current.child[current.index(val)]\n if val in current.val:\n return val\n\n while current.index(val) == 0:\n current = current.parent\n if current is None:\n return\n else:\n return current.val[current.index(val)-1]\n return\n","sub_path":"tema_laborator_2/BinaryTree.py","file_name":"BinaryTree.py","file_ext":"py","file_size_in_byte":6175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"504496265","text":"import numpy as np\nimport scipy\nfrom sklearn import datasets\nfrom sklearn import naive_bayes\nfrom sklearn.metrics import classification_report\nimport operator\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef calcu_uncertainty(array_a, array_b):\n result = 0\n\n if len(array_a) == len(array_b):\n for i in range(len(array_a)):\n result += array_a[i] * array_b[i]\n\n return -result\n\ndef least_probobility(array_a, index):\n return array_a[index]\n\ndigits = datasets.load_digits()\nrng = np.random.RandomState(0)\nindices = np.arange(len(digits.data))\nrng.shuffle(indices)\n\n\nINI_TRAIN_SIZE = 40\nSAMPLE_INI_TRAIN_SIZE = 40\nTRAIN_SIZE = INI_TRAIN_SIZE\nTEST_SIZE = 600\nITERATION = 31\nITER_NUM = 5\n\n\ntrain_index = indices[:INI_TRAIN_SIZE]\n\ntest_index = indices[-TEST_SIZE:]\n\nplt_x = []\nplt_y = []\nplt_control = []\n\nfor count in range(ITERATION):\n sample_train_index = indices[:SAMPLE_INI_TRAIN_SIZE]\n\n plt_x.append(SAMPLE_INI_TRAIN_SIZE)\n\n gnb = naive_bayes.GaussianNB()\n gnb_sample = naive_bayes.GaussianNB()\n\n x = digits.data[train_index]\n y = digits.target[train_index]\n x_test = digits.data[test_index]\n y_test = digits.target[test_index]\n x_sample = digits.data[sample_train_index]\n y_sample = digits.target[sample_train_index]\n\n gnb.fit(x, y)\n\n if len(gnb.classes_) < 10:\n exit(\"Not full classes trained\")\n\n result = gnb.predict(x_test)\n pro = gnb.predict_proba(x_test)\n pro_log = gnb.predict_log_proba(x_test)\n\n gnb_sample.fit(x_sample, y_sample)\n result_sample = gnb_sample.predict(x_test)\n\n dic = {}\n sum = 0\n sum_sample = 0\n\n for index in range(TEST_SIZE):\n #dic[test_index[index]] = calcu_uncertainty(pro[index], pro_log[index])\n #dic[test_index[index]] = least_probobility(pro[index], y_test[index])\n\n if result[index] != y_test[index]:\n #dic[test_index[index]] = calcu_uncertainty(pro[index], pro_log[index])\n dic[test_index[index]] = least_probobility(pro[index], y_test[index])\n sum += 1\n\n if result_sample[index] != y_test[index]:\n sum_sample += 1\n\n dic = sorted(dic.items(), key=operator.itemgetter(1))[:ITER_NUM]\n print(dic)\n\n for index in range(TEST_SIZE):\n if result[index] != y_test[index]:\n for d in dic:\n if test_index[index] == d[0]:\n #print()\n print(test_index[index])\n\n #print(classification_report(y_test, result))\n print(TRAIN_SIZE)\n print(1-sum/TEST_SIZE)\n plt_y.append(1-sum/TEST_SIZE)\n #print(classification_report(y_test, result_sample))\n print(SAMPLE_INI_TRAIN_SIZE)\n print(1-sum_sample/TEST_SIZE)\n plt_control.append(1-sum_sample/TEST_SIZE)\n\n num = -1\n for index in range(len(test_index)):\n num += 1\n if test_index[index] == dic[0][0]:\n break\n\n #print(calcu_uncertainty(pro[num], pro_log[num]))\n #print(pro[num])\n #for index in range(len(pro[11])):\n # print(\"%e %e %e\" %(pro[item_index][index], 
pro_log[item_index][index], pro[item_index][index]*pro_log[item_index][index]))\n\n print(50 * \"-\")\n print()\n\n highest_index = []\n for index in range(ITER_NUM):\n highest_index.append(dic[index][0])\n\n train_index = np.concatenate((train_index, highest_index))\n TRAIN_SIZE += ITER_NUM\n\n item_index = []\n for item in highest_index:\n item_index.append(np.where(test_index == item))\n\n test_index = np.delete(test_index, item_index)\n TEST_SIZE -= ITER_NUM\n\n SAMPLE_INI_TRAIN_SIZE += ITER_NUM\n\n\nplt.figure(1)\nplt.xlim(40, SAMPLE_INI_TRAIN_SIZE)\nplt.plot(plt_x, plt_y)\nplt.plot(plt_x, plt_control)\n\nplt.show()\n","sub_path":"old/examine.py","file_name":"examine.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"118215115","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom django import forms\nfrom crispy_forms.helper import FormHelper\nfrom django.core.urlresolvers import reverse\nfrom crispy_forms.layout import Submit\nfrom crispy_forms.bootstrap import FormActions\n\nfrom ..models import Group, Student\n\n\nclass StudentUpdateForm(forms.ModelForm):\n \n class Meta:\n model = Student\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n\n super(StudentUpdateForm, self).__init__(*args, **kwargs)\n\n self.helper = FormHelper(self)\n\n self.helper.form_class = 'form-horizontal'\n self.helper.form_method = 'post'\n self.helper.form_action = reverse('students_edit', kwargs={'pk': kwargs['instance'].id})\n\n self.helper.help_text_inline = True \n self.helper.html5_required = True\n self.helper.label_class = 'col-sm-2 control-label'\n self.helper.field_class = 'col-sm-10'\n\n self.helper.layout[-1] = FormActions(\n Submit('add_button', u'Зберегти', css_class=\"btn btn-primary\"),\n Submit('cancel_button', u'Скасувати', css_class=\"btn btn-default\"),\n )\n\n def clean_student_group(self):\n \"\"\"Check if student is leader in any group.\n If yes, then ensure it`s the same as selected group.\"\"\"\n\n group = Group.objects.filter(leader=self.instance)\n if self.cleaned_data['student_group'] != group[0]:\n raise forms.ValidationError(u'Студент є старостою іншої групи.', code='invalid')\n \n return self.cleaned_data['student_group']\n\nclass StudentAddForm(forms.ModelForm):\n \n class Meta:\n model = Student\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n\n super(StudentAddForm, self).__init__(*args, **kwargs)\n\n self.helper = FormHelper(self)\n\n self.helper.form_class = 'form-horizontal'\n self.helper.form_method = 'post'\n self.helper.form_action = reverse('students_add')\n\n self.helper.help_text_inline = True \n self.helper.html5_required = True\n self.helper.label_class = 'col-sm-2 control-label'\n self.helper.field_class = 'col-sm-10'\n\n self.helper.layout[7] = FormActions(\n Submit('add_button', u'Зберегти', css_class=\"btn btn-primary\"),\n Submit('cancel_button', u'Скасувати', css_class=\"btn btn-default\"),\n )\n\n\n","sub_path":"students/forms/students.py","file_name":"students.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"226526702","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2019 CESNET.\n#\n# Invenio Records Presentation is free software; you can redistribute it and/or modify it\n# under the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"Blueprint definitions.\"\"\"\n\nfrom __future__ 
import absolute_import, print_function\n\nimport time\nimport traceback\nfrom functools import wraps\nimport logging\nfrom uuid import UUID\n\nfrom celery._state import app_or_default\nfrom celery.result import AsyncResult, result_from_tuple\nfrom flask import Blueprint, jsonify, abort, request, Response, current_app\nfrom flask_login import current_user\nfrom invenio_pidstore.models import PersistentIdentifier\nfrom invenio_userprofiles import UserProfile\nfrom invenio_workflows import WorkflowEngine\nfrom workflow.errors import WorkflowDefinitionError\n\nfrom .api import Presentation, PresentationWorkflowObject\nfrom .errors import PresentationNotFound, WorkflowsPermissionError\nfrom .proxies import current_records_presentation\n\nlogger = logging.getLogger(__name__)\n\nblueprint = Blueprint(\n 'invenio_records_presentation',\n __name__,\n url_prefix='/presentation/1.0'\n)\n\"\"\"Blueprint used for loading templates and static assets\n\nThe sole purpose of this blueprint is to ensure that Invenio can find the\ntemplates and static files located in the folders of the same names next to\nthis file.\n\"\"\"\n\n\ndef pass_result(f):\n \"\"\"Decorate to provide an AsyncResult instance of the job.\"\"\"\n\n @wraps(f)\n def decorate(*args, **kwargs):\n job_uuid = kwargs.pop('job_uuid')\n Result = app_or_default(None).AsyncResult\n result = Result(job_uuid, parent=None)\n # result: AsyncResult = result_from_tuple([[job_uuid, None], None])\n # if result is None:\n # abort(400, 'Invalid job UUID')\n\n return f(result=result, *args, **kwargs)\n\n return decorate\n\n\ndef pass_presentation(f):\n \"\"\"Decorate to provide a presentation instance.\"\"\"\n\n @wraps(f)\n def decorate(*args, **kwargs):\n presid = kwargs.pop('presentation_id')\n try:\n presentation = current_records_presentation.get_presentation(presid)\n return f(presentation=presentation, *args, **kwargs)\n except PresentationNotFound:\n abort(400, 'Invalid presentation type')\n\n return decorate\n\n\ndef with_presentations(f):\n \"\"\" Init all presentation objects \"\"\"\n\n @wraps(f)\n def decorate(*args, **kwargs):\n current_records_presentation.init_presentations()\n return f(*args, **kwargs)\n\n return decorate\n\n\n@blueprint.route(\"/\")\n@with_presentations\ndef index():\n return 'presentation loaded successfully'\n\n\n@blueprint.route('/prepare////', methods=('POST',))\n@with_presentations\ndef pid_prepare(pid_type: str, pid: str, presentation_id: str):\n pid_record = PersistentIdentifier.query.filter_by(pid_type=pid_type, pid_value=pid).one_or_none()\n if pid_record:\n return prepare(str(pid_record.object_uuid), presentation_id=presentation_id)\n else:\n abort(404, 'Record with PID {}:{} not found'.format(pid_type, pid_type))\n\n\n@blueprint.route('/prepare///', methods=('POST',))\n@with_presentations\n@pass_presentation\ndef prepare(record_uuid: str, presentation: Presentation):\n if current_user.is_anonymous:\n user_meta = {\n 'id': None,\n 'email': None,\n 'login_ip': None,\n 'current_ip': str(request.remote_addr),\n 'roles': [],\n 'full_name': 'Anonymous',\n 'username': None\n }\n else:\n profile_meta = {}\n profile: UserProfile = UserProfile.get_by_userid(current_user.id)\n if profile:\n profile_meta = {\n 'full_name': profile.full_name,\n 'username': profile.username,\n }\n user_meta = {\n 'id': current_user.id,\n 'email': current_user.email,\n 'current_ip': str(request.remote_addr),\n 'login_ip': str(current_user.current_login_ip),\n 'roles': [{'id': role.id, 'name': role.name} for role in current_user.roles]\n }\n 
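# Merge any optional profile fields (full name, username) gathered above into the user metadata.\n        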
user_meta.update(profile_meta)\n headers = {k: v for k, v in request.headers}\n\n try:\n result = presentation.prepare(record_uuid, user_meta, headers, delayed=True)\n if isinstance(result, AsyncResult):\n return jsonify({'job_id': result.task_id})\n else:\n return jsonify({'job_id': result})\n except WorkflowsPermissionError as e:\n logger.exception('Exception detected in prepare')\n abort(403, e)\n except WorkflowDefinitionError:\n logger.exception('Exception detected in prepare')\n abort(400, 'There was an error in the {} workflow definition'.format(presentation.name))\n\n\n@blueprint.route('/status//')\n@pass_result\ndef status(result: AsyncResult):\n if result.state == 'FAILURE':\n print(result.traceback)\n try:\n eng_uuid = str(UUID(result.info, version=4))\n engine = WorkflowEngine.from_uuid(eng_uuid)\n object = engine.objects[-1]\n info = {'current_data': object.data,\n 'created': object.created,\n 'modified': object.modified}\n\n except Exception:\n logger.exception('Exception detected in status')\n info = str(result.info)\n\n return jsonify({'status': result.state, 'info': info})\n\n\nimport unicodedata\ndef strip_accents(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn')\n\n\n@blueprint.route('/download//')\n@pass_result\ndef download(result: AsyncResult):\n for i in range(10):\n try:\n time.sleep(1)\n eng_uuid = result.get() # Will wait until task has completed\n break\n except:\n traceback.print_exc()\n if i == 9:\n raise\n time.sleep(5)\n\n engine = WorkflowEngine.from_uuid(eng_uuid)\n object = PresentationWorkflowObject(engine.objects[-1])\n\n data_path = object.scratch.full_path(object.data['path'])\n\n def serve():\n with open(data_path, 'rb') as f:\n while True:\n buf = f.read(128000)\n if not buf:\n break\n yield buf\n\n return Response(serve(), mimetype=object.data['mimetype'], headers={\n 'Content-disposition': 'inline; filename=\\\"{}\\\"'.format(strip_accents(object.data['filename'])),\n 'Content-Security-Policy': \"object-src 'self';\"\n })\n","sub_path":"invenio_records_presentation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"575034042","text":"# Minimally parses a PLA file.\nimport json\nimport os\nimport pprint\nimport sys\n\nimport pyeda\n\nfrom typing import List, Dict\n\nfrom munkres import Munkres, make_cost_matrix, DISALLOWED, print_matrix\nfrom pyeda.inter import *\n\n# PLA files that I'm interested in look like this:\n#\n# # Benchmark \"top\" written by ABC on Mon Jan 25 13:56:23 2021\n# .i 12\n# .o 8\n# .ilb b[0] b[1] b[2] b[3] s[0] s[1] s[2] s[3] a[0] a[1] a[2] a[3]\n# .ob x[0] x[1] x[2] x[3] x[4] x[5] x[6] x[7]\n# .p 20\n# 1---0---0--- 10000000\n# 0----0--0--- 10000000\n# ...more lines\n# .e\n#\n# Some format information here:\n# http://www.ecs.umass.edu/ece/labs/vlsicad/ece667/links/espresso.5.html\n#\n# So the format spec is:\n# A '#' in the first character of the line is a comment.\n# .i %d:\n# Number of input variables\n# .o %d:\n# Number of output functions\n# .ilb :\n# Names of input variables. Must come after .i. There must be the same\n# number of names as there is in .i.\n# .ob :\n# Names of output functions. Must come after .o. There must be the same\n# number of names as there is in .o.\n# .p %d:\n# Number of product terms. 
May be ignored.\n# .e or .end:\n# Optionally marks end of description.\n# Product term line:\n# .i number of 1/0/- characters, followed by whitespace, followed by\n# .o number of 1/0 characters. These are in the same order as the\n# input and output names.\n\n# Note that this kind of PLA file only represents and-or (aka sum-of-products).\n# Because we're also interested in xor layers, and also multiple layers, we\n# have to use multiples of these files, and also a custom file for xor layers.\n\n\nclass ProductTerm():\n ones: List[str]\n zeros: List[str]\n\n def __init__(self):\n # List of symbolic inputs\n self.ones = []\n self.zeros = []\n self.expr = expr(1)\n\n def __repr__(self):\n pp = pprint.PrettyPrinter(indent=4)\n return pp.pformat({'ones': self.ones, 'zeros': self.zeros})\n\n\nclass OrTerm():\n products: List[ProductTerm]\n\n def __init__(self):\n self.products = []\n self.expr = expr(0)\n\n def __repr__(self):\n pp = pprint.PrettyPrinter(indent=4)\n return pp.pformat({'or_products': self.products, 'expr': self.expr})\n\n\ndef get_database():\n for path in sys.path:\n file = os.path.join(path, \"database.json\")\n if os.path.isfile(file):\n with open(file) as f:\n return json.load(f)\n return None\n\n\nclass PLAParser():\n inputs: List[str]\n outputs: List[str]\n or_terms: Dict[str, OrTerm]\n # If the file is marked with .xor, it's an XOR layer.\n is_xor: bool\n # If the file is marked with .outputs, all outputs are routed to pins.\n is_outputs: bool\n\n def __init__(self, file: str):\n self.inputs = []\n self.outputs = []\n # A map of symbolic output to OrTerm (or Xor)\n self.or_terms = {}\n self.is_xor = False\n self.is_outputs = False\n\n with open(file) as f:\n for line in f.readlines():\n if not self.readline(line):\n break\n print(f\"Inputs : {self.inputs}\")\n print(f\"Outputs : {self.outputs}\")\n pp = pprint.PrettyPrinter(indent=4, depth=3)\n print(f\"OR Terms:\")\n pprint.pprint(self.or_terms)\n\n def readline(self, line: str) -> bool:\n \"\"\"Returns if there are more lines to parse.\"\"\"\n if len(line) == 0:\n return True\n if line.startswith('#'):\n return True\n if line.startswith(\".i \"):\n return True\n if line.startswith(\".o \"):\n return True\n if line.startswith(\".p \"):\n return True\n if line.startswith(\".e\") | line.startswith(\".end\"):\n return False\n if line.startswith(\".xor\"):\n assert not self.is_outputs\n self.is_xor = True\n if line.startswith(\".outputs\"):\n assert not self.is_xor\n self.is_outputs = True\n if line.startswith(\".ilb \"):\n self.inputs = line.split()[1:]\n return True\n if line.startswith(\".ob \"):\n self.outputs = line.split()[1:]\n for output in self.outputs:\n self.or_terms[output] = OrTerm()\n return True\n if line.startswith(\"1\") | line.startswith(\"0\") | line.startswith(\"-\"):\n assert not self.is_outputs\n if self.is_xor:\n self.read_xor_term(line)\n else:\n self.read_or_term(line)\n return True\n return True\n\n def read_or_term(self, line: str):\n parts = line.split()\n assert len(parts) == 2\n assert len(parts[0]) == len(self.inputs)\n assert len(parts[1]) == len(self.outputs)\n\n inputs = parts[0]\n outputs = parts[1]\n product = ProductTerm()\n terms = []\n for i, bit in enumerate(inputs):\n if bit == '0':\n product.zeros.append(self.inputs[i])\n terms.append(Not(self.inputs[i]))\n elif bit == '1':\n product.ones.append(self.inputs[i])\n terms.append(self.inputs[i])\n product.expr = And(*terms)\n\n for i, bit in enumerate(outputs):\n if bit == '1':\n self.or_terms[self.outputs[i]].products.append(product)\n 
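# OR this product term into the running expression for the asserted output.\n                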
self.or_terms[self.outputs[i]].expr = Or(\n self.or_terms[self.outputs[i]].expr, product.expr)\n\n def read_xor_term(self, line: str):\n parts = line.split()\n assert len(parts) == 2\n assert len(parts[0]) == len(self.inputs)\n assert len(parts[1]) == len(self.outputs)\n\n inputs = parts[0]\n outputs = parts[1]\n terms = []\n for i, bit in enumerate(inputs):\n if bit == '1':\n terms.append(self.inputs[i])\n for i, bit in enumerate(outputs):\n if bit == '1':\n self.or_terms[self.outputs[i]].expr = Xor(*terms)\n\n\nclass Fitter():\n inputs: List[str]\n or_terms: Dict[str, OrTerm]\n all_or_terms: Dict[str, Dict[str, OrTerm]]\n input_mcs: Dict[str, int]\n input_sigs: Dict[str, str]\n\n def __init__(self):\n self.device = None\n self.next_mc = 1\n\n self.inputs = []\n self.outputs = []\n # A map of symbolic output to OrTerm\n self.or_terms = {}\n # A map of block to map of MC to OrTerm\n self.all_or_terms = {}\n self.all_or_exprs = {}\n\n # A map of symbolic input to macrocell number\n self.input_mcs = {}\n # A map of symbolic input to multiplexer signal name\n self.input_sigs = {}\n\n def map_inputs(self):\n print(\"Mapping pin inputs\")\n db = get_database()\n self.device = db[\"ATF1502AS\"]\n\n # For now, assuming this is an input layer, map inputs directly onto\n # MCs starting with MC1. We can use an input MC as an intermediate\n # output by routing its output to MCn_FB.\n\n self.input_mcs = {top_input: self.get_next_mc()\n for top_input in self.inputs}\n for top_input, input_mc in self.input_mcs.items():\n pin = self.device[\"pins\"][\"PLCC44\"][f\"M{input_mc}\"]\n self.input_sigs = {top_input: f\"M{input_mc}_PAD\" for top_input,\n input_mc in self.input_mcs.items()}\n print(f\"assign input {top_input} to MC{input_mc} (pin {pin})\")\n print(f\" set MC{input_mc}.oe_mux GND\")\n\n # This isn't accurate. It's only accurate when the number of intermediate\n # outputs exceeds the number of inputs.\n self.next_mc = 1\n\n # Initialize blocks in all_or_terms\n for block in self.device[\"blocks\"].keys():\n self.all_or_terms[block] = {}\n self.all_or_exprs[block] = {}\n\n def get_next_mc(self) -> int:\n specials = [4, 9, 25, 20] # TDI, TMS, TCK, TDO\n if self.next_mc in specials:\n self.next_mc += 2\n elif self.next_mc > 32:\n return None\n else:\n self.next_mc += 1\n return self.next_mc-1\n\n def map_output_layer(self):\n device = self.device\n\n for i, output in enumerate(self.outputs):\n mc = self.input_mcs[output]\n pin = device[\"pins\"][\"PLCC44\"][f\"M{mc}\"]\n print(f\"Output {output} is at MC{mc} (pin {pin})\")\n print(f\" set MC{mc}.o_mux comb\")\n print(f\" set MC{mc}.oe_mux pt5\")\n print(f\" set MC{mc}.pt5_func as\")\n\n def map_and_or_layer(self):\n print(\"Mapping AND-OR layer\")\n device = self.device\n\n # For now, map the outputs directly onto MCs starting with\n # MC1.\n for output in self.outputs:\n or_term = self.or_terms[output]\n or_expr = or_term.expr\n inv = False\n print(f\"{output} = {or_term.expr}\")\n if isinstance(or_expr, pyeda.boolalg.expr.OrOp) and len(or_expr.xs) > 5:\n # Maybe we can invert, and then use the macrocell's inverter to invert\n # the result?\n nor_expr = espresso_exprs(Not(or_term.expr).to_dnf())\n # espresso_expr returns a tuple\n # to_dnf converts an expression to disjunctive normal form\n # (i.e. 
sum of products).\n nor_expr = nor_expr[0].to_dnf()\n print(f\"Try the inverse of this instead: {nor_expr}\")\n if isinstance(nor_expr, pyeda.boolalg.expr.OrOp) and len(or_expr.xs) > 5:\n print(\n f\"ERROR: or-term for {output} needs more than\"\n \" one macrocell (5 products), which is not supported yet.\")\n return\n or_expr = nor_expr\n inv = True\n\n mc = self.get_next_mc()\n assert mc is not None, \"Ran out of macrocells\"\n mc_name = f\"MC{mc}\"\n macrocell = device[\"macrocells\"][mc_name]\n block = macrocell[\"block\"]\n print(f\"output {output} mapped to {mc_name}.FB in block {block}\")\n self.all_or_terms[block][mc_name] = or_term\n self.all_or_exprs[block][mc_name] = or_expr\n self.input_mcs[output] = mc\n self.input_sigs[output] = f\"MC{mc}_FB\"\n\n print(f\"set {mc_name}.pt_power on\")\n print(f\"set {mc_name}.pt1_mux sum\")\n print(f\"set {mc_name}.pt2_mux sum\")\n print(f\"set {mc_name}.pt3_mux sum\")\n print(f\"set {mc_name}.pt4_mux sum\")\n print(f\"set {mc_name}.pt5_mux sum\")\n print(f\"set {mc_name}.fb_mux xt\")\n print(f\"set {mc_name}.xor_a_mux sum\")\n print(f\"set {mc_name}.xor_b_mux VCC_pt12\")\n\n # It's weird, but because we have to feed a 1 into one input of\n # the macrocell's XOR, it naturally inverts. There's another\n # optional inverter after that, so if we want the non-inverted\n # output of the OR gate, we have to turn that inverter on!\n if inv:\n print(f\"set {mc_name}.xor_invert off\")\n else:\n print(f\"set {mc_name}.xor_invert on\")\n\n # Now that we've mapped inputs to outputs,\n # add them to the inputs and clear out the outputs.\n self.inputs += self.outputs\n self.outputs = []\n\n print(\"Input mcs:\")\n pprint.pprint(self.input_mcs)\n print(\"Input sigs:\")\n pprint.pprint(self.input_sigs)\n\n def map_and_xor_layer(self):\n print(\"Mapping XOR layer\")\n device = self.device\n\n # For now, map the outputs directly onto MCs starting with\n # the next MC\n for output in self.outputs:\n expr = self.or_terms[output].expr\n assert isinstance(expr, pyeda.boolalg.expr.XorOp)\n if len(expr.xs) != 2:\n print(\n f\"ERROR: xor-term for {output} does not have 2 products, which is not supported yet.\")\n return\n mc = self.get_next_mc()\n assert mc is not None, \"Ran out of macrocells\"\n mc_name = f\"MC{mc}\"\n macrocell = device[\"macrocells\"][mc_name]\n block = macrocell[\"block\"]\n print(f\"output {output} mapped to {mc_name}.FB in block {block}\")\n self.all_or_exprs[block][mc_name] = expr\n self.input_mcs[output] = mc\n self.input_sigs[output] = f\"MC{mc}_FB\"\n\n print(f\"set {mc_name}.pt_power on\")\n print(f\"set {mc_name}.pt1_mux sum\")\n print(f\"set {mc_name}.pt2_mux xor\")\n print(f\"set {mc_name}.pt3_mux sum\")\n print(f\"set {mc_name}.pt4_mux sum\")\n print(f\"set {mc_name}.pt5_mux sum\")\n print(f\"set {mc_name}.fb_mux xt\")\n print(f\"set {mc_name}.xor_a_mux sum\")\n print(f\"set {mc_name}.xor_b_mux VCC_pt12\")\n print(f\"set {mc_name}.xor_invert on\")\n\n # Now that we've mapped inputs to outputs,\n # add them to the inputs and clear out the outputs.\n self.inputs += self.outputs\n self.outputs = []\n\n print(\"Input mcs:\")\n pprint.pprint(self.input_mcs)\n print(\"Input sigs:\")\n pprint.pprint(self.input_sigs)\n\n def set_uims(self):\n # Collect all MCn_FB and Mn_PAD before choosing UIMs for each block.\n # This is an instance of the assignment problem, which we solve using the\n # Hungarian algorithm, which is O(n^3). 
The hope is that because the matrix\n # is extremely sparse, the algorithm runs very quickly.\n\n switches = self.device[\"switches\"]\n\n # Map signals to UIMs, per block\n sig_to_uim = {}\n for blk in dev[\"blocks\"].keys():\n sig_to_uim[blk] = {}\n for switch, data in switches.items():\n blk = data[\"block\"]\n switch_sigs = data[\"mux\"][\"values\"].keys()\n for sig in switch_sigs:\n if sig not in sig_to_uim[blk]:\n sig_to_uim[blk][sig] = []\n sig_to_uim[blk][sig].append(switch)\n\n for blk in self.all_or_exprs:\n print(f\"Constructing set of signals in block {blk}\")\n # Construct the set of needed signals.\n sigs = set()\n for or_expr in self.all_or_exprs[blk].values():\n sigs.update(set(self.input_sigs[str(term)]\n for term in or_expr.support))\n\n # Convert to ordered array\n sigs = [s for s in sigs]\n if len(sigs) == 0:\n print(f\"No used signals in block {blk}\")\n continue\n print(f\"Used signals in block {blk}: {sigs}\")\n\n # Construct the set of candidate switches for those signals.\n candidate_switches = set()\n for sig in sigs:\n candidate_switches.update(set(s for s in sig_to_uim[blk][sig]))\n # Convert to ordered array\n candidate_switches = [s for s in candidate_switches]\n print(f\"Candidate switches in block {blk}: {candidate_switches}\")\n\n # Construct the cost matrix. We assign an different cost per candidate\n # switch to help the algorithm be stable.\n matrix = [[DISALLOWED for _ in range(\n len(candidate_switches))] for _ in range(len(sigs))]\n for row, sig in enumerate(sigs):\n cost = 1\n for candidate_switch in sig_to_uim[blk][sig]:\n col = candidate_switches.index(candidate_switch)\n matrix[row][col] = cost\n cost += 1\n cost_matrix = make_cost_matrix(\n matrix, lambda cost: cost if cost != DISALLOWED else DISALLOWED)\n\n # Assign signals to switches.\n m = Munkres()\n indexes = m.compute(cost_matrix)\n sig_to_switch = {}\n # print_matrix(matrix, 'Based on this matrix:')\n print(\"Setting UIM fuses:\")\n for r, c in indexes:\n v = matrix[r][c]\n print(f\"set {candidate_switches[c]} {sigs[r]}\")\n sig_to_switch[sigs[r]] = candidate_switches[c]\n # pprint.pprint(sig_to_switch)\n\n print(\"Setting product term fuses:\")\n for mc_name, or_expr in self.all_or_exprs[blk].items():\n products = or_expr.xs if isinstance(or_expr, pyeda.boolalg.expr.OrOp) or isinstance(\n or_expr, pyeda.boolalg.expr.XorOp) else [or_expr]\n\n for ptn, product in enumerate(products):\n terms = product.xs if isinstance(\n product, pyeda.boolalg.expr.AndOp) else [product]\n for sig in terms:\n inv = isinstance(sig, pyeda.boolalg.expr.Complement)\n sig = str(Not(sig) if inv else sig)\n uim = sig_to_switch[self.input_sigs[sig]]\n switch_polarity = \"_N\" if inv else \"_P\"\n print(\n f\" set {mc_name}.PT{ptn} +{uim}{switch_polarity}\")\n\n\nif __name__ == \"__main__\":\n db = get_database()\n dev = db[\"ATF1502AS\"]\n\n parse = PLAParser(sys.argv[1])\n\n p = Fitter()\n p.inputs = parse.inputs\n p.outputs = parse.outputs\n p.or_terms = parse.or_terms\n\n p.map_inputs()\n\n for arg in sys.argv[1:]:\n parse = PLAParser(arg)\n p.outputs = parse.outputs\n p.or_terms = parse.or_terms\n\n if parse.is_xor:\n p.map_and_xor_layer()\n elif parse.is_outputs:\n p.map_output_layer()\n else:\n p.map_and_or_layer()\n\n p.set_uims()\n","sub_path":"pla_parser.py","file_name":"pla_parser.py","file_ext":"py","file_size_in_byte":17763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"211554161","text":"class Solution(object):\n def isAdditiveNumber(self, 
num):\n \"\"\"\n :type num: str\n :rtype: bool\n \"\"\"\n length = len(num)\n for i in range(1, (length - 1)/2 + 1):\n if num[0] == '0' and i >= 2: break\n j = i + 1\n while length - j >= j - i and length - j >= i:\n if num[i] == '0' and j - i >= 2: break\n n1 = int(num[0:i])\n n2 = int(num[i:j])\n subStr = num[j:]\n if self.isValid(subStr, n1, n2): return True\n j += 1\n return False\n \n def isValid(self, s, n1, n2):\n if not s: return True\n total = n1 + n2\n sub = str(total)\n rest = s[len(sub):]\n if not s.startswith(sub): return False\n return self.isValid(rest, n2, total)","sub_path":"additive_number.py","file_name":"additive_number.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"421891797","text":"import numpy as np\nimport pandas as pd\nimport time\nimport scipy.sparse as sp\nfrom fastFM import sgd\nfrom scipy import sparse\nfrom sklearn.metrics import roc_auc_score, average_precision_score, roc_curve\nimport math\nimport pymrmr\nfrom sklearn.model_selection import train_test_split\n\n\ndef get_links(VIM, gene_names, regulators, sort=True, file_name=None):\n idx = [i for i, gene in enumerate(gene_names) if gene in regulators]\n pred_edges = [(gene_names[j], gene_names[i], score) for (i, j), score in np.ndenumerate(VIM) if i != j and j in idx]\n pred_edges = pd.DataFrame(pred_edges)\n if sort is True:\n pred_edges.sort_values(2, ascending=False, inplace=True)\n if file_name is None:\n print(pred_edges)\n else:\n pred_edges.to_csv(file_name, sep='\\t', header=None, index=None)\n\n \n# ???????????\ndef prenormal(X_train):\n minf_X = [0.0] * X_train.shape[1]\n maxf_X = [0.0] * X_train.shape[1]\n for f in range(0, X_train.shape[1]):\n maxf_X[f] = X_train[0][f]\n minf_X[f] = X_train[0][f]\n for i in range(1, X_train.shape[0]):\n maxf_X[f] = max(maxf_X[f], X_train[i][f])\n minf_X[f] = min(minf_X[f], X_train[i][f])\n if (maxf_X[f] > minf_X[f]):\n for i in range(0, X_train.shape[0]):\n X_train[i][f] = (X_train[i][f] - minf_X[f]) / (maxf_X[f] - minf_X[f])\n else:\n for i in range(0, X_train.shape[0]):\n X_train[i][f] = 0\n maxf_X[f] = X_train[0][f]\n minf_X[f] = X_train[0][f]\n for i in range(1, X_train.shape[0]):\n maxf_X[f] = max(maxf_X[f], X_train[i][f])\n minf_X[f] = min(minf_X[f], X_train[i][f])\n return minf_X, maxf_X, X_train\n\n\n# ??????????\n\ndef subcode(X_train, b, minf, maxf):\n F = len(X_train[0])\n X_train_o = np.zeros((X_train.shape[0], F * b))\n\n a = []\n p = []\n for f in range(0, len(X_train[0])):\n fl = []\n a.append([])\n p.append([])\n for l in range(0, X_train.shape[0]):\n fl.append(X_train[l][f])\n\n a[f], p[f] = np.unique(fl, return_inverse=True)\n for l in range(0, X_train.shape[0]):\n f_en = 0\n for f in range(0, len(X_train[l])):\n if len(a[f]) == 1:\n f_en = f_en\n elif len(a[f]) >= b:\n if (X_train[l][f] - minf[f]) == (maxf[f] - minf[f]):\n X_train_o[l][f_en + b - 1] = 1\n else:\n X_train_o[l][f_en + int(float(X_train[l][f] - minf[f]) / (maxf[f] - minf[f]) * b)] = 1\n f_en += b\n else:\n X_train_o[l][f_en + p[f][l]] = 1\n f_en += len(a[f])\n if f_en < F * b:\n X_train_b = np.zeros((len(X_train), f_en))\n for i in range(len(X_train)):\n for j in range(f_en):\n X_train_b[i][j] = X_train_o[i][j]\n X_train_o = X_train_b\n\n return X_train_o\n\n\ndef FMpredict(data_x, data_y, b,selF1):\n\n minf_X, maxf_X, train_x1 = prenormal(data_x)\n #last_col = np.array([data_x.shape[0] * [1]])\n #train_x1 = np.c_[train_x1, last_col.T]\n #minf_X.append(1)\n #maxf_X.append(1)\n 
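# Bin-encode each min-max normalized feature into one-hot columns (at most b per feature) before fitting the FM.\n    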
X_train_o = subcode(train_x1, b, minf_X, maxf_X)\n last_col = np.array([X_train_o.shape[0]*[1]])\n X_train_o = np.c_[X_train_o, last_col.T]\n train_X = sp.csc_matrix(np.array(X_train_o), dtype=np.float64)\n\n # test_X = sp.csc_matrix(np.array(X_test_o), dtype=np.float64)\n # print('data_y',data_y)\n fm = sgd.FMRegression(n_iter=10000,\n init_stdev=0.0001, l2_reg_w=0.1, l2_reg_V=10000, rank=10,\n step_size=0.0001) # ???\n\n fm.fit(train_X, data_y)\n print(train_X.shape)\n print('w_bin5', fm.w_)\n return fm.w_, fm.V_,\n\n\ndef estimate_degradation_rates(TS_data, time_points):\n \"\"\"\n For each gene, the degradation rate is estimated by assuming that the gene expression x(t) follows:\n x(t) = A exp(-alpha * t) + C_min,\n between the highest and lowest expression values.\n C_min is set to the minimum expression value over all genes and all samples.\n The function is available at the study named dynGENIE3.\n Huynh-Thu, V., Geurts, P. dynGENIE3: dynamical GENIE3 for the inference of\n gene networks from time series expression data. Sci Rep 8, 3384 (2018) doi:10.1038/s41598-018-21715-0\n \"\"\"\n\n ngenes = TS_data[0].shape[1]\n nexp = len(TS_data)\n\n C_min = TS_data[0].min()\n if nexp > 1:\n for current_timeseries in TS_data[1:]:\n C_min = min(C_min, current_timeseries.min())\n\n alphas = np.zeros((nexp, ngenes))\n\n for (i, current_timeseries) in enumerate(TS_data):\n current_time_points = time_points[i]\n\n for j in range(ngenes):\n idx_min = np.argmin(current_timeseries[:, j])\n idx_max = np.argmax(current_timeseries[:, j])\n\n xmin = current_timeseries[idx_min, j]\n xmax = current_timeseries[idx_max, j]\n\n tmin = current_time_points[idx_min]\n tmax = current_time_points[idx_max]\n\n xmin = max(xmin - C_min, 1e-6)\n xmax = max(xmax - C_min, 1e-6)\n\n xmin = np.log(xmin)\n xmax = np.log(xmax)\n\n alphas[i, j] = (xmax - xmin) / abs(tmin - tmax)\n\n alphas = alphas.max(axis=0)\n\n return alphas\n\n\ndef get_importances(TS_data, time_points, alpha=\"from_data\", SS_data=None, gene_names=None, regulators='all', b=1):\n time_start = time.time()\n\n ngenes = TS_data[0].shape[1]\n\n if alpha is \"from_data\":\n alphas = estimate_degradation_rates(TS_data, time_points)\n else:\n alphas = [alpha] * ngenes\n\n # Get the indices of the candidate regulators\n idx = [i for i, gene in enumerate(gene_names) if gene in regulators]\n\n # Learn an ensemble of trees for each target gene, and compute scores for candidate regulators\n VIM = np.zeros((ngenes, ngenes))\n # print('ngenes:',ngenes)\n for i in range(ngenes):\n # print('i:',i)\n input_idx = idx.copy()\n if i in input_idx:\n input_idx.remove(i)\n vi = get_importances_single(TS_data, time_points, alphas[i], input_idx, i, SS_data, b)#note: imput_idx\n # print('vi:',vi)\n VIM[i, :] = vi\n\n time_end = time.time()\n #print('W_var', VIM)\n #print(\"Elapsed time: %.2f seconds\" % (time_end - time_start))\n\n return VIM\n\n\ndef get_importances_single(TS_data, time_points, alpha, input_idx, output_idx, SS_data, b):\n h = 1 # define the value of time step\n\n ngenes = TS_data[0].shape[1]\n nexp = len(TS_data)\n nsamples_time = sum([expr_data.shape[0] for expr_data in TS_data])\n ninputs = len(input_idx)\n\n # Construct training sample\n\n # Time-series data\n input_matrix_time = np.zeros((nsamples_time - h * nexp, ninputs))\n output_vect_time = np.zeros(nsamples_time - h * nexp)\n\n nsamples_count = 0\n for (i, current_timeseries) in enumerate(TS_data):\n current_time_points = time_points[i]\n npoints = current_timeseries.shape[0]\n time_diff_current = 
current_time_points[h:] - current_time_points[:npoints - h]\n current_timeseries_input = current_timeseries[:npoints - h, input_idx]\n current_timeseries_output = (current_timeseries[h:, output_idx] - current_timeseries[:npoints - h,\n output_idx]) / time_diff_current + alpha * current_timeseries[\n :npoints - h,\n output_idx]\n nsamples_current = current_timeseries_input.shape[0]\n input_matrix_time[nsamples_count:nsamples_count + nsamples_current, :] = current_timeseries_input\n output_vect_time[nsamples_count:nsamples_count + nsamples_current] = current_timeseries_output\n nsamples_count += nsamples_current\n\n # Steady-state data\n if SS_data is not None:\n input_matrix_steady = SS_data[:, input_idx]\n output_vect_steady = SS_data[:, output_idx] * alpha\n\n # Concatenation\n input_all = np.vstack([input_matrix_steady, input_matrix_time])\n output_all = np.concatenate((output_vect_steady, output_vect_time))\n else:\n input_all = input_matrix_time\n output_all = output_vect_time\n #mrmr feature selection\n top_gene = 9\n out = output_all.reshape(output_all.shape[0],1)\n output = pd.DataFrame(out)\n output.columns = ['G' + str(output_idx) ]\n input1 = pd.DataFrame(input_all)\n input1.columns = ['G'+str(i) for i in input_idx]\n df3 = pd.concat([output, input1], axis=1)\n selF1 = np.array(pymrmr.mRMR(df3, 'MIQ', top_gene))\n input_all = input1[selF1]\n # Compute importance scores\n w, v = FMpredict(input_all.values, output_all, b, selF1)\n input_idx = [int(i.replace('G','')) for i in selF1]\n vi = getvar_w(w, top_gene, b)\n # vi = getMAXMIN_w(w, ngenes-1, b)\n # vi = getabsmax_w(w, ngenes-1, b)\n # vi = getabssum_w(w, ngenes-1, b)\n print('w_var',vi)\n print('input_idx',input_idx)\n v_i = np.zeros(ngenes)\n v_i[input_idx] = vi\n return v_i\n\ndef get_importances_single1( alpha, input_idx, output_idx, SS_data, b):\n\n ngenes = SS_data[0].shape[1]\n\n\n # Construct training sample\n\n # Steady-state data\n\n input_matrix_steady = SS_data[:, input_idx]\n output_vect_steady = SS_data[:, output_idx] * alpha\n\n # Compute importance scores\n w, v = FMpredict(input_matrix_steady, output_vect_steady, b)\n\n vi = getvar_w(w, 3, b)\n # vi = getMAXMIN_w(w, ngenes-1, b)\n # vi = getabsmax_w(w, ngenes-1, b)\n # vi = getabssum_w(w, ngenes-1, b)\n # print('w_var',vi)\n v_i = np.zeros(ngenes)\n v_i[input_idx] = vi\n return v_i\n\ndef getabssum_w(w, ngenes, bin):\n vi = np.zeros(ngenes)\n for i in range(0, ngenes):\n max_w = 0\n for j in range(i * bin, (i + 1) * bin):\n if (i + 1) * bin > w.shape[0]:\n break;\n max_w += abs(w[j])\n vi[i] = max_w\n\n return vi\n\n\ndef getMAXMIN_w(w, ngenes, bin):\n vi = np.zeros(ngenes)\n for i in range(0, ngenes):\n max_w = 0\n min_w = 0\n for j in range(i * bin, (i + 1) * bin):\n if (i + 1) * bin > w.shape[0]:\n break;\n max_w = max(max_w, w[j])\n min_w = min(min_w, w[j])\n vi[i] = max_w - min_w\n\n return vi\n\n\ndef getabsmax_w(w, ngenes, bin):\n vi = np.zeros(ngenes)\n for i in range(0, ngenes):\n max_w = 0\n for j in range(i * bin, (i + 1) * bin):\n if (i + 1) * bin > w.shape[0]:\n break;\n max_w = max(max_w, abs(w[j]))\n vi[i] = max_w\n\n return vi\n\n\ndef getvar_w(w, ngenes, bin):\n vi = np.zeros(ngenes)\n for i in range(0, ngenes):\n arrra = []\n if i == ngenes:\n break;\n arrra = w[i * bin:(i + 1) * bin]\n arr_var = np.var(arrra)\n if math.isnan(arr_var):\n arr_var = 0\n vi[i] = arr_var\n\n return vi\n\n\n#####\ndef get_scores(VIM, gold_edges, gene_names, regulators):\n idx = [i for i, gene in enumerate(gene_names) if gene in regulators]\n pred_edges = 
[(gene_names[j], gene_names[i], score) for (i, j), score in np.ndenumerate(VIM) if i != j and j in idx]\n pred_edges = pd.DataFrame(pred_edges)\n # Take the top 100,000 predicated results\n pred_edges = pred_edges.iloc[:100000]\n final = pd.merge(pred_edges, gold_edges, on=[0, 1], how='inner')\n # np.set_printoptions(threshold=10000)\n # print('2_y', final['2_y'])\n # sprint('2_x', final['2_x'])\n auroc = roc_auc_score(final['2_y'], final['2_x'])\n\n fpr, tpr, thresholds = roc_curve(final['2_y'], final['2_x'], pos_label=2)\n # print(\"fpr\",fpr,\"tpr\",tpr,\"thresholds\",thresholds )\n aupr = average_precision_score(final['2_y'], final['2_x'])\n\n return auroc, aupr, final\n\n\n\n\n\n","sub_path":"FMModel.py","file_name":"FMModel.py","file_ext":"py","file_size_in_byte":11968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"284995326","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 30 14:30:27 2020\r\n\r\n@author: rgsre\r\n\"\"\"\r\n\r\nclass Node:\r\n def __init__(self,value):\r\n self.value = value \r\n self.next = None\r\n\r\nclass LinkList:\r\n def __init__(self):\r\n self.head = None \r\n \r\n def addnode(self,value):\r\n if self.head == None:\r\n self.head = Node(value)\r\n else:\r\n temp = self.head\r\n while self.head.next != None:\r\n self.head = self.head.next\r\n self.head.next = Node(value)\r\n self.head = temp\r\n \r\n def prepend(self,value):\r\n if self.head == None:\r\n self.head = Node(value) \r\n else:\r\n new_node = Node(value)\r\n new_node.next = self.head\r\n self.head = new_node \r\n \r\n def remove(self,value):\r\n if self.head != None:\r\n curr = prev = self.head \r\n if curr.value == value:\r\n prev = curr.next\r\n curr.next = None\r\n self.head = curr = prev\r\n else:\r\n while curr.next != None:\r\n prev = curr\r\n curr = curr.next\r\n if curr.value == value:\r\n prev.next = curr.next\r\n curr.next = None \r\n prev = curr = self.head \r\n \r\n def printlist(self):\r\n #initiate from self.head\r\n #print until sellf.head.next == None\r\n if self.head != None:\r\n if self.head.next == None:\r\n print(self.head.value)\r\n else:\r\n temp = self.head \r\n while self.head.next != None:\r\n print(self.head.value , end = '->')\r\n self.head = self.head.next\r\n print(self.head.value , end = '->')\r\n self.head= temp \r\n \r\n def rem_dup(self,value): \r\n if self.head != None:\r\n curr = prev = self.head\r\n foc = False\r\n if curr.value == value:\r\n foc = True\r\n while curr != None and curr.next != None:\r\n prev = curr\r\n curr = curr.next\r\n \r\n if foc == True and curr.value == value:\r\n prev.next = curr.next \r\n curr.next = None\r\n curr = prev.next\r\n if curr != None and curr.value == value:\r\n foc = True\r\n prev = curr = self.head\r\n \r\n def remall(self,value):\r\n curr = prev = self.head\r\n\r\n \r\n while curr.next != None:\r\n if curr.value != value:\r\n prev = curr\r\n curr = curr.next \r\n elif curr.value == value and prev.value != value:\r\n prev.next = curr.next \r\n curr.next = None\r\n curr = prev.next\r\n elif curr.value == value and prev.value == value:\r\n prev.next = curr.next\r\n prev = prev.next\r\n curr.next = None\r\n curr = prev\r\n if curr.value == value and curr.next == None:\r\n prev.next = None\r\n curr = None\r\n \r\n \r\n \r\n# \r\n curr = prev = self.head\r\n \r\n def deleteKey(self, key): \r\n temp = self.head \r\n prev = None\r\n \r\n # If head node itself holds the key \r\n # or multiple occurrences of key \r\n while (temp != None and temp.value == key): 
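# advance self.head past every consecutive match at the front of the list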
\r\n self.head = temp.next # Changed head \r\n temp = self.head # Change Temp \r\n \r\n # Delete occurrences other than head \r\n while (temp != None): \r\n # Search for the key to be deleted, \r\n # keep track of the previous node \r\n # as we need to change 'prev.next' \r\n while (temp != None and temp.value != key): \r\n prev = temp \r\n temp = temp.next\r\n \r\n # If key was not present in linked list \r\n if (temp == None): \r\n return self.head \r\n \r\n # Unlink the node from linked list \r\n prev.next = temp.next\r\n \r\n # Update Temp for next iteration of outer loop \r\n temp = prev.next\r\n return self.head \r\n \r\n \r\n \r\n \r\n \r\na = LinkList()\r\n\r\n\r\n\r\na.addnode(6)\r\na.addnode(7)\r\na.addnode(7)\r\na.addnode(8)\r\n\r\na.printlist()\r\n\r\n\r\n","sub_path":"LinkedList_.py","file_name":"LinkedList_.py","file_ext":"py","file_size_in_byte":4555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"246706219","text":"import tkinter as tk\nfrom PIL import Image,ImageTk\nimport winsound\nfrom src.main.model.Game import Game\nfrom src.main.model.AIGame import AIGame\n\n# Constant Declarations\nSCALE_MULTIPLIER = 1.5\nDEFAULT_SIZE = 50\nSQUARESIZE = int(DEFAULT_SIZE*SCALE_MULTIPLIER)\n\n\nclass ChessApplication(tk.Tk):\n def __init__(self, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n\n # Create Main Window\n self.window = tk.Frame(self)\n self.window.pack(side=\"top\", fill=\"both\", expand=True)\n self.window.rowconfigure(0, weight=1)\n self.window.columnconfigure(0, weight=1)\n self.minsize(8*SQUARESIZE, 8*SQUARESIZE + SQUARESIZE//2)\n self.maxsize(8*SQUARESIZE, 8*SQUARESIZE + SQUARESIZE//2)\n self.wm_title(\"Chess\")\n self.iconphoto(self, ImageTk.PhotoImage(file=\".\\\\img\\\\blkKnight.png\"))\n\n # Initialize Scenes\n self.scenes = {}\n\n # For now, main page is a new game\n self.new_game(\"Normal\")\n\n # Create Option Menu\n menu_bar = tk.Menu(self)\n menu_bar.add_command(label=\"New Game\", command=lambda: self.new_game(\"Normal\"))\n ai_menu = tk.Menu(menu_bar, tearoff=0)\n easy_color_menu = tk.Menu(ai_menu, tearoff=0)\n easy_color_menu.add_command(label=\"Black\", command=lambda: self.new_game(\"BlackEasy\"))\n easy_color_menu.add_command(label=\"White\", command=lambda: self.new_game(\"WhiteEasy\"))\n hard_color_menu = tk.Menu(ai_menu, tearoff=0)\n hard_color_menu.add_command(label=\"Black\", command=lambda: self.new_game(\"BlackHard\"))\n hard_color_menu.add_command(label=\"White\", command=lambda: self.new_game(\"WhiteHard\"))\n ai_menu.add_cascade(label=\"Easy\", menu=easy_color_menu)\n ai_menu.add_cascade(label=\"Hard\", menu=hard_color_menu)\n menu_bar.add_cascade(label=\"Versus AI\", menu=ai_menu)\n self.config(menu=menu_bar)\n self.center()\n\n def center(win):\n win.update_idletasks()\n width = win.winfo_width()\n height = win.winfo_height()\n x = (win.winfo_screenwidth() // 2) - (width // 2)\n y = (win.winfo_screenheight() // 2) - (height // 3)\n win.geometry('{}x{}+{}+{}'.format(width, height, x, y-100))\n\n def show_scene(self,cont):\n scene = self.scenes[cont]\n self.tkraise(scene)\n # TODO: redraw scene on top\n\n def new_game(self,type):\n self.scenes[ChessGUI] = ChessGUI(self.window,self,type)\n\n\nclass ChessGUI(tk.Canvas):\n def __init__(self, parent,controller,type):\n tk.Canvas.__init__(self, parent)\n self.grid(row=0, column=0, sticky=\"nsew\")\n self.imgs = []\n self.first_input = None\n self.last_highlight = None\n self.bind(\"\", self.handle_click)\n games = {\n \"Normal\": 
Game(),\n \"BlackEasy\": AIGame(\"Black\",1),\n \"BlackHard\": AIGame(\"Black\",1),\n \"WhiteEasy\": AIGame(\"White\",2),\n \"WhiteHard\": AIGame(\"White\",2)\n }\n self.game = games.get(type)\n self.redraw()\n\n def handle_click(self, event):\n x_coord = int(event.x / SQUARESIZE)\n y_coord = int(event.y / SQUARESIZE)\n # Translate coords to squares\n letter = chr(x_coord+97)\n num = str(8-y_coord)\n square = letter+num\n if self.first_input is None:\n # First click:\n if self.game.board.has_color_piece(self.game.turn, square):\n # Your first click is on one of your pieces, highlight it\n self.first_input = square\n self.last_highlight = self.draw_image(\".\\\\img\\\\highlight.png\", x_coord, y_coord)\n else:\n self.last_highlight = None\n elif self.first_input is not None:\n # Second click:\n if self.game.make_move(self.first_input, square):\n # Second click is a valid move, make it and reset\n if self.game.turn == \"White\":\n winsound.Beep(260, 150)\n pass\n else:\n winsound.Beep(200, 150)\n pass\n self.last_highlight = None\n self.first_input = None\n self.redraw()\n elif self.game.board.has_color_piece(self.game.turn, square):\n # Second click is not a valid move, but one of your pieces. Highlight it\n self.first_input = square\n self.last_highlight = self.draw_image(\".\\\\img\\\\highlight.png\", x_coord, y_coord)\n else:\n # Second click is not one of your pieces, and not a valid move. Reset highlights/inputs\n self.first_input = None\n self.last_highlight = None\n\n def redraw(self):\n # Delete Old Images\n self.imgs.clear()\n # Redraw Squares\n for sqr in self.game.board.squares:\n if sqr.color == \"Black\":\n sqr_file = \".\\\\img\\\\black_square.png\"\n else:\n sqr_file = \".\\\\img\\\\white_square.png\"\n self.imgs.append(self.draw_image(sqr_file, sqr.x_coord, sqr.y_coord))\n # Redraw Pieces\n for piece in self.game.board.pieces:\n if piece.color == \"White\":\n piece_file = \".\\\\img\\\\wht\"+piece.name+\".png\"\n else:\n piece_file = \".\\\\img\\\\blk\"+piece.name+\".png\"\n x_coord = ord(piece.square[0]) - 97\n y_coord = 8 - int(piece.square[1])\n self.imgs.append(self.draw_image(piece_file, x_coord, y_coord))\n # Redraw Bottom Menu\n self.create_rectangle((0, 8*SQUARESIZE, 8*SQUARESIZE, 8*SQUARESIZE+SQUARESIZE//2), fill=\"Black\", outline=\"Black\")\n display_message = self.game.turn+\" To Move...\"\n if self.game.is_checked(self.game.turn):\n if self.game.is_checkmated(self.game.turn):\n display_message = \"Checkmate,\"\n winsound.Beep(500, 100)\n winsound.Beep(500, 100)\n if self.game.turn == \"White\":\n display_message += \" Black Wins!\"\n else:\n display_message += \" White Wins!\"\n else:\n display_message = \"Check! 
\" + display_message\n winsound.Beep(500, 150)\n if self.game.is_stalemated(self.game.turn):\n display_message = \"Stalemate, Draw!\"\n self.create_text((4*SQUARESIZE, 8*SQUARESIZE+(SQUARESIZE//4)), text=display_message, font=(\"Fixedsys\", str(int(16*SCALE_MULTIPLIER))), fill=\"White\")\n\n def draw_image(self, filename, x_coord, y_coord):\n raw_img = Image.open(filename)\n resized = raw_img.resize((SQUARESIZE, SQUARESIZE), Image.ANTIALIAS)\n img = ImageTk.PhotoImage(resized)\n self.create_image(SQUARESIZE * x_coord, SQUARESIZE * y_coord, image=img, anchor=tk.NW)\n return img\n\n\napp = ChessApplication()\napp.mainloop()\n","sub_path":"src/main/view/ChessApplication.py","file_name":"ChessApplication.py","file_ext":"py","file_size_in_byte":6897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"434088113","text":"class Solution:\n \"\"\"\n @param grid: a list of lists of integers\n @return: return an integer, denote the number of distinct islands\n \"\"\"\n def numberofDistinctIslands(self, grid):\n # write your code here\n \n #visited = [[ 0 for _ in range(grid[0])] for _ in range(grid)]\n \n res = []\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n tmp = self.dfs(grid,i,j,i,j,res)\n if tmp == '':\n continue\n if tmp not in res:\n res.append(tmp)\n \n return len(res)\n \n \n def dfs(self,grid,i,j,oi,oj,res):\n \n if i < 0 or j < 0 or i >= len(grid) or j >= len(grid[0]) or grid[i][j] == 0:\n return ''\n \n grid[i][j] = 0\n \n\n child = self.dfs(grid,i+1,j,oi,oj,res) + self.dfs(grid,i,j+1,oi,oj,res) + self.dfs(grid,i-1,j,oi,oj,res) + self.dfs(grid,i,j-1,oi,oj,res)\n \n return str(i-oi) + \"_\" + str(j-oj) + child\n \n \n \n \n \n \n \n \n \n","sub_path":"Amazon/694.py","file_name":"694.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"414356176","text":"\"\"\"\nThis is the main logic of the APP\n\"\"\"\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QLabel, QMainWindow, QInputDialog, QMessageBox\nfrom PyQt5 import QtCore\nfrom PyQt5.QtGui import QTextCursor, QTextDocument, QIcon, QTextCharFormat\nfrom PyQt5.QtMultimedia import QMediaPlayer, QMediaContent\nfrom UI.main_UI import *\nfrom bs4 import BeautifulSoup\nimport os\nimport fitz\nimport json\nimport re\nfrom gen_anki_deck import AnkiDeckGenerator\nfrom settings_page import SettingsPage\nfrom format_selected_text import FormatSelectedText\nimport mammoth\nfrom html2docx import html2docx\nfrom API.google_trans_API import GoogleTranslate\nfrom flashcard_list_manager import FlashcardManager\nfrom help_page import HelpWindow\nimport datetime\nfrom multi_threading import Worker\nfrom database_helper import Database\nfrom syntax_highlighter import SyntaxHighlighter\nimport logging\n# from BlurWindow.blurWindow import GlobalBlur\nlogging.basicConfig(level=logging.DEBUG,filename=\"app.log\",format='%(asctime)s - %(levelname)s - %(message)s')\nclass MainWindow(MainUIWidget):\n\n\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n # Create classes instances:\n self.audio_player = QMediaPlayer()\n self.anki_gen = AnkiDeckGenerator()\n self.settings = SettingsPage()\n self.translator = GoogleTranslate()\n self.flashcards_list = FlashcardManager()\n self.format_widget = FormatSelectedText(self.left_pane.browser)\n self.help_page = HelpWindow()\n self.db = Database()\n self.thread_pool = QtCore.QThreadPool()\n self.highlighter = 
SyntaxHighlighter(self.left_pane.browser.document())\n self.recent_files_widget = QListWidget()\n self.recent_files_widget.setAttribute(QtCore.Qt.WA_QuitOnClose,False)\n #connections\n self.left_pane.browser.clicked.connect(self.browser_clicked)\n self.left_pane.browser.hightlight.connect(self.highlight)\n self.left_pane.browser.clear_highlighting.connect(self.format_widget.clear_highlighting)\n self.left_pane.browser.scroll.connect(self.refresh_hightlight_on_screen)\n self.left_pane.browser.verticalScrollBar().sliderMoved.connect(self.refresh_hightlight_on_screen)\n self.left_pane.toolbar.actionTriggered[QAction].connect(self.handle_toolbar_click)\n self.top_right_pane.toolbar.actionTriggered[QAction].connect(self.handle_toolbar_click)\n self.top_right_pane.make_flash_btn.clicked.connect(self.add_flashcard_to_db)\n self.audio_player.durationChanged.connect(self.update_slider_duration)\n self.audio_player.positionChanged.connect(self.update_slider_position)\n self.left_pane.audio_slider.valueChanged.connect(self.audio_player.setPosition)\n self.settings.save_button.clicked.connect(self.save_settings_to_db)\n self.settings.other_tab.dark_theme_checkbox.stateChanged.connect(self.toggle_theme)\n self.settings.user_tab.user_combobox.currentTextChanged.connect(self.change_current_user)\n self.settings.user_tab.add_user_btn.clicked.connect(self.add_new_user)\n self.settings.cancel_button.clicked.connect(self.settings.close)\n self.top_right_pane.unknown_btn.clicked.connect(lambda: self.save_to_vocab(\"unknown\"))\n self.top_right_pane.semi_known_btn.clicked.connect(lambda: self.save_to_vocab(\"semi-known\"))\n self.top_right_pane.known_btn.clicked.connect(lambda: self.save_to_vocab(\"known\"))\n self.format_widget.font_size_box.valueChanged.connect(lambda x: self.left_pane.browser.setFontPointSize(x))\n self.settings.user_tab.user_delete.clicked.connect(self.delete_user)\n # other\n self.dark_theme_palette = self.setup_dark_theme()\n self.run_start_up_settings()\n self.vocab_or_grammar = \"vocab\"\n # keyboard shortcuts\n self.lookup_shortcut = QShortcut(QtGui.QKeySequence(\"Ctrl+Up\"),self)\n self.lookup_shortcut.activated.connect(self.browser_clicked)\n self.make_flashcard_shortcut = QShortcut(QtGui.QKeySequence(\"Ctrl+Down\"),self)\n self.make_flashcard_shortcut.activated.connect(self.add_flashcard_to_db)\n self.toggle_audio_shortcut = QShortcut(QtGui.QKeySequence(\"Alt+Up\"),self)\n self.toggle_audio_shortcut.activated.connect(self.toggle_play_audio)\n self.skip_back_shortcut = QShortcut(QtGui.QKeySequence(\"Alt+Left\"),self)\n self.skip_back_shortcut.activated.connect(self.skip_back)\n self.skip_forward_shortcut = QShortcut(QtGui.QKeySequence(\"Alt+Right\"),self)\n self.skip_forward_shortcut.activated.connect(self.skip_forward)\n \n\n # startup settings - last user, darktheme...\n def run_start_up_settings(self):\n self.current_user = self.db.get_last_user()\n self.all_users_names = self.db.get_all_users()\n self.current_online_tools = self.db.get_online_tools(self.current_user[\"id\"])\n self.current_grammar_rules = self.db.get_grammar_rules(self.current_user[\"id\"])\n self.current_other_settings = self.db.get_other_settings(self.current_user[\"id\"])\n self.current_highlighters = self.db.get_highlighters(self.current_user[\"id\"])\n self.recent_files = self.db.get_recent_files(self.current_user[\"id\"])\n self.current_selection = {\"selection\":\"\", \"db_findings\":[]}\n self.current_flashcard_audio = {\"start\":None,\"end\":None}\n # UI\n if 
self.current_other_settings[\"dark_theme\"]:\n self.setPalette(self.dark_theme_palette)\n self.settings.setPalette(self.dark_theme_palette)\n self.flashcards_list.setPalette(self.dark_theme_palette)\n self.recent_files_widget.setPalette(self.dark_theme_palette)\n self.set_icons(True)\n else:\n self.set_icons(False)\n # online tools\n self.bottom_right_pane.start_tabs(self.current_online_tools)\n\n def load_settings_to_settings_page(self):\n self.settings.load_online_tool_settings(self.current_online_tools)\n self.settings.load_other_settings(self.current_other_settings)\n self.settings.load_user(self.all_users_names,self.current_user)\n self.settings.load_grammar_rules(self.current_grammar_rules)\n \n def change_current_user(self):\n new_user_name = self.settings.user_tab.user_combobox.currentText()\n self.db.set_last_user(new_user_name)\n self.run_start_up_settings()\n self.load_settings_to_settings_page()\n \n def add_new_user(self):\n new_user_name = self.settings.user_tab.add_user_name.text()\n if self.db.add_new_user(new_user_name):# returns true if user already exists\n self.display_msg(\"Oops!\",\"That user name already exists\")\n else:\n self.db.set_last_user(new_user_name)\n self.run_start_up_settings()\n self.settings.user_tab.add_user_name.clear()\n self.load_settings_to_settings_page()\n \n def save_settings_to_db(self):\n self.settings.close()\n # online tools\n online_tools_list = []\n tools_row_count = self.settings.online_tools_tab.online_tools_table_widget.rowCount() -1\n for row in range(tools_row_count):\n online_tools_list.append([\n self.settings.online_tools_tab.online_tools_table_widget.item(row,1).text(),\n self.settings.online_tools_tab.online_tools_table_widget.item(row,2).text()\n ])\n self.db.save_online_tools(online_tools_list,self.current_user[\"id\"])\n\n # grammar settings\n grammar_rules_list = []\n rules_row_count = self.settings.grammar_tab.grammar_table_widget.rowCount() -1\n for row in range(rules_row_count):\n grammar_rules_list.append([\n self.settings.grammar_tab.on_widget[row].isChecked(),\n self.settings.grammar_tab.grammar_table_widget.item(row,2).text(),\n self.settings.grammar_tab.color_btn[row].styleSheet()[23:-1],\n self.settings.grammar_tab.opacity_widget[row].value(),\n self.settings.grammar_tab.style_widget[row].currentText(),\n self.settings.grammar_tab.grammar_table_widget.item(row,6).text()\n ])\n self.db.save_grammar_rules(grammar_rules_list,self.current_user[\"id\"])\n\n \n # other settings\n other_settings = {}\n other_settings[\"dark_theme\"] = self.settings.other_tab.dark_theme_checkbox.isChecked()\n other_settings[\"autofill_back_of_flashcard\"] = self.settings.other_tab.autofill_checkbox.isChecked()\n self.db.save_other_settings(self.current_user[\"id\"], other_settings)\n self.run_start_up_settings()\n self.start_update_highlight_words()\n \n def save_to_vocab(self,confid):\n definition_to_save = self.top_right_pane.flash_back.toPlainText()\n word_to_save = self.current_selection[\"selection\"]\n #check if it is a phrase\n pattern = re.compile(\"\\W\")\n if re.search(pattern, word_to_save) != None:\n confid = confid + \"-sent\"\n # get highlighter id\n for item in self.current_highlighters:\n if item[4] == confid:\n current_confidence = item[0]\n\n if self.current_selection[\"db_findings\"] != []:\n print(\"updated word in database\")\n self.db.update_word_to_vocab(self.current_selection[\"db_findings\"][0][0],definition_to_save,current_confidence)\n self.start_update_highlight_words()\n elif 
self.current_selection[\"selection\"] != \"\":\n self.db.save_word_to_vocabulary(self.current_user[\"id\"],word_to_save, definition_to_save,current_confidence)\n self.start_update_highlight_words()\n else:\n self.display_msg(\"sorry\",\"No word or phrase selected\")\n self.current_selection = {\"selection\":\"\", \"db_findings\":[]}\n\n def start_update_highlight_words(self):\n # start highlighter in another thread\n worker = Worker(self.update_highlight_words)\n worker.signals.finished.connect(self.refresh_hightlight_on_screen)\n self.thread_pool.start(worker)\n\n def refresh_hightlight_on_screen(self):\n \"\"\"The syntax highlighter class only highlights one block at a time and that makes it more responsive,\n this function should be called when the user scrolls. It finds how much of the screen is visible\n and then rehighlights all the blocks it finds\"\"\"\n start_pos = self.left_pane.browser.cursorForPosition(QtCore.QPoint(0, 0)).position()\n bottom_right= QtCore.QPoint(self.left_pane.browser.viewport().width() -1,self.left_pane.browser.viewport().height()-1)\n end_pos = self.left_pane.browser.cursorForPosition(bottom_right).position()\n\n cursor = self.left_pane.browser.textCursor()\n cursor.setPosition(start_pos)\n old_pos = cursor.position()\n while cursor.position() < end_pos:\n block = cursor.block()\n self.highlighter.rehighlightBlock(block)\n cursor.movePosition(QTextCursor.NextBlock)\n if cursor.position() == old_pos:\n break\n else:\n old_pos = cursor.position()\n \n def update_highlight_words(self):\n if self.vocab_or_grammar == \"vocab\":\n all_dicts = []\n for highlighter in self.current_highlighters:\n hl_id = highlighter[0]\n vocab_for_hl = self.db.get_list_of_vocab_by_highlighter(self.current_user[\"id\"], hl_id)\n hl_color = [float(s) for s in highlighter[2].split(\",\")] # converts color string to list of floats\n hl_style = highlighter[3]\n\n if vocab_for_hl != []:\n color = QtGui.QColor()\n color.setRgbF(hl_color[0],hl_color[1],hl_color[2],hl_color[3])\n the_format = QTextCharFormat()\n if hl_style == \"underline\":\n the_format.setUnderlineColor(color) \n the_format.setUnderlineStyle(QTextCharFormat.SingleUnderline)\n elif hl_style == \"background\":\n the_format.setBackground(color)\n \n all_dicts.append({\"vocab\":vocab_for_hl,\"fmt\":the_format})\n self.highlighter.set_state(all_dicts,[])\n\n elif self.vocab_or_grammar == \"grammar\":\n print(self.current_grammar_rules)\n all_dicts = []\n for rule in self.current_grammar_rules:\n if rule[2]:\n regex_list = [word.strip() for word in rule[7].split(',')]\n the_format = QTextCharFormat()\n color_from_str = [float(s) for s in rule[4].split(\",\")]\n color = QtGui.QColor()\n print(float(rule[5]))\n color.setRgbF(color_from_str[0],color_from_str[1],color_from_str[2],float(rule[5]))\n if rule[6] == \"underline\":\n the_format.setUnderlineColor(color)\n the_format.setUnderlineStyle(QTextCharFormat.SingleUnderline)\n elif rule[6] == \"highlight\":\n the_format.setBackground(color)\n all_dicts.append({\"words\":regex_list,\"fmt\":the_format})\n self.highlighter.set_state([],all_dicts)\n\n\n def toggle_vocab_grammar(self):\n if self.vocab_or_grammar == \"vocab\":\n self.vocab_or_grammar = \"grammar\"\n self.left_pane.vocab_grammar_toggle.setText(\"toggle_g\")\n elif self.vocab_or_grammar ==\"grammar\":\n self.vocab_or_grammar = \"vocab\"\n self.left_pane.vocab_grammar_toggle.setText(\"toggle_v\")\n\n if self.vocab_or_grammar == \"vocab\" and self.current_other_settings[\"dark_theme\"]:\n 
self.left_pane.vocab_grammar_toggle.setIcon(QIcon(os.path.join(\"src\",\"img\",\"toggle_v_dark.png\")))\n elif self.vocab_or_grammar == \"grammar\" and self.current_other_settings[\"dark_theme\"]:\n self.left_pane.vocab_grammar_toggle.setIcon(QIcon(os.path.join(\"src\",\"img\",\"toggle_g_dark.png\")))\n elif self.vocab_or_grammar == \"vocab\" and not self.current_other_settings[\"dark_theme\"]:\n self.left_pane.vocab_grammar_toggle.setIcon(QIcon(os.path.join(\"src\",\"img\",\"toggle_v.png\")))\n elif self.vocab_or_grammar == \"grammar\" and not self.current_other_settings[\"dark_theme\"]:\n self.left_pane.vocab_grammar_toggle.setIcon(QIcon(os.path.join(\"src\",\"img\",\"toggle_g.png\")))\n self.start_update_highlight_words()\n \n\n\n \n def highlight(self,color):\n cursor = self.left_pane.browser.textCursor()\n the_format = QTextCharFormat()\n the_format.setBackground(QtGui.QBrush(QtGui.QColor(color)))\n cursor.mergeCharFormat(the_format)\n\n def display_msg(self,title,text):\n msgBox = QMessageBox()\n msgBox.setText(text)\n msgBox.setStandardButtons(QMessageBox.Ok)\n msgBox.setWindowTitle(title)\n msgBox.exec()\n \n def browser_clicked(self):\n if self.left_pane.browser.textCursor().hasSelection():\n self.get_sel_in_context()\n else:\n self.get_word_in_context()\n\n def get_word_in_context(self):\n cursor = self.left_pane.browser.textCursor()\n cursor.select(QTextCursor.WordUnderCursor)\n if cursor.hasSelection():\n word = cursor.selectedText()\n context = self.get_context(cursor)\n split_context = re.split('(\\W)', context)\n for i, w in enumerate(split_context):\n if w == word:\n split_context[i] = \"\" + split_context[i] + \"\"\n context_bold = \"\".join(split_context)\n self.autofill_searchbar(word)\n self.autofill_flashcard(context_bold)\n self.handle_lookup(word, context)\n\n def get_sel_in_context(self):\n cursor = self.left_pane.browser.textCursor()\n selection = self.left_pane.browser.textCursor().selectedText()\n context = self.get_context(cursor)\n context_bold = context.replace(selection, \"\" + selection + \"\")\n self.autofill_searchbar(selection)\n self.autofill_flashcard(context_bold)\n self.handle_lookup(selection.strip(), context)\n\n def get_context(self,cursor):\n steps_to_move = 15 # <--- this could be changed in the settings\n not_reached_upper_limit = True\n not_reached_lower_limit = True\n for i in range(steps_to_move):\n cursor.movePosition(QTextCursor.WordLeft,QTextCursor.MoveAnchor)\n if cursor.atBlockStart():\n not_reached_upper_limit = False\n break\n for i in range(steps_to_move*2):\n cursor.movePosition(QTextCursor.WordRight,QTextCursor.KeepAnchor)\n if cursor.atBlockEnd():\n not_reached_lower_limit = False\n break\n context = cursor.selectedText()\n if not_reached_lower_limit:\n context = context.strip() + \"...\"\n if not_reached_upper_limit:\n context = \"...\" + context\n return context\n \n def autofill_searchbar(self,search_term):\n self.left_pane.searchbar_lineedit.setText(search_term)\n\n def autofill_flashcard(self,context):\n self.top_right_pane.flash_front.clear()\n self.top_right_pane.flash_front.setHtml(context)\n\n def handle_lookup(self, selection, context):\n #check database first\n try:\n db_findings = self.db.look_up_sel_in_db(selection,self.current_user[\"id\"])\n except Exception as e:\n logging.exception(\"while looking for definition in db\")\n\n for i, row in enumerate(self.current_online_tools):\n self.bottom_right_pane.my_tabs[i].setUrl(QUrl(row[2].replace(\"WORD\",selection).replace(\"SENT\",context)))\n if db_findings != []:\n 
self.top_right_pane.flash_back.clear()\n self.top_right_pane.flash_back.insertPlainText(db_findings[0][3])\n elif self.current_other_settings[\"autofill_back_of_flashcard\"]:\n self.top_right_pane.flash_back.clear()\n try:\n translation = self.translator.translate(selection)\n self.top_right_pane.flash_back.insertPlainText(translation)\n except Exception as e:\n logging.exception(\"google translate autofill api\")\n print(f\"there was an error with the autofill api: {e}\")\n self.current_selection = {\"selection\":selection,\"db_findings\":db_findings}\n\n def handle_toolbar_click(self,action):\n action = action.text()\n if action == 'play':\n self.toggle_play_audio()\n elif action == 'open':\n try:\n self.open_file()\n except Exception as e:\n logging.exception(\"when opening file\")\n self.display_msg(\"Error\",\"Failed to open file.\\n\" + str(e))\n elif action == 'save':\n try:\n self.save_file()\n except Exception as e:\n logging.exception(\"when saving file\")\n self.display_msg(\"Error\",\"Failed to save file.\\n\" + str(e))\n elif action == 'download':\n self.download_flashcards()\n elif action == 'skip_back':\n self.skip_back()\n elif action == 'skip_forward':\n self.skip_forward()\n elif action == 'settings':\n self.load_settings_to_settings_page()\n self.settings.show()\n elif action == 'list':\n self.flashcards_list.set_up(self.current_user[\"id\"])\n self.flashcards_list.show()\n # GlobalBlur(self.flashcards_list.winId(),Dark=True,QWidget=self.flashcards_list)\n elif action == 'help':\n self.help_page.show()\n elif action == 'highlight':\n self.start_update_highlight_words()\n elif action == 'format':\n self.format_widget.show()\n elif action == 'toggle_v' or action == 'toggle_g':\n self.toggle_vocab_grammar()\n elif action == 'no_sound':\n self.set_audio_for_flashcard()\n elif action == 'blank':\n self.new_blank_document()\n\n elif action == 'recent_file':\n self.open_recent_file_window()\n\n\n def save_file(self):\n filepath = QFileDialog.getSaveFileName(self, 'Save File','',\"HTML Files (*.html);; TXT Files (*.txt) ;; DOCX Files (*.docx)\")[0]\n if filepath:\n file_type = self.get_file_extension_from_path(filepath)\n filename = self.get_filename_from_path(filepath)\n if file_type == '.txt':\n file_data = self.left_pane.browser.toPlainText()\n with open(filepath, 'w', encoding='utf8', errors='ignore') as f:\n f.write(file_data)\n elif file_type == '.html':\n file_data = self.left_pane.browser.toHtml()\n with open(filepath, 'w', encoding='utf8', errors='ignore') as f:\n f.write(file_data)\n elif file_type == '.docx':\n file_data = self.left_pane.browser.toHtml()\n file_data = html2docx(file_data,title=filename).getvalue()\n with open(filepath, 'wb') as f:\n f.write(file_data)\n else:\n self.display_msg(\"Error\", f'Only \".txt\", \".html\", and \".docx\" file extensions are supported.')\n self.db.add_recent_file(filepath,self.current_user[\"id\"])\n \n def open_file(self,filepath_passed=None):\n if not filepath_passed:\n filepath = QFileDialog.getOpenFileName(self,'select a text document')[0]\n if not filepath:\n return\n else:\n filepath = filepath_passed\n resources_path = self.get_folder_from_path(filepath)\n filetype = self.get_file_extension_from_path(filepath)\n self.left_pane.browser.document().setMetaInformation(QTextDocument.DocumentUrl, QtCore.QUrl.fromLocalFile(resources_path).toString())\n is_html = True\n try:\n if filetype == \".txt\":\n with open(filepath,'r') as f:\n data = f.read()\n is_html = False\n elif filetype == \".html\" or filetype == \".htm\" or 
filetype == \".mhtml\" or filetype == \".mht\":\n with open(filepath,'r',encoding='utf8', errors='ignore') as f:\n data = f.read()\n elif filetype == \".docx\":\n with open(filepath,'rb') as f:\n data = mammoth.convert_to_html(f).value\n elif filetype == \".pdf\":\n with fitz.open(filepath) as doc:\n pages = []\n for i in range(doc.page_count):\n pages.append(doc.load_page(i))\n for page in pages:\n data = page.get_text(\"html\")\n self.left_pane.browser.clear()\n self.left_pane.browser.insertHtml(data)\n self.load_audio(filepath)\n self.db.add_recent_file(filepath,self.current_user[\"id\"])\n else:\n self.display_msg(\"Error\",f'Could not recognize file: \"{filetype}\"')\n return\n self.left_pane.browser.clear()\n if is_html:\n self.left_pane.browser.insertHtml(data)\n self.db.add_recent_file(filepath,self.current_user[\"id\"])\n else:\n self.left_pane.browser.insertPlainText(data)\n self.db.add_recent_file(filepath,self.current_user[\"id\"])\n self.load_audio(filepath)\n self.start_update_highlight_words()\n except Exception as e:\n self.display_msg(\"Error Loading file\",f\"{e}\")\n \n def set_audio_for_flashcard(self):\n if self.audio_player.isAudioAvailable():\n pos = self.audio_player.position()\n dur = self.audio_player.duration()\n start_time = self.get_start_time(pos,5000)\n end_time = self.get_end_time(pos,5000,dur)\n self.current_flashcard_audio = {\"start\":start_time,\"end\":end_time}\n \n if self.current_other_settings[\"dark_theme\"]:\n self.top_right_pane.add_sound_action.setIcon(QIcon(os.path.join(\"src\", \"img\", \"sound_dark.png\")))\n else:\n self.top_right_pane.add_sound_action.setIcon(QIcon(os.path.join(\"src\", \"img\", \"sound.png\")))\n else:\n self.current_flashcard_audio = {\"start\":None,\"end\":None}\n if self.current_other_settings[\"dark_theme\"]:\n self.top_right_pane.add_sound_action.setIcon(QIcon(os.path.join(\"src\", \"img\", \"no_sound_dark.png\")))\n else:\n self.top_right_pane.add_sound_action.setIcon(QIcon(os.path.join(\"src\", \"img\", \"no_sound.png\")))\n self.display_msg(\"sorry\",\"No Audio Found\")\n\n def add_flashcard_to_db(self):\n audio_file = self.current_audio\n audio_start = self.current_flashcard_audio[\"start\"]\n audio_end = self.current_flashcard_audio[\"end\"]\n front = self.top_right_pane.flash_front.toHtml()\n back = self.top_right_pane.flash_back.toHtml()\n front_html = BeautifulSoup(front,\"html.parser\")\n back_html = BeautifulSoup(back,\"html.parser\")\n front = front_html.body\n back = back_html.body\n front_text = front.getText()\n back_text = back.getText() \n image = back_html.find('img')\n try:\n img_source = image['src']\n image.decompose()\n except:\n img_source = \"\"\n\n if front_text == \"\\n\":\n self.display_msg(\"Oops!\",\"No text was found for the Flashcard.\")\n return 0\n if back_text == \"\\n\" and image is None:\n self.display_msg(\"Oops!\",\"No text or images were found for back of the Flashcard.\")\n return 0\n\n flashcard = {\n \"front\":str(front),\n \"back\":str(back),\n \"back_image\":img_source,\n \"audio_file\":audio_file,\n \"audio_start\":audio_start,\n \"audio_end\":audio_end\n }\n try:\n self.db.add_flashcard_to_db(flashcard,self.current_user[\"id\"])\n except Exception as e:\n print(e)\n logging.exception(\"adding flashcard to db\")\n self.top_right_pane.flash_front.clear()\n self.top_right_pane.flash_back.clear()\n self.current_flashcard_audio = {\"start\":None,\"end\":None}\n self.top_right_pane.add_sound_action.setIcon(QIcon(os.path.join(\"src\",\"img\",\"\")))\n if 
self.current_other_settings[\"dark_theme\"]:\n self.top_right_pane.add_sound_action.setIcon(QIcon(os.path.join(\"src\", \"img\", \"no_sound_dark.png\")))\n else:\n self.top_right_pane.add_sound_action.setIcon(QIcon(os.path.join(\"src\", \"img\", \"no_sound.png\")))\n\n\n def download_flashcards(self): #this creates anki deck\n filepath = QFileDialog.getSaveFileName(self, 'Download Anki Deck','',\"Anki File (*.apkg)\")[0]\n if filepath == \"\":\n return 0\n deck_name = self.get_filename_from_path(filepath)\n\n flashcards_list = self.db.get_flashcards_from_db(self.current_user[\"id\"])\n\n if flashcards_list == []:\n self.display_msg(\"Oops!\", \"There are no cards to make an anki deck with.\\nUse the flashcard generator in the top right corner to make some.\")\n return 0\n try:\n self.anki_gen.start_everything(flashcards_list,deck_name,filepath)\n self.db.delete_all_flashcards_for_user(self.current_user[\"id\"])\n self.display_msg(\"Ok\",f\"The anki deck was successfully created and saved in this location: \\n {filepath}\")\n except Exception as e:\n self.display_msg(\"oh dear\",f\"there was an error trying to export your flashcards.\\nWhy don't you try again? \\n\\nError Message: {e}\")\n logging.exception(\"while trying to export flashcards\")\n \n\n \n def load_audio(self, filePath):\n # check if audio file exists\n expected_filepath_wav = str(self.get_folder_from_path(filePath)) + str(self.get_filename_from_path(filePath)) + \".wav\"\n expected_filepath_mp3 = str(self.get_folder_from_path(filePath)) + str(self.get_filename_from_path(filePath)) + \".mp3\"\n if os.path.exists(expected_filepath_wav):\n self.audio_player.setMedia(QMediaContent(QUrl.fromLocalFile(expected_filepath_wav)))\n self.current_audio = expected_filepath_wav\n elif os.path.exists(expected_filepath_mp3):\n self.audio_player.setMedia(QMediaContent(QUrl.fromLocalFile(expected_filepath_mp3)))\n self.current_audio = expected_filepath_mp3\n else:\n self.audio_player.setMedia(QMediaContent(None))\n self.current_audio = None\n \n def toggle_play_audio(self):\n state = self.audio_player.state()\n if state == 1:\n self.audio_player.pause()\n if self.current_other_settings[\"dark_theme\"]:\n self.left_pane.play_action.setIcon(QIcon(os.path.join(\"src\", \"img\", \"play_dark.png\")))\n else:\n self.left_pane.play_action.setIcon(QIcon(os.path.join(\"src\", \"img\", \"play.png\")))\n if state == 0 or state == 2:\n self.audio_player.play()\n if self.current_other_settings[\"dark_theme\"]:\n self.left_pane.play_action.setIcon(QIcon(os.path.join(\"src\", \"img\", \"pause_dark.png\")))\n else:\n self.left_pane.play_action.setIcon(QIcon(os.path.join(\"src\", \"img\", \"pause.png\")))\n \n def skip_forward(self):\n skip_amount = 3000\n pos = self.audio_player.position()\n new_pos = pos + skip_amount\n self.audio_player.setPosition(new_pos)\n\n def skip_back(self):\n skip_amount = 3000\n pos = self.audio_player.position()\n print(pos)\n new_pos = pos - skip_amount\n self.audio_player.setPosition(new_pos)\n \n def update_slider_duration(self,duration):\n self.left_pane.audio_slider.setMaximum(duration)\n self.left_pane.audio_dur_label.setText(self.timestamp_from_milsec(duration))\n \n def update_slider_position(self,position):\n self.left_pane.audio_slider.blockSignals(True)\n self.left_pane.audio_slider.setValue(position)\n self.left_pane.audio_slider.blockSignals(False)\n self.left_pane.audio_pos_label.setText(self.timestamp_from_milsec(position))\n # get audio timestamp\n min_mil_sec = 5000\n current_pos_mil = position\n duration = 
self.audio_player.duration()\n start_time = self.get_start_time(current_pos_mil,min_mil_sec)\n end_time = self.get_end_time(current_pos_mil,min_mil_sec,duration)\n new_string = f\"Timestamps: {datetime.timedelta(seconds=round(start_time/1000))}--{datetime.timedelta(seconds=round(end_time/1000))}\"\n\n # self.flash_audio_label.setText(new_string)\n \n def timestamp_from_milsec(self,milsec):\n return str(datetime.timedelta(seconds=milsec/1000))[:7]\n\n \n def get_start_time(self,pos,min_mil):\n if pos < min_mil:\n return 0\n else:\n return pos - min_mil\n def get_end_time(self,pos,min_mil,dur):\n if pos < dur - min_mil:\n return pos + min_mil\n else:\n return dur\n\n def get_filename_from_path(self, path):\n filename = os.path.basename(path)\n extension_to_remove = self.get_file_extension_from_path(filename)\n name = filename.replace(extension_to_remove, '')\n return name\n \n def get_file_extension_from_path(self, path):\n parts = os.path.splitext(path)\n if len(parts) == 2:\n return parts[1]\n else:\n logging.debug(f\"there were {len(parts)} parts when trying to get the file extension. There should be 2.\")\n return None\n\n def get_folder_from_path(self, path):\n folder = os.path.dirname(path)\n return folder + \"/\"\n \n def new_blank_document(self):\n self.left_pane.browser.clear()\n self.audio_player.setMedia(QMediaContent(None))\n\n def open_recent_file_window(self):\n self.recent_files_widget.clear()\n self.recent_files_widget.itemClicked.connect(self.recent_file_item_clicked)\n self.recent_files_widget.setWindowTitle(\"Recent Files\")\n self.recent_files_widget.resize(400,500)\n self.recent_files = self.db.get_recent_files(self.current_user[\"id\"])\n if self.recent_files:\n for filepath in self.recent_files:\n if os.path.isfile(filepath[1]): # the file may have been moved, renamed or deleted\n name = self.get_filename_from_path(filepath[1]) + self.get_file_extension_from_path(filepath[1])\n date = filepath[2][:16]\n self.recent_files_widget.addItem(f\"{date} -- {name}\")\n else:\n self.recent_files_widget.addItem(\"You haven't opened or saved any files recently\")\n self.recent_files_widget.move(QtGui.QCursor.pos())\n self.recent_files_widget.show()\n \n def recent_file_item_clicked(self):\n item_clicked = self.recent_files_widget.currentIndex().row()\n if self.recent_files_widget.item(item_clicked).text() == \"You haven't opened or saved any files recently\":\n return\n file_clicked = self.recent_files[item_clicked][1]\n self.open_file(file_clicked)\n self.recent_files_widget.hide()\n \n def delete_user(self):\n name = self.current_user[\"name\"]\n if name == \"default_user\":\n self.display_msg(\"hmmm\",\"You are not allowed to delete this user.\")\n return\n answer = QMessageBox.question(self,\"Sure?\",f'Are you are you want to delete \"{name}\"?', QMessageBox.Yes | QMessageBox.No)\n if answer == QMessageBox.Yes:\n try:\n self.db.delete_user(self.current_user[\"id\"])\n self.settings.user_tab.user_combobox.blockSignals(True)\n self.current_user = {\"id\":1,\"name\":\"default_user\"}\n self.run_start_up_settings()\n self.load_settings_to_settings_page() \n self.settings.user_tab.user_combobox.blockSignals(False)\n except Exception as e:\n logging.exception(\"while trying to delete user\")\n self.display_msg(\"oh crumbs..\",f\"error while trying to delete user.\\n{e}\")\n self.settings.raise_()\n \n\n\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n app.setStyle(\"fusion\")\n mainApp = MainWindow()\n mainApp.setWindowTitle(\"Langsoft\")\n 
mainApp.setWindowIcon(QtGui.QIcon(os.path.join(os.getcwd(),\"src\",\"img\",\"langsoft.png\")))\n mainApp.showMaximized()\n # mainApp.setAttribute(QtCore.Qt.WA_TranslucentBackground, True)\n # GlobalBlur(mainApp.winId(),Dark=True,QWidget=mainApp)\n try:\n sys.exit(app.exec_())\n except Exception as e:\n logging.exception(\"app crashed\")\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":35100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"271473280","text":"from random import seed, random\nfrom math import sqrt, log\nfrom time import sleep\nfrom grafo import Grafo\nimport pdb\n\n\nSEMENTE = seed(100)\n\ndef geraVertice(grafo):\n x = round(random(), 2)\n y = round(random(), 2)\n vertice = (x, y)\n grafo.adicionaVertice(vertice)\n\ndef ligar(grafo):\n v1 = grafo.vertice1\n v2 = grafo.vertice2\n grafo.peso = sqrt((v1[0]-v2[0])**2 + (v1[1]-v2[1])**2)\n\n if(v1[0] < v2[0]):\n grafo.contador+=1\n grafo.adicionaAresta()\n\ndef imprimeGrafo(grafo):\n print(\"Ligações possíveis: {}\".format(grafo.ligacoes))\n print(\"Numero de vertices: {}\".format(len(grafo.vertices)))\n print(\"Numero de arestas: {}\".format(len(grafo.arestas)))\n '''\n for i, ligacao in enumerate(grafo.arestas):\n print(\"{}: {}\".format(i, ligacao))\n sleep(1)\n '''\n print()\n\ndef geraGrafo(tamanho):\n grafo = Grafo()\n grafo.ligacoes = int(log(tamanho, 2))\n\n for _ in range(tamanho):\n geraVertice(grafo)\n\n for i in range(tamanho):\n grafo.vertice1 = grafo.vertices[i]\n grafo.contador = 0\n for j in range(tamanho):\n if(grafo.contador < grafo.ligacoes):\n grafo.vertice2 = grafo.vertices[j]\n ligar(grafo)\n else:\n break\n\n imprimeGrafo(grafo)\n\ngeraGrafo(50)\ngeraGrafo(100)\ngeraGrafo(500)\ngeraGrafo(1000)","sub_path":"Matérias/TEG/trabalho_final/gera.py","file_name":"gera.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"473518677","text":"from django.shortcuts import render\nfrom django.core.mail import mail_admins\n\nfrom facebook_albums.models import SkipAlbum\nfrom utils.facebook import get_access_token, get_albums, get_album, get_album_name\n\n\n# Create your views here.\ndef all_albums(request):\n navitas = \"273919669306465\"\n token = get_access_token()\n albums = get_albums(token, navitas)\n error = None\n try:\n albums = albums['albums']['data']\n albums[:] = [d for d in albums if d.get('id') not in SkipAlbum.objects.all().values_list('album_id', flat=True)]\n albums = sorted(albums, key=lambda k: k['updated_time'], reverse=True)\n except KeyError:\n error = albums['error']\n mail_admins(\"Navitas.se: Error\", str(error))\n return render(request, 'facebook_albums/landing.html', {'error': error, 'albums': albums})\n\n\ndef one_album(request, pk):\n token = get_access_token()\n return render(request, 'facebook_albums/album.html', {'album': get_album(token, pk), 'album_name': get_album_name(token, pk)})\n","sub_path":"navitas/facebook_albums/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"635825785","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0011_auto_20150306_1210'),\n ]\n\n operations = [\n migrations.AddField(\n 
model_name='stock',\n name='start_date',\n field=models.DateField(default=django.utils.timezone.now, verbose_name='Дата начала'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='stock',\n name='title',\n field=models.CharField(null=True, max_length=128, blank=True, verbose_name='Название'),\n preserve_default=True,\n ),\n ]\n","sub_path":"shop/migrations/0012_auto_20150306_1326.py","file_name":"0012_auto_20150306_1326.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"306388076","text":"#-*- coding: utf-8 -*-\nimport json\nimport requests\nimport re\n\nfor line in open(\"jawiki-country.json\", \"r\"):\n uk = json.loads(line)\n if uk[\"title\"] == u\"イギリス\":\n break\n\nfor line in uk[\"text\"].split(\"\\n\"):\n if re.search(r\".+ = .+\", line):\n word = line.strip(\"|\").split(\" = \")\n if word[0] == \"国旗画像\":\n file_name = word[1]\n break\n\nendpoint = \"http://en.wikipedia.org/w/api.php\"\nparams = {'action': 'query', 'prop': 'imageinfo', 'iiprop': 'url', 'format': 'json', 'titles': 'File:{}'.format(file_name)}\n\nresponse = requests.get(endpoint, params=params)\ndic = response.json()\n\nprint (dic[\"query\"][\"pages\"][\"23473560\"][\"imageinfo\"][0][\"url\"])\n\n","sub_path":"Hayahide/chapter03/knock29.py","file_name":"knock29.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"649971767","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/herbert/dev/python/sctdev/simpleproject/simpleproject/../../communitytools/sphenecoll/sphene/sphblog/utils.py\n# Compiled at: 2012-03-17 12:42:14\nimport re, unicodedata\nfrom htmlentitydefs import name2codepoint\nfrom django.utils.encoding import smart_unicode, force_unicode\nfrom slughifi import slughifi\n\ndef slugify(s, entities=True, decimal=True, hexadecimal=True, model=None, slug_field='slug', pk=None):\n s = smart_unicode(s)\n if len(s) > 40:\n s = s[:40]\n s = slughifi(s)\n slug = s\n if model:\n\n def get_query():\n query = model.objects.filter(**{slug_field: slug})\n if pk:\n query = query.exclude(pk=pk)\n return query\n\n counter = 2\n while get_query():\n slug = '%s-%s' % (s, counter)\n counter += 1\n\n return slug","sub_path":"pycfiles/django-sct-0.7.tar/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"561530594","text":"from os import close\nimport pandas as pd\nimport pandas_datareader.data as web\nimport datetime as dt\n\ndef main():\n #-----------------------------------\n #1 - Get basic stock pricing on day-to-day basis\n #-----------------------------------\n \n startdate = dt.datetime(2021,1,1)\n enddate = dt.datetime(2021,7,1)\n\n df = web.DataReader('VIPS', 'yahoo', startdate, enddate)\n df.reset_index(inplace=True,drop=False)\n df['Date'] \n\n\n # Print first couple data entries to make sure data is correct\n print(df.head())\n\n # 1a - Save to CSV\n #df.to_csv('tesla.csv', )\n\n # Read from CSV\n #df = pd.read_csv('tesla.csv')\n print(df)\n\n # 1b - Find Average\n \n #Print average for 'High' column\n print(df[\"High\"].mean())\n #Print average for 'Low' column using dot notation\n print(df.Low.mean())\n #Print mean of multiple columns\n print(df[[\"Open\", 
\"Close\"]].mean())\n #General description of dataframe\n print(df.describe())\n\n #-----------------------------------\n # 4\n # Johnny's circumstances:\n # - He invests $1,000 at the start of each month, regardless of price (i.e., Dollar Cost Average (DCA))\n # How much is his investment worth at the end of 2020?\n #-----------------------------------\n\n #variable to hold data from the date column\n dateCol = df[\"Date\"]\n\n #variables for Johnny's savings, how much he invests each time, and his total return\n savingsJohnny = 12000\n invAmount = 1000\n resultJohnny = 0\n\n #array to hold the value of TSLA stock each time he trades\n investments = []\n\n #dynamic variable to see if the month has changed\n month_hold = 0\n\n #loop through the entire length of the dataframe\n for i in range(len(openCol-1)):\n if (dateCol[i].month != month_hold):\n investments.append(openCol[i])\n month_hold = dateCol[i].month\n\n #calculate how much each seperate investment has grown and add to the total\n for val in investments:\n resultJohnny += (lastDayVal - val)*invAmount/val\n\n #add how much Tammy has invested to reflect proper amount in her account\n resultJohnny += savingsJohnny\n print(\"Johnny's initial investment grew to ${:,.2f}\".format(resultJohnny))\n \n \nif __name__ == \"__main__\":\n main()","sub_path":".problems/strategy_johnny.py","file_name":"strategy_johnny.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"171142383","text":"from elasticsearch_dsl import DocType, Integer, Text, Keyword, Date, MetaField, GeoPoint, analyzer, tokenizer\n\n\"\"\"\nResponse Record mapping model\n///\nKeyword: Used for filtering\nTEXT: Used for analyzing and searching\nGeo-point: geo_point for lat/lon points\nGeo-Shape: geo_shape for complex shapes like polygons\n///\n\nEach fields have some features.\nReference: https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-fields.html\n\"\"\"\nsudachi_tokenizer = tokenizer('sudachi_tokenizer',\n mode='search',\n settings_path='/etc/elasticsearch/sudachi/sudachi.json',\n resources_path='/etc/elasticsearch/sudachi',\n type='sudachi_tokenizer',\n discard_punctuation=True)\n\nsudachi_analyzer = analyzer('sudachi_analyzer',\n tokenizer=sudachi_tokenizer,\n filter=[],\n type='custom')\n\n\nclass ResponseRecordMapping(DocType):\n pk = Integer()\n\n id = Integer() # unified id\n date = Date() # reported date\n\n classification = Keyword() # disaster phase for filtering\n disaster_name = Text(analyzer=sudachi_analyzer) # A disaster name related to a record\n\n title = Text(analyzer=sudachi_analyzer, fields={'raw': Keyword()}) # response section\n body = Text(analyzer=sudachi_analyzer) # record contents\n\n phase = Keyword() # disaster phase for filtering\n season = Keyword() # A season for filtering\n\n importance = Integer() # an importance of a report content\n location = GeoPoint() # A place\n\n created_at = Date()\n updated_at = Date()\n\n class Meta:\n index = 'ddss_response_records'\n\n","sub_path":"ElasticSearchPkg/ResponseRecordMappingModel.py","file_name":"ResponseRecordMappingModel.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"260705249","text":"#! 
/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2017/12/12 20:08\n# @Author : Shiyu Li\n# @Software: PyCharm\n\nimport tensorflow as tf\nimport numpy as np\nfrom PIL import Image\nimport cv2\n\ndef dis(img):\n img2 = img\n if img.dtype != 'uint8':\n img2 = img2.astype(np.uint8)\n\n cv2.namedWindow(\"Image\")\n cv2.imshow(\"Image\", img2)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\ndef read_and_decode(filename):\n \"\"\" Return tensor to read from TFRecord \"\"\"\n filename_queue = tf.train.string_input_producer([filename])\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(serialized_example,\n features={\n 'label': tf.FixedLenFeature([], tf.int64),\n 'img_raw' : tf.FixedLenFeature([], tf.string),\n })\n img = tf.decode_raw(features['img_raw'], tf.float32)\n img = tf.reshape(img, [256, 256, 1])\n # img = tf.cast(img, tf.float32) # if you want to use tfrecords as input.\n label = tf.cast(features['label'], tf.int32)\n return img, label\n\n# visualize data\nimg, label = read_and_decode(\"own_CroppedBossBase-1.0-256x256.tfrecords\")\nimg_batch, label_batch = tf.train.shuffle_batch([img, label],\n batch_size=4,\n capacity=50000,\n min_after_dequeue=10000,\n num_threads=1)\nprint(\"img_batch : %s\" % img_batch._shape)\nprint(\"label_batch : %s\" % label_batch._shape)\n\ninit = tf.global_variables_initializer()\nwith tf.Session() as sess:\n sess.run(init)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n val, l = sess.run([img_batch, label_batch])\n dis(val[0])\n\n coord.request_stop()\n coord.join(threads)\n sess.close()","sub_path":"Adversarial_sample/BP_based/Steganalysis/make_data/decode_and_visualize.py","file_name":"decode_and_visualize.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"463033767","text":"import analysistools as atools\nimport pandas as pd\nimport pickle\nimport os\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='')\n\nparser.add_argument('-o','--output', default=\"\", type=str, \n help='output directory for the generated files')\nparser.add_argument('-i','--input', default='', type=str, \n help='input directory, files must be in xyza format')\n\nargs = parser.parse_args()\n\nxyzaPath = args.input\noutPath = args.output\n\ns = {}\n\ntry:\n s = atools.generateSummaries(xyzaPath)\nexcept:\n print('something went wrong')\n\nwith open(os.path.join(outPath,'trajectories.pickle'), 'wb') as f:\n # Pickle the 'data' dictionary using the highest protocol available.\n pickle.dump(s, f, pickle.HIGHEST_PROTOCOL)\n\ndf = pd.DataFrame()\ndata = []\nfor k,v in s.iteritems():\n data.append((k,v['density'],v['clustering'],v['bt'],'budding' if v['bt'] > 0.0 else 'non budding'))\n\ndf = pd.DataFrame(data, columns = ['file','den','cls','bud', 'cat' ]) \ndf.to_csv(os.path.join(outPath,'trajectories-summary.csv'),index=False)","sub_path":"tools/summarybuilder.py","file_name":"summarybuilder.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"613724354","text":"#Sample Test program to track prices on Amazon for a given product and send email for price drop\n\n\n#import urllib.request\n\n#importing requests library module\nimport requests\n\n#Importing BeautifulSoup library\n#BeautifulSoup is used for parsing and extracting data from HTML webpages\nfrom bs4 
import BeautifulSoup\n\n#input the URL of the product to scrape web pages\nURL = 'https://www.amazon.com/dp/B07FPP6TB5/ref=gwd_dc_tve_cm?pf_rd_p=7d3126cc-ba3f-41cc-8f4a-dc96bd30fa11&pf_rd_r=K7Y5FJBRWB4QY21EM7FK'\n\n# URL = 'https://www.amazon.com/Echo-Dot/dp/B07FZ8S74R/ref=sr_1_1?keywords=alexa&qid=1575303017&smid=ATVPDKIKX0DER&sr=8-1'\n\n# URL = 'https://www.bestbuy.com/site/tcl-75-class-led-4-series-2160p-smart-4k-uhd-tv-with-hdr-roku-tv/6319340.p?skuId=6319340'\n\n# URL = 'https://www.google.com/shopping/product/1684588081497658436?psb=1&tbm=shop&prds=epd%3A8233995082323007187%2Cprmr%3A3&ved=0CGEQ0FUoAGoXChMIh_e677CX5gIVjgezAB0sFQs7EAM'\n\n#Configure a \"User-Agent\", which is a unique identifier for every device accessing a webpage\nheaders = {\"User-Agent\": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}\n\n#Get requests of the webpage\npage = requests.get(URL, headers)\n\n#Parse the contents of the webpage, input is \"contents of the page\"\nsoup = BeautifulSoup(page.content, 'html.parser')\n\nprint(\"Printing status code response of the webpage\",page.status_code)\nprint(\"The response of the page content is...\", page.json)\n# print(\"START OF HTML PARSE WITHOUT PRETTIFY\")\n# p = print(\"Printing contents of the page without Prettify\", soup)\n# print(\"END OF HTML PARSE WITHOUT PRETTIFY\")\n# print(\"START OF HTML PARSE WITH PRETTIFY\")\n# #Display contents of the page as list using \"prettify\" method in \"BeautifulSoup\"\n# #prettify method from BeautifulSoup will convert a BS- BeautifulSoup?? Parse tree to a unicode string\n# q = print(\"Printing contents of the page using Prettify\", soup.prettify)\n# print(\"END OF HTML PARSE WITH PRETTIFY\")\n\n# #Testing to check both contents WITH and WITHOUT PRETTIFY are same, (contents are same)\n# if p==q:\n# print(\"Both texts are same, there's no difference with PRETTIFY\")\n\n#Find the first tag with the id=\"productTitle\" with the \"find\" method, use \"find_all\" to find list of all tags\nprint(\"Product Title is \", soup.find(id=\"productTitle\"))\n\n#Find the first tag with the id=\"price_inside_buybox\" with the \"find\" method\nprint(\"Product Price inside Buybox is \", soup.find(id=\"price_inside_buybox\"))\n\n\n#print(soup.find_all(id=\"productTitle\"))\n\n","sub_path":"Tracking Prices/price_tracking.py","file_name":"price_tracking.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"538623081","text":"import logging\n\nclass Robozinho():\n '''\n qtddeposicoes = Quantidade de posicoes/garrafas que o robo pode andar\n posicao = O robo sempre iniciará da posicao 0, ou posicao sem garrafa\n '''\n def __init__(self, qtdposicoes=6, posicao=0):\n self._posicoes = list(range(qtdposicoes))\n self._posicao = posicao\n\n def posicionar(self, posicao):\n if posicao <= self._posicoes[-1]:\n self._posicao = posicao\n # mover o robo até no maximo a ultima posicao da lista qtddeposicoes\n self.mover(self._posicao)\n else:\n logging.error('Posicao {} fora do range. 
Maximo={}'.format(posicao, self._posicoes[-1]))\n\n def verificar_posicao_atual(self):\n #print('Movendo carrinho para {}'.format(self._posicao))\n return self._posicao\n\n def mover(self, posicao):\n ##<<>>\n pass\n\n def despejar_conteudo(self):\n self.posicionar(self._posicao)\n self.levanta_braco(self._tempo)\n\n def levanta_braco(self, tempo):\n self._tempo = tempo\n #<<>>\n return 'Levantando braco por {} segundos ate o despejador'.format(tempo)\n\n\nif __name__ == '__main__':\n robo = Robozinho()\n robo.posicionar(1)\n print('Posicao Atual: ' + robo.verificar_posicao_atual().__str__())\n robo.posicionar(4)\n print('Posicao Atual: ' + robo.verificar_posicao_atual().__str__())\n print(robo.levanta_braco(10))\n robo.posicionar(7)\n print('Posicao Atual: ' + robo.verificar_posicao_atual().__str__())\n\n\n","sub_path":"robozinho.py","file_name":"robozinho.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"211540710","text":"from config import MONGODB_URL, MONGODB_DATABASE, MONGODB_COLLECTION\n\nimport pymongo\n\n\nclass MongoDbHandler:\n\n def send_mongodb(self, data):\n\n myClient = pymongo.MongoClient(MONGODB_URL)\n myDb = myClient[MONGODB_DATABASE]\n myCol = myDb[MONGODB_COLLECTION]\n\n myCol.insert_many(data)\n\n return myCol\n","sub_path":"mongodb_handler.py","file_name":"mongodb_handler.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"14273095","text":"from src.ada.core.data.generators import GeneratorHelper\n\n\nclass DownwardTrendGenerator:\n def __init__(self, file, length):\n self.file = file\n self.length = length\n self.waves = {\n 43200 : [10, 0],\n 780 : [5, 22],\n 12 : [.1, 2],\n 532 : [1.5, 32],\n 23 : [.5, 15]\n }\n\n def generate(self):\n data = self.base_function()\n plot_names = [\"downwardtrend_base_nosine\", \"downwardtrend_base\"]\n return GeneratorHelper.base_generate(data, self, plot_names)\n\n def base_function(self):\n data = [0] * self.length\n for i in range(0, self.length):\n data[i] = 100 - 50 * (i / self.length)\n\n return data\n","sub_path":"src/ada/core/data/generators/downwardtrend/DownwardTrendGenerator.py","file_name":"DownwardTrendGenerator.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"560972759","text":"import numpy as np\nimport pandas as pd\nimport pickle\nimport os\nimport argparse\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nimport collections\nimport csv\nimport math\n\nclass Explor():\n\n def __init__(self, terms, courses, demograph):\n self.terms = terms\n self.courses = courses\n self.demograph = demograph\n\n def Prob_Majors(self):\n # Exploratory Analysis -- Probation and Majors\n Probed_Students = self.terms[self.terms['probation'] != 0]['SubjectID'].unique()\n print(self.demograph['SubjectID'].nunique())\n print(self.courses['SubjectID'].nunique())\n print(self.terms['SubjectID'].nunique())\n Major_Population = self.demograph.groupby('B - Major1 Code')['B - Major1 Code'].count()\n Major_Probation = self.demograph[self.demograph['SubjectID'].isin(Probed_Students)] \\\n .groupby('B - Major1 Code')['B - Major1 Code'].count()\n Hajim_Majors = ['BME', 'CHE', 'CSC','CSA', 'ECE', 'IDE', 'ES', 'ME', 'OPT']\n with open('./Saved Models/Major_Probation.csv', 'w') as f:\n f.write('Major' + ' ' + 'Percentage' + ' ' + 
'Aboslute_Number\\n')\n for name in Major_Probation.index:\n out = name + ' ' + (Major_Probation[name]/Major_Population[name]).astype(str) + ' ' + Major_Probation[name].astype(str)\n f.write(\"%s\\n\" % out)\n\n Hajim_Prob = Major_Probation[Hajim_Majors]\n Hajim_Popu = Major_Population[Hajim_Majors]\n with open('./Saved Models/Hajim_Probation.csv', 'w') as f:\n f.write('Major' + ' ' + 'Percentage' + ' ' + 'Aboslute_Number\\n')\n for name in Hajim_Prob.index:\n out = name + ' ' + (Hajim_Prob[name]/Hajim_Popu[name]).astype(str) + ' ' + Hajim_Prob[name].astype(str)\n f.write(\"%s\\n\" % out)\n print('Average probation rate for Hajim school is ' + (Hajim_Prob.sum()/Hajim_Popu.sum()).astype(str))\n print('Average probation rate in general is ' + str(len(Probed_Students)/self.courses['SubjectID'].nunique()))\n\n def Changing_Majors(self):\n def changed_major(ID):\n Course_Data = self.courses[self.courses['SubjectID'] == ID]['Ps1 Major1 Code']\n original_major = Course_Data.iloc[0]\n ended_major = Course_Data.iloc[-1]\n if original_major == 'UNC':\n return True\n return not original_major == ended_major\n\n def find_grades(ID, changed):\n Term_Data = self.terms[self.terms['SubjectID'] == ID]['Term GPA'].tolist()\n if not changed:\n pre_1, pre_2, aft_1, aft_2 = np.NaN, np.NaN, np.NaN, np.NaN\n else:\n pre_1, pre_2, aft_1, aft_2 = np.NaN, np.NaN, np.NaN, np.NaN\n for i in range(0, len(Term_Data)):\n if Term_Data[i] < 2.0 and not Term_Data[i] == 0:\n if i-1 > 0:\n if Term_Data[i-1] > 3.3:\n break\n pre_1 = Term_Data[i-1]\n if i-2 > 0:\n pre_2 = Term_Data[i-2]\n for j in range(i, len(Term_Data)):\n if Term_Data[j] >= 2.0 and Term_Data[j] < 3.5:\n aft_1 = Term_Data[j]\n break\n for j in range(i, len(Term_Data)-1):\n if Term_Data[j] >= 2.0 and Term_Data[j] < 3.5:\n aft_2 = Term_Data[j+1]\n break\n break\n return pd.DataFrame([[pre_1, pre_2, aft_1, aft_2]], columns=['pre_1', 'pre_2', 'aft_1', 'aft_2'])\n\n GPA_Changed = pd.DataFrame(columns=['pre_1', 'pre_2', 'aft_1', 'aft_2'])\n GPA_Unchanged = pd.DataFrame(columns=['pre_1', 'pre_2', 'aft_1', 'aft_2'])\n for ID in self.courses[self.courses['Ps1 Acad Standing Desc'] == 'PROBATION']['SubjectID'].unique()[:-1]:\n if changed_major(ID):\n GPA_Changed = GPA_Changed.append(find_grades(ID, True), ignore_index=True)\n else:\n GPA_Unchanged = GPA_Unchanged.append(find_grades(ID, False), ignore_index=True)\n GPA_Unchanged.fillna(value=GPA_Unchanged.mean(), inplace=True)\n GPA_Changed.fillna(value=GPA_Changed.mean(), inplace=True)\n def graph_changing(GPA_Changed, GPA_Unchanged):\n data = [\n go.Scatter(\n y=GPA_Unchanged.mean(), # assign x as the dataframe column 'x'\n x=GPA_Unchanged.mean().index,\n fill= None,\n line = dict(shape='spline'),\n name = 'Unchanged'\n ),\n go.Scatter(\n y=GPA_Changed.mean(), # assign x as the dataframe column 'x'\n x=GPA_Changed.mean().index,\n fill='tonexty',\n line = dict(shape='spline'),\n name = 'Changed'\n )\n ]\n url = py.plot(data)\n return\n\n graph_changing(GPA_Changed, GPA_Unchanged)\n with open('./Saved Models/Culm_GPAs.csv','w') as f:\n for index, rows in GPA_Unchanged.iterrows():\n if not rows[1] == 0.0:\n f.write(\"%s\\n\" % str(round(rows[1], 2)))\n for index, rows in GPA_Changed.iterrows():\n if not rows[1] == 0.0:\n f.write(\"%s\\n\" % str(round(rows[1], 2)))\n\n def Heatmap(self):\n # Exploratory Analysis -- Heatmap\n Heatmap_List = self.demograph[demograph['probation'] > 0]\n Heatmap_List = Heatmap_List.groupby('HS State Prov')['HS State Prov'].count()\n Heatmap_Total = self.demograph.groupby('HS State Prov')['HS 
State Prov'].count()\n General_Demo = self.demograph.groupby('HS State Prov')['HS State Prov'].count()\n with open('./Saved Models/heatmap.csv', 'w') as f:\n f.write('State'+' '+'Percentage'+' '+'Aboslute\\n')\n for i in Heatmap_List.index:\n out = i+' '+(Heatmap_List[i]/Heatmap_Total[i]*100).astype(str)+' '+Heatmap_List[i].astype(str)\n f.write(\"%s\\n\" % out)\n with open('./Saved Models/general_demo.csv', 'w') as f:\n for i in General_Demo.index:\n out = i + ' ' + General_Demo[i].astype(str)\n f.write(\"%s\\n\" % out)\n\n def Culm_GPA(self):\n # Exploratory Analysis -- Outlier\n Culm_GPAs = []\n Probed_Students = self.terms[self.terms['probation'] != 0]['SubjectID'].unique()\n #Probed_Students = self.terms['SubjectID'].unique()\n for ID in Probed_Students:\n Term_Data = self.terms[self.terms['SubjectID'] == ID]['Term GPA'].tolist()\n\n Culm_GPAs.append(Term_Data[-1])\n with open('./Saved Models/Culm_GPAs.csv','w') as f:\n for gpas in Culm_GPAs:\n f.write(\"%f\\n\" % gpas)\n def Sankey(self):\n prob_students = self.courses[self.courses['Ps1 Acad Standing Desc']=='PROBATION']['SubjectID']\n print(prob_students.shape)\n major_to_disp = collections.defaultdict(str)\n write_dic = collections.defaultdict(list)\n major_disp_series = self.demograph[['Degree Major1 Discipline Code', 'Degree Major1 Code']]\n for i, row in major_disp_series.iterrows():\n disp, major = row[0], row[1]\n if isinstance(disp, str):\n major_to_disp[major] = disp\n for id in prob_students:\n student_info = self.courses[self.courses['SubjectID']==id]\n for i, row in student_info.iterrows():\n if row['Ps1 Acad Standing Desc'] == 'PROBATION':\n write_dic[id].append(major_to_disp[student_info.at[i, 'Ps1 Major1 Code']])\n i += 1\n while i <= student_info.shape[0]:\n if student_info.at[i, 'Ps1 Major1 Code'] != 'PROBATION' or i == student_info.shape[0]:\n write_dic[id].append(major_to_disp[student_info.at[i, 'Ps1 Major1 Code']])\n if i == student_info.shape[0]:\n write_dic[id].append('FAILED')\n else:\n write_dic[id].append(major_to_disp[student_info.at[student_info.shape[0], 'Ps1 Major1 Code']])\n break\n i += 1\n break\n with open('./Saved Models/Sankey.csv', 'w', newline=\"\") as f:\n writer = csv.writer(f)\n for k,v in write_dic.items():\n writer.writerow([k, v])\n\n def analysis(self):\n self.Changing_Majors()\n","sub_path":"Code/exploratory.py","file_name":"exploratory.py","file_ext":"py","file_size_in_byte":8864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"468959933","text":"import smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom jinja2 import Environment\n\nfrom flask import current_app as app, render_template\n\nclass Mailer():\n\n def send_email(self, receiver, subject, html):\n msg = MIMEMultipart('alternative')\n msg['Subject'] = subject\n msg['From'] = app.config.get('SMTP')['LOGIN']\n msg['To'] = receiver\n\n part = MIMEText(html, 'html', \"utf-8\")\n msg.attach(part)\n\n s = smtplib.SMTP(app.config.get('SMTP')['HOST'], app.config.get('SMTP')['PORT'])\n s.starttls()\n s.login(app.config.get('SMTP')['LOGIN'], app.config.get('SMTP')['PASSWORD'])\n s.sendmail(app.config.get('SMTP')['LOGIN'], \"savo_pusica@mail.ru\", msg.as_string().encode('ascii'))\n s.quit()\n\n def build_report(self, report_type, data):\n html = render_template(\"{report_type}.html\".format(report_type=report_type), data=data)\n self.send_email('savo_pusica@mail.ru', subject=report_type, 
html=html)\n","sub_path":"app/services/mailer.py","file_name":"mailer.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"190152824","text":"import logging\n\n'''\nusing the logger to log error,info and debug info \nto file\n'''\ndef log_lines_to_file(in_filename):\n \n logging.basicConfig(filename = in_filename,level=logging.DEBUG)\n logger = logging.getLogger(__name__)\n\n logger.info('Start reading database')\n # read database here\n records = {'john': 55, 'tom': 66}\n logger.debug('Records: %s', records)\n logger.info('Updating records ...')\n # update records here\n logger.info('Finish updating records')\n\n'''\nUsing generators to avoid builiding the\nwhole list in memory.\nNote: gen can only be used once\n'''\ndef say_no_to_lists():\n gen = (i for i in range(10) if i>5)\n min(gen)\n #for i in gen:\n # print(i)\n\n\nif __name__ == '__main__':\n #log_lines_to_file('debug.log')\n say_no_to_lists()\n\n","sub_path":"Pro_Python/propy1.py","file_name":"propy1.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"459784188","text":"import requests\r\n\r\napi_key = 'asdf1234asdf1234asdf1234'\r\n\r\nurl = 'https://panacea.threatgrid.com/api/v2/samples'\r\n\r\nfile_name = 'file.exe'\r\n\r\nparameters = {'api_key': api_key}\r\n\r\nwith open(file_name, 'rb') as sample:\r\n\tr = requests.post(url, files={'sample': sample}, params=parameters)\r\n\r\nprint(r.json())\r\n","sub_path":"06_submit_sample.py","file_name":"06_submit_sample.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"342460497","text":"from robocode_ls_core.workspace import Workspace, Document\nfrom robocode_ls_core.basic import overrides\nfrom robocode_ls_core.cache import instance_cache\nfrom robotframework_ls.constants import NULL\nfrom robocode_ls_core.robotframework_log import get_logger\n\nlog = get_logger(__name__)\n\n\nclass RobotWorkspace(Workspace):\n def __init__(\n self, root_uri, workspace_folders=None, libspec_manager=NULL, generate_ast=True\n ):\n self.libspec_manager = libspec_manager\n\n Workspace.__init__(self, root_uri, workspace_folders=workspace_folders)\n self._generate_ast = generate_ast\n\n @overrides(Workspace.add_folder)\n def add_folder(self, folder):\n Workspace.add_folder(self, folder)\n self.libspec_manager.add_workspace_folder(folder.uri)\n\n @overrides(Workspace.remove_folder)\n def remove_folder(self, folder_uri):\n Workspace.remove_folder(self, folder_uri)\n self.libspec_manager.remove_workspace_folder(folder_uri)\n\n def _create_document(self, doc_uri, source=None, version=None):\n return RobotDocument(doc_uri, source, version, generate_ast=self._generate_ast)\n\n\nclass RobotDocument(Document):\n\n TYPE_TEST_CASE = \"test_case\"\n TYPE_INIT = \"init\"\n TYPE_RESOURCE = \"resource\"\n\n def __init__(self, uri, source=None, version=None, generate_ast=True):\n Document.__init__(self, uri, source=source, version=version)\n self._generate_ast = generate_ast\n self._ast = None\n\n @overrides(Document._clear_caches)\n def _clear_caches(self):\n Document._clear_caches(self)\n self.get_ast.cache_clear(self)\n\n def get_type(self):\n path = self.path\n if not path:\n log.info(\"RobotDocument path empty.\")\n return self.TYPE_TEST_CASE\n\n import os.path\n\n basename = os.path.basename(path)\n if 
basename.startswith(\"__init__\"):\n return self.TYPE_INIT\n\n if basename.endswith(\".resource\"):\n return self.TYPE_RESOURCE\n\n return self.TYPE_TEST_CASE\n\n @instance_cache\n def get_ast(self):\n if not self._generate_ast:\n raise AssertionError(\n \"The AST can only be accessed in the RobotFrameworkServerApi, not in the RobotFrameworkLanguageServer.\"\n )\n from robot.api import get_model, get_resource_model, get_init_model\n\n try:\n source = self.source\n except:\n log.exception(\"Error getting source for: %s\" % (self.uri,))\n source = \"\"\n\n t = self.get_type()\n if t == self.TYPE_TEST_CASE:\n return get_model(source)\n\n elif t == self.TYPE_RESOURCE:\n return get_resource_model(source)\n\n elif t == self.TYPE_INIT:\n return get_init_model(source)\n\n else:\n log.critical(\"Unrecognized section: %s\", t)\n return get_model(source)\n\n def find_line_with_contents(self, contents: str) -> int:\n \"\"\"\n :param contents:\n The contents to be found.\n \n :return:\n The 0-based index of the contents.\n \"\"\"\n for i, line in enumerate(self.iter_lines()):\n if contents in line:\n return i\n else:\n raise AssertionError(f\"Did not find >>{contents}<< in doc.\")\n","sub_path":"robotframework-ls/src/robotframework_ls/impl/robot_workspace.py","file_name":"robot_workspace.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"117204418","text":"import sys\n\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton\nfrom PyQt5.QtGui import QPainter, QColor\n\nfrom random import randint\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.d = randint(25, 250)\n self.do_paint = False\n self.setGeometry(282, 200, 300, 300)\n self.setWindowTitle('Git и желтые окружности')\n\n self.push_btn = QPushButton(self)\n self.push_btn.resize(170, 60)\n self.push_btn.move(65, 100)\n self.push_btn.setText('Нарисовать круг')\n self.push_btn.clicked.connect(self.paint)\n\n def draw_elipse(self, qp):\n qp.setBrush(QColor('yellow'))\n qp.drawEllipse(10, 10, self.d, self.d)\n\n def paintEvent(self, event):\n if self.do_paint:\n qp = QPainter()\n qp.begin(self)\n self.draw_elipse(qp)\n qp.end()\n\n def paint(self):\n self.do_paint = True\n self.repaint()\n self.push_btn.hide()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MainWindow()\n ex.show()\n sys.exit(app.exec())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"398196800","text":"class Node(object) :\r\n def __init__(self, data) :\r\n self.data = data\r\n self.next = None\r\n\r\n\r\nclass Link(object) :\r\n def __init__(self) :\r\n self.head = None\r\n\r\n def push(self,node):\r\n if self.head == None :\r\n self.head = node\r\n return\r\n\r\n else :\r\n cur = self.head\r\n while cur.next != None :\r\n cur = cur.next\r\n cur.next = node\r\n\r\n def pop(self) :\r\n if self.head == None :\r\n print(\"No element\")\r\n return\r\n\r\n else :\r\n cur = self.head\r\n prev = None\r\n nextn = self.head.next\r\n while cur.next != None :\r\n prev = cur\r\n cur = cur.next\r\n\r\n if prev == None :\r\n self.head = self.head.next\r\n\r\n else :\r\n prev.next = None\r\n\r\n def print(self) :\r\n if self.head == None :\r\n print(\"No Stack\")\r\n return\r\n\r\n cur = self.head\r\n while cur != None :\r\n print(cur.data)\r\n cur = 
cur.next\r\n\r\nif __name__ == \"__main__\" :\r\n s1 = Link()\r\n s1.push(Node(3))\r\n s1.pop()\r\n s1.pop()\r\n s1.print()","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"351611081","text":"import csv\nfrom tokenizer import tokenizer\nimport string\nfrom collections import Counter\nSOS_IDX = 0\nPAD_IDX = 1 # PAD = EOS\n\ntokenizer = tokenizer.RedditTokenizer()\npunctuations = string.punctuation\npunctuations = ''.join(set(punctuations) - set(',.'))\n\nfpath = \"/scratch/yn811/shortjokes.csv\"\n\ndef tokenize(tokenizer, sent, punctuations):\n tokens = tokenizer.tokenize(sent)\n punc_cnt = sum((token in punctuations) for token in tokens)\n if punc_cnt > 0:\n return None\n return [token.lower() for token in tokens if (token not in punctuations)]\n\ndef tokenize_dataset(tokenizer, dataset, punctuations, gram=1):\n from tqdm import tqdm_notebook\n token_dataset = []\n all_tokens = []\n for sample in tqdm_notebook(dataset):\n tokens = tokenize(tokenizer, sample, punctuations)\n if tokens is None:\n continue\n if (len(tokens) <= 40) and (sum(len(w)<2 for w in tokens) <= len(tokens)/3):\n token_dataset.append(tokens)\n all_tokens.extend(tokens)\n return token_dataset, all_tokens\n\ndef build_vocab(all_tokens):\n token_counter = Counter(all_tokens)\n vocab, count = zip(*token_counter.most_common(len(token_counter)))\n id2token = list(vocab)\n token2id = dict(zip(vocab, range(2, 2+len(vocab)))) \n id2token = ['', ''] + id2token\n token2id[''] = PAD_IDX \n token2id[''] = SOS_IDX\n return token2id, id2token\n\n\ndef token2index_dataset(tokens_data):\n indices_data = []\n for tokens in tokens_data:\n index_list = [token2id[token] for token in tokens]\n indices_data.append(index_list)\n return indices_data\n\n# ! pip install git+https://github.com/erikavaris/tokenizer.git\njokes = []\nwith open(fpath) as f:\n reader = csv.reader(f) \n next(reader, None)\n for row in reader:\n jokes.append(row[1])\n\ntoken_dataset, all_tokens = tokenize_dataset(tokenizer, jokes, punctuations)\ntoken2id, id2token = build_vocab(all_tokens)\nidx_data = token2index_dataset(token_dataset)\nprint(\"length of dataset: \", len(id2token))\nimport pickle as pkl\npkl.dump([idx_data, token_dataset, token2id, id2token], open(\"short_jokes-40.pkl\", \"wb\"))\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"416810773","text":"#!/usr/bin/env python\n\nimport os\n\ntry:\n from setuptools import setup\nexcept ImportError as e:\n from distutils.core import setup\n\nrequirements = [\n 'stimela >= 1.0.0',\n 'astropy>=2.0.11',\n 'stimela>=1.3.1',\n 'numpy>=1.13.1',\n 'scipy>=0.19.1',\n 'nbconvert>=5.3.1',\n 'aplpy>=1.1.1',\n 'matplotlib>=2.1.0',\n 'jupyter>=1.0.0',\n 'python-casacore>=2.2.1',\n 'curses-menu>=0.5.0',\n 'lmfit>=0.9.8',\n 'GPy>=1.9.2',\n 'npyscreen @ git+https://github.com/bennahugo/npyscreen.git@6bff9d3b65879dc',\n]\n\nPACKAGE_NAME = 'vermeerkat'\n__version__ = '2.1.0'\n\nsetup(name = PACKAGE_NAME,\n version = __version__,\n description = \"MeerKAT VermeerKAT pipeline\",\n author = \"B. 
Hugo\",\n author_email = \"bhugo@ska.ac.za\",\n url = \"https://github.com/bennahugo/vermeerkat\",\n packages=[PACKAGE_NAME],\n install_requires = requirements,\n include_package_data = True,\n python_requires=\">=3.6\",\n entry_points={\n 'console_scripts': ['vermeerkat=vermeerkat.bin.vermeerkat:main'],\n },\n license=\"GNU GPL v2\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: GNU General Public License v2 (GPLv2)\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python\",\n \"Topic :: Scientific/Engineering :: Astronomy\"\n ]\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"166766303","text":"#!/usr/bin/python -O\n# -*- coding: iso-8859-1 -*-\n# -*- coding: latin-1 -*-\n# by Loreto Notarantonio 2013, February\n# ######################################################################################\nimport sys\nimport os, platform\nimport getpass\n\nimport pwd\n\n\ndef ottieniValori():\n # https://docs.python.org/3.3/library/os.html\n userLogin = os.getlogin()\n userLogin = pwd.getpwuid(os.getuid())[0]\n uid = os.getuid() # current process’s user id\n ruid, euid, suid = os.getresuid() # real, effective, and saved user ids\n rgid, egid, sgid = os.getresgid() # real, effective, and saved group ids.\n\n\n\n\n# ###################################################################################################################\n# # # https://docs.python.org/3.3/library/os.html\n# # # userName: nome dell'utente di cui si vuole il setUID\n# # # uid : uid dell'utente di cui si vuole il setUID\n# # # None : se userName==None e uid==None allora viene ppreso il SAVED-UID\n# #\n# # gv.LN.sys.setUID(gv, gv.JBOSS.userName, exitOnError=False)\n# # gv.LN.sys.setUID(gv, uid=48, exitOnError=True)\n# # gv.LN.sys.setUID(gv)\n# ###################################################################################################################\ndef setUID_OK(gv, userName=None, uid=None, exitOnError=False):\n global TAByel, TABerr, TAB\n\n logger = gv.LN.logger.setLogger(gv, package=__name__)\n calledBy = gv.LN.sys.calledBy\n logger.info('entered - [called by:%s]' % (calledBy(1)))\n\n TAByel = gv.LN.cYELLOW + ' '*8\n TABerr = gv.LN.cERROR + ' '*8\n TAB = gv.LN.cGREEN + ' '*8\n\n\n ruid, euid, suid = os.getresuid() # real, effective, and saved user ids\n\n # ------------------------------\n # - catturiamo lo UID\n # ------------------------------\n if userName != None:\n reqUID = pwd.getpwnam(userName).pw_uid\n elif uid != None:\n reqUID = uid\n else:\n reqUID = ruid # preleva il REAL-UID\n # reqUID = suid # preleva il SAVED-UID\n\n\n logger.info('')\n msg = 'Requested setUID for username:[{}:{}]'.format(pwd.getpwuid(reqUID).pw_name, reqUID)\n logger.info(TAB + msg)\n msg = 'currUser:[{}:{}] ruid:{} euid:{} suid:{} '.format(pwd.getpwuid(os.getuid()).pw_name, os.geteuid(), ruid, euid, suid)\n logger.info(TAB + msg)\n\n # --------------------------------------------\n # - Se il current euid!=0 non possiamo\n # - fare il setUID per un altro user\n # - Switch temporaneo su root per permetterlo\n # ---------------------------------------------\n rCode = 0\n\n if euid != 0 and euid != reqUID:\n rCode = processSetUid(gv, 0, exitOnError)\n\n if rCode == 0:\n rCode = processSetUid(gv, reqUID, exitOnError)\n\n return 
rCode\n\n\n\n#################################################\n#\n#################################################\ndef processSetUid(gv, reqUID, exitOnError=True):\n logger = gv.LN.logger.setLogger(gv, package=__name__)\n calledBy = gv.LN.sys.calledBy\n logger.info('entered - [called by:%s]' % (calledBy(1)))\n\n try:\n reqGID = pwd.getpwuid(reqUID).pw_gid\n os.setegid(reqGID) # mantenere l'ordine GID-UID\n os.seteuid(reqUID) # mantenere l'ordine GID-UID\n msg = 'After setUID: username:{}, euid:{}, egid:{}'.format(pwd.getpwuid(os.geteuid()).pw_name, os.geteuid(), os.getegid())\n logger.info(TAB + msg)\n retVal = 0\n\n except Exception as why:\n errMsg = \"setUID({}) - {} - Permissions Error\".format(pwd.getpwuid(reqUID).pw_name, str(why))\n logger.error(TABerr + errMsg)\n for line in sys.exc_info():\n logger.info (TAByel + str(line))\n if exitOnError:\n gv.LN.exit(gv, 7002, TAB + errMsg, console=False)\n retVal = 1\n\n return retVal\n\n\n\n#################################################\n#\n#################################################\ndef setUID(gv, userName=None, reqUID=None, exitOnError=False):\n logger = gv.LN.logger.setLogger(gv, package=__name__)\n calledBy = gv.LN.sys.calledBy\n logger.info('entered - [called by:%s]' % (calledBy(1)))\n\n TAByel = gv.LN.cYELLOW + ' '*8\n TABerr = gv.LN.cERROR + ' '*8\n TAB = gv.LN.cGREEN + ' '*8\n # ------------------------------\n # - catturiamo lo UID\n # ------------------------------\n if reqUID == None:\n if userName != None:\n reqUID = pwd.getpwnam(userName).pw_uid\n else:\n return 1\n\n try:\n reqGID = pwd.getpwuid(reqUID).pw_gid\n os.setegid(reqGID) # mantenere l'ordine GID-UID\n os.seteuid(reqUID) # mantenere l'ordine GID-UID\n msg = 'After setUID: username:{}, euid:{}, egid:{}'.format(pwd.getpwuid(os.geteuid()).pw_name, os.geteuid(), os.getegid())\n logger.info(TAB + msg)\n retVal = 0\n\n except Exception as why:\n errMsg = \"setUID({}) - {} - Permissions Error\".format(pwd.getpwuid(reqUID).pw_name, str(why))\n logger.error(TABerr + errMsg)\n for line in sys.exc_info():\n logger.info (TAByel + str(line))\n if exitOnError:\n gv.LN.exit(gv, 7002, TAB + errMsg, console=False)\n retVal = 1\n\n return retVal\n\n\n\n\n\n\n\n\n","sub_path":"LnUnixSys/SetUID.py","file_name":"SetUID.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"123120677","text":"list = ['s', 'e', 'e', 'y', 'o', 'u', 's', 'p', 'a', 'c', 'e', 'c', 'o', 'w', 'b', 'o', 'y']\nsearchSymb = input('Введите искомый символ: ')\nprint(list.index(searchSymb))\ndef lineSearch(list, x):\n\tresult = None\n\tfor index, string in enumerate(list):\n\t\tprint(index, string)\n\t\tif string == x:\n\t\t\tresult = index\n\treturn result\n","sub_path":"line search.py","file_name":"line search.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"228522684","text":"\n\nfrom xai.brain.wordbase.nouns._surfboard import _SURFBOARD\n\n#calss header\nclass _SURFBOARDING(_SURFBOARD, ):\n\tdef __init__(self,): \n\t\t_SURFBOARD.__init__(self)\n\t\tself.name = \"SURFBOARDING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"surfboard\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_surfboarding.py","file_name":"_surfboarding.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
+{"seq_id":"426956631","text":"#!/usr/bin/python\n# written by: atholcomb\n# io_utf8.py\n# Printing non-english strings, using utf-8 format\n\n# encoding=utf-8\nimport io\n\nf = io.open(\"abc.txt\", \"wt\", encoding=\"utf-8\")\nf.write(u\"Imagine non-English language here\")\nf.close()\n\ntext = io.open(\"abc.txt\", encoding=\"utf-8\").read()\nprint(text)\n","sub_path":"input_output/io_utf8.py","file_name":"io_utf8.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"585353868","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 19 11:29:34 2019\n\n@author: tiagocabo\n\"\"\"\n\n# need to create an virtualenvironment\n\n\nfrom flask import Flask, render_template, url_for\napp = Flask(__name__) # create app variable\n\n\n# dictionaries to be upload to the blog\n# emulates a database call\nposts = [\n {\n 'author': 'Tiago Cabo',\n 'title': 'My first blog post',\n 'content':'First attempt to add content',\n 'date_posted': 'January 21, 2019'\n },\n {'author': 'Patricia Carneiro',\n 'title': 'Second blog post',\n 'content':'Second attempt to add content',\n 'date_posted': 'January 19, 2019'\n }\n ]\n\n\n\n\n\n\n\n@app.route('/') \n@app.route('/home') #renders backend. route page of our website. slash mean homepage\n# this kind of call are called decorators. \ndef home():\n return render_template('home.html', posts= posts)\n\n# need to make cs code directory\n# run: export FLASK_APP=flaskblog.py\n# then run flask run\n \n# the ip adress is http://127.0.0.1:5000\n# or http://localhost:5000\n\n#debug mode allows the weppage to refresh every change and update\n \n# USE: export FLASK_DEBUG=1\n \n## TO have the debug mode\n \n\n\n@app.route('/about') #renders backend. route page of our website. slash mean homepage\n# this kind of call are called decorators. 
\ndef about():\n return render_template('about.html', title=\"New About Page\")\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n \n \n ","sub_path":"flaskblog.py","file_name":"flaskblog.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"153920079","text":"\"\"\"This file is used to scrape www.census.gov for state FIPS codes.\n\nThere are two ways this file gets the state FIPS codes:\n- Pandas only\n uses read_html to get dataframe from page source\n much more simple of two functions\n- Pandas and BeautifulSoup - add 'soup' as argument to use\n uses BeautifulSoup to read table\n more complex of two functions\n used primarily for learning basic BS4 applications\n\"\"\"\n\nimport sys\nsys.path.insert(0, '../')\n\nimport pandas as pd\nimport pickle_data\nimport requests\n\n\ndef get_fips(sauce, headers):\n '''Returns pandas dataframe with state FIPS codes.'''\n df = pd.read_html(sauce)[0]\n df.dropna(inplace=True)\n df.columns = headers\n return df\n\n\ndef get_fips_soup(sauce, table_summary, headers):\n '''Returns pandas dataframe with state FIPS codes.'''\n from bs4 import BeautifulSoup\n soup = BeautifulSoup(sauce, 'html.parser')\n\n # set up lists to hold columns\n num_cols = len(headers)\n columns = [[] for _ in range(num_cols)]\n\n # get all rows from table\n row_data = [datum.text.strip() for datum in\n soup.find('table', {'summary':table_summary}).find_all('td')]\n rows = [row_data[num_cols*i:num_cols*(i+1)] for i in\n range(int(len(row_data)/num_cols))]\n\n # fill columns from rows\n for row in rows:\n for i in range(num_cols):\n columns[i].append(row[i])\n\n # create dataframe of state fip codes\n return pd.DataFrame(dict(zip(headers,columns)))\n\n \ndef main():\n # get page source\n sauce = requests.get('https://www.census.gov/geo/reference/ansi_statetables.html').text\n\n # table html summary headers of table\n table_summary = ('table showing ANSI state codes for the'\n ' states and the District of Columbia')\n headers = ['Name','Code','Abbr']\n\n # pickle a pandas DataFrame of the table\n if len(sys.argv) > 1 and sys.argv[1] == 'soup':\n pickle_data.pickle_data(get_fips_soup(sauce, table_summary, headers), './fips.pickle')\n else:\n pickle_data.pickle_data(get_fips(sauce, headers), './fips.pickle')\n print('State FIPS codes pickled successfully.')\n\n \nif __name__==\"__main__\":\n main()\nelse:\n print('File should not be imported. 
Only run directly.')\n","sub_path":"census/fips.py","file_name":"fips.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"632461295","text":"from django.core.exceptions import ObjectDoesNotExist\n\nfrom home.util_base import BaseUtil\n\nfrom home.forms.billing_forms import NewProjectBillForm, EditProjectBillForm, ProjectBillFileForm, \\\n ProjectBillReceiptFileForm, ProjectBillPaymentForm, AddProjectBillPaymentIdForm\nfrom home.forms.billing_forms import NewConsultBillForm, EditConsultBillForm, ConsultBillFileForm, \\\n ConsultBillReceiptFileForm, ConsultBillPaymentForm, AddConsultBillPaymentIdForm\nfrom home.forms.billing_forms import NewHostingBillForm, EditHostingBillForm, HostingBillFileForm, \\\n HostingBillReceiptFileForm, HostingBillPaymentForm, AddHostingBillPaymentIdForm\n\nfrom project.models import ProjectBill\nfrom consult.models import ConsultBill\nfrom hosting.models import HostingBill\n\nfrom home.helpers import Pagination\n\nfrom home.languages.util_status_messages import get_status_message\n\nfrom django.utils import timezone\n\n\nclass BillingUtil(BaseUtil):\n def __init__(self, user, scope, lang):\n super(BillingUtil, self).__init__()\n self._util_name = 'Billing Util'\n self._scope = scope\n self._bill = None\n self._user = user\n self._message_info = {\n 'code': 1,\n 'obj': 'bill',\n 'action': 'get'\n }\n self.language = lang\n\n #\n #\n # SETTER FUNCTIONS\n #\n #\n\n def new_bill(self, info):\n if self._scope:\n info['owner'] = self._user.id\n info['status'] = 'due'\n if self._scope == 'project':\n self._form = NewProjectBillForm(info)\n if self.validate_form():\n self._bill = self._object\n return True\n return False\n elif self._scope == 'consult':\n self._form = NewConsultBillForm(info)\n if self.validate_form():\n self._bill = self._object\n return True\n return False\n elif self._scope == 'hosting':\n self._form = NewHostingBillForm(info)\n if self.validate_form():\n self._bill = self._object\n return True\n return False\n self.add_error('Bill scope not found.')\n return False\n\n def edit_bill(self, info):\n if self._scope:\n if self._scope == 'project':\n self._form = EditProjectBillForm(info, instance=self._bill)\n if self.validate_form():\n self._bill = self._object\n return True\n else:\n return False\n elif self._scope == 'consult':\n self._form = EditConsultBillForm(info, instance=self._bill)\n if self.validate_form():\n self._bill = self._object\n return True\n else:\n return False\n elif self._scope == 'hosting':\n self._form = EditHostingBillForm(info, instance=self._bill)\n if self.validate_form():\n self._bill = self._object\n return True\n else:\n return False\n self.add_error('Bill scope not found.')\n return False\n\n def delete_bill(self):\n if self._bill:\n self._bill.is_delete = True\n self._bill.status = 'deleted'\n self._bill.save()\n else:\n return False\n\n #\n #\n # GETTER FUNCTIONS\n #\n #\n\n def get_bill(self, pk=None):\n if pk:\n if self._scope:\n if self._scope == 'project':\n try:\n self._bill = ProjectBill.objects.get(pk=pk)\n return self._bill\n except ObjectDoesNotExist:\n self.add_error(get_status_message(self._message_info, self.language)['message'])\n return False\n elif self._scope == 'consult':\n try:\n self._bill = ConsultBill.objects.get(pk=pk)\n return self._bill\n except ObjectDoesNotExist:\n self.add_error(get_status_message(self._message_info, self.language)['message'])\n return False\n elif self._scope == 'hosting':\n 
try:\n self._bill = HostingBill.objects.get(pk=pk)\n return self._bill\n except ObjectDoesNotExist:\n self.add_error(get_status_message(self._message_info, self.language)['message'])\n return False\n self.add_error('Bill scope not found.')\n return False\n return self._bill\n\n def get_object_bills(self, obj, count=False):\n if self._scope:\n if self._scope == 'project':\n if count:\n return ProjectBill.objects.filter(project=obj).count()\n return self.return_list(ProjectBill.objects.filter(project=obj).order_by('-id'))\n elif self._scope == 'consult':\n if count:\n return ConsultBill.objects.filter(consult=obj).count()\n return self.return_list(ConsultBill.objects.filter(consult=obj).order_by('-id'))\n elif self._scope == 'hosting':\n if count:\n return HostingBill.objects.filter(hosting=obj).count()\n return self.return_list(HostingBill.objects.filter(hosting=obj).order_by('-id'))\n self.add_error('Bill scope not found.')\n return []\n\n def get_reported_bills(self, count=False):\n return {\n 'project_bills': ProjectBill.objects.filter(reported=True).count() if count else self.return_list(ProjectBill.objects.filter(\n reported=True).order_by('-id')),\n 'consult_bills': ConsultBill.objects.filter(reported=True).count() if count else self.return_list(ConsultBill.objects.filter(\n reported=True).order_by('-id')),\n 'hosting_bills': HostingBill.objects.filter(reported=True).count() if count else self.return_list(HostingBill.objects.filter(\n reported=True).order_by('-id')),\n }\n\n def get_client_bills(self, client, count=False):\n return {\n 'project': ProjectBill.objects.filter(client=client).count() if count\n else\n self.return_list(ProjectBill.objects.filter(client=client).order_by('-id')),\n 'consult': ConsultBill.objects.filter(client=client).count() if count\n else\n self.return_list(ConsultBill.objects.filter(client=client).order_by('-id')),\n 'hosting': HostingBill.objects.filter(client=client).count() if count\n else\n self.return_list(HostingBill.objects.filter(client=client).order_by('-id')),\n }\n\n def get_client_balance(self, client):\n balance = 0\n if client:\n bills = self.get_client_bills(client)\n for x in bills['project']:\n if not x.payment_received:\n balance = balance + x.amount\n for x in bills['consult']:\n if not x.payment_received:\n balance = balance + x.amount\n for x in bills['hosting']:\n if not x.payment_received:\n balance = balance + x.amount\n else:\n self.add_error('Client not found.')\n return balance\n\n #\n #\n # UPDATE FUNCTIONS\n #\n #\n\n def upload_bill_file(self, info, files):\n if self._scope:\n if self._bill.bill_file:\n self._bill.bill_file.delete()\n if self._scope == 'project':\n self._form = ProjectBillFileForm(info, files=files, instance=self._bill)\n if self.validate_form():\n self._bill = self._object\n return True\n return False\n elif self._scope == 'consult':\n self._form = ConsultBillFileForm(info, files=files, instance=self._bill)\n if self.validate_form():\n self._bill = self._object\n return True\n return False\n elif self._scope == 'hosting':\n self._form = HostingBillFileForm(info, files=files, instance=self._bill)\n if self.validate_form():\n self._bill = self._object\n return True\n return False\n self.add_error('Bill scope not found.')\n return False\n\n def upload_receipt_file(self, info, files):\n if self._scope:\n if self._bill.receipt_file:\n self._bill.receipt_file.delete()\n if self._scope == 'project':\n self._form = ProjectBillReceiptFileForm(info, files=files, instance=self._bill)\n if self.validate_form():\n 
self._bill = self._object\n return True\n return False\n elif self._scope == 'consult':\n self._form = ConsultBillReceiptFileForm(info, files=files, instance=self._bill)\n if self.validate_form():\n self._bill = self._object\n return True\n return False\n elif self._scope == 'hosting':\n self._form = HostingBillReceiptFileForm(info, files=files, instance=self._bill)\n if self.validate_form():\n self._bill = self._object\n return True\n return False\n self.add_error('Bill scope not found.')\n return False\n\n def mark_as_received(self):\n if self._bill:\n self._bill.status = 'paid'\n self._bill.payment_submitted = True\n self._bill.payment_received = True\n self._bill.save()\n return True\n return False\n\n def report(self):\n if self._bill:\n self._bill.reported = True\n self._bill.save()\n return True\n return False\n\n def remove_reported(self):\n if self._bill:\n self._bill.reported = False\n self._bill.save()\n return True\n return False\n\n def submit_payment(self, info):\n if self._bill:\n if self._scope:\n info['payment_submitted'] = True\n info['payment_submission_date'] = timezone.now()\n info['payed_by'] = self._user.id\n info['status'] = 'payment submitted'\n if self._scope == 'project':\n self._form = ProjectBillPaymentForm(info, instance=self._bill)\n if self.validate_form():\n self._bill = self._object\n return True\n elif self._scope == 'consult':\n self._form = ConsultBillPaymentForm(info, instance=self._bill)\n if self.validate_form():\n self._bill = self._object\n return True\n elif self._scope == 'hosting':\n self._form = HostingBillPaymentForm(info, instance=self._bill)\n if self.validate_form():\n self._bill = self._object\n return True\n self.add_error('Bill scope not found.')\n return False\n else:\n return False\n\n def add_payment_confirmation_id(self, info):\n if self._bill:\n if not (self._bill.payment_received and self._bill.payment_submitted):\n self.add_error('The bill payment has to be marked as received.')\n return False\n if self._scope:\n info['payment_submission_date'] = timezone.now()\n if self._scope == 'project':\n self._form = AddProjectBillPaymentIdForm(info, instance=self._bill)\n if self.validate_form():\n self._bill = self._object\n return True\n elif self._scope == 'consult':\n self._form = AddConsultBillPaymentIdForm(info, instance=self._bill)\n if self.validate_form():\n self._bill = self._object\n return True\n elif self._scope == 'hosting':\n self._form = AddHostingBillPaymentIdForm(info, instance=self._bill)\n if self.validate_form():\n self._bill = self._object\n return True\n self.add_error('Bill scope not found.')\n return False\n","sub_path":"home/shared_utils/util_billing.py","file_name":"util_billing.py","file_ext":"py","file_size_in_byte":12806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"58959146","text":"\n\n#calss header\nclass _POSEUR():\n\tdef __init__(self,): \n\t\tself.name = \"POSEUR\"\n\t\tself.definitions = [u'someone who pretends to be something they are not, or to have qualities that they do not have: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_poseur.py","file_name":"_poseur.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"10547460","text":"import pika\nimport 
json\nimport os\nfrom threading import Thread\n\nthings = []\n\n# phase 3 consume response messages\ndef consume_response(ch, method, properties, body):\n things.insert(0, {\"response\": body.decode(\"utf-8\")})\n\n\n# this block is repeated for each new queue; parameterize?\n# this sets up the watcher for responses\namqp_url = os.environ['AMQP_URL']\nprint(\"opening connection to {}\".format(amqp_url))\nconnection = pika.BlockingConnection(pika.URLParameters(amqp_url))\nchannel = connection.channel()\nchannel.queue_declare(queue='responses')\nchannel.basic_qos(prefetch_count=1)\nchannel.basic_consume(consume_response,\n queue='responses',\n no_ack=True)\nthread = Thread(target=channel.start_consuming)\nthread.start()\nthread.join(0)\n\ndef get_things():\n return things\n","sub_path":"backend/app/modules/response_watcher.py","file_name":"response_watcher.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"335556149","text":"#!/usr/bin/env python \r\n# encoding: utf-8 \r\n\r\n\"\"\" \r\n@version: v1.0 \r\n@author: xiaxianba \r\n@license: Apache Licence \r\n@contact: scuxia@gmail.com \r\n@site: http://weibo.com/xiaxianba \r\n@software: PyCharm \r\n@file: ConvertBonds.py \r\n@time: 2018/5/21 9:19 \r\n@describe: \r\n 1.收集每天可转债的交易列表;\r\n 2.计算可转债的均价和方差,从这两个维度观察可转债的变化。\r\n\"\"\"\r\n\r\nimport csv\r\nimport numpy\r\nfrom WindPy import *\r\n\r\ndict_convertbonds = {'WIND':'a101020600000000', 'CIRC':'1000019571000000', 'SFC':'1000021892000000'}\r\n\r\n\r\n# 获取所有可转债列表\r\ndef get_convert_bonds(date):\r\n\r\n length_list = 0\r\n list_bonds = []\r\n list_subbonds = []\r\n\r\n prefix = \"date=\"\r\n suffix = \";sectorid=\"\r\n\r\n for value in dict_convertbonds.values():\r\n scope = prefix + date + suffix + value\r\n result_s = w.wset(\"sectorconstituent\", scope)\r\n list_bonds += result_s.Data[1]\r\n list_bonds = list(set(list_bonds))\r\n\r\n for bond in list_bonds:\r\n result_c = w.wsd(bond, \"close\", date, date, \"\")\r\n\r\n if result_c.Data[0][0] is not None:\r\n list_subbonds.append(bond)\r\n\r\n length_list = len(list_subbonds)\r\n return list_subbonds, length_list\r\n\r\n\r\n# 获取所有可转债的价格\r\ndef get_bonds_price(date):\r\n total_bonds = 0\r\n mean_bonds = 0.0\r\n median_bonds = 0.0\r\n std_bonds = 0.0\r\n list_bonds = []\r\n list_bondsprice = []\r\n list_csv = []\r\n\r\n list_bonds, total_bonds = get_convert_bonds(date)\r\n\r\n for key in list_bonds:\r\n result = w.wsd(key, \"close\", date, date, \"\")\r\n list_bondsprice += result.Data[0]\r\n\r\n if total_bonds == len(list_bondsprice):\r\n narray = numpy.array(list_bondsprice)\r\n mean_bonds = narray.mean()\r\n std_bonds = narray.std()\r\n median_bonds = numpy.median(narray)\r\n\r\n\r\n list_csv.append(date)\r\n list_csv.append(mean_bonds)\r\n list_csv.append(median_bonds)\r\n list_csv.append(std_bonds)\r\n write_to_file(list_csv)\r\n\r\n return mean_bonds, std_bonds\r\n\r\n\r\n# 将数据写入csv文件\r\ndef write_to_file(list):\r\n\r\n with open(\"convertbonds.csv\", \"ab+\") as csvfile:\r\n writer = csv.writer(csvfile)\r\n writer.writerow(list)\r\n\r\nif __name__ == \"__main__\":\r\n\r\n BEGIN_DATE = \"2018-05-18\"\r\n END_DATE = \"2018-5-23\"\r\n\r\n w.start()\r\n list_trade_day = w.tdays(BEGIN_DATE, END_DATE, \"\")\r\n\r\n for day in list_trade_day.Data[0]:\r\n str_date = day.strftime('%Y-%m-%d')\r\n get_bonds_price(str_date)\r\n\r\n","sub_path":"2 Code/1.2 
Python/Stock/ConvertBonds.py","file_name":"ConvertBonds.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"341091707","text":"# VTK - Labo 5 - Planeur\n# Author : Sathiya Kirushnapillai, Mathieu Monteverde\n\nimport vtk\n\n\nclass KeyPressInteractorStyle(vtk.vtkInteractorStyleTrackballCamera):\n \"\"\"\n An interactor style class extending vtkInteractorStyleTrackballCamera\n that saves a screenshot of the window when the 's' or Return key are \n pressed.\n \"\"\"\n\n def __init__(self, renWin, parent=None):\n self.parent = parent\n self.AddObserver(\"KeyPressEvent\", self.keyPressEvent)\n self.OUTPUT_FILE_NAME = \"map_output.png\"\n self.renWin = renWin\n\n\n def keyPressEvent(self, obj, event):\n key = self.parent.GetKeySym()\n if (key == \"Return\" or key == \"s\"):\n # Resources to save the scene to a PDF file\n w2if = vtk.vtkWindowToImageFilter()\n w2if.SetInput(self.renWin)\n w2if.Update()\n\n writer = vtk.vtkPNGWriter()\n writer.SetFileName(self.OUTPUT_FILE_NAME)\n writer.SetInputConnection(w2if.GetOutputPort())\n writer.Write()\n","sub_path":"keypressInteractorStyle.py","file_name":"keypressInteractorStyle.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"327828034","text":"import numpy as np\nimport os\nimport torchvision.utils as vutils\nfrom tensorboardX import SummaryWriter\n# from torch.utils.tensorboard import SummaryWriter\nimport scipy\nimport matplotlib as plt\nfrom .misc import *\nimport torch\n\nclass Visualizer:\n def __init__(self, tb_path):\n self.tb_path = tb_path\n\n if os.path.exists(tb_path):\n # if prompt_yes_no('{} already exists. 
Proceed?'.format(tb_path)):\n os.system('rm -r {}'.format(tb_path))\n # else:\n # exit(0)\n\n self.writer = SummaryWriter(tb_path)\n self.savedir = '/storage/armand/results/thesis/loAE'\n self.eval_every = 20\n\n def add_scalar(self, scalar_dict, epoch, global_step=None):\n for tag, scalar in scalar_dict.items():\n if isinstance(scalar, dict):\n self.writer.add_scalars(tag, scalar, epoch)\n elif isinstance(scalar, plt.figure.Figure):\n self.writer.add_figure(tag, scalar, epoch)\n elif tag == 'Embedding' or tag == 'Original-Domain':\n # labels = np.linspace(0, scalar.shape[0], scalar.shape[0])\n # labels = np.expand_dims(np.arange(scalar.shape[0]), axis=1)\n # labels = np.expand_dims(labels, axis=1)\n # labels = torch.tensor(np.expand_dims(labels, axis=1))\n self.writer.add_embedding(\n scalar,\n tag = tag,\n global_step=global_step)\n elif isinstance(scalar, list) or isinstance(scalar, np.ndarray):\n continue\n else:\n self.writer.add_scalar(tag, scalar, epoch)\n\n def add_images(self, image_dict, epoch, global_step=None, prefix=None):\n for tag, images in image_dict.items():\n if prefix is not None:\n tag = '{}/{}'.format(prefix, tag)\n images = torch.clamp(images, -1, 1)\n images = vutils.make_grid(images, nrow=images.size(0), normalize=True, range=(-1, 1))\n\n '''Save images of results'''\n if epoch % self.eval_every == 0 and epoch != 0:\n case = self.tb_path.split('/')[-2]\n resImageDir = os.path.join(self.savedir, 'figures', case)\n if not os.path.exists(resImageDir):\n os.makedirs(resImageDir)\n scipy.misc.imsave(os.path.join(resImageDir, prefix + '_step-' + str(global_step).zfill(5) + '_epoch-' + str(epoch).zfill(3) + '.png'), images[:, :130].permute(1,2,0))\n\n self.writer.add_image(tag, images, global_step)\n","sub_path":"LDE-traj/utils/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"56357806","text":"class Product: \n \n def __init__(self, price, item_name, weight, brand):\n self.price = price\n self.item_name = item_name\n self.weight = weight\n self.brand = brand\n self.status = \"for sale\"\n\n # def sell():\n # if self.status\n def add_tax(self,tax):\n self.price=self.price+(self.price * tax)\n return self.price\n def sell(self,status):\n if status == \"sold\":\n self.status = \"sold!\"\n def return_item(self,x):\n if x == \"defective\":\n self.price = 0\n self.status = \"defective\"\n return\n elif x == \"like_new\":\n self.status = \"for sale\"\n return \n elif x == \"opened\":\n self.status = \"used\"\n self.price = self.price*0.8\n return\n\n def display_info(self):\n return(f\"Product's cost is {self.price}\\nProduct's item name is {self.item_name}\\nProduct's weight is {self.weight}\\nProduct's brand is {self.brand}\\nProduct's availability is {self.status}\\n\")\n\nproduct1 = Product(5,\"Apples\",10,\"Yup\")\nproduct2 = Product(7,\"Oranges\",3,\"Yuppers\")\nproduct3 = 
Product(9,\"Coffee\",2,\"Jopp\")\n\nprint(product1.display_info())\nproduct1.add_tax(0.45)\nprint(product1.display_info())\nproduct1.sell(\"sold\")\nprint(product1.display_info())\nproduct1.return_item(\"like_new\")\nprint(product1.display_info())\nproduct1.return_item(\"opened\")\nprint(product1.display_info())\nproduct1.return_item(\"defective\")\nprint(product1.display_info())\nprint(product2.display_info())\nprint(product3.display_info())\n","sub_path":"Python/python_OOP/product/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"487347683","text":"import numpy as np\nfrom scipy.special import gammaln\nfrom math import exp\nfrom collections import Counter\nfrom .crp import CRP\n\ndef Z(corpus_s, topic, alpha, beta):\n n_vocab = sum([len(x) for x in corpus_s])\n t_zm = np.zeros(n_vocab).astype('int')\n z_topic = [[] for _ in topic]\n z_doc = [[] for _ in topic]\n z_tmp = np.zeros((n_vocab, len(topic)))\n assigned = np.zeros((len(corpus_s), len(topic)))\n n = 0\n for i in range(len(corpus_s)):\n for d in range(len(corpus_s[i])): \n wi = corpus_s[i][d] \n for j in range(len(topic)):\n lik = (z_topic[j].count(wi) + beta) / (assigned[i, j] + n_vocab * beta)\n pri = (len(z_topic[j]) + alpha) / ((len(corpus_s[i]) - 1) + len(topic) * alpha)\n z_tmp[n, j] = lik * pri\n t_zm[n] = np.random.multinomial(1, (z_tmp[n,:]/sum(z_tmp[n,:]))).argmax()\n z_topic[t_zm[n]].append(wi)\n z_doc[t_zm[n]].append(i)\n assigned[i, t_zm[n]] += 1\n n += 1\n z_topic = [x for x in z_topic if x != []]\n z_doc = [x for x in z_doc if x != []]\n return z_topic, z_doc\n\ndef most_common(x):\n return Counter(x).most_common(1)[0][0]\n\ndef CRP_prior(corpus_s, doc, phi):\n cp = np.empty((len(corpus_s), len(doc)))\n for i, corpus in enumerate(corpus_s):\n p_topic = [[x for x in doc[j] if x != i] for j in range(len(doc))]\n tmp = CRP(p_topic, phi)\n cp[i,:] = tmp[1:]\n return cp\n\ndef likelihood(corpus_s, topic, eta):\n w_m = np.empty((len(corpus_s), len(topic)))\n allword_topic = [word for t in topic for word in t]\n n_vocab = sum([len(x) for x in corpus_s])\n for i, corpus in enumerate(corpus_s):\n prob_result = []\n for j in range(len(topic)):\n current_topic = topic[j]\n n_word_topic = len(current_topic)\n prev_dominator = 1\n later_numerator = 1\n prob_word = 1 \n\n overlap = [val for val in set(corpus) if val in current_topic]\n \n prev_numerator = gammaln(len(current_topic) - len(overlap) + n_vocab * eta)\n later_dominator = gammaln(len(current_topic) + n_vocab * eta)\n for word in corpus: \n corpus_list = corpus \n if current_topic.count(word) - corpus_list.count(word) < 0 :\n a = 0\n else:\n a = current_topic.count(word) - corpus_list.count(word)\n \n prev_dominator += gammaln(a + eta)\n later_numerator += gammaln(current_topic.count(word) + eta)\n \n prev = prev_numerator - prev_dominator\n later = later_numerator - later_dominator\n \n like = prev + later \n w_m[i, j] = like\n w_m[i, :] = np.add(w_m[i, :], abs(min(w_m[i, :]))+0.1)\n w_m = w_m/w_m.sum(axis = 1)[:, np.newaxis]\n return w_m\n\ndef post(w_m, c_p):\n c_m = (w_m * c_p) / (w_m * c_p).sum(axis = 1)[:, np.newaxis]\n return np.array(c_m)\n\ndef wn(c_m, corpus_s):\n wn_ass = []\n for i, corpus in enumerate(corpus_s):\n for word in corpus:\n if c_m[i].sum != 1:\n c_m[i] = c_m[i]/c_m[i].sum()\n theta = np.random.multinomial(1, c_m[i]).argmax()\n wn_ass.append(theta)\n return np.array(wn_ass)\n\ndef gibbs(corpus_s, topic, alpha, beta, 
phi, eta, ite):\n n_vocab = sum([len(x) for x in corpus_s])\n gibbs = np.empty((n_vocab, ite)).astype('int')\n \n for i in range(ite):\n z_topic, z_doc = Z(corpus_s, topic, alpha, beta)\n c_p = CRP_prior(corpus_s, z_doc, phi)\n w_m = likelihood(corpus_s, z_topic, eta)\n c_m = post(w_m, c_p)\n gibbs[:, i] = wn(c_m, corpus_s) \n # drop first 1/10 data\n gibbs = gibbs[:, int(ite/10):]\n theta = [most_common(gibbs[x]) for x in range(n_vocab)]\n \n n_topic = max(theta)+1\n \n wn_topic = [[] for _ in range(n_topic)]\n wn_doc_topic = [[] for _ in range(n_topic)]\n\n doc = 0\n n = 0\n for i, corpus_s in enumerate(corpus_s):\n if doc == i:\n for word in corpus_s:\n wn_doc_topic[theta[n]].append(word)\n n += 1\n for j in range(n_topic):\n if wn_doc_topic[j] != []:\n wn_topic[j].append(wn_doc_topic[j])\n wn_doc_topic = [[] for _ in range(n_topic)] \n doc += 1\n wn_topic = [x for x in wn_topic if x != []]\n return wn_topic\n\n\n\n","sub_path":"hLDA/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"176350994","text":"# 1 부터 n 까지 제곱수의 합을 구하는 프로그램\n\n# 1-1 O(n)\ndef sum_square_for(n):\n sum = 0\n for i in range(1, n+1):\n sum += i ** 2\n return sum\n\n# 1-3 O(1)\ndef sum_sqaure(n):\n sum = n * (n + 1) * (2*n + 1) // 6\n return sum\n\nprint(sum_sqaure(5))\nprint(sum_square_for(5))\n# 출력\n# 55\n# 55","sub_path":"STUDY/Python/Alogrithm Everyone/Chapter 1/Practice01/p01-p01-squaresum.py","file_name":"p01-p01-squaresum.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"262679068","text":"\"\"\"\r\nYour task in this exercise has two steps:\r\n\r\n- audit the OSMFILE and change the variable 'mapping' to reflect the changes needed to fix \r\n the unexpected street types to the appropriate ones in the expected list.\r\n You have to add mappings only for the actual problems you find in this OSMFILE,\r\n not a generalized solution, since that may and will depend on the particular area you are auditing.\r\n- write the update_name function, to actually fix the street name.\r\n The function takes a string with street name as an argument and should return the fixed name\r\n We have provided a simple test so that you see what exactly is expected\r\n\"\"\"\r\nimport xml.etree.cElementTree as ET\r\nfrom collections import defaultdict\r\nimport re\r\nimport pprint\r\n\r\nOSMFILE = \"cleveland-original.osm\"\r\nlast_word_reg = re.compile(r'\\b\\S+\\.?$', re.IGNORECASE)\r\n\r\n\r\n\r\n#Definition of expected street-types\r\nexpected = [\"Avenue\",\r\n \"Boulevard\",\r\n \"Court\",\r\n \"Circle\",\r\n \"Drive\",\r\n \"Highway\",\r\n \"Lane\",\r\n \"Marketplace\",\r\n \"Place\",\r\n \"Parkway\",\r\n \"Road\",\r\n \"Street\",\r\n \"Square\",\r\n \"Terrace\",\r\n \"Trail\"]\r\n\r\n#Mapping to target the specific errors in the dataset:\r\nmapping_road = {\r\n \"ave\": \"Avenue\",\r\n \"Ave\": \"Avenue\",\r\n \"Ave.\": \"Avenue\",\r\n \"Blvd\": \"Boulevard\",\r\n \"Blvd.\": \"Boulevard\",\r\n \"ct\": \"Court\",\r\n \"Ct\": \"Court\",\r\n \"cir\":\"Circle\",\r\n \"Cir\": \"Circle\",\r\n \"Dr\": \"Drive\",\r\n \"Dr.\": \"Drive\",\r\n \"LANE\": \"Lane\",\r\n \"Ln\": \"Lane\",\r\n \"Pkwy\": \"Parkway\",\r\n \"Pl\": \"Place\",\r\n \"PL\": \"Place\",\r\n \"Rd.\": \"Road\",\r\n \"Rd\": \"Road\",\r\n \"St\": \"Street\",\r\n \"St \": \"Street \",\r\n \" St\": \" Street\",\r\n \"St.\": \"Street\",\r\n \" St.\": \" 
Street\",\r\n \"St. \": \"Street \",\r\n \"st.\": \"Street\",\r\n \r\n }\r\n\r\n#build a regex from the mapping_road dictionary:\r\nroad_possible = \"|\".join(mapping_road.keys()).replace('.', '') #replace point by \"nothing\", because the point is accounted for by the next regex\r\n#build a regex that matches anytwhere in the String\r\nroad_reg = re.compile(r'\\b(' + road_possible + r')\\b\\.?', re.IGNORECASE) \r\n\r\n\r\n#account for the directions\r\nmapping_directions = {\r\n \"N\": \"North\",\r\n \"N \": \"North \",\r\n \"N.\": \"North\",\r\n \"E\": \"East\",\r\n \"E.\": \"East\",\r\n \"S\": \"South\",\r\n \"S.\": \"South\",\r\n \"W\": \"West\",\r\n \"W.\": \"West\",\r\n \"NE\": \"North East\",\r\n \"NE.\": \"North East\",\r\n \"SE\": \"South East\",\r\n \"SE.\": \"South East\",\r\n \"NW\": \"North West\",\r\n \"NW.\": \"North West\",\r\n \"SW\": \"South West\",\r\n \"SW.\": \"South West\"\r\n \r\n }\r\n\r\n#build a regex from the mapping_directions dictionary:\r\ndirections_possible = \"|\".join(mapping_directions.keys()).replace('.', '') #replace point by \"nothing\", because the point is accounted for by the next regex\r\n#build a regex that matches anytwhere in the String\r\ndirection_reg = re.compile(r'\\b(' + directions_possible + r')\\b\\.?', re.IGNORECASE) \r\n\r\n\r\ndef audit_street_type(street_types, street_name):\r\n m = last_word_reg.search(street_name)\r\n if m:\r\n street_type = m.group()\r\n if street_type not in expected:\r\n street_types[street_type].add(street_name)\r\n\r\n \r\n\r\n\r\ndef is_street_name(elem):\r\n return (elem.attrib['k'] == \"addr:street\")\r\n\r\ndef is_postcode(elem):\r\n return (elem.attrib['k'] == \"addr:postcode\")\r\n\r\n\r\ndef audit(osmfile):\r\n osm_file = open(osmfile, \"r\", encoding=\"utf8\")\r\n street_types = defaultdict(set)\r\n global postcodes\r\n postcodes = set()\r\n \r\n for event, elem in ET.iterparse(osm_file, events=(\"start\",)):\r\n\r\n if elem.tag == \"node\" or elem.tag == \"way\":\r\n for tag in elem.iter(\"tag\"):\r\n if is_street_name(tag):\r\n audit_street_type(street_types, tag.attrib['v'])\r\n if is_postcode(tag):\r\n postcodes.add(tag.attrib['v'])\r\n elem.clear() #prevents from memory error \r\n\r\n \r\n\r\n return street_types, postcodes\r\n\r\n#Update the abbreviation of direction or street-type\r\ndef update_direction(better_name,mapping_directions):\r\n m = direction_reg.search(better_name)\r\n better_name_direction = better_name\r\n if m:\r\n current_direction = m.group()\r\n\r\n if current_direction in mapping_directions:\r\n better_street_direction = mapping_directions[m.group()]\r\n better_name_direction = direction_reg.sub(better_street_direction, better_name)\r\n return better_name_direction\r\n\r\ndef update_name(name, mapping_road):\r\n m = road_reg.search(name)\r\n better_name = name\r\n if m:\r\n current_name = m.group()\r\n\r\n if current_name in mapping_road:\r\n if m.group(1) == \"St\" and m.span()[0]==0:\r\n pass #do nothing\r\n else: \r\n #if m.group(1) == (\"St\"|\"St.\") and m.span()[0]:print(\"yes\") #http://stackoverflow.com/questions/15340582/python-extract-pattern-matches\r\n better_street_type = mapping_road[m.group()]\r\n better_name = road_reg.sub(better_street_type, name)\r\n\r\n return better_name\r\n\r\n\r\n#audit street types and print these out to inspect visually. 
\r\ndef run():\r\n global st_types\r\n st_types, postcodes = audit(OSMFILE)\r\n \r\n pprint.pprint(dict(st_types))\r\n\r\n \r\n \r\n #Check for the changes of the names:\t\r\n for st_type, ways in st_types.items():\r\n for name in ways:\r\n better_name = update_name(name, mapping_road)\r\n better_name_direction = update_direction(better_name, mapping_directions)\r\n print (name, \"=>\", better_name_direction) \r\n \r\n\r\n#check for postcodes that do not match into the ohio-area 44xxx\r\n for i in list(postcodes):\r\n \r\n if \"44\" in i:\r\n pass\r\n else:\r\n print(\"Not a valid postcode: \",i)\r\n\r\n return better_name_direction\r\n \r\n\r\nif __name__ == '__main__':\r\n run()\r\n\r\n","sub_path":"audit.py","file_name":"audit.py","file_ext":"py","file_size_in_byte":6351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"392682426","text":"import requests\nfrom config import bot\n\ndef git(msg):\n if msg.get('text'):\n if msg['text'].startswith('/git ') or msg['text'].startswith('!git '):\n text = msg['text'][5:]\n res = requests.get('https://api.github.com/users/' + text).json()\n if not res.get('login'):\n return bot.sendMessage(msg['chat']['id'], 'Usuário \"{}\" não encontrado.'.format(text),\n reply_to_message_id=msg['message_id'])\n else:\n bot.sendMessage(msg['chat']['id'], '''*Nome:* `{}`\n*Login:* `{}`\n*Localização:* `{}`\n*Tipo:* `{}`\n*Bio:* `{}`'''.format(res['name'], res['login'],\n res['location'], res['type'],\n res['bio']), 'Markdown',\n reply_to_message_id=msg['message_id'])\n return True\n","sub_path":"plugins/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"271460009","text":"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. 
team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os, sys\nimport random\nimport logging\nimport csv\nimport argparse\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\nfrom tqdm import tqdm, trange\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom pytorch_pretrained_bert.modeling import BertForQuizbowl\nfrom pytorch_pretrained_bert.optimization import BertAdam\nfrom pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text = text\n self.label = label\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, label_id):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.label_id = label_id\n\n\nclass QuizbowlProcessor(object):\n \"\"\"Processor for the Quizbowl data set.\"\"\"\n\n def get_train_examples(self, data_dir):\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"1000_train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"150_dev.tsv\")), \"dev\")\n\n def get_labels(self, data_dir):\n lines = self._read_tsv(os.path.join(data_dir, \"1000_train.tsv\"))\n return list({line[0] for line in lines})\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n label, text = line\n examples.append(InputExample(guid, text, label=label))\n return examples\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\", encoding='utf-8') as f:\n reader = csv.reader(f, delimiter='\\t', quotechar=quotechar)\n lines = [line for line in reader]\n return lines\n\n\ndef convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n label_map = defaultdict(lambda: -1)\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n features = []\n for (ex_index, example) in enumerate(tqdm(examples)):\n tokens = tokenizer.tokenize(example.text)\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens) > max_seq_length - 2:\n tokens = tokens[0: max_seq_length - 2]\n\n tokens.insert(0, \"[CLS]\")\n tokens.append(\"[SEP]\")\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n\n label_id = label_map[example.label]\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n label_id=label_id))\n return features\n\n\ndef accuracy(out, labels):\n outputs = np.argmax(out, axis=1)\n return np.sum(outputs == labels)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--data_dir\",\n default=os.path.join(os.path.dirname(__file__), 'data'),\n type=str,\n # required=True,\n help=\"The input data dir. 
Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--bert_model\",\n default='bert-base-uncased',\n type=str,\n # required=True,\n help=\"Bert pre-trained model selected in the list: bert-base-uncased, \"\n \"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.\")\n parser.add_argument(\"--output_dir\",\n default=os.path.join(os.path.dirname(__file__), 'checkpoints'),\n type=str,\n # required=True,\n help=\"The output directory where the model checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--max_seq_length\",\n default=256,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. \\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n parser.add_argument(\"--do_train\",\n default=True,\n action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\",\n default=False,\n action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--batch_size\",\n default=16,\n type=int,\n help=\"Total batch size for training/testing.\")\n parser.add_argument(\"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--num_train_epochs\",\n default=100.0,\n type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--warmup_proportion\",\n default=0.1,\n type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10%% of training.\")\n parser.add_argument(\"--no_cuda\",\n default=False,\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument('--seed',\n type=int,\n default=42,\n help=\"random seed for initialization\")\n\n args = parser.parse_args()\n\n if args.no_cuda:\n device = torch.device(\"cpu\")\n else:\n device = torch.device(\"cuda\")\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if not args.no_cuda:\n torch.cuda.manual_seed_all(args.seed)\n\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n processor = QuizbowlProcessor()\n label_list = processor.get_labels(args.data_dir)\n tokenizer = BertTokenizer.from_pretrained(args.bert_model)\n\n # Prepare model\n model = BertForQuizbowl.from_pretrained(args.bert_model,\n cache_dir=PYTORCH_PRETRAINED_BERT_CACHE,\n num_labels=len(label_list))\n model.to(device)\n\n # Prepare optimizer\n train_examples = processor.get_train_examples(args.data_dir)\n num_train_steps = int(len(train_examples) * args.num_train_epochs / args.batch_size)\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'gamma', 'beta']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0}]\n\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n warmup=args.warmup_proportion,\n t_total=num_train_steps)\n\n checkpoint_file = os.path.join(args.output_dir, \"checkpoint.pt\")\n if os.path.isfile(checkpoint_file):\n print('loading model from pytorch checkpoint...')\n checkpoint = torch.load(checkpoint_file)\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n def do_train():\n 
train_features = convert_examples_to_features(\n train_examples, label_list, args.max_seq_length, tokenizer)\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_examples))\n logger.info(\" Batch size = %d\", args.batch_size)\n logger.info(\" Num steps = %d\", num_train_steps)\n all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)\n train_data = TensorDataset(all_input_ids, all_input_mask, all_label_ids)\n train_sampler = RandomSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)\n\n model.train()\n for _ in trange(int(args.num_train_epochs), desc=\"Epoch\"):\n tr_loss, nb_tr_examples, nb_tr_steps = 0, 0, 0\n for step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):\n batch = tuple(t.to(device) for t in batch)\n input_ids, input_mask, label_ids = batch\n loss, _ = model(input_ids, input_mask, label_ids)\n loss.backward()\n tr_loss += loss.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n optimizer.step()\n model.zero_grad()\n\n if args.do_eval: do_eval()\n logger.info(\"train loss = %s\", tr_loss / nb_tr_examples)\n # torch.save({\n # 'model_state_dict': model.state_dict(),\n # 'optimizer_state_dict': optimizer.state_dict(),\n # 'train_loss': tr_loss / nb_tr_examples}, checkpoint_file)\n\n def do_eval():\n eval_examples = processor.get_dev_examples(args.data_dir)\n eval_features = convert_examples_to_features(\n eval_examples, label_list, args.max_seq_length, tokenizer)\n logger.info(\"***** Running evaluation *****\")\n logger.info(\" Num examples = %d\", len(eval_examples))\n logger.info(\" Batch size = %d\", args.batch_size)\n all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)\n eval_data = TensorDataset(all_input_ids, all_input_mask, all_label_ids)\n eval_sampler = SequentialSampler(eval_data)\n eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)\n\n model.eval()\n eval_loss, eval_accuracy = 0, 0\n nb_eval_steps, nb_eval_examples = 0, 0\n for input_ids, input_mask, label_ids in tqdm(eval_dataloader):\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n label_ids = label_ids.to(device)\n\n with torch.no_grad():\n tmp_eval_loss, logits = model(input_ids, input_mask, label_ids)\n\n logits = logits.detach().cpu()\n label_ids = label_ids.to('cpu')\n tmp_eval_accuracy = accuracy(logits.numpy(), label_ids.numpy())\n\n eval_loss += tmp_eval_loss.mean().item()\n eval_accuracy += tmp_eval_accuracy\n\n nb_eval_examples += input_ids.size(0)\n nb_eval_steps += 1\n\n eval_loss = eval_loss / nb_eval_examples\n eval_accuracy = eval_accuracy / nb_eval_examples\n\n result = {'eval_loss': eval_loss,\n 'eval_accuracy': eval_accuracy}\n\n output_eval_file = os.path.join(args.output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"a\") as writer:\n logger.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n do_train() if args.do_train else do_eval()\n\n\nif __name__ 
== \"__main__\":\n main()\n","sub_path":"BERT-DAN/examples/run_quizbowl.py","file_name":"run_quizbowl.py","file_ext":"py","file_size_in_byte":14278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"179962625","text":"# USDA_CoA_Livestock.py (flowsa)\n# !/usr/bin/env python3\n# coding=utf-8\n\nimport io\nimport pandas as pd\nimport json\nimport numpy as np\nfrom flowsa.common import *\nfrom flowsa.flowbyfunctions import assign_fips_location_system\n\n\ndef CoA_Livestock_URL_helper(build_url, config, args):\n \"\"\"This helper function uses the \"build_url\" input from flowbyactivity.py, which is a base url for coa cropland data\n that requires parts of the url text string to be replaced with info specific to the usda nass quickstats API.\n This function does not parse the data, only modifies the urls from which data is obtained. \"\"\"\n # initiate url list for coa cropland data\n urls_livestock = []\n\n # call on state acronyms from common.py (and remove entry for DC)\n state_abbrev = abbrev_us_state\n state_abbrev = {k: v for (k, v) in state_abbrev.items() if k != \"DC\"}\n\n # replace \"__aggLevel__\" in build_url to create three urls\n for x in config['agg_levels']:\n # at national level, remove the text string calling for state acronyms\n if x == 'NATIONAL':\n url_ls = build_url\n url_ls = url_ls.replace(\"__aggLevel__\", x)\n url_ls = url_ls.replace(\"&state_alpha=__stateAlpha__\", \"\")\n url_ls = url_ls.replace(\" \", \"%20\")\n urls_livestock.append(url_ls)\n else:\n # substitute in state acronyms for state and county url calls\n for y in state_abbrev:\n url_ls = build_url\n url_ls = url_ls.replace(\"__aggLevel__\", x)\n url_ls = url_ls.replace(\"__stateAlpha__\", y)\n url_ls = url_ls.replace(\" \", \"%20\")\n urls_livestock.append(url_ls)\n return urls_livestock\n\n\ndef coa_livestock_call(url, coa_response, args):\n livestock_json = json.loads(coa_response.text)\n # Convert response to dataframe\n df_livestock = pd.DataFrame(data=livestock_json[\"data\"])\n return df_livestock\n\n\ndef coa_livestock_parse(dataframe_list, args):\n \"\"\"Modify the imported data so it meets the flowbyactivity criteria and only includes data on harvested acreage\n (irrigated and total).\"\"\"\n df = pd.concat(dataframe_list, sort=False)\n # # specify desired data based on domain_desc\n df = df[df['domain_desc'].str.contains(\"INVENTORY|TOTAL\")]\n df = df[~df['domain_desc'].str.contains(\"ECONOMIC CLASS|NAICS|FARM SALES|AREA OPERATED\")]\n # drop any specialized production practices\n df = df[df['prodn_practice_desc'] == 'ALL PRODUCTION PRACTICES']\n # drop specialized class descriptions\n df = df[~df['class_desc'].str.contains(\"BREEDING|MARKET\")]\n # drop unused columns\n df = df.drop(columns=['agg_level_desc', 'location_desc', 'state_alpha', 'sector_desc',\n 'country_code', 'begin_code', 'watershed_code', 'reference_period_desc',\n 'asd_desc', 'county_name', 'source_desc', 'congr_district_code', 'asd_code',\n 'week_ending', 'freq_desc', 'load_time', 'zip_5', 'watershed_desc', 'region_desc',\n 'state_ansi', 'state_name', 'country_name', 'county_ansi', 'end_code', 'group_desc',\n 'util_practice_desc'])\n # create FIPS column by combining existing columns\n df.loc[df['county_code'] == '', 'county_code'] = '000' # add county fips when missing\n df['Location'] = df['state_fips_code'] + df['county_code']\n df.loc[df['Location'] == '99000', 'Location'] = US_FIPS # modify national level fips\n # combine column information to create activity 
information, and create two new columns for activities\n df['ActivityProducedBy'] = df['commodity_desc'] + ', ' + df['class_desc'] # drop this column later\n df['ActivityProducedBy'] = df['ActivityProducedBy'].str.replace(\", ALL CLASSES\", \"\", regex=True) # not interested in all data from class_desc\n # rename columns to match flowbyactivity format\n df = df.rename(columns={\"Value\": \"FlowAmount\",\n \"unit_desc\": \"FlowName\",\n \"year\": \"Year\",\n \"CV (%)\": \"Spread\",\n \"domaincat_desc\": \"Compartment\",\n \"short_desc\": \"Description\"})\n # drop remaining unused columns\n df = df.drop(columns=['class_desc', 'commodity_desc', 'state_fips_code', 'county_code',\n 'statisticcat_desc', 'prodn_practice_desc'])\n # modify contents of flowamount column, \"D\" is supressed data, \"z\" means less than half the unit is shown\n df['FlowAmount'] = df['FlowAmount'].str.strip() # trim whitespace\n df.loc[df['FlowAmount'] == \"(D)\", 'FlowAmount'] = withdrawn_keyword\n # df.loc[df['FlowAmount'] == \"(Z)\", 'FlowAmount'] = withdrawn_keyword\n df['FlowAmount'] = df['FlowAmount'].str.replace(\",\", \"\", regex=True)\n # # USDA CoA 2017 states that (H) means CV >= 99.95, therefore replacing with 99.95 so can convert column to int\n # # (L) is a CV of <= 0.05\n df['Spread'] = df['Spread'].str.strip() # trim whitespace\n df.loc[df['Spread'] == \"(H)\", 'Spread'] = 99.95\n df.loc[df['Spread'] == \"(L)\", 'Spread'] = 0.05\n df.loc[df['Spread'] == \"\", 'Spread'] = None # for instances where data is missing\n df.loc[df['Spread'] == \"(D)\", 'Spread'] = withdrawn_keyword\n # add location system based on year of data\n df = assign_fips_location_system(df, args['year'])\n # # Add hardcoded data\n df['Class'] = \"Other\"\n df['SourceName'] = \"USDA_CoA_Livestock\"\n df['Unit'] = \"p\"\n df['MeasureofSpread'] = \"RSD\"\n df['DataReliability'] = None\n df['DataCollection'] = 2\n return df\n","sub_path":"flowsa/USDA_CoA_Livestock.py","file_name":"USDA_CoA_Livestock.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"644682187","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport os\r\nimport csv\r\nimport time\r\nimport datetime\r\nfrom dataset import Dataset\r\nfrom utils import normalize\r\n\r\n# Parameters\r\n# ==================================================\r\n\r\n# Data loading params\r\ntf.flags.DEFINE_string(\"test_data\", \"data/yelp-2013-test.pkl\", \"Data source for the testing data.\")\r\n\r\n# Training parameters\r\ntf.flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (Default: 64)\")\r\ntf.flags.DEFINE_string(\"checkpoint_dir\", \"runs/1563096406/checkpoints\", \"Checkpoint directory from training run\")\r\n\r\n# Misc Parameters\r\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\r\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\r\n\r\nFLAGS = tf.flags.FLAGS\r\n\r\nprint(\"Evaluating...\\n\")\r\n# Load test data\r\ntest = Dataset(filepath=FLAGS.test_data)\r\n# Evaluation\r\n# ==================================================\r\ncheckpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\r\ngraph = tf.Graph()\r\nwith graph.as_default():\r\n session_conf = tf.ConfigProto(\r\n allow_soft_placement=FLAGS.allow_soft_placement,\r\n log_device_placement=FLAGS.log_device_placement)\r\n sess = tf.Session(config=session_conf)\r\n with sess.as_default():\r\n # Load the saved meta graph 
and restore variables\r\n saver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\r\n saver.restore(sess, checkpoint_file)\r\n\r\n # Get the placeholders from the graph by name\r\n context_placeholder = graph.get_operation_by_name(\"context\").outputs[0]\r\n query_placeholder = graph.get_operation_by_name(\"query\").outputs[0]\r\n num_sents_placeholder = graph.get_operation_by_name(\"num_sents\").outputs[0]\r\n labels_placeholder = graph.get_operation_by_name(\"labels\").outputs[0]\r\n dropout_keep_prob = graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\r\n\r\n\r\n # Tensors we want to evaluate\r\n predictions = graph.get_operation_by_name(\"predictions\").outputs[0]\r\n\r\n # Generate batches for one epoch\r\n all_labels = []\r\n all_predictions = []\r\n for batch in test.bacth_iter(FLAGS.batch_size, desc=\"Evaluating\", shuffle=False):\r\n labels, contexts, queries = zip(*batch)\r\n contexts, num_sents = normalize(contexts)\r\n feed_dict = {\r\n context_placeholder: contexts,\r\n query_placeholder: queries,\r\n num_sents_placeholder: num_sents,\r\n labels_placeholder: labels,\r\n dropout_keep_prob: 1.0\r\n }\r\n batch_predictions = sess.run(predictions, feed_dict)\r\n all_labels = np.concatenate([all_labels, labels])\r\n all_predictions = np.concatenate([all_predictions, batch_predictions])\r\n\r\n# Print accuracy\r\nif all_labels is not None:\r\n correct_predictions = float(sum(all_predictions == all_labels))\r\n print(\"Total number of test examples: {}\".format(len(all_labels)))\r\n print(\"Accuracy: {:g}\".format(correct_predictions/float(len(all_labels)) * 100))\r\n\r\n\r\n# Save the evaluation to a csv\r\nout_path = os.path.join(FLAGS.checkpoint_dir, \"..\", \"predictions.csv\")\r\nprint(\"Saving evaluation to {0}\".format(out_path))\r\nwith open(out_path, 'w') as f:\r\n csv.writer(f).writerows(map(lambda x: [x], all_predictions.astype(np.int32)))","sub_path":"textdmn/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"374199384","text":"import os.path as osp\nfrom waflib.Configure import conf\n\n\ndef options(opt):\n opt = opt.add_option_group('FFTW Options')\n opt.add_option('--with-fftw', type='string',\n help=\"give FFTW3 installation location\")\n opt.add_option('--with-fftw-include', type='string', default='',\n help=\"give FFTW3 include installation location\")\n opt.add_option('--with-fftw-lib', type='string', default='',\n help=\"give FFTW3 lib installation location\")\n\n\n@conf\ndef check_fftw(ctx, mandatory=True):\n instdir = ctx.options.with_fftw\n\n if instdir is None or instdir.lower() in ['yes', 'true', 'on']:\n ctx.start_msg('Checking for FFTW in PKG_CONFIG_PATH')\n ctx.check_cfg(package='fftw3', uselib_store='FFTW',\n args='--cflags --libs', mandatory=mandatory)\n elif instdir.lower() in ['no', 'off', 'false']:\n return\n else:\n ctx.start_msg('Checking for FFTW in %s' % instdir)\n if ctx.options.with_fftw_include:\n ctx.env.INCLUDES_FFTW = [ctx.options.with_fftw_include]\n else:\n ctx.env.INCLUDES_FFTW = [osp.join(instdir, 'include')]\n if ctx.options.with_fftw_lib:\n ctx.env.LIBPATH_FFTW = [ctx.options.with_fftw_lib]\n\n ctx.check(header_name=\"fftw3.h\", use='FFTW', mandatory=mandatory)\n # need to explicitly add floating point version of lib\n ctx.env.LIB_FFTW += ['fftw3f']\n # ibid if using double-precision:\n ctx.env.LIB_FFTW += ['fftw3']\n if len(ctx.env.INCLUDES_FFTW):\n 
ctx.end_msg(ctx.env.INCLUDES_FFTW[0])\n else:\n ctx.end_msg('FFTW3 not found')\n\n\ndef configure(cfg):\n cfg.check_fftw()\n","sub_path":"find_fftw3.py","file_name":"find_fftw3.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"532310765","text":"import math\nimport random\nimport matplotlib.pyplot as plt\nfrom random import shuffle\nimport pickle\n'''\n遗传算法解决旅行商问题\n'''\n#坐标纸大小\nX_MAX = 5000\nY_MAX = 5000\n\nclass city:\n def __init__(self,x,y):\n self.x = x\n self.y = y\n\n#CITY_LIST = create_city()\nwith open(\"./city_17.dump\",'rb') as f:\n CITY_LIST = pickle.load(f)\n\n#城市数目 5 8 11 14 17\n#CITY_SIZE = 17\nCITY_SIZE = len(CITY_LIST)\n\n#其他参数\npc = 0.25#交叉拼接概率\npm = 0.05#变异概率\npopulation_size =100#初始种群大小\nSTEPS = 1500 #遗传代数\nlength = CITY_SIZE#染色体长度\n\n\npopulation = [] #种群列表\n\ndef distance(sln):\n '''\n 求解方案成本\n '''\n dis = 0\n for i in range(CITY_SIZE-1):\n dis += math.sqrt((CITY_LIST[sln[i]].x-CITY_LIST[sln[i+1]].x)**2 + (CITY_LIST[sln[i]].y-CITY_LIST[sln[i+1]].y)**2)\n dis += math.sqrt((CITY_LIST[sln[CITY_SIZE-1]].x-CITY_LIST[sln[0]].x)**2 + (CITY_LIST[sln[CITY_SIZE-1]].y-CITY_LIST[sln[0]].y)**2)\n return dis\n\ndef init_population():\n '''\n 初始化种群\n '''\n pop = [i for i in range(length)]\n for i in range(population_size):\n tmp = list(pop)\n shuffle(tmp)\n population.append(tmp)\n \ndef selection():\n '''\n 根据距离进行适应度选择\n 这里将适应度函数选择为距离的倒数(为了防止太小 改成1000000.0/dis作为函数)\n 生成新种群\n '''\n new_popution = []\n global population\n prob = [] #占比表\n for idv in population:\n prob.append(1000000.0/distance(idv))\n prob = [i/sum(prob) for i in prob]\n #轮盘概率法\n for i in range(population_size):\n rand = random.random()\n index = -1\n prob_cur =0\n #因为浮点数会产生误差 需要处理下\n for i in range(len(prob)):\n prob_cur +=prob[i]\n if rand < prob_cur:\n index = i\n break\n if index !=-1:\n new_popution.append(population[index])\n else:\n new_popution.append(population[-1])\n population.clear()\n population = list(new_popution)\n\ndef crossover(pop_1,pop_2):\n '''\n 交叉两条染色体 Order Crossover\n 返回交换后的两条染色体\n '''\n child_1 = list(pop_1)\n child_2 = list(pop_2)\n index_1 = random.randint(0,len(pop_1)-1)\n index_2 = random.randint(0,len(pop_1)-1)\n while index_2==index_1:\n index_2 = random.randint(1,len(pop_1)-1)\n if index_1>index_2:\n start = index_2\n end = index_1\n else:\n start = index_1\n end=index_2\n sec_1 = list(child_1[start:end])#不变的部分\n sec_2 = list(child_2[start:end])\n left_1 = child_1[:start]+child_1[end:]#父染色体1剩余的元素\n left_2 = child_2[:start]+child_2[end:]#父染色体2剩余的元素\n #产生child_1\n for i in range(0,len(pop_1)):\n #选中部分不变\n if i>=start and i =start and ipc:\n continue\n else:\n population[2*i],population[2*i+1] = crossover(population[2*i],population[2*i+1])\n\ndef mutation():\n '''\n 小概率变异 采用选定两城市交换顺序的方式\n '''\n global population\n for i in range(0,population_size):\n if random.random() >pm:\n continue\n else:\n index_1 = random.randint(0,CITY_SIZE-1)\n index_2 = random.randint(0,CITY_SIZE-1)\n while index_2==index_1:\n index_2 = random.randint(1,CITY_SIZE-1)\n tmp = population[i][index_1]\n population[i][index_1] = population[i][index_2]\n population[i][index_2] = tmp\n#------------------------------------------------------------\ndef get_Max():\n '''\n 获取当前种群中最优解\n '''\n sln = list(population[0])\n dist = distance(sln)\n for i in range(1,population_size):\n dis = distance(population[i])\n if dis(%d,%d)\" %(CITY_LIST[sln[i]].x, CITY_LIST[sln[i]].y),end='' )\n dis = distance(sln)\n print(\", 距离:%.5f\" % dis 
)\n\ndef paint(sln):\n #画图\n x_point= []\n y_point = []\n for i in range(CITY_SIZE):\n x_point.append(CITY_LIST[sln[i]].x)\n y_point.append(CITY_LIST[sln[i]].y)\n x_point.append(CITY_LIST[sln[0]].x)\n y_point.append(CITY_LIST[sln[0]].y)\n fig = plt.figure()\n plt.plot(x_point, y_point, color='r', linestyle='-',marker='o',markerfacecolor='blue')\n for a, b in zip(x_point, y_point):\n plt.text(a, b, (a,b),ha='center', va='bottom', fontsize=10)\n #plt.annotate('start point', xy=(x_point[0], y_point[0]), xytext=(x_point[0]-300, y_point[0] - 300), arrowprops=dict(arrowstyle='->'))\n plt.show()\n#----------------------------------------------------------------\n\ndef main():\n init_population()#种群初始化\n step = STEPS\n H = 100000 #最优长度\n H_cur = []#最优解\n while step:\n selection()#适应度选择\n sln,dist=get_Max()#获取当前种群的最优解\n if dist0].sum(axis=1)\n return pd.concat([ew_absorb, ew_fs], axis=1)\n\ndef EWByRangeList(d, rangeList):\n ewfAll = pd.DataFrame()\n for r in rangeList:\n start, end = r\n ewf = intervalEWFeature(d, start, end)\n newNames = ['ew_' + str(start) + '_' + str(end) + '_' + str(v) for v in ewf.columns.values]\n ewf.columns = newNames\n ewfAll = pd.concat([ewfAll,ewf], axis=1)\n return ewfAll","sub_path":".ipynb_checkpoints/ew_utils-checkpoint.py","file_name":"ew_utils-checkpoint.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"43004149","text":"class node:\n def __init__(self, x = None):\n self.val = x\n self.next = None\n\n def __contains__(self, item):\n while self and self.val:\n if self.val == item:\n return True\n self = self.next\n\n return False\n\n def __add__(self, other):\n if self.__contains__(other):\n return False\n\n p = None\n while self and self.val:\n p = self\n self = self.next\n\n if p is None:\n self.val = other\n self.next = None\n else:\n p.next = node()\n p.next.val = other\n p.next.next = None\n return True\n\n def __str__(self):\n while self:\n print(self.val, end=\" \")\n self = self.next\n\n print()\n\n\ndef split(l, t):\n while l:\n t[l.val % 10].__add__(l.val)\n l = l.next\n\n\ndef merge(t):\n first = True\n merged = node()\n merged.__str__()\n head = merged\n for i in t:\n while i and i.val:\n if first:\n first = False\n merged.val = i.val\n else:\n merged.next = i\n merged = merged.next\n i = i.next\n\n return head\n\n\ntab = [node() for _ in range(10)]\nlist = node(4)\nlist.__add__(6)\nlist.__add__(3)\nlist.__add__(16)\nlist.__add__(0)\nlist.__str__()\n\nsplit(list, tab)\ntest = merge(tab)\ntest.__str__()","sub_path":"Ćwiczenia_9_&_10/Zadania_Podstawowe/Zadanie_5.py","file_name":"Zadanie_5.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"545262031","text":"\"\"\"\nFunctions for moving the robot FORWARD and BACKWARD.\nAuthors: David Fisher, David Mutchler and Jabari-Aman Delemore.\n\"\"\" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.\n\n# DONE: 2. Implment forward_seconds, then the relevant part of the test function.\n# Test and correct as needed.\n# Then repeat for forward_by_time.\n# Then repeat for forward_by_encoders.\n# Then repeat for the backward functions.\n\nimport ev3dev.ev3 as ev3\nimport time\n\n\ndef test_forward_backward():\n \"\"\"\n Tests the forward and backward functions, as follows:\n 1. 
Repeatedly:\n -- Prompts for and gets input from the console for:\n -- Seconds to travel\n -- If this is 0, BREAK out of the loop.\n -- Speed at which to travel (-100 to 100)\n -- Stop action (\"brake\", \"coast\" or \"hold\")\n -- Makes the robot run per the above.\n 2. Same as #1, but gets inches and runs forward_by_time.\n 3. Same as #2, but runs forward_by_encoders.\n 4. Same as #1, 2, 3, but tests the BACKWARD functions.\n \"\"\"\n left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n\n assert left_motor.connected\n assert right_motor.connected\n speed_sp = int(input('Input the speed of the motors:'))\n time_s = int(input('Input time for motors to run:'))\n position_sp = int(input('distance for robot to travel:'))\n forward_seconds(time_s, speed_sp, 'brake')\n backward_seconds(time_s, speed_sp, 'brake')\n forward_by_time(position_sp, speed_sp, 'brake')\n backward_by_time(position_sp, speed_sp, 'brake')\n ev3.Sound.beep().wait()\n forward_by_encoders(position_sp, speed_sp, 'brake')\n backward_by_encoders(position_sp, speed_sp, 'brake')\n ev3.Sound.beep().wait()\ndef forward_seconds(seconds, speed, stop_action):\n \"\"\"\n Makes the robot move forward for the given number of seconds at the given speed,\n where speed is between -100 (full speed backward) and 100 (full speed forward).\n Uses the given stop_action.\n \"\"\"\n left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n left_motor.run_forever(speed_sp=(speed * 8))\n right_motor.run_forever(speed_sp=(speed * 8))\n time.sleep(seconds)\n left_motor.stop()\n right_motor.stop()\n\n\ndef forward_by_time(inches, speed, stop_action):\n \"\"\"\n Makes the robot move forward the given number of inches at the given speed,\n where speed is between -100 (full speed backward) and 100 (full speed forward).\n Uses the algorithm:\n 0. Compute the number of seconds to move to achieve the desired distance.\n 1. Start moving.\n 2. Sleep for the computed number of seconds.\n 3. Stop moving.\n \"\"\"\n left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n time = (inches/speed)*4.5\n left_motor.run_timed(speed_sp=speed, time_sp=time)\n right_motor.run_timed(speed_sp=speed, time_sp=time)\n\ndef forward_by_encoders(inches, speed, stop_action):\n \"\"\"\n Makes the robot move forward the given number of inches at the given speed,\n where speed is between -100 (full speed backward) and 100 (full speed forward).\n Uses the algorithm:\n 1. Compute the number of degrees the wheels should spin to achieve the desired distance.\n 2. Move until the computed number of degrees is reached.\n \"\"\"\n left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n left_motor.run_to_rel_pos(position_sp=inches, speed_sp=speed)\n right_motor.run_to_rel_pos(position_sp=inches, speed_sp=speed)\n left_motor.wait_while(ev3.Motor.STATE_RUNNING)\n right_motor.wait_while(ev3.Motor.STATE_RUNNING)\ndef backward_seconds(seconds, speed, stop_action):\n \"\"\" Calls forward_seconds with negative speeds to achieve backward motion. \"\"\"\n left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n left_motor.run_forever(speed_sp=-(speed * 8))\n right_motor.run_forever(speed_sp=-(speed * 8))\n time.sleep(seconds)\n left_motor.stop()\n right_motor.stop()\ndef backward_by_time(inches, speed, stop_action):\n \"\"\" Calls forward_by_time with negative speeds to achieve backward motion. 
\"\"\"\n left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n time = (inches/speed)*4.5\n left_motor.run_timed(speed_sp=-speed, time_sp=time)\n right_motor.run_timed(speed_sp=-speed, time_sp=time)\ndef backward_by_encoders(inches, speed, stop_action):\n \"\"\" Calls forward_by_encoders with negative speeds to achieve backward motion. \"\"\"\n left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n left_motor.run_to_rel_pos(position_sp=-inches, speed_sp=speed)\n right_motor.run_to_rel_pos(position_sp=-inches, speed_sp=speed)\n left_motor.wait_while(ev3.Motor.STATE_RUNNING)\n right_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n\ntest_forward_backward()","sub_path":"sandbox_motors_new/person1_motors.py","file_name":"person1_motors.py","file_ext":"py","file_size_in_byte":4975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"649178491","text":"import maya.cmds as mc\n\ndrivers = ['pSphere1', 'pSphere2', 'pSphere3']\ndriven = 'pCube1'\n\ndriven_start = 2.0\ndriven_end = mc.getAttr('{}.ty'.format(driven))\n\nbw = mc.createNode('blendWeighted', n='test_BW')\n\ndriver_weight = (driven_end - driven_start) / len(drivers)\n\nfor index, driver in enumerate(drivers):\n driver_value = mc.getAttr('{}.ty'.format(driver))\n mc.setAttr('{}.weight[{}]'.format(bw, index), driver_weight / driver_value)\n mc.connectAttr('{}.ty'.format(driver), '{}.input[{}]'.format(bw, index))\n \nmc.setAttr('{}.weight[{}]'.format(bw, len(drivers)), driven_start)\nmc.setAttr('{}.input[{}]'.format(bw, len(drivers)), 1.0)\n\nmc.connectAttr('{}.output'.format(bw), '{}.ty'.format(driven))","sub_path":"maya/tools/poseDriver.py","file_name":"poseDriver.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"615313633","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n# Complete the timeInWords function below.\ndef timeInWords(h, m):\n nums = [ \"zero\", \"one\", \"two\", \"three\", \"four\", \n \"five\", \"six\", \"seven\", \"eight\", \"nine\", \n \"ten\", \"eleven\", \"twelve\", \"thirteen\", \n \"fourteen\", \"fifteen\", \"sixteen\", \"seventeen\", \n \"eighteen\", \"nineteen\", \"twenty\", \"twenty one\", \n \"twenty two\", \"twenty three\", \"twenty four\", \n \"twenty five\", \"twenty six\", \"twenty seven\", \n \"twenty eight\", \"twenty nine\"]\n if (m == 0):\n return nums[h]+\" o' clock\" \n \n elif (m == 1):\n return \"one minute past \"+nums[h] \n \n elif (m == 59):\n return \"one minute to \"+nums[(h % 12) + 1] \n \n elif (m == 15): \n return \"quarter past \"+ nums[h] \n \n elif (m == 30):\n return \"half past \"+nums[h] \n \n elif (m == 45):\n return \"quarter to \" + nums[(h % 12) + 1] \n \n elif (m <= 30):\n return nums[m]+\" minutes past \"+nums[h]\n \n elif (m > 30):\n return nums[60-m]+\" minutes to \"+nums[(h % 12) + 1]\n # d = { 0 : 'zero', 1 : 'one', 2 : 'two', 3 : 'three', 4 : 'four', 5 :'five',\n # 6 : 'six', 7 : 'seven', 8 : 'eight', 9 : 'nine', 10 : 'ten',\n # 11 : 'eleven', 12 : 'twelve', 13 : 'thirteen', 14 : 'fourteen',\n # 15 : 'fifteen', 16 : 'sixteen', 17 : 'seventeen', 18 : 'eighteen',\n # 19 : 'nineteen', 20 : 'twenty',\n # 30 : 'thirty', 40 : 'forty', 50 : 'fifty', 60 : 'sixty',\n # 70 : 'seventy', 80 : 'eighty', 90 : 'ninety' }\n # if m==15:\n # time = 'quarter past '+d[h]\n # return time\n # elif m==0:\n # time = d[h]+' o\\' clock'\n # return time\n 
# elif m==45:\n # time = 'quarter to '+d[h+1]\n # return time\n # elif m==30:\n # time = 'half past '+d[h+1]\n # elif 0=20 and m%10==0:\n # time = d[m]+' minutes past '+d[h]\n # return time\n # elif m>=20 and m%10!=0:\n # time = d[m]+' '+d[m%10]+' minutes past '+d[h]\n # return time\n # elif 3020:\n # time = d[temp]+' '+d[temp%10]+' minutes to '+d[h+1]\n # return time\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n h = int(input())\n\n m = int(input())\n\n result = timeInWords(h, m)\n\n fptr.write(result + '\\n')\n\n fptr.close()\n","sub_path":"Implementations-Python/the_time_in_words.py","file_name":"the_time_in_words.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"476272796","text":"from Exception.BaseException.SDKException import SDKException\n\nclass SDKUnsupportedRequestException(SDKException):\n\n\t_baseMessage = 'Unsupported request data'\n\n\tdef __init__(self, message = None):\n\t\tif isinstance(message, list):\n\t\t\tmessage = ', '.join(['`' + el + '`' for el in message])\n\n\t\tif message is None:\n\t\t\tmessage = self._baseMessage\n\t\telse:\n\t\t\tmessage = self._baseMessage + ' - invalid ' + message\n\n\t\tsuper().__init__(message)\n","sub_path":"leantesting/Exception/SDKUnsupportedRequestException.py","file_name":"SDKUnsupportedRequestException.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"293179492","text":"from django.forms import ModelForm\nfrom dal import autocomplete\nfrom .models import ModelOne, ModelTwo, MasterModel\n\n\nclass Form1(ModelForm):\n class Meta:\n model = MasterModel\n fields = ('name', 'modelone')\n widgets = {\n 'modelone': autocomplete.ModelSelect2Multiple(\n url='modelone-autocomplete')\n }\n\n\nclass Form2(ModelForm):\n class Meta:\n model = MasterModel\n fields = ('modeltwo', )\n widgets = {\n 'modeltwo': autocomplete.ModelSelect2Multiple(\n url='modeltwo-autocomplete'),\n }\n","sub_path":"test_project/select2_outside_admin_multiple/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"414610588","text":"\"\"\"\n -----------------------------------------------------------------------------------------------------------\n Package: AequilibraE\n\n Name: Network preparation\n Purpose: Loads GUI for preparing networks (extracting nodes A and B from links)\n\n Original Author: Pedro Camargo (c@margo.co)\n Contributors:\n Last edited by: Pedro Camargo\n\n Website: www.AequilibraE.com\n Repository: https://github.com/AequilibraE/AequilibraE\n\n Created: 2014-03-19\n Updated: 21/12/2016\n Copyright: (c) AequilibraE authors\n Licence: See LICENSE.TXT\n -----------------------------------------------------------------------------------------------------------\n \"\"\"\n\nfrom qgis.core import *\nimport qgis\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nfrom PyQt4 import uic\nfrom qgis.gui import QgsMapLayerProxyModel\n\nimport sys\nfrom ..common_tools.global_parameters import *\nfrom ..common_tools.auxiliary_functions import *\nfrom ..common_tools import ReportDialog\n\nfrom Network_preparation_procedure import NetworkPreparationProcedure\n\nsys.modules['qgsmaplayercombobox'] = qgis.gui\nsys.modules['qgsfieldcombobox'] = qgis.gui\nFORM_CLASS, _ = 
uic.loadUiType(os.path.join(os.path.dirname(__file__), 'forms/ui_network_preparation.ui'))\n\n\nclass NetworkPreparationDialog(QDialog, FORM_CLASS):\n def __init__(self, iface):\n QDialog.__init__(self)\n self.iface = iface\n self.setupUi(self)\n\n self.filename = False\n self.new_layer = False\n self.radioUseNodes.clicked.connect(self.uses_nodes)\n self.radioNewNodes.clicked.connect(self.uses_nodes)\n\n self.cbb_node_layer.currentIndexChanged.connect(self.set_columns_nodes)\n\n self.pushOK.clicked.connect(self.run)\n self.pushClose.clicked.connect(self.exit_procedure)\n\n # We load the line and node layers existing in our canvas\n self.cbb_line_layer.setFilters(QgsMapLayerProxyModel.LineLayer)\n self.cbb_node_layer.setFilters(QgsMapLayerProxyModel.PointLayer)\n\n # loads default path from parameters\n self.path = standard_path()\n self.uses_nodes()\n self.set_columns_nodes()\n\n def run_thread(self):\n QObject.connect(self.worker_thread, SIGNAL(\"ProgressValue( PyQt_PyObject )\"), self.progress_value_from_thread)\n QObject.connect(self.worker_thread, SIGNAL(\"ProgressText( PyQt_PyObject )\"), self.progress_text_from_thread)\n QObject.connect(self.worker_thread, SIGNAL(\"ProgressMaxValue( PyQt_PyObject )\"), self.progress_range_from_thread)\n QObject.connect(self.worker_thread, SIGNAL(\"jobFinished( PyQt_PyObject )\"), self.job_finished_from_thread)\n self.worker_thread.start()\n self.show()\n\n def progress_range_from_thread(self, val):\n self.progressbar.setRange(0, val)\n\n def progress_value_from_thread(self, value):\n self.progressbar.setValue(value)\n\n def progress_text_from_thread(self, value):\n self.progress_label.setText(value)\n\n def set_columns_nodes(self):\n self.cbb_node_fields.clear()\n if self.cbb_node_layer.currentIndex() >= 0:\n layer = get_vector_layer_by_name(self.cbb_node_layer.currentText())\n self.cbb_node_fields.setLayer(layer)\n\n def uses_nodes(self):\n for_creating_nodes = [self.OutNodes, self.label_9, self.np_node_start, self.label_3]\n for_using_existing_nodes = [self.cbb_node_layer, self.cbb_node_fields, self.label_2, self.label_4]\n\n if self.radioUseNodes.isChecked():\n for i in for_creating_nodes:\n i.setVisible(False)\n for i in for_using_existing_nodes:\n i.setVisible(True)\n\n self.cbb_node_layer.clear()\n else:\n for i in for_creating_nodes:\n i.setVisible(True)\n for i in for_using_existing_nodes:\n i.setVisible(False)\n\n self.cbb_node_layer.clear()\n self.cbb_node_layer.hideEvent\n self.np_node_start.setEnabled(True)\n\n def job_finished_from_thread(self, success):\n\n if self.worker_thread.error is not None:\n qgis.utils.iface.messageBar().pushMessage(\"Node layer error: \", self.worker_thread.error, level=3)\n else:\n QgsMapLayerRegistry.instance().addMapLayer(self.worker_thread.new_line_layer)\n if self.worker_thread.new_node_layer:\n QgsMapLayerRegistry.instance().addMapLayer(self.worker_thread.new_node_layer)\n self.exit_procedure()\n if self.worker_thread.report:\n dlg2 = ReportDialog(self.iface, self.worker_thread.report)\n dlg2.show()\n dlg2.exec_()\n\n def run(self):\n if self.radioUseNodes.isChecked():\n self.pushOK.setEnabled(False)\n self.worker_thread = NetworkPreparationProcedure(qgis.utils.iface.mainWindow(), self.cbb_line_layer.currentText(),\n self.OutLinks.text(), self.cbb_node_layer.currentText(),\n self.cbb_node_fields.currentText())\n self.run_thread()\n\n else:\n self.pushOK.setEnabled(False)\n self.worker_thread = NetworkPreparationProcedure(qgis.utils.iface.mainWindow(), self.cbb_line_layer.currentText(),\n 
self.OutLinks.text(), new_node_layer=self.OutNodes.text(),\n node_start = int(self.np_node_start.text()))\n self.run_thread()\n\n def exit_procedure(self):\n self.close()\n\n","sub_path":"network/network_preparation_dialog.py","file_name":"network_preparation_dialog.py","file_ext":"py","file_size_in_byte":5608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"173480964","text":"#!/usr/bin/env python\n\"\"\"GeoJSON service for HUC12 data\"\"\"\nimport json\nimport cgi\nimport datetime\n\nimport memcache\nfrom pyiem.util import get_dbconn, ssw\n\n\ndef do(huc12, mode):\n \"\"\"Do work\"\"\"\n pgconn = get_dbconn('idep')\n cursor = pgconn.cursor()\n utcnow = datetime.datetime.utcnow()\n if mode == 'daily':\n cursor.execute(\"\"\"\n SELECT valid, avg_loss * 4.463, avg_delivery * 4.463,\n qc_precip / 25.4, avg_runoff / 25.4, 1, 1, 1, 1\n from results_by_huc12 where huc_12 = %s and scenario = 0 ORDER\n by valid ASC\n \"\"\", (huc12, ))\n else:\n cursor.execute(\"\"\"\n SELECT extract(year from valid) as yr,\n sum(avg_loss) * 4.463,\n sum(avg_delivery) * 4.463,\n sum(qc_precip) / 25.4,\n sum(avg_runoff) / 25.4,\n sum(case when avg_loss > 0 then 1 else 0 end),\n sum(case when avg_delivery > 0 then 1 else 0 end),\n sum(case when qc_precip > 0 then 1 else 0 end),\n sum(case when avg_runoff > 0 then 1 else 0 end)\n from results_by_huc12 where huc_12 = %s and scenario = 0\n GROUP by yr ORDER by yr ASC\n \"\"\", (huc12, ))\n res = {'results': [],\n 'huc12': huc12,\n 'generation_time': utcnow.strftime(\"%Y-%m-%dT%H:%M:%SZ\")}\n for row in cursor:\n dt = row[0]\n if isinstance(row[0], float):\n dt = datetime.date(int(row[0]), 1, 1)\n res['results'].append(dict(date=dt.strftime(\"%m/%d/%Y\"),\n avg_loss=row[1],\n avg_loss_events=row[5],\n avg_delivery=row[2],\n avg_delivery_events=row[6],\n qc_precip=row[3],\n qc_precip_events=row[7],\n avg_runoff=row[4],\n avg_runoff_events=row[8]))\n return json.dumps(res)\n\n\ndef main():\n \"\"\"Do Fun things\"\"\"\n ssw(\"Content-Type: application/vnd.geo+json\\n\\n\")\n form = cgi.FieldStorage()\n cb = form.getfirst('callback', None)\n huc12 = form.getfirst('huc12', '000000000000')[:12]\n mode = form.getfirst('mode', 'daily')\n\n mckey = (\"/geojson/huc12_events/%s/%s\"\n ) % (huc12, mode)\n mc = memcache.Client(['iem-memcached:11211'], debug=0)\n res = mc.get(mckey)\n if not res:\n res = do(huc12, mode)\n mc.set(mckey, res, 15)\n\n if cb is None:\n ssw(res)\n else:\n ssw(\"%s(%s)\" % (cb, res))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"htdocs/geojson/huc12_events.py","file_name":"huc12_events.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"235223504","text":"# Jinwei Gu\r\n# 2016/7/18\r\n\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nimport math\r\nimport time\r\n\r\nimport keras \r\n\r\nfrom keras.callbacks import Callback \r\nfrom keras import backend as K\r\n\r\n#--------------------------------------------------------------------------------\r\ndef load_config_file(config_file):\r\n import yaml\r\n with open(config_file,'r') as f:\r\n cfg = yaml.load(f)\r\n return cfg\r\n\r\ndef get_logger_filename(prefix):\r\n a = time.localtime()\r\n filename = '%d-%02d-%02d-%02d:%02d:%02d.log'%(a.tm_year, a.tm_mon,\r\n a.tm_mday, a.tm_hour, a.tm_min, a.tm_sec)\r\n return prefix+'-'+filename\r\n\r\ndef init_logger(logfilename):\r\n import logging\r\n logger = 
logging.getLogger()\r\n\r\n logger.setLevel(logging.INFO)\r\n formatter = logging.Formatter('%(asctime)s - %(message)s')\r\n\r\n fh = logging.FileHandler(logfilename)\r\n fh.setLevel(logging.INFO)\r\n fh.setFormatter(formatter)\r\n logger.addHandler(fh)\r\n\r\n ch = logging.StreamHandler()\r\n ch.setLevel(logging.INFO)\r\n ch.setFormatter(formatter)\r\n logger.addHandler(ch)\r\n\r\n return logger\r\n\r\ndef print_cfg(cfg, logger=None):\r\n if logger:\r\n print = logger.info\r\n print('--------------- config ---------------')\r\n for x in cfg:\r\n print('%s : %s'%(x, cfg[x]))\r\n print('--------------- config ---------------')\r\n\r\n\r\n# convert the convolution weights from theano to tensorflow\r\n# (correlation, similar to caffe) for convolution layers\r\n#\r\n# copied code from \r\n#\r\n# https://github.com/fchollet/keras/wiki/Converting-convolution-kernels-from-Theano-to-TensorFlow-and-vice-versa\r\n#\r\n# Jinwei Gu. 2016/8/7\r\ndef theano2tensorflow(model, weightfile=None):\r\n from keras.utils.np_utils import convert_kernel\r\n import tensorflow as tf\r\n\r\n if weightfile:\r\n model.load_weights(weightfile)\r\n ops = []\r\n for layer in model.layers:\r\n if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D']:\r\n original_w = K.get_value(layer.W)\r\n converted_w = convert_kernel(original_w)\r\n ops.append(tf.assign(layer.W, converted_w).op) \r\n K.get_session().run(ops)\r\n return model\r\n\r\ndef tensorflow2theano(model, weightfile=None):\r\n from keras.utils.np_utils import convert_kernel\r\n\r\n if weightfile:\r\n model.load_weights(weightfile)\r\n for layer in model.layers:\r\n if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D']:\r\n original_w = K.get_value(layer.W)\r\n converted_w = convert_kernel(original_w)\r\n K.set_value(layer.W, converted_w)\r\n return model\r\n\r\ndef save_weights_to_pkl(net, pkl_filename):\r\n \"\"\"\r\n save net weight to pkl file. Currently only save weights for conv\r\n layer and dense layer.\r\n\r\n NOTE: for most applications, we should call net.save_weights() and\r\n save to *.h5 file directly. Often the file is much smaller\r\n \"\"\"\r\n dat={}\r\n for layer in net.layers:\r\n if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D', 'Dense']:\r\n param = [layer.W.get_value(), layer.b.get_value()]\r\n dat[layer.name] = param\r\n\r\n import cPickle\r\n with open(pkl_filename,'w') as f:\r\n cPickle.dump(dat, f)\r\n\r\ndef load_weights_from_pkl(net, pkl_filename):\r\n \"\"\"\r\n load weights from pkl file. 
Currently only support conv layer and\r\n dense layer.\r\n\r\n NOTE: for most applications, we should call net.load_weights() and\r\n load from *.h5 file directly.\r\n \"\"\"\r\n import cPickle\r\n with open(pkl_filename,'r') as f:\r\n dat = cPickle.load(f)\r\n\r\n for layer in net.layers:\r\n if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D', 'Dense']:\r\n if layer.name in dat.keys():\r\n layer.W.set_value(dat[layer.name][0])\r\n layer.b.set_value(dat[layer.name][1])\r\n return net\r\n\r\n\r\n\r\ndef weightfile_to_modelfile(net, weightfile, modelfile):\r\n \"\"\" convert weight file to a model file \"\"\"\r\n net.load_weights(weightfile)\r\n net.save(modelfile)\r\n\r\ndef modelfile_to_weightfile(modelfile, weightfile):\r\n \"\"\" convert a full model file to a weight only file \"\"\"\r\n net = keras.models.load_model(modelfile)\r\n net.save_weights(weightfile)\r\n\r\n\r\nclass MyLogger(keras.callbacks.Callback):\r\n def __init__(self, logfilename, display=1, logger=None):\r\n self.logfilename = logfilename\r\n self.display = display \r\n if logger:\r\n self.logger = logger\r\n else:\r\n self.logger = init_logger(self.logfilename)\r\n print (\"Logger========:\", str(self.logger))\r\n\r\n def on_train_begin(self, logs={}):\r\n #self.model.summary(logger=self.logger)\r\n self.best_val_acc = 1e-10\r\n #self.model.summary()\r\n \r\n def on_batch_end(self, batch, logs={}):\r\n if batch%self.display==0:\r\n lr = K.eval(self.model.optimizer.lr)#.get_value()\r\n self.logger.info('epoch %d batch %d lr %f acc %f'%(self.epoch, batch, lr, logs.get('acc')))\r\n\r\n def on_epoch_end(self, epoch, logs={}):\r\n lr = K.eval(self.model.optimizer.lr)#.get_value()\r\n val_acc = logs.get('val_acc')\r\n if val_acc > self.best_val_acc:\r\n self.best_val_acc = val_acc\r\n #self.logger.info('epoch %d lr %f val_loss %f best_val_loss %f'%(epoch, lr, val_loss, self.best_val_loss))\r\n self.logger.info('epoch %d lr %f val_acc %f best_val_acc %f'%(epoch, lr, val_acc, self.best_val_acc))\r\n\r\n def on_epoch_begin(self, epoch, logs={}):\r\n self.seen = 0\r\n self.epoch = epoch\r\n\r\nclass StepLearningRateScheduler(Callback):\r\n \"\"\"\r\n learning rate is multiplied with lr_decay (e.g., 0.1) every lr_epoch\r\n (e.g., 10). \r\n \"\"\"\r\n def __init__(self, lr, lr_decay, lr_epoch):\r\n super(StepLearningRateScheduler, self).__init__()\r\n self.lr = lr\r\n self.lr_decay = lr_decay\r\n self.lr_epoch = lr_epoch\r\n\r\n def get_lrate(self, epoch):\r\n lrate = self.lr * math.pow(self.lr_decay, math.floor((1+epoch)/self.lr_epoch))\r\n return lrate\r\n\r\n def on_epoch_begin(self, epoch, logs={}):\r\n assert hasattr(self.model.optimizer, 'lr'), \\\r\n 'Optimizer must have a \"lr\" attribute.'\r\n lrate = self.get_lrate(epoch)\r\n K.set_value(self.model.optimizer.lr, lrate)\r\n \r\n\r\nclass VectorLearningRateScheduler(Callback):\r\n \"\"\"\r\n specify a vector for decay ratio and epoch. 
This is more flexible\r\n E.g., lr_decay = [0.1, 0.1, 0.5], lr_epoch=[5, 10, 20]\r\n means the lrate will multiple with 0.1 at the 5th epoch, and then\r\n multiple with 0.1 at the 10-th epoch, and then multiple with 0.5 at\r\n the 20th epoch.\r\n \"\"\"\r\n def __init__(self, lr, lr_decay, lr_epoch):\r\n super(VectorLearningRateScheduler, self).__init__()\r\n self.lr = lr\r\n self.lr_decay = lr_decay\r\n self.lr_epoch = lr_epoch\r\n\r\n self.lr_decay_cumprod = np.cumprod(np.array(lr_decay))\r\n\r\n def on_epoch_begin(self, epoch, logs={}):\r\n assert hasattr(self.model.optimizer, 'lr'), \\\r\n 'Optimizer must have a \"lr\" attribute.'\r\n\r\n k=[n for n,i in enumerate(self.lr_epoch) if i > epoch]\r\n if k==[]:\r\n lrate = self.lr * self.lr_decay_cumprod[-1]\r\n else:\r\n k=k[0]\r\n if k==0:\r\n lrate = self.lr\r\n else:\r\n lrate = self.lr * self.lr_decay_cumprod[k-1]\r\n\r\n K.set_value(self.model.optimizer.lr, lrate)\r\n\r\n\r\ndef my_fit_generator(net, train_data_generator, samples_per_epoch, nb_epoch,\r\n val_data_generator, nb_val_samples, model_filename, logger, lr_scheduler=None):\r\n '''\r\n Simple, single thread routine to do training. The keras\r\n fit_generator sometimes will crash, due to multi-threading. This is\r\n a simple alternative.\r\n\r\n Input:\r\n net -- net model\r\n train_data_generator -- generator to yield X,y minibatch for training\r\n nb_epoch -- how many epochs\r\n samples_per_epoch -- total number of training samples for each epoch\r\n val_data_generator -- generator to yield X,y minibatch for validation\r\n nb_val_samples -- number of validation samples\r\n model_filename -- filename to save the best model (the full model, including the weights, the model, and the optimizer states)\r\n logger -- logger\r\n lr_scheduler -- learning rate schedule function, default None\r\n '''\r\n\r\n best_val_acc = 1e-10\r\n epoch = 0 \r\n\r\n net.summary(logger=logger)\r\n\r\n while epoch < nb_epoch:\r\n # set learning rate if needed\r\n if lr_scheduler:\r\n lr = lr_scheduler.get_lrate(epoch)\r\n net.optimizer.lr.set_value(lr)\r\n else:\r\n lr = net.optimizer.lr.get_value()\r\n\r\n idx = 0\r\n i = 0\r\n while 1:\r\n X,y = next(train_data_generator)\r\n idx += y.shape[0] # batch_size\r\n i += 1\r\n loss = net.train_on_batch(X,y)\r\n if i%20 == 0:\r\n logger.info('epoch %d batch %d lr %f loss %f'%(epoch, i, lr, loss))\r\n\r\n if idx>=samples_per_epoch:\r\n break\r\n\r\n idx=0\r\n i=0\r\n val_loss=0\r\n while 1:\r\n X,y = next(val_data_generator)\r\n idx += y.shape[0] # batch_size\r\n i += 1\r\n val_loss += net.test_on_batch(X,y)\r\n if idx>=nb_val_samples:\r\n break\r\n val_loss/=i\r\n\r\n if val_loss<=best_val_loss:\r\n best_val_loss = val_loss\r\n net.save(model_filename)\r\n\r\n logger.info('epoch %d lr %f val_loss %f best_val_loss %f'%(epoch, lr, val_loss, best_val_loss))\r\n epoch += 1\r\n\r\n\r\ndef my_evaluate_generator(net, val_data_generator, val_samples):\r\n '''\r\n Simple, single thread routine to do evaluation. The keras\r\n evaluate_generator sometimes will crash, due to multi-threading. 
This is\r\n a simple alternative.\r\n\r\n Input:\r\n net -- net model\r\n val_data_generator -- generator to yield X,y minibatch for validation\r\n nb_val_samples -- number of validation samples\r\n '''\r\n \r\n idx=0\r\n i=0\r\n val_loss=0\r\n while 1:\r\n X,y = next(val_data_generator)\r\n idx += y.shape[0]\r\n i += 1\r\n val_loss += net.test_on_batch(X,y)\r\n if idx>=val_samples:\r\n break\r\n #print('%d %d'%(idx,val_samples))\r\n\r\n val_loss/=i\r\n return val_loss\r\n\r\n\r\nfrom multiprocessing import Process, Queue\r\n\r\ndef my_fit_generator_with_prefetch(net, train_data_generator, samples_per_epoch, nb_epoch, \r\n val_data_generator, nb_val_samples, model_filename, logger, lr_scheduler=None):\r\n '''\r\n Two threads routine to do training. The keras\r\n fit_generator sometimes will crash, due to multi-threading. This is\r\n a simple alternative. One for prefetching data\r\n\r\n Input:\r\n net -- net model\r\n train_data_generator -- generator to yield X,y minibatch for training\r\n nb_epoch -- how many epochs\r\n samples_per_epoch -- total number of training samples for each epoch\r\n val_data_generator -- generator to yield X,y minibatch for validation\r\n nb_val_samples -- number of validation samples\r\n model_filename -- filename to save the best model (the full model, including the weights, the model, and the optimizer states)\r\n logger -- logger\r\n lr_scheduler -- learning rate schedule function, default None\r\n '''\r\n\r\n train_blob_queue = Queue(10)\r\n train_prefetch_process = BlobFetcher(train_blob_queue, train_data_generator, logger)\r\n train_prefetch_process.start()\r\n\r\n val_blob_queue = Queue(10)\r\n val_prefetch_process = BlobFetcher(val_blob_queue, val_data_generator, logger)\r\n val_prefetch_process.start()\r\n\r\n # Terminate the child process when the parent exists\r\n def cleanup():\r\n logger.info('Terminating BlobFetcher')\r\n train_prefetch_process.terminate()\r\n val_prefetch_process.terminate()\r\n train_prefetch_process.join()\r\n val_prefetch_process.join()\r\n import atexit\r\n atexit.register(cleanup)\r\n\r\n best_val_loss = 1e+10\r\n epoch = 0 \r\n\r\n net.summary(logger=logger)\r\n\r\n while epoch < nb_epoch:\r\n # set learning rate if needed\r\n if lr_scheduler:\r\n lr = lr_scheduler.get_lrate(epoch)\r\n net.optimizer.lr.set_value(lr)\r\n else:\r\n lr = net.optimizer.lr.get_value()\r\n\r\n idx = 0\r\n i = 0\r\n while 1:\r\n X,y = train_blob_queue.get() \r\n idx += y.shape[0] # batch_size\r\n i += 1\r\n loss = net.train_on_batch(X,y)\r\n if i%20 == 0:\r\n logger.info('epoch %d batch %d lr %f loss %f'%(epoch, i, lr, loss))\r\n\r\n if idx>=samples_per_epoch:\r\n break\r\n\r\n idx=0\r\n i=0\r\n val_loss=0\r\n while 1:\r\n X,y = val_blob_queue.get()\r\n idx += y.shape[0] # batch_size\r\n i += 1\r\n val_loss += net.test_on_batch(X,y)\r\n if idx>=nb_val_samples:\r\n break\r\n val_loss/=i\r\n\r\n if val_loss<=best_val_loss:\r\n best_val_loss = val_loss\r\n net.save(model_filename)\r\n\r\n logger.info('epoch %d lr %f val_loss %f best_val_loss %f'%(epoch, lr, val_loss, best_val_loss))\r\n epoch += 1\r\n\r\n\r\ndef my_evaluate_generator_with_prefetch(net, val_data_generator, val_samples, logger):\r\n '''\r\n Two threads routine to do evaluation. The keras\r\n evaluate_generator sometimes will crash, due to multi-threading. 
This is\r\n a simple alternative.\r\n\r\n Input:\r\n net -- net model\r\n val_data_generator -- generator to yield X,y minibatch for validation\r\n val_samples -- number of validation samples\r\n '''\r\n \r\n val_blob_queue = Queue(10)\r\n val_prefetch_process = BlobFetcher(val_blob_queue, val_data_generator, logger)\r\n val_prefetch_process.start()\r\n\r\n # Terminate the child process when the parent exists\r\n def cleanup():\r\n logger.info('Terminating BlobFetcher')\r\n val_prefetch_process.terminate()\r\n val_prefetch_process.join()\r\n import atexit\r\n atexit.register(cleanup)\r\n\r\n \r\n idx=0\r\n i=0\r\n val_loss=0\r\n while 1:\r\n X,y = val_blob_queue.get()\r\n idx += y.shape[0]\r\n i += 1\r\n val_loss += net.test_on_batch(X,y)\r\n if idx>=val_samples:\r\n break\r\n val_loss/=i\r\n return val_loss\r\n\r\n\r\n \r\nclass BlobFetcher(Process):\r\n def __init__(self,queue,generator,logger):\r\n super(BlobFetcher, self).__init__()\r\n self._queue = queue\r\n self._generator = generator\r\n self._logger=logger\r\n \r\n def run(self):\r\n self._logger.info('BlobFetcher started')\r\n while True:\r\n X,y=next(self._generator)\r\n self._queue.put((X,y))\r\n","sub_path":"NN_train/keras_utils.py","file_name":"keras_utils.py","file_ext":"py","file_size_in_byte":14868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"482768133","text":"from tkinter import *\nfrom functions import *\nimport math\n\n\nclass background:\n def __init__(self, parent):\n background_frame = Frame(parent, bd=0, bg=\"red\", height=600, width=800)\n background_frame.pack()\n self.background_canvas = Canvas(background_frame, bg=\"black\", height=550, width=550, bd=0)\n self.background_canvas.pack()\n self.current_page = standybyPage\n\n self.OBS_dict = {}\n self.border_offset = 10\n self.line_length = 25\n\n self.midpoint = (self.background_canvas.winfo_reqwidth() / 2, self.background_canvas.winfo_reqheight() / 2)\n\n self.screen_canvas_height = self.background_canvas.winfo_reqheight() - 2 * (\n 2 * self.border_offset + self.line_length)\n self.screen_canvas_width = self.background_canvas.winfo_reqwidth() - 2 * (\n 2 * self.border_offset + self.line_length)\n\n def create_button_guide(self, button_number, inp_text):\n screen = self.background_canvas\n y_guide_spacing = screen.winfo_reqwidth() / 6\n x_guide_spacing = screen.winfo_reqheight() / 6\n bar_loc = get_units(button_number)\n y_bar_loc = get_units(button_number - 5)\n\n line_length = self.line_length\n x_line_offset = 0\n y_line_offset = screen.winfo_reqwidth() - (2 * self.border_offset + line_length)\n\n if get_tens(button_number) != 0:\n x_line_offset = screen.winfo_reqheight() - (2 * self.border_offset + line_length)\n y_line_offset = 0\n\n if button_number in (1, 2, 3, 4, 5, 11, 12, 13, 14, 15):\n x_pos0 = ((bar_loc * x_guide_spacing) - x_guide_spacing / 2)\n x_pos1 = x_pos0\n y_pos0 = self.border_offset + x_line_offset\n y_pos1 = y_pos0 + line_length\n\n line1 = screen.create_line(x_pos0, y_pos0, x_pos1, y_pos1, fill=\"white\", width=4)\n line2 = screen.create_line(x_pos0 + x_guide_spacing, y_pos0, x_pos1 + x_guide_spacing, y_pos1, fill=\"white\",\n width=4)\n\n midpoint = ((x_pos0 + x_guide_spacing / 2), (y_pos0 + y_pos1) / 2)\n text = screen.create_text(midpoint, text=inp_text, fill=\"white\")\n else:\n x_pos0 = self.border_offset + y_line_offset\n x_pos1 = x_pos0 + line_length\n y_pos0 = (((y_bar_loc) * y_guide_spacing) - y_guide_spacing / 2)\n y_pos1 = y_pos0\n\n line1 = screen.create_line(x_pos0, 
y_pos0, x_pos1, y_pos1, fill=\"white\", width=4)\n line2 = screen.create_line(x_pos0, y_pos0 + y_guide_spacing, x_pos1, y_pos1 + y_guide_spacing, fill=\"white\",\n width=4)\n\n midpoint = ((x_pos0 + x_pos1) / 2, (y_pos0 + y_guide_spacing / 2))\n text = screen.create_text(midpoint, text=inp_text, fill=\"white\")\n\n self.OBS_dict[button_number] = (text, line1, line2)\n\n return (text, line1, line2)\n\n def clear_canvas(self):\n\n for x in self.OBS_dict.items():\n self.background_canvas.delete(x)\n\n def change_page(self, window_class):\n\n self.current_page = window_class(self)\n\n\nclass mfdPage():\n def __init__(self, master, button_dict, **kwargs):\n master.clear_canvas()\n self.active_buttons = {}\n self.updatable_widgets = {}\n self.updatable_widget_ids = {}\n\n for x, y in button_dict.items(): # Create all buttons on init\n self.active_buttons[x] = y\n master.create_button_guide(x, y)\n\n self.screen = Canvas(bg=\"black\", bd=0, height=master.screen_canvas_height, width=master.screen_canvas_width,\n highlightthickness=0)\n self.screen_id = master.background_canvas.create_window(master.midpoint, window=self.screen)\n\n self.midpoint = (self.screen.winfo_reqwidth() / 2, self.screen.winfo_reqheight() / 2)\n\n def relx(self, ratio):\n relx = self.midpoint[0] + (self.screen.winfo_reqwidth() * (ratio - 0.5))\n\n return relx\n\n def rely(self, ratio):\n rely = self.midpoint[1] + (self.screen.winfo_reqheight() * (ratio - 0.5))\n\n return rely\n\n def register_updatable_widget(self, widget, *args):\n id = len(self.updatable_widgets)\n self.updatable_widgets[id] = args\n self.updatable_widget_ids[id] = widget\n\n\n#\n# PAGES BELOW THIS LINE\n#\n\nclass standybyPage(mfdPage):\n def __init__(self, master):\n mfdPage.__init__(self, master, {})\n\n\nclass elevPage(mfdPage):\n def __init__(self, master):\n active_buttons = {11: \"MAP\", 12: \"HSI\", 13: \"DCRS\", 15: \"RWI\", 16:\"RNG\",17:\"TAC\",18:\"ILS\",1:\"WPT\",20:\"FLT\"}\n mfdPage.__init__(self, master, active_buttons)\n attitudeIndicator(self, (self.relx(0.55), self.rely(0.55)), ('pitch', 'roll', 'hdg'))\n updateableTextbox(self, (self.relx(0.85), self.rely(0.06)),('BRG'),height = 10)\n updateableTextbox(self, (self.relx(0.85),self.rely(0.1)),('NM'),height = 10)\n updateableTextbox(self, (self.relx(0.85),self.rely(0.14)),('MIN'),height = 10)\n updateableTextbox(self, (self.relx(0.5),self.rely(0.06)),('hdg'),height = 10)\n updateableTextbox(self, (self.relx(0.2),self.rely(0.06)),('CRS'),height = 10)\n updateableTextbox(self, (self.relx(0.2),self.rely(0.1)),('GS'),height = 10)\n\n self.screen.create_text((self.relx(0.95), self.rely(0.06)), text=\"BRG\", fill=\"green\")\n self.screen.create_text((self.relx(0.95), self.rely(0.1)), text=\"NM\", fill=\"green\")\n self.screen.create_text((self.relx(0.95), self.rely(0.14)), text=\"MIN\", fill=\"green\")\n self.screen.create_text((self.relx(0.4), self.rely(0.1)), text=\"CRS\", fill=\"green\")\n self.screen.create_text((self.relx(0.7), self.rely(0.1)), text=\"GS\", fill=\"green\")\n\nclass enginePage(mfdPage):\n def __init__(self, master):\n active_buttons = {11: \"MAP\", 12: \"HSI\", 13: \"ADI\", 14: \"\", 16: \"DISP\"}\n mfdPage.__init__(self, master, active_buttons)\n rotaryDial(self, (self.relx(0.25), self.rely(0.75)), \"engine_rpm_percentage\", unit_text=\"% RPM\", )\n rotaryDial(self, (self.relx(0.75), self.rely(0.75)), 'egt', unit_text=\"°C TGT\")\n updateableTextbox(self, (self.relx(0.4), self.rely(0.15)), 'port_fuel', boxed=True)\n updateableTextbox(self, (self.relx(0.7), self.rely(0.15)), 
'stbd_fuel', boxed=True)\n updateableTextbox(self, (self.relx(0.55), self.rely(0.32)), 'total_fuel', boxed=True)\n updateableTextbox(self, (self.relx(0.8), self.rely(0.35)), 'fuel_flow', boxed=True)\n self.screen.create_line(self.relx(0), self.rely(0.5), self.relx(1), self.rely(0.5), fill=\"white\", width=3)\n self.screen.create_text((self.relx(0.55), self.rely(0.37)), text=\"TOTAL\", fill=\"green\")\n self.screen.create_text((self.relx(0.7), self.rely(0.35)), text=\"F/F\", fill=\"green\")\n self.screen.create_text((self.relx(0.93), self.rely(0.35)), text=\"LBS/MIN\", fill=\"green\")\n self.screen.create_text((self.relx(0.4), self.rely(0.2)), text=\"PORT\", fill=\"green\")\n self.screen.create_text((self.relx(0.7), self.rely(0.2)), text=\"STBD\", fill=\"green\")\n self.screen.create_text((self.relx(0.55), self.rely(0.08)), text=\"FUEL CONTENTS LBS\", fill=\"green\")\n\n\nclass storesPage(mfdPage):\n def __init__(self, master):\n active_buttons = {1: \"AIM\", 2: \"BFF\", 3: \"GUN\"}\n mfdPage.__init__(self, master, active_buttons)\n\n\n#\n# CUSTOM WIDGETS BELOW THIS LINE\n#\n\nclass rotaryDial():\n def __init__(self, master, coord, import_vars, **kwargs):\n\n ratio = 0\n # Default values\n self.params = {}\n self.params[\"width\"] = 180\n self.params[\"height\"] = 180\n self.params[\"bar_color\"] = \"light green\"\n self.params[\"outline_color\"] = \"light grey\"\n self.params[\"redline\"] = -1\n self.params[\"value_text\"] = ratio * 100\n self.params[\"import_vars\"] = import_vars\n self.params[\"unit_text\"] = \"%\"\n self.params[\"bar_width\"] = 10\n self.params[\"outline_width\"] = 1.5\n value_extent = -270 * ratio\n\n # Custom values\n if kwargs is not None:\n for arg, val in kwargs.items():\n self.params[arg] = val\n\n # Create Subcanvas and give it a midpoint\n sub_canvas = Canvas(bg=\"black\", width=self.params[\"width\"], height=self.params[\"height\"], highlightthickness=0)\n master.screen.create_window(coord, window=sub_canvas, anchor=\"center\")\n midpoint = (sub_canvas.winfo_reqwidth() / 2, sub_canvas.winfo_reqheight() / 2)\n\n self.sub_canvas = sub_canvas\n\n # Draw dial\n coord = self.params[\"bar_width\"], self.params[\"bar_width\"], sub_canvas.winfo_reqwidth() - self.params[\n \"bar_width\"], sub_canvas.winfo_reqheight() - self.params[\"bar_width\"]\n sub_canvas.create_arc(coord, start=-133, extent=-274, outline=self.params[\"outline_color\"], style=\"arc\",\n width=self.params[\"bar_width\"] + 2 * self.params[\"outline_width\"])\n sub_canvas.create_arc(coord, start=-135, extent=-270, outline=\"black\", style=\"arc\",\n width=self.params[\"bar_width\"] - 1)\n self.value_dial_id = sub_canvas.create_arc(coord, start=-135, extent=value_extent,\n outline=self.params[\"bar_color\"], style=\"arc\",\n width=self.params[\"bar_width\"])\n\n # Create text items\n self.value_text_id = sub_canvas.create_text((midpoint[0], midpoint[1] - 20), text=self.params[\"value_text\"],\n fill=\"green\")\n sub_canvas.create_text((midpoint[0], midpoint[1] + 15), text=self.params[\"unit_text\"], fill=\"green\")\n\n master.register_updatable_widget(self, self.params[\"import_vars\"])\n\n def update(self, vars, **kwargs):\n percentage = float(vars.get(self.params[\"import_vars\"]))\n myExtent = self.get_extent(percentage)\n self.params[\"value_text\"] = percentage\n self.sub_canvas.itemconfig(self.value_dial_id, extent=myExtent)\n self.sub_canvas.itemconfig(self.value_text_id, text=int(percentage * 100))\n\n def get_extent(self, percentage):\n return -270 * percentage\n\n\nclass updateableTextbox():\n 
def __init__(self, master, coord, update_variable, **kwargs):\n\n self.params = {}\n self.params[\"width\"] = 50\n self.params[\"height\"] = 20\n self.params[\"color\"] = \"white\"\n self.params[\"disp_text\"] = 'NaN'\n self.params[\"update_variable\"] = update_variable\n self.params[\"boxed\"] = False\n self.params[\"justify\"] = \"right\"\n self.params[\"font\"] = \"arial 10\"\n\n # Custom values\n if kwargs is not None:\n for arg, val in kwargs.items():\n self.params[arg] = val\n\n if self.params[\"boxed\"]:\n highThick = 2\n else:\n highThick = 0\n\n self.sub_canvas = Canvas(bg=\"black\", width=self.params[\"width\"], height=self.params[\"height\"],\n highlightthickness=highThick)\n master.screen.create_window(coord, window=self.sub_canvas, anchor=\"center\")\n midpoint = (self.sub_canvas.winfo_reqwidth() / 2, self.sub_canvas.winfo_reqheight() / 2)\n\n self.text_id = self.sub_canvas.create_text((midpoint[0], midpoint[1]), text=self.params[\"disp_text\"],\n fill=self.params[\"color\"], justify=self.params[\"justify\"] ,font=self.params[\"font\"])\n\n master.register_updatable_widget(self, self.params[\"update_variable\"])\n\n def update(self, vars, **kwargs):\n self.params[\"disp_text\"] = vars.get(self.params[\"update_variable\"])\n self.sub_canvas.itemconfig(self.text_id, text=self.params[\"disp_text\"])\n self.box_update()\n\n def box_update(self):\n update_decider = self.params[\"boxed\"]\n if update_decider:\n self.sub_canvas.highlightthickness = 0\n else:\n self.sub_canvas.highlightthickness = 2\n\n\nclass attitudeIndicator():\n def __init__(self, master, coord, update_variable, **kwargs):\n\n self.params = {}\n self.params[\"width\"] = master.relx(0.9)\n self.params[\"height\"] = master.rely(0.9)\n self.params[\"color\"] = \"white\"\n self.params[\"disp_text\"] = 'NaN'\n self.params[\"update_variable\"] = update_variable\n self.params[\"angleLines\"] = {}\n self.params[\"angleText\"] = {}\n self.params[\"headingLines\"] = {}\n self.params[\"hdg_text\"] = {}\n self.params[\"pitch_range\"] = 45\n self.params[\"hdg_range\"] = 35\n\n # Custom values\n if kwargs is not None:\n for arg, val in kwargs.items():\n self.params[arg] = val\n\n self.sub_canvas = Canvas(bg=\"black\", width=self.params[\"width\"], height=self.params[\"height\"],\n highlightthickness=0)\n master.screen.create_window(coord, window=self.sub_canvas, anchor=\"center\")\n midpoint = (self.sub_canvas.winfo_reqwidth() / 2, self.sub_canvas.winfo_reqheight() / 2)\n self.midpoint = midpoint\n\n self.radii = ((self.params[\"height\"] - 150) / 2)\n\n bounding_box = (\n midpoint[0] - self.radii, midpoint[1] - self.radii,\n midpoint[0] + self.radii, midpoint[1] + self.radii)\n\n\n\n self.sub_canvas.create_oval(bounding_box, fill=\"#F2BF18\", outline=\"white\", width=5)\n self.sky = self.sub_canvas.create_arc(bounding_box, fill=\"#1873F2\", outline=\"white\", style=CHORD, width=3)\n\n self.create_pitch_lines()\n self.create_heading_lines()\n\n self.sub_canvas.create_rectangle((midpoint[0] - 5, midpoint[1] - 5, midpoint[0] + 5, midpoint[1] + 5),\n outline=\"black\")\n self.sub_canvas.create_line(midpoint[0] - 100, midpoint[1], midpoint[0] - 30, midpoint[1], midpoint[0] - 30,\n midpoint[1] + 30, fill=\"black\")\n self.sub_canvas.create_line(midpoint[0] + 100, midpoint[1], midpoint[0] + 30, midpoint[1], midpoint[0] + 30,\n midpoint[1] + 30, fill=\"black\")\n\n for each in self.params[\"update_variable\"]:\n master.register_updatable_widget(self, self.params[\"update_variable\"])\n\n def update(self, vars, **kwargs):\n sky_vals = 
self.get_chord_start(vars[\"pitch\"], vars[\"roll\"])\n self.sub_canvas.itemconfig(self.sky, start=sky_vals[0], extent=sky_vals[1])\n self.update_angle_lines(vars[\"pitch\"], vars[\"roll\"])\n self.update_heading_lines(vars[\"hdg\"])\n\n def get_chord_start(self, pitch, roll):\n pitch_range = self.params[\"pitch_range\"]\n roll += 180 # Flip sky\n\n if abs(pitch) < pitch_range:\n roll_midline = roll - 90\n start = degrees(acos(-pitch / pitch_range)) + roll_midline\n extent = -2 * (start - roll_midline)\n return (start, extent)\n elif pitch >= pitch_range:\n return (0, -359.99)\n elif pitch < pitch_range:\n return (0, 0)\n\n def get_horizon_midpoint(self, pitch, roll):\n mid_delta = pol2cart(self.radii * (pitch / self.params[\"pitch_range\"]), 180 - roll)\n mid = (self.midpoint[0] + mid_delta[0], self.midpoint[1] + mid_delta[1])\n return mid\n\n def create_pitch_lines(self):\n for each in range(-90, 90):\n if each % 10 == 0:\n self.params[\"angleLines\"][each] = self.sub_canvas.create_line((0, 0, 0, 0),\n width=2, fill=\"white\")\n elif each % 5 == 0:\n self.params[\"angleLines\"][each] = self.sub_canvas.create_line((0, 0, 0, 0),\n width=2, fill=\"white\")\n else:\n pass\n\n def update_angle_lines(self, pitch, roll):\n # Refresh or create the pitch and roll angle lines\n angleLines = self.params[\"angleLines\"]\n horiz = self.get_horizon_midpoint(pitch, roll + 90)\n for lineIncrement, id in angleLines.items():\n if self.params[\"pitch_range\"] * 0.95 - pitch > lineIncrement > -self.params[\"pitch_range\"] * 0.95 - pitch: # If the line can be displayed on screen\n\n self.sub_canvas.itemconfig(id, fill=\"white\")\n\n if lineIncrement % 10 == 0:\n pitch_line_length = 50\n else:\n pitch_line_length = 20\n\n line_offset = pol2cart(self.radii * (lineIncrement / self.params[\"pitch_range\"]), 90 - roll)\n\n midx = line_offset[0] + horiz[0]\n midy = line_offset[1] + horiz[1]\n\n self.sub_canvas.coords(id, line_coords(pitch_line_length, (midx, midy), -roll))\n # print(\"Adjusting the %s pitch line\"%lineIncrement)\n pass\n else:\n # \"Switch off\" the bar\n self.sub_canvas.itemconfigure(id, fill=\"\")\n pass\n return\n\n def create_heading_lines(self):\n for each in range(0, 360):\n if each % 30 == 0:\n self.params[\"headingLines\"][each] = self.sub_canvas.create_line((0, 0, 0, 0),\n width=3, fill=\"white\")\n\n self.params[\"hdg_text\"][each] = self.sub_canvas.create_text((0, 0), text=str(each),fill=\"white\",font=(\"Helvetica\", 10,\"bold\"))\n elif each % 5 == 0:\n self.params[\"headingLines\"][each] = self.sub_canvas.create_line((0, 0, 0, 0),\n width=2, fill=\"white\")\n else:\n pass\n\n def update_heading_lines(self, hdg):\n heading_lines = self.params[\"headingLines\"]\n\n for headingVal, id in heading_lines.items():\n if (hdg + self.params[\"hdg_range\"] > headingVal > hdg - self.params[\"hdg_range\"]):\n self.sub_canvas.itemconfig(id, fill=\"light green\")\n\n hdg_angle = headingVal - hdg\n\n if headingVal % 30 == 0: # If the heading line is a 30 degree division, update the text as well\n line_length = 25\n text_id = self.params[\"hdg_text\"][headingVal]\n text_offset = pol2cart(self.radii + 22, (hdg_angle * 2) - 90)\n self.sub_canvas.itemconfigure(text_id, fill=\"light green\")\n self.sub_canvas.coords(text_id,(self.midpoint[0]+text_offset[0],self.midpoint[1]+text_offset[1]))\n elif headingVal % 10 == 0:\n line_length = 17\n else:\n line_length = 12\n\n\n self.sub_canvas.coords(id, radius_line_coords(self.midpoint,self.radii+60,(hdg_angle*2)-90,line_length))\n\n else:\n 
self.sub_canvas.itemconfigure(id, fill=\"\")\n try:\n self.sub_canvas.itemconfigure(self.params[\"hdg_text\"][headingVal],fill=\"\")\n pass\n except:\n pass\n pass","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":19185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"379977034","text":"\"\"\"empty message\n\nRevision ID: 5f00836ba009\nRevises: 2719f8dfddc8\nCreate Date: 2021-05-03 21:19:28.728259\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"5f00836ba009\"\ndown_revision = \"2719f8dfddc8\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n \"student_course_record\",\n sa.Column(\"student_id\", sa.Integer(), nullable=False),\n sa.Column(\"course_id\", sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(\n [\"course_id\"],\n [\"course.id\"],\n ),\n sa.ForeignKeyConstraint(\n [\"student_id\"],\n [\"student.id\"],\n ),\n sa.PrimaryKeyConstraint(\"student_id\", \"course_id\"),\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table(\"student_course_record\")\n # ### end Alembic commands ###\n","sub_path":"app/migrations/versions/5f00836ba009_.py","file_name":"5f00836ba009_.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"409734538","text":"from LoadAddresses import load_contacts, get_address_list\nfrom GeoLocationModule import load_geo_location_data, save_geo_location_data, parse_geo_location_data, address_in_geo_data, retrieve_geo_location_data\nfrom json import load\n\nwith open(\"c:\\\\src\\\\PVData\\\\pv-geo-location-config.json\") as config_file:\n config_json = load(config_file)\n\ncity_state = config_json[\"configuration\"][\"city_state\"]\napi_key = config_json[\"configuration\"][\"api_key\"]\n\nall_geo_data = load_geo_location_data(\"c:\\\\src\\\\PVData\\\\ContactGeoData.csv\")\n\nall_contacts = load_contacts(\"c:\\\\src\\\\PVData\\\\ContactList.csv\")\nall_addresses = get_address_list(all_contacts)\n\nnew_address_count = 0\nfor address in all_addresses:\n if address_in_geo_data(address, all_geo_data):\n print(f\"Existing address: {address}\")\n else:\n print(f\"New address: {address}\")\n new_address_count += 1\n json_response = retrieve_geo_location_data(address + \", \" + city_state, api_key)\n all_geo_data.append(parse_geo_location_data(address, json_response))\n\n # Safety check to make sure I don't try to look up too many new addresses\n if new_address_count > 2:\n break;\n\nprint(f\"Found {new_address_count} new addresses\")\n\nsave_geo_location_data(all_geo_data, \"c:\\\\src\\\\PVData\\\\ContactGeoDataNew.csv\")\n\n","sub_path":"GetMapCoordinates/GetMapCoordinates.py","file_name":"GetMapCoordinates.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"535073719","text":"import keras\nfrom keras.models import Sequential, load_model, Model\nfrom keras.layers import Input, Add, Multiply, Dense, MaxPooling3D, BatchNormalization, Reshape\nfrom keras.layers.convolutional import Conv1D, Conv2D, Conv3D, Convolution2D\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.layers.convolutional import ZeroPadding3D, ZeroPadding2D, 
ZeroPadding1D, UpSampling2D, Cropping2D\nfrom keras.layers.core import Dropout, Flatten\nfrom keras.layers import LeakyReLU, MaxPooling2D, MaxPooling3D,concatenate, Conv2DTranspose, Concatenate\nfrom keras.activations import relu\nfrom keras.callbacks import History, ModelCheckpoint, TensorBoard\nfrom keras import regularizers\nfrom keras.optimizers import Adadelta, RMSprop,SGD,Adam\nfrom sklearn.utils import shuffle\nimport numpy as np\n#from predict import save_image\n# from custom_loss import *\n# from models.neurotech_models import *\nfrom math import sqrt\nfrom helpers import *\nimport json\n\n\ndef encoder(input_img,k):\n\n conv1 = Conv2D(32, (k, k), activation='relu', padding='same')(input_img)\n # conv1 = BatchNormalization()(conv1)\n conv1 = Conv2D(64, (k, k), activation='relu', padding='same')(conv1) # 28 x 28 x 32\n # conv1 = BatchNormalization()(conv1)\n conv2 = Conv2D(128, (k, k), activation='relu', padding='same')(conv1) # 14 x 14 x 64\n # conv2 = BatchNormalization()(conv2)\n conv3 = Conv2D(256, (k, k), activation='relu', padding='same')(conv2) # 7 x 7 x 128 (small and thick)\n # conv3 = BatchNormalization()(conv3)\n # conv3 = Conv2D(256, (k, k), activation='relu', padding='same')(conv3)\n # conv3 = BatchNormalization()(conv3)\n\n return conv3\n\ndef decoder(conv3,k):\n # decoder\n conv4 = Conv2D(128, (k, k), activation='relu', padding='same')(conv3) # 7 x 7 x 128\n # conv4 = BatchNormalization()(conv4)\n conv4 = Conv2D(64, (k, k), activation='relu', padding='same')(conv4)\n # conv4 = BatchNormalization()(conv4)\n conv5 = Conv2D(32, (k, k), activation='relu', padding='same')(conv4) # 14 x 14 x 64\n # conv5 = BatchNormalization()(convk)\n conv5 = Conv2D(16, (k, k), activation='relu', padding='same')(conv5)\n # conv5 = BatchNormalization()(conv5)\n\n decoded = Conv2D(1, (k, k), activation='linear', padding='same')(conv5) # 28 x 28 x 1\n\n return decoded\n\ndef fc(encoded):\n drop1 = Dropout(0.2)(encoded)\n flat = Flatten()(drop1)\n dense1 = Dense(128, activation='relu')(flat)\n drop2 = Dropout(0.2)(dense1)\n dense2 = Dense(64, activation='relu')(drop2)\n drop3 = Dropout(0.2)(dense2)\n out = Dense(2, activation='softmax')(drop3)\n\n return out\n\ndef cnn_binary_classifier(image_dim,verbose=1):\n\n if True:\n autoencoder = load_model('/home/spinney/scripts/python/MRI_Deep_Learning/processed/model/ac_NVY2.hdf5')\n\n input_img = Input(shape=(image_dim[0], image_dim[1], image_dim[2], 1))\n k = 3\n encoded = encoder(input_img,k)\n out = fc(encoded)\n\n classifier = Model(input_img, out)\n\n for l1, l2 in zip(classifier.layers[:5], autoencoder.layers[:5]):\n l1.set_weights(l2.get_weights())\n\n for layer in classifier.layers[:5]:\n layer.trainable = False\n\n\n if verbose > 0:\n print(classifier.summary())\n\n return classifier\n\n\ndef cnn_3D_classifier(image_dim,num_classes,verbose=1):\n\n model = Sequential()\n model.add(\n Conv3D(16, kernel_size=(3, 3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(image_dim[0],image_dim[1],image_dim[2],1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2),strides=(2,2,2)))\n model.add(Conv3D(16, kernel_size=(3, 3, 3), activation='relu', kernel_initializer='he_uniform'))\n model.add(MaxPooling3D(pool_size=(2, 2, 2),strides=(2,2,2)))\n model.add(Conv3D(32, kernel_size=(3, 3, 3), activation='relu', kernel_initializer='he_uniform'))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2)))\n model.add(Flatten())\n model.add(Dense(256, activation='relu', kernel_initializer='he_uniform'))\n model.add(Dense(num_classes, 
activation='softmax'))\n return model\n\ndef cnn_autoencoder(image_dim,verbose=1):\n\n padSize = int(image_dim[0] % 4 / 2)\n\n input_img = Input(shape=(image_dim[0], image_dim[1], image_dim[2],1))\n k = 3\n\n encoded = encoder(input_img,k)\n decoded = decoder(encoded,k)\n\n autoencoder = Model(input_img, decoded)\n\n if verbose > 0:\n print(autoencoder.summary())\n\n return autoencoder\n\n\n\n\ndef build_model(image_dim, nlabels,nK, n_dil, kernel_size, drop_out, model_type, activation_hidden, activation_output, loss, verbose=0):\n if model_type == 'cnn-autoencoder':\n model = cnn_autoencoder(image_dim)\n elif model_type == 'cnn-binary-classifier':\n model = cnn_binary_classifier(image_dim)\n elif model_type == 'cnn_3D_classifier':\n model = cnn_3D_classifier(image_dim,nlabels)\n\n return model\n\ndef compile_and_run(target_dir, model, model_name, model_type, history_fn, X_train, Y_train, X_validate, Y_validate, nb_epoch, batch_size, nlabels, loss, verbose=0, metric=\"accuracy\", lr=0.005, nGPU=1):\n\n #set compiler\n ada = keras.optimizers.Adam(0.01)\n\n #set checkpoint filename\n checkpoint_fn = str(os.path.join(target_dir, 'model', str(os.path.basename(model_name).split('.')[0]) +\"_checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5\"))\n\n #create checkpoint callback for model\n checkpoint = ModelCheckpoint(checkpoint_fn, monitor='val_loss', verbose=0, save_best_only=True, mode='max')\n\n if nGPU == 0:\n steps_per_epoch = batch_size\n nGPU = 1\n elif nGPU == 1:\n steps_per_epoch = len(X_train) // (batch_size*16)\n else:\n steps_per_epoch = len(X_train) // (batch_size * nGPU)\n\n #compile the model\n #model.compile(loss = , optimizer=ada, metrics=[metric])\n print(\"Compiling model {}...\".format(model_name))\n\n if 'autoencoder' in model_type:\n\n model.compile(loss=loss, optimizer=Adam(0.001))\n\n elif 'classifier' in model_type:\n\n model.compile(loss=loss, optimizer='rmsprop', metrics=[metric])\n\n\n print(\"Training size: {}\\n Validation size: {}\\n\".format(X_train.shape, X_validate.shape))\n\n # train model\n # augmentation generator\n aug = ImageDataGenerator(rotation_range=1,\n #width_shift_range=0.01,\n #height_shift_range=0.01,\n #zoom_range=0,\n fill_mode=\"nearest\")\n #if nGPU > 1:\n if 'autoencoder' in model_type:\n history = model.fit_generator(\n aug.flow(X_train,\n X_train,\n batch_size = batch_size*nGPU),\n validation_data= (X_validate, X_validate),\n steps_per_epoch= steps_per_epoch,\n epochs= nb_epoch,\n callbacks= [checkpoint])\n #callbacks= [TensorBoard(log_dir='/home/spinney/scripts/python/MRI_Deep_Learning/logs/autoencoder')])\n\n elif 'classifier' in model_type:\n history = model.fit_generator(\n aug.flow(X_train,\n Y_train,\n batch_size=batch_size * nGPU),\n validation_data=(X_validate, Y_validate),\n steps_per_epoch=steps_per_epoch,\n epochs=nb_epoch,\n callbacks=[checkpoint])\n\n\n # save model\n model.save(model_name)\n\n with open(history_fn, 'w+') as fp: json.dump(history.history, fp)\n\n return [model, history]\n\n","sub_path":"keras_models.py","file_name":"keras_models.py","file_ext":"py","file_size_in_byte":7463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"406274550","text":"\"\"\"\n\nEverything related to the instance of our problem is written in here.\n\n\"\"\"\n\n# Docplex 2.4.56 must be used on the cluster to match the CPLEX' version installed\n# To install it type: pip3 install docplex=2.4.56 --user\nfrom docplex.mp.model import Model\ntry:\n import plotly.offline as py\n from plotly.graph_objs 
import *\n import matplotlib.pyplot as plt\nexcept:\n pass\nimport networkx as nx\nimport os.path\nimport warnings\nimport time\nimport argparse\nimport cplex\nimport math\nfrom collections import namedtuple\nimport multiprocessing\nfrom .callback import LazyCallback\nimport random\nimport sys\n\n\nclass ValueWarning(UserWarning):\n pass\n\n\nclass ParseWarning(UserWarning):\n pass\n\n\nclass WindFarm:\n\n \"\"\"\n\n py:class:: instance()\n\n This class stores all the useful information about input data and parameters\n\n \"\"\"\n\n # Class constructor\n def __init__(self):\n\n # The CPLEX/DOCPLEX model itself\n self.__model = None\n\n # Model variables/parameters\n self.__name = ''\n self.__y_start = 0\n self.__f_start = 0\n self.__x_start = 0\n self.__substation_slack_start = 0\n self.__flux_slack_start = 0\n self.__points = []\n self.__cables = []\n\n # Data counters\n self.__n_nodes = 0\n self.__n_cables = 0\n self.__n_turbines = 0\n self.__n_substations = 0\n self.__best_sol = []\n self.__best_incumbent = 10e12\n self.__true_gap = 1\n self.__starting_time = 0\n\n # \"Named tuples\", very useful for describing input data\n # without creating new classes\n self.__EdgeSol = namedtuple(\"EdgeSol\", [\"idx\", \"s\", \"d\"])\n self.__CableSol = namedtuple(\"CableSol\", [\"idx\", \"s\", \"d\", \"capacity\"])\n self.__FluxSol = namedtuple(\"FluxSol\", [\"idx\", \"s\", \"d\"])\n self.__Point = namedtuple(\"Point\", [\"x\", \"y\", \"power\"])\n self.__Cable = namedtuple(\"Cable\", [\"capacity\", \"price\", \"max_usage\"])\n\n # Operating parameters, commenting out whatever we don't use\n self.__project_path = ''\n self.__cluster = False\n self.__time_limit = 60\n self.__rins = 7\n self.__polishtime = int(1e12)\n self.__substation_slack = True\n self.__flux_slack = True\n self.__cross_mode = 'no'\n self.__debug_mode = False\n self.__interface = 'cplex'\n self.__matheuristic = None\n self.__overall_wait_time = 1200\n\n # Dataset selection and consequent input files building, and output parameters\n self.__data_select = 1\n\n # Building the input/output files, while parsing the command line.\n self.__build_input_files()\n self.__out_dir_name = 'test'\n\n # Private methods, internal to our class:\n def __read_turbines_file(self):\n\n \"\"\"\n\n py:function:: read_turbines_file(self)\n\n Read the turbines file\n\n \"\"\"\n\n points = []\n\n # the following opens and closes the file within the block\n with open(self.turb_file, \"r\") as fp:\n for line in fp:\n words = list(map(int, line.split()))\n points.append(self.__Point(words[0], words[1], words[2]))\n if int(words[2]) < 0.5:\n self.__n_substations += 1\n\n self.__n_nodes = len(points)\n self.__n_turbines = self.__n_nodes - self.__n_substations\n self.__points = points\n\n def __read_cables_file(self):\n\n \"\"\"\n\n py:function:: read_cables_file(self)\n\n Read the cables file\n\n \"\"\"\n\n cables = []\n\n # the following opens and closes the file within the block\n with open(self.cbl_file, \"r\") as fp:\n for line in fp:\n words = line.split()\n cables.append(self.__Cable(int(words[0]), float(words[1]), int(words[2])))\n\n self.__n_cables = len(cables)\n self.__cables = cables\n\n def __fpos(self, i_off, j_off):\n\n \"\"\"\n\n Get the position of the column inside the model.\n Must be used the number starting from 0.\n\n :param i_off: First turbine number\n :param j_off: Second turbine number\n\n :return: Column index\n\n \"\"\"\n\n return self.__f_start + i_off*self.__n_nodes + j_off\n\n def __xpos(self, i_off, j_off, k_off):\n\n \"\"\"\n\n 
Get the position of the column inside the model\n\n :param i_off: First turbine number\n :param j_off: Second turbine number\n :param k_off: Cable index\n\n :return: Column index\n\n \"\"\"\n\n return self.__x_start + i_off*self.__n_nodes*self.__n_cables + j_off*self.__n_cables + k_off\n\n def __ypos(self, i_off, j_off):\n\n \"\"\"\n\n Get the position of the column inside the model.\n Must be used the number starting from 0.\n\n :param i_off: First turbine number\n :param j_off: Second turbine number\n\n :return: Column index\n\n \"\"\"\n\n return self.__y_start + i_off*self.__n_nodes + j_off\n\n def __substation_slackpos(self, slack_off):\n\n \"\"\"\n\n Get the position of the column inside the model.\n\n :param slack_off: Offset w.r.t to the substation/turbine indexed by slack_off\n\n :return: Offset w.r.t slackstart\n\n \"\"\"\n\n return self.__substation_slack_start + slack_off\n\n def __flux_slackpos(self, slack_off):\n\n \"\"\"\n\n Get the position of the column inside the model.\n\n :param slack_off: Offset w.r.t to the substation indexed by slack_off\n\n :return: Offset w.r.t slackstart\n\n \"\"\"\n\n return self.__flux_slack_start + slack_off\n\n def __build_model_cplex(self):\n\n \"\"\"\n\n Build the model using classical cplex API\n\n :return:\n\n \"\"\"\n if self.__cross_mode == 'lazy':\n start = time.clock()\n\n if not self.__interface == 'cplex':\n raise NameError(\"For some reason the classical model has been called when \" +\n \"the 'interface' variable has been set to: \" + self.__interface)\n self.__model = cplex.Cplex()\n\n self.__model.set_problem_name(self.__name)\n self.__model.objective.set_sense(self.__model.objective.sense.minimize)\n\n # Add y(i,j) variables\n self.__y_start = self.__model.variables.get_num()\n self.__model.variables.add(\n types=[self.__model.variables.type.binary]\n * (self.__n_nodes**2),\n names=[\"y({0},{1})\".format(i+1, j+1)\n for i in range(self.__n_nodes)\n for j in range(self.__n_nodes)]\n )\n\n # Add f(i,j) variables\n self.__f_start = self.__model.variables.get_num()\n self.__model.variables.add(\n types=[self.__model.variables.type.continuous]\n * (self.__n_nodes**2),\n names=[\"f({0},{1})\".format(i+1, j+1)\n for i in range(self.__n_nodes)\n for j in range(self.__n_nodes)],\n lb=[0] * (self.__n_nodes**2),\n ub=[max([cable.capacity for cable in self.__cables])] * (self.__n_nodes**2),\n )\n\n # Add x(i,j,k) variables\n self.__x_start = self.__model.variables.get_num()\n self.__model.variables.add(\n types=[self.__model.variables.type.binary]\n * (self.__n_nodes**2)\n * self.__n_cables,\n names=[\"x({0},{1},{2})\".format(i+1, j+1, k+1)\n for i in range(self.__n_nodes)\n for j in range(self.__n_nodes)\n for k in range(self.__n_cables)],\n obj=[cable.price * WindFarm.get_distance(v,u)\n for v in self.__points\n for u in self.__points\n for cable in self.__cables]\n )\n\n # Slack variables on parameter C\n if self.__substation_slack:\n # Add s(h) (slack) variables\n self.__substation_slack_start = self.__model.variables.get_num()\n self.__model.variables.add(\n types=[self.__model.variables.type.continuous]\n * self.__n_substations,\n names=[\"sc({0})\".format(h+1)\n for h, point in enumerate(self.__points)\n if point.power < -0.5],\n obj=[1e9] * self.__n_substations,\n lb=[0] * self.__n_substations,\n ub=[self.__n_turbines] * self.__n_substations\n )\n else:\n # No variables should be added, then.\n self.__substation_slack_start = -1\n\n # Slack variables on flux\n if self.__flux_slack:\n # Add slack variables for flux\n 
self.__flux_slack_start = self.__model.variables.get_num()\n max_capacity = max([cable.capacity for cable in self.__cables])\n self.__model.variables.add(\n types=[self.__model.variables.type.continuous]\n * self.__n_nodes,\n names=[\"s2({0})\".format(h+1)\n for h in range(self.__n_nodes)],\n obj=[1e9] * self.__n_nodes,\n ub=[max_capacity] * self.__n_nodes,\n lb=[0] * self.__n_nodes\n )\n else:\n # No variables should be added, then.\n self.__flux_slack_start = -1\n\n # No self-loop constraints on y(i,i) = 0 \\forall i\n self.__model.variables.set_upper_bounds([\n (self.__ypos(i,i), 0)\n for i in range(self.__n_nodes)\n ])\n\n # No self-loop constraints on f(i,i) = 0 \\forall i\n self.__model.variables.set_upper_bounds([\n (self.__fpos(i,i), 0)\n for i in range(self.__n_nodes)\n ])\n\n # No self-loop constraints on x(i,i,k) = 0 \\forall i,k\n self.__model.variables.set_upper_bounds([\n (self.__xpos(i,i,k), 0)\n for i in range(self.__n_nodes)\n for k in range(self.__n_cables)\n ])\n\n if self.__flux_slack:\n # No slack variable on the substation\n self.__model.variables.set_upper_bounds([\n (self.__flux_slackpos(h), 0)\n for h,point in enumerate(self.__points)\n if point.power<-0.5\n ])\n\n # Out-degree constraints (substations)\n self.__model.linear_constraints.add(\n lin_expr=[cplex.SparsePair(\n ind=[self.__ypos(h,j) for j in range(self.__n_nodes)],\n val=[1.0] * self.__n_nodes\n )\n for h,point in enumerate(self.__points)\n if point.power<-0.5\n ],\n senses=[\"E\"] * self.__n_substations,\n rhs=[0] * self.__n_substations\n )\n\n # Out-degree constraints (turbines)\n self.__model.linear_constraints.add(\n lin_expr=[cplex.SparsePair(\n ind=[self.__ypos(h,j) for j in range(self.__n_nodes)],\n val=[1.0] * self.__n_nodes\n )\n for h,point in enumerate(self.__points)\n if point.power>=0.5\n ],\n senses=([\"L\"] if self.__flux_slack else [\"E\"]) * self.__n_turbines,\n rhs=[1] * self.__n_turbines\n )\n\n # Flow balancing constraint\n self.__model.linear_constraints.add(\n lin_expr=[cplex.SparsePair(\n ind=[self.__fpos(h,j) for j in range(self.__n_nodes) if h!=j] +\n [self.__fpos(j,h) for j in range(self.__n_nodes) if j!=h] +\n ([self.__flux_slackpos(h)] if self.__flux_slack else []),\n val=[1] * (self.__n_nodes - 1) + [-1] * (self.__n_nodes - 1) +\n ([1] if self.__flux_slack else [])\n )\n for h, point in enumerate(self.__points)\n if point.power > 0.5\n ],\n senses=[\"E\"] * self.__n_turbines,\n rhs=[point.power for point in self.__points if point.power > 0.5]\n )\n\n # Maximum number of cables linked to a substation\n self.__model.linear_constraints.add(\n lin_expr=[cplex.SparsePair(\n ind=[self.__ypos(i,h) for i in range(self.__n_nodes)] +\n ([self.__substation_slackpos(h)] if self.__substation_slack else []),\n val=[1] * self.__n_nodes +\n ([-1] if self.__substation_slack else [])\n )\n for h,point in enumerate(self.__points)\n if point.power < -0.5\n ],\n senses=[\"L\"] * self.__n_substations,\n rhs=[self.c] * self.__n_substations\n )\n\n # Avoid double cables between two points\n self.__model.linear_constraints.add(\n lin_expr=[cplex.SparsePair(\n ind=[self.__ypos(i,j)] + [self.__xpos(i,j,k) for k in range(self.__n_cables)],\n val=[1] + [-1] * self.__n_cables\n )\n for i in range(self.__n_nodes)\n for j in range(self.__n_nodes)\n ],\n senses=[\"E\"] * (self.__n_nodes**2),\n rhs=[0] * (self.__n_nodes**2)\n )\n\n # We want to guarantee that the cable is enough for the connection\n self.__model.linear_constraints.add(\n lin_expr=[cplex.SparsePair(\n ind=[self.__fpos(i,j)] + 
[self.__xpos(i,j,k) for k in range(self.__n_cables)],\n val=[-1] + [cable.capacity for cable in self.__cables]\n )\n for i in range(self.__n_nodes)\n for j in range(self.__n_nodes)\n ],\n senses=[\"G\"] * (self.__n_nodes**2),\n rhs=[0] * (self.__n_nodes**2)\n )\n\n # (in case) No-crossing lazy constraints\n if self.__cross_mode == 'lazy' or self.__cross_mode == 'normal':\n for i, a in enumerate(self.__points):\n # We want to lighten these constraints as much as possible; for this reason, j=i+1\n for j, b in enumerate(self.__points[i+1:], start=i+1):\n # EdgeSol has this structure: [\"idx\", \"s\", \"d\"]\n current_couple = [\n self.__EdgeSol(self.__ypos(i,j), i, j),\n self.__EdgeSol(self.__ypos(j,i), j, i)\n ]\n for k, c in enumerate(self.__points):\n if not (c == a or c == b):\n violating_cds = [self.__EdgeSol(self.__ypos(k,l), k, l)\n for l,d in enumerate(self.__points[k+1:], start=k+1)\n if WindFarm.are_crossing(a,b,c,d)]\n if len(violating_cds) > 0:\n self.__add_violating_constraint(current_couple + violating_cds)\n\n # Adding the parameters to the model\n self.__model.parameters.mip.strategy.rinsheur.set(self.__rins)\n if self.__polishtime < self.__time_limit:\n self.__model.parameters.mip.polishafter.time.set(self.__polishtime)\n\n # The following asks CPLEX to measure its time with REAL CPU time.\n self.__model.parameters.clocktype.set(1)\n\n if self.__cross_mode == 'lazy':\n end = time.clock()\n time_spent = end - start\n else:\n time_spent = 0\n\n self.__model.parameters.timelimit.set(self.__time_limit - time_spent)\n print(self.__model.parameters.timelimit.get())\n # Writing the model to a proper location\n #self.__model.write(self.__project_path + \"/out/\" + self.__out_dir_name + \"/lpmodel.lp\")\n\n def __build_model_docplex(self):\n\n \"\"\"\n\n Build the model using docplex API\n\n :return: None\n\n \"\"\"\n\n if not self.__interface == 'docplex':\n raise NameError(\"For some reason the docplex model has been called when \" +\n \"the 'interface' variable has been set to: \" + self.__interface)\n\n self.__model = Model(name=self.__name)\n\n # Time measurement within the solver\n self.__model.set_TimeMode('CPUTime')\n self.__model.set_time_limit(self.__time_limit)\n\n # Add y(i,j) variables\n self.__y_start = self.__model.get_statistics().number_of_variables\n self.__model.binary_var_list(\n ((i+1, j+1)\n for i in range(self.__n_nodes)\n for j in range(self.__n_nodes)),\n name=\"y%s\"\n )\n\n # Add f(i,j) variables\n self.__f_start = self.__model.get_statistics().number_of_variables\n self.__model.continuous_var_list(\n ((i+1,j+1)\n for i in range(self.__n_nodes)\n for j in range(self.__n_nodes)),\n name=\"f%s\",\n ub=max([cable.capacity for cable in self.__cables]),\n lb=0\n )\n\n # Add x(i,j,k) variables\n self.__x_start = self.__model.get_statistics().number_of_variables\n self.__model.binary_var_list(\n ((i+1,j+1,k+1)\n for i in range(self.__n_nodes)\n for j in range(self.__n_nodes)\n for k in range(self.__n_cables)),\n name=\"x%s\"\n )\n\n # Slack variables on parameter C\n if self.__substation_slack:\n self.__substation_slack_start = self.__model.get_statistics().number_of_variables\n self.__model.continuous_var_list(\n (h+1\n for h, point in enumerate(self.__points)\n if point.power < -0.5),\n name=\"s1(%s)\",\n ub=self.__n_turbines,\n lb=0\n )\n else:\n # No slack variables, then.\n self.__substation_slack_start = -1\n\n # Slack variables on flux\n if self.__flux_slack:\n # Add slack variables for flux\n self.__flux_slack_start = 
self.__model.get_statistics().number_of_variables\n self.__model.continuous_var_list(\n (h+1 for h in range(self.__n_turbines)),\n name=\"s2(%s)\",\n ub=max([cable.capacity for cable in self.__cables]),\n lb=0\n )\n else:\n # No variables should be added, then.\n self.__flux_slack_start = -1\n\n # No self-loops constraints on y(i,i) variables (\\forall i)\n self.__model.add_constraints(\n self.__model.get_var_by_index(self.__ypos(i,i)) == 0\n for i in range(self.__n_nodes)\n )\n\n # No self-loops constraints on f(i,i) variables (\\forall i)\n self.__model.add_constraints(\n self.__model.get_var_by_index(self.__fpos(i,i)) == 0\n for i in range(self.__n_nodes)\n )\n\n # No self-loops constraints on x(i,i, k) variables (\\forall i, \\forall k))\n self.__model.add_constraints(\n self.__model.get_var_by_index(self.__xpos(i,i,k)) == 0\n for i in range(self.__n_nodes)\n for k in range(self.__n_cables)\n )\n\n # Out-degree constraints (substations)\n self.__model.add_constraints(\n self.__model.sum(\n self.__model.get_var_by_index(self.__ypos(h,j))\n for j in range(self.__n_nodes)\n )\n ==\n 0\n for h, point in enumerate(self.__points)\n if point.power < -0.5\n )\n\n # Out-degree constraints (turbines)\n self.__model.add_constraints(\n self.__model.sum(\n self.__model.get_var_by_index(self.__ypos(h,j))\n for j in range(self.__n_nodes)\n )\n ==\n 1\n for h, point in enumerate(self.__points)\n if point.power > 0.5\n )\n\n # Maximum number of cables linked to a substation\n self.__model.add_constraints(\n self.__model.sum(\n self.__model.get_var_by_index(self.__ypos(i,h))\n for i in range(self.__n_nodes)\n )\n <=\n self.c + (self.__model.get_var_by_index(self.__substation_slackpos(h))\n if self.__substation_slack else 0)\n for h,point in enumerate(self.__points)\n if point.power < -0.5\n )\n\n # Flow balancing constraint\n self.__model.add_constraints(\n self.__model.sum(\n self.__model.get_var_by_index(self.__fpos(h,j))\n for j in range(self.__n_nodes)\n )\n ==\n self.__model.sum(\n self.__model.get_var_by_index(self.__fpos(j,h))\n for j in range(self.__n_nodes)\n ) + self.__points[h].power\n - (self.__model.get_var_by_index(self.__flux_slackpos(h))\n if self.__flux_slack else 0)\n for h,point in enumerate(self.__points)\n if point.power > 0.5\n )\n\n # Avoid double cable between two points\n self.__model.add_constraints(\n self.__model.get_var_by_index(self.__ypos(i,j))\n ==\n self.__model.sum(\n self.__model.get_var_by_index(self.__xpos(i,j,k))\n for k in range(self.__n_cables)\n )\n for i in range(self.__n_nodes)\n for j in range(self.__n_nodes)\n )\n\n # Guarantee that the cable is enough for the connection\n self.__model.add_constraints(\n self.__model.sum(\n cable.capacity * self.__model.get_var_by_index(self.__xpos(i,j,k))\n for k,cable in enumerate(self.__cables)\n )\n >=\n self.__model.get_var_by_index(self.__fpos(i,j))\n for i in range(self.__n_nodes)\n for j in range(self.__n_nodes)\n )\n\n # No-crossing lazy constraints don't work in docplex.\n if not self.__cross_mode == 'no':\n raise ValueError(\"No crossings when using docplex.\")\n\n # Objective function\n self.__model.minimize(\n self.__model.sum(\n cable.price * WindFarm.get_distance(u, v) * self.__model.get_var_by_index(self.__xpos(i, j, k))\n for k, cable in enumerate(self.__cables)\n for i, u in enumerate(self.__points)\n for j, v in enumerate(self.__points)\n )\n + (self.__model.sum(\n 1e9 * self.__model.get_var_by_index(self.__substation_slackpos(h))\n for h,point in enumerate(self.__points)\n if point.power < 0.5\n ) if 
self.__substation_slack else 0)\n + (self.__model.sum(\n 1e9 * self.__model.get_var_by_index(self.__flux_slackpos(h))\n for h in range(self.__n_turbines)\n ) if self.__flux_slack else 0)\n )\n\n # Adding the parameters to the model\n self.__model.parameters.mip.strategy.rinsheur.set(self.rins)\n if self.__polishtime < self.__time_limit:\n self.__model.parameters.mip.polishafter.time.set(self.__polishtime)\n self.__model.parameters.timelimit.set(self.__time_limit)\n\n # Writing the model to a proper location\n self.__model.export_as_lp(path=self.__project_path+\"/out/\" + self.__out_dir_name + \"/lpmodel.lp\")\n\n def __get_solution(self, var='x'):\n\n \"\"\"\n\n Reads the solution from CPLEX or DOCPLEX and stores it in three appropriate lists.\n (recall that a selected solution is a variable set to '1').\n\n - If var is set to 'x', a list of \"CableSol\" named tuples will be returned.\n - If var is set to 'y', a list of \"EdgeSol\" named tuples will be returned.\n - If var is set to 'f', a list of \"FluxSol\" named tuples will be returned.\n\n WARNING:\n If you call this function BEFORE CPLEX or DOCPLEX find an incumbent,\n or just after adding a constraint to the model (see loop method),\n an error will be raised.\n\n :param var: Default = 'x'. May be set to 'x', 'y' or 'f'.\n :return: The corresponding solution list.\n\n \"\"\"\n\n if self.__interface == 'cplex':\n get_value = self.__model.solution.get_values\n xpos = self.__xpos\n ypos = self.__ypos\n fpos = self.__fpos\n elif self.__interface == 'docplex':\n get_value = self.__model.solution.get_value\n xpos = lambda i, j, k: self.__model.get_var_by_index(self.__xpos(i, j, k))\n ypos = lambda i, j: self.__model.get_var_by_index(self.__ypos(i, j))\n fpos = lambda i, j: self.__model.get_var_by_index(self.__fpos(i, j))\n else:\n raise ValueError(\"Unknown interface. I've got: \" + str(self.__interface))\n\n if var == 'x':\n sol = [self.__CableSol(self.__xpos(i, j, k), i, j, k)\n for i in range(self.__n_nodes)\n for j in range(self.__n_nodes)\n for k in range(self.__n_cables)\n if get_value(xpos(i, j, k)) > 0.5]\n elif var == 'y':\n sol = [self.__EdgeSol(self.__ypos(i, j), i, j)\n for i in range(self.__n_nodes)\n for j in range(self.__n_nodes)\n if get_value(ypos(i, j)) > 0.5]\n elif var == 'f':\n sol = [self.__FluxSol(self.__fpos(i, j), i, j)\n for i in range(self.__n_nodes)\n for j in range(self.__n_nodes)\n if get_value(fpos(i, j)) > 0.5]\n else:\n raise ValueError(\"Invalid solution request. 'x', 'y' or 'f' are possible values, given \" + str(var))\n\n return sol\n\n def __build_input_files(self):\n\n \"\"\"\n\n py:function:: __build_input_files(self)\n\n Sets the input file correctly, based on the dataset selection\n\n \"\"\"\n\n if not type(self.__data_select) == int:\n raise TypeError(\"Expecting an integer value representing the dataset. Given: \" + str(self.__data_select))\n if self.__data_select <= 0 or self.__data_select >= 32:\n raise ValueError(\"The dataset you're trying to reach is out of range.\\n\" +\n \"Range: [1-31]. 
Given: \" + str(self.__data_select))\n\n data_tostring = str(self.__data_select)\n if 1 <= self.__data_select <= 9:\n data_tostring = \"0\" + data_tostring\n\n abspath = os.path.abspath(os.path.dirname(__file__)).strip()\n\n path_dirs = abspath.split('/')\n path_dirs = [str(el) for el in path_dirs]\n path_dirs.remove('')\n\n self.__project_path = ''\n or2_found = False\n i = 0\n while not or2_found:\n if path_dirs[i] == 'OR2':\n or2_found = True\n self.__project_path += '/' + path_dirs[i]\n i += 1\n\n self.turb_file = self.__project_path + \"/data/data_\" + data_tostring + \".turb\"\n self.cbl_file = self.__project_path + \"/data/data_\" + data_tostring + \".cbl\"\n\n def __build_custom_parameters(self):\n\n \"\"\"\n\n py:function:: __build_custom_parameters(self)\n\n Set the name and some constant parameters of the wind farm correctly,\n based on the dataset selection\n\n \"\"\"\n\n if not type(self.__data_select) == int:\n raise TypeError(\"Expecting an integer value representing the dataset. Given: \" + str(self.__data_select))\n if self.__data_select <= 0 or self.__data_select >= 32:\n raise ValueError(\"The dataset you're trying to reach is out of range.\\n\" +\n \"Range: [1-31]. Given: \" + str(self.__data_select))\n\n # We assume that, in this context, we'll never have a WF >=10\n wf_number = 0\n if 0 <= self.__data_select <= 6:\n wf_number = 1\n self.c = 10\n elif 7 <= self.__data_select <= 15:\n wf_number = 2\n self.c = 100\n elif 16 <= self.__data_select <= 19:\n wf_number = 3\n self.c = 4\n elif 20 <= self.__data_select <= 21:\n wf_number = 4\n self.c = 10\n elif 26 <= self.__data_select <= 29:\n wf_number = 5\n self.c = 10\n elif 30 <= self.__data_select <= 31:\n wf_number = 6\n self.c = 12\n\n if wf_number == 0:\n raise ValueError(\"Something went wrong with the Wind Farm number;\\n\" +\n \"check the dataset selection parameter: \" + str(self.__data_select))\n\n self.__name = \"Wind Farm 0\" + str(wf_number)\n\n def __plot_high_quality(self, edges):\n\n \"\"\"\n\n py:function:: plot_high_quality(inst, edges)\n\n Plot the solution using standard libraries\n\n :param export: whatever this means\n\n \"\"\"\n\n G = nx.DiGraph()\n\n mapping = {}\n\n for i in range(self.__n_nodes):\n if self.__points[i].power < -0.5:\n mapping[i] = 'S{0}'.format(i + 1)\n else:\n mapping[i] = 'T{0}'.format(i + 1)\n\n for index, node in enumerate(self.__points):\n G.add_node(index)\n\n available_colours = ['black', 'red', 'gray', 'sienna', 'purple', 'navy', 'darkcyan', 'darkkhaki',\n 'coral', 'chocolate', 'salmon', 'olive', 'fuchsia', 'blue', 'seagreen', 'pink']\n for edge in edges:\n G.add_edge(edge.s, edge.d, color=available_colours[edge.capacity])\n\n pos = {i: (point.x, point.y) for i, point in enumerate(self.__points)}\n\n # Avoid re scaling of axes\n plt.gca().set_aspect('equal', adjustable='box')\n\n # draw graph\n net_edges = G.edges()\n colors = [G[u][v]['color'] for u, v in net_edges]\n\n hfont = {'fontname':'sans-serif'}\n plt.title(self.__name + \" (\" + str(self.__model.solution.get_objective_value()) + \")\", **hfont, fontsize=16)\n\n nx.draw(G, pos, with_labels=True, node_size=1300, alpha=0.3, arrows=True, labels=mapping, node_color='g',\n linewidth=20, edge_color=colors)\n plt.show()\n\n def __get_violated_edges(self, selected_edges):\n\n \"\"\"\n\n When called, this function returns a list of violations, which are a list of y_pos indexes,\n ready to be added to CPLEX or DOCPLEX.\n\n :param selected_edges:\n :return: [list_1, ..., list2_m], where each list_i is a list:\n [idx_1, ..., 
idx_m], where idx_i = y_pos(some_turb, some_other_turb)\n\n \"\"\"\n\n constraints_to_be_added = []\n\n for ab in selected_edges:\n edges_violating_ab = [e2 for e2 in selected_edges\n # Filter out anything that goes to/comes from a and b.\n if not (e2.s == ab.s or e2.d == ab.s or e2.s == ab.d or e2.d == ab.d)\n # Extract the violated edges only.\n and WindFarm.are_crossing(self.__points[ab.s],\n self.__points[ab.d],\n self.__points[e2.s],\n self.__points[e2.d])]\n\n if len(edges_violating_ab) > 0:\n for violating_edge in edges_violating_ab:\n constraints_to_be_added.append([\n violating_edge,\n self.__EdgeSol(self.__ypos(ab.s, ab.d), ab.s, ab.d),\n self.__EdgeSol(self.__ypos(ab.d, ab.s), ab.d, ab.s)\n ])\n\n return constraints_to_be_added\n\n def __reduce_metaheuristic_iteration(self, edge_ab):\n\n \"\"\"\n\n :param edge_ab: The edge selected by the metaheuristic algorithm\n :return: List of EdgeSol => all the violations through edge_ab\n\n \"\"\"\n\n full_graph = [self.__EdgeSol(self.__ypos(i, j), i, j)\n for i in range(self.__n_nodes)\n for j in range(self.__n_nodes)]\n\n filtered_edges = [edge_cd for edge_cd in full_graph\n if not (edge_cd.s == edge_ab.s or edge_cd.d == edge_ab.s or edge_cd.s == edge_ab.d or edge_cd.d == edge_ab.d)]\n\n violations = [edge_cd for edge_cd in filtered_edges\n if WindFarm.are_crossing(self.__points[edge_ab.s],\n self.__points[edge_ab.d],\n self.__points[edge_cd.s],\n self.__points[edge_cd.d])]\n\n return violations\n\n def __add_violating_constraint(self, crossings):\n\n \"\"\"\n\n Adds the violating constraint by index.\n Crossings contains the index of every edge variable to be added.\n\n :param crossings: a list [EdgeSol_1, ..., EdgeSol_n],\n where EdgeSol_i are the conflicting edges, containing their idx and names\n :return: None\n\n \"\"\"\n\n if self.__cross_mode == 'lazy':\n if self.__cluster is True:\n constraint_add = self.__model.linear_constraints.advanced.add_lazy_cuts\n else:\n constraint_add = self.__model.linear_constraints.advanced.add_lazy_constraints\n else:\n constraint_add = self.__model.linear_constraints.add\n\n if len(crossings) > 0:\n crossings = [cross.idx for cross in crossings]\n coefficients = [1] * len(crossings)\n constraint_add(\n lin_expr=[cplex.SparsePair(\n ind=crossings,\n val=coefficients\n )],\n senses=[\"L\"],\n rhs=[1]\n )\n\n\n def parse_command_line(self):\n\n \"\"\"\n\n py:function:: parse_command_line(self)\n\n Parses the command line.\n\n :return: None\n\n \"\"\"\n\n parser = argparse.ArgumentParser(description='Process details about instance and interface.')\n\n parser.add_argument('--dataset', type=int,\n help=\"dataset selection; datasets available: [1,29]. 
\" +\n \"You can use '30' for debug purposes\")\n parser.add_argument('--cluster', action=\"store_true\",\n help='type --cluster if you want to use the cluster')\n parser.add_argument('--interface', choices=['docplex', 'cplex'],\n help='Choose the interface ')\n parser.add_argument('--rins', type=int,\n help='the frequency with which the RINS Heuristic will be applied')\n parser.add_argument('--timeout', type=int,\n help='timeout in which the optimizer will stop iterating')\n parser.add_argument('--polishtime', type=int,\n help='the time to wait before applying polishing')\n parser.add_argument('--outfolder', type=str,\n help='name of the folder to be created inside the /out' +\n ' directory, which contains everything related to this run')\n parser.add_argument('--nosubstationslack', action=\"store_true\",\n help='type --noslack if you do not want the soft version of the problem')\n parser.add_argument('--nofluxslack', action=\"store_true\",\n help='type --noslack if you do not want the slack on the flux')\n parser.add_argument('--crossings', choices=['no', 'lazy', 'loop', 'callback', 'normal'],\n help='Choose how you want to address the crossing problem')\n parser.add_argument('--matheuristic', choices=['hard', 'soft'],\n help='Choose one matheuristic method to wrap the execution')\n parser.add_argument('--overall_wait_time', type=int,\n help='Used in hard/soft fixing and loop method to stop the execution')\n\n args, unknown = parser.parse_known_args()\n\n if args.outfolder:\n self.__out_dir_name = args.outfolder\n\n if args.dataset:\n self.__data_select = args.dataset\n\n if args.cluster:\n self.__cluster = True\n\n if args.interface == 'docplex' or args.interface == 'cplex':\n self.__interface = args.interface\n else:\n warnings.warn(\"Invalid interface; '\" + str(args.interface) + \"' given. \"\n + \"Using the default value: \" + self.__interface,\n ParseWarning)\n self.__interface = 'cplex'\n\n if args.rins:\n if args.rins <= -2:\n warnings.warn(\"RINS parameter given is not valid. Given: \" + str(args.rins) +\n \". 
Using the default value: \" + str(self.__rins))\n else:\n self.__rins = args.rins\n\n if args.timeout:\n self.__time_limit = args.timeout\n\n if args.polishtime:\n self.__polishtime = args.polishtime\n\n if args.nosubstationslack:\n self.__substation_slack = False\n\n if args.nofluxslack:\n self.__flux_slack = False\n\n if args.crossings:\n self.__cross_mode = args.crossings\n\n if args.matheuristic:\n self.__matheuristic = args.matheuristic\n\n if args.overall_wait_time:\n self.__overall_wait_time = args.overall_wait_time\n if self.__overall_wait_time= 0.3 and time.clock() - self.__starting_time < self.__overall_wait_time:\n self.__model.solve()\n starting_gap = self.__model.solution.MIP.get_mip_relative_gap()\n\n while (xs or not opt) and time.clock() - self.__starting_time < self.__overall_wait_time:\n self.__model.solve()\n #self.plot_solution(edges=self.__get_solution(var='x'), high=False)\n\n violations = self.__get_violated_edges(self.__get_solution(var='y'))\n\n if len(violations) > 0:\n xs = True\n for violation in violations:\n self.__add_violating_constraint(violation)\n else:\n xs = False\n\n if self.__best_incumbent > self.__model.solution.get_objective_value():\n self.__best_sol = self.__get_solution()\n self.__best_incumbent = self.__model.solution.get_objective_value()\n\n if self.__model.solution.get_status() == self.__model.solution.status.MIP_optimal:\n opt = True\n\n self.__model.parameters.advance.set(0)\n else:\n raise ValueError(\"Unrecognized cross-strategy; given: \" + str(self.__cross_mode))\n\n def __hard_fix(self):\n\n \"\"\"\n\n Solve the problem using the hard fixing math-heuristic\n\n :return: None\n\n \"\"\"\n print(\"Hard fixing\")\n\n optimum = False # \"has the optimum been reached?\"\n probability = 0.9\n self.__model.parameters.advance.set(1)\n starting_gap = 1\n while starting_gap >= 0.3 and time.clock() - self.__starting_time < self.__overall_wait_time:\n self.__exact_solve()\n starting_gap = self.__model.solution.MIP.get_mip_relative_gap()\n\n starting_best_bound = starting_gap * self.__model.solution.get_objective_value()\n\n while not optimum and time.clock() - self.__starting_time < self.__overall_wait_time:\n\n sol = self.__get_solution(var='y')\n #self.plot_solution(self.__get_solution(var='x'))\n\n for edge in sol:\n if random.random() > probability:\n\n self.__model.variables.set_lower_bounds(\n edge.idx,\n 1\n )\n\n if self.__cross_mode is not 'no':\n violations = self.__reduce_metaheuristic_iteration(edge)\n\n for violation in violations:\n self.__model.variables.set_upper_bounds(\n violation.idx,\n 0\n )\n\n self.__exact_solve()\n\n if self.__model.solution.get_status() == self.__model.solution.status.MIP_optimal:\n optimum = True\n\n if self.__best_incumbent > self.__model.solution.get_objective_value():\n self.__best_sol = self.__get_solution()\n self.__best_incumbent = self.__model.solution.get_objective_value()\n\n probability -= 0.05\n probability = max(probability, 0.5)\n\n self.__true_gap = starting_best_bound / self.__best_incumbent\n print(\"True gap: \", self.__true_gap)\n\n def __soft_fix(self):\n\n \"\"\"\n\n Solve the problem using the soft fixing math-heuristic\n\n :return: None\n\n \"\"\"\n\n optimum = False # \"has the optimum been reached?\"\n k = 0.4\n self.__model.parameters.advance.set(1)\n starting_gap = 1\n while starting_gap >= 0.30 and time.clock() - self.__starting_time < self.__overall_wait_time: # We do this to AT LEAST have a feasible solution (with no slacks==1)\n self.__exact_solve()\n starting_gap = 
self.__model.solution.MIP.get_mip_relative_gap()\n\n starting_best_bound = starting_gap * self.__model.solution.get_objective_value()\n while not optimum and time.clock() - self.__starting_time < self.__overall_wait_time:\n\n selected_edges = self.__get_solution(var='y')\n\n self.__model.linear_constraints.add(\n lin_expr=[cplex.SparsePair(\n ind=[edge.idx for edge in selected_edges],\n val=[1] * len(selected_edges)\n )],\n senses=[\"G\"],\n rhs=[self.__n_nodes*(1-k)-1]\n )\n\n self.__exact_solve()\n\n if self.__model.solution.get_status() == self.__model.solution.status.MIP_optimal:\n optimum = True\n\n if self.__best_incumbent > self.__model.solution.get_objective_value():\n self.__best_sol = self.__get_solution()\n self.__best_incumbent = self.__model.solution.get_objective_value()\n\n k -= 0.10\n k = max(k, 0.1)\n\n self.__true_gap = starting_best_bound / self.__best_incumbent\n print(\"True gap: \", self.__true_gap)\n #self.plot_solution(self.__best_sol)\n\n def solve(self):\n\n \"\"\"\n\n Call the right method to solve the problem\n\n :return: None\n\n \"\"\"\n print(self.__model.get_version())\n print(\"Rins = \", self.__model.parameters.mip.strategy.rinsheur.get())\n self.__model.parameters.threads.set(8)\n start_time = time.time()\n if self.__cluster:\n self.__model.parameters.randomseed = random.randint(0, sys.maxsize)\n self.__model.parameters.advance.set(0)\n print(\"Advanced model:\", self.__model.parameters.advance.get())\n print(\"Dataset:\", self.__data_select)\n\n self.__starting_time = time.clock()\n if self.__matheuristic is None:\n self.__exact_solve()\n elif self.__matheuristic == 'hard':\n try:\n self.__hard_fix()\n except:\n pass\n elif self.__matheuristic == 'soft':\n self.__soft_fix()\n else:\n raise ValueError(\"Unrecognized heuristic technique; given: \" + str(self.__matheuristic))\n print(self.__best_incumbent)\n print(\"Elapsed time CPU: \", time.clock() - self.__starting_time)\n print(\"Elpased true time: \", time.time() - start_time)\n\n def release(self):\n\n \"\"\"\n\n Release all the information about the model\n\n :return: None\n\n \"\"\"\n\n self.__model.end()\n\n def write_results(self, file_name='results.csv'):\n\n \"\"\"\n Write the file to be used for perfromance profiling script\n\n :param file_name: Output file name\n :return: None\n\n \"\"\"\n\n with open(self.__project_path + \"/out/\" + self.__out_dir_name + \"/\" + file_name, 'r') as fp:\n file_content = fp.readlines()\n\n num_columns = len(file_content[0].split(sep=','))\n\n with open(self.__project_path + \"/out/\" + self.__out_dir_name + \"/\" + file_name, 'a') as fp:\n\n num_tokens_last_line = len(file_content[-1].split(sep=','))\n\n if num_tokens_last_line == num_columns: # Write down the instance name\n fp.write(\"\\ndata\" + str(self.__data_select))\n\n if (self.__matheuristic == 'hard' or self.__matheuristic == 'soft' or self.__cross_mode == 'loop'):\n fp.write(\",\" + str(self.__best_incumbent))\n else:\n fp.write(\",\" + str(self.__model.solution.get_objective_value()))\n\n def write_solutions(self):\n\n \"\"\"\n\n Writes the solutions obtained by first invoking the built-in function of the model,\n and then by returning our private __get_solution() method, which returns the list of the\n x(i,j,k) variables set to one.\n\n :return: the list of x(i,j,k) variables set to one from the solution\n\n \"\"\"\n\n self.__model.solution.write(self.__project_path + \"/out/\" + self.__out_dir_name + \"/mysol.sol\", 'r')\n return self.__get_solution()\n\n def read_input(self):\n\n \"\"\"\n\n This 
function reads the input files by invoking the private methods which read\n both the turbines and the cables files.\n\n :return: None\n\n \"\"\"\n\n self.__read_turbines_file()\n self.__read_cables_file()\n\n def plot_solution(self, edges=None, high=False):\n\n \"\"\"\n\n py:function:: plot_solution(inst, edges)\n Plots the solution using the plot.ly library\n\n :param show: if =True, the exported plot will be shown right away.\n :param edges: list of edges to be plotted\n :param high: if =True, an high-quality img will be plotted, also\n\n :return: None\n\n \"\"\"\n\n if edges is None:\n edges = self.__get_solution(var='x')\n\n G = nx.DiGraph()\n\n for index, node in enumerate(self.__points):\n G.add_node(index, pos=(node.x, node.y))\n\n for edge in edges:\n G.add_edge(edge.s, edge.d, weight=edge.capacity)\n\n edge_trace = Scatter(\n x=[],\n y=[],\n line=Line(width=0.5, color='#888'),\n hoverinfo='none',\n mode='lines'\n )\n\n for edge in G.edges():\n x0, y0 = G.node[edge[0]]['pos']\n x1, y1 = G.node[edge[1]]['pos']\n edge_trace['x'] += [x0, x1, None]\n edge_trace['y'] += [y0, y1, None]\n\n node_trace = Scatter(\n x=[],\n y=[],\n text=[\"Substation #{0}\".format(i + 1) if self.__points[i].power < -0.5 else \"Turbine #{0}\".format(i + 1) for\n i in range(self.__n_nodes)],\n mode='markers',\n hoverinfo='text',\n marker=Marker(\n showscale=False,\n colorscale='Greens',\n reversescale=True,\n color=[],\n size=10,\n line=dict(width=2))\n )\n\n # Prepare data structure for plotting (x, y, color)\n for node in G.nodes():\n x, y = G.node[node]['pos']\n node_trace['x'].append(x)\n node_trace['y'].append(y)\n node_trace['marker']['color'].append(\"#32CD32\")\n\n # Create figure\n fig = Figure(data=Data(\n [edge_trace, node_trace]),\n layout=Layout(\n title='
' + self.__name + '',\n titlefont=dict(size=16),\n showlegend=False,\n hovermode='closest',\n margin=dict(b=20, l=5, r=5, t=40),\n xaxis=XAxis(showgrid=False, zeroline=False, showticklabels=False),\n yaxis=dict(scaleanchor=\"x\", scaleratio=1, showgrid=False, zeroline=False, showticklabels=False)\n )\n )\n\n py.plot(fig, filename=self.__project_path + '/out/' + self.__out_dir_name + '/img/wind_farm.html')\n\n if high:\n self.__plot_high_quality(edges=self.__get_solution(var='x'))\n\n\n @staticmethod\n def get_distance(point1, point2):\n\n \"\"\"\n py:function:: get_distance(point1, point2)\n Get the distance between two given points\n\n :param point1: First point\n :param point2: Second point\n\n \"\"\"\n\n return math.sqrt(\n (point1.x - point2.x) ** 2\n +\n (point1.y - point2.y) ** 2\n )\n\n @staticmethod\n def are_crossing(pt1, pt2, pt3, pt4):\n\n \"\"\"\n\n Recall that one edge has its extremes on (x_1,y_1) and (x_2,y_2);\n the same goes for the second edge, which extremes are (x_3,y_3) and (x_4,y_4).\n\n :param pt1: first point\n :param pt2: second point\n :param pt3: third point\n :param pt4: fourth point\n\n :return:\n\n \"\"\"\n\n det_A = (pt4.x - pt3.x) * (pt1.y - pt2.y) - (pt4.y - pt3.y) * (pt1.x - pt2.x)\n if det_A == 0:\n return False\n\n # If it's not zero, then the 2x2 system has exactly one solution, which is:\n det_mu = (pt1.x - pt3.x) * (pt1.y - pt2.y) - (pt1.y - pt3.y) * (pt1.x - pt2.x)\n det_lambda = (pt4.x - pt3.x) * (pt1.y - pt3.y) - (pt4.y - pt3.y) * (pt1.x - pt3.x)\n\n mu = det_mu / det_A\n lambd = det_lambda / det_A\n\n if 1e-3 < lambd < 1-1e-3 and 1e-3 < mu < 1-1e-3:\n return True\n else:\n return False\n\n # Get and set methods, in the Pythonic way\n\n @property\n def cross_mode(self):\n return self.__cross_mode\n\n @cross_mode.setter\n def cross_mode(self, cm):\n if not (\n cm == 'no'\n or\n cm == 'lazy'\n or\n cm == 'loop'\n or\n cm == 'callback'\n or\n cm == 'normal'\n ):\n raise ValueError(\"Unrecognized crossing strategy; given: \"+str(cm))\n self.__cross_mode = cm\n\n @property\n def cluster(self):\n return self.__cluster\n\n @cluster.setter\n def cluster(self, c):\n if not type(c) == bool:\n raise TypeError(\"Expecting 'cluster' to be a boolean, either set True or False; given:\" + str(c))\n self.__cluster = c\n\n @property\n def data_select(self):\n return self.__data_select\n\n @data_select.setter\n def data_select(self, d):\n if not type(d) == int:\n raise TypeError(\"Expecting an integer value representing the dataset. Given: \" + str(d))\n if d <= 0 or d >= 32:\n raise ValueError(\"The dataset you're trying to reach is out of range.\\n\" +\n \"Range: [1-31]. Given: \" + str(d))\n self.__data_select = d\n\n @property\n def time_limit(self):\n return self.__time_limit\n\n @time_limit.setter\n def time_limit(self, t):\n if not type(t) == int:\n raise TypeError(\"Timeout time should be given as an integer; given: \" + str(t))\n if t <= 3:\n raise TimeoutError(\"It doesn't make sense to run this program for less than 3 seconds; given: \"\n + str(t))\n self.__time_limit = t\n\n @property\n def rins(self):\n return self.__rins\n\n @rins.setter\n def rins(self, freq):\n if not type(freq) == int:\n raise TypeError(\"The frequency at which RINS will be applied should be an integer; given: \" + str(freq))\n if freq < -1:\n raise ValueError(\"Invalid RINS parameter. 
Integer values above -1 are OK; given: \" + str(freq))\n self.__rins = freq\n\n @property\n def polishtime(self):\n return self.__polishtime\n\n @polishtime.setter\n def polishtime(self, t):\n if not type(t) == int:\n raise TypeError(\"Polish time should be an integer; given: \" + str(t))\n if t <= 3:\n raise TimeoutError(\"Can't start polishing within just 3 seconds; given: \" + str(t))\n\n self.__polishtime = t\n\n @property\n def num_cables(self):\n return self.__n_cables\n\n @num_cables.setter\n def num_cables(self, nc):\n if not type(nc) == int:\n raise TypeError(\"The number of cables must be a positive integer number; given:\" + str(nc))\n if not nc >= 0:\n raise AttributeError(\"The number of cables must be a positive integer number; given:\" + str(nc))\n self.__n_cables = nc\n\n @property\n def c(self):\n return self.__c\n\n @c.setter\n def c(self, c):\n if not type(c) == int:\n raise TypeError(\"The parameter 'c' must be a positive, INTEGER number; given: \" + str(c))\n if c <= 0:\n warnings.warn(\"Substations must accept at least one cable; setting 'c' to its default value (10)\",\n ValueWarning)\n self.__c = 10\n else:\n self.__c = c\n\n @property\n def interface(self):\n return self.__interface\n\n @interface.setter\n def interface(self, choice):\n if not type(choice) == str:\n warnings.warn(\"Choice not given as a string. Trying a conversion.\", ValueWarning)\n choice = str(choice)\n if choice != \"cplex\" and choice != \"docplex\":\n raise NameError(\n \"It is possible to choose either 'cplex' or 'docplex' as opt libraries; given: \" + choice)\n self.__interface = choice\n\n @property\n def turb_file(self):\n return self.__turb_file\n\n @turb_file.setter\n def turb_file(self, fname):\n if not type(fname) == str:\n warnings.warn(\"Turbines filename not given as string. Trying a conversion.\", ValueWarning)\n fname = str(fname)\n if not os.path.isfile(fname):\n raise FileNotFoundError(\"Can't find the '.trb' file; filename given: \" + fname)\n self.__turb_file = fname\n\n @property\n def cbl_file(self):\n return self.__cbl_file\n\n @cbl_file.setter\n def cbl_file(self, fname):\n if not type(fname) == str:\n warnings.warn(\"Cables filename not given as string. Trying a conversion.\", ValueWarning)\n fname = str(fname)\n if not os.path.isfile(fname):\n raise FileNotFoundError(\"Can't find the '.cbl' file; filename given: \" + fname)\n self.__cbl_file = fname\n\n @property\n def out_dir_name(self):\n return self.__out_dir_name\n\n @out_dir_name.setter\n def out_dir_name(self, d):\n if not type(d) == str:\n warnings.warn(\"Out path not given as string. 
Trying a conversion.\", ValueWarning)\n d = str(d)\n if not os.path.exists(self.__project_path + '/out/' + d):\n os.makedirs(self.__project_path + '/out/' + d)\n if not os.path.exists(self.__project_path + '/out/' + d + '/img'):\n os.makedirs(self.__project_path + '/out/' + d + '/img')\n self.__out_dir_name = d\n","sub_path":"src/lib/WindFarm.py","file_name":"WindFarm.py","file_ext":"py","file_size_in_byte":57185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"558794350","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Time:2021/8/4 6:22\n# Description:\n\nimport yaml\n\n# yaml.safe_dump()\n\nwith open(\"./datas/data.yaml\", mode=\"r\", encoding='utf-8') as f:\n print(yaml.safe_load(f))\n\ndic1 = {'name': 'hogwt', 'age': '20', 'gender': 'male'}\nwith open(\"./datas/data2.yaml\", mode='w', encoding='utf-8') as f:\n data2 = yaml.safe_dump(dic1, f)\n","sub_path":"pythoncode/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"563678640","text":"'''Returns project values across multiple masters for specified keys of interest. Return for each key is provided\non a separate wb. Code can handle both standard and project milestone keys, as well as project name lists across\nmultiple quarters.\n\nThere are two outputs.\n1) wb containing all values\n2) wb containing bl values only\n\nConditional formatting is placed in the files as follows:\nrag_rating colours\nmissing data (md) = black grey\nproject not reporting (pnr) = light grey\nkey not collected (knc) = light blue grey\n'''\n\n\nfrom openpyxl import Workbook\nfrom analysis.data import list_of_masters_all, root_path, gen_txt_list, \\\n gen_txt_colours, gen_fill_colours, list_column_ltrs, list_of_rag_keys, rag_txt_list_full, \\\n rag_fill_colours, rag_txt_colours, salmon_fill\nfrom analysis.engine_functions import all_milestone_data_bulk, conditional_formatting, get_quarter_stamp\n\ndef return_data(project_name_list, data_key_list):\n \"\"\"Returns project values across multiple masters for specified keys of interest:\n project_names_list: list of project names\n data_key_list: list of data keys\n \"\"\"\n wb = Workbook()\n\n for i, key in enumerate(data_key_list):\n '''worksheet is created for each project'''\n ws = wb.create_sheet(key[:29], i) # creating worksheets\n ws.title = key[:29] # title of worksheet\n\n '''list project names, groups and stage in ws'''\n for y, project_name in enumerate(project_name_list):\n\n # get project group info\n try:\n group = list_of_masters_all[0].data[project_name]['DfT Group']\n except KeyError:\n for m, master in enumerate(list_of_masters_all):\n if project_name in master.projects:\n group = list_of_masters_all[m].data[project_name]['DfT Group']\n\n ws.cell(row=2 + y, column=1, value=group) # group info return\n ws.cell(row=2 + y, column=2, value=project_name) # project name returned\n\n for x, master in enumerate(list_of_masters_all):\n if project_name in master.projects:\n try:\n #standard keys\n if key in list_of_masters_all[x].data[project_name].keys():\n value = list_of_masters_all[x].data[project_name][key]\n ws.cell(row=2 + y, column=3 + x, value=value) # returns value\n\n if value is None:\n ws.cell(row=2 + y, column=3 + x, value='md')\n\n try: # checks for change against last quarter\n lst_value = list_of_masters_all[x + 1].data[project_name][key]\n if value != lst_value:\n ws.cell(row=2 + y, column=3 + x).fill 
= salmon_fill\n except (KeyError, IndexError):\n pass\n\n # milestone keys\n else:\n milestones = all_milestone_data_bulk([project_name], list_of_masters_all[x])\n value = tuple(milestones[project_name][key])[0]\n ws.cell(row=2 + y, column=3 + x, value=value)\n ws.cell(row=2 + y, column=3 + x).number_format = 'dd/mm/yy'\n if value is None:\n ws.cell(row=2 + y, column=3 + x, value='md')\n\n try: # loop checks if value has changed since last quarter\n old_milestones = all_milestone_data_bulk([project_name], list_of_masters_all[x + 1])\n lst_value = tuple(old_milestones[project_name][key])[0]\n if value != lst_value:\n ws.cell(row=2 + y, column=3 + x).fill = salmon_fill\n except (KeyError, IndexError):\n pass\n\n except KeyError:\n if project_name in master.projects:\n #loop calculates if project was not reporting or data missing\n ws.cell(row=2 + y, column=3 + x, value='knc')\n else:\n ws.cell(row=2 + y, column=3 + x, value='pnr')\n\n else:\n ws.cell(row=2 + y, column=3 + x, value='pnr')\n\n '''quarter tag information'''\n ws.cell(row=1, column=1, value='Group')\n ws.cell(row=1, column=2, value='Projects')\n quarter_labels = get_quarter_stamp(list_of_masters_all)\n for l, label in enumerate(quarter_labels):\n ws.cell(row=1, column=l + 3, value=label)\n\n list_columns = list_column_ltrs[2:len(list_of_masters_all)+2]\n\n if key in list_of_rag_keys:\n conditional_formatting(ws, list_columns, rag_txt_list_full, rag_txt_colours, rag_fill_colours, '1', '60')\n\n conditional_formatting(ws, list_columns, gen_txt_list, gen_txt_colours, gen_fill_colours, '1', '60')\n\n return wb\n\ndef return_baseline_data(project_name_list, data_key_list):\n '''\n returns values of interest across multiple ws for baseline values only.\n project_name_list: list of project names\n data_key_list: list of data keys containing values of interest.\n '''\n wb = Workbook()\n\n for i, key in enumerate(data_key_list):\n '''worksheet is created for each project'''\n ws = wb.create_sheet(key[:29], i) # creating worksheets\n ws.title = key[:29] # title of worksheet\n\n '''list project names, groups and stage in ws'''\n for y, project_name in enumerate(project_name_list):\n\n # get project group info\n try:\n group = list_of_masters_all[0].data[project_name]['DfT Group']\n except KeyError:\n for m, master in enumerate(list_of_masters_all):\n if project_name in master.projects:\n group = list_of_masters_all[m].data[project_name]['DfT Group']\n\n ws.cell(row=2 + y, column=1, value=group) # group info\n ws.cell(row=2 + y, column=2, value=project_name) # project name returned\n\n for x in range(0, len(bc_index[project_name])):\n index = bc_index[project_name][x]\n try: # standard keys\n value = list_of_masters_all[index].data[project_name][key]\n if value is None:\n ws.cell(row=2 + y, column=3 + x).value = 'md'\n else:\n ws.cell(row=2 + y, column=3 + x, value=value)\n except KeyError:\n try: # milestone keys\n milestones = all_milestone_data_bulk([project_name], list_of_masters_all[index])\n value = tuple(milestones[project_name][key])[0]\n if value is None:\n ws.cell(row=2 + y, column=3 + x).value = 'md'\n else:\n ws.cell(row=2 + y, column=3 + x).value = value\n ws.cell(row=2 + y, column=3 + x).number_format = 'dd/mm/yy'\n except KeyError: # exception catches both standard and milestone keys\n ws.cell(row=2 + y, column=3 + x).value = 'knc'\n except TypeError:\n ws.cell(row=2 + y, column=3 + x).value = 'pnr'\n\n ws.cell(row=1, column=1, value='Group')\n ws.cell(row=1, column=2, value='Project')\n ws.cell(row=1, column=3, 
value='Latest')\n ws.cell(row=1, column=4, value='Last quarter')\n ws.cell(row=1, column=5, value='BL 1')\n ws.cell(row=1, column=6, value='BL 2')\n ws.cell(row=1, column=7, value='BL 3')\n ws.cell(row=1, column=8, value='BL 4')\n ws.cell(row=1, column=9, value='BL 5')\n\n list_columns = list_column_ltrs[2:10] # hard coded so not ideal\n\n if key in list_of_rag_keys:\n conditional_formatting(ws, list_columns, rag_txt_list_full, rag_txt_colours, rag_fill_colours, '1', '60')\n\n conditional_formatting(ws, list_columns, gen_txt_list, gen_txt_colours, gen_fill_colours, '1', '60')\n\n return wb\n\n'''Running the programme'''\n'''Place all keys of interest as stings in to a list or use one of the imported lists from the data file'''\ndata_interest = ['Adjusted Benefits Cost Ratio (BCR)',\n 'Initial Benefits Cost Ratio (BCR)',\n 'VfM Category single entry']\n\n'''output one - all data. \nfirst variable = list of project names. There are two options. 1) latest_quarter_project_names 2) all_projects_names\n(which includes older projects that are not currently reporting. \nsecond variable = data_interest. This name does not change. List compiled above'''\nrun_standard = return_data(list_of_masters_all[0].projects, data_interest)\n\n'''output two - bl data\nfirst variable = list of project names. There are two options. 1) latest_quarter_project_names 2) all_projects_names\n(which includes older projects that are not currently reporting. \nsecond variable = data_interest. This name does not change. List compiled above'''\n#run_baseline = return_baseline_data(list_of_masters_all[0].projects, data_interest)\n\n'''Specify name of the output document here. See general guidance re saving output files'''\nrun_standard.save(root_path/'output/vfm_data_query_output.xlsx')\n#run_baseline.save(root_path/'output/data_query_output_bls.xlsx')\n","sub_path":"data_query/data_query.py","file_name":"data_query.py","file_ext":"py","file_size_in_byte":9596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"330631751","text":"import csv\nimport os\n\nimport pymysql\nimport requests\nimport random\n\nimport time\n\nfrom settings import headers,save_path,filename,db_conf,table_name\nimport json\n\n\nclass MT_spider:\n\n base_url = \"http://api.meituan.com/group/v4/deal/select/city/30/cate/1?sort=solds&hasGroup=true&mpt_cate1=1&offset={0}&limit=100\"\n mode_list = ['txt','csv','db']\n table_name = table_name\n\n\n #美团深圳地区美食爬虫\n def __init__(self,save_mode = 'txt'):\n if save_mode not in self.mode_list:\n raise RuntimeError('存储模式指定有误,请输入txt、csv或者db')\n self.save_mode = save_mode\n\n if self.save_mode == 'db':\n self.conn = pymysql.connect(**db_conf)\n self.cur = self.conn.cursor()\n\n sql = '''CREATE TABLE IF NOT EXISTS {0}( \n id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT, \n shopName VARCHAR(60), \n cateName VARCHAR(30), \n avgScore FLOAT, \n areaName VARCHAR(30), \n lat FLOAT, \n lng FLOAT,\n addr VARCHAR(128), \n abstracts TEXT, \n openInfo VARCHAR(128),\n phone VARCHAR(60),\n historyCouponCount INTEGER,\n introduction TEXT,\n featureMenus TEXT\n );'''.format(self.table_name)\n self.cur.execute(sql)\n self.conn.commit()\n else:\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n file_path = os.path.join(save_path,filename+'.'+self.save_mode)\n self.file = open(file_path,'w',encoding='utf-8',newline='')\n if self.save_mode == 'csv':\n self.csvwriter = csv.writer(self.file)\n 
self.csvwriter.writerow(['店铺名称','类别','评分','所属片区','纬度','经度','详细地址','优惠套餐情况','营业时间','联系电话','累计售出份数','餐厅简介','特色菜'])\n\n def run(self):\n i = 0\n while True:\n url = self.base_url.format(str(i*100))\n itemlist = self.parse(url)\n if not itemlist:\n break\n for item in itemlist:\n self.save_item(item)\n print('已成功获取%d个商家信息'%((i+1)*100))\n i += 1\n time.sleep(random.randint(2,5))\n\n def save_item(self,item):\n if self.save_mode == 'txt':\n for k,v in item.items():\n self.file.write(str(k)+':'+str(v) + '\\n')\n self.file.write('\\n\\n-----------------------------\\n\\n\\n')\n elif self.save_mode == 'csv':\n self.csvwriter.writerow(item.values())\n else:\n sql = '''\n INSERT INTO {0}(shopName,cateName,avgScore,areaName,lat,lng,addr,abstracts,openInfo,phone,historyCouponCount,introduction,featureMenus)\n VALUES ('{店铺名称}','{类别}','{评分}','{所属片区}','{纬度}','{经度}','{详细地址}','{优惠套餐情况}','{营业时间}','{联系电话}','{累计售出份数}','{餐厅简介}','{特色菜}')\n '''.format(self.table_name,**item)\n self.cur.execute(sql)\n self.conn.commit()\n\n\n def parse(self,url):\n response = requests.get(url,headers=random.choice(headers))\n number = 0\n while True:\n try:\n info_dict = json.loads(response.text)\n info_list = info_dict['data']\n if info_list:\n break\n else:\n number += 1\n if number >= 10:\n return None\n time.sleep(10)\n response = requests.get(url, headers=random.choice(headers))\n except:\n number += 1\n if number >= 10:\n return None\n time.sleep(10)\n response = requests.get(url, headers=random.choice(headers))\n\n itemlist = []\n for info in info_list:\n # 店铺名称\n name = info['poi']['name']\n # 所属片区\n areaName = info['poi']['areaName']\n # 详细地址\n addr = info['poi']['addr']\n # 纬度\n lat = info['poi']['lat']\n # 经度\n lng = info['poi']['lng']\n # 餐厅类别\n cateName = info['poi']['cateName']\n # 优惠套餐情况\n abstracts = ''\n for abstract in info['poi']['payAbstracts']:\n # abstracts.append(abstract['abstract'])\n abstracts = abstracts + abstract['abstract'] + ';'\n\n # 评分\n avgScore = info['poi']['avgScore']\n # 营业时间\n openInfo = info['poi']['openInfo'].replace('\\n',' ')\n # 联系电话\n phone = info['poi']['phone']\n # 累计售出份数\n historyCouponCount = info['poi']['historyCouponCount']\n # 餐厅简介\n introduction = info['poi']['introduction']\n # 特色菜\n featureMenus = info['poi']['featureMenus']\n item = {\n '店铺名称': name,\n '类别': cateName,\n '评分': avgScore,\n '所属片区': areaName,\n '纬度': lat,\n '经度': lng,\n '详细地址': addr,\n '优惠套餐情况': abstracts,\n '营业时间': openInfo,\n '联系电话': phone,\n '累计售出份数': historyCouponCount,\n '餐厅简介': introduction,\n '特色菜': featureMenus\n }\n\n itemlist.append(item)\n # 返回当前页面item列表\n return itemlist\n\n def __del__(self):\n if self.save_mode == 'db':\n self.cur.close()\n self.conn.close()\n else:\n self.file.close()\n","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":6025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"138464146","text":"from python_pratice.homework_oop.animal import Animals\n\n\"\"\"\n比如创建一个类(Animal)【动物类】,类里有属性(名称,颜色,年龄,性别),\n类方法(会叫,会跑)\n创建子类【猫】,继承【动物类】,\n重写父类的__init__方法,继承父类的属性,\n添加一个新的属性,毛发 = 短毛,\n添加一个新的方法, 会捉老鼠,\n重写父类的【会叫】的方法,改成【喵喵叫】\n\"\"\"\n\n\nclass Cat(Animals):\n\n def __init__(self, nimal_name, animal_colour, animal_age, animal_sex, animal_hail):\n self.animal_hail = animal_hail\n super().__init__(nimal_name, animal_colour, animal_age, animal_sex)\n print(f\"动物名字:{self.animal_name}\\n动物颜色:{self.animal_colour}\\n\"\n f\"动物年龄: {self.animal_age}\\n动物性别: {self.animal_sex}\\n\"\n f\"动物毛发: {self.animal_hail}\")\n\n def 
call(self, animal_call):\n self.animal_call = animal_call\n print(f\"what dose the {self.animal_name} say ? {self.animal_call}\")\n\n def skill(self):\n print(f\"我是可爱的{self.animal_name},会捉老鼠。喵~\")\n\n\nif __name__ == '__main__':\n cat = Cat('cat','yellow',2,'male','short hail')\n cat.call('喵喵喵喵喵喵喵喵喵喵喵喵')\n cat.run()\n cat.skill()\n","sub_path":"python_pratice/homework_oop/animal_cat.py","file_name":"animal_cat.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"331218752","text":"# dojo/urls.py\n\nfrom django.conf.urls import url\nfrom . import views, views_cbv\n\nurlpatterns = [ # 패턴과 주소가 꼭 1:1대응일 필요 없다!!\n url(r'^new/$', views.post_new), # ep21. form용\n url(r'^(?P\\d+)/edit/$', views.post_edit), # ep22 form 수정용\n\n url(r'^sum/(?P[\\d/]+)/$', views.mysum), \n # 끝에 왜 /$ 인지 \"/\"이게 없으면 왜 안되는지 모르겠다.\n url(r'^hello/(?P[a-zA-Z]+)/(?P\\d+)/$', views.hello),\n\n url(r'^list1/$', views.post_list1),\n url(r'^list2/$', views.post_list2),\n url(r'^list3/$', views.post_list3),\n url(r'^excel/$', views.excel_download),\n\n url(r'^cbv/list1/$', views_cbv.post_list1),\n url(r'^cbv/list2/$', views_cbv.post_list2), # AttributeError: module 'dojo.views_cbv' has no attribute 'post_list2'\n url(r'^cbv/list3/$', views_cbv.post_list3),\n url(r'^cbv/excel$', views_cbv.excel_downlaod),\n]","sub_path":"dojo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"513158541","text":"\r\n\r\n\r\ndef factorial(x):\r\n y = 1\r\n for i in range (1, x+1): \r\n y *= i \r\n print(y)\r\n return(y)\r\n\r\n\r\n\r\ndef fibonacci(n):\r\n\r\n a = 1\r\n b = 1\r\n\r\n for i in range(3, n+1):\r\n z=b\r\n b= a+b\r\n a=z\r\n \r\n return(b)\r\n\r\n\r\n\r\ndef fizzbuzz1(n):\r\n\r\n for i in range(n):\r\n \r\n if i%3==0 and i%5!=0:\r\n print(i,' is the fizz')\r\n\r\n elif i%5==0 and i%3!=0:\r\n print(i,' is the buzz')\r\n\r\n elif i%5==0 and i%3==0:\r\n print(i,' is the fizzbuzz')\r\n else:\r\n print(i)\r\n\r\n\r\ndef fizzbuzz2(n):\r\n for i in range(1,n+1):\r\n \r\n if int(i/3)*1.0==(i/3) and int(i/5)*1.0!=(i/5) :\r\n print(i,' is the fizz')\r\n elif int(i/5)*1.0==(i/5) and int(i/3)*1.0!=(i/3):\r\n print(i,' is the buzz')\r\n elif int(i/5)*1.0==(i/5) and int(i/3)*1.0==(i/3):\r\n print(i, 'fizzbuzz')\r\n else:\r\n print(i)\r\n\r\nprint(fizzbuzz2(10))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","sub_path":"Python/simpleMethods.py","file_name":"simpleMethods.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"46156771","text":"# EECSE4764 Lab2 Check2\n# Group 6: NetSpeed Fast\n# Group members: Ruochen You (ry2349), Penghe Zhang (pz2244), Linnan Li(ll3235)\n# Date: 9/25/2018\n\n\nimport time\nimport machine\nfrom machine import Pin\n\n\ncur_state = 1\n\n\ndef callback(p):\n global cur_state\n\n active = 0\n cur_value = p.value()\n\n # debounce\n while active < 20:\n if p.value() == cur_value:\n active += 1\n else:\n active = 0\n time.sleep_ms(1)\n\n cur_state = 1 - cur_state\n\n\ndef main():\n switch = Pin(13, Pin.IN, Pin.PULL_UP)\n pwm = machine.PWM(Pin(15))\n pwm.freq(60)\n\n switch.irq(trigger=Pin.IRQ_RISING | Pin.IRQ_FALLING, handler=callback)\n\n while True:\n if cur_state == 1:\n pwm.duty(1023)\n else:\n pwm.duty(0)\n\n\nif __name__ == '__main__':\n 
main()\n\n\n","sub_path":"Lab2/lab2_group6_check2.py","file_name":"lab2_group6_check2.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"360999750","text":"#!/usr/bin/env python3\n# Python version: python3\n# Auther: sunjb\nfrom django.contrib import admin\nfrom django.urls import path\nfrom app01 import views\nfrom django.urls import re_path,path,include\n\nurlpatterns = [\n path('login', views.login),\n path('index', views.index),\n path('register', views.register),\n path('upload', views.upload),\n # 这里的Home是我们定义的函数名,as_view()是固定的方法\n path('home/', views.Home.as_view()),\n path('dict/', views.dict),\n # 注意在2.2.1版本的django中,要导入re_path,然后用re_path才能使得正则生效,用path正则是不生效的.这个和老版本的有区别\n re_path('detail-(\\d+).html', views.detail),\n # 可以同时传递多个参数,通过?P将正则匹配到的值与nid匹配,形成一个字典{'nid':数字1,'uid':'数字2'},这样在detail2中,就不用担心接受的形参的位置了\n # 在detail中,只需要取key值nid就能够得到第一个实参,取uid就得到第二个实参\n re_path('detail-(?P\\d+)-(?P\\d+).html', views.detail2),\n re_path('reverse$', views.url1, name='u1'),\n re_path('reverse/(\\d+)/(\\d+)', views.url1, name='u2'),\n re_path('reverse3/(?P\\d+)/(?P\\d+)', views.url1, name='u3'),\n re_path('urlmatch$', views.url2, name='i1'),\n re_path('urlmatch/(\\d+)/(\\d+)', views.url2, name='i2'),\n re_path('urlmatch3/(?P\\d+)/(?P\\d+)', views.url2, name='i3'),\n]","sub_path":"week19/mydjango/app01/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"370781873","text":"from util.common_imports import *\n\n\nclass Solution:\n def numTrees(self, n: int) -> int:\n D = defaultdict(int)\n D[0] = 1\n D[1] = 1\n D[2] = 2\n # D[3] = 5\n\n def visit(x):\n if x in D:\n return D[x]\n else:\n ans = 0\n for root in range(x):\n left = root\n right = x - 1 - root\n ans += visit(left) * visit(right)\n D[x] = ans\n return ans\n\n visit(n)\n return D[n]\n\n\n# print(Solution().numTrees(3))\nprint(Solution().numTrees(4))\n","sub_path":"tree/96_不同的二叉搜索树.py","file_name":"96_不同的二叉搜索树.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"318953337","text":"\"\"\"\n\n Collaborative-based filtering for item recommendation.\n\n Author: Team SS3 JHB\n\n Description: Provided within this file is a baseline memory-based \n collaborative filtering algorithm for rating predictions on Movie data.\n\n\"\"\" \n \n# Script dependencies\nimport pandas as pd\nimport numpy as np\n\n#modelling libraries\nfrom scipy.sparse import csr_matrix\nfrom sklearn.neighbors import NearestNeighbors\n\n#other libraries\nimport re \nfrom collections import Counter\nfrom fuzzywuzzy import process\n\n# Importing data\nmovies = pd.read_csv('resources/data/movies.csv',sep = ',',delimiter=',')\nratings = pd.read_csv('resources/data/ratings.csv')\nratings.drop(['timestamp'], axis=1,inplace=True) \n\nN = ratings['userId'].nunique()\nM = ratings['movieId'].nunique()\n\nuser_mapper = dict(zip(np.unique(ratings['userId']), list(range(N))))\nmovie_mapper = dict(zip(np.unique(ratings['movieId']), list(range(M))))\n \nuser_inv_mapper = dict(zip(list(range(N)), np.unique(ratings['userId'])))\nmovie_inv_mapper = dict(zip(list(range(M)), np.unique(ratings['movieId'])))\n\ndef load_sparse_csr(filename):\n # here we need to add .npz extension manually\n loader = np.load(filename + '.npz')\n return csr_matrix((loader['data'], loader['indices'], 
loader['indptr']),\n shape=loader['shape'])\n\nX = load_sparse_csr('resources/models/sparse_matrix')\n\ndef find_similar_movies(movie_id, k, metric='cosine', show_distance=False):\n \"\"\"\n Finds k-nearest neighbours for a given movie id.\n \n Args:\n movie_id: id of the movie of interest\n X: user-item utility matrix\n k: number of similar movies to retrieve\n metric: distance metric for kNN calculations\n \n Returns:\n list of k similar movie ID's\n \"\"\"\n neighbour_ids = []\n \n movie_ind = movie_mapper[movie_id]\n movie_vec = X[movie_ind]\n k+=1\n kNN = NearestNeighbors(n_neighbors=k, algorithm='brute', metric=metric)\n kNN.fit(X)\n if isinstance(movie_vec, (np.ndarray)):\n movie_vec = movie_vec.reshape(1,-1)\n neighbour = kNN.kneighbors(movie_vec, return_distance=show_distance)\n for i in range(0,k):\n n = neighbour.item(i)\n neighbour_ids.append(movie_inv_mapper[n])\n neighbour_ids.pop(0)\n return neighbour_ids\n\ndef movie_finder(title):\n all_titles = movies['title'].tolist()\n closest_match = process.extractOne(title,all_titles)\n return closest_match[0]\n\nmovie_idx = dict(zip(movies['title'], movies['movieId']))\nmovie_titles = dict(zip(movies['movieId'], movies['title']))\n\ndef get_content_based_recommendations(title_string):\n title = movie_finder(title_string)\n idx = movie_idx[title]\n movie_id = idx\n similar_ids = find_similar_movies(movie_id, k=10)\n movie_title = movie_titles[movie_id]\n y = []\n for i in similar_ids:\n y.append(movie_titles[i])\n return y\n\ndef extract_year_from_title(title):\n t = title.split('(')\n year = None\n if re.search(r'\\d+\\)', t[-1]):\n year = t[-1].strip(')')\n year = year.replace(')', ' ')\n return year\n\nmovies['genres'] = movies['genres'].apply(lambda x: x.split('|'))\nmovies['year'] = movies['title'].apply(extract_year_from_title)\nmovies = movies[~movies['year'].isnull()]\n\nrating = ratings.merge(movies, on ='movieId')\nrating.dropna(inplace=True)\n\ndef top_rated(years, genre):\n df = rating[rating['year'] == years]\n comedy_movies = []\n \n for row,col in df.iterrows():\n if genre in col['genres']:\n if col['title'] not in comedy_movies:\n comedy_movies.append(col['title'])\n \n comedy_m = {}\n for i in comedy_movies:\n comedy_m[i] = df[df['title']==i]['rating'].sum()/ len(df[df['title'] == i]['rating'])\n \n c_mm = {k: v for k, v in sorted(comedy_m.items(), key=lambda item: item[1])}\n g = list(c_mm.keys())\n y = (g[::-1])[:10]\n \n return \"\\n\".join(y)\n\ngenres_counts = Counter(g for genres in movies['genres'] for g in genres)\ndel genres_counts['(no genres listed)']\n\ndef genres():\n return list(genres_counts.keys())","sub_path":"recommenders/memory_based_recommender.py","file_name":"memory_based_recommender.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"511954839","text":"class Solution(object):\n def findTargetSumWays(self, nums, S):\n matrix = []\n for i in xrange(len(nums)):\n if i == 0:\n if nums[i] == 0:\n matrix.append({0:2})\n else:\n matrix.append({nums[i]:1, -nums[i]:1})\n else:\n cur = {}\n for p in matrix[i-1]:\n a = p + nums[i]\n b = p - nums[i]\n cur[a] = cur[a] + matrix[i-1][p] if a in cur else matrix[i-1][p]\n cur[b] = cur[b] + matrix[i-1][p] if b in cur else matrix[i-1][p]\n matrix.append(cur)\n return matrix[-1][S] if S in matrix[-1] else 
0\n\n","sub_path":"LeetCode/Solved/oj494.py","file_name":"oj494.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"47632400","text":"#\n# @lc app=leetcode id=2 lang=python3\n#\n# [2] Add Two Numbers\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def addTwoNumbers(self, l1, l2):\n overnum = 0\n num1 = 0\n num2 = 0\n new = ListNode(0)\n node = new\n while True:\n if not (l1 or l2):\n if overnum:\n node.next = ListNode(overnum)\n break\n if l1:\n num1 = l1.val\n l1 = l1.next\n else:\n num1 = 0\n if l2:\n num2 = l2.val\n l2 = l2.next\n else:\n num2 = 0\n res = num1 + num2 + overnum\n overnum = int(res / 10)\n node.next = ListNode(res % 10)\n node = node.next\n return new.next\n \n# @lc code=end\n\n","sub_path":"2.add-two-numbers.py","file_name":"2.add-two-numbers.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"648283084","text":"# coding: utf-8\n\"\"\" Rule the class 'food.Food' \"\"\"\n\n\nclass Food():\n \"\"\"\n Class 'food.Food'\n\n Attributs:\n id (int), name(str), cat(str), cat_id(int), market(str),\n descriptions(str), nutriscore(str), url_id(int)\n\n All attributs are default str('none')\n\n Class methods:\n -food_item_request\n\n Example:\n food_item = Food()\n \"\"\"\n def __init__(self):\n self.id = \"none\"\n self.name = \"none\"\n self.cat = []\n self.cat_id = \"none\"\n self.market = \"none\"\n self.descriptions = \"none\"\n self.nutriscore = \"none\"\n self.url_id = \"none\"\n\n def food_item_request(self, cursor):\n \"\"\" Implement all Food object's attributs with a picking\n row of the table Food in Pur_Beurre database.\n\n Args:\n self: class 'food.Food'\n cursor: class 'pymysql.cursors.DictCursor'\n connection: class 'pymysql.connections.Connection'\n v_cat: str\n v_cat_id: int\n\n Return:\n /\n\n Example:\n self.food_item_request(cursor,v_cat, v_cat_id)\n \"\"\"\n sql = \"SELECT * FROM Food \\\n WHERE food.id = %s ;\"\n cursor.execute(sql, self.id)\n for element in cursor:\n self.nutriscore = (element['nutriscore'].upper())\n self.descriptions = (element['descriptions'])\n self.market = (element['market'])\n self.url_id = (element['url_id'])\n","sub_path":"python/Food.py","file_name":"Food.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"609901334","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 30 18:38:26 2018\n\n@author: Hp\n\"\"\"\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\ngreen = (0,255,255)\n\ndef contador():\n i = 0\n while 1:\n yield i\n i += 1\n\ndef overlay_mask(mask, image):\n # make the mask rgb\n rgb_mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)\n # calculates the weightes sum of two arrays. 
in our case image arrays\n # input, how much to weight each.\n # optional depth value set to 0 no need\n img = cv2.addWeighted(rgb_mask, 0.5, image, 0.5, 0)\n return img\n\ndef show(image):\n # Figure size in inches\n plt.figure(figsize=(10, 10))\n\n # Show image, with nearest neighbour interpolation\n plt.imshow(image, interpolation='nearest')\n\ndef circle_contour(image, contour):\n # Bounding ellipse\n image_c = image.copy()\n # easy function\n rect = cv2.minAreaRect(contour)\n #ellipse = cv2.fitEllipse(contour)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n # add it\n cv2.drawContours(image_c,[box],0,(0,0,255),3)\n #cv2.ellipse(image_with_ellipse, ellipse, green, 2, cv2.LINE_AA)\n return image_c\n\ndef find_biggest_contour(image):\n # Copy\n image = image.copy()\n # input, gives all the contours, contour approximation compresses horizontal,\n # vertical, and diagonal segments and leaves only their end points. For example,\n # an up-right rectangular contour is encoded with 4 points.\n # Optional output vector, containing information about the image topology.\n # It has as many elements as the number of contours.\n # we dont need it\n image, contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)\n\n # Isolate largest contour\n contour_sizes = [(cv2.contourArea(contour), contour) for contour in\n contours]\n biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]\n mask = np.zeros(image.shape, np.uint8)\n cv2.drawContours(mask, [biggest_contour], -1, 255, -1)\n return biggest_contour, mask\n\ndef resize_image(frame):\n frame_gaussian = cv2.GaussianBlur(frame,(7,7),0)\n max_d = max(frame_gaussian.shape)\n scale = 700/max_d\n frame_e = cv2.resize(frame_gaussian,None,fx=scale,fy=scale)\n return frame_e\n\ndef contour(mask):\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))\n # morph the image. closing operation Dilation followed by Erosion.\n # It is useful in closing small holes inside the foreground objects,\n # or small black points on the object.\n mask_closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n # erosion followed by dilation. 
It is useful in removing noise\n mask_clean = cv2.morphologyEx(mask_closed, cv2.MORPH_OPEN, kernel)\n big_bug_contour, mask_bug = find_biggest_contour(\n 255-mask_clean)\n overlay = overlay_mask(mask_clean, frame)\n circled = circle_contour(frame, big_bug_contour)\n return circled,overlay\n\ndef scale_image(frame):\n frame_gaussian = cv2.GaussianBlur(frame, (7, 7), 0)\n max_d = max(frame_gaussian.shape)\n scale = 700 / max_d\n frame_o = cv2.resize(frame_gaussian, None, fx=scale, fy=scale)\n return frame_o\n\ndef anomaly_contour(mask,frame):\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))\n mask_closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n mask_clean = cv2.morphologyEx(mask_closed, cv2.MORPH_OPEN, kernel)\n big_bug_contour, mask_bug = find_biggest_contour(\n 255 - mask_clean)\n overlay = overlay_mask(mask_clean, frame)\n circled = circle_contour(frame, big_bug_contour)\n res = cv2.bitwise_and(frame, frame, mask=mask_clean)\n return res,overlay,circled,mask_clean\n\n\ndef image_processing(path):\n frame_i = cv2.imread(path)\n frame = scale_image(frame_i)\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n lower_green = np.array([30, 30, 30])\n upper_green = np.array([140, 255, 255])\n green_mask = cv2.inRange(hsv, lower_green,upper_green)\n mask = green_mask\n a, b, c, d = anomaly_contour(mask,frame)\n #cv2.imwrite('mask.jpeg', 255 - d)\n cv2.imwrite('res.jpg', c)\n\ndef video_processing():\n cap = cv2.VideoCapture(0)\n while 1:\n _, frame = cap.read()\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n lower_green = np.array([30, 30, 30])\n upper_green = np.array([140, 255, 255])\n green_mask = cv2.inRange(hsv, lower_green, upper_green)\n mask = green_mask\n a, b, c, d = anomaly_contour(mask,frame)\n res = cv2.bitwise_and(frame, frame, mask=d)\n cv2.imshow('frame', frame)\n cv2.imshow('res', res)\n k = cv2.waitKey(5) & 0xFF\n if k == 99:\n x = contador()\n cv2.imwrite('mask{}.jpg'.format(next(x)), 255 - d)\n cv2.imwrite('res{}.jpg'.format(next(x)), c) \n if k == 27:\n break\n cv2.destroyAllWindows()\n del cap\n\n\nif __name__ == \"__main__\":\n path = \"dataset/val/data/images.jpg\"\n image_processing(path)\n video_processing()\n","sub_path":"cam_detecter.py","file_name":"cam_detecter.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"364030752","text":"import torch\nimport torch.nn as nn\nfrom pathlib import Path\nimport os\nfrom torchvision.utils import save_image\nfrom PIL import Image\nfrom torchvision import transforms\nfrom IPython.display import clear_output\nfrom shutil import copyfile\n\n\n\ndef create_folders(path,direc_types,model_info):\n \"\"\"\n create the directories to stock the images\n Args:\n origin_path(str): where to create these dirs\n direc_types(list:str): \"gradients\",\"max_activ\",\"cropped\"\n \n \"\"\"\n Path(path).mkdir(parents=False, exist_ok=True)\n for dirtype in direc_types:\n subpath = os.path.join(path,dirtype)\n Path(subpath).mkdir(parents=False, exist_ok=True)\n for i,lay_info in enumerate(model_info):\n if (type(lay_info['lay']) == nn.Conv2d):\n subpath2 = os.path.join(subpath,lay_info['name'])\n Path(subpath2).mkdir(parents=False, exist_ok=True)\n for j,filt in enumerate(lay_info['filters']):\n subpath3 = os.path.join(subpath2,str(filt[\"id\"]))\n Path(subpath3).mkdir(parents=False, exist_ok=True)\n \ndef clean_bw_imgs(path_to_imgs_dirs):\n \"\"\"\n Cleans the tinyimagenet from its bw images.\n Args:\n path_to_imgs_dirs(str):path 
to the train or val folder of ImageNet\n \"\"\"\n print(\"BW cleaning started\")\n list_bw = []\n for i,(root, dirs, files) in enumerate(os.walk(path_to_imgs_dirs,topdown=False)):\n clear_output(wait=True)\n print(\"Progression:{:.2f}%\".format(i/1000*100))\n for name in files:\n path = os.path.join(root, name)\n image = Image.open(path)\n image = transforms.ToTensor()(image)\n if image.shape != torch.Size([3,224,224]):\n list_bw.append(path)\n image = torch.stack([image,image,image],dim =1).squeeze(0)\n assert(image.shape ==torch.Size([3,224,224]))\n save_image(image,path)\n print(\"BW files found:\")\n for i in list_bw:\n print(i)\n print(\"BW cleaning terminated.\")\n \ndef sample_imagenet(src_path_imgs,trgt_pathname_imgs,img_num_per_dir = 1):\n \"\"\"\n create another directory similar to imagenet with a smaller number of images per class\n Args:\n src_path_imgs(str): path to train or val directory of ImageNet\n trgt_path_imgs(str): path + name of the folder which will keep the imagenet samples\n \"\"\"\n assert(img_num_per_dir >=1)\n #target_path = \"./data/exsmallimagenet\"\n #src_path = \"./data/tinyimagenet/train/\"\n print(\"Start sampling\")\n Path(trgt_pathname_imgs).mkdir(parents=True, exist_ok=True)\n for num_subfold,subfold in enumerate(os.listdir(src_path_imgs)):\n clear_output(wait=True)\n print(\"Progression:{:.2f}%\".format(num_subfold/1000*100))\n subfold_trget_path = os.path.join(trgt_pathname_imgs,subfold)\n subfold_src_path = os.path.join(src_path_imgs,subfold)\n # create the directory\n Path(subfold_trget_path).mkdir(parents=True,exist_ok=True)\n for i,file in enumerate(os.listdir(subfold_src_path)):\n if i >= img_num_per_dir:\n break\n copyfile(os.path.join(subfold_src_path,file),os.path.join(subfold_trget_path,file))\n print(\"Sampling terminated.\")","sub_path":"src/misc_funcs.py","file_name":"misc_funcs.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"382509510","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: cuiyiwork@foxmail.com\n# Created Time: 6/3/2019 3:04 PM\nimport logging\nimport os\nimport time\nimport math\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nfrom context import Context\nfrom hparams import Hparams\nfrom utils import save_hparams, save_variable_specs, save_operation_specs, load_hparams\nfrom data_load import get_batch2\nfrom lr.model import LR\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nlogging.info(\"# hparams\")\nhparams = Hparams()\nparser = hparams.parser\nhp = parser.parse_args()\nrun_type = hp.run_type\nlogdir = hp.logdir\nbatch_size = hp.batch_size\nnum_epochs = hp.num_epochs\nassert hp.run_type in (\"new\", \"continue\", \"finetune\")\nif \"continue\" == hp.run_type:\n load_hparams(hp, logdir)\n batch_size = hp.batch_size\ncontext = Context(hp)\n\nlogging.info(\"# Prepare train/eval batches\")\nlogging.info(\"Use %s for training set\", hp.train_data)\nparams = {\"maxlens\": 100}\ntrain_batches, num_train_batches, num_train_samples = get_batch2(fpath=hp.train_data,\n task_type=\"set2sca\",\n input_indices=context.input_indices,\n vocabs=context.vocabs,\n params=params,\n batch_size=batch_size, shuffle=True)\n\n# create a iterator of the correct shape and type\niterr = tf.data.Iterator.from_structure(train_batches.output_types, train_batches.output_shapes)\ninputs_and_target = iterr.get_next()\n\n# 照抄即可,目前不是很熟悉这些接口\ntrain_init_op = iterr.make_initializer(train_batches)\n\nmodel = 
LR(context)\nloss, train_op, global_step, train_summaries = model.train(inputs=inputs_and_target[:-1], targets=inputs_and_target[-1])\n\nlogging.info(\"# Session\")\nsaver = tf.train.Saver(max_to_keep=num_epochs)\nconfig = tf.ConfigProto(allow_soft_placement=True)\nwith tf.Session(config=config) as sess:\n time_sess = time.time()\n ckpt = tf.train.latest_checkpoint(logdir)\n if ckpt is None or \"new\" == run_type: # 新建\n save_hparams(hp, logdir)\n logging.info(\"Initializing from scratch\")\n sess.run(tf.global_variables_initializer())\n else: # continue OR finetune\n saver.restore(sess, ckpt)\n if \"finetune\" == hp.run_type: # finetune\n save_hparams(hp, logdir)\n else: # continue\n batch_size = hp.batch_size\n\n save_variable_specs(os.path.join(logdir, \"var_specs\"))\n save_operation_specs(os.path.join(logdir, \"op_specs\"))\n f_debug = open(os.path.join(logdir, \"debug.txt\"), \"a\")\n summary_writer = tf.summary.FileWriter(logdir, sess.graph)\n if hp.zero_step:\n sess.run(global_step.assign(0))\n\n sess.run(train_init_op)\n total_steps = num_epochs * num_train_batches\n logging.info(\"total_steps:%s, num_epochs:%s, num_train_batches:%s\", total_steps, num_epochs, num_train_batches)\n _gs = sess.run(global_step)\n logging.info(\"global_step is stated at %s\", _gs)\n t_epoch = time.time()\n model_output = 'default'\n for i in tqdm(range(_gs, total_steps + 1)):\n ts = time.time()\n # f_debug.write(\"loss\\n\")\n # tensor_tmp = tf.get_default_graph().get_tensor_by_name(\"loss:0\")\n # np.savetxt(f_debug, tensor_tmp.eval().reshape([1]), delimiter=', ', footer=\"=\" * 64)\n _, _gs, _summary = sess.run([train_op, global_step, train_summaries])\n epoch = math.ceil(_gs / num_train_batches)\n f_debug.write(\"train: epoch %s takes %s\\n\" % (epoch, time.time() - ts))\n summary_writer.add_summary(_summary, _gs)\n\n if _gs and _gs % num_train_batches == 0:\n logging.info(\"epoch {} is done\".format(epoch))\n _loss = sess.run(loss) # train loss\n\n logging.info(\"# save models\")\n model_output = \"model%02dL%.2f\" % (epoch, _loss)\n ckpt_name = os.path.join(logdir, model_output)\n saver.save(sess, ckpt_name, global_step=_gs)\n logging.info(\"after training of {} epochs, {} has been saved.\".format(epoch, ckpt_name))\n\n logging.info(\"# fall back to train mode\")\n ts = time.time()\n sess.run(train_init_op)\n logging.info(\"fallback_train: %s\\t%s\\t%s takes %s\" % (i, _gs, epoch, time.time() - ts))\n logging.info(\"epoch %s takes %s\", epoch, time.time() - t_epoch)\n t_epoch = time.time()\n summary_writer.close()\n logging.info(\"Session runs for %s\", time.time() - time_sess)\n graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, output_node_names=[\"inferences\"])\n tf.train.write_graph(graph_def, logdir, '%s.pb' % model_output, as_text=False)\nf_debug.close()\nlogging.info(\"Done\")\n","sub_path":"lr/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"431150007","text":"from crawler import Crawler\nfrom executor import Executor\nimport argparse\n\n\ndef setup(args):\n contest_name = args.contest_name\n crawler = Crawler(contest_name)\n crawler.run()\n\n\ndef run_test(args):\n excutor = Executor(args.contest_name)\n excutor.excute_test_cases(args.problem_name)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"contest_name\", type=str)\n parser.add_argument(\"-s\", \"--setup\", 
action=\"store_true\")\n parser.add_argument(\"-p\", \"--problem_name\", type=str)\n\n args = parser.parse_args()\n\n if args.setup:\n setup(args)\n\n if args.problem_name:\n run_test(args)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"122561047","text":"# -*- coding: utf-8 -*-\n\n'''\n1. Пользователь вводит данные о количестве предприятий,\nих наименования и прибыль за 4 квартала (т.е. 4 отдельных числа)\nдля каждого предприятия.. Программа должна определить среднюю прибыль\n(за год для всех предприятий) и вывести наименования предприятий,\nчья прибыль выше среднего и отдельно вывести наименования предприятий, чья прибыль ниже среднего.\n'''\nimport collections\n\n# без коллекций.\ndef calc_profit():\n\tquantity_of_companies = int(input('Введите количество предприятий: '))\n\tdict_of_companies_n_quarter_profit = {}\n\n\tfor i in range(1, quantity_of_companies + 1):\n\t\tname = input('Введите название предприятия: ')\n\t\tcompany_year_profit = []\n\t\tfor quarter in range(1, 5):\n\t\t\tquarter_profit = int(input(f'Доход за {quarter} квартал: '))\n\t\t\tcompany_year_profit.append(quarter_profit)\n\t\tdict_of_companies_n_quarter_profit[name] = company_year_profit\n\n\t# создаем список сумм (хотя можно было в цикле аппендить уже общую сумму, но предположим,\n\t# что необходимо сохранять квартальные значения)\n\tlist_of_all_profits = [item for sublist in dict_of_companies_n_quarter_profit.values() for item in [sum(sublist)]]\n\taverage_profit = sum(list_of_all_profits) / len(list_of_all_profits)\n\tprint(f'\\nСредняя прибыль организаций за год: {average_profit} руб.\\n')\n\n\tcompanies_year_profit = dict(zip(dict_of_companies_n_quarter_profit, list_of_all_profits))\n\n\tcompanies_with_less_than_av = {k: v for k, v in companies_year_profit.items() if v < average_profit}\n\tcompanies_with_more_than_av = {k: v for k, v in companies_year_profit.items() if v > average_profit}\n\tprint(f'Прибыль ниже средней: {companies_with_less_than_av}\\n')\n\tprint(f'Прибыль выше средней: {companies_with_more_than_av}\\n')\n\ncalc_profit()\n\n# используя коллекции.\ndef calc_profit_with_col():\n\tquantity_of_companies = int(input('Введите количество предприятий: '))\n\tCompany = collections.namedtuple('Company', ['name', 'profit'])\n\tlst_of_comps = []\n\tfor i in range(1, quantity_of_companies + 1):\n\t\tname = input('Введите имя: ')\n\t\tprofit = 0\n\t\tfor quarter in range(1, 5):\n\t\t\tprofit += int(input(f'Доход за {quarter} квартал: '))\n\t\tcompany = Company(name, profit)\n\t\tlst_of_comps.append(company)\n\n\taverage_profit = sum([company.profit for company in lst_of_comps]) / len(lst_of_comps)\n\tprint(f'\\nСредняя прибыль организаций за год: {average_profit} руб.\\n')\n\n\tcomps_with_less_than_av = [company for company in lst_of_comps if company.profit < average_profit]\n\tcomps_with_more_than_av = [company for company in lst_of_comps if company.profit > average_profit]\n\tprint(f'Прибыль ниже средней: {comps_with_less_than_av}\\n')\n\tprint(f'Прибыль выше средней: {comps_with_more_than_av}\\n')\n\ncalc_profit_with_col()\n# Итого: использование именнованного кортежа позволило значительно упростить генераторные выражения.\n# алгоритмы решений с DefaultDict и OrderedDict по сути не будет отличаться от первоначального и при этом не будут\n# использованы их особенности, поэтому не буду простыню из кода 
присылать.\n","sub_path":"5/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":3895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"341103316","text":"#!/usr/bin/env python\nimport pika\n\ncredentials = pika.PlainCredentials(\"avani\", \"avani\")\nconnection = pika.BlockingConnection(\n pika.ConnectionParameters(\n\t\t\"192.168.1.10\",\n\t\t5672,\n\t\t\"/\",\n\t\tcredentials,\n\t\theartbeat=10,\n\t\tblocked_connection_timeout=10,\n\t))\nchannel = connection.channel()\n\nchannel.queue_declare(queue='hello')\n\nchannel.basic_publish(exchange='', routing_key='hello', body='Hello World!')\nprint(\" [x] Sent 'Hello World!'\")\nconnection.close()","sub_path":"wifi_code/2021-02-18-demo/0218-demo-01/rabbit-demo.py","file_name":"rabbit-demo.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"646477376","text":"\nfrom flask import Flask\nfrom flask import request\nimport json # json을 쓰기 위한 모듈\napp = Flask(__name__)\n\n@app.route('/') # 라우팅\ndef main(): # 해당 라우팅에서 실행할 함수\n result = {\"id\": \"123\", \"name\": \"delryn\"}\n jsonString = json.dumps(result) # = JSON.stringfy()\n print(type(jsonString)) # \n return jsonString \n\n'''\nhttp method\n@app.route에서 methods = ['GET'] 또는 methods = ['POST'] 로 정할 수 있다. 그리고 짬뽕도 된다! methods = ['GET', 'POST']\n'''\n\n@app.route('/get', methods = ['GET'])\ndef queryString():\n a = request.args.get('a') # 원하는 쿼리스트링의 값 가지고 오기.\n queryString = request.query_string # 쿼리스트링 전체 가지고 오기 자주 쓰이진 않지만 매모.\n print(a)\n print('-----------')\n print(queryString) \n return queryString\n\n@app.route('/post', methods = ['POST'])\ndef post():\n val = request.json\n val2 = request.data\n print(val)\n print('---------')\n print(val2)\n print(json.loads(val2)) # = JSON.parse()\n print(json.loads(val2)['a'])\n print('---------')\n print(request.json['a'])\n result = json.dumps(val)\n return result\n\n# GET, POST 동시에 허용 하고 들어오는 메소드에 따라 처리 가능.\n@app.route('/get-post', methods = ['GET', 'POST'])\ndef queryPost():\n if request.method == 'GET':\n return \"get\"\n else:\n return \"post\"\n\nif __name__ == '__main__':\n app.debug = True # debug 모드 on 오류 났을 때 웹에 출력 해주는 그런 거 -_-.\n\n '''\n host default는 로컬호스트, 근데 변경할려면 요렇게 host='아이피 주소' 넣고 하면 된다. 근데 딱히 바꿀 일은 업을 꺼 같지만.\n port는 요렇게 변경 가능 하다. 
기입 안 하면 default는 5000\n '''\n app.run(host='127.0.0.1', port=3000)\n\n","sub_path":"flask_study/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"582571377","text":"class Solution:\n def fairCandySwap(self, A, B):\n diff = sum(A) - sum(B)\n ab = int(diff / 2)\n dict_A = set(A)\n # if ab == 0: return []\n \n for b in B:\n if b + ab in dict_A:\n return [b + ab, b]\n","sub_path":"LC888_candy_swap.py","file_name":"LC888_candy_swap.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"314990126","text":"#!/usr/bin/python\n\nimport rospy, tf_conversions\nfrom motive_ur_calib.srv import *\nfrom geometry_msgs.msg import Point, Quaternion, Pose\n\nimport numpy as np\n\ndef ROSPts_to_4xN_arr(P):\n ls_ = [ [ p.x, p.y, p.z, 1.0 ] for p in P ]\n return np.asarray(ls_).T\n \ndef x1_arr_to_ROSPt(M):\n return Point(x=M[0], y=M[1], z=M[2])\n\ndef xN_arr_to_ROSPts(M):\n return [ x1_arr_to_ROSPt(M[:,i]) for i in range(M.shape[1]) ]\n \ndef ROSQuat_to_xyzw(q):\n return [ q.x, q.y, q.z, q.w ]\n \ndef ROSPose_to_TM(pose):\n f_ = tf_conversions.posemath.fromMsg(pose)\n return tf_conversions.posemath.toMatrix(f_)\n \ndef TM_to_ROSPose(M):\n f_ = tf_conversions.posemath.fromMatrix(M)\n return tf_conversions.posemath.toMsg(f_)\n\nclass MotiveTF_node():\n def __init__(self):\n rospy.init_node('motiveTF_node', anonymous=True)\n \n htm_list_ = rospy.get_param(\"~htm\").split(',')\n self._htm = np.asarray(map(float,htm_list_)).reshape(4,4)\n\n self._rb_rot_adj = tf_conversions.transformations.quaternion_from_matrix(self._htm)\n self._rb_rot_adj[3] = -self._rb_rot_adj[3] # invert by taking w = -w\n \n rospy.Service('motiveTF', MotiveTF, self._tf_cb)\n \n rospy.spin()\n \n def _tf_cb(self, req):\n res_ = MotiveTFResponse(markers=[], rigid_bodies=[])\n \n # marker positions\n if (len(req.markers) != 0):\n res_.markers = xN_arr_to_ROSPts(\n np.matmul(self._htm, ROSPts_to_4xN_arr(req.markers)))\n \n # rigid body poses\n for rbp in req.rigid_bodies:\n quat_rot_adj_ = tf_conversions.transformations.quaternion_multiply(\n ROSQuat_to_xyzw(rbp.orientation), self._rb_rot_adj)\n req_pose_rot_adj_ = Pose(position=rbp.position, orientation=Quaternion(*quat_rot_adj_))\n M_tf_ = np.matmul(self._htm, ROSPose_to_TM(req_pose_rot_adj_))\n res_.rigid_bodies.append(TM_to_ROSPose(M_tf_))\n \n return res_\n \nif __name__ == '__main__':\n mTF_node_ = MotiveTF_node()\n","sub_path":"src/tf_node.py","file_name":"tf_node.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"332550249","text":"# -*- mode: python -*-\n\nimport os\nimport jieba\n\njieba_path = os.path.dirname(jieba.__file__)\n\nblock_cipher = None\n\n\na = Analysis(['app/MyNote.py'],\n pathex=['/home/breeze/Develop/MyNote'],\n binaries=[],\n datas=[('README.md', '.'),\n ('app/keys', 'keys'),\n ('app/static', 'static'),\n ('app/templates', 'templates'),\n ('app/translations', 'translations'),\n ('app/configuration.yml', '.'),\n ('app/configuration.yml.readme', '.'),\n (os.path.join(jieba_path, 'dict.txt'), 'jieba'),\n (os.path.join(jieba_path, 'analyse', 'idf.txt'), os.path.join('jieba', 'analyse')),\n ('app/Install.sh', '.'),\n ('app/MakeShortcut.sh', '.'),\n ('app/Uninstall.sh', '.'),],\n hiddenimports=[],\n hookspath=[],\n runtime_hooks=[],\n excludes=[],\n 
win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher)\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\nexe = EXE(pyz,\n a.scripts,\n exclude_binaries=True,\n name='MyNote',\n debug=False,\n strip=False,\n upx=True,\n console=True)\ncoll = COLLECT(exe,\n a.binaries,\n a.zipfiles,\n a.datas,\n strip=False,\n upx=True,\n name='MyNote')\n","sub_path":"MyNote.spec","file_name":"MyNote.spec","file_ext":"spec","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"246887832","text":"from django.shortcuts import get_object_or_404, render, get_list_or_404\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.views import generic\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.models import User\nfrom serwis.models import Movie, Actor, Series,Director\nfrom random import shuffle\nfrom django.db.models import get_model\nfrom datetime import datetime\nfrom django.shortcuts import redirect\nfrom django.contrib.auth import logout\n\n\n\ndef load_example_data(request):\n #Gladiator\n actor = Actor(name=\"Russell\", last_name=\"Crowe\", born=datetime.strptime('1964-04-07','%Y-%m-%d'), biography=\"Tutaj jakas przykladowa biografia. A tutaj cos innego.\")\n actor.save()\n \n director = Director(name=\"Ridley\", last_name=\"Scott\", born=datetime.strptime('1937-11-30','%Y-%m-%d'), biography=\"Tutaj jakas przykladowa biografia. A tutaj cos innego.\")\n director.save()\n \n movie = Movie(title=\"Gladiator\", release_date=datetime.strptime('2000', '%Y'), director=director, description=\"General Maximus - prawa reka cesarza, szczesliwy maz i ojciec w jednej chwili traci wszystko. Jako niewolnik-gladiator musi walczyc na arenie o przezycie.\", genre=\"historyczny\")\n movie.save()\n movie.actors.add(actor)\n movie.save()\n \n #movie_actor = Movie_Actor(id_movie = movie, id_actor = actor, character = \"Maximus\")\n #movie_actor.save()\n \n \n #Ojciec chrzestny\n actor_1 = Actor(name=\"Marlon\", last_name=\"Brando\", born=datetime.strptime('1924-04-03','%Y-%m-%d'), biography=\"Tutaj jakas przykladowa biografia. A tutaj cos innego.\")\n actor_1.save()\n actor_2 = Actor(name=\"Al\", last_name=\"Pacino\", born=datetime.strptime('1940-04-25','%Y-%m-%d'), biography=\"Tutaj jakas przykladowa biografia. A tutaj cos innego.\")\n actor_2.save()\n \n director = Director(name=\"Francis Ford\", last_name=\"Coppola\", born=datetime.strptime('1939-04-07','%Y-%m-%d'), biography=\"Tutaj jakas przykladowa biografia. A tutaj cos innego.\")\n director.save()\n \n movie = Movie(title=\"Ojciec chrzestny\", release_date=datetime.strptime('1972', '%Y'), director=director, description=\"Opowiesc o nowojorskiej rodzinie mafijnej. Starzejacy sie Don Corleone pragnie przekazac wladze swojemu synowi.\", genre=\"gangsterski\")\n movie.save()\n movie.actors.add(actor_1, actor_2)\n movie.save()\n \n #movie_actor = Movie_Actor(id_movie = movie, id_actor = actor_1, character = \"Don Vito Corleone\")\n #movie_actor.save()\n #movie_actor = Movie_Actor(id_movie = movie, id_actor = actor_2, character = \"Michael Corleone\")\n #movie_actor.save()\n \n \n \n #Janosik\n actor_1 = Actor(name=\"Marek\", last_name=\"Perepeczko\", born=datetime.strptime('1942-04-03','%Y-%m-%d'), biography=\"Tutaj jakas przykladowa biografia. 
A tutaj cos innego.\")\n actor_1.save()\n actor_2 = Actor(name=\"Bogusz\", last_name=\"Bilewski\", born=datetime.strptime('1930-09-25','%Y-%m-%d'), biography=\"Tutaj jakas przykladowa biografia. A tutaj cos innego.\")\n actor_2.save()\n actor_3 = Actor(name=\"Witold\", last_name=\"Pyrkosz\", born=datetime.strptime('1926-12-24','%Y-%m-%d'), biography=\"Tutaj jakas przykladowa biografia. A tutaj cos innego.\")\n actor_3.save()\n \n director = Director(name=\"Jerzy\", last_name=\"Passendorfer\", born=datetime.strptime('1923-04-08','%Y-%m-%d'), biography=\"Tutaj jakas przykladowa biografia. A tutaj cos innego.\")\n director.save()\n \n series = Series(title=\"Janosik\", release_date=datetime.strptime('1973', '%Y'), director=director, description=\"Tatrzanski zbojnik staje w obronie ucisnionych chlopow.\", genre=\"kostiumowy\")\n series.save()\n series.actors.add(actor_1, actor_2, actor_3)\n series.save()\n \n #series_actor = Series_Actor(id_series = series, id_actor = actor_1, character = \"Janosik\")\n #series_actor.save()\n #series_actor = Series_Actor(id_series = series, id_actor = actor_2, character = \"Walus Kwiczol\")\n #series_actor.save()\n #series_actor = Series_Actor(id_series = series, id_actor = actor_3, character = \"Jedrus Pyzdra\")\n #series_actor.save()\n \n return render(request, 'serwis/load.html')\n \n\ndef get_short_description(s):\n get_split = s.split('.')\n return get_split[0]\n \ndef get_whole_name(item):\n return str(item.name)+' '+str(item.last_name)\n\n@login_required\ndef index(request):\n movies = get_list_or_404(Movie)\n shuffle(movies)\n slider = []\n for (counter, movie) in enumerate(movies):\n if counter == 5:\n break\n middle_movie = { 'title' : movie.title, 'id' : movie.id, 'src' : 'images/slideshow/0'+str(counter+1)+'.jpg'}\n slider.append(middle_movie)\n \n movies = get_list_or_404(Movie)\n recent_movies = []\n for (counter, movie) in enumerate(movies):\n if counter == 4:\n break\n rec_movie = { 'title' : movie.title, 'id' : movie.id, 'src' : 'images/templatemo_image_0'+str(counter+1)+'.jpg'}\n recent_movies.append(rec_movie)\n \n try:\n movie = Movie.objects.get(id=1) \n best_movie = {'title': movie.title, 'short_desc' : get_short_description(movie.description), 'id' : movie.id}\n except Movie.DoesNotExist:\n best_movie = None\n \n try:\n actor = Actor.objects.get(id=1)\n best_actor = {'title': get_whole_name(actor), 'short_desc' : get_short_description(actor.biography), 'id' : actor.id}\n except Actor.DoesNotExist:\n best_actor = None\n \n try:\n director = Director.objects.get(id=1)\n best_director = {'title': get_whole_name(director), 'short_desc' : get_short_description(director.biography), 'id' : director.id}\n except Director.DoesNotExist:\n best_director = None\n \n context = {'slider' : slider, 'recent_movies' : recent_movies, 'best_movie': best_movie, 'best_actor': best_actor, 'best_director' : best_director}\n \n return render(request, 'serwis/index.html', context)\n \ndef get_item_from_model(item, counter, type, user):\n if type == 'Movie' or type == 'Series':\n item = {'id' : item.id, 'title' : item.title, 'src' : 'images/news_image_0' + str(counter+1) + '.jpg', 'short_desc' : get_short_description(item.description), 'vote' : get_votes(item.id, type, user), 'date' : 'Wydany: ' + str(item.release_date.year) } \n else :\n item = {'id' : item.id, 'title' : get_whole_name(item), 'src' : 'images/news_image_0' + str(counter+1) + '.jpg', 'short_desc' : get_short_description(item.biography), 'vote' : get_votes(item.id, type, user), 'date' : 'Urodzony: 
' + str(item.born) } \n return item \n\n@login_required \ndef ranking(request, type):\n model_class = get_model('serwis', type)\n lists = model_class.objects.all()\n title = 'Ranking' + type\n rank = []\n for (counter, item) in enumerate(lists):\n rank_item = get_item_from_model(item, counter, type, request.user) \n rank.append(rank_item)\n context = {'title' : title, 'type' : type, 'rank' : rank}\n return render(request, 'serwis/ranking.html', context)\n\n@login_required \ndef baza(request, type):\n model_class = get_model('serwis', type)\n lists = model_class.objects.all()\n title = 'Baza' + type\n data = []\n for (counter, item) in enumerate(lists):\n data_item = get_item_from_model(item, counter, type, request.user)\n data.append(data_item)\n context = {'title' : title, 'type' : type, 'data' : data}\n return render(request, 'serwis/baza.html', context)\n \ndef context_for_detail(item, type, user):\n vote = get_votes(item.id, type, user) \n if type == 'Movie' or type == 'Series':\n director = {'name' : 'Rezyser: ' + get_whole_name(item.director), 'id' : item.director.id}\n got_actors = item.actors.all()\n actors = []\n for actor in got_actors :\n actors.append({'name' : get_whole_name(actor), 'id' : actor.id})\n context = {'page_title' : item.title, 'title' : item.title, 'date' : 'Data produkcji: '+str(item.release_date.year), 'director' : director, 'genre' : 'Gatunek: ' + str(item.genre), 'beforedesc' : 'Opis:','description' : item.description, 'src' : 'images/test.jpg', 'actors' : actors, 'vote' : vote, 'this_id' : item.id, 'type' : type }\n else: \n productions = []\n got_movies = item.movie_set.all()\n for movie in got_movies:\n productions.append({'name': movie.title, 'type':'Movie', 'id':movie.id}) \n got_series = item.series_set.all()\n for series in got_series:\n productions.append({'name': series.title, 'type':'Series', 'id':series.id}) \n context = {'page_title' : get_whole_name(item), 'title' : get_whole_name(item), 'date' : 'Data urodzenia: '+str(item.born), 'beforedesc' : 'Biografia:', 'description' : item.biography, 'src' : 'images/test.jpg', 'productions' : productions, 'vote' : vote, 'this_id' : item.id, 'type' : type}\n return context\n\n@login_required \ndef detail(request, type, id_number):\n model_class = get_model('serwis', type)\n item = model_class.objects.get(id=id_number)\n context = context_for_detail(item, type, request.user)\n return render(request, 'serwis/detail.html', context)\n\n@login_required \ndef panel(request):\n return render(request, 'serwis/panel.html')\n\n@login_required\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(\"/serwis/\")\n \ndef add_vote(id_number, type, vote, user):\n model_class = get_model('serwis', 'Votes_'+type)\n model_class_object = get_model('serwis', type)\n got_object = model_class_object.objects.get(id=id_number)\n try:\n model_class.objects.get(user=user, to=got_object)\n except model_class.DoesNotExist:\n model_class.objects.create(user=user, to=got_object, vote=vote) \n\ndef get_votes(id_number, type, user):\n model_class = get_model('serwis', 'Votes_'+type) \n model_class_object = get_model('serwis', type)\n got_object = model_class_object.objects.get(id=id_number)\n yes = model_class.objects.filter(user=user, to=got_object, vote='yes').count()\n no = model_class.objects.filter(user=user, to=got_object, vote='no').count()\n return {'yes':yes, 'no':no}\n\n@login_required \ndef vote(request, type, id_number):\n add_vote(id_number, type, request.POST['choice'], request.user)\n return 
HttpResponseRedirect(reverse('serwis:detail', args=(type,id_number)))\n \n \n","sub_path":"serwis/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"463131536","text":"import sys\n\nimport spacy\nimport torch\n\nfrom tqdm import tqdm\n\nfrom dataloader import vocabs\nfrom dataloader import utils\n\nimport helper\n\n\nclass PredictCommandLineParser(helper.ModelCommandLineParser):\n def __init__(self):\n super().__init__()\n self.build_predict_parser(self.parser)\n\n def build_predict_parser(self, parser):\n self.parser.add_argument(\"source_lang\", type=str, help=\"Source language\")\n self.parser.add_argument(\n \"source_vocab\",\n type=self.possible_gzipped_file,\n help=\"Source vocabulary file\",\n )\n self.parser.add_argument(\n \"target_vocab\",\n type=self.possible_gzipped_file,\n help=\"Target vocabulary file\",\n )\n parser.add_argument(\n \"model_path\",\n type=lambda p: self.possible_gzipped_file(p, \"rb\", encoding=None),\n help=\"Where the model was stored after training. Model parameters \"\n \"passed via command line should match those from training\",\n )\n parser.add_argument(\n \"--batch-size\",\n metavar=\"N\",\n type=self.lower_bound,\n default=100,\n help=\"The number of sequences to process at once\",\n )\n parser.add_argument(\n \"--device\",\n metavar=\"DEV\",\n type=torch.device,\n default=torch.device(\"cpu\"),\n help='Where to do training (e.g. \"cpu\", \"cuda\")',\n )\n parser.add_argument(\n \"--input-text\", type=str, default=\"Hello\", help=\"Text to translate to\"\n )\n\n\ndef predict(opts):\n\n # Get our current version of spacy\n spacy_instance = utils.get_spacy_instance(opts.source_lang)\n\n # Make the text lowercase and no EOF\n input_text = opts.input_text.lower().strip()\n\n # Parse input into tokens with spacy\n input_tokens = [token.text for token in spacy_instance.tokenizer(input_text)]\n\n print(\"Input:\", \" \".join(input_tokens))\n\n # Get the vocabs\n # TODO: Handle the case of translating from fr to en\n source_vocab = vocabs.load_vocabs_from_file(opts.source_vocab)\n target_vocab = vocabs.load_vocabs_from_file(opts.target_vocab)\n\n # Get the mappings\n source_word2id = source_vocab.get_word2id()\n target_word2id = target_vocab.get_word2id()\n\n source_id2word = source_vocab.get_id2word()\n target_id2word = target_vocab.get_id2word()\n\n source_vocab_size = len(source_word2id) + 2\n target_vocab_size = len(target_word2id) + 4\n\n src_unk, src_pad = range(len(source_word2id), source_vocab_size)\n trg_unk, trg_sos, trg_eos, trg_pad = range(len(target_word2id), target_vocab_size)\n\n model = helper.build_model(\n opts,\n source_vocab_size,\n target_vocab_size,\n src_pad,\n trg_sos,\n trg_eos,\n trg_pad,\n opts.device,\n )\n model.load_state_dict(torch.load(opts.model_path))\n model.eval()\n\n src = [torch.tensor([source_word2id[word] for word in input_tokens])]\n src_lens = torch.tensor([len(input_tokens)])\n src = torch.nn.utils.rnn.pad_sequence(src, padding_value=src_pad)\n\n predicted_words = None\n with torch.no_grad():\n\n # Get the output\n logits = model(src, src_lens)\n predicted_trg = logits.argmax(2)[0, :]\n\n # Remove the EOS and SOS\n predicted_trg = predicted_trg[1:-1]\n\n # Get the resultant sequence of words\n predicted_words = [\n target_id2word.get(word_id.item(), \"NAN\") for word_id in predicted_trg\n ]\n\n return predicted_words\n\n\nif __name__ == \"__main__\":\n parser = PredictCommandLineParser()\n 
opts = parser.get_options(sys.argv[1:])\n print(\"Predicted word:\", \" \".join(predict(opts)))\n","sub_path":"Translator/src/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"429037938","text":"from django.conf.urls import url\nfrom . import views\nfrom django.views.generic.base import TemplateView\nurlpatterns = [\n url(r'^headparser$',views.headparser,name='headparser'),\n]\nurlpatterns += [\n url(r'^timestamp/(?P